From aa2c8f0a125de50abd0929a5665431bbb3e01151 Mon Sep 17 00:00:00 2001 From: Ruslan Kamaldinov Date: Sat, 29 Nov 2014 06:18:55 +0300 Subject: [PATCH] Update from oslo incubator Details about this change: * Cleaned up openstack-common.conf, this file should contain only direct dependencies of Murano * Removed unused files from openstack/common * Moved xmlutils to murano/common; this module was removed from oslo-incubator long time ago, but is still used by common/wsgi.py, which also was deprecated by Oslo and adopted by Murano Change-Id: I118de30bb0bae577d24d86fa723522580beb13d0 --- murano/common/wsgi.py | 2 +- murano/{openstack => }/common/xmlutils.py | 0 murano/openstack/common/__init__.py | 17 -- murano/openstack/common/_i18n.py | 45 +++++ murano/openstack/common/config/__init__.py | 0 murano/openstack/common/context.py | 23 ++- murano/openstack/common/eventlet_backdoor.py | 9 +- murano/openstack/common/excutils.py | 113 ----------- murano/openstack/common/fileutils.py | 21 +- murano/openstack/common/lockutils.py | 92 ++------- murano/openstack/common/log.py | 195 +++++++++---------- murano/openstack/common/loopingcall.py | 29 +-- murano/openstack/common/network_utils.py | 89 --------- murano/openstack/common/policy.py | 103 +++++++--- murano/openstack/common/processutils.py | 64 ++++-- murano/openstack/common/service.py | 24 +-- murano/openstack/common/sslutils.py | 12 +- murano/openstack/common/test.py | 99 ---------- murano/openstack/common/threadgroup.py | 2 + murano/openstack/common/versionutils.py | 148 -------------- openstack-common.conf | 20 +- 21 files changed, 369 insertions(+), 738 deletions(-) rename murano/{openstack => }/common/xmlutils.py (100%) create mode 100644 murano/openstack/common/_i18n.py delete mode 100644 murano/openstack/common/config/__init__.py delete mode 100644 murano/openstack/common/excutils.py delete mode 100644 murano/openstack/common/network_utils.py delete mode 100644 murano/openstack/common/test.py delete mode 100644 murano/openstack/common/versionutils.py diff --git a/murano/common/wsgi.py b/murano/common/wsgi.py index 1973f1fb3..2b4f7436c 100644 --- a/murano/common/wsgi.py +++ b/murano/common/wsgi.py @@ -36,13 +36,13 @@ from xml.dom import minidom from xml.parsers import expat from murano.api.v1 import schemas +from murano.common import xmlutils from murano.openstack.common import exception from murano.openstack.common.gettextutils import _ from murano.openstack.common import jsonutils from murano.openstack.common import log as logging from murano.openstack.common import service from murano.openstack.common import sslutils -from murano.openstack.common import xmlutils socket_opts = [ cfg.IntOpt('backlog', diff --git a/murano/openstack/common/xmlutils.py b/murano/common/xmlutils.py similarity index 100% rename from murano/openstack/common/xmlutils.py rename to murano/common/xmlutils.py diff --git a/murano/openstack/common/__init__.py b/murano/openstack/common/__init__.py index d1223eaf7..e69de29bb 100644 --- a/murano/openstack/common/__init__.py +++ b/murano/openstack/common/__init__.py @@ -1,17 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - - -six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox')) diff --git a/murano/openstack/common/_i18n.py b/murano/openstack/common/_i18n.py new file mode 100644 index 000000000..ad8f739d1 --- /dev/null +++ b/murano/openstack/common/_i18n.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""oslo.i18n integration module. + +See http://docs.openstack.org/developer/oslo.i18n/usage.html + +""" + +try: + import oslo.i18n + + # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the + # application name when this module is synced into the separate + # repository. It is OK to have more than one translation function + # using the same domain, since there will still only be one message + # catalog. + _translators = oslo.i18n.TranslatorFactory(domain='murano') + + # The primary translation function using the well-known name "_" + _ = _translators.primary + + # Translators for log levels. + # + # The abbreviated names are meant to reflect the usual use of a short + # name like '_'. The "L" is for "log" and the other letter comes from + # the level. + _LI = _translators.log_info + _LW = _translators.log_warning + _LE = _translators.log_error + _LC = _translators.log_critical +except ImportError: + # NOTE(dims): Support for cases where a project wants to use + # code from murano-incubator, but is not ready to be internationalized + # (like tempest) + _ = _LI = _LW = _LE = _LC = lambda x: x diff --git a/murano/openstack/common/config/__init__.py b/murano/openstack/common/config/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/murano/openstack/common/context.py b/murano/openstack/common/context.py index 3eeb445e4..168989004 100644 --- a/murano/openstack/common/context.py +++ b/murano/openstack/common/context.py @@ -77,6 +77,21 @@ class RequestContext(object): 'instance_uuid': self.instance_uuid, 'user_identity': user_idt} + @classmethod + def from_dict(cls, ctx): + return cls( + auth_token=ctx.get("auth_token"), + user=ctx.get("user"), + tenant=ctx.get("tenant"), + domain=ctx.get("domain"), + user_domain=ctx.get("user_domain"), + project_domain=ctx.get("project_domain"), + is_admin=ctx.get("is_admin", False), + read_only=ctx.get("read_only", False), + show_deleted=ctx.get("show_deleted", False), + request_id=ctx.get("request_id"), + instance_uuid=ctx.get("instance_uuid")) + def get_admin_context(show_deleted=False): context = RequestContext(None, @@ -102,10 +117,6 @@ def get_context_from_function_and_args(function, args, kwargs): def is_user_context(context): """Indicates if the request context is a normal user.""" - if not context: + if not context or context.is_admin: return False - if context.is_admin: - return False - if not context.user_id or not context.project_id: - return False - return True + return context.user_id and context.project_id diff --git a/murano/openstack/common/eventlet_backdoor.py 
b/murano/openstack/common/eventlet_backdoor.py index 842f9d44c..593565ef2 100644 --- a/murano/openstack/common/eventlet_backdoor.py +++ b/murano/openstack/common/eventlet_backdoor.py @@ -16,6 +16,7 @@ from __future__ import print_function +import copy import errno import gc import os @@ -29,7 +30,7 @@ import eventlet.backdoor import greenlet from oslo.config import cfg -from murano.openstack.common.gettextutils import _LI +from murano.openstack.common._i18n import _LI from murano.openstack.common import log as logging help_for_backdoor_port = ( @@ -49,6 +50,12 @@ CONF.register_opts(eventlet_backdoor_opts) LOG = logging.getLogger(__name__) +def list_opts(): + """Entry point for oslo.config-generator. + """ + return [(None, copy.deepcopy(eventlet_backdoor_opts))] + + class EventletBackdoorConfigValueError(Exception): def __init__(self, port_range, help_msg, ex): msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. ' diff --git a/murano/openstack/common/excutils.py b/murano/openstack/common/excutils.py deleted file mode 100644 index 7d996e07b..000000000 --- a/murano/openstack/common/excutils.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# Copyright 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Exception related utilities. -""" - -import logging -import sys -import time -import traceback - -import six - -from murano.openstack.common.gettextutils import _LE - - -class save_and_reraise_exception(object): - """Save current exception, run some code and then re-raise. - - In some cases the exception context can be cleared, resulting in None - being attempted to be re-raised after an exception handler is run. This - can happen when eventlet switches greenthreads or when running an - exception handler, code raises and catches an exception. In both - cases the exception context will be cleared. - - To work around this, we save the exception state, run handler code, and - then re-raise the original exception. If another exception occurs, the - saved exception is logged and the new exception is re-raised. - - In some cases the caller may not want to re-raise the exception, and - for those circumstances this context provides a reraise flag that - can be used to suppress the exception. For example:: - - except Exception: - with save_and_reraise_exception() as ctxt: - decide_if_need_reraise() - if not should_be_reraised: - ctxt.reraise = False - - If another exception occurs and reraise flag is False, - the saved exception will not be logged. 
- - If the caller wants to raise new exception during exception handling - he/she sets reraise to False initially with an ability to set it back to - True if needed:: - - except Exception: - with save_and_reraise_exception(reraise=False) as ctxt: - [if statements to determine whether to raise a new exception] - # Not raising a new exception, so reraise - ctxt.reraise = True - """ - def __init__(self, reraise=True): - self.reraise = reraise - - def __enter__(self): - self.type_, self.value, self.tb, = sys.exc_info() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is not None: - if self.reraise: - logging.error(_LE('Original exception being dropped: %s'), - traceback.format_exception(self.type_, - self.value, - self.tb)) - return False - if self.reraise: - six.reraise(self.type_, self.value, self.tb) - - -def forever_retry_uncaught_exceptions(infunc): - def inner_func(*args, **kwargs): - last_log_time = 0 - last_exc_message = None - exc_count = 0 - while True: - try: - return infunc(*args, **kwargs) - except Exception as exc: - this_exc_message = six.u(str(exc)) - if this_exc_message == last_exc_message: - exc_count += 1 - else: - exc_count = 1 - # Do not log any more frequently than once a minute unless - # the exception message changes - cur_time = int(time.time()) - if (cur_time - last_log_time > 60 or - this_exc_message != last_exc_message): - logging.exception( - _LE('Unexpected exception occurred %d time(s)... ' - 'retrying.') % exc_count) - last_log_time = cur_time - last_exc_message = this_exc_message - exc_count = 0 - # This should be a very rare event. In case it isn't, do - # a sleep. - time.sleep(1) - return inner_func diff --git a/murano/openstack/common/fileutils.py b/murano/openstack/common/fileutils.py index c128e1b5e..ec26eaf9c 100644 --- a/murano/openstack/common/fileutils.py +++ b/murano/openstack/common/fileutils.py @@ -15,11 +15,11 @@ import contextlib import errno +import logging import os import tempfile -from murano.openstack.common import excutils -from murano.openstack.common import log as logging +from oslo.utils import excutils LOG = logging.getLogger(__name__) @@ -50,8 +50,8 @@ def read_cached_file(filename, force_reload=False): """ global _FILE_CACHE - if force_reload and filename in _FILE_CACHE: - del _FILE_CACHE[filename] + if force_reload: + delete_cached_file(filename) reloaded = False mtime = os.path.getmtime(filename) @@ -66,6 +66,17 @@ def read_cached_file(filename, force_reload=False): return (reloaded, cache_info['data']) +def delete_cached_file(filename): + """Delete cached file if present. + + :param filename: filename to delete + """ + global _FILE_CACHE + + if filename in _FILE_CACHE: + del _FILE_CACHE[filename] + + def delete_if_exists(path, remove=os.unlink): """Delete a file, but ignore file not found error. 
@@ -99,7 +110,7 @@ def remove_path_on_error(path, remove=delete_if_exists): def file_open(*args, **kwargs): """Open file - see built-in file() documentation for more details + see built-in open() documentation for more details Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system diff --git a/murano/openstack/common/lockutils.py b/murano/openstack/common/lockutils.py index 4b4b78bd8..77050b743 100644 --- a/murano/openstack/common/lockutils.py +++ b/murano/openstack/common/lockutils.py @@ -15,8 +15,8 @@ import contextlib import errno -import fcntl import functools +import logging import os import shutil import subprocess @@ -29,8 +29,7 @@ import weakref from oslo.config import cfg from murano.openstack.common import fileutils -from murano.openstack.common.gettextutils import _, _LE, _LI -from murano.openstack.common import log as logging +from murano.openstack.common._i18n import _, _LE, _LI LOG = logging.getLogger(__name__) @@ -102,10 +101,8 @@ class _FileLock(object): raise threading.ThreadError(_("Unable to acquire lock on" " `%(filename)s` due to" " %(exception)s") % - { - 'filename': self.fname, - 'exception': e, - }) + {'filename': self.fname, + 'exception': e}) def __enter__(self): self.acquire() @@ -149,56 +146,12 @@ class _FcntlLock(_FileLock): fcntl.lockf(self.lockfile, fcntl.LOCK_UN) -class _PosixLock(object): - def __init__(self, name): - # Hash the name because it's not valid to have POSIX semaphore - # names with things like / in them. Then use base64 to encode - # the digest() instead taking the hexdigest() because the - # result is shorter and most systems can't have shm sempahore - # names longer than 31 characters. - h = hashlib.sha1() - h.update(name.encode('ascii')) - self.name = str((b'/' + base64.urlsafe_b64encode( - h.digest())).decode('ascii')) - - def acquire(self, timeout=None): - self.semaphore = posix_ipc.Semaphore(self.name, - flags=posix_ipc.O_CREAT, - initial_value=1) - self.semaphore.acquire(timeout) - return self - - def __enter__(self): - self.acquire() - return self - - def release(self): - self.semaphore.release() - self.semaphore.close() - - def __exit__(self, exc_type, exc_val, exc_tb): - self.release() - - def exists(self): - try: - semaphore = posix_ipc.Semaphore(self.name) - except posix_ipc.ExistentialError: - return False - else: - semaphore.close() - return True - - if os.name == 'nt': import msvcrt InterProcessLock = _WindowsLock - FileLock = _WindowsLock else: - import base64 - import hashlib - import posix_ipc - InterProcessLock = _PosixLock - FileLock = _FcntlLock + import fcntl + InterProcessLock = _FcntlLock _semaphores = weakref.WeakValueDictionary() _semaphores_lock = threading.Lock() @@ -215,11 +168,7 @@ def _get_lock_path(name, lock_file_prefix, lock_path=None): local_lock_path = lock_path or CONF.lock_path if not local_lock_path: - # NOTE(bnemec): Create a fake lock path for posix locks so we don't - # unnecessarily raise the RequiredOptError below. 
- if InterProcessLock is not _PosixLock: - raise cfg.RequiredOptError('lock_path') - local_lock_path = 'posixlock:/' + raise cfg.RequiredOptError('lock_path') return os.path.join(local_lock_path, name) @@ -230,16 +179,11 @@ def external_lock(name, lock_file_prefix=None, lock_path=None): lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path) - # NOTE(bnemec): If an explicit lock_path was passed to us then it - # means the caller is relying on file-based locking behavior, so - # we can't use posix locks for those calls. - if lock_path: - return FileLock(lock_file_path) return InterProcessLock(lock_file_path) def remove_external_lock_file(name, lock_file_prefix=None): - """Remove a external lock file when it's not used anymore + """Remove an external lock file when it's not used anymore This will be helpful when we have a lot of lock files """ with internal_lock(name): @@ -255,11 +199,12 @@ def internal_lock(name): with _semaphores_lock: try: sem = _semaphores[name] + LOG.debug('Using existing semaphore "%s"', name) except KeyError: sem = threading.Semaphore() _semaphores[name] = sem + LOG.debug('Created new semaphore "%s"', name) - LOG.debug('Got semaphore "%(lock)s"', {'lock': name}) return sem @@ -281,13 +226,16 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None): """ int_lock = internal_lock(name) with int_lock: - if external and not CONF.disable_process_locking: - ext_lock = external_lock(name, lock_file_prefix, lock_path) - with ext_lock: - yield ext_lock - else: - yield int_lock - LOG.debug('Released semaphore "%(lock)s"', {'lock': name}) + LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name}) + try: + if external and not CONF.disable_process_locking: + ext_lock = external_lock(name, lock_file_prefix, lock_path) + with ext_lock: + yield ext_lock + else: + yield int_lock + finally: + LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name}) def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): diff --git a/murano/openstack/common/log.py b/murano/openstack/common/log.py index f2a39a3a8..95693906b 100644 --- a/murano/openstack/common/log.py +++ b/murano/openstack/common/log.py @@ -27,48 +27,31 @@ It also allows setting of formatting information through conf. """ +import copy import inspect import itertools import logging import logging.config import logging.handlers import os -import re +import socket import sys import traceback from oslo.config import cfg +from oslo.serialization import jsonutils +from oslo.utils import importutils import six from six import moves -from murano.openstack.common.gettextutils import _ -from murano.openstack.common import importutils -from murano.openstack.common import jsonutils +_PY26 = sys.version_info[0:2] == (2, 6) + +from murano.openstack.common._i18n import _ from murano.openstack.common import local _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" -_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] - -# NOTE(ldbragst): Let's build a list of regex objects using the list of -# _SANITIZE_KEYS we already have. This way, we only have to add the new key -# to the list of _SANITIZE_KEYS and we can generate regular expressions -# for XML and JSON automatically. 
-_SANITIZE_PATTERNS = [] -_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', - r'(<%(key)s>).*?()', - r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', - r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', - r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])' - '.*?([\'"])', - r'(%(key)s\s*--?[A-z]+\s*).*?([\s])'] - -for key in _SANITIZE_KEYS: - for pattern in _FORMAT_PATTERNS: - reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) - _SANITIZE_PATTERNS.append(reg_ex) - common_cli_opts = [ cfg.BoolOpt('debug', @@ -117,7 +100,7 @@ logging_cli_opts = [ default=False, help='Use syslog for logging. ' 'Existing syslog format is DEPRECATED during I, ' - 'and will chang in J to honor RFC5424.'), + 'and will change in J to honor RFC5424.'), cfg.BoolOpt('use-syslog-rfc-format', # TODO(bogdando) remove or use True after existing # syslog format deprecation in J @@ -138,6 +121,14 @@ generic_log_opts = [ help='Log output to standard error.') ] +DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', + 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', + 'oslo.messaging=INFO', 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN', + 'urllib3.connectionpool=WARN', 'websocket=WARN', + "keystonemiddleware=WARN", "routes.middleware=WARN", + "stevedore=WARN"] + log_opts = [ cfg.StrOpt('logging_context_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' @@ -156,17 +147,7 @@ log_opts = [ '%(instance)s', help='Prefix each line of exception output with this format.'), cfg.ListOpt('default_log_levels', - default=[ - 'amqp=WARN', - 'amqplib=WARN', - 'boto=WARN', - 'qpid=WARN', - 'sqlalchemy=WARN', - 'suds=INFO', - 'oslo.messaging=INFO', - 'iso8601=WARN', - 'requests.packages.urllib3.connectionpool=WARN' - ], + default=DEFAULT_LOG_LEVELS, help='List of logger=LEVEL pairs.'), cfg.BoolOpt('publish_errors', default=False, @@ -181,11 +162,11 @@ log_opts = [ cfg.StrOpt('instance_format', default='[instance: %(uuid)s] ', help='The format for an instance that is passed with the log ' - 'message. '), + 'message.'), cfg.StrOpt('instance_uuid_format', default='[instance: %(uuid)s] ', help='The format for an instance UUID that is passed with the ' - 'log message. '), + 'log message.'), ] CONF = cfg.CONF @@ -194,6 +175,16 @@ CONF.register_cli_opts(logging_cli_opts) CONF.register_opts(generic_log_opts) CONF.register_opts(log_opts) + +def list_opts(): + """Entry point for oslo.config-generator.""" + return [(None, copy.deepcopy(common_cli_opts)), + (None, copy.deepcopy(logging_cli_opts)), + (None, copy.deepcopy(generic_log_opts)), + (None, copy.deepcopy(log_opts)), + ] + + # our new audit level # NOTE(jkoelker) Since we synthesized an audit level, make the logging # module aware of it so it acts like other levels. @@ -244,45 +235,20 @@ def _get_log_file_path(binary=None): return None -def mask_password(message, secret="***"): - """Replace password with 'secret' in message. - - :param message: The string which includes security information. - :param secret: value with which to replace passwords. - :returns: The unicode value of message with the password fields masked. 
- - For example: - - >>> mask_password("'adminPass' : 'aaaaa'") - "'adminPass' : '***'" - >>> mask_password("'admin_pass' : 'aaaaa'") - "'admin_pass' : '***'" - >>> mask_password('"password" : "aaaaa"') - '"password" : "***"' - >>> mask_password("'original_password' : 'aaaaa'") - "'original_password' : '***'" - >>> mask_password("u'original_password' : u'aaaaa'") - "u'original_password' : u'***'" - """ - message = six.text_type(message) - - # NOTE(ldbragst): Check to see if anything in message contains any key - # specified in _SANITIZE_KEYS, if not then just return the message since - # we don't have to mask any passwords. - if not any(key in message for key in _SANITIZE_KEYS): - return message - - secret = r'\g<1>' + secret + r'\g<2>' - for pattern in _SANITIZE_PATTERNS: - message = re.sub(pattern, secret, message) - return message - - class BaseLoggerAdapter(logging.LoggerAdapter): def audit(self, msg, *args, **kwargs): self.log(logging.AUDIT, msg, *args, **kwargs) + def isEnabledFor(self, level): + if _PY26: + # This method was added in python 2.7 (and it does the exact + # same logic, so we need to do the exact same logic so that + # python 2.6 has this capability as well). + return self.logger.isEnabledFor(level) + else: + return super(BaseLoggerAdapter, self).isEnabledFor(level) + class LazyAdapter(BaseLoggerAdapter): def __init__(self, name='unknown', version='unknown'): @@ -295,6 +261,11 @@ class LazyAdapter(BaseLoggerAdapter): def logger(self): if not self._logger: self._logger = getLogger(self.name, self.version) + if six.PY3: + # In Python 3, the code fails because the 'manager' attribute + # cannot be found when using a LoggerAdapter as the + # underlying logger. Work around this issue. + self._logger.manager = self._logger.logger.manager return self._logger @@ -340,11 +311,10 @@ class ContextAdapter(BaseLoggerAdapter): self.warn(stdmsg, *args, **kwargs) def process(self, msg, kwargs): - # NOTE(mrodden): catch any Message/other object and - # coerce to unicode before they can get - # to the python logging and possibly - # cause string encoding trouble - if not isinstance(msg, six.string_types): + # NOTE(jecarey): If msg is not unicode, coerce it into unicode + # before it can get to the python logging and + # possibly cause string encoding trouble + if not isinstance(msg, six.text_type): msg = six.text_type(msg) if 'extra' not in kwargs: @@ -424,9 +394,7 @@ class JSONFormatter(logging.Formatter): def _create_logging_excepthook(product_name): def logging_excepthook(exc_type, value, tb): - extra = {} - if CONF.verbose or CONF.debug: - extra['exc_info'] = (exc_type, value, tb) + extra = {'exc_info': (exc_type, value, tb)} getLogger(product_name).critical( "".join(traceback.format_exception_only(exc_type, value)), **extra) @@ -450,7 +418,7 @@ def _load_log_config(log_config_append): try: logging.config.fileConfig(log_config_append, disable_existing_loggers=False) - except moves.configparser.Error as exc: + except (moves.configparser.Error, KeyError) as exc: raise LogConfigError(log_config_append, six.text_type(exc)) @@ -463,10 +431,20 @@ def setup(product_name, version='unknown'): sys.excepthook = _create_logging_excepthook(product_name) -def set_defaults(logging_context_format_string): - cfg.set_defaults(log_opts, - logging_context_format_string= - logging_context_format_string) +def set_defaults(logging_context_format_string=None, + default_log_levels=None): + # Just in case the caller is not setting the + # default_log_level. 
This is insurance because + # we introduced the default_log_level parameter + # later in a backwards in-compatible change + if default_log_levels is not None: + cfg.set_defaults( + log_opts, + default_log_levels=default_log_levels) + if logging_context_format_string is not None: + cfg.set_defaults( + log_opts, + logging_context_format_string=logging_context_format_string) def _find_facility_from_conf(): @@ -515,18 +493,6 @@ def _setup_logging_from_conf(project, version): for handler in log_root.handlers: log_root.removeHandler(handler) - if CONF.use_syslog: - facility = _find_facility_from_conf() - # TODO(bogdando) use the format provided by RFCSysLogHandler - # after existing syslog format deprecation in J - if CONF.use_syslog_rfc_format: - syslog = RFCSysLogHandler(address='/dev/log', - facility=facility) - else: - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) - log_root.addHandler(syslog) - logpath = _get_log_file_path() if logpath: filelog = logging.handlers.WatchedFileHandler(logpath) @@ -544,7 +510,7 @@ def _setup_logging_from_conf(project, version): if CONF.publish_errors: handler = importutils.import_object( - "murano.openstack.common.log_handler.PublishErrorsHandler", + "oslo.messaging.notify.log_handler.PublishErrorsHandler", logging.ERROR) log_root.addHandler(handler) @@ -580,6 +546,22 @@ def _setup_logging_from_conf(project, version): else: logger.setLevel(level_name) + if CONF.use_syslog: + try: + facility = _find_facility_from_conf() + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(address='/dev/log', + facility=facility) + else: + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + except socket.error: + log_root.error('Unable to add syslog handler. Verify that syslog ' + 'is running.') + _loggers = {} @@ -649,6 +631,12 @@ class ContextFormatter(logging.Formatter): def format(self, record): """Uses contextstring if request_id is set, otherwise default.""" + # NOTE(jecarey): If msg is not unicode, coerce it into unicode + # before it can get to the python logging and + # possibly cause string encoding trouble + if not isinstance(record.msg, six.text_type): + record.msg = six.text_type(record.msg) + # store project info record.project = self.project record.version = self.version @@ -668,14 +656,19 @@ class ContextFormatter(logging.Formatter): record.__dict__[key] = '' if record.__dict__.get('request_id'): - self._fmt = CONF.logging_context_format_string + fmt = CONF.logging_context_format_string else: - self._fmt = CONF.logging_default_format_string + fmt = CONF.logging_default_format_string if (record.levelno == logging.DEBUG and CONF.logging_debug_format_suffix): - self._fmt += " " + CONF.logging_debug_format_suffix + fmt += " " + CONF.logging_debug_format_suffix + if sys.version_info < (3, 2): + self._fmt = fmt + else: + self._style = logging.PercentStyle(fmt) + self._fmt = self._style._fmt # Cache this on the record, Logger will respect our formatted copy if record.exc_info: record.exc_text = self.formatException(record.exc_info, record) diff --git a/murano/openstack/common/loopingcall.py b/murano/openstack/common/loopingcall.py index c462504c4..e0754d78f 100644 --- a/murano/openstack/common/loopingcall.py +++ b/murano/openstack/common/loopingcall.py @@ -16,16 +16,21 @@ # under the License. 
import sys +import time from eventlet import event from eventlet import greenthread -from murano.openstack.common.gettextutils import _LE, _LW +from murano.openstack.common._i18n import _LE, _LW from murano.openstack.common import log as logging -from murano.openstack.common import timeutils LOG = logging.getLogger(__name__) +# NOTE(zyluo): This lambda function was declared to avoid mocking collisions +# with time.time() called in the standard logging module +# during unittests. +_ts = lambda: time.time() + class LoopingCallDone(Exception): """Exception to break out and stop a LoopingCallBase. @@ -72,16 +77,17 @@ class FixedIntervalLoopingCall(LoopingCallBase): try: while self._running: - start = timeutils.utcnow() + start = _ts() self.f(*self.args, **self.kw) - end = timeutils.utcnow() + end = _ts() if not self._running: break - delay = interval - timeutils.delta_seconds(start, end) - if delay <= 0: - LOG.warn(_LW('task run outlasted interval by %s sec') % - -delay) - greenthread.sleep(delay if delay > 0 else 0) + delay = end - start - interval + if delay > 0: + LOG.warn(_LW('task %(func_name)s run outlasted ' + 'interval by %(delay).2f sec'), + {'func_name': repr(self.f), 'delay': delay}) + greenthread.sleep(-delay if delay < 0 else 0) except LoopingCallDone as e: self.stop() done.send(e.retvalue) @@ -121,8 +127,9 @@ class DynamicLoopingCall(LoopingCallBase): if periodic_interval_max is not None: idle = min(idle, periodic_interval_max) - LOG.debug('Dynamic looping call sleeping for %.02f ' - 'seconds', idle) + LOG.debug('Dynamic looping call %(func_name)s sleeping ' + 'for %(idle).02f seconds', + {'func_name': repr(self.f), 'idle': idle}) greenthread.sleep(idle) except LoopingCallDone as e: self.stop() diff --git a/murano/openstack/common/network_utils.py b/murano/openstack/common/network_utils.py deleted file mode 100644 index d9640d15a..000000000 --- a/murano/openstack/common/network_utils.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Network-related utilities and helper functions. -""" - -# TODO(jd) Use six.moves once -# https://bitbucket.org/gutworth/six/pull-request/28 -# is merged -try: - import urllib.parse - SplitResult = urllib.parse.SplitResult -except ImportError: - import urlparse - SplitResult = urlparse.SplitResult - -from six.moves.urllib import parse - - -def parse_host_port(address, default_port=None): - """Interpret a string as a host:port pair. - - An IPv6 address MUST be escaped if accompanied by a port, - because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 - means both [2001:db8:85a3::8a2e:370:7334] and - [2001:db8:85a3::8a2e:370]:7334. 
- - >>> parse_host_port('server01:80') - ('server01', 80) - >>> parse_host_port('server01') - ('server01', None) - >>> parse_host_port('server01', default_port=1234) - ('server01', 1234) - >>> parse_host_port('[::1]:80') - ('::1', 80) - >>> parse_host_port('[::1]') - ('::1', None) - >>> parse_host_port('[::1]', default_port=1234) - ('::1', 1234) - >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) - ('2001:db8:85a3::8a2e:370:7334', 1234) - - """ - if address[0] == '[': - # Escaped ipv6 - _host, _port = address[1:].split(']') - host = _host - if ':' in _port: - port = _port.split(':')[1] - else: - port = default_port - else: - if address.count(':') == 1: - host, port = address.split(':') - else: - # 0 means ipv4, >1 means ipv6. - # We prohibit unescaped ipv6 addresses with port. - host = address - port = default_port - - return (host, None if port is None else int(port)) - - -def urlsplit(url, scheme='', allow_fragments=True): - """Parse a URL using urlparse.urlsplit(), splitting query and fragments. - This function papers over Python issue9374 when needed. - - The parameters are the same as urlparse.urlsplit. - """ - scheme, netloc, path, query, fragment = parse.urlsplit( - url, scheme, allow_fragments) - if allow_fragments and '#' in path: - path, fragment = path.split('#', 1) - if '?' in path: - path, query = path.split('?', 1) - return SplitResult(scheme, netloc, path, query, fragment) diff --git a/murano/openstack/common/policy.py b/murano/openstack/common/policy.py index fe34c470b..cd954d57e 100644 --- a/murano/openstack/common/policy.py +++ b/murano/openstack/common/policy.py @@ -77,16 +77,18 @@ as it allows particular rules to be explicitly disabled. import abc import ast +import copy +import os import re from oslo.config import cfg +from oslo.serialization import jsonutils import six import six.moves.urllib.parse as urlparse import six.moves.urllib.request as urlrequest from murano.openstack.common import fileutils -from murano.openstack.common.gettextutils import _, _LE -from murano.openstack.common import jsonutils +from murano.openstack.common._i18n import _, _LE, _LI from murano.openstack.common import log as logging @@ -98,6 +100,14 @@ policy_opts = [ default='default', help=_('Default rule. Enforced when a requested rule is not ' 'found.')), + cfg.MultiStrOpt('policy_dirs', + default=['policy.d'], + help=_('Directories where policy configuration files are ' + 'stored. They can be relative to any directory ' + 'in the search path defined by the config_dir ' + 'option, or absolute paths. The file defined by ' + 'policy_file must exist for these directories to ' + 'be searched.')), ] CONF = cfg.CONF @@ -108,6 +118,11 @@ LOG = logging.getLogger(__name__) _checks = {} +def list_opts(): + """Entry point for oslo.config-generator.""" + return [(None, copy.deepcopy(policy_opts))] + + class PolicyNotAuthorized(Exception): def __init__(self, rule): @@ -184,16 +199,19 @@ class Enforcer(object): :param default_rule: Default rule to use, CONF.default_rule will be used if none is specified. :param use_conf: Whether to load rules from cache or config file. + :param overwrite: Whether to overwrite existing rules when reload rules + from config file. 
""" def __init__(self, policy_file=None, rules=None, - default_rule=None, use_conf=True): - self.rules = Rules(rules, default_rule) + default_rule=None, use_conf=True, overwrite=True): self.default_rule = default_rule or CONF.policy_default_rule + self.rules = Rules(rules, self.default_rule) self.policy_path = None self.policy_file = policy_file or CONF.policy_file self.use_conf = use_conf + self.overwrite = overwrite def set_rules(self, rules, overwrite=True, use_conf=False): """Create a new Rules object based on the provided dict of rules. @@ -216,6 +234,7 @@ class Enforcer(object): def clear(self): """Clears Enforcer rules, policy's cache and policy's path.""" self.set_rules({}) + fileutils.delete_cached_file(self.policy_path) self.default_rule = None self.policy_path = None @@ -224,7 +243,7 @@ class Enforcer(object): Policy file is cached and will be reloaded if modified. - :param force_reload: Whether to overwrite current rules. + :param force_reload: Whether to reload rules from config file. """ if force_reload: @@ -232,31 +251,55 @@ class Enforcer(object): if self.use_conf: if not self.policy_path: - self.policy_path = self._get_policy_path() + self.policy_path = self._get_policy_path(self.policy_file) + self._load_policy_file(self.policy_path, force_reload, + overwrite=self.overwrite) + for path in CONF.policy_dirs: + try: + path = self._get_policy_path(path) + except cfg.ConfigFilesNotFoundError: + LOG.info(_LI("Can not find policy directory: %s"), path) + continue + self._walk_through_policy_directory(path, + self._load_policy_file, + force_reload, False) + + @staticmethod + def _walk_through_policy_directory(path, func, *args): + # We do not iterate over sub-directories. + policy_files = next(os.walk(path))[2] + policy_files.sort() + for policy_file in [p for p in policy_files if not p.startswith('.')]: + func(os.path.join(path, policy_file), *args) + + def _load_policy_file(self, path, force_reload, overwrite=True): reloaded, data = fileutils.read_cached_file( - self.policy_path, force_reload=force_reload) - if reloaded or not self.rules: + path, force_reload=force_reload) + if reloaded or not self.rules or not overwrite: rules = Rules.load_json(data, self.default_rule) - self.set_rules(rules) + self.set_rules(rules, overwrite=overwrite, use_conf=True) LOG.debug("Rules successfully reloaded") - def _get_policy_path(self): - """Locate the policy json data file. + def _get_policy_path(self, path): + """Locate the policy json data file/path. - :param policy_file: Custom policy file to locate. + :param path: It's value can be a full path or related path. When + full path specified, this function just returns the full + path. When related path specified, this function will + search configuration directories to find one that exists. :returns: The policy path - :raises: ConfigFilesNotFoundError if the file couldn't + :raises: ConfigFilesNotFoundError if the file/path couldn't be located. """ - policy_file = CONF.find_file(self.policy_file) + policy_path = CONF.find_file(path) - if policy_file: - return policy_file + if policy_path: + return policy_path - raise cfg.ConfigFilesNotFoundError((self.policy_file,)) + raise cfg.ConfigFilesNotFoundError((path,)) def enforce(self, rule, target, creds, do_raise=False, exc=None, *args, **kwargs): @@ -271,7 +314,7 @@ class Enforcer(object): :param do_raise: Whether to raise an exception or not if check fails. :param exc: Class of the exception to raise if the check fails. 
- Any remaining arguments passed to check() (both + Any remaining arguments passed to enforce() (both positional and keyword arguments) will be passed to the exception class. If not specified, PolicyNotAuthorized will be used. @@ -283,10 +326,6 @@ class Enforcer(object): from the expression. """ - # NOTE(flaper87): Not logging target or creds to avoid - # potential security issues. - LOG.debug("Rule %s will be now enforced" % rule) - self.load_rules() # Allow the rule to be a Check tree @@ -788,7 +827,7 @@ def _parse_text_rule(rule): return state.result except ValueError: # Couldn't parse the rule - LOG.exception(_LE("Failed to understand rule %r") % rule) + LOG.exception(_LE("Failed to understand rule %s") % rule) # Fail closed return FalseCheck() @@ -859,7 +898,17 @@ class HttpCheck(Check): """ url = ('http:' + self.match) % target - data = {'target': jsonutils.dumps(target), + + # Convert instances of object() in target temporarily to + # empty dict to avoid circular reference detection + # errors in jsonutils.dumps(). + temp_target = copy.deepcopy(target) + for key in target.keys(): + element = target.get(key) + if type(element) is object: + temp_target[key] = {} + + data = {'target': jsonutils.dumps(temp_target), 'credentials': jsonutils.dumps(creds)} post_data = urlparse.urlencode(data) f = urlrequest.urlopen(url, post_data) @@ -879,7 +928,6 @@ class GenericCheck(Check): 'Member':%(role.name)s """ - # TODO(termie): do dict inspection via dot syntax try: match = self.match % target except KeyError: @@ -892,7 +940,10 @@ class GenericCheck(Check): leftval = ast.literal_eval(self.kind) except ValueError: try: - leftval = creds[self.kind] + kind_parts = self.kind.split('.') + leftval = creds + for kind_part in kind_parts: + leftval = leftval[kind_part] except KeyError: return False return match == six.text_type(leftval) diff --git a/murano/openstack/common/processutils.py b/murano/openstack/common/processutils.py index 9da806c4b..2294f34df 100644 --- a/murano/openstack/common/processutils.py +++ b/murano/openstack/common/processutils.py @@ -18,7 +18,8 @@ System-level utilities and helper functions. """ import errno -import logging as stdlib_logging +import logging +import multiprocessing import os import random import shlex @@ -26,10 +27,10 @@ import signal from eventlet.green import subprocess from eventlet import greenthread +from oslo.utils import strutils import six -from murano.openstack.common.gettextutils import _ -from murano.openstack.common import log as logging +from murano.openstack.common._i18n import _ LOG = logging.getLogger(__name__) @@ -90,6 +91,9 @@ def execute(*cmd, **kwargs): :type cmd: string :param process_input: Send to opened process. :type process_input: string + :param env_variables: Environment variables and their values that + will be set for the process. + :type env_variables: dict :param check_exit_code: Single bool, int, or list of allowed exit codes. Defaults to [0]. Raise :class:`ProcessExecutionError` unless @@ -111,8 +115,7 @@ def execute(*cmd, **kwargs): execute this command. Defaults to false. :type shell: boolean :param loglevel: log level for execute commands. - :type loglevel: int. (Should be stdlib_logging.DEBUG or - stdlib_logging.INFO) + :type loglevel: int. 
(Should be logging.DEBUG or logging.INFO) :returns: (stdout, stderr) from process execution :raises: :class:`UnknownArgumentError` on receiving unknown arguments @@ -120,6 +123,7 @@ def execute(*cmd, **kwargs): """ process_input = kwargs.pop('process_input', None) + env_variables = kwargs.pop('env_variables', None) check_exit_code = kwargs.pop('check_exit_code', [0]) ignore_exit_code = False delay_on_retry = kwargs.pop('delay_on_retry', True) @@ -127,7 +131,7 @@ def execute(*cmd, **kwargs): run_as_root = kwargs.pop('run_as_root', False) root_helper = kwargs.pop('root_helper', '') shell = kwargs.pop('shell', False) - loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG) + loglevel = kwargs.pop('loglevel', logging.DEBUG) if isinstance(check_exit_code, bool): ignore_exit_code = not check_exit_code @@ -136,8 +140,7 @@ def execute(*cmd, **kwargs): check_exit_code = [check_exit_code] if kwargs: - raise UnknownArgumentError(_('Got unknown keyword args ' - 'to utils.execute: %r') % kwargs) + raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs) if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: if not root_helper: @@ -147,12 +150,12 @@ def execute(*cmd, **kwargs): cmd = shlex.split(root_helper) + list(cmd) cmd = map(str, cmd) + sanitized_cmd = strutils.mask_password(' '.join(cmd)) while attempts > 0: attempts -= 1 try: - LOG.log(loglevel, 'Running cmd (subprocess): %s', - ' '.join(cmd)) + LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd) _PIPE = subprocess.PIPE # pylint: disable=E1101 if os.name == 'nt': @@ -168,7 +171,8 @@ def execute(*cmd, **kwargs): stderr=_PIPE, close_fds=close_fds, preexec_fn=preexec_fn, - shell=shell) + shell=shell, + env=env_variables) result = None for _i in six.moves.range(20): # NOTE(russellb) 20 is an arbitrary number of retries to @@ -188,16 +192,18 @@ def execute(*cmd, **kwargs): LOG.log(loglevel, 'Result was %s' % _returncode) if not ignore_exit_code and _returncode not in check_exit_code: (stdout, stderr) = result + sanitized_stdout = strutils.mask_password(stdout) + sanitized_stderr = strutils.mask_password(stderr) raise ProcessExecutionError(exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) + stdout=sanitized_stdout, + stderr=sanitized_stderr, + cmd=sanitized_cmd) return result except ProcessExecutionError: if not attempts: raise else: - LOG.log(loglevel, '%r failed. Retrying.', cmd) + LOG.log(loglevel, _('%r failed. Retrying.'), sanitized_cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: @@ -224,7 +230,7 @@ def trycmd(*args, **kwargs): out, err = execute(*args, **kwargs) failed = False except ProcessExecutionError as exn: - out, err = '', str(exn) + out, err = '', six.text_type(exn) failed = True if not failed and discard_warnings and err: @@ -236,7 +242,8 @@ def trycmd(*args, **kwargs): def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): - LOG.debug('Running cmd (SSH): %s', cmd) + sanitized_cmd = strutils.mask_password(cmd) + LOG.debug('Running cmd (SSH): %s', sanitized_cmd) if addl_env: raise InvalidArgumentError(_('Environment not supported over SSH')) @@ -250,7 +257,10 @@ def ssh_execute(ssh, cmd, process_input=None, # NOTE(justinsb): This seems suspicious... 
# ...other SSH clients have buffering issues with this approach stdout = stdout_stream.read() + sanitized_stdout = strutils.mask_password(stdout) stderr = stderr_stream.read() + sanitized_stderr = strutils.mask_password(stderr) + stdin_stream.close() exit_status = channel.recv_exit_status() @@ -260,8 +270,20 @@ def ssh_execute(ssh, cmd, process_input=None, LOG.debug('Result was %s' % exit_status) if check_exit_code and exit_status != 0: raise ProcessExecutionError(exit_code=exit_status, - stdout=stdout, - stderr=stderr, - cmd=cmd) + stdout=sanitized_stdout, + stderr=sanitized_stderr, + cmd=sanitized_cmd) - return (stdout, stderr) + return (sanitized_stdout, sanitized_stderr) + + +def get_worker_count(): + """Utility to get the default worker count. + + @return: The number of CPUs if that can be determined, else a default + worker count of 1 is returned. + """ + try: + return multiprocessing.cpu_count() + except NotImplementedError: + return 1 diff --git a/murano/openstack/common/service.py b/murano/openstack/common/service.py index 36b978150..7d8e5d3fe 100644 --- a/murano/openstack/common/service.py +++ b/murano/openstack/common/service.py @@ -38,14 +38,12 @@ from eventlet import event from oslo.config import cfg from murano.openstack.common import eventlet_backdoor -from murano.openstack.common.gettextutils import _LE, _LI, _LW -from murano.openstack.common import importutils +from murano.openstack.common._i18n import _LE, _LI, _LW from murano.openstack.common import log as logging from murano.openstack.common import systemd from murano.openstack.common import threadgroup -rpc = importutils.try_import('murano.openstack.common.rpc') CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -180,12 +178,6 @@ class ServiceLauncher(Launcher): status = exc.code finally: self.stop() - if rpc: - try: - rpc.cleanup() - except Exception: - # We're shutting down, so it doesn't matter at this point. - LOG.exception(_LE('Exception during rpc cleanup.')) return status, signo @@ -391,9 +383,12 @@ class ProcessLauncher(object): while True: self.handle_signal() self._respawn_children() - if self.sigcaught: - signame = _signo_to_signame(self.sigcaught) - LOG.info(_LI('Caught %s, stopping children'), signame) + # No signal means that stop was called. Don't clean up here. + if not self.sigcaught: + return + + signame = _signo_to_signame(self.sigcaught) + LOG.info(_LI('Caught %s, stopping children'), signame) if not _is_sighup_and_daemon(self.sigcaught): break @@ -404,6 +399,11 @@ class ProcessLauncher(object): except eventlet.greenlet.GreenletExit: LOG.info(_LI("Wait called after thread killed. Cleaning up.")) + self.stop() + + def stop(self): + """Terminate child processes and wait on each.""" + self.running = False for pid in self.children: try: os.kill(pid, signal.SIGTERM) diff --git a/murano/openstack/common/sslutils.py b/murano/openstack/common/sslutils.py index 578622511..dc40b82ad 100644 --- a/murano/openstack/common/sslutils.py +++ b/murano/openstack/common/sslutils.py @@ -12,12 +12,13 @@ # License for the specific language governing permissions and limitations # under the License. 
+import copy import os import ssl from oslo.config import cfg -from murano.openstack.common.gettextutils import _ +from murano.openstack.common._i18n import _ ssl_opts = [ @@ -32,9 +33,14 @@ ssl_opts = [ "the server securely."), ] - CONF = cfg.CONF -CONF.register_opts(ssl_opts, "ssl") +config_section = 'ssl' +CONF.register_opts(ssl_opts, config_section) + + +def list_opts(): + """Entry point for oslo.config-generator.""" + return [(config_section, copy.deepcopy(ssl_opts))] def is_enabled(): diff --git a/murano/openstack/common/test.py b/murano/openstack/common/test.py deleted file mode 100644 index 299b55188..000000000 --- a/murano/openstack/common/test.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -############################################################################## -############################################################################## -## -## DO NOT MODIFY THIS FILE -## -## This file is being graduated to the muranoapitest library. Please make all -## changes there, and only backport critical fixes here. - dhellmann -## -############################################################################## -############################################################################## - -"""Common utilities used in testing""" - -import logging -import os -import tempfile - -import fixtures -import testtools - -_TRUE_VALUES = ('True', 'true', '1', 'yes') -_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s" - - -class BaseTestCase(testtools.TestCase): - - def setUp(self): - super(BaseTestCase, self).setUp() - self._set_timeout() - self._fake_output() - self._fake_logs() - self.useFixture(fixtures.NestedTempfile()) - self.useFixture(fixtures.TempHomeDir()) - self.tempdirs = [] - - def _set_timeout(self): - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. 
- test_timeout = 0 - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - - def _fake_output(self): - if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) - - def _fake_logs(self): - if os.environ.get('OS_DEBUG') in _TRUE_VALUES: - level = logging.DEBUG - else: - level = logging.INFO - capture_logs = os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES - if capture_logs: - self.useFixture( - fixtures.FakeLogger( - format=_LOG_FORMAT, - level=level, - nuke_handlers=capture_logs, - ) - ) - else: - logging.basicConfig(format=_LOG_FORMAT, level=level) - - def create_tempfiles(self, files, ext='.conf'): - tempfiles = [] - for (basename, contents) in files: - if not os.path.isabs(basename): - (fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext) - else: - path = basename + ext - fd = os.open(path, os.O_CREAT | os.O_WRONLY) - tempfiles.append(path) - try: - os.write(fd, contents) - finally: - os.close(fd) - return tempfiles diff --git a/murano/openstack/common/threadgroup.py b/murano/openstack/common/threadgroup.py index 60d591fdb..f59653e07 100644 --- a/murano/openstack/common/threadgroup.py +++ b/murano/openstack/common/threadgroup.py @@ -96,6 +96,8 @@ class ThreadGroup(object): continue try: x.stop() + except eventlet.greenlet.GreenletExit: + pass except Exception as ex: LOG.exception(ex) diff --git a/murano/openstack/common/versionutils.py b/murano/openstack/common/versionutils.py deleted file mode 100644 index edd13e2ce..000000000 --- a/murano/openstack/common/versionutils.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helpers for comparing version strings. -""" - -import functools -import pkg_resources - -from murano.openstack.common.gettextutils import _ -from murano.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -class deprecated(object): - """A decorator to mark callables as deprecated. - - This decorator logs a deprecation message when the callable it decorates is - used. The message will include the release where the callable was - deprecated, the release where it may be removed and possibly an optional - replacement. - - Examples: - - 1. Specifying the required deprecated release - - >>> @deprecated(as_of=deprecated.ICEHOUSE) - ... def a(): pass - - 2. Specifying a replacement: - - >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()') - ... def b(): pass - - 3. Specifying the release where the functionality may be removed: - - >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1) - ... 
def c(): pass - - """ - - FOLSOM = 'F' - GRIZZLY = 'G' - HAVANA = 'H' - ICEHOUSE = 'I' - - _RELEASES = { - 'F': 'Folsom', - 'G': 'Grizzly', - 'H': 'Havana', - 'I': 'Icehouse', - } - - _deprecated_msg_with_alternative = _( - '%(what)s is deprecated as of %(as_of)s in favor of ' - '%(in_favor_of)s and may be removed in %(remove_in)s.') - - _deprecated_msg_no_alternative = _( - '%(what)s is deprecated as of %(as_of)s and may be ' - 'removed in %(remove_in)s. It will not be superseded.') - - def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None): - """Initialize decorator - - :param as_of: the release deprecating the callable. Constants - are define in this class for convenience. - :param in_favor_of: the replacement for the callable (optional) - :param remove_in: an integer specifying how many releases to wait - before removing (default: 2) - :param what: name of the thing being deprecated (default: the - callable's name) - - """ - self.as_of = as_of - self.in_favor_of = in_favor_of - self.remove_in = remove_in - self.what = what - - def __call__(self, func): - if not self.what: - self.what = func.__name__ + '()' - - @functools.wraps(func) - def wrapped(*args, **kwargs): - msg, details = self._build_message() - LOG.deprecated(msg, details) - return func(*args, **kwargs) - return wrapped - - def _get_safe_to_remove_release(self, release): - # TODO(dstanek): this method will have to be reimplemented once - # when we get to the X release because once we get to the Y - # release, what is Y+2? - new_release = chr(ord(release) + self.remove_in) - if new_release in self._RELEASES: - return self._RELEASES[new_release] - else: - return new_release - - def _build_message(self): - details = dict(what=self.what, - as_of=self._RELEASES[self.as_of], - remove_in=self._get_safe_to_remove_release(self.as_of)) - - if self.in_favor_of: - details['in_favor_of'] = self.in_favor_of - msg = self._deprecated_msg_with_alternative - else: - msg = self._deprecated_msg_no_alternative - return msg, details - - -def is_compatible(requested_version, current_version, same_major=True): - """Determine whether `requested_version` is satisfied by - `current_version`; in other words, `current_version` is >= - `requested_version`. - - :param requested_version: version to check for compatibility - :param current_version: version to check against - :param same_major: if True, the major version must be identical between - `requested_version` and `current_version`. This is used when a - major-version difference indicates incompatibility between the two - versions. Since this is the common-case in practice, the default is - True. 
- :returns: True if compatible, False if not - """ - requested_parts = pkg_resources.parse_version(requested_version) - current_parts = pkg_resources.parse_version(current_version) - - if same_major and (requested_parts[0] != current_parts[0]): - return False - - return current_parts >= requested_parts diff --git a/openstack-common.conf b/openstack-common.conf index 023417d74..e819968b5 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -1,23 +1,17 @@ [DEFAULT] # The list of modules to copy from openstack-common -module=wsgi +module=context module=exception module=gettextutils -module=importutils module=jsonutils -module=log -module=xmlutils -module=sslutils -module=service -module=local -module=install_venv_common -module=timeutils -module=eventlet_backdoor -module=threadgroup -module=loopingcall -module=fileutils module=lockutils +module=log +module=policy +module=processutils +module=service +module=sslutils +module=timeutils # The base module to hold the copy of openstack.common base=murano
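
For code elsewhere in the Murano tree that still imports the removed incubator modules, the adjustments implied by this sync look roughly like the following. This is a minimal illustrative sketch and not part of the patch itself: the helper name sample_handler is hypothetical, and it assumes oslo.utils, oslo.serialization and oslo.i18n are installed, as the synced modules above already require.

# Illustrative sketch only -- sample_handler and its body are hypothetical
# examples, not code introduced by this patch.

# xmlutils now lives under murano.common instead of murano.openstack.common:
from murano.common import xmlutils

# excutils and jsonutils are consumed from the oslo libraries that replace
# the incubator copies deleted by this patch:
from oslo.serialization import jsonutils
from oslo.utils import excutils

# Translation markers come from the new _i18n shim rather than gettextutils:
from murano.openstack.common._i18n import _LW
from murano.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def sample_handler(xml_doc):
    """Hypothetical helper showing the post-sync imports in use."""
    try:
        parsed = xmlutils.safe_minidom_parse_string(xml_doc)
    except Exception:
        # save_and_reraise_exception is now provided by oslo.utils.excutils;
        # the original exception is re-raised when the context exits.
        with excutils.save_and_reraise_exception():
            LOG.warn(_LW('Failed to parse XML payload'))
    return jsonutils.dumps({'root': parsed.documentElement.tagName})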