diff --git a/marconi/openstack/common/cache/_backends/memory.py b/marconi/openstack/common/cache/_backends/memory.py
index 1c3b0b21c..0f2d52ee2 100644
--- a/marconi/openstack/common/cache/_backends/memory.py
+++ b/marconi/openstack/common/cache/_backends/memory.py
@@ -147,7 +147,7 @@ class MemoryBackend(backends.BaseCache):
             try:
                 # NOTE(flaper87): Keys with ttl == 0
                 # don't exist in the _keys_expires dict
-                self._keys_expires[value[0]].remove(value[1])
+                self._keys_expires[value[0]].remove(key)
             except (KeyError, ValueError):
                 pass
diff --git a/marconi/openstack/common/cache/backends.py b/marconi/openstack/common/cache/backends.py
index 2fa4aaeb2..1bea8912a 100644
--- a/marconi/openstack/common/cache/backends.py
+++ b/marconi/openstack/common/cache/backends.py
@@ -26,9 +26,9 @@ class BaseCache(object):
 
     :params parsed_url: Parsed url object.
     :params options: A dictionary with configuration parameters
-                     for the cache. For example:
-                        - default_ttl: An integer defining the default ttl
-                        for keys.
+                     for the cache. For example:
+
+                     - default_ttl: An integer defining the default ttl for keys.
     """
 
     def __init__(self, parsed_url, options=None):
@@ -43,20 +43,17 @@ class BaseCache(object):
     def set(self, key, value, ttl, not_exists=False):
         """Sets or updates a cache entry
 
-        NOTE: Thread-safety is required and has to be
-        guaranteed by the backend implementation.
+        .. note:: Thread-safety is required and has to be guaranteed by the
+                  backend implementation.
 
         :params key: Item key as string.
         :type key: `unicode string`
-        :params value: Value to assign to the key. This
-                       can be anything that is handled
-                       by the current backend.
-        :params ttl: Key's timeout in seconds. 0 means
-                     no timeout.
+        :params value: Value to assign to the key. This can be anything that
+                       is handled by the current backend.
+        :params ttl: Key's timeout in seconds. 0 means no timeout.
         :type ttl: int
-        :params not_exists: If True, the key will be set
-                            if it doesn't exist. Otherwise,
-                            it'll always be set.
+        :params not_exists: If True, the key will be set if it doesn't exist.
+                            Otherwise, it'll always be set.
         :type not_exists: bool
 
         :returns: True if the operation succeeds, False otherwise.
@@ -74,9 +71,8 @@ class BaseCache(object):
 
         :params key: Item key as string.
         :type key: `unicode string`
-        :params value: Value to assign to the key. This
-                       can be anything that is handled
-                       by the current backend.
+        :params value: Value to assign to the key. This can be anything that
+                       is handled by the current backend.
         """
         try:
             return self[key]
@@ -91,15 +87,14 @@ class BaseCache(object):
     def get(self, key, default=None):
         """Gets one item from the cache
 
-        NOTE: Thread-safety is required and it has to be
-        guaranteed by the backend implementation.
+        .. note:: Thread-safety is required and it has to be guaranteed
+                  by the backend implementation.
 
-        :params key: Key for the item to retrieve
-                     from the cache.
+        :params key: Key for the item to retrieve from the cache.
         :params default: The default value to return.
 
-        :returns: `key`'s value in the cache if it exists,
-                  otherwise `default` should be returned.
+        :returns: `key`'s value in the cache if it exists, otherwise
+                  `default` should be returned.
         """
         return self._get(key, default)
 
@@ -115,8 +110,8 @@ class BaseCache(object):
     def __delitem__(self, key):
         """Removes an item from cache.
 
-        NOTE: Thread-safety is required and it has to be
-        guaranteed by the backend implementation.
+        .. note:: Thread-safety is required and it has to be guaranteed by
+                  the backend implementation.
 
         :params key: The key to remove.
 
@@ -130,8 +125,8 @@ class BaseCache(object):
     def clear(self):
         """Removes all items from the cache.
 
-        NOTE: Thread-safety is required and it has to be
-        guaranteed by the backend implementation.
+        .. note:: Thread-safety is required and it has to be guaranteed by
+                  the backend implementation.
         """
         return self._clear()
 
@@ -143,9 +138,8 @@ class BaseCache(object):
         """Increments the value for a key
 
         :params key: The key for the value to be incremented
-        :params delta: Number of units by which to increment
-                       the value. Pass a negative number to
-                       decrement the value.
+        :params delta: Number of units by which to increment the value.
+                       Pass a negative number to decrement the value.
 
         :returns: The new value
         """
@@ -158,10 +152,8 @@ class BaseCache(object):
     def append_tail(self, key, tail):
         """Appends `tail` to `key`'s value.
 
-        :params key: The key of the value to which
-                     `tail` should be appended.
-        :params tail: The list of values to append to the
-                      original.
+        :params key: The key of the value to which `tail` should be appended.
+        :params tail: The list of values to append to the original.
 
         :returns: The new value
         """
@@ -181,10 +173,8 @@ class BaseCache(object):
     def append(self, key, value):
         """Appends `value` to `key`'s value.
 
-        :params key: The key of the value to which
-                     `tail` should be appended.
-        :params value: The value to append to the
-                       original.
+        :params key: The key of the value to which `tail` should be appended.
+        :params value: The value to append to the original.
 
         :returns: The new value
         """
@@ -196,8 +186,7 @@ class BaseCache(object):
 
         :params key: The key to verify.
 
-        :returns: True if the key exists,
-                  otherwise False.
+        :returns: True if the key exists, otherwise False.
         """
 
     @abc.abstractmethod
@@ -209,9 +198,8 @@ class BaseCache(object):
         """Gets keys' value from cache
 
        :params keys: List of keys to retrieve.
-        :params default: The default value to return
-                         for each key that is not in
-                         the cache.
+        :params default: The default value to return for each key that is not
+                         in the cache.
 
         :returns: A generator of (key, value)
         """
@@ -227,13 +215,12 @@ class BaseCache(object):
     def set_many(self, data, ttl=None):
         """Puts several items into the cache at once
 
-        Depending on the backend, this operation may or may
-        not be efficient. The default implementation calls
-        set for each (key, value) pair passed, other backends
-        support set_many operations as part of their protocols.
+        Depending on the backend, this operation may or may not be efficient.
+        The default implementation calls set for each (key, value) pair
+        passed, other backends support set_many operations as part of their
+        protocols.
 
-        :params data: A dictionary like {key: val} to store
-                      in the cache.
+        :params data: A dictionary like {key: val} to store in the cache.
         :params ttl: Key's timeout in seconds.
         """
 
diff --git a/marconi/openstack/common/config/generator.py b/marconi/openstack/common/config/generator.py
index aa532a550..f302a5726 100644
--- a/marconi/openstack/common/config/generator.py
+++ b/marconi/openstack/common/config/generator.py
@@ -64,6 +64,10 @@ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
 WORDWRAP_WIDTH = 60
 
 
+def raise_extension_exception(extmanager, ep, err):
+    raise
+
+
 def generate(argv):
     parser = argparse.ArgumentParser(
         description='generate sample configuration file',
@@ -107,6 +111,7 @@ def generate(argv):
             'oslo.config.opts',
             names=list(set(parsed_args.libraries)),
             invoke_on_load=False,
+            on_load_failure_callback=raise_extension_exception
         )
         for ext in loader:
             for group, opts in ext.plugin():
@@ -145,7 +150,7 @@ def _import_module(mod_str):
 
 
 def _is_in_group(opt, group):
-    "Check if opt is in group."
+    """Check if opt is in group."""
     for value in group._opts.values():
         # NOTE(llu): Temporary workaround for bug #1262148, wait until
         # newly released oslo.config support '==' operator.
@@ -154,7 +159,7 @@ def _import_module(mod_str):
     return False
 
 
-def _guess_groups(opt, mod_obj):
+def _guess_groups(opt):
     # is it in the DEFAULT group?
     if _is_in_group(opt, cfg.CONF):
         return 'DEFAULT'
@@ -188,7 +193,7 @@ def _list_opts(obj):
 
     ret = {}
     for opt in opts:
-        ret.setdefault(_guess_groups(opt, obj), []).append(opt)
+        ret.setdefault(_guess_groups(opt), []).append(opt)
     return ret.items()
 
 
@@ -218,6 +223,8 @@ def _get_my_ip():
 
 def _sanitize_default(name, value):
     """Set up a reasonably sensible default for pybasedir, my_ip and host."""
+    hostname = socket.gethostname()
+    fqdn = socket.getfqdn()
     if value.startswith(sys.prefix):
         # NOTE(jd) Don't use os.path.join, because it is likely to think the
         # second part is an absolute pathname and therefore drop the first
@@ -229,8 +236,13 @@ def _get_my_ip():
         return value.replace(BASEDIR, '')
     elif value == _get_my_ip():
         return '10.0.0.1'
-    elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
-        return 'marconi'
+    elif value in (hostname, fqdn):
+        if 'host' in name:
+            return 'marconi'
+        elif value.endswith(hostname):
+            return value.replace(hostname, 'marconi')
+        elif value.endswith(fqdn):
+            return value.replace(fqdn, 'marconi')
     elif value.strip() != value:
         return '"%s"' % value
     return value
@@ -241,7 +253,6 @@ def _print_opt(opt):
     if not opt_help:
         sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
         opt_help = ""
-    opt_type = None
     try:
         opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
     except (ValueError, AttributeError) as err:
diff --git a/marconi/openstack/common/excutils.py b/marconi/openstack/common/excutils.py
index c0817428d..3d7c3002f 100644
--- a/marconi/openstack/common/excutils.py
+++ b/marconi/openstack/common/excutils.py
@@ -49,9 +49,22 @@ class save_and_reraise_exception(object):
               decide_if_need_reraise()
               if not should_be_reraised:
                   ctxt.reraise = False
+
+    If another exception occurs and reraise flag is False,
+    the saved exception will not be logged.
+
+    If the caller wants to raise new exception during exception handling
+    he/she sets reraise to False initially with an ability to set it back to
+    True if needed::
+
+      except Exception:
+          with save_and_reraise_exception(reraise=False) as ctxt:
+              [if statements to determine whether to raise a new exception]
+              # Not raising a new exception, so reraise
+              ctxt.reraise = True
     """
-    def __init__(self):
-        self.reraise = True
+    def __init__(self, reraise=True):
+        self.reraise = reraise
 
     def __enter__(self):
         self.type_, self.value, self.tb, = sys.exc_info()
@@ -59,10 +72,11 @@ class save_and_reraise_exception(object):
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         if exc_type is not None:
-            logging.error(_LE('Original exception being dropped: %s'),
-                          traceback.format_exception(self.type_,
-                                                     self.value,
-                                                     self.tb))
+            if self.reraise:
+                logging.error(_LE('Original exception being dropped: %s'),
+                              traceback.format_exception(self.type_,
+                                                         self.value,
+                                                         self.tb))
             return False
         if self.reraise:
             six.reraise(self.type_, self.value, self.tb)
diff --git a/marconi/openstack/common/fileutils.py b/marconi/openstack/common/fileutils.py
index b7c2285ab..bbf445e39 100644
--- a/marconi/openstack/common/fileutils.py
+++ b/marconi/openstack/common/fileutils.py
@@ -99,13 +99,13 @@ def remove_path_on_error(path, remove=delete_if_exists):
 def file_open(*args, **kwargs):
     """Open file
 
-    see built-in file() documentation for more details
+    see built-in open() documentation for more details
 
     Note: The reason this is kept in a separate module is to easily
     be able to provide a stub module that doesn't alter system
     state at all (for unit tests)
     """
-    return file(*args, **kwargs)
+    return open(*args, **kwargs)
 
 
 def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
diff --git a/marconi/openstack/common/lockutils.py b/marconi/openstack/common/lockutils.py
index 963dd5705..65e0b86ef 100644
--- a/marconi/openstack/common/lockutils.py
+++ b/marconi/openstack/common/lockutils.py
@@ -38,10 +38,10 @@ LOG = logging.getLogger(__name__)
 
 util_opts = [
     cfg.BoolOpt('disable_process_locking', default=False,
-                help='Whether to disable inter-process locks'),
+                help='Enables or disables inter-process locks.'),
     cfg.StrOpt('lock_path',
                default=os.environ.get("MARCONI_LOCK_PATH"),
-               help=('Directory to use for lock files.'))
+               help='Directory to use for lock files.')
 ]
 
 
@@ -239,7 +239,7 @@ def external_lock(name, lock_file_prefix=None, lock_path=None):
 
 
 def remove_external_lock_file(name, lock_file_prefix=None):
-    """Remove a external lock file when it's not used anymore
+    """Remove an external lock file when it's not used anymore
     This will be helpful when we have a lot of lock files
     """
     with internal_lock(name):
@@ -276,7 +276,7 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
 
     :param external: The external keyword argument denotes whether this lock
         should work across multiple processes. This means that if two different
-        workers both run a a method decorated with @synchronized('mylock',
+        workers both run a method decorated with @synchronized('mylock',
        external=True), only one of them will execute at a time.
     """
     int_lock = internal_lock(name)
@@ -287,6 +287,7 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
             yield ext_lock
         else:
             yield int_lock
+    LOG.debug('Released semaphore "%(lock)s"', {'lock': name})
 
 
 def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
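The memory backend hunk at the top of this patch is easier to follow with the two
internal structures in mind: the code implies that `_cache` maps each key to a
`(timeout, value)` tuple, while `_keys_expires` maps a timeout to the set of keys
expiring at that time, so on deletion it is the key, not `value[1]` (the cached
payload), that has to be removed from the per-timeout set. A minimal,
self-contained sketch of that relationship (plain Python, not Marconi code; the
helper names are invented for illustration)::

    _cache = {}          # key -> (timeout, value)
    _keys_expires = {}   # timeout -> set of keys expiring at that time


    def set_item(key, value, timeout):
        _cache[key] = (timeout, value)
        if timeout:  # keys with ttl == 0 never enter _keys_expires
            _keys_expires.setdefault(timeout, set()).add(key)


    def delete_item(key):
        entry = _cache.pop(key, None)
        if entry:
            try:
                # Mirrors the patched line: discard the *key* from the set;
                # entry[1] (the cached value) was never a member of it.
                _keys_expires[entry[0]].remove(key)
            except (KeyError, ValueError):
                pass


    set_item('a', 'payload', timeout=100)
    delete_item('a')
    assert 'a' not in _cache
    assert 'a' not in _keys_expires.get(100, set())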
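The excutils change adds a `reraise` keyword to `save_and_reraise_exception` and,
when it is False, also skips the "Original exception being dropped" log in
`__exit__`. A short usage sketch assuming only what the patched docstring shows;
the `cleanup()` helper and the ValueError are invented for illustration::

    from marconi.openstack.common import excutils


    def cleanup():
        # Hypothetical handler work that runs while the original exception
        # is saved; pretend it decides the original error should propagate.
        return True


    def do_work():
        try:
            raise ValueError('original failure')
        except ValueError:
            # reraise=False (new in this patch): the saved exception is
            # neither re-raised nor logged as dropped unless the flag is
            # flipped back on before the block exits.
            with excutils.save_and_reraise_exception(reraise=False) as ctxt:
                if cleanup():
                    ctxt.reraise = True  # propagate the original ValueError


    try:
        do_work()
    except ValueError as exc:
        print('re-raised as expected: %s' % exc)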
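The lockutils hunks only touch help strings, a docstring typo and a new debug
log, but for context, here is how the helpers they describe are typically
consumed; the lock name and functions below are made up for illustration, and
`lockutils` refers to marconi.openstack.common.lockutils from this tree::

    from marconi.openstack.common import lockutils


    def reload_config():
        # In-process lock; with external=True the same call would also take
        # a file lock so that only one *process* enters at a time. After this
        # patch, lock() logs 'Released semaphore ...' once the block exits.
        with lockutils.lock('config-reload'):
            pass  # critical section


    @lockutils.synchronized('config-reload')
    def rebuild_index():
        """Decorator form referenced in the @synchronized docstring above."""
        pass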