oslo: sync all modules that depend on incubator log module

We're going to switch to oslo.log, so we need to untangle the incubator
modules from the old log module. This is achieved by re-syncing, from
oslo-incubator, all the modules that previously used it.

Note that the incubator modules now use the standard library logging
module (and not, e.g., oslo.log).

The following modules (and their dependencies) are synced:
- eventlet_backdoor
- fileutils
- loopingcall
- periodic_task
- policy
- service
- systemd
- threadgroup
- versionutils

The latest commit in oslo-incubator at the time of the sync is:
- f989c4397d7e83c3e73e8da01a9f27bf4ca38b73

Changes to the modules are minor and hence not expected to break
anything.

Change-Id: I360863a5a4fa6a179c6884cff84553a35d889719
Related-Bug: #1425013
Author: Ihar Hrachyshka
Date:   2015-02-27 00:00:24 +01:00
Parent: 5c606875ac
Commit: a82357cc31

10 changed files with 214 additions and 97 deletions
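The mechanical core of the change, repeated in every file below: the
incubator log wrapper is dropped in favor of the standard library. A
minimal sketch of the new pattern (message text illustrative)::

    # Before the sync, incubator modules did:
    #     from neutron.openstack.common import log as logging
    # After the sync they use the standard library directly; the hosting
    # application decides how handlers are configured (e.g. via oslo.log).
    import logging

    LOG = logging.getLogger(__name__)
    LOG.info('incubator modules now log through the standard library')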

neutron/openstack/common/_i18n.py

@@ -17,14 +17,14 @@ See http://docs.openstack.org/developer/oslo.i18n/usage.html
 """

 try:
-    import oslo.i18n
+    import oslo_i18n

     # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
     # application name when this module is synced into the separate
     # repository. It is OK to have more than one translation function
     # using the same domain, since there will still only be one message
     # catalog.
-    _translators = oslo.i18n.TranslatorFactory(domain='neutron')
+    _translators = oslo_i18n.TranslatorFactory(domain='neutron')

     # The primary translation function using the well-known name "_"
     _ = _translators.primary
@@ -40,6 +40,6 @@ try:
     _LC = _translators.log_critical
 except ImportError:
     # NOTE(dims): Support for cases where a project wants to use
-    # code from neutron-incubator, but is not ready to be internationalized
+    # code from oslo-incubator, but is not ready to be internationalized
     # (like tempest)
     _ = _LI = _LW = _LE = _LC = lambda x: x
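For context, a hedged sketch of how the synced modules consume these
markers (assumes a neutron tree): with oslo.i18n importable, messages
are translated in the 'neutron' domain; without it, the identity
lambdas above make the markers no-ops::

    import logging

    from neutron.openstack.common._i18n import _LI, _LW

    LOG = logging.getLogger(__name__)
    LOG.info(_LI('agent started'))
    LOG.warning(_LW('agent restarted %d times'), 3)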

neutron/openstack/common/eventlet_backdoor.py

@@ -16,21 +16,21 @@
 from __future__ import print_function

+import copy
 import errno
 import gc
+import logging
 import os
 import pprint
 import socket
 import sys
 import traceback

 import eventlet
 import eventlet.backdoor
 import greenlet
-from oslo.config import cfg
+from oslo_config import cfg

 from neutron.openstack.common._i18n import _LI
-from neutron.openstack.common import log as logging

 help_for_backdoor_port = (
     "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
@@ -49,6 +49,12 @@ CONF.register_opts(eventlet_backdoor_opts)
 LOG = logging.getLogger(__name__)


+def list_opts():
+    """Entry point for oslo-config-generator.
+    """
+    return [(None, copy.deepcopy(eventlet_backdoor_opts))]
+
+
 class EventletBackdoorConfigValueError(Exception):
     def __init__(self, port_range, help_msg, ex):
         msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
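The new list_opts() hook is what oslo-config-generator discovers when
emitting sample configuration; the deepcopy keeps the generator from
mutating the module's live option objects. A sketch of a consumer doing
what the generator does (assumes a neutron tree)::

    from neutron.openstack.common import eventlet_backdoor

    for group, opts in eventlet_backdoor.list_opts():
        for opt in opts:
            print('%s: %s' % (opt.name, opt.help))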

neutron/openstack/common/fileutils.py

@@ -15,25 +15,27 @@
 import contextlib
 import errno
+import logging
 import os
 import stat
 import tempfile

-from oslo.utils import excutils
-from neutron.openstack.common import log as logging
+from oslo_utils import excutils

 LOG = logging.getLogger(__name__)

 _FILE_CACHE = {}
+DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO


-def ensure_tree(path):
+def ensure_tree(path, mode=DEFAULT_MODE):
     """Create a directory (and any ancestor directories required)

     :param path: Directory to create
+    :param mode: Directory creation permissions
     """
     try:
-        os.makedirs(path)
+        os.makedirs(path, mode)
     except OSError as exc:
         if exc.errno == errno.EEXIST:
             if not os.path.isdir(path):
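Usage of the widened signature, as a sketch (assumes a neutron tree).
DEFAULT_MODE is the 0o777-equivalent bitmask, and since the mode is
handed straight to os.makedirs, the process umask still applies::

    import stat

    from neutron.openstack.common import fileutils

    # Create /tmp/example/a/b plus any missing ancestors, owner-rwx only.
    fileutils.ensure_tree('/tmp/example/a/b', mode=stat.S_IRWXU)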

neutron/openstack/common/loopingcall.py

@@ -15,6 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import logging
 import sys
 import time

@@ -22,7 +23,6 @@ from eventlet import event
 from eventlet import greenthread

 from neutron.openstack.common._i18n import _LE, _LW
-from neutron.openstack.common import log as logging

 LOG = logging.getLogger(__name__)

@@ -84,9 +84,9 @@ class FixedIntervalLoopingCall(LoopingCallBase):
                     break
                 delay = end - start - interval
                 if delay > 0:
-                    LOG.warn(_LW('task %(func_name)s run outlasted '
+                    LOG.warn(_LW('task %(func_name)r run outlasted '
                                  'interval by %(delay).2f sec'),
-                             {'func_name': repr(self.f), 'delay': delay})
+                             {'func_name': self.f, 'delay': delay})
                 greenthread.sleep(-delay if delay < 0 else 0)
             except LoopingCallDone as e:
                 self.stop()
@@ -127,9 +127,9 @@ class DynamicLoopingCall(LoopingCallBase):
                     if periodic_interval_max is not None:
                         idle = min(idle, periodic_interval_max)
-                    LOG.debug('Dynamic looping call %(func_name)s sleeping '
+                    LOG.debug('Dynamic looping call %(func_name)r sleeping '
                               'for %(idle).02f seconds',
                               {'func_name': self.f, 'idle': idle})
-                              {'func_name': repr(self.f), 'idle': idle})
+                              {'func_name': self.f, 'idle': idle})
                     greenthread.sleep(idle)
             except LoopingCallDone as e:
                 self.stop()
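The logging tweak above swaps an eagerly computed repr(self.f) for the
%r conversion specifier, so repr() only runs if the record is actually
formatted (i.e. never when the log level is disabled). The two
spellings render identically::

    def f():
        pass

    eager = 'task %(func_name)s' % {'func_name': repr(f)}
    lazy = 'task %(func_name)r' % {'func_name': f}
    assert eager == lazy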

neutron/openstack/common/periodic_task.py

@@ -12,14 +12,14 @@
 # under the License.

 import copy
+import logging
 import random
 import time

-from oslo.config import cfg
+from oslo_config import cfg
 import six

 from neutron.openstack.common._i18n import _, _LE, _LI
-from neutron.openstack.common import log as logging

 periodic_opts = [
@@ -38,7 +38,7 @@ DEFAULT_INTERVAL = 60.0

 def list_opts():
-    """Entry point for oslo.config-generator."""
+    """Entry point for oslo-config-generator."""
     return [(None, copy.deepcopy(periodic_opts))]
@@ -55,14 +55,15 @@ def periodic_task(*args, **kwargs):
        interval of 60 seconds.

     2. With arguments:
-       @periodic_task(spacing=N [, run_immediately=[True|False]])
+       @periodic_task(spacing=N [, run_immediately=[True|False]]
+           [, name=[None|"string"])
        this will be run on approximately every N seconds. If this number is
        negative the periodic task will be disabled. If the run_immediately
        argument is provided and has a value of 'True', the first run of the
        task will be shortly after task scheduler starts. If
        run_immediately is omitted or set to 'False', the first time the
        task runs will be approximately N seconds after the task scheduler
-       starts.
+       starts. If name is not provided, __name__ of function is used.
     """
     def decorator(f):
         # Test for old style invocation
@@ -76,6 +77,7 @@ def periodic_task(*args, **kwargs):
             f._periodic_enabled = False
         else:
             f._periodic_enabled = kwargs.pop('enabled', True)
+        f._periodic_name = kwargs.pop('name', f.__name__)

         # Control frequency
         f._periodic_spacing = kwargs.pop('spacing', 0)
@@ -105,6 +107,36 @@ def periodic_task(*args, **kwargs):

 class _PeriodicTasksMeta(type):
+    def _add_periodic_task(cls, task):
+        """Add a periodic task to the list of periodic tasks.
+
+        The task should already be decorated by @periodic_task.
+
+        :return: whether task was actually enabled
+        """
+        name = task._periodic_name
+
+        if task._periodic_spacing < 0:
+            LOG.info(_LI('Skipping periodic task %(task)s because '
+                         'its interval is negative'),
+                     {'task': name})
+            return False
+        if not task._periodic_enabled:
+            LOG.info(_LI('Skipping periodic task %(task)s because '
+                         'it is disabled'),
+                     {'task': name})
+            return False
+
+        # A periodic spacing of zero indicates that this task should
+        # be run on the default interval to avoid running too
+        # frequently.
+        if task._periodic_spacing == 0:
+            task._periodic_spacing = DEFAULT_INTERVAL
+
+        cls._periodic_tasks.append((name, task))
+        cls._periodic_spacing[name] = task._periodic_spacing
+        return True
+
     def __init__(cls, names, bases, dict_):
         """Metaclass that allows us to collect decorated periodic tasks."""
         super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
@@ -125,28 +157,7 @@ class _PeriodicTasksMeta(type):
         for value in cls.__dict__.values():
             if getattr(value, '_periodic_task', False):
-                task = value
-                name = task.__name__
-
-                if task._periodic_spacing < 0:
-                    LOG.info(_LI('Skipping periodic task %(task)s because '
-                                 'its interval is negative'),
-                             {'task': name})
-                    continue
-                if not task._periodic_enabled:
-                    LOG.info(_LI('Skipping periodic task %(task)s because '
-                                 'it is disabled'),
-                             {'task': name})
-                    continue
-
-                # A periodic spacing of zero indicates that this task should
-                # be run on the default interval to avoid running too
-                # frequently.
-                if task._periodic_spacing == 0:
-                    task._periodic_spacing = DEFAULT_INTERVAL
-
-                cls._periodic_tasks.append((name, task))
-                cls._periodic_spacing[name] = task._periodic_spacing
+                cls._add_periodic_task(value)

 def _nearest_boundary(last_run, spacing):
@@ -178,6 +189,15 @@ class PeriodicTasks(object):
         for name, task in self._periodic_tasks:
             self._periodic_last_run[name] = task._periodic_last_run

+    def add_periodic_task(self, task):
+        """Add a periodic task to the list of periodic tasks.
+
+        The task should already be decorated by @periodic_task.
+        """
+        if self.__class__._add_periodic_task(task):
+            self._periodic_last_run[task._periodic_name] = (
+                task._periodic_last_run)
+
     def run_periodic_tasks(self, context, raise_on_error=False):
         """Tasks to be run at a periodic interval."""
         idle_for = DEFAULT_INTERVAL
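A hedged sketch of the two additions, the name= argument and runtime
registration via add_periodic_task() (assumes a neutron tree; class and
task names are illustrative)::

    from neutron.openstack.common import periodic_task


    class Manager(periodic_task.PeriodicTasks):

        @periodic_task.periodic_task(spacing=10, run_immediately=True,
                                     name='report_state')
        def report_state(self, context):
            pass


    @periodic_task.periodic_task(spacing=30)
    def cleanup(self, context):
        pass


    manager = Manager()
    # New in this sync: tasks can be registered after class creation.
    manager.add_periodic_task(cleanup)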

neutron/openstack/common/policy.py

@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+#
 # Copyright (c) 2012 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -22,22 +24,43 @@ string written in the new policy language.

 In the list-of-lists representation, each check inside the innermost
 list is combined as with an "and" conjunction--for that check to pass,
 all the specified checks must pass. These innermost lists are then
-combined as with an "or" conjunction. This is the original way of
-expressing policies, but there now exists a new way: the policy
-language.
-
-In the policy language, each check is specified the same way as in the
-list-of-lists representation: a simple "a:b" pair that is matched to
-the correct code to perform that check. However, conjunction
-operators are available, allowing for more expressiveness in crafting
-policies.
-
-As an example, take the following rule, expressed in the list-of-lists
-representation::
+combined as with an "or" conjunction. As an example, take the following
+rule, expressed in the list-of-lists representation::

     [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]

-In the policy language, this becomes::
+This is the original way of expressing policies, but there now exists a
+new way: the policy language.
+
+In the policy language, each check is specified the same way as in the
+list-of-lists representation: a simple "a:b" pair that is matched to
+the correct class to perform that check::
+
+ +===========================================================================+
+ |            TYPE                 |                SYNTAX                   |
+ +===========================================================================+
+ |User's Role                      |             role:admin                  |
+ +---------------------------------------------------------------------------+
+ |Rules already defined on policy  |          rule:admin_required            |
+ +---------------------------------------------------------------------------+
+ |Against URL's¹                   |        http://my-url.org/check          |
+ +---------------------------------------------------------------------------+
+ |User attributes²                 |    project_id:%(target.project.id)s     |
+ +---------------------------------------------------------------------------+
+ |Strings                          |       <variable>:'xpto2035abc'          |
+ |                                 |        'myproject':<variable>           |
+ +---------------------------------------------------------------------------+
+ |                                 |         project_id:xpto2035abc          |
+ |Literals                         |            domain_id:20                 |
+ |                                 |         True:%(user.enabled)s           |
+ +===========================================================================+
+
+ ¹URL checking must return 'True' to be valid
+ ²User attributes (obtained through the token): user_id, domain_id or project_id
+
+Conjunction operators are available, allowing for more expressiveness
+in crafting policies. So, in the policy language, the previous check in
+list-of-lists becomes::

     role:admin or (project_id:%(project_id)s and role:projectadmin)
@@ -46,26 +69,16 @@ policy rule::

     project_id:%(project_id)s and not role:dunce

-It is possible to perform policy checks on the following user
-attributes (obtained through the token): user_id, domain_id or
-project_id::
-
-    domain_id:<some_value>
-
 Attributes sent along with API calls can be used by the policy engine
 (on the right side of the expression), by using the following syntax::

-    <some_value>:user.id
+    <some_value>:%(user.id)s

 Contextual attributes of objects identified by their IDs are loaded
 from the database. They are also available to the policy engine and
 can be checked through the `target` keyword::

-    <some_value>:target.role.name
-
-All these attributes (related to users, API calls, and context) can be
-checked against each other or against constants, be it literals (True,
-<a_number>) or strings.
+    <some_value>:%(target.role.name)s

 Finally, two special policy checks should be mentioned; the policy
 check "@" will always accept an access, and the policy check "!" will
@@ -77,18 +90,19 @@ as it allows particular rules to be explicitly disabled.

 import abc
 import ast
+import copy
+import logging
 import os
 import re

-from oslo.config import cfg
-from oslo.serialization import jsonutils
+from oslo_config import cfg
+from oslo_serialization import jsonutils
 import six
 import six.moves.urllib.parse as urlparse
 import six.moves.urllib.request as urlrequest

 from neutron.openstack.common import fileutils
-from neutron.openstack.common._i18n import _, _LE, _LW
-from neutron.openstack.common import log as logging
+from neutron.openstack.common._i18n import _, _LE

 policy_opts = [
@@ -102,7 +116,12 @@ policy_opts = [
     cfg.MultiStrOpt('policy_dirs',
                     default=['policy.d'],
                     help=_('Directories where policy configuration files are '
-                           'stored.')),
+                           'stored. They can be relative to any directory '
+                           'in the search path defined by the config_dir '
+                           'option, or absolute paths. The file defined by '
+                           'policy_file must exist for these directories to '
+                           'be searched. Missing or empty directories are '
+                           'ignored.')),
 ]

 CONF = cfg.CONF
@@ -113,6 +132,11 @@ LOG = logging.getLogger(__name__)

 _checks = {}


+def list_opts():
+    """Entry point for oslo-config-generator."""
+    return [(None, copy.deepcopy(policy_opts))]
+
+
 class PolicyNotAuthorized(Exception):
     def __init__(self, rule):
@@ -189,16 +213,19 @@ class Enforcer(object):
     :param default_rule: Default rule to use, CONF.default_rule will
                          be used if none is specified.
     :param use_conf: Whether to load rules from cache or config file.
+    :param overwrite: Whether to overwrite existing rules when reload rules
+                      from config file.
     """

     def __init__(self, policy_file=None, rules=None,
-                 default_rule=None, use_conf=True):
+                 default_rule=None, use_conf=True, overwrite=True):
         self.default_rule = default_rule or CONF.policy_default_rule
         self.rules = Rules(rules, self.default_rule)

         self.policy_path = None

         self.policy_file = policy_file or CONF.policy_file
         self.use_conf = use_conf
+        self.overwrite = overwrite

     def set_rules(self, rules, overwrite=True, use_conf=False):
         """Create a new Rules object based on the provided dict of rules.
@@ -230,7 +257,7 @@
         Policy file is cached and will be reloaded if modified.

-        :param force_reload: Whether to overwrite current rules.
+        :param force_reload: Whether to reload rules from config file.
         """

         if force_reload:
@@ -240,18 +267,19 @@
             if not self.policy_path:
                 self.policy_path = self._get_policy_path(self.policy_file)

-            self._load_policy_file(self.policy_path, force_reload)
+            self._load_policy_file(self.policy_path, force_reload,
+                                   overwrite=self.overwrite)
             for path in CONF.policy_dirs:
                 try:
                     path = self._get_policy_path(path)
                 except cfg.ConfigFilesNotFoundError:
                     LOG.warn(_LW("Can not find policy directory: %s"), path)
                     continue
                 self._walk_through_policy_directory(path,
                                                     self._load_policy_file,
                                                     force_reload, False)

-    def _walk_through_policy_directory(self, path, func, *args):
+    @staticmethod
+    def _walk_through_policy_directory(path, func, *args):
         # We do not iterate over sub-directories.
         policy_files = next(os.walk(path))[2]
         policy_files.sort()
@@ -261,10 +289,11 @@
     def _load_policy_file(self, path, force_reload, overwrite=True):
         reloaded, data = fileutils.read_cached_file(
             path, force_reload=force_reload)
-        if reloaded or not self.rules:
+        if reloaded or not self.rules or not overwrite:
             rules = Rules.load_json(data, self.default_rule)
-            self.set_rules(rules, overwrite)
-            LOG.debug("Rules successfully reloaded")
+            self.set_rules(rules, overwrite=overwrite, use_conf=True)
+            LOG.debug("Reloaded policy file: %(path)s",
+                      {'path': path})

     def _get_policy_path(self, path):
         """Locate the policy json data file/path.
@@ -883,7 +912,17 @@ class HttpCheck(Check):
         """
         url = ('http:' + self.match) % target

-        data = {'target': jsonutils.dumps(target),
+        # Convert instances of object() in target temporarily to
+        # empty dict to avoid circular reference detection
+        # errors in jsonutils.dumps().
+        temp_target = copy.deepcopy(target)
+        for key in target.keys():
+            element = target.get(key)
+            if type(element) is object:
+                temp_target[key] = {}
+
+        data = {'target': jsonutils.dumps(temp_target),
                 'credentials': jsonutils.dumps(creds)}
         post_data = urlparse.urlencode(data)
         f = urlrequest.urlopen(url, post_data)
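A hedged sketch tying the pieces together: a rule written in the policy
language, loaded through Rules.load_json() (the same helper
_load_policy_file uses above), and the new overwrite= knob, which makes
later loads merge into rather than replace the current rule set
(assumes a neutron tree; rule and credential values are illustrative)::

    from neutron.openstack.common import policy

    enforcer = policy.Enforcer(overwrite=False)
    enforcer.set_rules(policy.Rules.load_json(
        '{"get_port": "role:admin or project_id:%(project_id)s"}'))

    creds = {'roles': ['member'], 'project_id': 'p1'}
    print(enforcer.enforce('get_port', {'project_id': 'p1'}, creds))  # True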

neutron/openstack/common/service.py

@@ -18,7 +18,7 @@
 """Generic Node base class for all workers that run on hosts."""

 import errno
-import logging as std_logging
+import logging
 import os
 import random
 import signal
@@ -35,11 +35,10 @@ except ImportError:
 import eventlet
 from eventlet import event
-from oslo.config import cfg
+from oslo_config import cfg

 from neutron.openstack.common import eventlet_backdoor
 from neutron.openstack.common._i18n import _LE, _LI, _LW
-from neutron.openstack.common import log as logging
 from neutron.openstack.common import systemd
 from neutron.openstack.common import threadgroup
@@ -163,7 +162,7 @@ class ServiceLauncher(Launcher):
                 signo = 0

         LOG.debug('Full set of CONF:')
-        CONF.log_opt_values(LOG, std_logging.DEBUG)
+        CONF.log_opt_values(LOG, logging.DEBUG)

         try:
             if ready_callback:
@@ -377,7 +376,7 @@ class ProcessLauncher(object):
         systemd.notify_once()
         LOG.debug('Full set of CONF:')
-        CONF.log_opt_values(LOG, std_logging.DEBUG)
+        CONF.log_opt_values(LOG, logging.DEBUG)

         try:
             while True:
@@ -397,7 +396,7 @@
                 self.running = True
                 self.sigcaught = None
             except eventlet.greenlet.GreenletExit:
-                LOG.info(_LI("Wait called after thread killed.  Cleaning up."))
+                LOG.info(_LI("Wait called after thread killed. Cleaning up."))

                 self.stop()
@@ -434,8 +433,8 @@ class Service(object):
     def start(self):
         pass

-    def stop(self):
-        self.tg.stop()
+    def stop(self, graceful=False):
+        self.tg.stop(graceful)
         self.tg.wait()
         # Signal that service cleanup is done:
         if not self._done.ready():
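The stop() signature change is forwarded to the thread group, so a
graceful shutdown lets in-flight threads finish instead of being
killed. A sketch (assumes a neutron tree with eventlet installed;
service and timer are illustrative)::

    from neutron.openstack.common import service


    class MyService(service.Service):
        def start(self):
            self.tg.add_timer(1, lambda: None)


    srv = MyService()
    srv.start()
    srv.stop(graceful=True)  # waits for threads rather than killing them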

neutron/openstack/common/systemd.py

@@ -16,12 +16,11 @@
 Helper module for systemd service readiness notification.
 """

+import logging
 import os
 import socket
 import sys

-from neutron.openstack.common import log as logging

 LOG = logging.getLogger(__name__)

neutron/openstack/common/threadgroup.py

@@ -11,12 +11,12 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.

+import logging
 import threading

 import eventlet
 from eventlet import greenpool

-from neutron.openstack.common import log as logging
 from neutron.openstack.common import loopingcall
@@ -96,6 +96,8 @@ class ThreadGroup(object):
                 continue
             try:
                 x.stop()
+            except eventlet.greenlet.GreenletExit:
+                pass
             except Exception as ex:
                 LOG.exception(ex)
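Why GreenletExit is expected here: eventlet stops a green thread by
raising GreenletExit inside it, so seeing it while stopping the group
is a normal shutdown rather than an error worth LOG.exception(). A
small illustration with bare eventlet::

    import eventlet

    gt = eventlet.spawn(eventlet.sleep, 60)
    gt.kill()
    try:
        gt.wait()
    except eventlet.greenlet.GreenletExit:
        print('killed thread exits with GreenletExit, as expected')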

neutron/openstack/common/versionutils.py

@@ -19,15 +19,24 @@ Helpers for comparing version strings.
 """

 import functools
 import inspect
+import logging

+from oslo_config import cfg
 import pkg_resources
 import six

 from neutron.openstack.common._i18n import _
-from neutron.openstack.common import log as logging

 LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+opts = [
+    cfg.BoolOpt('fatal_deprecations',
+                default=False,
+                help='Enables or disables fatal status of deprecations.'),
+]

 class deprecated(object):
@@ -127,7 +136,7 @@ class deprecated(object):
             @six.wraps(func_or_cls)
             def wrapped(*args, **kwargs):
-                LOG.deprecated(msg, details)
+                report_deprecated_feature(LOG, msg, details)
                 return func_or_cls(*args, **kwargs)
             return wrapped
         elif inspect.isclass(func_or_cls):
@@ -136,10 +145,10 @@ class deprecated(object):
             # TODO(tsufiev): change `functools` module to `six` as
             # soon as six 1.7.4 (with fix for passing `assigned`
             # argument to underlying `functools.wraps`) is released
-            # and added to the neutron-incubator requrements
+            # and added to the oslo-incubator requrements
             @functools.wraps(orig_init, assigned=('__name__', '__doc__'))
             def new_init(self, *args, **kwargs):
-                LOG.deprecated(msg, details)
+                report_deprecated_feature(LOG, msg, details)
                 orig_init(self, *args, **kwargs)
             func_or_cls.__init__ = new_init
             return func_or_cls
@@ -201,3 +210,44 @@ def is_compatible(requested_version, current_version, same_major=True):
             return False

     return current_parts >= requested_parts
+
+
+# Track the messages we have sent already. See
+# report_deprecated_feature().
+_deprecated_messages_sent = {}
+
+
+def report_deprecated_feature(logger, msg, *args, **kwargs):
+    """Call this function when a deprecated feature is used.
+
+    If the system is configured for fatal deprecations then the message
+    is logged at the 'critical' level and :class:`DeprecatedConfig` will
+    be raised.
+
+    Otherwise, the message will be logged (once) at the 'warn' level.
+
+    :raises: :class:`DeprecatedConfig` if the system is configured for
+             fatal deprecations.
+    """
+    stdmsg = _("Deprecated: %s") % msg
+    CONF.register_opts(opts)
+    if CONF.fatal_deprecations:
+        logger.critical(stdmsg, *args, **kwargs)
+        raise DeprecatedConfig(msg=stdmsg)
+
+    # Using a list because a tuple with dict can't be stored in a set.
+    sent_args = _deprecated_messages_sent.setdefault(msg, list())
+
+    if args in sent_args:
+        # Already logged this message, so don't log it again.
+        return
+
+    sent_args.append(args)
+    logger.warn(stdmsg, *args, **kwargs)
+
+
+class DeprecatedConfig(Exception):
+    message = _("Fatal call to deprecated config: %(msg)s")
+
+    def __init__(self, msg):
+        super(Exception, self).__init__(self.message % dict(msg=msg))
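A hedged sketch of the new reporting path (assumes a neutron tree; the
release constant is one of those defined on the deprecated class, e.g.
JUNO). Each distinct message is logged once at warn level, and with
fatal_deprecations=True the call logs at critical level and raises
DeprecatedConfig instead::

    import logging

    from neutron.openstack.common import versionutils

    logging.basicConfig(level=logging.WARNING)


    @versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
                             in_favor_of='oslo.log')
    def old_helper():
        return 42

    old_helper()  # logs the deprecation warning once
    old_helper()  # duplicate message is suppressed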