Sync with oslo incubator

Sync with oslo incubator at 45890dae35cd14a25175cdce7c12a5f9acbd712f

Change-Id: I44ecc69f1d952061be7cb2f41678b879ded93eae
Thomas Herve 2014-06-17 11:22:37 +02:00
parent 5a9ee2ddd8
commit 7f7817c157
20 changed files with 106 additions and 50 deletions

View File

@@ -290,7 +290,7 @@
#log_dir=<None>
# Use syslog for logging. Existing syslog format is DEPRECATED
# during I, and will chang in J to honor RFC5424. (boolean
# during I, and will change in J to honor RFC5424. (boolean
# value)
#use_syslog=false
@@ -415,6 +415,10 @@
# (string value)
#kombu_ssl_ca_certs=
# How long to wait before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
#kombu_reconnect_delay=1.0
# The RabbitMQ broker address where a single node is used
# (string value)
#rabbit_host=localhost

View File

@@ -150,7 +150,7 @@ def _import_module(mod_str):
def _is_in_group(opt, group):
"Check if opt is in group."
"""Check if opt is in group."""
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
@@ -159,7 +159,7 @@ def _is_in_group(opt, group):
return False
def _guess_groups(opt, mod_obj):
def _guess_groups(opt):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
@@ -193,7 +193,7 @@ def _list_opts(obj):
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
ret.setdefault(_guess_groups(opt), []).append(opt)
return ret.items()
@@ -253,7 +253,6 @@ def _print_opt(opt):
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:

View File

@@ -33,7 +33,7 @@ Recommended ways to use sessions within this framework:
Examples:
.. code:: python
.. code-block:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
@@ -71,7 +71,7 @@ Recommended ways to use sessions within this framework:
If you create models within the session, they need to be added, but you
do not need to call `model.save()`:
.. code:: python
.. code-block:: python
def create_many_foo(context, foos):
session = sessionmaker()
@@ -100,7 +100,7 @@ Recommended ways to use sessions within this framework:
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code:: python
.. code-block:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
@@ -113,7 +113,7 @@ Recommended ways to use sessions within this framework:
For reference, this emits approximately the following SQL statement:
.. code:: sql
.. code-block:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
@@ -123,7 +123,7 @@ Recommended ways to use sessions within this framework:
instances with same primary key, must catch the exception out of context
managed by a single session:
.. code:: python
.. code-block:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
@@ -152,7 +152,7 @@ Recommended ways to use sessions within this framework:
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
.. code:: python
.. code-block:: python
def myfunc(foo):
session = sessionmaker()
@@ -201,7 +201,7 @@ Enabling soft deletes:
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:
.. code:: python
.. code-block:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
@@ -218,7 +218,7 @@ Efficient use of soft deletes:
* In almost all cases you should use `query.soft_delete()`. Some examples:
.. code:: python
.. code-block:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
@@ -241,7 +241,7 @@ Efficient use of soft deletes:
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
.. code:: python
.. code-block:: python
def soft_delete_bar_model():
session = sessionmaker()
@@ -253,7 +253,7 @@ Efficient use of soft deletes:
However, if you need to work with all entries that correspond to query and
then soft delete them you should use the `query.soft_delete()` method:
.. code:: python
.. code-block:: python
def soft_delete_multi_models():
session = sessionmaker()
@@ -270,7 +270,7 @@ Efficient use of soft deletes:
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
.. code:: python
.. code-block:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)

View File

@@ -151,10 +151,12 @@ class OpportunisticTestCase(DbTestCase):
class MySQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'mysql'
DBNAME = '' # connect to MySQL server, but not to the openstack_citest db
class PostgreSQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'postgresql'
DBNAME = 'postgres' # PostgreSQL requires the db name here,use service one
class MySQLOpportunisticTestCase(OpportunisticTestCase):
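For context, a hedged sketch of how an opportunistic fixture like this is typically consumed; the import path and skip behaviour are assumptions inferred from the class names in this hunk, not stated in the diff:

    # Assumed module path; the incubator ships this as
    # db/sqlalchemy/test_base.py.
    from heat.openstack.common.db.sqlalchemy import test_base


    class FooMySQLTestCase(test_base.MySQLOpportunisticTestCase):
        def test_roundtrip(self):
            # By the time the body runs, the opportunistic fixture has
            # connected with the conventional openstack_citest
            # credentials; the test is skipped when no MySQL server is
            # reachable.
            pass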

View File

@@ -41,7 +41,6 @@ help_for_backdoor_port = (
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
default=None,
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
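Dropping default=None here (and in the similar hunks below for the notifier, zmq, redis, and ssl options) is cosmetic rather than behavioural. A minimal illustration, assuming the oslo.config namespace package of this era:

    from oslo.config import cfg

    # An option's default is already None unless specified, so these two
    # declarations behave identically.
    opt_a = cfg.StrOpt('backdoor_port', help='Enable eventlet backdoor.')
    opt_b = cfg.StrOpt('backdoor_port', default=None,
                       help='Enable eventlet backdoor.')
    assert opt_a.default is None and opt_b.default is None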

View File

@@ -99,13 +99,13 @@ def remove_path_on_error(path, remove=delete_if_exists):
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
see built-in open() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
return open(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
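The switch from file() to open() is a Python 3 compatibility fix: the file() builtin no longer exists there, while open() behaves the same on both major versions. A hedged usage sketch (the path is illustrative only):

    from heat.openstack.common import fileutils

    # file_open() simply delegates to open(), so it works unchanged on
    # Python 2 and 3 and stays easy to stub out in unit tests.
    with fileutils.file_open('/etc/hosts') as f:
        first_line = f.readline()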

View File

@@ -24,10 +24,10 @@ import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ValueError, AttributeError):
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
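Read together, this hunk moves the __import__ call out of the try block and narrows the except clause. A sketch of the resulting helper, reassembled from the hunk (treat the exact body as an assumption):

    import sys
    import traceback


    def import_class(import_str):
        """Returns a class from a string including module and class."""
        mod_str, _sep, class_str = import_str.rpartition('.')
        # A failing import now raises its own ImportError (or ValueError
        # for a dotless string) directly; only a missing class attribute
        # is translated into ImportError below.
        __import__(mod_str)
        try:
            return getattr(sys.modules[mod_str], class_str)
        except AttributeError:
            raise ImportError('Class %s cannot be found (%s)' %
                              (class_str,
                               traceback.format_exception(*sys.exc_info())))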

View File

@@ -31,6 +31,7 @@ This module provides a few things:
'''
import codecs
import datetime
import functools
import inspect
@@ -52,6 +53,7 @@ import six.moves.xmlrpc_client as xmlrpclib
from heat.openstack.common import gettextutils
from heat.openstack.common import importutils
from heat.openstack.common import strutils
from heat.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
@@ -166,12 +168,12 @@ def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def loads(s, encoding='utf-8', **kwargs):
return json.loads(strutils.safe_decode(s, encoding), **kwargs)
def load(fp):
return json.load(fp)
def load(fp, encoding='utf-8', **kwargs):
return json.load(codecs.getreader(encoding)(fp), **kwargs)
try:
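With the new signatures, the JSON helpers decode byte input themselves. A hedged usage sketch:

    import io

    from heat.openstack.common import jsonutils

    # loads() now accepts encoded bytes and decodes them (UTF-8 by
    # default) before parsing; load() wraps the stream in a decoding
    # reader the same way.
    assert jsonutils.loads(b'{"id": 1}') == {'id': 1}
    assert jsonutils.load(io.BytesIO(b'{"id": 1}')) == {'id': 1}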

View File

@@ -92,7 +92,6 @@ logging_cli_opts = [
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
@@ -118,7 +117,7 @@ logging_cli_opts = [
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will chang in J to honor RFC5424.'),
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
@@ -425,9 +424,7 @@ class JSONFormatter(logging.Formatter):
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
if CONF.verbose or CONF.debug:
extra['exc_info'] = (exc_type, value, tb)
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
@@ -572,9 +569,15 @@ def _setup_logging_from_conf(project, version):
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
_loggers = {}
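The version guard matters because Logger.setLevel() only started accepting level names in Python 2.7; 2.6 stores whatever value it is given, which breaks later level comparisons. A minimal illustration:

    import logging
    import sys

    logger = logging.getLogger('demo')
    if sys.version_info < (2, 7):
        # Map the name to its numeric level first on 2.6.
        logger.setLevel(logging.getLevelName('DEBUG'))
    else:
        # 2.7+ converts the name internally (via _checkLevel).
        logger.setLevel('DEBUG')
    assert logger.level == logging.DEBUG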
@@ -663,14 +666,19 @@ class ContextFormatter(logging.Formatter):
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
self._fmt = CONF.logging_context_format_string
fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
fmt += " " + CONF.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
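The branching is needed because logging.Formatter changed shape in Python 3.2: the format string now lives inside a style object, so assigning self._fmt alone no longer affects output. An illustration of the same pattern outside the class:

    import logging
    import sys

    fmt = '%(asctime)s %(levelname)s %(name)s %(message)s'
    formatter = logging.Formatter()
    if sys.version_info < (3, 2):
        formatter._fmt = fmt
    else:
        # On 3.2+ the PercentStyle object is what format() consults.
        formatter._style = logging.PercentStyle(fmt)
        formatter._fmt = formatter._style._fmt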

View File

@@ -49,8 +49,12 @@ def parse_host_port(address, default_port=None):
('::1', 1234)
>>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
('2001:db8:85a3::8a2e:370:7334', 1234)
>>> parse_host_port(None)
(None, None)
"""
if not address:
return (None, None)
if address[0] == '[':
# Escaped ipv6
_host, _port = address[1:].split(']')
@@ -113,14 +117,13 @@ def set_tcp_keepalive(sock, tcp_keepalive=True,
This function configures tcp keepalive parameters if users wish to do
so.
:param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are
not sure, this should be True, and default values will be used.
:param tcp_keepidle: time to wait before starting to send keepalive probes
:param tcp_keepalive_interval: time between successive probes, once the
initial wait time is over
:param tcp_keepalive_count: number of probes to send before the connection
is killed
"""

View File

@@ -36,7 +36,6 @@ notifier_opts = [
default='INFO',
help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id',
default=None,
help='Default publisher_id for outgoing notifications'),
]

View File

@@ -52,6 +52,10 @@ kombu_opts = [
default='',
help='SSL certification authority file '
'(valid only if SSL enabled)'),
cfg.FloatOpt('kombu_reconnect_delay',
default=1.0,
help='How long to wait before reconnecting in response to an '
'AMQP consumer cancel notification.'),
cfg.StrOpt('rabbit_host',
default='localhost',
help='The RabbitMQ broker address where a single node is used'),
@@ -498,6 +502,17 @@ class Connection(object):
LOG.info(_LI("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
# XXX(nic): when reconnecting to a RabbitMQ cluster
# with mirrored queues in use, the attempt to release the
# connection can hang "indefinitely" somewhere deep down
# in Kombu. Blocking the thread for a bit prior to
# release seems to kludge around the problem where it is
# otherwise reproduceable.
if self.conf.kombu_reconnect_delay > 0:
LOG.info(_("Delaying reconnect for %1.1f seconds...") %
self.conf.kombu_reconnect_delay)
time.sleep(self.conf.kombu_reconnect_delay)
self.connection.release()
except self.connection_errors:
pass
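A minimal standalone sketch of the workaround this hunk introduces (not heat's API; the function name is illustrative):

    import time


    def safe_release(connection, reconnect_delay=1.0):
        """Pause briefly, then release an AMQP connection.

        Works around Kombu hanging on release when reconnecting to a
        RabbitMQ cluster with mirrored queues in use.
        """
        if reconnect_delay > 0:
            time.sleep(reconnect_delay)
        try:
            connection.release()
        except Exception:
            # The real code catches only self.connection_errors.
            pass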

View File

@@ -63,7 +63,7 @@ zmq_opts = [
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
cfg.IntOpt('rpc_zmq_topic_backlog',
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),

View File

@@ -34,7 +34,6 @@ matchmaker_redis_opts = [
default=6379,
help='Use this port to connect to redis host.'),
cfg.StrOpt('password',
default=None,
help='Password for Redis server. (optional)'),
]

View File

@@ -391,9 +391,12 @@ class ProcessLauncher(object):
while True:
self.handle_signal()
self._respawn_children()
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
# No signal means that stop was called. Don't clean up here.
if not self.sigcaught:
return
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
@@ -404,6 +407,11 @@ class ProcessLauncher(object):
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
self.stop()
def stop(self):
"""Terminate child processes and wait on each."""
self.running = False
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
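A hedged usage sketch of the new method (MyService is hypothetical): wait() now returns cleanly when shutdown was requested via stop() rather than via a signal.

    from heat.openstack.common import service

    launcher = service.ProcessLauncher()
    launcher.launch_service(MyService(), workers=4)
    # ... later, e.g. from another green thread:
    launcher.stop()  # SIGTERMs each child and waits on it; no signal is
                     # recorded, so the wait() loop above simply returns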

View File

@@ -22,15 +22,12 @@ from heat.openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients."),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely."),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely."),
]

View File

@@ -50,14 +50,16 @@ def _sd_notify(unset_env, msg):
def notify():
"""Send notification to Systemd that service is ready.
For details see
http://www.freedesktop.org/software/systemd/man/sd_notify.html
http://www.freedesktop.org/software/systemd/man/sd_notify.html
"""
_sd_notify(False, 'READY=1')
def notify_once():
"""Send notification once to Systemd that service is ready.
Systemd sets NOTIFY_SOCKET environment variable with the name of the
socket listening for notifications from services.
This method removes the NOTIFY_SOCKET environment variable to ensure

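A hedged usage sketch of the new helper; it is a no-op when NOTIFY_SOCKET is unset, i.e. when not running under systemd with Type=notify:

    from heat.openstack.common import systemd

    # Call after service initialisation completes; under systemd this
    # sends READY=1, then removes NOTIFY_SOCKET from os.environ so that
    # forked children cannot notify again.
    systemd.notify_once()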
View File

@@ -114,7 +114,7 @@ def utcnow():
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formatted date from timestamp."""
"""Returns an iso8601 formatted date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
@@ -134,7 +134,7 @@ def set_time_override(override_time=None):
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
assert utcnow.override_time is not None
try:
for dt in utcnow.override_time:
dt += timedelta

View File

@@ -16,6 +16,10 @@ TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
trap "rm -rf $TEMPDIR" EXIT
tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
if [ $? != 0 ]
then
exit 1
fi
if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then

View File

@@ -1,5 +1,15 @@
#!/usr/bin/env bash
# Generate sample configuration for your project.
#
# Aside from the command line flags, it also respects a config file which
# should be named oslo.config.generator.rc and be placed in the same directory.
#
# You can then export the following variables:
# HEAT_CONFIG_GENERATOR_EXTRA_MODULES: list of modules to interrogate for options.
# HEAT_CONFIG_GENERATOR_EXTRA_LIBRARIES: list of libraries to discover.
# HEAT_CONFIG_GENERATOR_EXCLUDED_FILES: list of files to remove from automatic listing.
print_hint() {
echo "Try \`${0##*/} --help' for more information." >&2
}
@@ -115,6 +125,11 @@ DEFAULT_MODULEPATH=heat.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
if [ $? != 0 ]
then
echo "Can not generate $OUTPUTFILE"
exit 1
fi
# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)