Merge "Update olso modules"

Jenkins 2014-05-08 17:44:02 +00:00 committed by Gerrit Code Review
commit 8202a84045
16 changed files with 137 additions and 72 deletions

View File

@ -16,10 +16,13 @@ import base64
from Crypto.Hash import HMAC
from Crypto import Random
import six
from heat.openstack.common.gettextutils import _
from heat.openstack.common import importutils
bchr = six.int2byte
class CryptoutilsException(Exception):
"""Generic Exception for Crypto utilities."""
@ -64,7 +67,7 @@ class HKDF(object):
:param salt: optional salt value (a non-secret random value)
"""
if salt is None:
salt = '\x00' * self.hashfn.digest_size
salt = b'\x00' * self.hashfn.digest_size
return HMAC.new(salt, ikm, self.hashfn).digest()
@ -80,12 +83,12 @@ class HKDF(object):
if length > self.max_okm_length:
raise HKDFOutputLengthTooLong(length, self.max_okm_length)
N = (length + self.hashfn.digest_size - 1) / self.hashfn.digest_size
N = (length + self.hashfn.digest_size - 1) // self.hashfn.digest_size
okm = ""
tmp = ""
okm = b""
tmp = b""
for block in range(1, N + 1):
tmp = HMAC.new(prk, tmp + info + chr(block), self.hashfn).digest()
tmp = HMAC.new(prk, tmp + info + bchr(block), self.hashfn).digest()
okm += tmp
return okm[:length]
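
The changes in this hunk are Python 3 portability fixes: string literals become bytes literals because HMAC operates on bytes, true division becomes floor division so the block count stays an integer, and chr(block) becomes six.int2byte (aliased as bchr above), which returns a single byte on both interpreters. A minimal sketch of the difference, with illustrative values not taken from the commit:

    import six

    # six.int2byte(3) is b'\x03' on Python 2 and 3 alike, which is what
    # HMAC.new() expects; Python 3's chr(3) would return text instead.
    assert six.int2byte(3) == b'\x03'

    # True division would make the block count a float on Python 3;
    # floor division keeps it an int on both interpreters.
    length, digest_size = 50, 32
    N = (length + digest_size - 1) // digest_size
    assert N == 2 and isinstance(N, int)
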
@ -135,8 +138,8 @@ class SymmetricCrypto(object):
raise CipherBlockLengthTooBig(self.cipher.block_size, MAX_CB_SIZE)
r = len(msg) % self.cipher.block_size
padlen = self.cipher.block_size - r - 1
msg += '\x00' * padlen
msg += chr(padlen)
msg += b'\x00' * padlen
msg += bchr(padlen)
enc = iv + cipher.encrypt(msg)
if b64encode:
@ -160,7 +163,7 @@ class SymmetricCrypto(object):
cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)
padded = cipher.decrypt(msg[self.cipher.block_size:])
l = ord(padded[-1]) + 1
l = ord(padded[-1:]) + 1
plain = padded[:-l]
return plain
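
The decrypt fix is needed because indexing a bytes object on Python 3 returns an int, so ord(padded[-1]) raises TypeError there; the slice padded[-1:] yields a one-byte bytes object on both interpreters, which ord() accepts. A short sketch of the pad/unpad round trip this code performs, assuming a 16-byte block size (values are illustrative):

    import six

    block_size = 16
    msg = b'attack at dawn'  # 14 bytes

    # Pad with zero bytes, then one byte recording how many zeros were added.
    padlen = block_size - len(msg) % block_size - 1
    padded = msg + b'\x00' * padlen + six.int2byte(padlen)
    assert len(padded) % block_size == 0

    # Unpad: the final byte says how many bytes to strip (plus itself).
    l = ord(padded[-1:]) + 1
    assert padded[:-l] == msg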

View File

@ -18,12 +18,12 @@ import functools
import os
import fixtures
from oslotest import base as test_base
import six
from heat.openstack.common.db.sqlalchemy import provision
from heat.openstack.common.db.sqlalchemy import session
from heat.openstack.common.db.sqlalchemy import utils
from heat.openstack.common import test as test_base
class DbFixture(fixtures.Fixture):

View File

@ -20,6 +20,7 @@ import os
import subprocess
import lockfile
from oslotest import base as test_base
from six import moves
from six.moves.urllib import parse
import sqlalchemy
@ -27,7 +28,6 @@ import sqlalchemy.exc
from heat.openstack.common.db.sqlalchemy import utils
from heat.openstack.common.gettextutils import _LE
from heat.openstack.common import test as test_base
LOG = logging.getLogger(__name__)

View File

@ -34,7 +34,7 @@ import six
_AVAILABLE_LANGUAGES = {}
# FIXME(dhellmann): Remove this when moving to heat.i18n.
# FIXME(dhellmann): Remove this when moving to oslo.i18n.
USE_LAZY = False
@ -116,7 +116,7 @@ class TranslatorFactory(object):
# NOTE(dhellmann): When this module moves out of the incubator into
# heat.i18n, these global variables can be moved to an integration
# oslo.i18n, these global variables can be moved to an integration
# module within each application.
# Create the global translation functions.
@ -147,7 +147,7 @@ def enable_lazy():
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
# FIXME(dhellmann): This function will be removed in heat.i18n,
# FIXME(dhellmann): This function will be removed in oslo.i18n,
# because the TranslatorFactory makes it superfluous.
global _, _LI, _LW, _LE, _LC, USE_LAZY
tf = TranslatorFactory('heat', lazy=True)

View File

@ -35,7 +35,17 @@ import datetime
import functools
import inspect
import itertools
import json
import sys
if sys.version_info < (2, 7):
# On Python <= 2.6, json module is not C boosted, so try to use
# simplejson module if available
try:
import simplejson as json
except ImportError:
import json
else:
import json
import six
import six.moves.xmlrpc_client as xmlrpclib
@ -160,8 +170,8 @@ def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
def load(fp):
return json.load(fp)
try:
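
Two things change in this file: on Python 2.6 the stdlib json module has no C accelerator, so simplejson is preferred when available, and load() now names its argument fp, matching the contract of json.load, which takes a file-like object rather than a string. A quick illustration of the two entry points (stdlib only, values illustrative):

    import io
    import json

    data = u'{"a": 1}'
    assert json.loads(data) == {'a': 1}              # loads() takes a string
    assert json.load(io.StringIO(data)) == {'a': 1}  # load() takes a file object
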

View File

@ -38,7 +38,7 @@ LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
help='Enables or disables inter-process locks.'),
cfg.StrOpt('lock_path',
default=os.environ.get("HEAT_LOCK_PATH"),
help='Directory to use for lock files.')
@ -276,7 +276,7 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a a method decorated with @synchronized('mylock',
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
"""
int_lock = internal_lock(name)
@ -287,6 +287,7 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
yield ext_lock
else:
yield int_lock
LOG.debug('Released semaphore "%(lock)s"', {'lock': name})
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
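
A hedged usage sketch of the decorator described above, assuming the module lives at heat.openstack.common.lockutils like the other incubator modules in this change (the function name, lock name, and lock_path are illustrative):

    from heat.openstack.common import lockutils

    @lockutils.synchronized('mylock', external=True, lock_path='/var/lock/heat')
    def sync_worker():
        # With external=True, a file lock under lock_path serializes callers
        # across processes; without it, only threads within this process are
        # serialized by the internal semaphore.
        pass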

View File

@ -84,12 +84,10 @@ logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of logging configuration file. It does not '
'disable existing loggers, but just appends specified '
'logging configuration to any other existing logging '
'options. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
@ -103,7 +101,7 @@ logging_cli_opts = [
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
@ -112,30 +110,30 @@ logging_cli_opts = [
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and then will be changed in J to honor RFC5424'),
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Use syslog rfc5424 format for logging. '
'If enabled, will add APP-NAME (RFC5424) before the '
'MSG part of the syslog message. The old format '
'without APP-NAME is deprecated in I, '
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines')
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
help='Log output to standard error.')
]
log_opts = [
@ -143,18 +141,18 @@ log_opts = [
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context'),
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context'),
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG'),
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format'),
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
@ -167,25 +165,25 @@ log_opts = [
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='List of logger=LEVEL pairs'),
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Publish error events'),
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Make deprecations fatal'),
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
help='The format for an instance that is passed with the log '
'message. '),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
help='The format for an instance UUID that is passed with the '
'log message. '),
]
CONF = cfg.CONF
@ -451,7 +449,7 @@ def _load_log_config(log_config_append):
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, str(exc))
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):

View File

@ -17,18 +17,15 @@
Network-related utilities and helper functions.
"""
# TODO(jd) Use six.moves once
# https://bitbucket.org/gutworth/six/pull-request/28
# is merged
try:
import urllib.parse
SplitResult = urllib.parse.SplitResult
except ImportError:
import urlparse
SplitResult = urlparse.SplitResult
import socket
from six.moves.urllib import parse
from heat.openstack.common.gettextutils import _LW
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def parse_host_port(address, default_port=None):
"""Interpret a string as a host:port pair.
@ -74,7 +71,7 @@ def parse_host_port(address, default_port=None):
return (host, None if port is None else int(port))
class ModifiedSplitResult(SplitResult):
class ModifiedSplitResult(parse.SplitResult):
"""Split results class for urlsplit."""
# NOTE(dims): The functions below are needed for Python 2.6.x.
@ -106,3 +103,58 @@ def urlsplit(url, scheme='', allow_fragments=True):
path, query = path.split('?', 1)
return ModifiedSplitResult(scheme, netloc,
path, query, fragment)
def set_tcp_keepalive(sock, tcp_keepalive=True,
tcp_keepidle=None,
tcp_keepalive_interval=None,
tcp_keepalive_count=None):
"""Set values for tcp keepalive parameters
This function configures tcp keepalive parameters if users wish to do
so.
:param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are
not sure, this should be True, and default values will be used.
:param tcp_keepidle: time to wait before starting to send keepalive probes
:param tcp_keepalive_interval: time between successive probes, once the
initial wait time is over
:param tcp_keepalive_count: number of probes to send before the connection
is killed
"""
# NOTE(praneshp): Despite keepalive being a tcp concept, the level is
# still SOL_SOCKET. This is a quirk.
if isinstance(tcp_keepalive, bool):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, tcp_keepalive)
else:
raise TypeError("tcp_keepalive must be a boolean")
if not tcp_keepalive:
return
# These options aren't available in the OS X version of eventlet,
# Idle + Count * Interval effectively gives you the total timeout.
if tcp_keepidle is not None:
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
tcp_keepidle)
else:
LOG.warning(_LW('tcp_keepidle not available on your system'))
if tcp_keepalive_interval is not None:
if hasattr(socket, 'TCP_KEEPINTVL'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPINTVL,
tcp_keepalive_interval)
else:
LOG.warning(_LW('tcp_keepintvl not available on your system'))
if tcp_keepalive_count is not None:
if hasattr(socket, 'TCP_KEEPCNT'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPCNT,
tcp_keepalive_count)
else:
LOG.warning(_LW('tcp_keepcnt not available on your system'))
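
A usage sketch for the new helper, assuming it is called via this module's path, heat.openstack.common.network_utils (the timing values are illustrative):

    import socket

    from heat.openstack.common import network_utils

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Probe after 600s of idle, then every 15s, giving up after 4 missed
    # probes: per the note above, total timeout is roughly 600 + 4 * 15s.
    network_utils.set_tcp_keepalive(sock,
                                    tcp_keepalive=True,
                                    tcp_keepidle=600,
                                    tcp_keepalive_interval=15,
                                    tcp_keepalive_count=4)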

View File

@ -93,10 +93,11 @@ from heat.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file containing policy')),
help=_('The JSON file that defines policies.')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule enforced when requested rule is not found')),
help=_('Default rule. Enforced when a requested rule is not '
'found.')),
]
CONF = cfg.CONF

View File

@ -202,7 +202,7 @@ class ReplyProxy(ConnectionContext):
LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
LOG.warn(_('_call_waiters: %s') % self._call_waiters)
else:
waiter.put(message_data)

View File

@ -549,7 +549,7 @@ class Connection(object):
raise
log_info = {}
log_info['err_str'] = str(e)
log_info['err_str'] = e
log_info['max_retries'] = self.max_retries
log_info.update(params)
@ -621,7 +621,7 @@ class Connection(object):
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
log_info = {'topic': topic, 'err_str': exc}
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
@ -641,11 +641,11 @@ class Connection(object):
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.debug('Timed out waiting for RPC response: %s' %
str(exc))
exc)
raise rpc_common.Timeout()
else:
LOG.exception(_LE('Failed to consume message from queue: %s') %
str(exc))
exc)
info['do_consume'] = True
def _consume():
@ -682,7 +682,7 @@ class Connection(object):
"""Send to a publisher based on the publisher class."""
def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
log_info = {'topic': topic, 'err_str': exc}
LOG.exception(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)

View File

@ -571,7 +571,7 @@ class Connection(object):
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
log_info = {'topic': topic, 'err_str': exc}
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
@ -588,11 +588,11 @@ class Connection(object):
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug('Timed out waiting for RPC response: %s' %
str(exc))
exc)
raise rpc_common.Timeout()
else:
LOG.exception(_LE('Failed to consume message from queue: %s') %
str(exc))
exc)
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
@ -625,7 +625,7 @@ class Connection(object):
"""Send to a publisher based on the publisher class."""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
log_info = {'topic': topic, 'err_str': exc}
LOG.exception(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)

View File

@ -48,7 +48,7 @@ class RpcProxy(object):
basis.
:param version_cap: Optionally cap the maximum version used for sent
messages.
:param serializer: Optionaly (de-)serialize entities with a
:param serializer: Optionally (de-)serialize entities with a
provided helper.
"""
self.topic = topic

View File

@ -78,7 +78,7 @@ def bool_from_string(subject, strict=False, default=False):
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, six.string_types):
subject = str(subject)
subject = six.text_type(subject)
lowered = subject.strip().lower()
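
six.text_type is unicode on Python 2 and str on Python 3, so non-string input is coerced to text rather than bytes before .strip().lower() runs, and non-ASCII subjects no longer trip a UnicodeEncodeError on Python 2. Expected behaviour per the docstring above, assuming the usual incubator path heat.openstack.common.strutils:

    from heat.openstack.common import strutils

    assert strutils.bool_from_string('t') is True
    assert strutils.bool_from_string(u'NO') is False
    assert strutils.bool_from_string(1) is True         # coerced to u'1' first
    assert strutils.bool_from_string('maybe') is False  # unknown -> default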

View File

@ -18,7 +18,7 @@
##
## DO NOT MODIFY THIS FILE
##
## This file is being graduated to the heattest library. Please make all
## This file is being graduated to the oslotest library. Please make all
## changes there, and only backport critical fixes here. - dhellmann
##
##############################################################################

View File

@ -99,7 +99,7 @@ class ThreadGroup(object):
except Exception as ex:
LOG.exception(ex)
def _stop_timers(self):
def stop_timers(self):
for x in self.timers:
try:
x.stop()
@ -114,7 +114,7 @@ class ThreadGroup(object):
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self._stop_timers()
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
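
The rename from _stop_timers to stop_timers makes timer cancellation part of the public API, so callers can cancel periodic timers without tearing down the group's threads. A hedged usage sketch, assuming the incubator path heat.openstack.common.threadgroup (the callback and interval are illustrative):

    from heat.openstack.common import threadgroup

    tg = threadgroup.ThreadGroup()
    tg.add_timer(60, lambda: None)  # fire a no-op callback every 60 seconds

    tg.stop_timers()        # cancel timers only; threads keep running
    tg.stop(graceful=True)  # later: wait for remaining threads to finish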