oslo: update the remainder of the modules

Change-Id: I83796c1e1f2f69d2ce27e7180c4004a5f345f063
Angus Salkeld 2013-12-09 08:29:35 +11:00
parent 28da5c3f06
commit 1179a8eda4
37 changed files with 860 additions and 221 deletions

View File

@ -166,7 +166,7 @@
# format string to use for log messages with context (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# format string to use for log messages without context
# (string value)
@ -181,7 +181,7 @@
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# list of logger=LEVEL pairs (list value)
#default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,keystone=INFO,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN
# publish error events (boolean value)
#publish_errors=false
@ -197,12 +197,13 @@
# it like this (string value)
#instance_uuid_format="[instance: %(uuid)s] "
# If this option is specified, the logging configuration file
# specified is used and overrides any other logging options
# specified. Please see the Python logging module
# documentation for details on logging configuration files.
# (string value)
#log_config=<None>
# The name of logging configuration file. It does not disable
# existing loggers, but just appends specified logging
# configuration to any other existing logging options. Please
# see the Python logging module documentation for details on
# logging configuration files. (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append=<None>
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
@ -261,7 +262,7 @@
# Options defined in heat.openstack.common.notifier.rpc_notifier
#
# AMQP topic used for openstack notifications (list value)
# AMQP topic used for OpenStack notifications (list value)
#notification_topics=notifications
@ -725,7 +726,7 @@
# Options defined in heat.openstack.common.notifier.rpc_notifier2
#
# AMQP topic(s) used for openstack notifications (list value)
# AMQP topic(s) used for OpenStack notifications (list value)
#topics=notifications

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -23,12 +21,11 @@ context or provide additional information in their specific WSGI pipeline.
"""
import itertools
from heat.openstack.common import uuidutils
import uuid
def generate_request_id():
return 'req-%s' % uuidutils.generate_uuid()
return 'req-%s' % str(uuid.uuid4())
class RequestContext(object):
@ -39,26 +36,46 @@ class RequestContext(object):
accesses the system, as well as additional request information.
"""
def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None):
user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
user_domain=None, project_domain=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None,
instance_uuid=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.domain = domain
self.user_domain = user_domain
self.project_domain = project_domain
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
self.instance_uuid = instance_uuid
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
user_idt = (
self.user_idt_format.format(user=self.user or '-',
tenant=self.tenant or '-',
domain=self.domain or '-',
user_domain=self.user_domain or '-',
p_domain=self.project_domain or '-'))
return {'user': self.user,
'tenant': self.tenant,
'domain': self.domain,
'user_domain': self.user_domain,
'project_domain': self.project_domain,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id}
'request_id': self.request_id,
'instance_uuid': self.instance_uuid,
'user_identity': user_idt}
def get_admin_context(show_deleted=False):
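With the new fields in place, to_dict() now emits a single user_identity string alongside the raw values, which is what the updated logging_context_format_string consumes. A minimal sketch (values invented):

from heat.openstack.common import context

ctx = context.RequestContext(user='alice', tenant='demo')
# user_idt_format packs user, tenant, domain, user_domain and
# project_domain into one string, substituting '-' for unset values
print(ctx.to_dict()['user_identity'])  # alice demo - - -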

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@ -49,3 +47,8 @@ class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(str(message))
class DBConnectionError(DBError):
"""Wraps connection specific exception."""
pass

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#

View File

@ -36,7 +36,8 @@
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import distutils.version as dist_version
import os

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@ -41,13 +39,13 @@ class ModelBase(object):
if not session:
session = sa.get_session()
# NOTE(boris-42): This part of code should be look like:
# sesssion.add(self)
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicity.
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()

View File

@ -0,0 +1,187 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provision test environment for specific DB backends"""
import argparse
import os
import random
import string
import sqlalchemy
from heat.openstack.common.db import exception as exc
SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')
def _gen_credentials(*names):
"""Generate credentials."""
auth_dict = {}
for name in names:
val = ''.join(random.choice(string.lowercase) for i in xrange(10))
auth_dict[name] = val
return auth_dict
def _get_engine(uri=SQL_CONNECTION):
"""Engine creation
By default the uri is SQL_CONNECTION, which holds admin credentials.
Call the function without arguments to get an admin connection; an
admin connection is required to create the temporary user and database
for each particular test. Otherwise, use an existing connection to
reconnect to the temporary database.
"""
return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)
def _execute_sql(engine, sql, driver):
"""Initialize connection, execute sql query and close it."""
try:
with engine.connect() as conn:
if driver == 'postgresql':
conn.connection.set_isolation_level(0)
for s in sql:
conn.execute(s)
except sqlalchemy.exc.OperationalError:
msg = ('%s does not match database admin '
'credentials or database does not exist.')
raise exc.DBConnectionError(msg % SQL_CONNECTION)
def create_database(engine):
"""Provide temporary user and database for each particular test."""
driver = engine.name
auth = _gen_credentials('database', 'user', 'passwd')
sqls = {
'mysql': [
"drop database if exists %(database)s;",
"grant all on %(database)s.* to '%(user)s'@'localhost'"
" identified by '%(passwd)s';",
"create database %(database)s;",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
"create user %(user)s with password '%(passwd)s';",
"create database %(database)s owner %(user)s;",
]
}
if driver == 'sqlite':
return 'sqlite:////tmp/%s' % auth['database']
try:
sql_rows = sqls[driver]
except KeyError:
raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(engine, sql_query, driver)
params = auth.copy()
params['backend'] = driver
return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
def drop_database(engine, current_uri):
"""Drop temporary database and user after each particular test."""
engine = _get_engine(current_uri)
admin_engine = _get_engine()
driver = engine.name
auth = {'database': engine.url.database, 'user': engine.url.username}
if driver == 'sqlite':
try:
os.remove(auth['database'])
except OSError:
pass
return
sqls = {
'mysql': [
"drop database if exists %(database)s;",
"drop user '%(user)s'@'localhost';",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
]
}
try:
sql_rows = sqls[driver]
except KeyError:
raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(admin_engine, sql_query, driver)
def main():
"""Controller to handle commands
::create: Create test user and database with random names.
::drop: Drop user and database created by previous command.
"""
parser = argparse.ArgumentParser(
description='Controller to handle database creation and dropping'
' commands.',
epilog='Under normal circumstances this is not used directly.'
' Used in .testr.conf to automate test database creation'
' and dropping processes.')
subparsers = parser.add_subparsers(
help='Subcommands to manipulate temporary test databases.')
create = subparsers.add_parser(
'create',
help='Create temporary test '
'databases and users.')
create.set_defaults(which='create')
create.add_argument(
'instances_count',
type=int,
help='Number of databases to create.')
drop = subparsers.add_parser(
'drop',
help='Drop temporary test databases and users.')
drop.set_defaults(which='drop')
drop.add_argument(
'instances',
nargs='+',
help='List of databases uri to be dropped.')
args = parser.parse_args()
engine = _get_engine()
which = args.which
if which == "create":
for i in range(int(args.instances_count)):
print(create_database(engine))
elif which == "drop":
for db in args.instances:
drop_database(engine, db)
if __name__ == "__main__":
main()
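The controller is normally driven from .testr.conf, but the same functions can be exercised directly. A sketch, assuming the module lives at heat.openstack.common.db.sqlalchemy.provision and the default sqlite:// admin connection:

from heat.openstack.common.db.sqlalchemy import provision

engine = provision._get_engine()         # admin engine from SQL_CONNECTION
uri = provision.create_database(engine)  # e.g. sqlite:////tmp/abcdefghij
# ... run a test against uri ...
provision.drop_database(engine, uri)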

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@ -109,7 +107,7 @@ Recommended ways to use sessions within this framework:
filter_by(id=subq.as_scalar()).\
update({'bar': newbar})
For reference, this emits approximagely the following SQL statement:
For reference, this emits approximately the following SQL statement:
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
@ -613,7 +611,7 @@ def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn(_('Got mysql server has gone away: %s'), ex)
LOG.warning(_('Got mysql server has gone away: %s'), ex)
raise sqla_exc.DisconnectionError("Database server went away")
else:
raise
@ -695,7 +693,7 @@ def create_engine(sql_connection, sqlite_fk=False):
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warn(msg % remaining)
LOG.warning(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)

View File

@ -0,0 +1,305 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import functools
import os
import lockfile
import sqlalchemy
import sqlalchemy.exc
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
from heat.openstack.common import processutils
from heat.openstack.common.py3kcompat import urlutils
from heat.openstack.common import test
LOG = logging.getLogger(__name__)
def _get_connect_string(backend, user, passwd, database):
"""Get database connection
Try to get a connection with a very specific set of values; if we can,
we'll run the tests, otherwise they are skipped.
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
backend = "mysql+mysqldb"
else:
raise Exception("Unrecognized backend: '%s'" % backend)
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% {'backend': backend, 'user': user, 'passwd': passwd,
'database': database})
def _is_backend_avail(backend, user, passwd, database):
try:
connect_uri = _get_connect_string(backend, user, passwd, database)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def _have_mysql(user, passwd, database):
present = os.environ.get('TEST_MYSQL_PRESENT')
if present is None:
return _is_backend_avail('mysql', user, passwd, database)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('TEST_POSTGRESQL_PRESENT')
if present is None:
return _is_backend_avail('postgres', user, passwd, database)
return present.lower() in ('', 'true')
def get_db_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)
def _set_db_lock(lock_path=None, lock_prefix=None):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
path = lock_path or os.environ.get("HEAT_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock:
LOG.debug(_('Got lock "%s"') % f.__name__)
return f(*args, **kwargs)
finally:
LOG.debug(_('Lock released "%s"') % f.__name__)
return wrapper
return decorator
class BaseMigrationTestCase(test.BaseTestCase):
"""Base class fort testing of migration utils."""
def __init__(self, *args, **kwargs):
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
self.DEFAULT_CONFIG_FILE)
self.test_databases = {}
self.migration_api = None
def setUp(self):
super(BaseMigrationTestCase, self).setUp()
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
except ConfigParser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# We start each test case with a completely blank slate.
self._reset_databases()
def tearDown(self):
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_databases()
super(BaseMigrationTestCase, self).tearDown()
def execute_cmd(self, cmd=None):
out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True)
output = out or err
LOG.debug(output)
self.assertEqual('', err,
"Failed to run: %s\n%s" % (cmd, output))
@_set_db_lock('pgadmin', 'tests-')
def _reset_pg(self, conn_pieces):
(user, password, database, host) = get_db_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
# drop database which we have connected to, so for such
# operations there is a special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sql = ("drop database if exists %s;") % database
droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(droptable)
sql = ("create database %s;") % database
createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(createtable)
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
def _reset_databases(self):
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = urlutils.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
get_db_connection_info(conn_pieces)
sql = ("drop database if exists %(db)s; "
"create database %(db)s;") % {'db': database}
cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
"-e \"%(sql)s\"") % {'user': user, 'password': password,
'host': host, 'sql': sql}
self.execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
self._reset_pg(conn_pieces)
class WalkVersionsMixin(object):
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
# Place the database under version control
self.migration_api.version_control(engine, self.REPOSITORY,
self.INIT_VERSION)
self.assertEqual(self.INIT_VERSION,
self.migration_api.db_version(engine,
self.REPOSITORY))
LOG.debug('latest version is %s' % self.REPOSITORY.latest)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
for version in versions:
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version, with_data=True)
if snake_walk:
downgraded = self._migrate_down(
engine, version - 1, with_data=True)
if downgraded:
self._migrate_up(engine, version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(versions):
# downgrade -> upgrade -> downgrade
downgraded = self._migrate_down(engine, version - 1)
if snake_walk and downgraded:
self._migrate_up(engine, version)
self._migrate_down(engine, version - 1)
def _migrate_down(self, engine, version, with_data=False):
try:
self.migration_api.downgrade(engine, self.REPOSITORY, version)
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(
version, self.migration_api.db_version(engine, self.REPOSITORY))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%03d" % (version + 1), None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%03d" % version, None)
if pre_upgrade:
data = pre_upgrade(engine)
self.migration_api.upgrade(engine, self.REPOSITORY, version)
self.assertEqual(version,
self.migration_api.db_version(engine,
self.REPOSITORY))
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if check:
check(engine, data)
except Exception:
LOG.error("Failed to migrate to version %s on engine %s" %
(version, engine))
raise
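A minimal sketch of how a project's test case plugs into WalkVersionsMixin; the repository path is hypothetical and migrate's versioning API stands in for self.migration_api:

from migrate.versioning import api as migration_api
from migrate.versioning.repository import Repository

class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
    def setUp(self):
        super(TestMigrations, self).setUp()
        self.INIT_VERSION = 0
        # hypothetical path to the project's migrate repository
        self.REPOSITORY = Repository('heat/db/sqlalchemy/migrate_repo')
        self.migration_api = migration_api

    def test_walk_versions(self):
        # walks upgrade (and optionally downgrade) across every engine
        # configured in test_migrations.conf
        for engine in self.engines.values():
            self._walk_versions(engine, snake_walk=False, downgrade=True)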

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
@ -96,7 +94,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
@ -24,6 +22,8 @@ import sys
import time
import traceback
import six
from heat.openstack.common.gettextutils import _ # noqa
@ -65,7 +65,7 @@ class save_and_reraise_exception(object):
self.tb))
return False
if self.reraise:
raise self.type_, self.value, self.tb
six.reraise(self.type_, self.value, self.tb)
def forever_retry_uncaught_exceptions(infunc):
@ -77,7 +77,8 @@ def forever_retry_uncaught_exceptions(infunc):
try:
return infunc(*args, **kwargs)
except Exception as exc:
if exc.message == last_exc_message:
this_exc_message = six.u(str(exc))
if this_exc_message == last_exc_message:
exc_count += 1
else:
exc_count = 1
@ -85,12 +86,12 @@ def forever_retry_uncaught_exceptions(infunc):
# the exception message changes
cur_time = int(time.time())
if (cur_time - last_log_time > 60 or
exc.message != last_exc_message):
this_exc_message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = exc.message
last_exc_message = this_exc_message
exc_count = 0
# This should be a very rare event. In case it isn't, do
# a sleep.
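For reference, save_and_reraise_exception is typically used like this (a sketch; the resource object is hypothetical):

from heat.openstack.common import excutils

def create_with_cleanup(resource):
    try:
        resource.create()
    except Exception:
        with excutils.save_and_reraise_exception():
            # cleanup runs first; six.reraise() then re-raises the
            # original exception with its traceback intact on both
            # Python 2 and Python 3
            resource.delete()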

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -19,6 +17,7 @@
import contextlib
import errno
import os
import tempfile
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _ # noqa
@ -69,33 +68,34 @@ def read_cached_file(filename, force_reload=False):
return (reloaded, cache_info['data'])
def delete_if_exists(path):
def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
:param remove: Optional function to remove passed path
"""
try:
os.unlink(path)
remove(path)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
def remove_path_on_error(path):
def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
:param remove: Optional function to remove passed path
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
delete_if_exists(path)
remove(path)
def file_open(*args, **kwargs):
@ -108,3 +108,30 @@ def file_open(*args, **kwargs):
state at all (for unit tests)
"""
return file(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
"""Create temporary file or use existing file.
This util is needed for creating temporary file with
specified content, suffix and prefix. If path is not None,
it will be used for writing content. If the path doesn't
exist it'll be created.
:param content: content for temporary file.
:param path: same as parameter 'dir' for mkstemp
:param suffix: same as parameter 'suffix' for mkstemp
:param prefix: same as parameter 'prefix' for mkstemp
For example: it can be used in database tests for creating
configuration files.
"""
if path:
ensure_tree(path)
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
try:
os.write(fd, content)
finally:
os.close(fd)
return path
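A short usage sketch of the new helper (path and content invented):

from heat.openstack.common import fileutils

path = fileutils.write_to_tempfile(b'[DEFAULT]\nverbose = True\n',
                                   suffix='.conf')
print(path)  # e.g. /tmp/tmpA1b2C3.conf
fileutils.delete_if_exists(path)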

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
@ -317,7 +315,7 @@ def get_available_languages(domain):
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and all projects udpate
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
@ -329,13 +327,21 @@ def get_available_languages(domain):
def get_localized_message(message, user_locale):
"""Gets a localized version of the given message in the given locale."""
"""Gets a localized version of the given message in the given locale.
If the message is not a Message object the message is returned as-is.
If the locale is None the message is translated to the default locale.
:returns: the translated message in unicode, or the original message if
it could not be translated
"""
translated = message
if isinstance(message, Message):
if user_locale:
message.locale = user_locale
return six.text_type(message)
else:
return message
original_locale = message.locale
message.locale = user_locale
translated = six.text_type(message)
message.locale = original_locale
return translated
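The new docstring's contract in miniature (the locale is arbitrary):

# plain strings are not Message instances, so they pass through as-is
assert get_localized_message('raw string', 'es') == 'raw string'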
class LocaleHandler(logging.Handler):

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
@ -38,14 +36,19 @@ import functools
import inspect
import itertools
import json
import types
import xmlrpclib
try:
import xmlrpclib
except ImportError:
# NOTE(jd): xmlrpclib is not shipped with Python 3
xmlrpclib = None
import netaddr
import six
from heat.openstack.common import gettextutils
from heat.openstack.common import importutils
from heat.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
@ -53,7 +56,8 @@ _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (types.NoneType, int, basestring, bool, float, long)
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
@ -125,11 +129,13 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
@ -138,7 +144,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif isinstance(value, netaddr.IPAddress):
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
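A sketch of what the instance-conversion path produces, given the changes above (class and values invented):

import datetime
from heat.openstack.common import jsonutils

class Server(object):
    def __init__(self):
        self.name = 'web-1'
        self.launched_at = datetime.datetime(2013, 12, 9, 8, 29, 35)

# convert_instances walks __dict__; datetimes go through
# timeutils.strtime(), so everything ends up JSON-serializable
print(jsonutils.to_primitive(Server(), convert_instances=True))
# {'name': 'web-1', 'launched_at': '2013-12-09T08:29:35.000000'}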

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -20,10 +18,14 @@ import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from eventlet import semaphore
from oslo.config import cfg
from heat.openstack.common import fileutils
@ -39,6 +41,7 @@ util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.environ.get("HEAT_LOCK_PATH"),
help=('Directory to use for lock files.'))
]
@ -131,13 +134,15 @@ else:
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `semaphore.Semaphore` instance unless external is
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
@ -152,15 +157,12 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with _semaphores_lock:
try:
sem = _semaphores[name]
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
@ -240,13 +242,14 @@ def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
@ -274,3 +277,27 @@ def synchronized_with_prefix(lock_file_prefix):
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
"""Create a dir for locks and pass it to command from arguments
If you run this:
python -m openstack.common.lockutils python setup.py testr <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["HEAT_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
if __name__ == '__main__':
sys.exit(main(sys.argv))
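For reference, the decorator built by synchronized_with_prefix composes with the new lock_path default like so (names hypothetical):

from heat.openstack.common import lockutils

synchronized = lockutils.synchronized_with_prefix('heat-')

@synchronized('stack-update', external=True)
def update_stack(stack_id):
    # only one process enters at a time; the lock file lives under
    # CONF.lock_path, which now defaults to $HEAT_LOCK_PATH
    pass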

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@ -35,10 +33,12 @@ import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from heat.openstack.common.gettextutils import _ # noqa
@ -49,6 +49,24 @@ from heat.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
@ -63,11 +81,13 @@ common_cli_opts = [
]
logging_cli_opts = [
cfg.StrOpt('log-config',
cfg.StrOpt('log-config-append',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
deprecated_name='log-config',
help='The name of logging configuration file. It does not '
'disable existing loggers, but just appends specified '
'logging configuration to any other existing logging '
'options. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
@ -110,7 +130,7 @@ generic_log_opts = [
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
@ -126,12 +146,14 @@ log_opts = [
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=WARN'
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'iso8601=WARN',
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
@ -207,6 +229,41 @@ def _get_log_file_path(binary=None):
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords, defaults to "***".
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
class BaseLoggerAdapter(logging.LoggerAdapter):
@ -249,6 +306,13 @@ class ContextAdapter(BaseLoggerAdapter):
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
@ -260,18 +324,20 @@ class ContextAdapter(BaseLoggerAdapter):
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
else:
instance_uuid = kwargs.pop('instance_uuid', None)
if instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.update({"project": self.project})
extra.update({"version": self.version})
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
@ -285,7 +351,7 @@ class JSONFormatter(logging.Formatter):
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
@ -323,10 +389,10 @@ class JSONFormatter(logging.Formatter):
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
def logging_excepthook(exc_type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
extra['exc_info'] = (exc_type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
@ -344,17 +410,18 @@ class LogConfigError(Exception):
err_msg=self.err_msg)
def _load_log_config(log_config):
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config)
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config, str(exc))
raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
@ -410,7 +477,7 @@ def _setup_logging_from_conf():
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
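Tying this back to the context module changes earlier in the commit: under the new default logging_context_format_string, a context-tagged line comes out roughly like this (all values invented):

2013-12-09 08:29:35.123 4242 INFO heat.engine.service [req-5f8a... alice demo - - -] Creating stack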

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
@ -19,7 +17,7 @@
Network-related utilities and helper functions.
"""
import urlparse
from heat.openstack.common.py3kcompat import urlutils
def parse_host_port(address, default_port=None):
@ -72,10 +70,10 @@ def urlsplit(url, scheme='', allow_fragments=True):
The parameters are the same as urlparse.urlsplit.
"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(
scheme, netloc, path, query, fragment = urlutils.urlsplit(
url, scheme, allow_fragments)
if allow_fragments and '#' in path:
path, fragment = path.split('#', 1)
if '?' in path:
path, query = path.split('?', 1)
return urlparse.SplitResult(scheme, netloc, path, query, fragment)
return urlutils.SplitResult(scheme, netloc, path, query, fragment)
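The wrapper matters for schemes the stdlib doesn't know to split; a sketch (scheme and URL invented):

from heat.openstack.common import network_utils

res = network_utils.urlsplit('qpid://broker:5672/exchange?ssl=1#tag')
# older stdlibs leave '?ssl=1#tag' inside path for unknown schemes;
# the manual split above separates them out consistently
print(res.path, res.query, res.fragment)  # /exchange ssl=1 tag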

View File

@ -25,7 +25,7 @@ CONF = cfg.CONF
def notify(_context, message):
"""Notifies the recipient of the desired event given the model.
Log notifications using openstack's default logging system.
Log notifications using OpenStack's default logging system.
"""
priority = message.get('priority',

View File

@ -24,7 +24,7 @@ LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'notification_topics', default=['notifications', ],
help='AMQP topic used for openstack notifications')
help='AMQP topic used for OpenStack notifications')
CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)
@ -43,4 +43,5 @@ def notify(context, message):
rpc.notify(context, topic, message)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())
"Payload=%(message)s"),
{"topic": topic, "message": message})

View File

@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'topics', default=['notifications', ],
help='AMQP topic(s) used for openstack notifications')
help='AMQP topic(s) used for OpenStack notifications')
opt_group = cfg.OptGroup(name='rpc_notifier2',
title='Options for rpc_notifier2')
@ -49,4 +49,5 @@ def notify(context, message):
rpc.notify(context, topic, message, envelope=True)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())
"Payload=%(message)s"),
{"topic": topic, "message": message})

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
@ -279,11 +277,10 @@ class Enforcer(object):
return result
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
"""Abstract base class for Check classes."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
@ -506,7 +503,7 @@ def _parse_list_rule(rule):
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
if isinstance(inner_rule, six.string_types):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
@ -626,6 +623,7 @@ def reducer(*tokens):
return decorator
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
"""Implement the core of parsing the policy language.
@ -638,8 +636,6 @@ class ParseState(object):
shouldn't be that big a problem.
"""
__metaclass__ = ParseStateMeta
def __init__(self):
"""Initialize the ParseState."""
@ -765,7 +761,7 @@ def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, basestring):
if isinstance(rule, six.string_types):
return _parse_text_rule(rule)
return _parse_list_rule(rule)

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -19,6 +17,7 @@
System-level utilities and helper functions.
"""
import logging as stdlib_logging
import os
import random
import shlex
@ -27,7 +26,7 @@ import signal
from eventlet.green import subprocess
from eventlet import greenthread
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _ # noqa
from heat.openstack.common import log as logging
@ -74,9 +73,9 @@ def _subprocess_setup():
def execute(*cmd, **kwargs):
"""
Helper method to shell out and execute a command through subprocess with
optional retry.
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
@ -102,6 +101,9 @@ def execute(*cmd, **kwargs):
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be stdlib_logging.DEBUG or
stdlib_logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
@ -116,6 +118,7 @@ def execute(*cmd, **kwargs):
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
@ -127,7 +130,7 @@ def execute(*cmd, **kwargs):
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and os.geteuid() != 0:
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
@ -139,7 +142,7 @@ def execute(*cmd, **kwargs):
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
@ -163,20 +166,19 @@ def execute(*cmd, **kwargs):
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
LOG.log(loglevel, _('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
@ -187,8 +189,7 @@ def execute(*cmd, **kwargs):
def trycmd(*args, **kwargs):
"""
A wrapper around execute() to more easily handle warnings and errors.
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
@ -203,7 +204,7 @@ def trycmd(*args, **kwargs):
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError, exn:
except ProcessExecutionError as exn:
out, err = '', str(exn)
failed = True
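A sketch of the new loglevel knob on execute() (command invented):

import logging as stdlib_logging
from heat.openstack.common import processutils

# route the 'Running cmd' / 'Result was' lines to INFO instead of
# the default DEBUG
out, err = processutils.execute('ls', '-l', '/tmp',
                                loglevel=stdlib_logging.INFO)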

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
@ -20,6 +18,7 @@
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
@ -28,7 +27,6 @@ import time
import eventlet
from eventlet import event
import logging as std_logging
from oslo.config import cfg
from heat.openstack.common import eventlet_backdoor
@ -43,6 +41,29 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_sighup(signo):
return _sighup_supported() and signo == signal.SIGHUP
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
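These helpers let the SIGHUP handling degrade gracefully on platforms that lack it (e.g. Windows). For instance:

import signal

# maps the common signals to their names; SIGHUP is only included
# when the platform defines it
print(_signo_to_signame(signal.SIGTERM))  # SIGTERM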
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
@ -100,18 +121,13 @@ class SignalExit(SystemExit):
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGHUP, signal.SIG_DFL)
_set_signals_handler(signal.SIG_DFL)
raise SignalExit(signo)
def handle_signal(self):
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
signal.signal(signal.SIGHUP, self._handle_signal)
_set_signals_handler(self._handle_signal)
def _wait_for_exit_or_signal(self):
def _wait_for_exit_or_signal(self, ready_callback=None):
status = None
signo = 0
@ -119,11 +135,11 @@ class ServiceLauncher(Launcher):
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
if ready_callback:
ready_callback()
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT',
signal.SIGHUP: 'SIGHUP'}[exc.signo]
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
@ -140,11 +156,11 @@ class ServiceLauncher(Launcher):
return status, signo
def wait(self):
def wait(self, ready_callback=None):
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal()
if signo != signal.SIGHUP:
status, signo = self._wait_for_exit_or_signal(ready_callback)
if not _is_sighup(signo):
return status
self.restart()
@ -167,18 +183,14 @@ class ProcessLauncher(object):
self.handle_signal()
def handle_signal(self):
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
signal.signal(signal.SIGHUP, self._handle_signal)
_set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGHUP, signal.SIG_DFL)
_set_signals_handler(signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
@ -200,20 +212,22 @@ class ProcessLauncher(object):
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
signal.signal(signal.SIGHUP, _sighup)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = None
status = 0
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT',
signal.SIGHUP: 'SIGHUP'}[exc.signo]
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
@ -262,14 +276,11 @@ class ProcessLauncher(object):
pid = os.fork()
if pid == 0:
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
if signo != signal.SIGHUP:
if not _is_sighup(signo):
break
launcher.restart()
@ -339,11 +350,9 @@ class ProcessLauncher(object):
self.handle_signal()
self._respawn_children()
if self.sigcaught:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT',
signal.SIGHUP: 'SIGHUP'}[self.sigcaught]
signame = _signo_to_signame(self.sigcaught)
LOG.info(_('Caught %s, stopping children'), signame)
if self.sigcaught != signal.SIGHUP:
if not _is_sighup(self.sigcaught):
break
for pid in self.children:

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -21,6 +19,7 @@ Time related utilities and helper functions.
import calendar
import datetime
import time
import iso8601
import six
@ -49,9 +48,9 @@ def parse_isotime(timestr):
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(unicode(e))
raise ValueError(six.text_type(e))
except TypeError as e:
raise ValueError(unicode(e))
raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@ -90,6 +89,11 @@ def is_newer_than(after, seconds):
def utcnow_ts():
"""Timestamp version of our utcnow function."""
if utcnow.override_time is None:
# NOTE(kgriffs): This is several times faster
# than going through calendar.timegm(...)
return int(time.time())
return calendar.timegm(utcnow().timetuple())
@ -111,12 +115,15 @@ def iso8601_from_timestamp(timestamp):
utcnow.override_time = None
def set_time_override(override_time=datetime.datetime.utcnow()):
def set_time_override(override_time=None):
"""Overrides utils.utcnow.
Make it return a constant time or a list thereof, one at a time.
:param override_time: datetime instance or list thereof. If not
given, defaults to the current UTC time.
"""
utcnow.override_time = override_time
utcnow.override_time = override_time or datetime.datetime.utcnow()
def advance_time_delta(timedelta):
@ -169,6 +176,15 @@ def delta_seconds(before, after):
datetime objects (as a float, to microsecond resolution).
"""
delta = after - before
return total_seconds(delta)
def total_seconds(delta):
"""Return the total seconds of datetime.timedelta object.
Compute total seconds of datetime.timedelta, datetime.timedelta
doesn't have method total_seconds in Python2.6, calculate it manually.
"""
try:
return delta.total_seconds()
except AttributeError:
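Either branch gives the same answer; for example:

import datetime

delta = datetime.timedelta(days=1, seconds=30)
print(total_seconds(delta))  # 86430.0 on Python 2.6 and 2.7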

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#

View File

@ -14,3 +14,4 @@ testscenarios>=0.4
python-glanceclient>=0.9.0
sphinx>=1.1.2
oslo.sphinx
lockfile>=0.8

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
@ -114,12 +112,12 @@ class InstallVenv(object):
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
# setuptools.
self.pip_install('pip>=1.3')
# setuptools and pbr
self.pip_install('pip>=1.4')
self.pip_install('setuptools')
self.pip_install('pbr')
self.pip_install('-r', self.requirements)
self.pip_install('-r', self.test_requirements)
self.pip_install('-r', self.requirements, '-r', self.test_requirements)
def parse_args(self, argv):
"""Parses command-line arguments."""