From aebf5ec9d0159c85f07d656dd81b5ad5e0e9386e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 29 Jan 2014 17:37:45 +0100 Subject: [PATCH] Update oslo This updates to: commit b3e5166e3f47148856d898c4653b0c089c2315f5 Merge: 0002ffa 39e1c5c Author: Jenkins Date: Thu Jan 30 10:18:54 2014 +0000 Merge "Move db tests base.py to common code" Change-Id: Iaa76b1165e195f0c3874bc5c8bcbc9ecbb6ca5d7 --- .../openstack/common/config/generator.py | 30 +- .../common/db/sqlalchemy/migration.py | 19 +- .../openstack/common/db/sqlalchemy/models.py | 4 +- .../openstack/common/db/sqlalchemy/session.py | 10 +- .../common/db/sqlalchemy/test_base.py | 154 ++++ .../common/db/sqlalchemy/test_migrations.py | 65 +- .../openstack/common/db/sqlalchemy/utils.py | 49 ++ .../openstack/common/deprecated/__init__.py | 21 - .../openstack/common/deprecated/wsgi.py | 735 ------------------ ceilometer/openstack/common/fileutils.py | 1 - ceilometer/openstack/common/fixture/config.py | 1 + .../openstack/common/fixture/mockpatch.py | 10 +- ceilometer/openstack/common/lockutils.py | 105 ++- ceilometer/openstack/common/log.py | 61 +- ceilometer/openstack/common/log_handler.py | 1 + .../openstack/common/middleware/audit.py | 1 + .../openstack/common/middleware/base.py | 1 + .../openstack/common/middleware/context.py | 59 -- .../openstack/common/middleware/debug.py | 1 + .../openstack/common/middleware/notifier.py | 1 + .../openstack/common/middleware/sizelimit.py | 5 +- .../common/notifier/test_notifier.py | 1 - ceilometer/openstack/common/policy.py | 7 +- ceilometer/openstack/common/processutils.py | 248 ------ ceilometer/openstack/common/rpc/__init__.py | 33 +- ceilometer/openstack/common/rpc/impl_fake.py | 1 + ceilometer/openstack/common/rpc/impl_kombu.py | 28 +- ceilometer/openstack/common/rpc/impl_zmq.py | 2 +- ceilometer/openstack/common/rpc/matchmaker.py | 1 + .../openstack/common/rpc/matchmaker_redis.py | 1 + .../openstack/common/rpc/matchmaker_ring.py | 1 + ceilometer/openstack/common/service.py | 11 +- ceilometer/openstack/common/test.py | 38 +- ceilometer/openstack/common/threadgroup.py | 8 +- ceilometer/openstack/common/timeutils.py | 2 +- ceilometer/openstack/common/xmlutils.py | 72 -- etc/ceilometer/ceilometer.conf.sample | 81 +- tools/config/generate_sample.sh | 18 +- 38 files changed, 498 insertions(+), 1389 deletions(-) create mode 100644 ceilometer/openstack/common/db/sqlalchemy/test_base.py delete mode 100644 ceilometer/openstack/common/deprecated/__init__.py delete mode 100644 ceilometer/openstack/common/deprecated/wsgi.py delete mode 100644 ceilometer/openstack/common/middleware/context.py delete mode 100644 ceilometer/openstack/common/processutils.py delete mode 100644 ceilometer/openstack/common/xmlutils.py diff --git a/ceilometer/openstack/common/config/generator.py b/ceilometer/openstack/common/config/generator.py index 0d3571a4a0..8b2ae6518c 100644 --- a/ceilometer/openstack/common/config/generator.py +++ b/ceilometer/openstack/common/config/generator.py @@ -18,6 +18,7 @@ from __future__ import print_function +import argparse import imp import os import re @@ -27,6 +28,7 @@ import textwrap from oslo.config import cfg import six +import stevedore.named from ceilometer.openstack.common import gettextutils from ceilometer.openstack.common import importutils @@ -59,9 +61,16 @@ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), WORDWRAP_WIDTH = 60 -def generate(srcfiles): +def generate(argv): + parser = argparse.ArgumentParser( + description='generate sample configuration file', + ) + 
parser.add_argument('-l', dest='libraries', action='append') + parser.add_argument('srcfiles', nargs='*') + parsed_args = parser.parse_args(argv) + mods_by_pkg = dict() - for filepath in srcfiles: + for filepath in parsed_args.srcfiles: pkg_name = filepath.split(os.sep)[1] mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]), os.path.basename(filepath).split('.')[0]]) @@ -85,6 +94,23 @@ def generate(srcfiles): opts_by_group.setdefault(group, []).append((module_name, opts)) + # Look for entry points defined in libraries (or applications) for + # option discovery, and include their return values in the output. + # + # Each entry point should be a function returning an iterable + # of pairs with the group name (or None for the default group) + # and the list of Opt instances for that group. + if parsed_args.libraries: + loader = stevedore.named.NamedExtensionManager( + 'oslo.config.opts', + names=list(set(parsed_args.libraries)), + invoke_on_load=False, + ) + for ext in loader: + for group, opts in ext.plugin(): + opt_list = opts_by_group.setdefault(group or 'DEFAULT', []) + opt_list.append((ext.name, opts)) + for pkg_name in pkg_names: mods = mods_by_pkg.get(pkg_name) mods.sort() diff --git a/ceilometer/openstack/common/db/sqlalchemy/migration.py b/ceilometer/openstack/common/db/sqlalchemy/migration.py index 8c5cfdf230..5deba879ce 100644 --- a/ceilometer/openstack/common/db/sqlalchemy/migration.py +++ b/ceilometer/openstack/common/db/sqlalchemy/migration.py @@ -192,6 +192,7 @@ def db_sync(abs_path, version=None, init_version=0): current_version = db_version(abs_path, init_version) repository = _find_migrate_repo(abs_path) + _db_schema_sanity_check() if version is None or version > current_version: return versioning_api.upgrade(get_engine(), repository, version) else: @@ -199,6 +200,22 @@ def db_sync(abs_path, version=None, init_version=0): version) +def _db_schema_sanity_check(): + engine = get_engine() + if engine.name == 'mysql': + onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' + 'from information_schema.TABLES ' + 'where TABLE_SCHEMA=%s and ' + 'TABLE_COLLATION NOT LIKE "%%utf8%%"') + + table_names = [res[0] for res in engine.execute(onlyutf8_sql, + engine.url.database)] + if len(table_names) > 0: + raise ValueError(_('Tables "%s" have non utf8 collation, ' + 'please make sure all tables are CHARSET=utf8' + ) % ','.join(table_names)) + + def db_version(abs_path, init_version): """Show the current version of the repository. 
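The sanity check introduced above refuses to run migrations against a MySQL schema containing tables with a non-UTF8 collation. The same query can be run by hand to audit a schema before upgrading; a minimal sketch, assuming a reachable MySQL database (the connection URI is a placeholder, not part of this patch):

    import sqlalchemy

    # Placeholder URI; point this at the schema to be audited.
    engine = sqlalchemy.create_engine('mysql://user:secret@localhost/ceilometer')
    sql = ('SELECT TABLE_NAME, TABLE_COLLATION '
           'FROM information_schema.TABLES '
           'WHERE TABLE_SCHEMA=%s AND TABLE_COLLATION NOT LIKE "%%utf8%%"')
    bad_tables = [row[0] for row in engine.execute(sql, engine.url.database)]
    if bad_tables:
        print('Tables with non-utf8 collation: %s' % ', '.join(bad_tables))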
@@ -213,7 +230,7 @@ def db_version(abs_path, init_version): engine = get_engine() meta.reflect(bind=engine) tables = meta.tables - if len(tables) == 0: + if len(tables) == 0 or 'alembic_version' in tables: db_version_control(abs_path, init_version) return versioning_api.db_version(get_engine(), repository) else: diff --git a/ceilometer/openstack/common/db/sqlalchemy/models.py b/ceilometer/openstack/common/db/sqlalchemy/models.py index 6adeba2c65..c2c53e4e6c 100644 --- a/ceilometer/openstack/common/db/sqlalchemy/models.py +++ b/ceilometer/openstack/common/db/sqlalchemy/models.py @@ -102,8 +102,8 @@ class ModelBase(object): class TimestampMixin(object): - created_at = Column(DateTime, default=timeutils.utcnow) - updated_at = Column(DateTime, onupdate=timeutils.utcnow) + created_at = Column(DateTime, default=lambda: timeutils.utcnow()) + updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) class SoftDeleteMixin(object): diff --git a/ceilometer/openstack/common/db/sqlalchemy/session.py b/ceilometer/openstack/common/db/sqlalchemy/session.py index 69b7ae9ff5..ce407816b5 100644 --- a/ceilometer/openstack/common/db/sqlalchemy/session.py +++ b/ceilometer/openstack/common/db/sqlalchemy/session.py @@ -291,10 +291,10 @@ from ceilometer.openstack.common import timeutils sqlite_db_opts = [ cfg.StrOpt('sqlite_db', default='ceilometer.sqlite', - help='the filename to use with sqlite'), + help='The file name to use with SQLite'), cfg.BoolOpt('sqlite_synchronous', default=True, - help='If true, use synchronous mode for sqlite'), + help='If True, SQLite uses synchronous mode'), ] database_opts = [ @@ -324,7 +324,7 @@ database_opts = [ group='DATABASE'), cfg.DeprecatedOpt('idle_timeout', group='sql')], - help='timeout before idle sql connections are reaped'), + help='Timeout before idle sql connections are reaped'), cfg.IntOpt('min_pool_size', default=1, deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', @@ -347,7 +347,7 @@ database_opts = [ group='DEFAULT'), cfg.DeprecatedOpt('sql_max_retries', group='DATABASE')], - help='maximum db connection retries during startup. ' + help='Maximum db connection retries during startup. ' '(setting -1 implies an infinite retry count)'), cfg.IntOpt('retry_interval', default=10, @@ -355,7 +355,7 @@ database_opts = [ group='DEFAULT'), cfg.DeprecatedOpt('reconnect_interval', group='DATABASE')], - help='interval between retries of opening a sql connection'), + help='Interval between retries of opening a sql connection'), cfg.IntOpt('max_overflow', default=None, deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', diff --git a/ceilometer/openstack/common/db/sqlalchemy/test_base.py b/ceilometer/openstack/common/db/sqlalchemy/test_base.py new file mode 100644 index 0000000000..2833f2984b --- /dev/null +++ b/ceilometer/openstack/common/db/sqlalchemy/test_base.py @@ -0,0 +1,154 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
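The new test_base module that follows gives consuming projects a common database test harness: DbTestCase runs against an in-memory SQLite engine unless OS_TEST_DBAPI_CONNECTION overrides the URI, and backend_specific() skips tests on engines they do not target. A minimal usage sketch (the test class and method below are illustrative, not part of this patch):

    from ceilometer.openstack.common.db.sqlalchemy import test_base


    class TestUtf8Tables(test_base.DbTestCase):

        @test_base.backend_specific('mysql', 'postgresql')
        def test_server_side_defaults(self):
            # Runs only when the configured engine is MySQL or
            # PostgreSQL; skipped on the default SQLite backend.
            pass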
+ +import abc +import functools +import os + +import fixtures +from oslo.config import cfg +import six + +from ceilometer.openstack.common.db.sqlalchemy import session +from ceilometer.openstack.common.db.sqlalchemy import utils +from ceilometer.openstack.common import test + + +class DbFixture(fixtures.Fixture): + """Basic database fixture. + + Allows to run tests on various db backends, such as SQLite, MySQL and + PostgreSQL. By default use sqlite backend. To override default backend + uri set env variable OS_TEST_DBAPI_CONNECTION with database admin + credentials for specific backend. + """ + + def _get_uri(self): + return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://') + + def __init__(self): + super(DbFixture, self).__init__() + self.conf = cfg.CONF + self.conf.import_opt('connection', + 'ceilometer.openstack.common.db.sqlalchemy.session', + group='database') + + def setUp(self): + super(DbFixture, self).setUp() + + self.conf.set_default('connection', self._get_uri(), group='database') + self.addCleanup(self.conf.reset) + + +class DbTestCase(test.BaseTestCase): + """Base class for testing of DB code. + + Using `DbFixture`. Intended to be the main database test case to use all + the tests on a given backend with user defined uri. Backend specific + tests should be decorated with `backend_specific` decorator. + """ + + FIXTURE = DbFixture + + def setUp(self): + super(DbTestCase, self).setUp() + self.useFixture(self.FIXTURE()) + + self.addCleanup(session.cleanup) + + +ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql'] + + +def backend_specific(*dialects): + """Decorator to skip backend specific tests on inappropriate engines. + + ::dialects: list of dialects names under which the test will be launched. + """ + def wrap(f): + @functools.wraps(f) + def ins_wrap(self): + if not set(dialects).issubset(ALLOWED_DIALECTS): + raise ValueError( + "Please use allowed dialects: %s" % ALLOWED_DIALECTS) + engine = session.get_engine() + if engine.name not in dialects: + msg = ('The test "%s" can be run ' + 'only on %s. Current engine is %s.') + args = (f.__name__, ' '.join(dialects), engine.name) + self.skip(msg % args) + else: + return f(self) + return ins_wrap + return wrap + + +@six.add_metaclass(abc.ABCMeta) +class OpportunisticFixture(DbFixture): + """Base fixture to use default CI databases. + + The databases exist in OpenStack CI infrastructure. But for the + correct functioning in local environment the databases must be + created manually. + """ + + DRIVER = abc.abstractproperty(lambda: None) + DBNAME = PASSWORD = USERNAME = 'openstack_citest' + + def _get_uri(self): + return utils.get_connect_string(backend=self.DRIVER, + user=self.USERNAME, + passwd=self.PASSWORD, + database=self.DBNAME) + + +@six.add_metaclass(abc.ABCMeta) +class OpportunisticTestCase(DbTestCase): + """Base test case to use default CI databases. + + The subclasses of the test case are running only when openstack_citest + database is available otherwise a tests will be skipped. + """ + + FIXTURE = abc.abstractproperty(lambda: None) + + def setUp(self): + credentials = { + 'backend': self.FIXTURE.DRIVER, + 'user': self.FIXTURE.USERNAME, + 'passwd': self.FIXTURE.PASSWORD, + 'database': self.FIXTURE.DBNAME} + + if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials): + msg = '%s backend is not available.' 
% self.FIXTURE.DRIVER + return self.skip(msg) + + super(OpportunisticTestCase, self).setUp() + + +class MySQLOpportunisticFixture(OpportunisticFixture): + DRIVER = 'mysql' + + +class PostgreSQLOpportunisticFixture(OpportunisticFixture): + DRIVER = 'postgresql' + + +class MySQLOpportunisticTestCase(OpportunisticTestCase): + FIXTURE = MySQLOpportunisticFixture + + +class PostgreSQLOpportunisticTestCase(OpportunisticTestCase): + FIXTURE = PostgreSQLOpportunisticFixture diff --git a/ceilometer/openstack/common/db/sqlalchemy/test_migrations.py b/ceilometer/openstack/common/db/sqlalchemy/test_migrations.py index ad978a6e72..124b9cc422 100644 --- a/ceilometer/openstack/common/db/sqlalchemy/test_migrations.py +++ b/ceilometer/openstack/common/db/sqlalchemy/test_migrations.py @@ -23,6 +23,7 @@ from six import moves import sqlalchemy import sqlalchemy.exc +from ceilometer.openstack.common.db.sqlalchemy import utils from ceilometer.openstack.common.gettextutils import _ from ceilometer.openstack.common import log as logging from ceilometer.openstack.common.py3kcompat import urlutils @@ -31,67 +32,26 @@ from ceilometer.openstack.common import test LOG = logging.getLogger(__name__) -def _get_connect_string(backend, user, passwd, database): - """Get database connection - - Try to get a connection with a very specific set of values, if we get - these then we'll run the tests, otherwise they are skipped - """ - if backend == "postgres": - backend = "postgresql+psycopg2" - elif backend == "mysql": - backend = "mysql+mysqldb" - else: - raise Exception("Unrecognized backend: '%s'" % backend) - - return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" - % {'backend': backend, 'user': user, 'passwd': passwd, - 'database': database}) - - -def _is_backend_avail(backend, user, passwd, database): - try: - connect_uri = _get_connect_string(backend, user, passwd, database) - engine = sqlalchemy.create_engine(connect_uri) - connection = engine.connect() - except Exception: - # intentionally catch all to handle exceptions even if we don't - # have any backend code loaded. 
- return False - else: - connection.close() - engine.dispose() - return True - - def _have_mysql(user, passwd, database): present = os.environ.get('TEST_MYSQL_PRESENT') if present is None: - return _is_backend_avail('mysql', user, passwd, database) + return utils.is_backend_avail(backend='mysql', + user=user, + passwd=passwd, + database=database) return present.lower() in ('', 'true') def _have_postgresql(user, passwd, database): present = os.environ.get('TEST_POSTGRESQL_PRESENT') if present is None: - return _is_backend_avail('postgres', user, passwd, database) + return utils.is_backend_avail(backend='postgres', + user=user, + passwd=passwd, + database=database) return present.lower() in ('', 'true') -def get_db_connection_info(conn_pieces): - database = conn_pieces.path.strip('/') - loc_pieces = conn_pieces.netloc.split('@') - host = loc_pieces[1] - - auth_pieces = loc_pieces[0].split(':') - user = auth_pieces[0] - password = "" - if len(auth_pieces) > 1: - password = auth_pieces[1].strip() - - return (user, password, database, host) - - def _set_db_lock(lock_path=None, lock_prefix=None): def decorator(f): @functools.wraps(f) @@ -166,7 +126,10 @@ class BaseMigrationTestCase(test.BaseTestCase): "Failed to run: %s\n%s" % (cmd, output)) def _reset_pg(self, conn_pieces): - (user, password, database, host) = get_db_connection_info(conn_pieces) + (user, + password, + database, + host) = utils.get_db_connection_info(conn_pieces) os.environ['PGPASSWORD'] = password os.environ['PGUSER'] = user # note(boris-42): We must create and drop database, we can't @@ -205,7 +168,7 @@ class BaseMigrationTestCase(test.BaseTestCase): # the MYSQL database, which is easier and less error-prone # than using SQLAlchemy to do this via MetaData...trust me. (user, password, database, host) = \ - get_db_connection_info(conn_pieces) + utils.get_db_connection_info(conn_pieces) sql = ("drop database if exists %(db)s; " "create database %(db)s;") % {'db': database} cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s " diff --git a/ceilometer/openstack/common/db/sqlalchemy/utils.py b/ceilometer/openstack/common/db/sqlalchemy/utils.py index f19eb0aaee..08d64ede86 100644 --- a/ceilometer/openstack/common/db/sqlalchemy/utils.py +++ b/ceilometer/openstack/common/db/sqlalchemy/utils.py @@ -497,3 +497,52 @@ def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, where(new_table.c.deleted == deleted).\ values(deleted=default_deleted_value).\ execute() + + +def get_connect_string(backend, database, user=None, passwd=None): + """Get database connection + + Try to get a connection with a very specific set of values, if we get + these then we'll run the tests, otherwise they are skipped + """ + args = {'backend': backend, + 'user': user, + 'passwd': passwd, + 'database': database} + if backend == 'sqlite': + template = '%(backend)s:///%(database)s' + else: + template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" + return template % args + + +def is_backend_avail(backend, database, user=None, passwd=None): + try: + connect_uri = get_connect_string(backend=backend, + database=database, + user=user, + passwd=passwd) + engine = sqlalchemy.create_engine(connect_uri) + connection = engine.connect() + except Exception: + # intentionally catch all to handle exceptions even if we don't + # have any backend code loaded. 
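+        # A failed connect (missing driver, refused connection, bad
+        # credentials) just reports the backend as unavailable, so
+        # dependent tests are skipped rather than erroring out.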
+ return False + else: + connection.close() + engine.dispose() + return True + + +def get_db_connection_info(conn_pieces): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + password = auth_pieces[1].strip() + + return (user, password, database, host) diff --git a/ceilometer/openstack/common/deprecated/__init__.py b/ceilometer/openstack/common/deprecated/__init__.py deleted file mode 100644 index d0ce7ddcab..0000000000 --- a/ceilometer/openstack/common/deprecated/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2013 Red Hat, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import warnings - -msg = ("Modules in this package are deprecated " - "and will be removed in future releases") - -warnings.warn(msg, DeprecationWarning) diff --git a/ceilometer/openstack/common/deprecated/wsgi.py b/ceilometer/openstack/common/deprecated/wsgi.py deleted file mode 100644 index 3cacad7369..0000000000 --- a/ceilometer/openstack/common/deprecated/wsgi.py +++ /dev/null @@ -1,735 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utility methods for working with WSGI servers.""" - -import eventlet -eventlet.patcher.monkey_patch(all=False, socket=True) - -import datetime -import errno -import socket -import time -from xml.dom import minidom -from xml.parsers import expat - -import eventlet.wsgi -from oslo.config import cfg -import routes -import routes.middleware -import six -import webob.dec -import webob.exc - -from ceilometer.openstack.common.gettextutils import _ # noqa -from ceilometer.openstack.common import jsonutils -from ceilometer.openstack.common import log as logging -from ceilometer.openstack.common import service -from ceilometer.openstack.common import sslutils -from ceilometer.openstack.common import xmlutils - - -socket_opts = [ - cfg.IntOpt('backlog', - default=4096, - help="Number of backlog requests to configure the socket with"), - cfg.IntOpt('tcp_keepidle', - default=600, - help="Sets the value of TCP_KEEPIDLE in seconds for each " - "server socket. 
Not supported on OS X."), -] - -CONF = cfg.CONF -CONF.register_opts(socket_opts) - -LOG = logging.getLogger(__name__) - - -class MalformedRequestBody(Exception): - def __init__(self, reason): - super(MalformedRequestBody, self).__init__( - "Malformed message body: %s", reason) - - -class InvalidContentType(Exception): - def __init__(self, content_type): - super(InvalidContentType, self).__init__( - "Invalid content type %s", content_type) - - -def run_server(application, port, **kwargs): - """Run a WSGI server with the given application.""" - sock = eventlet.listen(('0.0.0.0', port)) - eventlet.wsgi.server(sock, application, **kwargs) - - -class Service(service.Service): - """Provides a Service API for wsgi servers. - - This gives us the ability to launch wsgi servers with the - Launcher classes in service.py. - """ - - def __init__(self, application, port, - host='0.0.0.0', backlog=4096, threads=1000): - self.application = application - self._port = port - self._host = host - self._backlog = backlog if backlog else CONF.backlog - self._socket = self._get_socket(host, port, self._backlog) - super(Service, self).__init__(threads) - - def _get_socket(self, host, port, backlog): - # TODO(dims): eventlet's green dns/socket module does not actually - # support IPv6 in getaddrinfo(). We need to get around this in the - # future or monitor upstream for a fix - info = socket.getaddrinfo(host, - port, - socket.AF_UNSPEC, - socket.SOCK_STREAM)[0] - family = info[0] - bind_addr = info[-1] - - sock = None - retry_until = time.time() + 30 - while not sock and time.time() < retry_until: - try: - sock = eventlet.listen(bind_addr, - backlog=backlog, - family=family) - if sslutils.is_enabled(): - sock = sslutils.wrap(sock) - - except socket.error as err: - if err.args[0] != errno.EADDRINUSE: - raise - eventlet.sleep(0.1) - if not sock: - raise RuntimeError(_("Could not bind to %(host)s:%(port)s " - "after trying for 30 seconds") % - {'host': host, 'port': port}) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - # sockets can hang around forever without keepalive - sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - - # This option isn't available in the OS X version of eventlet - if hasattr(socket, 'TCP_KEEPIDLE'): - sock.setsockopt(socket.IPPROTO_TCP, - socket.TCP_KEEPIDLE, - CONF.tcp_keepidle) - - return sock - - def start(self): - """Start serving this service using the provided server instance. - - :returns: None - - """ - super(Service, self).start() - self.tg.add_thread(self._run, self.application, self._socket) - - @property - def backlog(self): - return self._backlog - - @property - def host(self): - return self._socket.getsockname()[0] if self._socket else self._host - - @property - def port(self): - return self._socket.getsockname()[1] if self._socket else self._port - - def stop(self): - """Stop serving this API. - - :returns: None - - """ - super(Service, self).stop() - - def _run(self, application, socket): - """Start a WSGI server in a new green thread.""" - logger = logging.getLogger('eventlet.wsgi') - eventlet.wsgi.server(socket, - application, - custom_pool=self.tg.pool, - log=logging.WritableLogger(logger)) - - -class Router(object): - - """WSGI middleware that maps incoming requests to WSGI apps.""" - - def __init__(self, mapper): - """Create a router for the given routes.Mapper. - - Each route in `mapper` must specify a 'controller', which is a - WSGI app to call. 
You'll probably want to specify an 'action' as - well and have your controller be a wsgi.Controller, who will route - the request to the action method. - - Examples: - mapper = routes.Mapper() - sc = ServerController() - - # Explicit mapping of one route to a controller+action - mapper.connect(None, "/svrlist", controller=sc, action="list") - - # Actions are all implicitly defined - mapper.resource("server", "servers", controller=sc) - - # Pointing to an arbitrary WSGI app. You can specify the - # {path_info:.*} parameter so the target app can be handed just that - # section of the URL. - mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) - """ - self.map = mapper - self._router = routes.middleware.RoutesMiddleware(self._dispatch, - self.map) - - @webob.dec.wsgify - def __call__(self, req): - """Route the incoming request to a controller based on self.map. - - If no match, return a 404. - """ - return self._router - - @staticmethod - @webob.dec.wsgify - def _dispatch(req): - """Gets application from the environment. - - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. Either returns 404 - or the routed WSGI app's response. - """ - match = req.environ['wsgiorg.routing_args'][1] - if not match: - return webob.exc.HTTPNotFound() - app = match['controller'] - return app - - -class Request(webob.Request): - """Add some Openstack API-specific logic to the base webob.Request.""" - - default_request_content_types = ('application/json', 'application/xml') - default_accept_types = ('application/json', 'application/xml') - default_accept_type = 'application/json' - - def best_match_content_type(self, supported_content_types=None): - """Determine the requested response content-type. - - Based on the query extension then the Accept header. - Defaults to default_accept_type if we don't find a preference - - """ - supported_content_types = (supported_content_types or - self.default_accept_types) - - parts = self.path.rsplit('.', 1) - if len(parts) > 1: - ctype = 'application/{0}'.format(parts[1]) - if ctype in supported_content_types: - return ctype - - bm = self.accept.best_match(supported_content_types) - return bm or self.default_accept_type - - def get_content_type(self, allowed_content_types=None): - """Determine content type of the request body. - - Does not do any body introspection, only checks header - - """ - if "Content-Type" not in self.headers: - return None - - content_type = self.content_type - allowed_content_types = (allowed_content_types or - self.default_request_content_types) - - if content_type not in allowed_content_types: - raise InvalidContentType(content_type=content_type) - return content_type - - -class Resource(object): - """WSGI app that handles (de)serialization and controller dispatch. - - Reads routing information supplied by RoutesMiddleware and calls - the requested action method upon its deserializer, controller, - and serializer. Those three objects may implement any of the basic - controller action methods (create, update, show, index, delete) - along with any that may be specified in the api router. A 'default' - method may also be implemented to be used in place of any - non-implemented actions. Deserializer methods must accept a request - argument and return a dictionary. Controller methods must accept a - request argument. Additionally, they must also accept keyword - arguments that represent the keys returned by the Deserializer. 
They - may raise a webob.exc exception or return a dict, which will be - serialized by requested content type. - """ - def __init__(self, controller, deserializer=None, serializer=None): - """Initiates Resource object. - - :param controller: object that implement methods created by routes lib - :param deserializer: object that supports webob request deserialization - through controller-like actions - :param serializer: object that supports webob response serialization - through controller-like actions - """ - self.controller = controller - self.serializer = serializer or ResponseSerializer() - self.deserializer = deserializer or RequestDeserializer() - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """WSGI method that controls (de)serialization and method dispatch.""" - - try: - action, action_args, accept = self.deserialize_request(request) - except InvalidContentType: - msg = _("Unsupported Content-Type") - return webob.exc.HTTPUnsupportedMediaType(explanation=msg) - except MalformedRequestBody: - msg = _("Malformed request body") - return webob.exc.HTTPBadRequest(explanation=msg) - - action_result = self.execute_action(action, request, **action_args) - try: - return self.serialize_response(action, action_result, accept) - # return unserializable result (typically a webob exc) - except Exception: - return action_result - - def deserialize_request(self, request): - return self.deserializer.deserialize(request) - - def serialize_response(self, action, action_result, accept): - return self.serializer.serialize(action_result, accept, action) - - def execute_action(self, action, request, **action_args): - return self.dispatch(self.controller, action, request, **action_args) - - def dispatch(self, obj, action, *args, **kwargs): - """Find action-specific method on self and call it.""" - try: - method = getattr(obj, action) - except AttributeError: - method = getattr(obj, 'default') - - return method(*args, **kwargs) - - def get_action_args(self, request_environment): - """Parse dictionary created by routes library.""" - try: - args = request_environment['wsgiorg.routing_args'][1].copy() - except Exception: - return {} - - try: - del args['controller'] - except KeyError: - pass - - try: - del args['format'] - except KeyError: - pass - - return args - - -class ActionDispatcher(object): - """Maps method name to local methods through action name.""" - - def dispatch(self, *args, **kwargs): - """Find and call local method.""" - action = kwargs.pop('action', 'default') - action_method = getattr(self, str(action), self.default) - return action_method(*args, **kwargs) - - def default(self, data): - raise NotImplementedError() - - -class DictSerializer(ActionDispatcher): - """Default request body serialization.""" - - def serialize(self, data, action='default'): - return self.dispatch(data, action=action) - - def default(self, data): - return "" - - -class JSONDictSerializer(DictSerializer): - """Default JSON request body serialization.""" - - def default(self, data): - def sanitizer(obj): - if isinstance(obj, datetime.datetime): - _dtime = obj - datetime.timedelta(microseconds=obj.microsecond) - return _dtime.isoformat() - return six.text_type(obj) - return jsonutils.dumps(data, default=sanitizer) - - -class XMLDictSerializer(DictSerializer): - - def __init__(self, metadata=None, xmlns=None): - """Initiates XMLDictSerializer object. - - :param metadata: information needed to deserialize xml into - a dictionary. 
- :param xmlns: XML namespace to include with serialized xml - """ - super(XMLDictSerializer, self).__init__() - self.metadata = metadata or {} - self.xmlns = xmlns - - def default(self, data): - # We expect data to contain a single key which is the XML root. - root_key = data.keys()[0] - doc = minidom.Document() - node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) - - return self.to_xml_string(node) - - def to_xml_string(self, node, has_atom=False): - self._add_xmlns(node, has_atom) - return node.toprettyxml(indent=' ', encoding='UTF-8') - - #NOTE (ameade): the has_atom should be removed after all of the - # xml serializers and view builders have been updated to the current - # spec that required all responses include the xmlns:atom, the has_atom - # flag is to prevent current tests from breaking - def _add_xmlns(self, node, has_atom=False): - if self.xmlns is not None: - node.setAttribute('xmlns', self.xmlns) - if has_atom: - node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") - - def _to_xml_node(self, doc, metadata, nodename, data): - """Recursive method to convert data members to XML nodes.""" - result = doc.createElement(nodename) - - # Set the xml namespace if one is specified - # TODO(justinsb): We could also use prefixes on the keys - xmlns = metadata.get('xmlns', None) - if xmlns: - result.setAttribute('xmlns', xmlns) - - #TODO(bcwaldon): accomplish this without a type-check - if type(data) is list: - collections = metadata.get('list_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for item in data: - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(item)) - result.appendChild(node) - return result - singular = metadata.get('plurals', {}).get(nodename, None) - if singular is None: - if nodename.endswith('s'): - singular = nodename[:-1] - else: - singular = 'item' - for item in data: - node = self._to_xml_node(doc, metadata, singular, item) - result.appendChild(node) - #TODO(bcwaldon): accomplish this without a type-check - elif type(data) is dict: - collections = metadata.get('dict_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for k, v in data.items(): - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(k)) - text = doc.createTextNode(str(v)) - node.appendChild(text) - result.appendChild(node) - return result - attrs = metadata.get('attributes', {}).get(nodename, {}) - for k, v in data.items(): - if k in attrs: - result.setAttribute(k, str(v)) - else: - node = self._to_xml_node(doc, metadata, k, v) - result.appendChild(node) - else: - # Type is atom - node = doc.createTextNode(str(data)) - result.appendChild(node) - return result - - def _create_link_nodes(self, xml_doc, links): - link_nodes = [] - for link in links: - link_node = xml_doc.createElement('atom:link') - link_node.setAttribute('rel', link['rel']) - link_node.setAttribute('href', link['href']) - if 'type' in link: - link_node.setAttribute('type', link['type']) - link_nodes.append(link_node) - return link_nodes - - -class ResponseHeadersSerializer(ActionDispatcher): - """Default response headers serialization.""" - - def serialize(self, response, data, action): - self.dispatch(response, data, action=action) - - def default(self, response, data): - response.status_int = 200 - - -class ResponseSerializer(object): - """Encode the necessary pieces into a response object.""" - - def __init__(self, body_serializers=None, 
headers_serializer=None): - self.body_serializers = { - 'application/xml': XMLDictSerializer(), - 'application/json': JSONDictSerializer(), - } - self.body_serializers.update(body_serializers or {}) - - self.headers_serializer = (headers_serializer or - ResponseHeadersSerializer()) - - def serialize(self, response_data, content_type, action='default'): - """Serialize a dict into a string and wrap in a wsgi.Request object. - - :param response_data: dict produced by the Controller - :param content_type: expected mimetype of serialized response body - - """ - response = webob.Response() - self.serialize_headers(response, response_data, action) - self.serialize_body(response, response_data, content_type, action) - return response - - def serialize_headers(self, response, data, action): - self.headers_serializer.serialize(response, data, action) - - def serialize_body(self, response, data, content_type, action): - response.headers['Content-Type'] = content_type - if data is not None: - serializer = self.get_body_serializer(content_type) - response.body = serializer.serialize(data, action) - - def get_body_serializer(self, content_type): - try: - return self.body_serializers[content_type] - except (KeyError, TypeError): - raise InvalidContentType(content_type=content_type) - - -class RequestHeadersDeserializer(ActionDispatcher): - """Default request headers deserializer""" - - def deserialize(self, request, action): - return self.dispatch(request, action=action) - - def default(self, request): - return {} - - -class RequestDeserializer(object): - """Break up a Request object into more useful pieces.""" - - def __init__(self, body_deserializers=None, headers_deserializer=None, - supported_content_types=None): - - self.supported_content_types = supported_content_types - - self.body_deserializers = { - 'application/xml': XMLDeserializer(), - 'application/json': JSONDeserializer(), - } - self.body_deserializers.update(body_deserializers or {}) - - self.headers_deserializer = (headers_deserializer or - RequestHeadersDeserializer()) - - def deserialize(self, request): - """Extract necessary pieces of the request. 
- - :param request: Request object - :returns: tuple of (expected controller action name, dictionary of - keyword arguments to pass to the controller, the expected - content type of the response) - - """ - action_args = self.get_action_args(request.environ) - action = action_args.pop('action', None) - - action_args.update(self.deserialize_headers(request, action)) - action_args.update(self.deserialize_body(request, action)) - - accept = self.get_expected_content_type(request) - - return (action, action_args, accept) - - def deserialize_headers(self, request, action): - return self.headers_deserializer.deserialize(request, action) - - def deserialize_body(self, request, action): - if not request.body: - LOG.debug(_("Empty body provided in request")) - return {} - - try: - content_type = request.get_content_type() - except InvalidContentType: - LOG.debug(_("Unrecognized Content-Type provided in request")) - raise - - if content_type is None: - LOG.debug(_("No Content-Type provided in request")) - return {} - - try: - deserializer = self.get_body_deserializer(content_type) - except InvalidContentType: - LOG.debug(_("Unable to deserialize body as provided Content-Type")) - raise - - return deserializer.deserialize(request.body, action) - - def get_body_deserializer(self, content_type): - try: - return self.body_deserializers[content_type] - except (KeyError, TypeError): - raise InvalidContentType(content_type=content_type) - - def get_expected_content_type(self, request): - return request.best_match_content_type(self.supported_content_types) - - def get_action_args(self, request_environment): - """Parse dictionary created by routes library.""" - try: - args = request_environment['wsgiorg.routing_args'][1].copy() - except Exception: - return {} - - try: - del args['controller'] - except KeyError: - pass - - try: - del args['format'] - except KeyError: - pass - - return args - - -class TextDeserializer(ActionDispatcher): - """Default request body deserialization.""" - - def deserialize(self, datastring, action='default'): - return self.dispatch(datastring, action=action) - - def default(self, datastring): - return {} - - -class JSONDeserializer(TextDeserializer): - - def _from_json(self, datastring): - try: - return jsonutils.loads(datastring) - except ValueError: - msg = _("cannot understand JSON") - raise MalformedRequestBody(reason=msg) - - def default(self, datastring): - return {'body': self._from_json(datastring)} - - -class XMLDeserializer(TextDeserializer): - - def __init__(self, metadata=None): - """Initiates XMLDeserializer object. - - :param metadata: information needed to deserialize xml into - a dictionary. - """ - super(XMLDeserializer, self).__init__() - self.metadata = metadata or {} - - def _from_xml(self, datastring): - plurals = set(self.metadata.get('plurals', {})) - - try: - node = xmlutils.safe_minidom_parse_string(datastring).childNodes[0] - return {node.nodeName: self._from_xml_node(node, plurals)} - except expat.ExpatError: - msg = _("cannot understand XML") - raise MalformedRequestBody(reason=msg) - - def _from_xml_node(self, node, listnames): - """Convert a minidom node to a simple Python type. - - :param listnames: list of XML node names whose subnodes should - be considered list items. 
- - """ - - if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: - return node.childNodes[0].nodeValue - elif node.nodeName in listnames: - return [self._from_xml_node(n, listnames) for n in node.childNodes] - else: - result = dict() - for attr in node.attributes.keys(): - result[attr] = node.attributes[attr].nodeValue - for child in node.childNodes: - if child.nodeType != node.TEXT_NODE: - result[child.nodeName] = self._from_xml_node(child, - listnames) - return result - - def find_first_child_named(self, parent, name): - """Search a nodes children for the first child with a given name.""" - for node in parent.childNodes: - if node.nodeName == name: - return node - return None - - def find_children_named(self, parent, name): - """Return all of a nodes children who have the given name.""" - for node in parent.childNodes: - if node.nodeName == name: - yield node - - def extract_text(self, node): - """Get the text field contained by the given node.""" - if len(node.childNodes) == 1: - child = node.childNodes[0] - if child.nodeType == child.TEXT_NODE: - return child.nodeValue - return "" - - def default(self, datastring): - return {'body': self._from_xml(datastring)} diff --git a/ceilometer/openstack/common/fileutils.py b/ceilometer/openstack/common/fileutils.py index 2047ae4fbc..858ed578cf 100644 --- a/ceilometer/openstack/common/fileutils.py +++ b/ceilometer/openstack/common/fileutils.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. - import contextlib import errno import os diff --git a/ceilometer/openstack/common/fixture/config.py b/ceilometer/openstack/common/fixture/config.py index 0bf90ff7a0..4b0efd2f82 100644 --- a/ceilometer/openstack/common/fixture/config.py +++ b/ceilometer/openstack/common/fixture/config.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+ import fixtures from oslo.config import cfg import six diff --git a/ceilometer/openstack/common/fixture/mockpatch.py b/ceilometer/openstack/common/fixture/mockpatch.py index 858e77cd06..a8ffeb3702 100644 --- a/ceilometer/openstack/common/fixture/mockpatch.py +++ b/ceilometer/openstack/common/fixture/mockpatch.py @@ -22,14 +22,15 @@ import mock class PatchObject(fixtures.Fixture): """Deal with code around mock.""" - def __init__(self, obj, attr, **kwargs): + def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs): self.obj = obj self.attr = attr self.kwargs = kwargs + self.new = new def setUp(self): super(PatchObject, self).setUp() - _p = mock.patch.object(self.obj, self.attr, **self.kwargs) + _p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs) self.mock = _p.start() self.addCleanup(_p.stop) @@ -38,12 +39,13 @@ class Patch(fixtures.Fixture): """Deal with code around mock.patch.""" - def __init__(self, obj, **kwargs): + def __init__(self, obj, new=mock.DEFAULT, **kwargs): self.obj = obj self.kwargs = kwargs + self.new = new def setUp(self): super(Patch, self).setUp() - _p = mock.patch(self.obj, **self.kwargs) + _p = mock.patch(self.obj, self.new, **self.kwargs) self.mock = _p.start() self.addCleanup(_p.stop) diff --git a/ceilometer/openstack/common/lockutils.py b/ceilometer/openstack/common/lockutils.py index d614c7d462..e2db5b3ae4 100644 --- a/ceilometer/openstack/common/lockutils.py +++ b/ceilometer/openstack/common/lockutils.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. - import contextlib import errno import functools @@ -30,7 +29,6 @@ from oslo.config import cfg from ceilometer.openstack.common import fileutils from ceilometer.openstack.common.gettextutils import _ -from ceilometer.openstack.common import local from ceilometer.openstack.common import log as logging @@ -76,7 +74,13 @@ class _InterProcessLock(object): self.lockfile = None self.fname = name - def __enter__(self): + def acquire(self): + basedir = os.path.dirname(self.fname) + + if not os.path.exists(basedir): + fileutils.ensure_tree(basedir) + LOG.info(_('Created lock path: %s'), basedir) + self.lockfile = open(self.fname, 'w') while True: @@ -86,22 +90,37 @@ class _InterProcessLock(object): # Also upon reading the MSDN docs for locking(), it seems # to have a laughable 10 attempts "blocking" mechanism. 
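                 # trylock() maps to fcntl.lockf() on POSIX and to
                 # msvcrt.locking() on Windows; both raise IOError with
                 # errno EACCES or EAGAIN while another process holds
                 # the lock, which the retry logic below treats as a
                 # transient condition.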
self.trylock() - return self + LOG.debug(_('Got file lock "%s"'), self.fname) + return True except IOError as e: if e.errno in (errno.EACCES, errno.EAGAIN): # external locks synchronise things like iptables # updates - give it some time to prevent busy spinning time.sleep(0.01) else: - raise + raise threading.ThreadError(_("Unable to acquire lock on" + " `%(filename)s` due to" + " %(exception)s") % + { + 'filename': self.fname, + 'exception': e, + }) - def __exit__(self, exc_type, exc_val, exc_tb): + def __enter__(self): + self.acquire() + return self + + def release(self): try: self.unlock() self.lockfile.close() except IOError: LOG.exception(_("Could not release the acquired lock `%s`"), self.fname) + LOG.debug(_('Released file lock "%s"'), self.fname) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.release() def trylock(self): raise NotImplementedError() @@ -137,46 +156,26 @@ _semaphores = weakref.WeakValueDictionary() _semaphores_lock = threading.Lock() -@contextlib.contextmanager -def external_lock(name, lock_file_prefix=None, lock_path=None): +def external_lock(name, lock_file_prefix=None): with internal_lock(name): - LOG.debug(_('Attempting to grab file lock "%(lock)s"'), + LOG.debug(_('Attempting to grab external lock "%(lock)s"'), {'lock': name}) - # We need a copy of lock_path because it is non-local - local_lock_path = lock_path or CONF.lock_path - if not local_lock_path: - raise cfg.RequiredOptError('lock_path') - - if not os.path.exists(local_lock_path): - fileutils.ensure_tree(local_lock_path) - LOG.info(_('Created lock path: %s'), local_lock_path) - - def add_prefix(name, prefix): - if not prefix: - return name - sep = '' if prefix.endswith('-') else '-' - return '%s%s%s' % (prefix, sep, name) - # NOTE(mikal): the lock name cannot contain directory # separators - lock_file_name = add_prefix(name.replace(os.sep, '_'), - lock_file_prefix) + name = name.replace(os.sep, '_') + if lock_file_prefix: + sep = '' if lock_file_prefix.endswith('-') else '-' + name = '%s%s%s' % (lock_file_prefix, sep, name) - lock_file_path = os.path.join(local_lock_path, lock_file_name) + if not CONF.lock_path: + raise cfg.RequiredOptError('lock_path') - try: - lock = InterProcessLock(lock_file_path) - with lock as lock: - LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - yield lock - finally: - LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) + lock_file_path = os.path.join(CONF.lock_path, name) + + return InterProcessLock(lock_file_path) -@contextlib.contextmanager def internal_lock(name): with _semaphores_lock: try: @@ -185,22 +184,12 @@ def internal_lock(name): sem = threading.Semaphore() _semaphores[name] = sem - with sem: - LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) - - # NOTE(mikal): I know this looks odd - if not hasattr(local.strong_store, 'locks_held'): - local.strong_store.locks_held = [] - local.strong_store.locks_held.append(name) - - try: - yield sem - finally: - local.strong_store.locks_held.remove(name) + LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) + return sem @contextlib.contextmanager -def lock(name, lock_file_prefix=None, external=False, lock_path=None): +def lock(name, lock_file_prefix=None, external=False): """Context based lock This function yields a `threading.Semaphore` instance (if we don't use @@ -214,20 +203,16 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None): should work across multiple processes. 
This means that if two different workers both run a a method decorated with @synchronized('mylock', external=True), only one of them will execute at a time. - - :param lock_path: The lock_path keyword argument is used to specify a - special location for external lock files to live. If nothing is set, then - CONF.lock_path is used as a default. """ if external and not CONF.disable_process_locking: - with external_lock(name, lock_file_prefix, lock_path) as lock: - yield lock + lock = external_lock(name, lock_file_prefix) else: - with internal_lock(name) as lock: - yield lock + lock = internal_lock(name) + with lock: + yield lock -def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): +def synchronized(name, lock_file_prefix=None, external=False): """Synchronization decorator. Decorating a method like so:: @@ -255,7 +240,7 @@ def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): @functools.wraps(f) def inner(*args, **kwargs): try: - with lock(name, lock_file_prefix, external, lock_path): + with lock(name, lock_file_prefix, external): LOG.debug(_('Got semaphore / lock "%(function)s"'), {'function': f.__name__}) return f(*args, **kwargs) diff --git a/ceilometer/openstack/common/log.py b/ceilometer/openstack/common/log.py index f3cc97b6f5..37b09a8344 100644 --- a/ceilometer/openstack/common/log.py +++ b/ceilometer/openstack/common/log.py @@ -115,10 +115,21 @@ logging_cli_opts = [ '--log-file paths'), cfg.BoolOpt('use-syslog', default=False, - help='Use syslog for logging.'), + help='Use syslog for logging. ' + 'Existing syslog format is DEPRECATED during I, ' + 'and then will be changed in J to honor RFC5424'), + cfg.BoolOpt('use-syslog-rfc-format', + # TODO(bogdando) remove or use True after existing + # syslog format deprecation in J + default=False, + help='(Optional) Use syslog rfc5424 format for logging. ' + 'If enabled, will add APP-NAME (RFC5424) before the ' + 'MSG part of the syslog message. 
The old format ' + 'without APP-NAME is deprecated in I, ' + 'and will be removed in J.'), cfg.StrOpt('syslog-log-facility', default='LOG_USER', - help='syslog facility to receive log lines') + help='Syslog facility to receive log lines') ] generic_log_opts = [ @@ -132,18 +143,18 @@ log_opts = [ default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' '%(name)s [%(request_id)s %(user_identity)s] ' '%(instance)s%(message)s', - help='format string to use for log messages with context'), + help='Format string to use for log messages with context'), cfg.StrOpt('logging_default_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' '%(name)s [-] %(instance)s%(message)s', - help='format string to use for log messages without context'), + help='Format string to use for log messages without context'), cfg.StrOpt('logging_debug_format_suffix', default='%(funcName)s %(pathname)s:%(lineno)d', - help='data to append to log format when level is DEBUG'), + help='Data to append to log format when level is DEBUG'), cfg.StrOpt('logging_exception_prefix', default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' '%(instance)s', - help='prefix each line of exception output with this format'), + help='Prefix each line of exception output with this format'), cfg.ListOpt('default_log_levels', default=[ 'amqp=WARN', @@ -153,14 +164,15 @@ log_opts = [ 'sqlalchemy=WARN', 'suds=INFO', 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN' ], - help='list of logger=LEVEL pairs'), + help='List of logger=LEVEL pairs'), cfg.BoolOpt('publish_errors', default=False, - help='publish error events'), + help='Publish error events'), cfg.BoolOpt('fatal_deprecations', default=False, - help='make deprecations fatal'), + help='Make deprecations fatal'), # NOTE(mikal): there are two options here because sometimes we are handed # a full instance (and could include more information), and other times we @@ -393,7 +405,9 @@ def _create_logging_excepthook(product_name): extra = {} if CONF.verbose or CONF.debug: extra['exc_info'] = (exc_type, value, tb) - getLogger(product_name).critical(str(value), **extra) + getLogger(product_name).critical( + "".join(traceback.format_exception_only(exc_type, value)), + **extra) return logging_excepthook @@ -457,6 +471,17 @@ def _find_facility_from_conf(): return facility +class RFCSysLogHandler(logging.handlers.SysLogHandler): + def __init__(self, *args, **kwargs): + self.binary_name = _get_binary_name() + super(RFCSysLogHandler, self).__init__(*args, **kwargs) + + def format(self, record): + msg = super(RFCSysLogHandler, self).format(record) + msg = self.binary_name + ' ' + msg + return msg + + def _setup_logging_from_conf(): log_root = getLogger(None).logger for handler in log_root.handlers: @@ -464,8 +489,14 @@ def _setup_logging_from_conf(): if CONF.use_syslog: facility = _find_facility_from_conf() - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(address='/dev/log', + facility=facility) + else: + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) log_root.addHandler(syslog) logpath = _get_log_file_path() @@ -543,7 +574,7 @@ class WritableLogger(object): self.level = level def write(self, msg): - self.logger.log(self.level, msg) + self.logger.log(self.level, msg.rstrip()) class ContextFormatter(logging.Formatter): @@ 
-561,7 +592,7 @@ class ContextFormatter(logging.Formatter): def format(self, record): """Uses contextstring if request_id is set, otherwise default.""" - # NOTE(sdague): default the fancier formating params + # NOTE(sdague): default the fancier formatting params # to an empty string so we don't throw an exception if # they get used for key in ('instance', 'color'): @@ -577,7 +608,7 @@ class ContextFormatter(logging.Formatter): CONF.logging_debug_format_suffix): self._fmt += " " + CONF.logging_debug_format_suffix - # Cache this on the record, Logger will respect our formated copy + # Cache this on the record, Logger will respect our formatted copy if record.exc_info: record.exc_text = self.formatException(record.exc_info, record) return logging.Formatter.format(self, record) diff --git a/ceilometer/openstack/common/log_handler.py b/ceilometer/openstack/common/log_handler.py index 7135a5e723..653f39833e 100644 --- a/ceilometer/openstack/common/log_handler.py +++ b/ceilometer/openstack/common/log_handler.py @@ -11,6 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + import logging from oslo.config import cfg diff --git a/ceilometer/openstack/common/middleware/audit.py b/ceilometer/openstack/common/middleware/audit.py index 863ddb2d21..e7d9a76106 100644 --- a/ceilometer/openstack/common/middleware/audit.py +++ b/ceilometer/openstack/common/middleware/audit.py @@ -12,6 +12,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """ Attach open standard audit information to request.environ diff --git a/ceilometer/openstack/common/middleware/base.py b/ceilometer/openstack/common/middleware/base.py index 20995498a3..464a1ccd72 100644 --- a/ceilometer/openstack/common/middleware/base.py +++ b/ceilometer/openstack/common/middleware/base.py @@ -12,6 +12,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """Base class(es) for WSGI Middleware.""" import webob.dec diff --git a/ceilometer/openstack/common/middleware/context.py b/ceilometer/openstack/common/middleware/context.py deleted file mode 100644 index 78dc144afd..0000000000 --- a/ceilometer/openstack/common/middleware/context.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Middleware that attaches a context to the WSGI request -""" - -from ceilometer.openstack.common import context -from ceilometer.openstack.common import importutils -from ceilometer.openstack.common.middleware import base - - -class ContextMiddleware(base.Middleware): - def __init__(self, app, options): - self.options = options - super(ContextMiddleware, self).__init__(app) - - def make_context(self, *args, **kwargs): - """Create a context with the given arguments.""" - - # Determine the context class to use - ctxcls = context.RequestContext - if 'context_class' in self.options: - ctxcls = importutils.import_class(self.options['context_class']) - - return ctxcls(*args, **kwargs) - - def process_request(self, req): - """Process the request. - - Extract any authentication information in the request and - construct an appropriate context from it. - """ - # Use the default empty context, with admin turned on for - # backwards compatibility - req.context = self.make_context(is_admin=True) - - -def filter_factory(global_conf, **local_conf): - """Factory method for paste.deploy.""" - conf = global_conf.copy() - conf.update(local_conf) - - def f(app): - return ContextMiddleware(app, conf) - - return f diff --git a/ceilometer/openstack/common/middleware/debug.py b/ceilometer/openstack/common/middleware/debug.py index 19d5f6ceed..7626dd8d43 100644 --- a/ceilometer/openstack/common/middleware/debug.py +++ b/ceilometer/openstack/common/middleware/debug.py @@ -12,6 +12,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """Debug middleware""" from __future__ import print_function diff --git a/ceilometer/openstack/common/middleware/notifier.py b/ceilometer/openstack/common/middleware/notifier.py index cc96ff33d8..00aafe3df1 100644 --- a/ceilometer/openstack/common/middleware/notifier.py +++ b/ceilometer/openstack/common/middleware/notifier.py @@ -11,6 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """ Send notifications on request diff --git a/ceilometer/openstack/common/middleware/sizelimit.py b/ceilometer/openstack/common/middleware/sizelimit.py index c88c35bd77..e9ad6ccfb0 100644 --- a/ceilometer/openstack/common/middleware/sizelimit.py +++ b/ceilometer/openstack/common/middleware/sizelimit.py @@ -11,6 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """ Request Body limiting middleware. @@ -28,8 +29,8 @@ from ceilometer.openstack.common.middleware import base max_req_body_size = cfg.IntOpt('max_request_body_size', deprecated_name='osapi_max_request_body_size', default=114688, - help='the maximum body size ' - 'per each request(bytes)') + help='The maximum body size ' + 'per request, in bytes') CONF = cfg.CONF CONF.register_opt(max_req_body_size) diff --git a/ceilometer/openstack/common/notifier/test_notifier.py b/ceilometer/openstack/common/notifier/test_notifier.py index 96c1746bf4..11fc21fc31 100644 --- a/ceilometer/openstack/common/notifier/test_notifier.py +++ b/ceilometer/openstack/common/notifier/test_notifier.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-
 NOTIFICATIONS = []
diff --git a/ceilometer/openstack/common/policy.py b/ceilometer/openstack/common/policy.py
index 4162481a09..23ab9e9abc 100644
--- a/ceilometer/openstack/common/policy.py
+++ b/ceilometer/openstack/common/policy.py
@@ -59,12 +59,13 @@ import re

 from oslo.config import cfg
 import six
+import six.moves.urllib.parse as urlparse
+import six.moves.urllib.request as urlrequest

 from ceilometer.openstack.common import fileutils
 from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import jsonutils
 from ceilometer.openstack.common import log as logging
-from ceilometer.openstack.common.py3kcompat import urlutils


 policy_opts = [
@@ -824,8 +825,8 @@ class HttpCheck(Check):
         url = ('http:' + self.match) % target
         data = {'target': jsonutils.dumps(target),
                 'credentials': jsonutils.dumps(creds)}
-        post_data = urlutils.urlencode(data)
-        f = urlutils.urlopen(url, post_data)
+        post_data = urlparse.urlencode(data)
+        f = urlrequest.urlopen(url, post_data)
         return f.read() == "True"

diff --git a/ceilometer/openstack/common/processutils.py b/ceilometer/openstack/common/processutils.py
deleted file mode 100644
index c03131ebc3..0000000000
--- a/ceilometer/openstack/common/processutils.py
+++ /dev/null
@@ -1,248 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-System-level utilities and helper functions.
-"""
-
-import logging as stdlib_logging
-import os
-import random
-import shlex
-import signal
-
-from eventlet.green import subprocess
-from eventlet import greenthread
-
-from ceilometer.openstack.common.gettextutils import _  # noqa
-from ceilometer.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-class InvalidArgumentError(Exception):
-    def __init__(self, message=None):
-        super(InvalidArgumentError, self).__init__(message)
-
-
-class UnknownArgumentError(Exception):
-    def __init__(self, message=None):
-        super(UnknownArgumentError, self).__init__(message)
-
-
-class ProcessExecutionError(Exception):
-    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
-                 description=None):
-        self.exit_code = exit_code
-        self.stderr = stderr
-        self.stdout = stdout
-        self.cmd = cmd
-        self.description = description
-
-        if description is None:
-            description = "Unexpected error while running command."
-        if exit_code is None:
-            exit_code = '-'
-        message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
-                   % (description, cmd, exit_code, stdout, stderr))
-        super(ProcessExecutionError, self).__init__(message)
-
-
-class NoRootWrapSpecified(Exception):
-    def __init__(self, message=None):
-        super(NoRootWrapSpecified, self).__init__(message)
-
-
-def _subprocess_setup():
-    # Python installs a SIGPIPE handler by default. This is usually not what
-    # non-Python subprocesses expect.
-    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
-
-def execute(*cmd, **kwargs):
-    """Helper method to shell out and execute a command through subprocess.
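
On the policy.py hunk above: six.moves gives a single import path that resolves to urllib/urllib2 on Python 2 and to urllib.parse/urllib.request on Python 3, which is what lets the py3kcompat shim go away. A quick sketch (the payload is fabricated):

    import six.moves.urllib.parse as urlparse
    import six.moves.urllib.request as urlrequest

    # urlencode lives in urllib on Python 2 and urllib.parse on Python 3;
    # six.moves hides the difference from the calling code.
    post_data = urlparse.urlencode({'target': '{}', 'credentials': '{}'})
    print(post_data)
    # urlrequest.urlopen(url, post_data) would then POST it, as HttpCheck does.
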
-
-    Allows optional retry.
-
-    :param cmd: Passed to subprocess.Popen.
-    :type cmd: string
-    :param process_input: Send to opened process.
-    :type proces_input: string
-    :param check_exit_code: Single bool, int, or list of allowed exit
-                            codes.  Defaults to [0].  Raise
-                            :class:`ProcessExecutionError` unless
-                            program exits with one of these code.
-    :type check_exit_code: boolean, int, or [int]
-    :param delay_on_retry: True | False. Defaults to True. If set to True,
-                           wait a short amount of time before retrying.
-    :type delay_on_retry: boolean
-    :param attempts: How many times to retry cmd.
-    :type attempts: int
-    :param run_as_root: True | False. Defaults to False. If set to True,
-                        the command is prefixed by the command specified
-                        in the root_helper kwarg.
-    :type run_as_root: boolean
-    :param root_helper: command to prefix to commands called with
-                        run_as_root=True
-    :type root_helper: string
-    :param shell: whether or not there should be a shell used to
-                  execute this command. Defaults to false.
-    :type shell: boolean
-    :param loglevel: log level for execute commands.
-    :type loglevel: int.  (Should be stdlib_logging.DEBUG or
-                    stdlib_logging.INFO)
-    :returns: (stdout, stderr) from process execution
-    :raises: :class:`UnknownArgumentError` on
-             receiving unknown arguments
-    :raises: :class:`ProcessExecutionError`
-    """
-
-    process_input = kwargs.pop('process_input', None)
-    check_exit_code = kwargs.pop('check_exit_code', [0])
-    ignore_exit_code = False
-    delay_on_retry = kwargs.pop('delay_on_retry', True)
-    attempts = kwargs.pop('attempts', 1)
-    run_as_root = kwargs.pop('run_as_root', False)
-    root_helper = kwargs.pop('root_helper', '')
-    shell = kwargs.pop('shell', False)
-    loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
-
-    if isinstance(check_exit_code, bool):
-        ignore_exit_code = not check_exit_code
-        check_exit_code = [0]
-    elif isinstance(check_exit_code, int):
-        check_exit_code = [check_exit_code]
-
-    if kwargs:
-        raise UnknownArgumentError(_('Got unknown keyword args '
-                                     'to utils.execute: %r') % kwargs)
-
-    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
-        if not root_helper:
-            raise NoRootWrapSpecified(
-                message=('Command requested root, but did not specify a root '
-                         'helper.'))
-        cmd = shlex.split(root_helper) + list(cmd)
-
-    cmd = map(str, cmd)
-
-    while attempts > 0:
-        attempts -= 1
-        try:
-            LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
-            _PIPE = subprocess.PIPE  # pylint: disable=E1101
-
-            if os.name == 'nt':
-                preexec_fn = None
-                close_fds = False
-            else:
-                preexec_fn = _subprocess_setup
-                close_fds = True
-
-            obj = subprocess.Popen(cmd,
-                                   stdin=_PIPE,
-                                   stdout=_PIPE,
-                                   stderr=_PIPE,
-                                   close_fds=close_fds,
-                                   preexec_fn=preexec_fn,
-                                   shell=shell)
-            result = None
-            if process_input is not None:
-                result = obj.communicate(process_input)
-            else:
-                result = obj.communicate()
-            obj.stdin.close()  # pylint: disable=E1101
-            _returncode = obj.returncode  # pylint: disable=E1101
-            LOG.log(loglevel, _('Result was %s') % _returncode)
-            if not ignore_exit_code and _returncode not in check_exit_code:
-                (stdout, stderr) = result
-                raise ProcessExecutionError(exit_code=_returncode,
-                                            stdout=stdout,
-                                            stderr=stderr,
-                                            cmd=' '.join(cmd))
-            return result
-        except ProcessExecutionError:
-            if not attempts:
-                raise
-            else:
-                LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
-                if delay_on_retry:
-                    greenthread.sleep(random.randint(20, 200) / 100.0)
-        finally:
-            # NOTE(termie): this appears to be necessary to let the subprocess
-            #               call clean something up in between calls, without
-            #               it two execute calls in a row hangs the second one
-            greenthread.sleep(0)
-
-
-def trycmd(*args, **kwargs):
-    """A wrapper around execute() to more easily handle warnings and errors.
-
-    Returns an (out, err) tuple of strings containing the output of
-    the command's stdout and stderr.  If 'err' is not empty then the
-    command can be considered to have failed.
-
-    :discard_warnings   True | False. Defaults to False. If set to True,
-                        then for succeeding commands, stderr is cleared
-
-    """
-    discard_warnings = kwargs.pop('discard_warnings', False)
-
-    try:
-        out, err = execute(*args, **kwargs)
-        failed = False
-    except ProcessExecutionError as exn:
-        out, err = '', str(exn)
-        failed = True
-
-    if not failed and discard_warnings and err:
-        # Handle commands that output to stderr but otherwise succeed
-        err = ''
-
-    return out, err
-
-
-def ssh_execute(ssh, cmd, process_input=None,
-                addl_env=None, check_exit_code=True):
-    LOG.debug(_('Running cmd (SSH): %s'), cmd)
-    if addl_env:
-        raise InvalidArgumentError(_('Environment not supported over SSH'))
-
-    if process_input:
-        # This is (probably) fixable if we need it...
-        raise InvalidArgumentError(_('process_input not supported over SSH'))
-
-    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
-    channel = stdout_stream.channel
-
-    # NOTE(justinsb): This seems suspicious...
-    # ...other SSH clients have buffering issues with this approach
-    stdout = stdout_stream.read()
-    stderr = stderr_stream.read()
-    stdin_stream.close()
-
-    exit_status = channel.recv_exit_status()
-
-    # exit_status == -1 if no exit code was returned
-    if exit_status != -1:
-        LOG.debug(_('Result was %s') % exit_status)
-        if check_exit_code and exit_status != 0:
-            raise ProcessExecutionError(exit_code=exit_status,
-                                        stdout=stdout,
-                                        stderr=stderr,
-                                        cmd=cmd)
-
-    return (stdout, stderr)
diff --git a/ceilometer/openstack/common/rpc/__init__.py b/ceilometer/openstack/common/rpc/__init__.py
index 119f56916d..6c68083770 100644
--- a/ceilometer/openstack/common/rpc/__init__.py
+++ b/ceilometer/openstack/common/rpc/__init__.py
@@ -23,13 +23,9 @@
 For some wrappers that add message versioning to rpc, see:
     rpc.proxy

 """

-import inspect
-
 from oslo.config import cfg

-from ceilometer.openstack.common.gettextutils import _
 from ceilometer.openstack.common import importutils
-from ceilometer.openstack.common import local
 from ceilometer.openstack.common import log as logging

@@ -93,24 +89,7 @@ def create_connection(new=True):
     return _get_impl().create_connection(CONF, new=new)


-def _check_for_lock():
-    if not CONF.debug:
-        return None
-
-    if ((hasattr(local.strong_store, 'locks_held')
-         and local.strong_store.locks_held)):
-        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
-        LOG.warn(_('A RPC is being made while holding a lock. The locks '
-                   'currently held are %(locks)s. This is probably a bug. '
-                   'Please report it. Include the following: [%(stack)s].'),
-                 {'locks': local.strong_store.locks_held,
-                  'stack': stack})
-        return True
-
-    return False
-
-
-def call(context, topic, msg, timeout=None, check_for_lock=False):
+def call(context, topic, msg, timeout=None):
     """Invoke a remote method that returns something.

     :param context: Information that identifies the user that has made this
@@ -124,16 +103,12 @@
                       "args" : dict_of_kwargs }
     :param timeout: int, number of seconds to use for a response timeout.
                     If set, this overrides the rpc_response_timeout option.
-    :param check_for_lock: if True, a warning is emitted if a RPC call is made
-                           with a lock held.

     :returns: A dict from the remote method.

     :raises: openstack.common.rpc.common.Timeout if a complete response
              is not received before the timeout is reached.
     """
-    if check_for_lock:
-        _check_for_lock()
     return _get_impl().call(CONF, context, topic, msg, timeout)

@@ -176,7 +151,7 @@ def fanout_cast(context, topic, msg):
     return _get_impl().fanout_cast(CONF, context, topic, msg)


-def multicall(context, topic, msg, timeout=None, check_for_lock=False):
+def multicall(context, topic, msg, timeout=None):
     """Invoke a remote method and get back an iterator.

     In this case, the remote method will be returning multiple values in
@@ -194,8 +169,6 @@
                       "args" : dict_of_kwargs }
     :param timeout: int, number of seconds to use for a response timeout.
                     If set, this overrides the rpc_response_timeout option.
-    :param check_for_lock: if True, a warning is emitted if a RPC call is made
-                           with a lock held.

     :returns: An iterator.  The iterator will yield a tuple (N, X) where N is
               an index that starts at 0 and increases by one for each value
@@ -205,8 +178,6 @@
     :raises: openstack.common.rpc.common.Timeout if a complete response
              is not received before the timeout is reached.
     """
-    if check_for_lock:
-        _check_for_lock()
     return _get_impl().multicall(CONF, context, topic, msg, timeout)
diff --git a/ceilometer/openstack/common/rpc/impl_fake.py b/ceilometer/openstack/common/rpc/impl_fake.py
index f2b8cf71f3..568addf2a4 100644
--- a/ceilometer/openstack/common/rpc/impl_fake.py
+++ b/ceilometer/openstack/common/rpc/impl_fake.py
@@ -11,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
 """Fake RPC implementation which calls proxy methods directly with no
 queues.  Casts will block, but this is very useful for tests.
 """
diff --git a/ceilometer/openstack/common/rpc/impl_kombu.py b/ceilometer/openstack/common/rpc/impl_kombu.py
index 6f285289b2..c8a6de0e30 100644
--- a/ceilometer/openstack/common/rpc/impl_kombu.py
+++ b/ceilometer/openstack/common/rpc/impl_kombu.py
@@ -38,9 +38,9 @@ from ceilometer.openstack.common import sslutils
 kombu_opts = [
     cfg.StrOpt('kombu_ssl_version',
                default='',
-               help='SSL version to use (valid only if SSL enabled). '
-                    'valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
-                    'be available on some distributions'
+               help='If SSL is enabled, the SSL version to use. Valid '
+                    'values are TLSv1, SSLv23 and SSLv3. SSLv2 might '
+                    'be available on some distributions.'
                ),
     cfg.StrOpt('kombu_ssl_keyfile',
                default='',
@@ -63,33 +63,33 @@ kombu_opts = [
                help='RabbitMQ HA cluster host:port pairs'),
     cfg.BoolOpt('rabbit_use_ssl',
                 default=False,
-                help='connect over SSL for RabbitMQ'),
+                help='Connect over SSL for RabbitMQ'),
     cfg.StrOpt('rabbit_userid',
                default='guest',
-               help='the RabbitMQ userid'),
+               help='The RabbitMQ userid'),
     cfg.StrOpt('rabbit_password',
                default='guest',
-               help='the RabbitMQ password',
+               help='The RabbitMQ password',
               secret=True),
     cfg.StrOpt('rabbit_virtual_host',
                default='/',
-               help='the RabbitMQ virtual host'),
+               help='The RabbitMQ virtual host'),
     cfg.IntOpt('rabbit_retry_interval',
                default=1,
-               help='how frequently to retry connecting with RabbitMQ'),
+               help='How frequently to retry connecting with RabbitMQ'),
     cfg.IntOpt('rabbit_retry_backoff',
                default=2,
-               help='how long to backoff for between retries when connecting '
+               help='How long to back off between retries when connecting '
                     'to RabbitMQ'),
     cfg.IntOpt('rabbit_max_retries',
                default=0,
-               help='maximum retries with trying to connect to RabbitMQ '
-                    '(the default of 0 implies an infinite retry count)'),
+               help='Maximum number of RabbitMQ connection retries. '
+                    'Default is 0 (infinite retry count)'),
     cfg.BoolOpt('rabbit_ha_queues',
                 default=False,
-                help='use H/A queues in RabbitMQ (x-ha-policy: all).'
-                     'You need to wipe RabbitMQ database when '
-                     'changing this option.'),
+                help='Use HA queues in RabbitMQ (x-ha-policy: all). '
+                     'If you change this option, you must wipe the '
+                     'RabbitMQ database.'),
 ]
diff --git a/ceilometer/openstack/common/rpc/impl_zmq.py b/ceilometer/openstack/common/rpc/impl_zmq.py
index 1e7df65185..02a580348c 100644
--- a/ceilometer/openstack/common/rpc/impl_zmq.py
+++ b/ceilometer/openstack/common/rpc/impl_zmq.py
@@ -80,7 +80,7 @@ CONF = cfg.CONF
 CONF.register_opts(zmq_opts)

 ZMQ_CTX = None  # ZeroMQ Context, must be global.
-matchmaker = None  # memoized matchmaker object
+matchmaker = None  # memorized matchmaker object


 def _serialize(data):
diff --git a/ceilometer/openstack/common/rpc/matchmaker.py b/ceilometer/openstack/common/rpc/matchmaker.py
index 6e83329555..5017fd88fb 100644
--- a/ceilometer/openstack/common/rpc/matchmaker.py
+++ b/ceilometer/openstack/common/rpc/matchmaker.py
@@ -11,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
 """
 The MatchMaker classes should except a Topic or Fanout exchange key and
 return keys for direct exchanges, per (approximate) AMQP parlance.
diff --git a/ceilometer/openstack/common/rpc/matchmaker_redis.py b/ceilometer/openstack/common/rpc/matchmaker_redis.py
index 4c1c14e1ff..decf9fed35 100644
--- a/ceilometer/openstack/common/rpc/matchmaker_redis.py
+++ b/ceilometer/openstack/common/rpc/matchmaker_redis.py
@@ -11,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
 """
 The MatchMaker classes should accept a Topic or Fanout exchange key and
 return keys for direct exchanges, per (approximate) AMQP parlance.
diff --git a/ceilometer/openstack/common/rpc/matchmaker_ring.py b/ceilometer/openstack/common/rpc/matchmaker_ring.py
index 0a49928d7e..55bfc557d6 100644
--- a/ceilometer/openstack/common/rpc/matchmaker_ring.py
+++ b/ceilometer/openstack/common/rpc/matchmaker_ring.py
@@ -11,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
 """
 The MatchMaker classes should except a Topic or Fanout exchange key and
 return keys for direct exchanges, per (approximate) AMQP parlance.
diff --git a/ceilometer/openstack/common/service.py b/ceilometer/openstack/common/service.py
index 7ccea167df..1ef002cfa1 100644
--- a/ceilometer/openstack/common/service.py
+++ b/ceilometer/openstack/common/service.py
@@ -484,11 +484,12 @@ class Services(object):
         done.wait()


-def launch(service, workers=None):
-    if workers:
-        launcher = ProcessLauncher()
-        launcher.launch_service(service, workers=workers)
-    else:
+def launch(service, workers=1):
+    if workers is None or workers == 1:
         launcher = ServiceLauncher()
         launcher.launch_service(service)
+    else:
+        launcher = ProcessLauncher()
+        launcher.launch_service(service, workers=workers)
+
     return launcher
diff --git a/ceilometer/openstack/common/test.py b/ceilometer/openstack/common/test.py
index 43a656e4ad..c406f5d7fb 100644
--- a/ceilometer/openstack/common/test.py
+++ b/ceilometer/openstack/common/test.py
@@ -15,12 +15,15 @@

 """Common utilities used in testing"""

+import logging
 import os
+import tempfile

 import fixtures
 import testtools

 _TRUE_VALUES = ('True', 'true', '1', 'yes')
+_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"


 class BaseTestCase(testtools.TestCase):
@@ -29,9 +32,10 @@ class BaseTestCase(testtools.TestCase):
         super(BaseTestCase, self).setUp()
         self._set_timeout()
         self._fake_output()
-        self.useFixture(fixtures.FakeLogger())
+        self._fake_logs()
         self.useFixture(fixtures.NestedTempfile())
         self.useFixture(fixtures.TempHomeDir())
+        self.tempdirs = []

     def _set_timeout(self):
         test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
@@ -50,3 +54,35 @@ class BaseTestCase(testtools.TestCase):
         if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
             stderr = self.useFixture(fixtures.StringStream('stderr')).stream
             self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
+
+    def _fake_logs(self):
+        if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
+            level = logging.DEBUG
+        else:
+            level = logging.INFO
+        capture_logs = os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES
+        if capture_logs:
+            self.useFixture(
+                fixtures.FakeLogger(
+                    format=_LOG_FORMAT,
+                    level=level,
+                    nuke_handlers=capture_logs,
+                )
+            )
+        else:
+            logging.basicConfig(format=_LOG_FORMAT, level=level)
+
+    def create_tempfiles(self, files, ext='.conf'):
+        tempfiles = []
+        for (basename, contents) in files:
+            if not os.path.isabs(basename):
+                (fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext)
+            else:
+                path = basename + ext
+                fd = os.open(path, os.O_CREAT | os.O_WRONLY)
+            tempfiles.append(path)
+            try:
+                os.write(fd, contents)
+            finally:
+                os.close(fd)
+        return tempfiles
diff --git a/ceilometer/openstack/common/threadgroup.py b/ceilometer/openstack/common/threadgroup.py
index e4037e4789..1acd32bb40 100644
--- a/ceilometer/openstack/common/threadgroup.py
+++ b/ceilometer/openstack/common/threadgroup.py
@@ -11,10 +11,10 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
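
One possible use of the create_tempfiles() helper added to BaseTestCase above; the test class and file contents are illustrative only:

    from ceilometer.openstack.common import test

    class TestConfigLoading(test.BaseTestCase):
        def test_reads_generated_file(self):
            # Relative names go through tempfile.mkstemp(); absolute names
            # are created in place with the extension appended.
            paths = self.create_tempfiles(
                [('sample', '[DEFAULT]\nverbose = true\n')])
            with open(paths[0]) as f:
                self.assertIn('verbose', f.read())
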
+import threading
 import eventlet
 from eventlet import greenpool
-from eventlet import greenthread

 from ceilometer.openstack.common import log as logging
 from ceilometer.openstack.common import loopingcall
@@ -51,7 +51,7 @@ class Thread(object):


 class ThreadGroup(object):
-    """The point of the ThreadGroup classis to:
+    """The point of the ThreadGroup class is to:

     * keep track of timers and greenthreads (making it easier to stop them
       when need be).
@@ -86,7 +86,7 @@ class ThreadGroup(object):
             self.threads.remove(thread)

     def stop(self):
-        current = greenthread.getcurrent()
+        current = threading.current_thread()

         # Iterate over a copy of self.threads so thread_done doesn't
         # modify the list while we're iterating
@@ -114,7 +114,7 @@ class ThreadGroup(object):
                 pass
             except Exception as ex:
                 LOG.exception(ex)
-        current = greenthread.getcurrent()
+        current = threading.current_thread()

         # Iterate over a copy of self.threads so thread_done doesn't
         # modify the list while we're iterating
diff --git a/ceilometer/openstack/common/timeutils.py b/ceilometer/openstack/common/timeutils.py
index d5ed81d3e3..52688a0268 100644
--- a/ceilometer/openstack/common/timeutils.py
+++ b/ceilometer/openstack/common/timeutils.py
@@ -114,7 +114,7 @@ def utcnow():


 def iso8601_from_timestamp(timestamp):
-    """Returns a iso8601 formated date from timestamp."""
+    """Returns an iso8601 formatted date from timestamp."""
     return isotime(datetime.datetime.utcfromtimestamp(timestamp))

diff --git a/ceilometer/openstack/common/xmlutils.py b/ceilometer/openstack/common/xmlutils.py
deleted file mode 100644
index 1231a5902c..0000000000
--- a/ceilometer/openstack/common/xmlutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
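
On the threadgroup.py hunk above: once eventlet monkey-patches the threading module, threading.current_thread() resolves to the current greenthread as well, so the group no longer needs the eventlet-only greenthread.getcurrent(). A rough sketch of that assumption:

    import eventlet
    eventlet.monkey_patch()

    import threading

    def current():
        # Under monkey-patching this returns a green-thread-backed object,
        # so identity checks in ThreadGroup.stop() keep working without
        # importing eventlet.greenthread directly.
        return threading.current_thread()

    print(current().name)
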
-
-from xml.dom import minidom
-from xml.parsers import expat
-from xml import sax
-from xml.sax import expatreader
-
-
-class ProtectedExpatParser(expatreader.ExpatParser):
-    """An expat parser which disables DTD's and entities by default."""
-
-    def __init__(self, forbid_dtd=True, forbid_entities=True,
-                 *args, **kwargs):
-        # Python 2.x old style class
-        expatreader.ExpatParser.__init__(self, *args, **kwargs)
-        self.forbid_dtd = forbid_dtd
-        self.forbid_entities = forbid_entities
-
-    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
-        raise ValueError("Inline DTD forbidden")
-
-    def entity_decl(self, entityName, is_parameter_entity, value, base,
-                    systemId, publicId, notationName):
-        raise ValueError("<!ENTITY> entity declaration forbidden")
-
-    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
-        # expat 1.2
-        raise ValueError("<!ENTITY> unparsed entity forbidden")
-
-    def external_entity_ref(self, context, base, systemId, publicId):
-        raise ValueError("<!ENTITY> external entity forbidden")
-
-    def notation_decl(self, name, base, sysid, pubid):
-        raise ValueError("<!ENTITY> notation forbidden")
-
-    def reset(self):
-        expatreader.ExpatParser.reset(self)
-        if self.forbid_dtd:
-            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
-            self._parser.EndDoctypeDeclHandler = None
-        if self.forbid_entities:
-            self._parser.EntityDeclHandler = self.entity_decl
-            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
-            self._parser.ExternalEntityRefHandler = self.external_entity_ref
-            self._parser.NotationDeclHandler = self.notation_decl
-            try:
-                self._parser.SkippedEntityHandler = None
-            except AttributeError:
-                # some pyexpat versions do not support SkippedEntity
-                pass
-
-
-def safe_minidom_parse_string(xml_string):
-    """Parse an XML string using minidom safely.
-
-    """
-    try:
-        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
-    except sax.SAXParseException:
-        raise expat.ExpatError()
diff --git a/etc/ceilometer/ceilometer.conf.sample b/etc/ceilometer/ceilometer.conf.sample
index 3025e888ab..0c04492fb1 100644
--- a/etc/ceilometer/ceilometer.conf.sample
+++ b/etc/ceilometer/ceilometer.conf.sample
@@ -125,26 +125,13 @@
 # Options defined in ceilometer.openstack.common.db.sqlalchemy.session
 #

-# the filename to use with sqlite (string value)
+# The file name to use with SQLite (string value)
 #sqlite_db=ceilometer.sqlite

-# If true, use synchronous mode for sqlite (boolean value)
+# If True, SQLite uses synchronous mode (boolean value)
 #sqlite_synchronous=true


-#
-# Options defined in ceilometer.openstack.common.deprecated.wsgi
-#
-
-# Number of backlog requests to configure the socket with
-# (integer value)
-#backlog=4096
-
-# Sets the value of TCP_KEEPIDLE in seconds for each server
-# socket. Not supported on OS X. (integer value)
-#tcp_keepidle=600
-
-
 #
 # Options defined in ceilometer.openstack.common.eventlet_backdoor
 #
@@ -186,29 +173,29 @@
 # Log output to standard error (boolean value)
 #use_stderr=true

-# format string to use for log messages with context (string
+# Format string to use for log messages with context (string
 # value)
 #logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s

-# format string to use for log messages without context
+# Format string to use for log messages without context
 # (string value)
 #logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s

-# data to append to log format when level is DEBUG (string
+# Data to append to log format when level is DEBUG (string
 # value)
 #logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d

-# prefix each line of exception output with this format
+# Prefix each line of exception output with this format
 # (string value)
 #logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s

-# list of logger=LEVEL pairs (list value)
-#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN
+# List of logger=LEVEL pairs (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN

-# publish error events (boolean value)
+# Publish error events (boolean value)
 #publish_errors=false

-# make deprecations fatal (boolean value)
+# Make deprecations fatal (boolean value)
 #fatal_deprecations=false

 # If an instance is passed with the log message, format it
@@ -248,10 +235,18 @@
 # Deprecated group/name - [DEFAULT]/logdir
 #log_dir=

-# Use syslog for logging. (boolean value)
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and then will be changed in J to honor RFC5424
+# (boolean value)
 #use_syslog=false

-# syslog facility to receive log lines (string value)
+# (Optional) Use syslog rfc5424 format for logging. If
+# enabled, will add APP-NAME (RFC5424) before the MSG part of
+# the syslog message. The old format without APP-NAME is
+# deprecated in I, and will be removed in J. (boolean value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines (string value)
 #syslog_log_facility=LOG_USER


@@ -259,8 +254,7 @@
 # Options defined in ceilometer.openstack.common.middleware.sizelimit
 #

-# the maximum body size per each request(bytes) (integer
-# value)
+# The maximum body size per request, in bytes (integer value)
 # Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
 #max_request_body_size=114688

@@ -352,9 +346,9 @@
 # Options defined in ceilometer.openstack.common.rpc.impl_kombu
 #

-# SSL version to use (valid only if SSL enabled). valid values
-# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
-# distributions (string value)
+# If SSL is enabled, the SSL version to use. Valid values are
+# TLSv1, SSLv23 and SSLv3. SSLv2 might be available on some
+# distributions. (string value)
 #kombu_ssl_version=

 # SSL key file (valid only if SSL enabled) (string value)
@@ -378,33 +372,32 @@
 # RabbitMQ HA cluster host:port pairs (list value)
 #rabbit_hosts=$rabbit_host:$rabbit_port

-# connect over SSL for RabbitMQ (boolean value)
+# Connect over SSL for RabbitMQ (boolean value)
 #rabbit_use_ssl=false

-# the RabbitMQ userid (string value)
+# The RabbitMQ userid (string value)
 #rabbit_userid=guest

-# the RabbitMQ password (string value)
+# The RabbitMQ password (string value)
 #rabbit_password=guest

-# the RabbitMQ virtual host (string value)
+# The RabbitMQ virtual host (string value)
 #rabbit_virtual_host=/

-# how frequently to retry connecting with RabbitMQ (integer
+# How frequently to retry connecting with RabbitMQ (integer
 # value)
 #rabbit_retry_interval=1

-# how long to backoff for between retries when connecting to
+# How long to back off between retries when connecting to
 # RabbitMQ (integer value)
 #rabbit_retry_backoff=2

-# maximum retries with trying to connect to RabbitMQ (the
-# default of 0 implies an infinite retry count) (integer
-# value)
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count) (integer value)
 #rabbit_max_retries=0

-# use H/A queues in RabbitMQ (x-ha-policy: all).You need to
-# wipe RabbitMQ database when changing this option. (boolean
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
 # value)
 #rabbit_ha_queues=false

@@ -623,7 +616,7 @@
 # slave database (string value)
 #slave_connection=

-# timeout before idle sql connections are reaped (integer
+# Timeout before idle sql connections are reaped (integer
 # value)
 # Deprecated group/name - [DEFAULT]/sql_idle_timeout
 # Deprecated group/name - [DATABASE]/sql_idle_timeout
@@ -642,13 +635,13 @@
 # Deprecated group/name - [DATABASE]/sql_max_pool_size
 #max_pool_size=

-# maximum db connection retries during startup. (setting -1
+# Maximum db connection retries during startup. (setting -1
 # implies an infinite retry count) (integer value)
 # Deprecated group/name - [DEFAULT]/sql_max_retries
 # Deprecated group/name - [DATABASE]/sql_max_retries
 #max_retries=10

-# interval between retries of opening a sql connection
+# Interval between retries of opening a sql connection
 # (integer value)
 # Deprecated group/name - [DEFAULT]/sql_retry_interval
 # Deprecated group/name - [DATABASE]/reconnect_interval
diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh
index cfc40dc106..521effd61a 100755
--- a/tools/config/generate_sample.sh
+++ b/tools/config/generate_sample.sh
@@ -4,8 +4,8 @@ print_hint() {
     echo "Try \`${0##*/} --help' for more information." >&2
 }

-PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:o: \
-                 --long help,base-dir:,package-name:,output-dir: -- "$@")
+PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:l:o: \
+                 --long help,base-dir:,package-name:,output-dir:,library: -- "$@")

 if [ $? != 0 ] ; then print_hint ; exit 1 ; fi

@@ -21,6 +21,7 @@ while true; do
             echo "-b, --base-dir=DIR        project base directory"
             echo "-p, --package-name=NAME   project package name"
             echo "-o, --output-dir=DIR      file output directory"
+            echo "-l, --library=LIB         extra library that registers options for discovery"
             exit 0
             ;;
         -b|--base-dir)
@@ -38,6 +39,11 @@ while true; do
            OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
            shift
            ;;
+        -l|--library)
+            shift
+            LIBRARIES="$LIBRARIES -l $1"
+            shift
+            ;;
         --)
             break
             ;;
@@ -77,10 +83,10 @@ find $TARGETDIR -type f -name "*.pyc" -delete
 FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
         -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)

-EXTRA_MODULES_FILE="`dirname $0`/oslo.config.generator.rc"
-if test -r "$EXTRA_MODULES_FILE"
+RC_FILE="`dirname $0`/oslo.config.generator.rc"
+if test -r "$RC_FILE"
 then
-    source "$EXTRA_MODULES_FILE"
+    source "$RC_FILE"
 fi

 export EVENTLET_NO_GREENDNS=yes
@@ -90,7 +96,7 @@ OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
 DEFAULT_MODULEPATH=ceilometer.openstack.common.config.generator
 MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
 OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
-python -m $MODULEPATH $FILES > $OUTPUTFILE
+python -m $MODULEPATH $LIBRARIES $FILES > $OUTPUTFILE

 # Hook to allow projects to append custom config file snippets
 CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
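
A hypothetical invocation of the updated script; the library name is only an example of something that exposes an 'oslo.config.opts' entry point:

    # Include options registered by an external library in the generated
    # sample via the new -l/--library flag (run from the project root).
    ./tools/config/generate_sample.sh -b . -p ceilometer -o etc/ceilometer \
        -l oslo.messaging
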