From c8a5bc39dccd513d879ad042165e4713fc2a3878 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Mon, 3 Dec 2012 16:21:29 -0600 Subject: [PATCH] Update oslo codebase within reddwarf. * Updated logging,cfg,setup to new oslo * Split out the paste ini from the conf files * Modified reddwarf-api/server to use new modules * Modified reddwarf-manage to use new cfg * Added rpc helper for rpc services * Modified reddwarf-taskmanager to use rpc helper * Modified reddwarf-guestagent to use new rpc helper * Fixed guestagent api to use rpc proxy * Fixed taskmanager module to conform to new rpc * Updated guestagent manager/pkg to use new rpc * Updated api paste to use keystoneclient auth_token * Updated managers to use periodic tasks Implements: blueprint reddwarf/upgrade-oslo Change-Id: I9ad1b441eca855a4304454014ae746ec51bef8f3 --- .gitignore | 3 + bin/reddwarf-api | 38 +- bin/reddwarf-guestagent | 41 +- bin/reddwarf-manage | 109 +- bin/reddwarf-server | 60 +- bin/reddwarf-taskmanager | 27 +- etc/reddwarf/api-paste.ini | 42 + etc/reddwarf/reddwarf-guestagent.conf.sample | 38 +- etc/reddwarf/reddwarf-taskmanager.conf.sample | 5 +- etc/reddwarf/reddwarf.conf.sample | 41 - etc/reddwarf/reddwarf.conf.test | 2 +- openstack-common.conf | 2 +- reddwarf/common/api.py | 9 +- reddwarf/common/auth.py | 3 +- reddwarf/common/cfg.py | 105 + reddwarf/common/config.py | 153 -- reddwarf/common/exception.py | 16 +- reddwarf/common/excutils.py | 2 +- reddwarf/common/extensions.py | 15 +- reddwarf/common/manager.py | 9 +- reddwarf/common/models.py | 2 +- reddwarf/common/remote.py | 16 +- reddwarf/common/rpc.py | 49 + reddwarf/common/service.py | 19 +- reddwarf/common/utils.py | 17 +- reddwarf/common/wsgi.py | 45 +- reddwarf/db/__init__.py | 8 +- reddwarf/db/models.py | 5 +- reddwarf/db/sqlalchemy/migrate_repo/schema.py | 2 +- reddwarf/db/sqlalchemy/migration.py | 2 +- reddwarf/db/sqlalchemy/session.py | 28 +- reddwarf/dns/manager.py | 14 +- reddwarf/dns/models.py | 3 +- 
reddwarf/dns/rsdns/driver.py | 24 +- reddwarf/extensions/account.py | 2 +- reddwarf/extensions/account/models.py | 2 +- reddwarf/extensions/account/service.py | 4 +- reddwarf/extensions/mgmt.py | 2 +- .../extensions/mgmt/host/instance/service.py | 3 +- reddwarf/extensions/mgmt/host/models.py | 4 +- reddwarf/extensions/mgmt/host/service.py | 3 +- reddwarf/extensions/mgmt/instances/models.py | 4 +- reddwarf/extensions/mgmt/instances/service.py | 17 +- reddwarf/extensions/mgmt/volume/models.py | 2 +- reddwarf/extensions/mgmt/volume/service.py | 5 +- reddwarf/extensions/mysql.py | 2 +- reddwarf/extensions/mysql/models.py | 22 +- reddwarf/extensions/mysql/service.py | 7 +- reddwarf/guestagent/api.py | 51 +- reddwarf/guestagent/db/models.py | 8 +- reddwarf/guestagent/dbaas.py | 114 +- reddwarf/guestagent/manager.py | 176 +- reddwarf/guestagent/models.py | 11 +- reddwarf/guestagent/pkg.py | 293 +-- reddwarf/guestagent/service.py | 2 +- reddwarf/guestagent/volume.py | 20 +- reddwarf/instance/models.py | 13 +- reddwarf/instance/service.py | 17 +- reddwarf/instance/views.py | 10 +- reddwarf/openstack/common/authutils.py | 44 + reddwarf/openstack/common/cfg.py | 1787 +++++++++++++++++ reddwarf/openstack/common/context.py | 43 +- .../openstack/common/eventlet_backdoor.py | 80 + reddwarf/openstack/common/exception.py | 14 +- reddwarf/openstack/common/excutils.py | 10 +- reddwarf/openstack/common/fileutils.py | 35 + reddwarf/openstack/common/gettextutils.py | 33 + reddwarf/openstack/common/importutils.py | 2 +- reddwarf/openstack/common/iniparser.py | 130 ++ reddwarf/openstack/common/jsonutils.py | 2 +- reddwarf/openstack/common/lockutils.py | 233 +++ reddwarf/openstack/common/log.py | 476 +++++ reddwarf/openstack/common/loopingcall.py | 95 + .../openstack/common/middleware/__init__.py | 0 .../openstack/common/middleware/context.py | 64 + reddwarf/openstack/common/network_utils.py | 68 + .../openstack/common/notifier/__init__.py | 14 + reddwarf/openstack/common/notifier/api.py | 
182 ++ .../openstack/common/notifier/log_notifier.py | 35 + .../common/notifier/no_op_notifier.py} | 9 +- .../common/notifier/rabbit_notifier.py | 29 + .../openstack/common/notifier/rpc_notifier.py | 46 + .../common/notifier/test_notifier.py | 22 + reddwarf/openstack/common/pastedeploy.py | 164 ++ reddwarf/openstack/common/periodic_task.py | 115 ++ reddwarf/openstack/common/policy.py | 779 +++++++ reddwarf/openstack/common/processutils.py | 135 ++ reddwarf/openstack/common/rpc/__init__.py | 46 +- reddwarf/openstack/common/rpc/amqp.py | 53 +- reddwarf/openstack/common/rpc/common.py | 61 +- reddwarf/openstack/common/rpc/dispatcher.py | 10 +- reddwarf/openstack/common/rpc/impl_fake.py | 11 +- reddwarf/openstack/common/rpc/impl_kombu.py | 157 +- reddwarf/openstack/common/rpc/impl_qpid.py | 135 +- reddwarf/openstack/common/rpc/impl_zmq.py | 48 +- reddwarf/openstack/common/rpc/matchmaker.py | 2 +- reddwarf/openstack/common/rpc/service.py | 75 + reddwarf/openstack/common/service.py | 325 +++ reddwarf/openstack/common/setup.py | 315 ++- reddwarf/openstack/common/testutils.py | 68 + reddwarf/openstack/common/threadgroup.py | 116 ++ reddwarf/openstack/common/timeutils.py | 67 +- reddwarf/openstack/common/utils.py | 166 +- reddwarf/openstack/common/uuidutils.py | 39 + reddwarf/openstack/common/version.py | 168 ++ reddwarf/openstack/common/wsgi.py | 100 +- reddwarf/taskmanager/api.py | 17 +- reddwarf/taskmanager/manager.py | 16 +- reddwarf/taskmanager/models.py | 50 +- reddwarf/taskmanager/service.py | 2 +- reddwarf/tests/fakes/common.py | 16 +- reddwarf/tests/fakes/guestagent.py | 2 +- reddwarf/tests/fakes/nova.py | 2 +- rsdns/client/dns_client.py | 2 +- run_tests.py | 69 +- setup.py | 69 +- tools/pip-requires | 1 + 117 files changed, 6982 insertions(+), 1620 deletions(-) create mode 100644 etc/reddwarf/api-paste.ini create mode 100644 reddwarf/common/cfg.py delete mode 100644 reddwarf/common/config.py create mode 100644 reddwarf/common/rpc.py create mode 100644 
reddwarf/openstack/common/authutils.py create mode 100644 reddwarf/openstack/common/cfg.py create mode 100644 reddwarf/openstack/common/eventlet_backdoor.py create mode 100644 reddwarf/openstack/common/fileutils.py create mode 100644 reddwarf/openstack/common/gettextutils.py create mode 100644 reddwarf/openstack/common/iniparser.py create mode 100644 reddwarf/openstack/common/lockutils.py create mode 100644 reddwarf/openstack/common/log.py create mode 100644 reddwarf/openstack/common/loopingcall.py create mode 100644 reddwarf/openstack/common/middleware/__init__.py create mode 100644 reddwarf/openstack/common/middleware/context.py create mode 100644 reddwarf/openstack/common/network_utils.py create mode 100644 reddwarf/openstack/common/notifier/__init__.py create mode 100644 reddwarf/openstack/common/notifier/api.py create mode 100644 reddwarf/openstack/common/notifier/log_notifier.py rename reddwarf/{guestagent/agent.py => openstack/common/notifier/no_op_notifier.py} (83%) create mode 100644 reddwarf/openstack/common/notifier/rabbit_notifier.py create mode 100644 reddwarf/openstack/common/notifier/rpc_notifier.py create mode 100644 reddwarf/openstack/common/notifier/test_notifier.py create mode 100644 reddwarf/openstack/common/pastedeploy.py create mode 100644 reddwarf/openstack/common/periodic_task.py create mode 100644 reddwarf/openstack/common/policy.py create mode 100644 reddwarf/openstack/common/processutils.py create mode 100644 reddwarf/openstack/common/rpc/service.py create mode 100644 reddwarf/openstack/common/service.py create mode 100644 reddwarf/openstack/common/testutils.py create mode 100644 reddwarf/openstack/common/threadgroup.py create mode 100644 reddwarf/openstack/common/uuidutils.py create mode 100644 reddwarf/openstack/common/version.py diff --git a/.gitignore b/.gitignore index 91ab736128..a8feccdf13 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,6 @@ host-syslog.log tags .tox rdtest.log +reddwarf/versioninfo +AUTHORS +Changelog diff 
--git a/bin/reddwarf-api b/bin/reddwarf-api index ec67dd58c8..2f443ea513 100755 --- a/bin/reddwarf-api +++ b/bin/reddwarf-api @@ -33,40 +33,24 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')): sys.path.insert(0, possible_topdir) -from reddwarf import version -from reddwarf.common import config +from reddwarf.common import cfg +from reddwarf.openstack.common import service +from reddwarf.openstack.common import log as logging from reddwarf.common import wsgi from reddwarf.db import get_db_api -def create_options(parser): - """Sets up the CLI and config-file options - - :param parser: The option parser - :returns: None - - """ - parser.add_option('-p', '--port', dest="port", metavar="PORT", - type=int, - help="Port the Reddwarf API host listens on. " - "Default: %default") - config.add_common_options(parser) - config.add_log_options(parser) - +CONF = cfg.CONF if __name__ == '__main__': - oparser = optparse.OptionParser(version="%%prog %s" - % version.version_string()) - create_options(oparser) - (options, args) = config.parse_options(oparser) + cfg.parse_args(sys.argv) + logging.setup(None) + try: - config.Config.load_paste_config('reddwarf', options, args) - conf, app = config.Config.load_paste_app('reddwarf', options, args) - get_db_api().configure_db(conf) - server = wsgi.Server() - server.start(app, int(options.get('port') or conf['bind_port']), - conf['bind_host']) - server.wait() + get_db_api().configure_db(CONF) + conf_file = CONF.find_file(CONF.api_paste_config) + launcher = wsgi.launch('reddwarf', CONF.bind_port or 8779, conf_file) + launcher.wait() except RuntimeError as error: import traceback print traceback.format_exc() diff --git a/bin/reddwarf-guestagent b/bin/reddwarf-guestagent index 8b827e37fd..e8eed7666d 100755 --- a/bin/reddwarf-guestagent +++ b/bin/reddwarf-guestagent @@ -36,37 +36,28 @@ possible_topdir = 
os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')): sys.path.insert(0, possible_topdir) -from reddwarf import version -from reddwarf.common import config -from reddwarf.common import service -# TODO(hub-cap): find out why the db api isint being imported properly +from reddwarf.common import cfg +from reddwarf.common import rpc +from reddwarf.openstack.common import cfg as openstack_cfg +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import service from reddwarf.db import get_db_api +CONF = cfg.CONF +CONF.register_opts([openstack_cfg.StrOpt('guestagent_manager'), + openstack_cfg.StrOpt('guest_id')]) if __name__ == '__main__': - parser = optparse.OptionParser(version="%%prog %s" - % version.version_string()) - config.add_common_options(parser) - config.add_log_options(parser) + cfg.parse_args(sys.argv) + logging.setup(None) - (options, args) = config.parse_options(parser) try: - conf, app = config.Config.load_paste_app('reddwarf-guestagent', - options, args) - # Use the config file location for putting the new config values - conf_loc = '%s/%s' % (config.Config.get('here'), 'conf.d/guest_info') - config.Config.append_to_config_values('reddwarf-guestagent', - {'config_file': conf_loc}, None) - # Now do the same for the /etc/guest_info file - # that is injected into the VM - config.Config.append_to_config_values('reddwarf-guestagent', - {'config_file': '/etc/guest_info'}, None) - get_db_api().configure_db(conf) - server = service.Service.create(binary='reddwarf-guestagent', - host=config.Config.get('guest_id')) - service.serve(server) - service.wait() + get_db_api().configure_db(CONF) + server = rpc.RpcService(manager=CONF.guestagent_manager, + host=CONF.guest_id) + launcher = service.launch(server) + launcher.wait() except RuntimeError as error: import traceback print traceback.format_exc() - sys.exit("ERROR: %s" % error) + sys.exit("ERROR: %s" % 
error) \ No newline at end of file diff --git a/bin/reddwarf-manage b/bin/reddwarf-manage index b435ee776d..9ebb2f4219 100755 --- a/bin/reddwarf-manage +++ b/bin/reddwarf-manage @@ -17,6 +17,7 @@ # under the License. import gettext +import inspect import optparse import os import sys @@ -34,47 +35,41 @@ if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')): sys.path.insert(0, possible_topdir) from reddwarf import version -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import utils from reddwarf.db import get_db_api +from reddwarf.openstack.common import log as logging from reddwarf.instance import models as instance_models -def create_options(parser): - """Sets up the CLI and config-file options. - - :param parser: The option parser - :returns: None - """ - parser.add_option('-p', '--port', dest="port", metavar="PORT", - type=int, default=9898, - help="Port the Reddwarf API host listens on. " - "Default: %default") - config.add_common_options(parser) - config.add_log_options(parser) +CONF = cfg.CONF class Commands(object): - def __init__(self, conf): + def __init__(self): self.db_api = get_db_api() - self.conf = conf - def db_sync(self): - self.db_api.db_sync(self.conf, repo_path=None) + def db_sync(self, repo_path=None): + self.db_api.db_sync(CONF, repo_path=repo_path) def db_upgrade(self, version=None, repo_path=None): - self.db_api.db_upgrade(self.conf, version, repo_path=None) + self.db_api.db_upgrade(CONF, version, repo_path=None) def db_downgrade(self, version, repo_path=None): - self.db_api.db_downgrade(self.conf, version, repo_path=None) + self.db_api.db_downgrade(CONF, version, repo_path=None) - def execute(self, command_name, *args): - if self.has(command_name): - return getattr(self, command_name)(*args) + def execute(self): + exec_method = getattr(self, CONF.action.name) + args = inspect.getargspec(exec_method) + args.args.remove('self') + kwargs = {} + for arg in args.args: + kwargs[arg] = 
getattr(CONF.action, arg) + exec_method(**kwargs) def image_update(self, service_name, image_id): - self.db_api.configure_db(self.conf) + self.db_api.configure_db(CONF) image = self.db_api.find_by(instance_models.ServiceImage, service_name=service_name) if image is None: @@ -89,63 +84,47 @@ class Commands(object): """Drops the database and recreates it.""" from reddwarf.instance import models from reddwarf.db.sqlalchemy import session - self.db_api.drop_db(self.conf) + self.db_api.drop_db(CONF) self.db_sync() # Sets up database engine, so the next line will work... - session.configure_db(self.conf) + session.configure_db(CONF) models.ServiceImage.create(service_name=service_name, image_id=image_id) - _commands = ['db_sync', 'db_upgrade', 'db_downgrade', 'db_wipe', - 'image_update'] - - @classmethod - def has(cls, command_name): - return (command_name in cls._commands) - - @classmethod - def all(cls): - return cls._commands - def params_of(self, command_name): if Commands.has(command_name): return utils.MethodInspector(getattr(self, command_name)) -def usage(): - usage = """ -%prog action [args] [options] - -Available actions: - - """ - for action in Commands.all(): - usage = usage + ("\t%s\n" % action) - return usage.strip() - - if __name__ == '__main__': - oparser = optparse.OptionParser(version="%%prog %s" - % version.version_string(), - usage=usage()) - create_options(oparser) - (options, args) = config.parse_options(oparser) - if len(args) < 1 or not Commands.has(args[0]): - oparser.print_usage() - sys.exit(2) + def actions(subparser): + parser = subparser.add_parser('db_sync') + parser.add_argument('--repo_path') + parser = subparser.add_parser('db_upgrade') + parser.add_argument('--version') + parser.add_argument('--repo_path') + parser = subparser.add_parser('db_downgrade') + parser.add_argument('version') + parser.add_argument('--repo_path') + parser = subparser.add_parser('image_update') + parser.add_argument('service_name') + 
parser.add_argument('image_id') + parser = subparser.add_parser('db_wipe') + parser.add_argument('repo_path') + parser.add_argument('service_name') + parser.add_argument('image_id') + + cfg.custom_parser('action', actions) + cfg.parse_args(sys.argv) try: - conf = config.Config.load_paste_config('reddwarf', options, args) - config.setup_logging(options, conf) - - command_name = args.pop(0) - Commands(conf).execute(command_name, *args) + logging.setup(None) + + Commands().execute() sys.exit(0) - except TypeError: - print _("Possible wrong number of arguments supplied") - command_params = Commands(conf).params_of(command_name) - print "Usage: reddwarf-manage %s" % command_params + except TypeError as e: + print _("Possible wrong number of arguments supplied %s" % e) sys.exit(2) except Exception: print _("Command failed, please check log for more info") diff --git a/bin/reddwarf-server b/bin/reddwarf-server index 4072c26c93..9c8b43fe86 100755 --- a/bin/reddwarf-server +++ b/bin/reddwarf-server @@ -33,35 +33,32 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')): sys.path.insert(0, possible_topdir) -from reddwarf import version -from reddwarf.common import config +from reddwarf.common import cfg +from reddwarf.openstack.common import cfg as openstack_cfg +from reddwarf.openstack.common import service +from reddwarf.openstack.common import log as logging from reddwarf.common import wsgi from reddwarf.db import get_db_api +extra_opts = [ + openstack_cfg.BoolOpt('fork', + short='f', + default=False, + dest='fork'), + openstack_cfg.StrOpt('pid-file', + default='.pid'), +] -def create_options(parser): - """Sets up the CLI and config-file options - - :param parser: The option parser - :returns: None - - """ - parser.add_option('-p', '--port', dest="port", metavar="PORT", - type=int, - help="Port the Reddwarf API host listens on. 
" - "Default: %default") - parser.add_option("-f", '--fork', action="store_true", dest="fork") - parser.add_option('--pid_file', dest="pid_file", default=".pid") - config.add_common_options(parser) - config.add_log_options(parser) +CONF = cfg.CONF +CONF.register_cli_opts(extra_opts) -def run_server(app, port): +def run_server(): try: - server = wsgi.Server() - server.start(app, int(options.get('port') or conf['bind_port']), - conf['bind_host']) - server.wait() + get_db_api().configure_db(CONF) + server = wsgi.WSGIService('reddwarf', CONF.bind_port or 8779) + launcher = service.launch(server) + launcher.wait() except RuntimeError as error: import traceback print traceback.format_exc() @@ -69,23 +66,18 @@ def run_server(app, port): if __name__ == '__main__': - oparser = optparse.OptionParser(version="%%prog %s" - % version.version_string()) - create_options(oparser) - (options, args) = config.parse_options(oparser) - config.Config.load_paste_config('reddwarf', options, args) - conf, app = config.Config.load_paste_app('reddwarf', options, args) - get_db_api().configure_db(conf) - port = int(options.get('port') or conf['bind_port']) - if options['fork']: + cfg.parse_args(sys.argv) + logging.setup(None) + + if CONF.fork: pid = os.fork() if pid == 0: - run_server(app, port) + run_server() else: print("Starting server:%s" % pid) - pid_file = options.get('pid_file', '.pid') + pid_file = CONF.pid_file with open(pid_file, 'w') as f: f.write(str(pid)) else: - run_server(app, port) + run_server() diff --git a/bin/reddwarf-taskmanager b/bin/reddwarf-taskmanager index 42bec9d91a..69e4a766c7 100755 --- a/bin/reddwarf-taskmanager +++ b/bin/reddwarf-taskmanager @@ -36,26 +36,25 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')): sys.path.insert(0, possible_topdir) -from reddwarf import version -from reddwarf.common import config -from reddwarf.common import service +from 
reddwarf.common import cfg +from reddwarf.common import rpc +from reddwarf.openstack.common import cfg as openstack_cfg +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import service from reddwarf.db import get_db_api +CONF = cfg.CONF +CONF.register_opts([openstack_cfg.StrOpt('taskmanager_manager')]) if __name__ == '__main__': - parser = optparse.OptionParser(version="%%prog %s" - % version.version_string()) - config.add_common_options(parser) - config.add_log_options(parser) + cfg.parse_args(sys.argv) + logging.setup(None) - (options, args) = config.parse_options(parser) try: - conf, app = config.Config.load_paste_app('reddwarf-taskmanager', - options, args) - get_db_api().configure_db(conf) - server = service.Service.create(binary='reddwarf-taskmanager') - service.serve(server) - service.wait() + get_db_api().configure_db(CONF) + server = rpc.RpcService(manager=CONF.taskmanager_manager) + launcher = service.launch(server) + launcher.wait() except RuntimeError as error: import traceback print traceback.format_exc() diff --git a/etc/reddwarf/api-paste.ini b/etc/reddwarf/api-paste.ini new file mode 100644 index 0000000000..896c6c18a5 --- /dev/null +++ b/etc/reddwarf/api-paste.ini @@ -0,0 +1,42 @@ +[composite:reddwarf] +use = call:reddwarf.common.wsgi:versioned_urlmap +/: versions +/v1.0: reddwarfapi + +[app:versions] +paste.app_factory = reddwarf.versions:app_factory + +[pipeline:reddwarfapi] +pipeline = faultwrapper tokenauth authorization contextwrapper extensions reddwarfapp +#pipeline = debug extensions reddwarfapp + +[filter:extensions] +paste.filter_factory = reddwarf.common.extensions:factory + +[filter:tokenauth] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +service_protocol = http +service_host = 127.0.0.1 +service_port = 5000 +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http +auth_uri = http://127.0.0.1:5000/ +admin_token = be19c524ddc92109a224 +signing_dir = 
/tmp/keystone-signing-reddwarf + +[filter:authorization] +paste.filter_factory = reddwarf.common.auth:AuthorizationMiddleware.factory + +[filter:contextwrapper] +paste.filter_factory = reddwarf.common.wsgi:ContextMiddleware.factory + +[filter:faultwrapper] +paste.filter_factory = reddwarf.common.wsgi:FaultWrapper.factory + +[app:reddwarfapp] +paste.app_factory = reddwarf.common.api:app_factory + +#Add this filter to log request and response for debugging +[filter:debug] +paste.filter_factory = reddwarf.common.wsgi:Debug diff --git a/etc/reddwarf/reddwarf-guestagent.conf.sample b/etc/reddwarf/reddwarf-guestagent.conf.sample index c71259acf4..e87f18f13c 100644 --- a/etc/reddwarf/reddwarf-guestagent.conf.sample +++ b/etc/reddwarf/reddwarf-guestagent.conf.sample @@ -46,7 +46,7 @@ reddwarf_proxy_admin_tenant_name = admin reddwarf_auth_url = http://0.0.0.0:5000/v2.0 # Manager impl for the taskmanager -guestagent_manager=reddwarf.guestagent.manager.GuestManager +guestagent_manager=reddwarf.guestagent.manager.Manager # ============ kombu connection options ======================== @@ -55,39 +55,3 @@ rabbit_host=10.0.0.1 # ============ Logging information ============================= log_dir = /tmp/ log_file = logfile.txt - -[composite:reddwarf-guestagent] -use = call:reddwarf.common.wsgi:versioned_urlmap -/: versions -/v0.1: reddwarf-guestagent-app - -[app:versions] -paste.app_factory = reddwarf.versions:app_factory - -[pipeline:reddwarf-guestagent-app] -pipeline = guestagent-app -#pipeline = debug extensions reddwarfapp - -[filter:extensions] -paste.filter_factory = reddwarf.common.extensions:factory - -[filter:tokenauth] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = be19c524ddc92109a224 - -[filter:authorization] -paste.filter_factory = 
reddwarf.common.auth:AuthorizationMiddleware.factory - -[app:guestagent-app] -paste.app_factory = reddwarf.guestagent.service:app_factory - -#Add this filter to log request and response for debugging -[filter:debug] -paste.filter_factory = reddwarf.common.wsgi:Debug diff --git a/etc/reddwarf/reddwarf-taskmanager.conf.sample b/etc/reddwarf/reddwarf-taskmanager.conf.sample index 589a006d86..17ae040a7d 100644 --- a/etc/reddwarf/reddwarf-taskmanager.conf.sample +++ b/etc/reddwarf/reddwarf-taskmanager.conf.sample @@ -49,7 +49,7 @@ reddwarf_proxy_admin_tenant_name = admin reddwarf_auth_url = http://0.0.0.0:5000/v2.0 # Manager impl for the taskmanager -taskmanager_manager=reddwarf.taskmanager.manager.TaskManager +taskmanager_manager=reddwarf.taskmanager.manager.Manager # Reddwarf DNS reddwarf_dns_support = False @@ -75,6 +75,3 @@ notifier_queue_transport = memory # ============ Logging information ============================= #log_dir = /integration/report #log_file = reddwarf-taskmanager.log - -[app:reddwarf-taskmanager] -paste.app_factory = reddwarf.taskmanager.service:app_factory diff --git a/etc/reddwarf/reddwarf.conf.sample b/etc/reddwarf/reddwarf.conf.sample index 0509986816..720c10622c 100644 --- a/etc/reddwarf/reddwarf.conf.sample +++ b/etc/reddwarf/reddwarf.conf.sample @@ -88,44 +88,3 @@ notifier_queue_transport = memory #log_dir = /integration/report #log_file = reddwarf-api.log -[composite:reddwarf] -use = call:reddwarf.common.wsgi:versioned_urlmap -/: versions -/v1.0: reddwarfapi - -[app:versions] -paste.app_factory = reddwarf.versions:app_factory - -[pipeline:reddwarfapi] -pipeline = faultwrapper tokenauth authorization contextwrapper extensions reddwarfapp -#pipeline = debug extensions reddwarfapp - -[filter:extensions] -paste.filter_factory = reddwarf.common.extensions:factory - -[filter:tokenauth] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 
127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = be19c524ddc92109a224 - -[filter:authorization] -paste.filter_factory = reddwarf.common.auth:AuthorizationMiddleware.factory - -[filter:contextwrapper] -paste.filter_factory = reddwarf.common.wsgi:ContextMiddleware.factory - -[filter:faultwrapper] -paste.filter_factory = reddwarf.common.wsgi:FaultWrapper.factory - -[app:reddwarfapp] -paste.app_factory = reddwarf.common.api:app_factory - -#Add this filter to log request and response for debugging -[filter:debug] -paste.filter_factory = reddwarf.common.wsgi:Debug diff --git a/etc/reddwarf/reddwarf.conf.test b/etc/reddwarf/reddwarf.conf.test index 6624eb0131..706c083196 100644 --- a/etc/reddwarf/reddwarf.conf.test +++ b/etc/reddwarf/reddwarf.conf.test @@ -102,7 +102,7 @@ paste.app_factory = reddwarf.versions:app_factory [pipeline:reddwarfapi] pipeline = faultwrapper tokenauth authorization contextwrapper extensions reddwarfapp -#pipeline = debug extensions reddwarfapp +# pipeline = debug reddwarfapp [filter:extensions] paste.filter_factory = reddwarf.common.extensions:factory diff --git a/openstack-common.conf b/openstack-common.conf index 6a89f7c889..8d2daed0ed 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -1,7 +1,7 @@ [DEFAULT] # The list of modules to copy from openstack-common -modules=config,context,exception,extensions,utils,wsgi,setup +modules=middleware,notifier,rpc,authutils,cfg,context,eventlet_backdoor,exception,excutils,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,loopingcall,network_utils,pastedeploy,periodic_task,policy,processutils,service,setup,testutils,threadgroup,timeutils,utils,uuidutils,version,wsgi # The base module to hold the copy of openstack.common base=reddwarf diff --git a/reddwarf/common/api.py b/reddwarf/common/api.py index 2c9c85bf26..838780daed 100644 --- a/reddwarf/common/api.py +++ b/reddwarf/common/api.py @@ -12,17 +12,16 @@ # License 
for the specific language governing permissions and limitations # under the License. -import logging import routes -from reddwarf.openstack.common import rpc -from reddwarf.common import config from reddwarf.common import exception from reddwarf.common import wsgi -from reddwarf.versions import VersionsController +from reddwarf.extensions.mgmt.host.instance import service as hostservice from reddwarf.flavor.service import FlavorController from reddwarf.instance.service import InstanceController -from reddwarf.extensions.mgmt.host.instance import service as hostservice +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import rpc +from reddwarf.versions import VersionsController class API(wsgi.Router): diff --git a/reddwarf/common/auth.py b/reddwarf/common/auth.py index 662d037426..6bc548fc24 100644 --- a/reddwarf/common/auth.py +++ b/reddwarf/common/auth.py @@ -16,12 +16,13 @@ # under the License. import httplib2 -import logging import re import webob.exc import wsgi from reddwarf.common import exception +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) diff --git a/reddwarf/common/cfg.py b/reddwarf/common/cfg.py new file mode 100644 index 0000000000..e7616d755a --- /dev/null +++ b/reddwarf/common/cfg.py @@ -0,0 +1,105 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +"""Routines for configuring Reddwarf.""" + +from reddwarf.openstack.common import cfg + +common_opts = [ + cfg.StrOpt('sql_connection', + default='sqlite:///reddwarf_test.sqlite', + help='SQL Connection'), + cfg.IntOpt('sql_idle_timeout', default=3600), + cfg.BoolOpt('sql_query_log', default=False), + cfg.IntOpt('bind_port', default=8779), + cfg.StrOpt('api_extensions_path', default='', + help='Path to extensions'), + cfg.StrOpt('api_paste_config', + default="api-paste.ini", + help='File name for the paste.deploy config for reddwarf-api'), + cfg.BoolOpt('add_addresses', + default=False, + help='Whether to add IP addresses to the list operations'), + cfg.BoolOpt('reddwarf_volume_support', + default=False, + help='File name for the paste.deploy config for reddwarf-api'), + cfg.BoolOpt('reddwarf_must_use_volume', default=False), + cfg.ListOpt('admin_roles', default=[]), + cfg.StrOpt('remote_implementation', + default="real", + help='Remote implementation for using fake integration code'), + cfg.StrOpt('nova_compute_url', default='http://localhost:8774/v2'), + cfg.StrOpt('nova_volume_url', default='http://localhost:8776/v2'), + cfg.StrOpt('reddwarf_auth_url', default='http://0.0.0.0:5000/v2.0'), + cfg.StrOpt('host', default='0.0.0.0'), + cfg.IntOpt('report_interval', default=10), + cfg.IntOpt('periodic_interval', default=60), + cfg.BoolOpt('reddwarf_dns_support', default=False), + cfg.StrOpt('db_api_implementation', default='reddwarf.db.sqlalchemy.api'), + cfg.StrOpt('dns_driver', default='reddwarf.dns.driver.DnsDriver'), + cfg.StrOpt('dns_instance_entry_factory', + default='reddwarf.dns.driver.DnsInstanceEntryFactory'), + cfg.StrOpt('dns_hostname', default=""), + cfg.IntOpt('dns_account_id', default=0), + cfg.StrOpt('dns_auth_url', default=""), + cfg.StrOpt('dns_domain_name', default=""), + cfg.StrOpt('dns_username', default=""), + cfg.StrOpt('dns_passkey', 
default=""), + cfg.StrOpt('dns_management_base_url', default=""), + cfg.IntOpt('dns_ttl', default=300), + cfg.IntOpt('dns_domain_id', default=1), + cfg.IntOpt('users_page_size', default=20), + cfg.IntOpt('databases_page_size', default=20), + cfg.IntOpt('instances_page_size', default=20), + cfg.ListOpt('ignore_users', default=[]), + cfg.ListOpt('ignore_dbs', default=[]), + cfg.IntOpt('agent_call_low_timeout', default=5), + cfg.IntOpt('agent_call_high_timeout', default=60), + cfg.StrOpt('guest_id', default=None), + cfg.IntOpt('state_change_wait_time', default=2 * 60), + cfg.IntOpt('agent_heartbeat_time', default=10), + cfg.IntOpt('num_tries', default=3), + cfg.StrOpt('volume_fstype', default='ext3'), + cfg.StrOpt('format_options', default='-m 5'), + cfg.IntOpt('volume_format_timeout', default=120), + cfg.StrOpt('mount_options', default='defaults,noatime'), + cfg.IntOpt('max_instances_per_user', default=5), + cfg.IntOpt('max_accepted_volume_size', default=5), + cfg.StrOpt('taskmanager_queue', default='taskmanager'), + cfg.BoolOpt('use_nova_server_volume', default=False), + cfg.StrOpt('fake_mode_events', default='simulated'), + cfg.StrOpt('device_path', default='/dev/vdb'), + cfg.StrOpt('mount_point', default='/var/lib/mysql'), + cfg.StrOpt('service_type', default='mysql'), + cfg.StrOpt('block_device_mapping', default='vdb'), + cfg.IntOpt('server_delete_time_out', default=2), + cfg.IntOpt('volume_time_out', default=2), + cfg.IntOpt('reboot_time_out', default=60 * 2), +] + + +CONF = cfg.CONF +CONF.register_opts(common_opts) + + +def custom_parser(parsername, parser): + CONF.register_cli_opt(cfg.SubCommandOpt(parsername, handler=parser)) + + +def parse_args(argv, default_config_files=None): + cfg.CONF(args=argv[1:], + project='reddwarf', + default_config_files=default_config_files) diff --git a/reddwarf/common/config.py b/reddwarf/common/config.py deleted file mode 100644 index 9e4e2bdae9..0000000000 --- a/reddwarf/common/config.py +++ /dev/null @@ -1,153 +0,0 @@ -# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Routines for configuring Reddwarf.""" - -import re - -from reddwarf.openstack.common import config as openstack_config - - -parse_options = openstack_config.parse_options -add_log_options = openstack_config.add_log_options -add_common_options = openstack_config.add_common_options -setup_logging = openstack_config.setup_logging - - -def _to_list(value): - items = value.split(',') - trimmed_list = [item.strip() for item in items] - return trimmed_list - - -def get_option(options, option, **kwargs): - if option in options and kwargs.get('type', 'str') == 'list': - value = options[option] - return _to_list(value) - else: - return openstack_config.get_option(options, option, **kwargs) - - -class Config(object): - - instance = {} - - @classmethod - def load_paste_app(cls, *args, **kwargs): - conf, app = openstack_config.load_paste_app(*args, **kwargs) - cls.instance.update(conf) - return conf, app - - @classmethod - def load_paste_config(cls, *args, **kwargs): - conf_file, conf = openstack_config.load_paste_config(*args, **kwargs) - cls.instance.update(conf) - return conf - - @classmethod - def append_to_config_values(cls, *args): - config_file = openstack_config.find_config_file(*args) - if not config_file: - raise RuntimeError("Unable to locate any configuration file. 
" - "Cannot load application %s" % app_name) - # Now take the conf file values and append them to the current conf - with open(config_file, 'r') as conf: - for line in conf.readlines(): - m = re.match("\s*([^#]\S+)\s*=\s*(\S+)\s*", line) - if m: - cls.instance[m.group(1)] = m.group(2) - - @classmethod - def write_config_values(cls, *args, **kwargs): - # Pass in empty kwargs so it doesnt mess up the config find - config_file = openstack_config.find_config_file(*args) - if not config_file: - raise RuntimeError("Unable to locate any configuration file. " - "Cannot load application %s" % app_name) - with open(config_file, 'a') as conf: - for k, v in kwargs.items(): - # Start with newline to be sure its on a new line - conf.write("\n%s=%s" % (k, v)) - # Now append them to the cls instance - cls.append_to_config_values(*args) - - @classmethod - def get(cls, key, default=None, **kwargs): - # We always use a default, even if its None. - kwargs['default'] = default - return get_option(cls.instance, key, **kwargs) - - -def create_type_func(type): - @classmethod - def get(cls, key, default=None, **kwargs): - kwargs['type'] = type - return cls.get(key, default, **kwargs) - return get - -Config.get_bool = create_type_func('bool') -Config.get_float = create_type_func('float') -Config.get_int = create_type_func('int') -Config.get_list = create_type_func('list') -Config.get_str = create_type_func('str') -del create_type_func - - -class ConfigFacade(object): - """This class presents an interface usable by OpenStack Common modules. - - OpenStack common uses a new config interface where the values are - accessed as attributes directly. This presents the same interface - so we can interface with OS common modules while we change our config - stuff. 
- - """ - - value_info = {} - - def __init__(self, conf): - self.conf = conf - - def __getattr__(self, name): - if name == "register_opts": - def f(*args, **kwargs): - pass - return f - if name in self.value_info: - v = self.value_info[name] - return self.conf.get(name, **v) - return self.conf.get(name) - - -class OsCommonModule(object): - """Emulates the OpenStack Common cfg module.""" - - @property - def CONF(self): - return ConfigFacade(Config()) - - -def create_type_func(type): - @classmethod - def func(cls, name, default, help): - ConfigFacade.value_info[name] = {'default': default, 'type': type} - return func - -OsCommonModule.BoolOpt = create_type_func('bool') -OsCommonModule.IntOpt = create_type_func('int') -OsCommonModule.ListOpt = create_type_func('list') -OsCommonModule.StrOpt = create_type_func('str') -del create_type_func diff --git a/reddwarf/common/exception.py b/reddwarf/common/exception.py index 916e41b096..df23a48747 100644 --- a/reddwarf/common/exception.py +++ b/reddwarf/common/exception.py @@ -16,13 +16,15 @@ # under the License. 
"""I totally stole most of this from melange, thx guys!!!""" -import logging +from reddwarf.openstack.common import log as logging from reddwarf.openstack.common import exception as openstack_exception +from reddwarf.openstack.common import processutils +from reddwarf.openstack.common.gettextutils import _ from webob import exc ClientConnectionError = openstack_exception.ClientConnectionError -ProcessExecutionError = openstack_exception.ProcessExecutionError +ProcessExecutionError = processutils.ProcessExecutionError DatabaseMigrationError = openstack_exception.DatabaseMigrationError LOG = logging.getLogger(__name__) wrap_exception = openstack_exception.wrap_exception @@ -179,3 +181,13 @@ class ModelNotFoundError(NotFound): class UpdateGuestError(ReddwarfError): message = _("Failed to update instances") + + +class ConfigNotFound(NotFound): + + message = _("Config file not found") + + +class PasteAppNotFound(NotFound): + + message = _("Paste app not found.") diff --git a/reddwarf/common/excutils.py b/reddwarf/common/excutils.py index f635744369..86b9649cb4 100644 --- a/reddwarf/common/excutils.py +++ b/reddwarf/common/excutils.py @@ -19,7 +19,7 @@ Exception related utilities. 
""" import contextlib -import logging +from reddwarf.openstack.common import log as logging import sys import traceback diff --git a/reddwarf/common/extensions.py b/reddwarf/common/extensions.py index 7f1e70b738..5db446d418 100644 --- a/reddwarf/common/extensions.py +++ b/reddwarf/common/extensions.py @@ -17,9 +17,11 @@ import routes import webob.dec -import logging +from reddwarf.openstack.common import log as logging from reddwarf.openstack.common import extensions +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.common import cfg from reddwarf.common import wsgi LOG = logging.getLogger(__name__) @@ -27,12 +29,14 @@ LOG = logging.getLogger(__name__) ExtensionsDescriptor = extensions.ExtensionDescriptor ResourceExtension = extensions.ResourceExtension +CONF = cfg.CONF + class ReddwarfExtensionMiddleware(extensions.ExtensionMiddleware): - def __init__(self, application, config, ext_mgr=None): + def __init__(self, application, ext_mgr=None): ext_mgr = (ext_mgr or - ExtensionManager(config['api_extensions_path'])) + ExtensionManager(CONF.api_extensions_path)) mapper = routes.Mapper() # extended resources @@ -84,7 +88,6 @@ def factory(global_config, **local_config): """Paste factory.""" def _factory(app): extensions.DEFAULT_XMLNS = "http://docs.openstack.org/reddwarf" - ext_mgr = extensions.ExtensionManager( - global_config.get('api_extensions_path', '')) - return ReddwarfExtensionMiddleware(app, global_config, ext_mgr) + ext_mgr = extensions.ExtensionManager(CONF.api_extensions_path) + return ReddwarfExtensionMiddleware(app, ext_mgr) return _factory diff --git a/reddwarf/common/manager.py b/reddwarf/common/manager.py index c652f08d91..cab359ef0f 100644 --- a/reddwarf/common/manager.py +++ b/reddwarf/common/manager.py @@ -12,17 +12,18 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging +from reddwarf.openstack.common import log as logging from reddwarf.openstack.common import rpc -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import exception -CONFIG = config.Config +CONF = cfg.CONF LOG = logging.getLogger(__name__) +# TODO(hub_cap): upgrade this to use rpc.proxy.RpcProxy class ManagerAPI(object): """Extend this API for interacting with the common methods of managers""" @@ -30,7 +31,7 @@ class ManagerAPI(object): self.context = context def _cast(self, method_name, **kwargs): - if CONFIG.get("remote_implementation", "real") == "fake": + if CONF.remote_implementation == "fake": self._fake_cast(method_name, **kwargs) else: self._real_cast(method_name, **kwargs) diff --git a/reddwarf/common/models.py b/reddwarf/common/models.py index 523ccf9007..f80c950337 100644 --- a/reddwarf/common/models.py +++ b/reddwarf/common/models.py @@ -17,7 +17,7 @@ """Model classes that form the core of instances functionality.""" -import logging +from reddwarf.openstack.common import log as logging from reddwarf.common import remote diff --git a/reddwarf/common/remote.py b/reddwarf/common/remote.py index 4323e9985a..1119c67131 100644 --- a/reddwarf/common/remote.py +++ b/reddwarf/common/remote.py @@ -15,11 +15,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-from reddwarf.common import config +from reddwarf.common import cfg from novaclient.v1_1.client import Client -CONFIG = config.Config +CONF = cfg.CONF def create_dns_client(context): @@ -33,9 +33,8 @@ def create_guest_client(context, id): def create_nova_client(context): - COMPUTE_URL = CONFIG.get('nova_compute_url', 'http://localhost:8774/v2') - PROXY_AUTH_URL = CONFIG.get('reddwarf_auth_url', - 'http://0.0.0.0:5000/v2.0') + COMPUTE_URL = CONF.nova_compute_url + PROXY_AUTH_URL = CONF.reddwarf_auth_url client = Client(context.user, context.auth_tok, project_id=context.tenant, auth_url=PROXY_AUTH_URL) client.client.auth_token = context.auth_tok @@ -47,9 +46,8 @@ def create_nova_client(context): def create_nova_volume_client(context): # Quite annoying but due to a paste config loading bug. # TODO(hub-cap): talk to the openstack-common people about this - VOLUME_URL = CONFIG.get('nova_volume_url', 'http://localhost:8776/v2') - PROXY_AUTH_URL = CONFIG.get('reddwarf_auth_url', - 'http://0.0.0.0:5000/v2.0') + VOLUME_URL = CONF.nova_volume_url + PROXY_AUTH_URL = CONF.reddwarf_auth_url client = Client(context.user, context.auth_tok, project_id=context.tenant, auth_url=PROXY_AUTH_URL) client.client.auth_token = context.auth_tok @@ -58,7 +56,7 @@ def create_nova_volume_client(context): return client -if CONFIG.get("remote_implementation", "real") == "fake": +if CONF.remote_implementation == "fake": # Override the functions above with fakes. from reddwarf.tests.fakes.nova import fake_create_nova_client diff --git a/reddwarf/common/rpc.py b/reddwarf/common/rpc.py new file mode 100644 index 0000000000..3c1a94162f --- /dev/null +++ b/reddwarf/common/rpc.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""RPC helper for launching a rpc service.""" + +import inspect +import os + +from reddwarf.openstack.common import importutils +from reddwarf.openstack.common import loopingcall +from reddwarf.openstack.common.rpc import service as rpc_service +from reddwarf.common import cfg + +CONF = cfg.CONF + + +class RpcService(rpc_service.Service): + + def __init__(self, host=None, binary=None, topic=None, manager=None): + host = host or CONF.host + binary = binary or os.path.basename(inspect.stack()[-1][1]) + topic = topic or binary.rpartition('reddwarf-')[2] + self.manager_impl = importutils.import_object(manager) + self.report_interval = CONF.report_interval + super(RpcService, self).__init__(host, topic, + manager=self.manager_impl) + + def start(self): + super(RpcService, self).start() + # TODO(hub-cap): Currently the context is none... do we _need_ it here? 
+ pulse = loopingcall.LoopingCall(self.manager_impl.run_periodic_tasks, + context=None) + pulse.start(interval=self.report_interval, + initial_delay=self.report_interval) + pulse.wait() diff --git a/reddwarf/common/service.py b/reddwarf/common/service.py index 7528548496..860023311e 100644 --- a/reddwarf/common/service.py +++ b/reddwarf/common/service.py @@ -20,7 +20,6 @@ import functools import inspect import os -import logging import socket import traceback import weakref @@ -29,13 +28,17 @@ import eventlet import greenlet from eventlet import greenthread -from reddwarf.common import config -from reddwarf.openstack.common import rpc -from reddwarf.common import utils from reddwarf import version +from reddwarf.common import cfg +from reddwarf.common import utils +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import rpc +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) +CONF = cfg.CONF + class Launcher(object): """Launch one or more services and wait for them to complete.""" @@ -160,17 +163,17 @@ class Service(object): """ if not host: - host = config.Config.get('host') + host = CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary.rpartition('reddwarf-')[2] if not manager: - manager = config.Config.get('%s_manager' % topic, None) + manager = CONF._get('%s_manager' % topic) if not report_interval: - report_interval = config.Config.get('report_interval', 10) + report_interval = CONF.report_interval if not periodic_interval: - periodic_interval = config.Config.get('periodic_interval', 60) + periodic_interval = CONF.periodic_interval service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) diff --git a/reddwarf/common/utils.py b/reddwarf/common/utils.py index 6125f044c2..118c93e606 100644 --- a/reddwarf/common/utils.py +++ b/reddwarf/common/utils.py @@ -18,7 +18,6 @@ import datetime import inspect -import logging 
import re import signal import sys @@ -32,15 +31,21 @@ from eventlet import semaphore from eventlet.green import subprocess from eventlet.timeout import Timeout -from reddwarf.openstack.common import utils as openstack_utils from reddwarf.common import exception +from reddwarf.openstack.common import importutils +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import processutils +from reddwarf.openstack.common import timeutils +from reddwarf.openstack.common import utils as openstack_utils +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) -import_class = openstack_utils.import_class -import_object = openstack_utils.import_object +import_class = importutils.import_class +import_object = importutils.import_object +import_module = importutils.import_module bool_from_string = openstack_utils.bool_from_string -execute = openstack_utils.execute -isotime = openstack_utils.isotime +execute = processutils.execute +isotime = timeutils.isotime def create_method_args_string(*args, **kwargs): diff --git a/reddwarf/common/wsgi.py b/reddwarf/common/wsgi.py index 69c539e85a..0c366c56e8 100644 --- a/reddwarf/common/wsgi.py +++ b/reddwarf/common/wsgi.py @@ -14,27 +14,32 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-"""I totally stole most of this from melange, thx guys!!!""" +"""Wsgi helper utilities for reddwarf""" import eventlet.wsgi -import logging +import os import paste.urlmap import re import traceback import webob import webob.dec import webob.exc +from paste import deploy from xml.dom import minidom from reddwarf.common import context as rd_context -from reddwarf.common import config from reddwarf.common import exception from reddwarf.common import utils +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import pastedeploy +from reddwarf.openstack.common import service from reddwarf.openstack.common import wsgi as openstack_wsgi +from reddwarf.openstack.common import log as logging +from reddwarf.common import cfg + CONTEXT_KEY = 'reddwarf.context' Router = openstack_wsgi.Router -Server = openstack_wsgi.Server Debug = openstack_wsgi.Debug Middleware = openstack_wsgi.Middleware JSONDictSerializer = openstack_wsgi.JSONDictSerializer @@ -46,6 +51,8 @@ eventlet.patcher.monkey_patch(all=False, socket=True) LOG = logging.getLogger('reddwarf.common.wsgi') +CONF = cfg.CONF + XMLNS = 'http://docs.openstack.org/database/api/v1.0' CUSTOM_PLURALS_METADATA = {'databases': '', 'users': ''} CUSTOM_SERIALIZER_METADATA = { @@ -100,6 +107,28 @@ def versioned_urlmap(*args, **kwargs): return VersionedURLMap(urlmap) +def launch(app_name, port, paste_config_file, data={}, + host='0.0.0.0', backlog=128, threads=1000): + """Launches a wsgi server based on the passed in paste_config_file. + + Launch provides a easy way to create a paste app from the config + file and launch it via the service launcher. It takes care of + all of the plumbing. The only caveat is that the paste_config_file + must be a file that paste.deploy can find and handle. There is + a helper method in cfg.py that finds files. 
+ + Example: + conf_file = CONF.find_file(CONF.api_paste_config) + launcher = wsgi.launch('myapp', CONF.bind_port, conf_file) + launcher.wait() + + """ + app = pastedeploy.paste_deploy_app(paste_config_file, app_name, data) + server = openstack_wsgi.Service(app, port, host=host, + backlog=backlog, threads=threads) + return service.launch(server) + + class VersionedURLMap(object): def __init__(self, urlmap): @@ -303,10 +332,8 @@ class Controller(object): } def __init__(self): - self.add_addresses = utils.bool_from_string( - config.Config.get('add_addresses', 'False')) - self.add_volumes = utils.bool_from_string( - config.Config.get('reddwarf_volume_support', 'False')) + self.add_addresses = CONF.add_addresses + self.add_volumes = CONF.reddwarf_volume_support def create_resource(self): serializer = ReddwarfResponseSerializer( @@ -505,7 +532,7 @@ class Fault(webob.exc.HTTPException): class ContextMiddleware(openstack_wsgi.Middleware): def __init__(self, application): - self.admin_roles = config.Config.get_list('admin_roles', []) + self.admin_roles = CONF.admin_roles super(ContextMiddleware, self).__init__(application) def _extract_limits(self, params): diff --git a/reddwarf/db/__init__.py b/reddwarf/db/__init__.py index 4611714589..63b5091292 100644 --- a/reddwarf/db/__init__.py +++ b/reddwarf/db/__init__.py @@ -18,15 +18,15 @@ import optparse from reddwarf.common import utils -from reddwarf.common import config +from reddwarf.common import cfg +CONF = cfg.CONF -db_api_opt = config.Config.get("db_api_implementation", - "reddwarf.db.sqlalchemy.api") +db_api_opt = CONF.db_api_implementation def get_db_api(): - return utils.import_object(db_api_opt) + return utils.import_module(db_api_opt) class Query(object): diff --git a/reddwarf/db/models.py b/reddwarf/db/models.py index 2a26d300a2..8390ef8e18 100644 --- a/reddwarf/db/models.py +++ b/reddwarf/db/models.py @@ -12,15 +12,14 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging - from reddwarf.db import get_db_api from reddwarf.db import db_query from reddwarf.common import exception from reddwarf.common import models from reddwarf.common import pagination from reddwarf.common import utils - +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) diff --git a/reddwarf/db/sqlalchemy/migrate_repo/schema.py b/reddwarf/db/sqlalchemy/migrate_repo/schema.py index 6970dac698..8c67e46fb4 100644 --- a/reddwarf/db/sqlalchemy/migrate_repo/schema.py +++ b/reddwarf/db/sqlalchemy/migrate_repo/schema.py @@ -17,7 +17,7 @@ """Various conveniences used for migration scripts.""" -import logging +from reddwarf.openstack.common import log as logging import sqlalchemy.types diff --git a/reddwarf/db/sqlalchemy/migration.py b/reddwarf/db/sqlalchemy/migration.py index dde8b86fe2..d653e53cbb 100644 --- a/reddwarf/db/sqlalchemy/migration.py +++ b/reddwarf/db/sqlalchemy/migration.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -import logging +from reddwarf.openstack.common import log as logging import os from migrate.versioning import api as versioning_api diff --git a/reddwarf/db/sqlalchemy/session.py b/reddwarf/db/sqlalchemy/session.py index e1cd0eef04..3b197c5224 100644 --- a/reddwarf/db/sqlalchemy/session.py +++ b/reddwarf/db/sqlalchemy/session.py @@ -16,12 +16,13 @@ # under the License. 
import contextlib -import logging from sqlalchemy import create_engine from sqlalchemy import MetaData from sqlalchemy.orm import sessionmaker -from reddwarf.common import config +from reddwarf.common import cfg +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ from reddwarf.db.sqlalchemy import mappers _ENGINE = None @@ -30,9 +31,10 @@ _MAKER = None LOG = logging.getLogger(__name__) +CONF = cfg.CONF + def configure_db(options, models_mapper=None): - configure_sqlalchemy_log(options) global _ENGINE if not _ENGINE: _ENGINE = _create_engine(options) @@ -57,26 +59,10 @@ def configure_db(options, models_mapper=None): mappers.map(_ENGINE, models) -def configure_sqlalchemy_log(options): - debug = config.get_option(options, 'debug', type='bool', default=False) - verbose = config.get_option(options, 'verbose', type='bool', default=False) - logger = logging.getLogger('sqlalchemy.engine') - if debug: - logger.setLevel(logging.DEBUG) - elif verbose: - logger.setLevel(logging.INFO) - - def _create_engine(options): engine_args = { - "pool_recycle": config.get_option(options, - 'sql_idle_timeout', - type='int', - default=3600), - "echo": config.get_option(options, - 'sql_query_log', - type='bool', - default=False), + "pool_recycle": CONF.sql_idle_timeout, + "echo": CONF.sql_query_log } LOG.info(_("Creating SQLAlchemy engine with args: %s") % engine_args) return create_engine(options['sql_connection'], **engine_args) diff --git a/reddwarf/dns/manager.py b/reddwarf/dns/manager.py index 3675336802..e41d616ad2 100644 --- a/reddwarf/dns/manager.py +++ b/reddwarf/dns/manager.py @@ -18,13 +18,15 @@ """ Dns manager. 
""" -import logging +from reddwarf.openstack.common import log as logging from reddwarf.common import utils -from reddwarf.common import config +from reddwarf.common import cfg LOG = logging.getLogger(__name__) +CONF = cfg.CONF + class DnsManager(object): """Handles associating DNS to and from IPs.""" @@ -32,16 +34,12 @@ class DnsManager(object): def __init__(self, dns_driver=None, dns_instance_entry_factory=None, *args, **kwargs): if not dns_driver: - dns_driver = config.Config.get( - "dns_driver", - "reddwarf.dns.driver.DnsDriver") + dns_driver = CONF.dns_driver dns_driver = utils.import_object(dns_driver) self.driver = dns_driver() if not dns_instance_entry_factory: - dns_instance_entry_factory = config.Config.get( - 'dns_instance_entry_factory', - 'reddwarf.dns.driver.DnsInstanceEntryFactory') + dns_instance_entry_factory = CONF.dns_instance_entry_factory entry_factory = utils.import_object(dns_instance_entry_factory) self.entry_factory = entry_factory() diff --git a/reddwarf/dns/models.py b/reddwarf/dns/models.py index 02a57d287b..97cb6ee945 100644 --- a/reddwarf/dns/models.py +++ b/reddwarf/dns/models.py @@ -19,11 +19,12 @@ Model classes that map instance Ip to dns record. 
""" -import logging from reddwarf.db import get_db_api from reddwarf.common import exception from reddwarf.common.models import ModelBase +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) diff --git a/reddwarf/dns/rsdns/driver.py b/reddwarf/dns/rsdns/driver.py index 7de875d80a..790039e3c2 100644 --- a/reddwarf/dns/rsdns/driver.py +++ b/reddwarf/dns/rsdns/driver.py @@ -23,8 +23,8 @@ __version__ = '2.4' import hashlib -import logging -from reddwarf.common import config +from reddwarf.openstack.common import log as logging +from reddwarf.common import cfg from reddwarf.common import exception from reddwarf.common.exception import NotFound from reddwarf.dns.models import DnsRecord @@ -33,15 +33,17 @@ from rsdns.client.future import RsDnsError from reddwarf.dns.driver import DnsEntry -DNS_HOSTNAME = config.Config.get("dns_hostname", "") -DNS_ACCOUNT_ID = config.Config.get("dns_account_id", 0) -DNS_AUTH_URL = config.Config.get("dns_auth_url", "") -DNS_DOMAIN_NAME = config.Config.get("dns_domain_name", "") -DNS_USERNAME = config.Config.get("dns_username", "") -DNS_PASSKEY = config.Config.get("dns_passkey", "") -DNS_MANAGEMENT_BASE_URL = config.Config.get("dns_management_base_url", "") -DNS_TTL = config.Config.get("dns_ttl", 300) -DNS_DOMAIN_ID = config.Config.get("dns_domain_id", 1) +CONF = cfg.CONF + +DNS_HOSTNAME = CONF.dns_hostname +DNS_ACCOUNT_ID = CONF.dns_account_id +DNS_AUTH_URL = CONF.dns_auth_url +DNS_DOMAIN_NAME = CONF.dns_domain_name +DNS_USERNAME = CONF.dns_username +DNS_PASSKEY = CONF.dns_passkey +DNS_MANAGEMENT_BASE_URL = CONF.dns_management_base_url +DNS_TTL = CONF.dns_ttl +DNS_DOMAIN_ID = CONF.dns_domain_id LOG = logging.getLogger(__name__) diff --git a/reddwarf/extensions/account.py b/reddwarf/extensions/account.py index a5807a8dfc..89184f7005 100644 --- a/reddwarf/extensions/account.py +++ b/reddwarf/extensions/account.py @@ -13,7 +13,7 @@ # License for the 
specific language governing permissions and limitations # under the License. -import logging +from reddwarf.openstack.common import log as logging from reddwarf.common import extensions from reddwarf.common import wsgi diff --git a/reddwarf/extensions/account/models.py b/reddwarf/extensions/account/models.py index 88d4a29c05..5d283710cd 100644 --- a/reddwarf/extensions/account/models.py +++ b/reddwarf/extensions/account/models.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -import logging +from reddwarf.openstack.common import log as logging from reddwarf.common.remote import create_nova_client from reddwarf.instance.models import DBInstance diff --git a/reddwarf/extensions/account/service.py b/reddwarf/extensions/account/service.py index fa496287e4..39a7275040 100644 --- a/reddwarf/extensions/account/service.py +++ b/reddwarf/extensions/account/service.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -import logging +from reddwarf.openstack.common import log as logging from novaclient import exceptions as nova_exceptions @@ -25,7 +25,7 @@ from reddwarf.common.remote import create_nova_client from reddwarf.extensions.account import models from reddwarf.extensions.account import views from reddwarf.instance.models import DBInstance - +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) diff --git a/reddwarf/extensions/mgmt.py b/reddwarf/extensions/mgmt.py index 7dfd60e46b..21b34a2002 100644 --- a/reddwarf/extensions/mgmt.py +++ b/reddwarf/extensions/mgmt.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging +from reddwarf.openstack.common import log as logging from reddwarf.common import extensions from reddwarf.common import wsgi diff --git a/reddwarf/extensions/mgmt/host/instance/service.py b/reddwarf/extensions/mgmt/host/instance/service.py index 82e408c50e..562a363347 100644 --- a/reddwarf/extensions/mgmt/host/instance/service.py +++ b/reddwarf/extensions/mgmt/host/instance/service.py @@ -15,11 +15,12 @@ # License for the specific language governing permissions and limitations # under the License. -import logging from reddwarf.common import exception from reddwarf.common import wsgi from reddwarf.extensions.mgmt.host import models +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) diff --git a/reddwarf/extensions/mgmt/host/models.py b/reddwarf/extensions/mgmt/host/models.py index 3c0ab7c7eb..135a8826f3 100644 --- a/reddwarf/extensions/mgmt/host/models.py +++ b/reddwarf/extensions/mgmt/host/models.py @@ -19,11 +19,10 @@ Model classes that extend the instances functionality for MySQL instances. """ -import logging +from reddwarf.openstack.common import log as logging from reddwarf import db -from reddwarf.common import config from reddwarf.common import exception from reddwarf.common import utils from reddwarf.instance.models import DBInstance @@ -35,7 +34,6 @@ from reddwarf.common.remote import create_nova_client from novaclient import exceptions as nova_exceptions -CONFIG = config.Config LOG = logging.getLogger(__name__) diff --git a/reddwarf/extensions/mgmt/host/service.py b/reddwarf/extensions/mgmt/host/service.py index b15a2e3e71..475cca03e7 100644 --- a/reddwarf/extensions/mgmt/host/service.py +++ b/reddwarf/extensions/mgmt/host/service.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging import webob.exc from reddwarf.common import exception @@ -25,6 +24,8 @@ from reddwarf.extensions.mgmt.host import models from reddwarf.extensions.mgmt.host import views from reddwarf.extensions.mysql import models as mysql_models from reddwarf.instance.service import InstanceController +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) diff --git a/reddwarf/extensions/mgmt/instances/models.py b/reddwarf/extensions/mgmt/instances/models.py index 4a887ecddf..0143932089 100644 --- a/reddwarf/extensions/mgmt/instances/models.py +++ b/reddwarf/extensions/mgmt/instances/models.py @@ -12,9 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. -import logging +from reddwarf.openstack.common import log as logging -from reddwarf.common import config from reddwarf.common.remote import create_nova_client from reddwarf.common.remote import create_nova_volume_client from reddwarf.instance import models as imodels @@ -23,7 +22,6 @@ from reddwarf.instance import models as instance_models from reddwarf.extensions.mysql import models as mysql_models -CONFIG = config.Config LOG = logging.getLogger(__name__) diff --git a/reddwarf/extensions/mgmt/instances/service.py b/reddwarf/extensions/mgmt/instances/service.py index 957a41964e..79fa461976 100644 --- a/reddwarf/extensions/mgmt/instances/service.py +++ b/reddwarf/extensions/mgmt/instances/service.py @@ -15,22 +15,23 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging import webob.exc from novaclient import exceptions as nova_exceptions from reddwarf.common import exception from reddwarf.common import wsgi -from reddwarf.extensions.mgmt.instances import models -from reddwarf.extensions.mgmt.instances.views import DiagnosticsView -from reddwarf.extensions.mgmt.instances.views import HwInfoView -from reddwarf.instance import models as instance_models -from reddwarf.extensions.mgmt.instances import views -from reddwarf.extensions.mysql import models as mysql_models -from reddwarf.instance.service import InstanceController from reddwarf.common.auth import admin_context from reddwarf.common.remote import create_nova_client +from reddwarf.instance import models as instance_models +from reddwarf.extensions.mgmt.instances import models +from reddwarf.extensions.mgmt.instances import views +from reddwarf.extensions.mgmt.instances.views import DiagnosticsView +from reddwarf.extensions.mgmt.instances.views import HwInfoView +from reddwarf.extensions.mysql import models as mysql_models +from reddwarf.instance.service import InstanceController +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) diff --git a/reddwarf/extensions/mgmt/volume/models.py b/reddwarf/extensions/mgmt/volume/models.py index 37aad94fe3..7ccc2608e0 100644 --- a/reddwarf/extensions/mgmt/volume/models.py +++ b/reddwarf/extensions/mgmt/volume/models.py @@ -19,7 +19,7 @@ Model classes that extend the instances functionality for volumes. 
""" -import logging +from reddwarf.openstack.common import log as logging from reddwarf.common.remote import create_nova_volume_client diff --git a/reddwarf/extensions/mgmt/volume/service.py b/reddwarf/extensions/mgmt/volume/service.py index 9353feb741..dad84348e6 100644 --- a/reddwarf/extensions/mgmt/volume/service.py +++ b/reddwarf/extensions/mgmt/volume/service.py @@ -15,14 +15,15 @@ # License for the specific language governing permissions and limitations # under the License. -import logging import webob.exc -from reddwarf.common.auth import admin_context from reddwarf.common import exception from reddwarf.common import wsgi +from reddwarf.common.auth import admin_context from reddwarf.extensions.mgmt.volume import models from reddwarf.extensions.mgmt.volume import views +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) diff --git a/reddwarf/extensions/mysql.py b/reddwarf/extensions/mysql.py index 98b2e7618f..f05dbfa81f 100644 --- a/reddwarf/extensions/mysql.py +++ b/reddwarf/extensions/mysql.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -import logging +from reddwarf.openstack.common import log as logging from reddwarf.common import extensions from reddwarf.common import wsgi diff --git a/reddwarf/extensions/mysql/models.py b/reddwarf/extensions/mysql/models.py index 185f2b691e..e5b41db372 100644 --- a/reddwarf/extensions/mysql/models.py +++ b/reddwarf/extensions/mysql/models.py @@ -19,17 +19,17 @@ Model classes that extend the instances functionality for MySQL instances. 
""" -import logging - -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import exception from reddwarf.common import utils -from reddwarf.db import get_db_api -from reddwarf.instance import models as base_models -from reddwarf.guestagent.db import models as guest_models from reddwarf.common.remote import create_guest_client +from reddwarf.db import get_db_api +from reddwarf.guestagent.db import models as guest_models +from reddwarf.instance import models as base_models +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ -CONFIG = config.Config +CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -150,7 +150,7 @@ def load_via_context(cls, context, instance_id): class Users(object): - DEFAULT_LIMIT = int(CONFIG.get('users_page_size', '20')) + DEFAULT_LIMIT = CONF.users_page_size @classmethod def load(cls, context, instance_id): @@ -163,7 +163,7 @@ class Users(object): marker=marker, include_marker=include_marker) model_users = [] - ignore_users = CONFIG.get_list('ignore_users', []) + ignore_users = CONF.ignore_users for user in user_list: mysql_user = guest_models.MySQLUser() mysql_user.deserialize(user) @@ -213,7 +213,7 @@ class Schema(object): class Schemas(object): - DEFAULT_LIMIT = int(CONFIG.get('databases_page_size', '20')) + DEFAULT_LIMIT = CONF.databases_page_size @classmethod def load(cls, context, instance_id): @@ -226,7 +226,7 @@ class Schemas(object): marker=marker, include_marker=include_marker) model_schemas = [] - ignore_dbs = CONFIG.get_list('ignore_dbs', []) + ignore_dbs = CONF.ignore_dbs for schema in schemas: mysql_schema = guest_models.MySQLDatabase() mysql_schema.deserialize(schema) diff --git a/reddwarf/extensions/mysql/service.py b/reddwarf/extensions/mysql/service.py index 65133d571a..62ca24c5cb 100644 --- a/reddwarf/extensions/mysql/service.py +++ b/reddwarf/extensions/mysql/service.py @@ -15,17 +15,18 @@ # License for the specific language 
governing permissions and limitations # under the License. -import logging import webob.exc from reddwarf.common import exception from reddwarf.common import pagination from reddwarf.common import wsgi -from reddwarf.guestagent.db import models as guest_models from reddwarf.extensions.mysql.common import populate_databases from reddwarf.extensions.mysql.common import populate_users from reddwarf.extensions.mysql import models from reddwarf.extensions.mysql import views +from reddwarf.guestagent.db import models as guest_models +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) @@ -42,7 +43,7 @@ class RootController(wsgi.Controller): is_root_enabled = models.Root.load(context, instance_id) return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200) - def create(self, req, body, tenant_id, instance_id): + def create(self, req, tenant_id, instance_id): """ Enable the root user for the db instance """ LOG.info(_("Enabling root for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) diff --git a/reddwarf/guestagent/api.py b/reddwarf/guestagent/api.py index ccf2e63b52..4af573b904 100644 --- a/reddwarf/guestagent/api.py +++ b/reddwarf/guestagent/api.py @@ -19,36 +19,40 @@ Handles all request to the Platform or Guest VM """ -import logging - from eventlet import Timeout -from reddwarf.openstack.common import rpc -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import exception from reddwarf.common import utils from reddwarf.guestagent import models as agent_models +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import rpc +from reddwarf.openstack.common.rpc import proxy +from reddwarf.openstack.common.gettextutils import _ - +CONF = cfg.CONF LOG = logging.getLogger(__name__) -AGENT_LOW_TIMEOUT = int(config.Config.get('agent_call_low_timeout', 5)) -AGENT_HIGH_TIMEOUT = 
int(config.Config.get('agent_call_high_timeout', 60)) +AGENT_LOW_TIMEOUT = CONF.agent_call_low_timeout +AGENT_HIGH_TIMEOUT = CONF.agent_call_high_timeout +RPC_API_VERSION = "1.0" -class API(object): +class API(proxy.RpcProxy): """API for interacting with the guest manager.""" def __init__(self, context, id): self.context = context self.id = id + super(API, self).__init__(self._get_routing_key(), + RPC_API_VERSION) def _call(self, method_name, timeout_sec, **kwargs): LOG.debug("Calling %s" % method_name) - - timeout = Timeout(timeout_sec) try: - result = rpc.call(self.context, self._get_routing_key(), - {'method': method_name, 'args': kwargs}) + result = self.call(self.context, + self.make_msg(method_name, **kwargs), + timeout=timeout_sec) + LOG.debug("Result is %s" % result) return result except Exception as e: @@ -59,24 +63,30 @@ class API(object): raise else: raise exception.GuestTimeout() - finally: - timeout.cancel() def _cast(self, method_name, **kwargs): + LOG.debug("Casting %s" % method_name) try: - rpc.cast(self.context, self._get_routing_key(), - {'method': method_name, 'args': kwargs}) + self.cast(self.context, self.make_msg(method_name, **kwargs), + topic=kwargs.get('topic'), + version=kwargs.get('version')) except Exception as e: LOG.error(e) raise exception.GuestError(original_message=str(e)) def _cast_with_consumer(self, method_name, **kwargs): try: - rpc.cast_with_consumer(self.context, self._get_routing_key(), - {'method': method_name, 'args': kwargs}) + conn = rpc.create_connection(new=True) + conn.create_consumer(self._get_routing_key(), None, fanout=False) except Exception as e: LOG.error(e) raise exception.GuestError(original_message=str(e)) + finally: + if conn: + conn.close() + + # leave the cast call out of the hackity consumer create + self._cast(method_name, **kwargs) def delete_queue(self): """Deletes the queue.""" @@ -164,9 +174,8 @@ class API(object): as a database container""" LOG.debug(_("Sending the call to prepare the Guest")) 
self._cast_with_consumer( - "prepare", databases=databases, - memory_mb=memory_mb, users=users, device_path=device_path, - mount_point=mount_point) + "prepare", databases=databases, memory_mb=memory_mb, + users=users, device_path=device_path, mount_point=mount_point) def restart(self): """Restart the MySQL server.""" diff --git a/reddwarf/guestagent/db/models.py b/reddwarf/guestagent/db/models.py index 5950782a28..9795f1fe5d 100644 --- a/reddwarf/guestagent/db/models.py +++ b/reddwarf/guestagent/db/models.py @@ -18,7 +18,9 @@ import re import string -from reddwarf.common import config +from reddwarf.common import cfg + +CONF = cfg.CONF class Base(object): @@ -32,7 +34,7 @@ class Base(object): class MySQLDatabase(Base): """Represents a Database and its properties""" - _ignore_dbs = config.Config.get_list("ignore_dbs", []) + _ignore_dbs = CONF.ignore_dbs # Defaults __charset__ = "utf8" @@ -343,7 +345,7 @@ class MySQLUser(Base): """Represents a MySQL User and its associated properties""" not_supported_chars = re.compile("^\s|\s$|'|\"|;|`|,|/|\\\\") - _ignore_users = config.Config.get_list("ignore_users", []) + _ignore_users = CONF.ignore_users def __init__(self): self._name = None diff --git a/reddwarf/guestagent/dbaas.py b/reddwarf/guestagent/dbaas.py index 66937520af..d090f6842d 100644 --- a/reddwarf/guestagent/dbaas.py +++ b/reddwarf/guestagent/dbaas.py @@ -25,8 +25,6 @@ handles RPC calls relating to Platform specific operations. 
""" - -import logging import os import pexpect import re @@ -43,12 +41,15 @@ from sqlalchemy.sql.expression import text from reddwarf import db from reddwarf.common.exception import GuestError from reddwarf.common.exception import ProcessExecutionError -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import utils from reddwarf.guestagent.db import models from reddwarf.guestagent.volume import VolumeDevice from reddwarf.guestagent.query import Query +from reddwarf.guestagent import pkg from reddwarf.instance import models as rd_models +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ ADMIN_USER_NAME = "os_admin" @@ -66,7 +67,7 @@ TMP_MYCNF = "/tmp/my.cnf.tmp" DBAAS_MYCNF = "/etc/dbaas/my.cnf/my.cnf.%dM" MYSQL_BASE_DIR = "/var/lib/mysql" -CONFIG = config.Config +CONF = cfg.CONF INCLUDE_MARKER_OPERATORS = { True: ">=", False: ">" @@ -229,7 +230,7 @@ class MySqlAppStatus(object): @staticmethod def _load_status(): """Loads the status from the database.""" - id = config.Config.get('guest_id') + id = CONF.guest_id return rd_models.InstanceServiceStatus.find_by(instance_id=id) def set_status(self, status): @@ -497,90 +498,6 @@ class MySqlAdmin(object): return users, next_marker -class DBaaSAgent(object): - """ Database as a Service Agent Controller """ - - def __init__(self): - self.status = MySqlAppStatus.get() - - def begin_mysql_restart(self): - self.restart_mode = True - - def create_database(self, databases): - return MySqlAdmin().create_database(databases) - - def create_user(self, users): - MySqlAdmin().create_user(users) - - def delete_database(self, database): - return MySqlAdmin().delete_database(database) - - def delete_user(self, user): - MySqlAdmin().delete_user(user) - - def list_databases(self, limit=None, marker=None, include_marker=False): - return MySqlAdmin().list_databases(limit, marker, include_marker) - - def list_users(self, limit=None, marker=None, 
include_marker=False): - return MySqlAdmin().list_users(limit, marker, include_marker) - - def enable_root(self): - return MySqlAdmin().enable_root() - - def is_root_enabled(self): - return MySqlAdmin().is_root_enabled() - - def prepare(self, databases, memory_mb, users, device_path=None, - mount_point=None): - """Makes ready DBAAS on a Guest container.""" - from reddwarf.guestagent.pkg import PkgAgent - if not isinstance(self, PkgAgent): - raise TypeError("This must also be an instance of Pkg agent.") - pkg = self # Python cast. - self.status.begin_mysql_install() - # status end_mysql_install set with install_and_secure() - app = MySqlApp(self.status) - restart_mysql = False - if device_path: - device = VolumeDevice(device_path) - device.format() - if app.is_installed(pkg): - #stop and do not update database - app.stop_mysql() - restart_mysql = True - #rsync exiting data - device.migrate_data(MYSQL_BASE_DIR) - #mount the volume - device.mount(mount_point) - LOG.debug(_("Mounted the volume.")) - #check mysql was installed and stopped - if restart_mysql: - app.start_mysql() - app.install_and_secure(pkg, memory_mb) - LOG.info("Creating initial databases and users following successful " - "prepare.") - self.create_database(databases) - self.create_user(users) - LOG.info('"prepare" call has finished.') - - def restart(self): - app = MySqlApp(self.status) - app.restart() - - def start_mysql_with_conf_changes(self, updated_memory_size): - app = MySqlApp(self.status) - pkg = self # Python cast. 
- app.start_mysql_with_conf_changes(pkg, updated_memory_size) - - def stop_mysql(self): - app = MySqlApp(self.status) - app.stop_mysql() - - def update_status(self): - """Update the status of the MySQL service""" - MySqlAppStatus.get().update() - - class KeepAliveConnection(interfaces.PoolListener): """ A connection pool listener that ensures live connections are returned @@ -610,8 +527,7 @@ class MySqlApp(object): def __init__(self, status): """ By default login with root no password for initial setup. """ - self.state_change_wait_time = int(config.Config.get( - 'state_change_wait_time', 2 * 60)) + self.state_change_wait_time = CONF.state_change_wait_time self.status = status def _create_admin_user(self, client, password): @@ -639,13 +555,13 @@ class MySqlApp(object): WHERE User='root';""") client.execute(t, pwd=generate_random_password()) - def install_and_secure(self, pkg, memory_mb): + def install_and_secure(self, memory_mb): """Prepare the guest machine with a secure mysql server installation""" LOG.info(_("Preparing Guest as MySQL Server")) #TODO(tim.simpson): Check that MySQL is not already installed. self.status.begin_mysql_install() - self._install_mysql(pkg) + self._install_mysql() LOG.info(_("Generating root password...")) admin_password = generate_random_password() @@ -658,13 +574,13 @@ class MySqlApp(object): self._create_admin_user(client, admin_password) self.stop_mysql() - self._write_mycnf(pkg, memory_mb, admin_password) + self._write_mycnf(memory_mb, admin_password) self.start_mysql() self.status.end_install_or_restart() LOG.info(_("Dbaas install_and_secure complete.")) - def _install_mysql(self, pkg): + def _install_mysql(self): """Install mysql server. 
The current version is 5.1""" LOG.debug(_("Installing mysql server")) pkg.pkg_install(self.MYSQL_PACKAGE_VERSION, self.TIME_OUT) @@ -749,7 +665,7 @@ class MySqlApp(object): if "No such file or directory" not in str(pe): raise - def _write_mycnf(self, pkg, update_memory_mb, admin_password): + def _write_mycnf(self, update_memory_mb, admin_password): """ Install the set of mysql my.cnf templates from dbaas-mycnf package. The package generates a template suited for the current @@ -812,17 +728,17 @@ class MySqlApp(object): self.status.end_install_or_restart() raise RuntimeError("Could not start MySQL!") - def start_mysql_with_conf_changes(self, pkg, updated_memory_mb): + def start_mysql_with_conf_changes(self, updated_memory_mb): LOG.info(_("Starting mysql with conf changes...")) if self.status.is_mysql_running: LOG.error(_("Cannot execute start_mysql_with_conf_changes because " "MySQL state == %s!") % self.status) raise RuntimeError("MySQL not stopped.") LOG.info(_("Initiating config.")) - self._write_mycnf(pkg, updated_memory_mb, None) + self._write_mycnf(updated_memory_mb, None) self.start_mysql(True) - def is_installed(self, pkg): + def is_installed(self): #(cp16net) could raise an exception, does it need to be handled here? version = pkg.pkg_version(self.MYSQL_PACKAGE_VERSION) return not version is None diff --git a/reddwarf/guestagent/manager.py b/reddwarf/guestagent/manager.py index 6f49699942..d64195bd7b 100644 --- a/reddwarf/guestagent/manager.py +++ b/reddwarf/guestagent/manager.py @@ -1,119 +1,83 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handles all processes within the Guest VM, considering it as a Platform - -The :py:class:`GuestManager` class is a :py:class:`nova.manager.Manager` that -handles RPC calls relating to Platform specific operations. - -""" - - -import functools -import logging -import traceback - -from reddwarf.common import config -from reddwarf.common import exception -from reddwarf.common import utils -from reddwarf.common import service - +from reddwarf.guestagent import dbaas +from reddwarf.guestagent import volume +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import periodic_task LOG = logging.getLogger(__name__) -CONFIG = config.Config -GUEST_SERVICES = {'mysql': 'reddwarf.guestagent.dbaas.DBaaSAgent'} -class GuestManager(service.Manager): +class Manager(periodic_task.PeriodicTasks): - """Manages the tasks within a Guest VM.""" - RPC_API_VERSION = "1.0" + @periodic_task.periodic_task(ticks_between_runs=10) + def update_status(self, context): + """Update the status of the MySQL service""" + dbaas.MySqlAppStatus.get().update() - def __init__(self, guest_drivers=None, *args, **kwargs): - service_type = CONFIG.get('service_type') - try: - service_impl = GUEST_SERVICES[service_type] - except KeyError as e: - LOG.error(_("Could not create guest, no impl for key - %s") % - service_type) - raise e - LOG.info("Create guest driver %s" % service_impl) - self.create_guest_driver(service_impl) - super(GuestManager, self).__init__(*args, **kwargs) + def create_database(self, context, databases): + return 
dbaas.MySqlAdmin().create_database(databases) - def create_guest_driver(self, service_impl): - guest_drivers = [service_impl, - 'reddwarf.guestagent.pkg.PkgAgent'] - classes = [] - for guest_driver in guest_drivers: - LOG.info(guest_driver) - driver = utils.import_class(guest_driver) - classes.append(driver) - try: - cls = type("GuestDriver", tuple(set(classes)), {}) - self.driver = cls() - except TypeError as te: - msg = "An issue occurred instantiating the GuestDriver as the " \ - "following classes: " + str(classes) + \ - " Exception=" + str(te) - raise TypeError(msg) + def create_user(self, context, users): + dbaas.MySqlAdmin().create_user(users) - def init_host(self): - """Method for any service initialization""" - pass + def delete_database(self, context, database): + return dbaas.MySqlAdmin().delete_database(database) - def periodic_tasks(self, raise_on_error=False): - """Method for running any periodic tasks. + def delete_user(self, context, user): + dbaas.MySqlAdmin().delete_user(user) - Right now does the status updates""" - status_method = "update_status" - try: - method = getattr(self.driver, status_method) - except AttributeError as ae: - LOG.error(_("Method %s not found for driver %s"), status_method, - self.driver) - if raise_on_error: - raise ae - try: - method() - except Exception as e: - LOG.error("Got an error during periodic tasks!") - LOG.debug(traceback.format_exc()) + def list_databases(self, context, limit=None, marker=None, + include_marker=False): + return dbaas.MySqlAdmin().list_databases(limit, marker, + include_marker) - def upgrade(self, context): - """Upgrade the guest agent and restart the agent""" - LOG.debug(_("Self upgrade of guest agent issued")) + def list_users(self, context, limit=None, marker=None, + include_marker=False): + return dbaas.MySqlAdmin().list_users(limit, marker, + include_marker) - def __getattr__(self, key): - """Converts all method calls and direct it at the driver""" - return functools.partial(self._mapper, 
key) + def enable_root(self, context): + return dbaas.MySqlAdmin().enable_root() - def _mapper(self, method, context, *args, **kwargs): - """ Tries to call the respective driver method """ - try: - func = getattr(self.driver, method) - except AttributeError: - LOG.error(_("Method %s not found for driver %s"), method, - self.driver) - raise exception.NotFound("Method %s is not available for the " - "chosen driver.") - try: - return func(*args, **kwargs) - except Exception as e: - LOG.error("Got an error running %s!" % method) - LOG.debug(traceback.format_exc()) + def is_root_enabled(self, context): + return dbaas.MySqlAdmin().is_root_enabled() + + def prepare(self, context, databases, memory_mb, users, device_path=None, + mount_point=None): + """Makes ready DBAAS on a Guest container.""" + dbaas.MySqlAppStatus.get().begin_mysql_install() + # status end_mysql_install set with install_and_secure() + app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) + restart_mysql = False + if device_path: + device = volume.VolumeDevice(device_path) + device.format() + if app.is_installed(): + #stop and do not update database + app.stop_mysql() + restart_mysql = True + #rsync existing data + device.migrate_data(dbaas.MYSQL_BASE_DIR) + #mount the volume + device.mount(mount_point) + LOG.debug(_("Mounted the volume.")) + #check mysql was installed and stopped + if restart_mysql: + app.start_mysql() + app.install_and_secure(memory_mb) + LOG.info("Creating initial databases and users following successful " + "prepare.") + self.create_database(context, databases) + self.create_user(context, users) + LOG.info('"prepare" call has finished.') + + def restart(self, context): + app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) + app.restart() + + def start_mysql_with_conf_changes(self, context, updated_memory_size): + app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) + app.start_mysql_with_conf_changes(updated_memory_size) + + def stop_mysql(self, context): + app = 
dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) + app.stop_mysql() diff --git a/reddwarf/guestagent/models.py b/reddwarf/guestagent/models.py index 505443d4ae..275d0b91f8 100644 --- a/reddwarf/guestagent/models.py +++ b/reddwarf/guestagent/models.py @@ -12,20 +12,23 @@ # License for the specific language governing permissions and limitations # under the License. -import logging from datetime import datetime from datetime import timedelta -from reddwarf.db import get_db_api -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import exception from reddwarf.common import utils +from reddwarf.db import get_db_api from reddwarf.db import models as dbmodels +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) -AGENT_HEARTBEAT = int(config.Config.get('agent_heartbeat_time', '10')) +CONF = cfg.CONF + +AGENT_HEARTBEAT = CONF.agent_heartbeat_time def persisted_models(): diff --git a/reddwarf/guestagent/pkg.py b/reddwarf/guestagent/pkg.py index ca3ccea7cd..473d7fa2d4 100644 --- a/reddwarf/guestagent/pkg.py +++ b/reddwarf/guestagent/pkg.py @@ -19,13 +19,14 @@ Manages packages on the Guest VM. 
""" import commands -import logging import pexpect import re from reddwarf.common import exception -from reddwarf.common.exception import ProcessExecutionError from reddwarf.common import utils +from reddwarf.common.exception import ProcessExecutionError +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) @@ -67,160 +68,162 @@ def wait_and_close_proc(child, time_out=-1): child.close() -class PkgAgent(object): - """ Agent Controller which can maintain package installs on a guest.""" +def _fix(time_out): + """Sometimes you have to run this command before a pkg will install.""" + #sudo dpkg --configure -a + child = pexpect.spawn("sudo -E dpkg --configure -a") + wait_and_close_proc(child, time_out) - def _fix(self, time_out): - """Sometimes you have to run this command before a pkg will install.""" - #sudo dpkg --configure -a - child = pexpect.spawn("sudo -E dpkg --configure -a") - wait_and_close_proc(child, time_out) - def _install(self, package_name, time_out): - """Attempts to install a package. +def _install(package_name, time_out): + """Attempts to install a package. - Returns OK if the package installs fine or a result code if a - recoverable-error occurred. - Raises an exception if a non-recoverable error or time out occurs. + Returns OK if the package installs fine or a result code if a + recoverable-error occurred. + Raises an exception if a non-recoverable error or time out occurs. 
- """ - child = pexpect.spawn("sudo -E DEBIAN_FRONTEND=noninteractive " - "apt-get -y --allow-unauthenticated install %s" - % package_name) - try: - i = child.expect(['.*password*', - 'E: Unable to locate package %s' % package_name, - "Couldn't find package % s" % package_name, - ("dpkg was interrupted, you must manually run " - "'sudo dpkg --configure -a'"), - "Unable to lock the administration directory", - "Setting up %s*" % package_name, - "is already the newest version"], - timeout=time_out) - if i == 0: - raise PkgPermissionError("Invalid permissions.") - elif i == 1 or i == 2: - raise PkgNotFoundError("Could not find apt %s" % package_name) - elif i == 3: - return RUN_DPKG_FIRST - elif i == 4: - raise PkgAdminLockError() - except pexpect.TIMEOUT: - kill_proc(child) - raise PkgTimeout("Process timeout after %i seconds." % time_out) - try: - wait_and_close_proc(child) - except pexpect.TIMEOUT as e: - LOG.error("wait_and_close_proc failed: %s" % e) - #TODO(tim.simpson): As of RDL, and on my machine exclusively (in - # both Virtual Box and VmWare!) this fails, but - # the package is installed. - return OK + """ + child = pexpect.spawn("sudo -E DEBIAN_FRONTEND=noninteractive " + "apt-get -y --allow-unauthenticated install %s" + % package_name) + try: + i = child.expect(['.*password*', + 'E: Unable to locate package %s' % package_name, + "Couldn't find package % s" % package_name, + ("dpkg was interrupted, you must manually run " + "'sudo dpkg --configure -a'"), + "Unable to lock the administration directory", + "Setting up %s*" % package_name, + "is already the newest version"], + timeout=time_out) + if i == 0: + raise PkgPermissionError("Invalid permissions.") + elif i == 1 or i == 2: + raise PkgNotFoundError("Could not find apt %s" % package_name) + elif i == 3: + return RUN_DPKG_FIRST + elif i == 4: + raise PkgAdminLockError() + except pexpect.TIMEOUT: + kill_proc(child) + raise PkgTimeout("Process timeout after %i seconds." 
% time_out) + try: + wait_and_close_proc(child) + except pexpect.TIMEOUT as e: + LOG.error("wait_and_close_proc failed: %s" % e) + #TODO(tim.simpson): As of RDL, and on my machine exclusively (in + # both Virtual Box and VmWare!) this fails, but + # the package is installed. + return OK - def _remove(self, package_name, time_out): - """Removes a package. - Returns OK if the package is removed successfully or a result code if a - recoverable-error occurs. - Raises an exception if a non-recoverable error or time out occurs. +def _remove(package_name, time_out): + """Removes a package. - """ - child = pexpect.spawn("sudo -E apt-get -y --allow-unauthenticated " - "remove %s" % package_name) - try: - i = child.expect(['.*password*', - 'E: Unable to locate package %s' % package_name, - 'Package is in a very bad inconsistent state', - ("Sub-process /usr/bin/dpkg returned an error " - "code"), - ("dpkg was interrupted, you must manually run " - "'sudo dpkg --configure -a'"), - "Unable to lock the administration directory", - #'The following packages will be REMOVED', - "Removing %s*" % package_name], - timeout=time_out) - if i == 0: - raise PkgPermissionError("Invalid permissions.") - elif i == 1: - raise PkgNotFoundError("Could not find pkg %s" % package_name) - elif i == 2 or i == 3: - return REINSTALL_FIRST - elif i == 4: - return RUN_DPKG_FIRST - elif i == 5: - raise PkgAdminLockError() - wait_and_close_proc(child) - except pexpect.TIMEOUT: - kill_proc(child) - raise PkgTimeout("Process timeout after %i seconds." % time_out) - return OK + Returns OK if the package is removed successfully or a result code if a + recoverable-error occurs. + Raises an exception if a non-recoverable error or time out occurs. 
- def pkg_install(self, package_name, time_out): - """Installs a package.""" - try: - utils.execute("apt-get", "update", run_as_root=True, - root_helper="sudo") - except ProcessExecutionError as e: - LOG.error(_("Error updating the apt sources")) + """ + child = pexpect.spawn("sudo -E apt-get -y --allow-unauthenticated " + "remove %s" % package_name) + try: + i = child.expect(['.*password*', + 'E: Unable to locate package %s' % package_name, + 'Package is in a very bad inconsistent state', + ("Sub-process /usr/bin/dpkg returned an error " + "code"), + ("dpkg was interrupted, you must manually run " + "'sudo dpkg --configure -a'"), + "Unable to lock the administration directory", + #'The following packages will be REMOVED', + "Removing %s*" % package_name], + timeout=time_out) + if i == 0: + raise PkgPermissionError("Invalid permissions.") + elif i == 1: + raise PkgNotFoundError("Could not find pkg %s" % package_name) + elif i == 2 or i == 3: + return REINSTALL_FIRST + elif i == 4: + return RUN_DPKG_FIRST + elif i == 5: + raise PkgAdminLockError() + wait_and_close_proc(child) + except pexpect.TIMEOUT: + kill_proc(child) + raise PkgTimeout("Process timeout after %i seconds." % time_out) + return OK - result = self._install(package_name, time_out) + +def pkg_install(package_name, time_out): + """Installs a package.""" + try: + utils.execute("apt-get", "update", run_as_root=True, + root_helper="sudo") + except ProcessExecutionError as e: + LOG.error(_("Error updating the apt sources")) + + result = _install(package_name, time_out) + if result != OK: + if result == RUN_DPKG_FIRST: + _fix(time_out) + result = _install(package_name, time_out) if result != OK: - if result == RUN_DPKG_FIRST: - self._fix(time_out) - result = self._install(package_name, time_out) - if result != OK: - raise PkgPackageStateError("Package %s is in a bad state." - % package_name) + raise PkgPackageStateError("Package %s is in a bad state." 
+ % package_name) - def pkg_version(self, package_name): - cmd_list = ["dpkg", "-l", package_name] - p = commands.getstatusoutput(' '.join(cmd_list)) - # check the command status code - if not p[0] == 0: - return None - # Need to capture the version string - # check the command output - std_out = p[1] - patterns = ['.*No packages found matching.*', - "\w\w\s+(\S+)\s+(\S+)\s+(.*)$"] - for line in std_out.split("\n"): - for p in patterns: - regex = re.compile(p) - matches = regex.match(line) - if matches: - line = matches.group() - parts = line.split() - if not parts: - msg = _("returned nothing") - LOG.error(msg) - raise exception.GuestError(msg) - if len(parts) <= 2: - msg = _("Unexpected output.") - LOG.error(msg) - raise exception.GuestError(msg) - if parts[1] != package_name: - msg = _("Unexpected output:[1] = %s" % str(parts[1])) - LOG.error(msg) - raise exception.GuestError(msg) - if parts[0] == 'un' or parts[2] == '': - return None - return parts[2] - msg = _("version() saw unexpected output from dpkg!") - LOG.error(msg) - raise exception.GuestError(msg) - def pkg_remove(self, package_name, time_out): - """Removes a package.""" - if self.pkg_version(package_name) is None: - return - result = self._remove(package_name, time_out) +def pkg_version(package_name): + cmd_list = ["dpkg", "-l", package_name] + p = commands.getstatusoutput(' '.join(cmd_list)) + # check the command status code + if not p[0] == 0: + return None + # Need to capture the version string + # check the command output + std_out = p[1] + patterns = ['.*No packages found matching.*', + "\w\w\s+(\S+)\s+(\S+)\s+(.*)$"] + for line in std_out.split("\n"): + for p in patterns: + regex = re.compile(p) + matches = regex.match(line) + if matches: + line = matches.group() + parts = line.split() + if not parts: + msg = _("returned nothing") + LOG.error(msg) + raise exception.GuestError(msg) + if len(parts) <= 2: + msg = _("Unexpected output.") + LOG.error(msg) + raise exception.GuestError(msg) + if 
parts[1] != package_name: + msg = _("Unexpected output:[1] = %s" % str(parts[1])) + LOG.error(msg) + raise exception.GuestError(msg) + if parts[0] == 'un' or parts[2] == '': + return None + return parts[2] + msg = _("version() saw unexpected output from dpkg!") + LOG.error(msg) + raise exception.GuestError(msg) + +def pkg_remove(package_name, time_out): + """Removes a package.""" + if pkg_version(package_name) is None: + return + result = _remove(package_name, time_out) + + if result != OK: + if result == REINSTALL_FIRST: + _install(package_name, time_out) + elif result == RUN_DPKG_FIRST: + _fix(time_out) + result = _remove(package_name, time_out) if result != OK: - if result == REINSTALL_FIRST: - self._install(package_name, time_out) - elif result == RUN_DPKG_FIRST: - self._fix(time_out) - result = self._remove(package_name, time_out) - if result != OK: - raise PkgPackageStateError("Package %s is in a bad state." - % package_name) + raise PkgPackageStateError("Package %s is in a bad state." + % package_name) diff --git a/reddwarf/guestagent/service.py b/reddwarf/guestagent/service.py index 86a7dd6dbc..5d096e62cd 100644 --- a/reddwarf/guestagent/service.py +++ b/reddwarf/guestagent/service.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -import logging +from reddwarf.openstack.common import log as logging import routes import webob.exc diff --git a/reddwarf/guestagent/volume.py b/reddwarf/guestagent/volume.py index 8941401bc9..9861b62416 100644 --- a/reddwarf/guestagent/volume.py +++ b/reddwarf/guestagent/volume.py @@ -15,11 +15,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging +from reddwarf.openstack.common import log as logging import os import pexpect -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import utils from reddwarf.common.exception import GuestError from reddwarf.common.exception import ProcessExecutionError @@ -27,7 +27,7 @@ from reddwarf.common.exception import ProcessExecutionError TMP_MOUNT_POINT = "/mnt/volume" LOG = logging.getLogger(__name__) -CONFIG = config.Config +CONF = cfg.CONF class VolumeDevice(object): @@ -55,7 +55,7 @@ class VolumeDevice(object): num_tries to account for the time lag. """ try: - num_tries = CONFIG.get('num_tries', 3) + num_tries = CONF.num_tries utils.execute('sudo', 'blockdev', '--getsize64', self.device_path, attempts=num_tries) except ProcessExecutionError: @@ -68,7 +68,7 @@ class VolumeDevice(object): i = child.expect(['has_journal', 'Wrong magic number']) if i == 0: return - volume_fstype = CONFIG.get('volume_fstype', 'ext3') + volume_fstype = CONF.volume_fstype raise IOError('Device path at %s did not seem to be %s.' 
% (self.device_path, volume_fstype)) except pexpect.EOF: @@ -77,11 +77,11 @@ class VolumeDevice(object): def _format(self): """Calls mkfs to format the device at device_path.""" - volume_fstype = CONFIG.get('volume_fstype', 'ext3') - format_options = CONFIG.get('format_options', '-m 5') + volume_fstype = CONF.volume_fstype + format_options = CONF.format_options cmd = "sudo mkfs -t %s %s %s" % (volume_fstype, format_options, self.device_path) - volume_format_timeout = CONFIG.get('volume_format_timeout', 120) + volume_format_timeout = CONF.volume_format_timeout child = pexpect.spawn(cmd, timeout=volume_format_timeout) # child.expect("(y,n)") # child.sendline('y') @@ -127,8 +127,8 @@ class VolumeMountPoint(object): def __init__(self, device_path, mount_point): self.device_path = device_path self.mount_point = mount_point - self.volume_fstype = CONFIG.get('volume_fstype', 'ext3') - self.mount_options = CONFIG.get('mount_options', 'defaults,noatime') + self.volume_fstype = CONF.volume_fstype + self.mount_options = CONF.mount_options def mount(self): if not os.path.exists(self.mount_point): diff --git a/reddwarf/instance/models.py b/reddwarf/instance/models.py index 47f4b62884..114ddfe677 100644 --- a/reddwarf/instance/models.py +++ b/reddwarf/instance/models.py @@ -18,12 +18,11 @@ """Model classes that form the core of instances functionality.""" import eventlet -import logging import netaddr from datetime import datetime from novaclient import exceptions as nova_exceptions -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import exception from reddwarf.common import utils from reddwarf.common.remote import create_dns_client @@ -35,12 +34,14 @@ from reddwarf.instance.tasks import InstanceTask from reddwarf.instance.tasks import InstanceTasks from reddwarf.guestagent import models as agent_models from reddwarf.taskmanager import api as task_api +from reddwarf.openstack.common import log as logging +from 
reddwarf.openstack.common.gettextutils import _ from eventlet import greenthread -CONFIG = config.Config +CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -412,8 +413,8 @@ class Instance(BuiltInstance): instance_id=db_info.id, status=ServiceStatuses.NEW) - dns_support = config.Config.get("reddwarf_dns_support", 'False') - if utils.bool_from_string(dns_support): + dns_support = CONF.reddwarf_dns_support + if dns_support: dns_client = create_dns_client(context) hostname = dns_client.determine_hostname(db_info.id) db_info.hostname = hostname @@ -523,7 +524,7 @@ def create_server_list_matcher(server_list): class Instances(object): - DEFAULT_LIMIT = int(config.Config.get('instances_page_size', '20')) + DEFAULT_LIMIT = CONF.instances_page_size @staticmethod def load(context): diff --git a/reddwarf/instance/service.py b/reddwarf/instance/service.py index 0f4c40b1ce..be713dd947 100644 --- a/reddwarf/instance/service.py +++ b/reddwarf/instance/service.py @@ -15,11 +15,10 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging import routes import webob.exc -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import exception from reddwarf.common import pagination from reddwarf.common import utils @@ -27,9 +26,11 @@ from reddwarf.common import wsgi from reddwarf.extensions.mysql.common import populate_databases from reddwarf.extensions.mysql.common import populate_users from reddwarf.instance import models, views +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ -CONFIG = config.Config +CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -202,7 +203,7 @@ class InstanceController(wsgi.Controller): else: volume_size = None - instance_max = int(config.Config.get('max_instances_per_user', 5)) + instance_max = CONF.max_instances_per_user number_instances = models.DBInstance.find_all(tenant_id=tenant_id, deleted=False).count() @@ -252,7 +253,7 @@ class InstanceController(wsgi.Controller): "integer value, %s cannot be accepted." % volume_size) raise exception.ReddwarfError(msg) - max_size = int(config.Config.get('max_accepted_volume_size', 1)) + max_size = CONF.max_accepted_volume_size if int(volume_size) > max_size: msg = ("Volume 'size' cannot exceed maximum " "of %d Gb, %s cannot be accepted." 
@@ -270,10 +271,8 @@ class InstanceController(wsgi.Controller): name = body['instance'].get('name', '').strip() if not name: raise exception.MissingKey(key='name') - vol_enabled = utils.bool_from_string( - config.Config.get('reddwarf_volume_support', 'True')) - must_have_vol = utils.bool_from_string( - config.Config.get('reddwarf_must_use_volume', 'False')) + vol_enabled = CONF.reddwarf_volume_support + must_have_vol = CONF.reddwarf_must_use_volume if vol_enabled: if body['instance'].get('volume', None): if body['instance']['volume'].get('size', None): diff --git a/reddwarf/instance/views.py b/reddwarf/instance/views.py index 8596762533..038edf55d5 100644 --- a/reddwarf/instance/views.py +++ b/reddwarf/instance/views.py @@ -15,14 +15,16 @@ # License for the specific language governing permissions and limitations # under the License. -import logging -from reddwarf.common import config +from reddwarf.openstack.common import log as logging +from reddwarf.common import cfg from reddwarf.common import utils from reddwarf.common.views import create_links from reddwarf.instance import models LOG = logging.getLogger(__name__) +CONF = cfg.CONF + def get_ip_address(addresses): if (addresses is not None and @@ -91,8 +93,8 @@ class InstanceDetailView(InstanceView): result['instance']['created'] = self.instance.created result['instance']['updated'] = self.instance.updated - dns_support = config.Config.get("reddwarf_dns_support", 'False') - if utils.bool_from_string(dns_support): + dns_support = CONF.reddwarf_dns_support + if dns_support: result['instance']['hostname'] = self.instance.hostname if self.add_addresses: diff --git a/reddwarf/openstack/common/authutils.py b/reddwarf/openstack/common/authutils.py new file mode 100644 index 0000000000..f0e2c80dc5 --- /dev/null +++ b/reddwarf/openstack/common/authutils.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Authentication related utilities and helper functions. +""" + + +def auth_str_equal(provided, known): + """Constant-time string comparison. + + :params provided: the first string + :params known: the second string + + :return: True if the strings are equal. + + This function takes two strings and compares them. It is intended to be + used when doing a comparison for authentication purposes to help guard + against timing attacks. When using the function for this purpose, always + provide the user-provided password as the first argument. The time this + function will take is always a factor of the length of this string. + """ + result = 0 + p_len = len(provided) + k_len = len(known) + for i in xrange(p_len): + a = ord(provided[i]) if i < p_len else 0 + b = ord(known[i]) if i < k_len else 0 + result |= a ^ b + return (p_len == k_len) & (result == 0) diff --git a/reddwarf/openstack/common/cfg.py b/reddwarf/openstack/common/cfg.py new file mode 100644 index 0000000000..7655e23e51 --- /dev/null +++ b/reddwarf/openstack/common/cfg.py @@ -0,0 +1,1787 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +Configuration options which may be set on the command line or in config files. + +The schema for each option is defined using the Opt sub-classes, e.g.: + +:: + + common_opts = [ + cfg.StrOpt('bind_host', + default='0.0.0.0', + help='IP address to listen on'), + cfg.IntOpt('bind_port', + default=9292, + help='Port number to listen on') + ] + +Options can be strings, integers, floats, booleans, lists or 'multi strings':: + + enabled_apis_opt = cfg.ListOpt('enabled_apis', + default=['ec2', 'osapi_compute'], + help='List of APIs to enable by default') + + DEFAULT_EXTENSIONS = [ + 'nova.api.openstack.compute.contrib.standard_extensions' + ] + osapi_compute_extension_opt = cfg.MultiStrOpt('osapi_compute_extension', + default=DEFAULT_EXTENSIONS) + +Option schemas are registered with the config manager at runtime, but before +the option is referenced:: + + class ExtensionManager(object): + + enabled_apis_opt = cfg.ListOpt(...) + + def __init__(self, conf): + self.conf = conf + self.conf.register_opt(enabled_apis_opt) + ... + + def _load_extensions(self): + for ext_factory in self.conf.osapi_compute_extension: + .... + +A common usage pattern is for each option schema to be defined in the module or +class which uses the option:: + + opts = ... + + def add_common_opts(conf): + conf.register_opts(opts) + + def get_bind_host(conf): + return conf.bind_host + + def get_bind_port(conf): + return conf.bind_port + +An option may optionally be made available via the command line. 
Such options +must registered with the config manager before the command line is parsed (for +the purposes of --help and CLI arg validation):: + + cli_opts = [ + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output'), + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output'), + ] + + def add_common_opts(conf): + conf.register_cli_opts(cli_opts) + +The config manager has two CLI options defined by default, --config-file +and --config-dir:: + + class ConfigOpts(object): + + def __call__(self, ...): + + opts = [ + MultiStrOpt('config-file', + ...), + StrOpt('config-dir', + ...), + ] + + self.register_cli_opts(opts) + +Option values are parsed from any supplied config files using +openstack.common.iniparser. If none are specified, a default set is used +e.g. glance-api.conf and glance-common.conf:: + + glance-api.conf: + [DEFAULT] + bind_port = 9292 + + glance-common.conf: + [DEFAULT] + bind_host = 0.0.0.0 + +Option values in config files override those on the command line. Config files +are parsed in order, with values in later files overriding those in earlier +files. + +The parsing of CLI args and config files is initiated by invoking the config +manager e.g.:: + + conf = ConfigOpts() + conf.register_opt(BoolOpt('verbose', ...)) + conf(sys.argv[1:]) + if conf.verbose: + ... 
+ +Options can be registered as belonging to a group:: + + rabbit_group = cfg.OptGroup(name='rabbit', + title='RabbitMQ options') + + rabbit_host_opt = cfg.StrOpt('host', + default='localhost', + help='IP/hostname to listen on'), + rabbit_port_opt = cfg.IntOpt('port', + default=5672, + help='Port number to listen on') + + def register_rabbit_opts(conf): + conf.register_group(rabbit_group) + # options can be registered under a group in either of these ways: + conf.register_opt(rabbit_host_opt, group=rabbit_group) + conf.register_opt(rabbit_port_opt, group='rabbit') + +If it no group attributes are required other than the group name, the group +need not be explicitly registered e.g. + + def register_rabbit_opts(conf): + # The group will automatically be created, equivalent calling:: + # conf.register_group(OptGroup(name='rabbit')) + conf.register_opt(rabbit_port_opt, group='rabbit') + +If no group is specified, options belong to the 'DEFAULT' section of config +files:: + + glance-api.conf: + [DEFAULT] + bind_port = 9292 + ... + + [rabbit] + host = localhost + port = 5672 + use_ssl = False + userid = guest + password = guest + virtual_host = / + +Command-line options in a group are automatically prefixed with the +group name:: + + --rabbit-host localhost --rabbit-port 9999 + +Option values in the default group are referenced as attributes/properties on +the config manager; groups are also attributes on the config manager, with +attributes for each of the options associated with the group:: + + server.start(app, conf.bind_port, conf.bind_host, conf) + + self.connection = kombu.connection.BrokerConnection( + hostname=conf.rabbit.host, + port=conf.rabbit.port, + ...) 
+ +Option values may reference other values using PEP 292 string substitution:: + + opts = [ + cfg.StrOpt('state_path', + default=os.path.join(os.path.dirname(__file__), '../'), + help='Top-level directory for maintaining nova state'), + cfg.StrOpt('sqlite_db', + default='nova.sqlite', + help='file name for sqlite'), + cfg.StrOpt('sql_connection', + default='sqlite:///$state_path/$sqlite_db', + help='connection string for sql database'), + ] + +Note that interpolation can be avoided by using '$$'. + +Options may be declared as required so that an error is raised if the user +does not supply a value for the option. + +Options may be declared as secret so that their values are not leaked into +log files:: + + opts = [ + cfg.StrOpt('s3_store_access_key', secret=True), + cfg.StrOpt('s3_store_secret_key', secret=True), + ... + ] + +This module also contains a global instance of the CommonConfigOpts class +in order to support a common usage pattern in OpenStack:: + + from reddwarf.openstack.common import cfg + + opts = [ + cfg.StrOpt('bind_host', default='0.0.0.0'), + cfg.IntOpt('bind_port', default=9292), + ] + + CONF = cfg.CONF + CONF.register_opts(opts) + + def start(server, app): + server.start(app, CONF.bind_port, CONF.bind_host) + +Positional command line arguments are supported via a 'positional' Opt +constructor argument:: + + >>> CONF.register_cli_opt(MultiStrOpt('bar', positional=True)) + True + >>> CONF(['a', 'b']) + >>> CONF.bar + ['a', 'b'] + +It is also possible to use argparse "sub-parsers" to parse additional +command line arguments using the SubCommandOpt class: + + >>> def add_parsers(subparsers): + ... list_action = subparsers.add_parser('list') + ... list_action.add_argument('id') + ... 
+ >>> CONF.register_cli_opt(SubCommandOpt('action', handler=add_parsers)) + True + >>> CONF(['list', '10']) + >>> CONF.action.name, CONF.action.id + ('list', '10') + +""" + +import argparse +import collections +import copy +import functools +import glob +import os +import string +import sys + +from reddwarf.openstack.common import iniparser + + +class Error(Exception): + """Base class for cfg exceptions.""" + + def __init__(self, msg=None): + self.msg = msg + + def __str__(self): + return self.msg + + +class ArgsAlreadyParsedError(Error): + """Raised if a CLI opt is registered after parsing.""" + + def __str__(self): + ret = "arguments already parsed" + if self.msg: + ret += ": " + self.msg + return ret + + +class NoSuchOptError(Error, AttributeError): + """Raised if an opt which doesn't exist is referenced.""" + + def __init__(self, opt_name, group=None): + self.opt_name = opt_name + self.group = group + + def __str__(self): + if self.group is None: + return "no such option: %s" % self.opt_name + else: + return "no such option in group %s: %s" % (self.group.name, + self.opt_name) + + +class NoSuchGroupError(Error): + """Raised if a group which doesn't exist is referenced.""" + + def __init__(self, group_name): + self.group_name = group_name + + def __str__(self): + return "no such group: %s" % self.group_name + + +class DuplicateOptError(Error): + """Raised if multiple opts with the same name are registered.""" + + def __init__(self, opt_name): + self.opt_name = opt_name + + def __str__(self): + return "duplicate option: %s" % self.opt_name + + +class RequiredOptError(Error): + """Raised if an option is required but no value is supplied by the user.""" + + def __init__(self, opt_name, group=None): + self.opt_name = opt_name + self.group = group + + def __str__(self): + if self.group is None: + return "value required for option: %s" % self.opt_name + else: + return "value required for option: %s.%s" % (self.group.name, + self.opt_name) + + +class 
TemplateSubstitutionError(Error): + """Raised if an error occurs substituting a variable in an opt value.""" + + def __str__(self): + return "template substitution error: %s" % self.msg + + +class ConfigFilesNotFoundError(Error): + """Raised if one or more config files are not found.""" + + def __init__(self, config_files): + self.config_files = config_files + + def __str__(self): + return ('Failed to read some config files: %s' % + string.join(self.config_files, ',')) + + +class ConfigFileParseError(Error): + """Raised if there is an error parsing a config file.""" + + def __init__(self, config_file, msg): + self.config_file = config_file + self.msg = msg + + def __str__(self): + return 'Failed to parse %s: %s' % (self.config_file, self.msg) + + +class ConfigFileValueError(Error): + """Raised if a config file value does not match its opt type.""" + pass + + +def _fixpath(p): + """Apply tilde expansion and absolutization to a path.""" + return os.path.abspath(os.path.expanduser(p)) + + +def _get_config_dirs(project=None): + """Return a list of directors where config files may be located. + + :param project: an optional project name + + If a project is specified, following directories are returned:: + + ~/.${project}/ + ~/ + /etc/${project}/ + /etc/ + + Otherwise, these directories:: + + ~/ + /etc/ + """ + cfg_dirs = [ + _fixpath(os.path.join('~', '.' + project)) if project else None, + _fixpath('~'), + os.path.join('/etc', project) if project else None, + '/etc' + ] + + return filter(bool, cfg_dirs) + + +def _search_dirs(dirs, basename, extension=""): + """Search a list of directories for a given filename. + + Iterator over the supplied directories, returning the first file + found with the supplied name and extension. + + :param dirs: a list of directories + :param basename: the filename, e.g. 'glance-api' + :param extension: the file extension, e.g. 
'.conf' + :returns: the path to a matching file, or None + """ + for d in dirs: + path = os.path.join(d, '%s%s' % (basename, extension)) + if os.path.exists(path): + return path + + +def find_config_files(project=None, prog=None, extension='.conf'): + """Return a list of default configuration files. + + :param project: an optional project name + :param prog: the program name, defaulting to the basename of sys.argv[0] + :param extension: the type of the config file + + We default to two config files: [${project}.conf, ${prog}.conf] + + And we look for those config files in the following directories:: + + ~/.${project}/ + ~/ + /etc/${project}/ + /etc/ + + We return an absolute path for (at most) one of each the default config + files, for the topmost directory it exists in. + + For example, if project=foo, prog=bar and /etc/foo/foo.conf, /etc/bar.conf + and ~/.foo/bar.conf all exist, then we return ['/etc/foo/foo.conf', + '~/.foo/bar.conf'] + + If no project name is supplied, we only look for ${prog.conf}. + """ + if prog is None: + prog = os.path.basename(sys.argv[0]) + + cfg_dirs = _get_config_dirs(project) + + config_files = [] + if project: + config_files.append(_search_dirs(cfg_dirs, project, extension)) + config_files.append(_search_dirs(cfg_dirs, prog, extension)) + + return filter(bool, config_files) + + +def _is_opt_registered(opts, opt): + """Check whether an opt with the same name is already registered. + + The same opt may be registered multiple times, with only the first + registration having any effect. However, it is an error to attempt + to register a different opt with the same name. 
+ + :param opts: the set of opts already registered + :param opt: the opt to be registered + :returns: True if the opt was previously registered, False otherwise + :raises: DuplicateOptError if a naming conflict is detected + """ + if opt.dest in opts: + if opts[opt.dest]['opt'] != opt: + raise DuplicateOptError(opt.name) + return True + else: + return False + + +def set_defaults(opts, **kwargs): + for opt in opts: + if opt.dest in kwargs: + opt.default = kwargs[opt.dest] + break + + +class Opt(object): + + """Base class for all configuration options. + + An Opt object has no public methods, but has a number of public string + properties: + + name: + the name of the option, which may include hyphens + dest: + the (hyphen-less) ConfigOpts property which contains the option value + short: + a single character CLI option name + default: + the default value of the option + positional: + True if the option is a positional CLI argument + metavar: + the name shown as the argument to a CLI option in --help output + help: + an string explaining how the options value is used + """ + multi = False + + def __init__(self, name, dest=None, short=None, default=None, + positional=False, metavar=None, help=None, + secret=False, required=False, deprecated_name=None): + """Construct an Opt object. + + The only required parameter is the option's name. However, it is + common to also supply a default and help string for all options. 
+ + :param name: the option's name + :param dest: the name of the corresponding ConfigOpts property + :param short: a single character CLI option name + :param default: the default value of the option + :param positional: True if the option is a positional CLI argument + :param metavar: the option argument to show in --help + :param help: an explanation of how the option is used + :param secret: true iff the value should be obfuscated in log output + :param required: true iff a value must be supplied for this option + :param deprecated_name: deprecated name option. Acts like an alias + """ + self.name = name + if dest is None: + self.dest = self.name.replace('-', '_') + else: + self.dest = dest + self.short = short + self.default = default + self.positional = positional + self.metavar = metavar + self.help = help + self.secret = secret + self.required = required + if deprecated_name is not None: + self.deprecated_name = deprecated_name.replace('-', '_') + else: + self.deprecated_name = None + + def __ne__(self, another): + return vars(self) != vars(another) + + def _get_from_config_parser(self, cparser, section): + """Retrieves the option value from a MultiConfigParser object. + + This is the method ConfigOpts uses to look up the option value from + config files. Most opt types override this method in order to perform + type appropriate conversion of the returned value. + + :param cparser: a ConfigParser object + :param section: a section name + """ + return self._cparser_get_with_deprecated(cparser, section) + + def _cparser_get_with_deprecated(self, cparser, section): + """If cannot find option as dest try deprecated_name alias.""" + if self.deprecated_name is not None: + return cparser.get(section, [self.dest, self.deprecated_name]) + return cparser.get(section, [self.dest]) + + def _add_to_cli(self, parser, group=None): + """Makes the option available in the command line interface. 
+ + This is the method ConfigOpts uses to add the opt to the CLI interface + as appropriate for the opt type. Some opt types may extend this method, + others may just extend the helper methods it uses. + + :param parser: the CLI option parser + :param group: an optional OptGroup object + """ + container = self._get_argparse_container(parser, group) + kwargs = self._get_argparse_kwargs(group) + prefix = self._get_argparse_prefix('', group) + self._add_to_argparse(container, self.name, self.short, kwargs, prefix, + self.positional, self.deprecated_name) + + def _add_to_argparse(self, container, name, short, kwargs, prefix='', + positional=False, deprecated_name=None): + """Add an option to an argparse parser or group. + + :param container: an argparse._ArgumentGroup object + :param name: the opt name + :param short: the short opt name + :param kwargs: the keyword arguments for add_argument() + :param prefix: an optional prefix to prepend to the opt name + :param position: whether the optional is a positional CLI argument + :raises: DuplicateOptError if a naming confict is detected + """ + def hyphen(arg): + return arg if not positional else '' + + args = [hyphen('--') + prefix + name] + if short: + args.append(hyphen('-') + short) + if deprecated_name: + args.append(hyphen('--') + prefix + deprecated_name) + + try: + container.add_argument(*args, **kwargs) + except argparse.ArgumentError as e: + raise DuplicateOptError(e) + + def _get_argparse_container(self, parser, group): + """Returns an argparse._ArgumentGroup. + + :param parser: an argparse.ArgumentParser + :param group: an (optional) OptGroup object + :returns: an argparse._ArgumentGroup if group is given, else parser + """ + if group is not None: + return group._get_argparse_group(parser) + else: + return parser + + def _get_argparse_kwargs(self, group, **kwargs): + """Build a dict of keyword arguments for argparse's add_argument(). 
+ + Most opt types extend this method to customize the behaviour of the + options added to argparse. + + :param group: an optional group + :param kwargs: optional keyword arguments to add to + :returns: a dict of keyword arguments + """ + if not self.positional: + dest = self.dest + if group is not None: + dest = group.name + '_' + dest + kwargs['dest'] = dest + else: + kwargs['nargs'] = '?' + kwargs.update({'default': None, + 'metavar': self.metavar, + 'help': self.help, }) + return kwargs + + def _get_argparse_prefix(self, prefix, group): + """Build a prefix for the CLI option name, if required. + + CLI options in a group are prefixed with the group's name in order + to avoid conflicts between similarly named options in different + groups. + + :param prefix: an existing prefix to append to (e.g. 'no' or '') + :param group: an optional OptGroup object + :returns: a CLI option prefix including the group name, if appropriate + """ + if group is not None: + return group.name + '-' + prefix + else: + return prefix + + +class StrOpt(Opt): + """ + String opts do not have their values transformed and are returned as + str objects. + """ + pass + + +class BoolOpt(Opt): + + """ + Bool opts are set to True or False on the command line using --optname or + --noopttname respectively. + + In config files, boolean values are case insensitive and can be set using + 1/0, yes/no, true/false or on/off. 
+ """ + + _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, + '0': False, 'no': False, 'false': False, 'off': False} + + def __init__(self, *args, **kwargs): + if 'positional' in kwargs: + raise ValueError('positional boolean args not supported') + super(BoolOpt, self).__init__(*args, **kwargs) + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a boolean from ConfigParser.""" + def convert_bool(v): + value = self._boolean_states.get(v.lower()) + if value is None: + raise ValueError('Unexpected boolean value %r' % v) + + return value + + return [convert_bool(v) for v in + self._cparser_get_with_deprecated(cparser, section)] + + def _add_to_cli(self, parser, group=None): + """Extends the base class method to add the --nooptname option.""" + super(BoolOpt, self)._add_to_cli(parser, group) + self._add_inverse_to_argparse(parser, group) + + def _add_inverse_to_argparse(self, parser, group): + """Add the --nooptname option to the option parser.""" + container = self._get_argparse_container(parser, group) + kwargs = self._get_argparse_kwargs(group, action='store_false') + prefix = self._get_argparse_prefix('no', group) + kwargs["help"] = "The inverse of --" + self.name + self._add_to_argparse(container, self.name, None, kwargs, prefix, + self.positional, self.deprecated_name) + + def _get_argparse_kwargs(self, group, action='store_true', **kwargs): + """Extends the base argparse keyword dict for boolean options.""" + + kwargs = super(BoolOpt, self)._get_argparse_kwargs(group, **kwargs) + + # metavar has no effect for BoolOpt + if 'metavar' in kwargs: + del kwargs['metavar'] + + if action != 'store_true': + action = 'store_false' + + kwargs['action'] = action + + return kwargs + + +class IntOpt(Opt): + + """Int opt values are converted to integers using the int() builtin.""" + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a integer from ConfigParser.""" + return [int(v) for v in 
self._cparser_get_with_deprecated(cparser, + section)] + + def _get_argparse_kwargs(self, group, **kwargs): + """Extends the base argparse keyword dict for integer options.""" + return super(IntOpt, + self)._get_argparse_kwargs(group, type=int, **kwargs) + + +class FloatOpt(Opt): + + """Float opt values are converted to floats using the float() builtin.""" + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a float from ConfigParser.""" + return [float(v) for v in + self._cparser_get_with_deprecated(cparser, section)] + + def _get_argparse_kwargs(self, group, **kwargs): + """Extends the base argparse keyword dict for float options.""" + return super(FloatOpt, self)._get_argparse_kwargs(group, + type=float, **kwargs) + + +class ListOpt(Opt): + + """ + List opt values are simple string values separated by commas. The opt value + is a list containing these strings. + """ + + class _StoreListAction(argparse.Action): + """ + An argparse action for parsing an option value into a list. + """ + def __call__(self, parser, namespace, values, option_string=None): + if values is not None: + values = [a.strip() for a in values.split(',')] + setattr(namespace, self.dest, values) + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a list from ConfigParser.""" + return [[a.strip() for a in v.split(',')] for v in + self._cparser_get_with_deprecated(cparser, section)] + + def _get_argparse_kwargs(self, group, **kwargs): + """Extends the base argparse keyword dict for list options.""" + return Opt._get_argparse_kwargs(self, + group, + action=ListOpt._StoreListAction, + **kwargs) + + +class MultiStrOpt(Opt): + + """ + Multistr opt values are string opts which may be specified multiple times. + The opt value is a list containing all the string values specified. 
+ """ + multi = True + + def _get_argparse_kwargs(self, group, **kwargs): + """Extends the base argparse keyword dict for multi str options.""" + kwargs = super(MultiStrOpt, self)._get_argparse_kwargs(group) + if not self.positional: + kwargs['action'] = 'append' + else: + kwargs['nargs'] = '*' + return kwargs + + def _cparser_get_with_deprecated(self, cparser, section): + """If cannot find option as dest try deprecated_name alias.""" + if self.deprecated_name is not None: + return cparser.get(section, [self.dest, self.deprecated_name], + multi=True) + return cparser.get(section, [self.dest], multi=True) + + +class SubCommandOpt(Opt): + + """ + Sub-command options allow argparse sub-parsers to be used to parse + additional command line arguments. + + The handler argument to the SubCommandOpt contructor is a callable + which is supplied an argparse subparsers object. Use this handler + callable to add sub-parsers. + + The opt value is SubCommandAttr object with the name of the chosen + sub-parser stored in the 'name' attribute and the values of other + sub-parser arguments available as additional attributes. + """ + + def __init__(self, name, dest=None, handler=None, + title=None, description=None, help=None): + """Construct an sub-command parsing option. + + This behaves similarly to other Opt sub-classes but adds a + 'handler' argument. The handler is a callable which is supplied + an subparsers object when invoked. The add_parser() method on + this subparsers object can be used to register parsers for + sub-commands. 
+ + :param name: the option's name + :param dest: the name of the corresponding ConfigOpts property + :param title: title of the sub-commands group in help output + :param description: description of the group in help output + :param help: a help string giving an overview of available sub-commands + """ + super(SubCommandOpt, self).__init__(name, dest=dest, help=help) + self.handler = handler + self.title = title + self.description = description + + def _add_to_cli(self, parser, group=None): + """Add argparse sub-parsers and invoke the handler method.""" + dest = self.dest + if group is not None: + dest = group.name + '_' + dest + + subparsers = parser.add_subparsers(dest=dest, + title=self.title, + description=self.description, + help=self.help) + + if not self.handler is None: + self.handler(subparsers) + + +class OptGroup(object): + + """ + Represents a group of opts. + + CLI opts in the group are automatically prefixed with the group name. + + Each group corresponds to a section in config files. + + An OptGroup object has no public methods, but has a number of public string + properties: + + name: + the name of the group + title: + the group title as displayed in --help + help: + the group description as displayed in --help + """ + + def __init__(self, name, title=None, help=None): + """Constructs an OptGroup object. + + :param name: the group name + :param title: the group title for --help + :param help: the group description for --help + """ + self.name = name + if title is None: + self.title = "%s options" % title + else: + self.title = title + self.help = help + + self._opts = {} # dict of dicts of (opt:, override:, default:) + self._argparse_group = None + + def _register_opt(self, opt, cli=False): + """Add an opt to this group. 
+ + :param opt: an Opt object + :param cli: whether this is a CLI option + :returns: False if previously registered, True otherwise + :raises: DuplicateOptError if a naming conflict is detected + """ + if _is_opt_registered(self._opts, opt): + return False + + self._opts[opt.dest] = {'opt': opt, 'cli': cli} + + return True + + def _unregister_opt(self, opt): + """Remove an opt from this group. + + :param opt: an Opt object + """ + if opt.dest in self._opts: + del self._opts[opt.dest] + + def _get_argparse_group(self, parser): + if self._argparse_group is None: + """Build an argparse._ArgumentGroup for this group.""" + self._argparse_group = parser.add_argument_group(self.title, + self.help) + return self._argparse_group + + def _clear(self): + """Clear this group's option parsing state.""" + self._argparse_group = None + + +class ParseError(iniparser.ParseError): + def __init__(self, msg, lineno, line, filename): + super(ParseError, self).__init__(msg, lineno, line) + self.filename = filename + + def __str__(self): + return 'at %s:%d, %s: %r' % (self.filename, self.lineno, + self.msg, self.line) + + +class ConfigParser(iniparser.BaseParser): + def __init__(self, filename, sections): + super(ConfigParser, self).__init__() + self.filename = filename + self.sections = sections + self.section = None + + def parse(self): + with open(self.filename) as f: + return super(ConfigParser, self).parse(f) + + def new_section(self, section): + self.section = section + self.sections.setdefault(self.section, {}) + + def assignment(self, key, value): + if not self.section: + raise self.error_no_section() + + self.sections[self.section].setdefault(key, []) + self.sections[self.section][key].append('\n'.join(value)) + + def parse_exc(self, msg, lineno, line=None): + return ParseError(msg, lineno, line, self.filename) + + def error_no_section(self): + return self.parse_exc('Section must be started before assignment', + self.lineno) + + +class MultiConfigParser(object): + def 
__init__(self): + self.parsed = [] + + def read(self, config_files): + read_ok = [] + + for filename in config_files: + sections = {} + parser = ConfigParser(filename, sections) + + try: + parser.parse() + except IOError: + continue + self.parsed.insert(0, sections) + read_ok.append(filename) + + return read_ok + + def get(self, section, names, multi=False): + rvalue = [] + for sections in self.parsed: + if section not in sections: + continue + for name in names: + if name in sections[section]: + if multi: + rvalue = sections[section][name] + rvalue + else: + return sections[section][name] + if multi and rvalue != []: + return rvalue + raise KeyError + + +class ConfigOpts(collections.Mapping): + + """ + Config options which may be set on the command line or in config files. + + ConfigOpts is a configuration option manager with APIs for registering + option schemas, grouping options, parsing option values and retrieving + the values of options. + """ + + def __init__(self): + """Construct a ConfigOpts object.""" + self._opts = {} # dict of dicts of (opt:, override:, default:) + self._groups = {} + + self._args = None + + self._oparser = None + self._cparser = None + self._cli_values = {} + self.__cache = {} + self._config_opts = [] + + def _pre_setup(self, project, prog, version, usage, default_config_files): + """Initialize a ConfigCliParser object for option parsing.""" + + if prog is None: + prog = os.path.basename(sys.argv[0]) + + if default_config_files is None: + default_config_files = find_config_files(project, prog) + + self._oparser = argparse.ArgumentParser(prog=prog, usage=usage) + self._oparser.add_argument('--version', + action='version', + version=version) + + return prog, default_config_files + + def _setup(self, project, prog, version, usage, default_config_files): + """Initialize a ConfigOpts object for option parsing.""" + + self._config_opts = [ + MultiStrOpt('config-file', + default=default_config_files, + metavar='PATH', + help='Path to a config 
file to use. Multiple config ' + 'files can be specified, with values in later ' + 'files taking precedence. The default files ' + ' used are: %s' % (default_config_files, )), + StrOpt('config-dir', + metavar='DIR', + help='Path to a config directory to pull *.conf ' + 'files from. This file set is sorted, so as to ' + 'provide a predictable parse order if individual ' + 'options are over-ridden. The set is parsed after ' + 'the file(s), if any, specified via --config-file, ' + 'hence over-ridden options in the directory take ' + 'precedence.'), + ] + self.register_cli_opts(self._config_opts) + + self.project = project + self.prog = prog + self.version = version + self.usage = usage + self.default_config_files = default_config_files + + def __clear_cache(f): + @functools.wraps(f) + def __inner(self, *args, **kwargs): + if kwargs.pop('clear_cache', True): + self.__cache.clear() + return f(self, *args, **kwargs) + + return __inner + + def __call__(self, + args=None, + project=None, + prog=None, + version=None, + usage=None, + default_config_files=None): + """Parse command line arguments and config files. + + Calling a ConfigOpts object causes the supplied command line arguments + and config files to be parsed, causing opt values to be made available + as attributes of the object. + + The object may be called multiple times, each time causing the previous + set of values to be overwritten. + + Automatically registers the --config-file option with either a supplied + list of default config files, or a list from find_config_files(). + + If the --config-dir option is set, any *.conf files from this + directory are pulled in, after all the file(s) specified by the + --config-file option. 
+
+        :param args: command line arguments (defaults to sys.argv[1:])
+        :param project: the toplevel project name, used to locate config files
+        :param prog: the name of the program (defaults to sys.argv[0] basename)
+        :param version: the program version (for --version)
+        :param usage: a usage string (%prog will be expanded)
+        :param default_config_files: config files to use by default
+        :returns: the list of arguments left over after parsing options
+        :raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError,
+                 RequiredOptError, DuplicateOptError
+        """
+
+        self.clear()
+
+        prog, default_config_files = self._pre_setup(project,
+                                                     prog,
+                                                     version,
+                                                     usage,
+                                                     default_config_files)
+
+        self._setup(project, prog, version, usage, default_config_files)
+
+        self._cli_values = self._parse_cli_opts(args)
+
+        self._parse_config_files()
+
+        self._check_required_opts()
+
+    def __getattr__(self, name):
+        """Look up an option value and perform string substitution.
+
+        :param name: the opt name (or 'dest', more precisely)
+        :returns: the option value (after string substitution) or a GroupAttr
+        :raises: NoSuchOptError, ConfigFileValueError,
+                 TemplateSubstitutionError
+        """
+        return self._get(name)
+
+    def __getitem__(self, key):
+        """Look up an option value and perform string substitution."""
+        return self.__getattr__(key)
+
+    def __contains__(self, key):
+        """Return True if key is the name of a registered opt or group."""
+        return key in self._opts or key in self._groups
+
+    def __iter__(self):
+        """Iterate over all registered opt and group names."""
+        for key in self._opts.keys() + self._groups.keys():
+            yield key
+
+    def __len__(self):
+        """Return the number of options and option groups."""
+        return len(self._opts) + len(self._groups)
+
+    def reset(self):
+        """Clear the object state and unset overrides and defaults."""
+        self._unset_defaults_and_overrides()
+        self.clear()
+
+    @__clear_cache
+    def clear(self):
+        """Clear the state of the object to before it was called.
+
+        Any subparsers added using the add_cli_subparsers() will also be
+        removed as a side-effect of this method.
+        """
+        self._args = None
+        self._cli_values.clear()
+        self._oparser = argparse.ArgumentParser()
+        self._cparser = None
+        self.unregister_opts(self._config_opts)
+        for group in self._groups.values():
+            group._clear()
+
+    @__clear_cache
+    def register_opt(self, opt, group=None, cli=False):
+        """Register an option schema.
+
+        Registering an option schema makes any option value which is previously
+        or subsequently parsed from the command line or config files available
+        as an attribute of this object.
+
+        :param opt: an instance of an Opt sub-class
+        :param cli: whether this is a CLI option
+        :param group: an optional OptGroup object or group name
+        :return: False if the opt was already registered, True otherwise
+        :raises: DuplicateOptError
+        """
+        if group is not None:
+            group = self._get_group(group, autocreate=True)
+            return group._register_opt(opt, cli)
+
+        if _is_opt_registered(self._opts, opt):
+            return False
+
+        self._opts[opt.dest] = {'opt': opt, 'cli': cli}
+
+        return True
+
+    @__clear_cache
+    def register_opts(self, opts, group=None):
+        """Register multiple option schemas at once."""
+        for opt in opts:
+            self.register_opt(opt, group, clear_cache=False)
+
+    @__clear_cache
+    def register_cli_opt(self, opt, group=None):
+        """Register a CLI option schema.
+
+        CLI option schemas must be registered before the command line and
+        config files are parsed. This is to ensure that all CLI options are
+        shown in --help and option validation works as expected.
+
+        :param opt: an instance of an Opt sub-class
+        :param group: an optional OptGroup object or group name
+        :return: False if the opt was already registered, True otherwise
+        :raises: DuplicateOptError, ArgsAlreadyParsedError
+        """
+        if self._args is not None:
+            raise ArgsAlreadyParsedError("cannot register CLI option")
+
+        return self.register_opt(opt, group, cli=True, clear_cache=False)
+
+    @__clear_cache
+    def register_cli_opts(self, opts, group=None):
+        """Register multiple CLI option schemas at once."""
+        for opt in opts:
+            self.register_cli_opt(opt, group, clear_cache=False)
+
+    def register_group(self, group):
+        """Register an option group.
+
+        An option group must be registered before options can be registered
+        with the group.
+
+        :param group: an OptGroup object
+        """
+        if group.name in self._groups:
+            return
+
+        self._groups[group.name] = copy.copy(group)
+
+    @__clear_cache
+    def unregister_opt(self, opt, group=None):
+        """Unregister an option.
+
+        :param opt: an Opt object
+        :param group: an optional OptGroup object or group name
+        :raises: ArgsAlreadyParsedError, NoSuchGroupError
+        """
+        if self._args is not None:
+            raise ArgsAlreadyParsedError("reset before unregistering options")
+
+        if group is not None:
+            self._get_group(group)._unregister_opt(opt)
+        elif opt.dest in self._opts:
+            del self._opts[opt.dest]
+
+    @__clear_cache
+    def unregister_opts(self, opts, group=None):
+        """Unregister multiple CLI option schemas at once."""
+        for opt in opts:
+            self.unregister_opt(opt, group, clear_cache=False)
+
+    def import_opt(self, name, module_str, group=None):
+        """Import an option definition from a module.
+
+        Import a module and check that a given option is registered.
+
+        This is intended for use with global configuration objects
+        like cfg.CONF where modules commonly register options with
+        CONF at module load time. If one module requires an option
+        defined by another module it can use this method to explicitly
+        declare the dependency.
+ + :param name: the name/dest of the opt + :param module_str: the name of a module to import + :param group: an option OptGroup object or group name + :raises: NoSuchOptError, NoSuchGroupError + """ + __import__(module_str) + self._get_opt_info(name, group) + + @__clear_cache + def set_override(self, name, override, group=None): + """Override an opt value. + + Override the command line, config file and default values of a + given option. + + :param name: the name/dest of the opt + :param override: the override value + :param group: an option OptGroup object or group name + :raises: NoSuchOptError, NoSuchGroupError + """ + opt_info = self._get_opt_info(name, group) + opt_info['override'] = override + + @__clear_cache + def set_default(self, name, default, group=None): + """Override an opt's default value. + + Override the default value of given option. A command line or + config file value will still take precedence over this default. + + :param name: the name/dest of the opt + :param default: the default value + :param group: an option OptGroup object or group name + :raises: NoSuchOptError, NoSuchGroupError + """ + opt_info = self._get_opt_info(name, group) + opt_info['default'] = default + + @__clear_cache + def clear_override(self, name, group=None): + """Clear an override an opt value. + + Clear a previously set override of the command line, config file + and default values of a given option. + + :param name: the name/dest of the opt + :param group: an option OptGroup object or group name + :raises: NoSuchOptError, NoSuchGroupError + """ + opt_info = self._get_opt_info(name, group) + opt_info.pop('override', None) + + @__clear_cache + def clear_default(self, name, group=None): + """Clear an override an opt's default value. + + Clear a previously set override of the default value of given option. 
+ + :param name: the name/dest of the opt + :param group: an option OptGroup object or group name + :raises: NoSuchOptError, NoSuchGroupError + """ + opt_info = self._get_opt_info(name, group) + opt_info.pop('default', None) + + def _all_opt_infos(self): + """A generator function for iteration opt infos.""" + for info in self._opts.values(): + yield info, None + for group in self._groups.values(): + for info in group._opts.values(): + yield info, group + + def _all_cli_opts(self): + """A generator function for iterating CLI opts.""" + for info, group in self._all_opt_infos(): + if info['cli']: + yield info['opt'], group + + def _unset_defaults_and_overrides(self): + """Unset any default or override on all options.""" + for info, group in self._all_opt_infos(): + info.pop('default', None) + info.pop('override', None) + + def find_file(self, name): + """Locate a file located alongside the config files. + + Search for a file with the supplied basename in the directories + which we have already loaded config files from and other known + configuration directories. + + The directory, if any, supplied by the config_dir option is + searched first. Then the config_file option is iterated over + and each of the base directories of the config_files values + are searched. Failing both of these, the standard directories + searched by the module level find_config_files() function is + used. The first matching file is returned. + + :param basename: the filename, e.g. 'policy.json' + :returns: the path to a matching file, or None + """ + dirs = [] + if self.config_dir: + dirs.append(_fixpath(self.config_dir)) + + for cf in reversed(self.config_file): + dirs.append(os.path.dirname(_fixpath(cf))) + + dirs.extend(_get_config_dirs(self.project)) + + return _search_dirs(dirs, name) + + def log_opt_values(self, logger, lvl): + """Log the value of all registered opts. + + It's often useful for an app to log its configuration to a log file at + startup for debugging. 
This method dumps to the entire config state to + the supplied logger at a given log level. + + :param logger: a logging.Logger object + :param lvl: the log level (e.g. logging.DEBUG) arg to logger.log() + """ + logger.log(lvl, "*" * 80) + logger.log(lvl, "Configuration options gathered from:") + logger.log(lvl, "command line args: %s", self._args) + logger.log(lvl, "config files: %s", self.config_file) + logger.log(lvl, "=" * 80) + + def _sanitize(opt, value): + """Obfuscate values of options declared secret""" + return value if not opt.secret else '*' * len(str(value)) + + for opt_name in sorted(self._opts): + opt = self._get_opt_info(opt_name)['opt'] + logger.log(lvl, "%-30s = %s", opt_name, + _sanitize(opt, getattr(self, opt_name))) + + for group_name in self._groups: + group_attr = self.GroupAttr(self, self._get_group(group_name)) + for opt_name in sorted(self._groups[group_name]._opts): + opt = self._get_opt_info(opt_name, group_name)['opt'] + logger.log(lvl, "%-30s = %s", + "%s.%s" % (group_name, opt_name), + _sanitize(opt, getattr(group_attr, opt_name))) + + logger.log(lvl, "*" * 80) + + def print_usage(self, file=None): + """Print the usage message for the current program.""" + self._oparser.print_usage(file) + + def print_help(self, file=None): + """Print the help message for the current program.""" + self._oparser.print_help(file) + + def _get(self, name, group=None): + if isinstance(group, OptGroup): + key = (group.name, name) + else: + key = (group, name) + try: + return self.__cache[key] + except KeyError: + value = self._substitute(self._do_get(name, group)) + self.__cache[key] = value + return value + + def _do_get(self, name, group=None): + """Look up an option value. 
+ + :param name: the opt name (or 'dest', more precisely) + :param group: an OptGroup + :returns: the option value, or a GroupAttr object + :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError, + TemplateSubstitutionError + """ + if group is None and name in self._groups: + return self.GroupAttr(self, self._get_group(name)) + + info = self._get_opt_info(name, group) + opt = info['opt'] + + if isinstance(opt, SubCommandOpt): + return self.SubCommandAttr(self, group, opt.dest) + + if 'override' in info: + return info['override'] + + values = [] + if self._cparser is not None: + section = group.name if group is not None else 'DEFAULT' + try: + value = opt._get_from_config_parser(self._cparser, section) + except KeyError: + pass + except ValueError as ve: + raise ConfigFileValueError(str(ve)) + else: + if not opt.multi: + # No need to continue since the last value wins + return value[-1] + values.extend(value) + + name = name if group is None else group.name + '_' + name + value = self._cli_values.get(name) + if value is not None: + if not opt.multi: + return value + + # argparse ignores default=None for nargs='*' + if opt.positional and not value: + value = opt.default + + return value + values + + if values: + return values + + if 'default' in info: + return info['default'] + + return opt.default + + def _substitute(self, value): + """Perform string template substitution. + + Substitute any template variables (e.g. $foo, ${bar}) in the supplied + string value(s) with opt values. + + :param value: the string value, or list of string values + :returns: the substituted string(s) + """ + if isinstance(value, list): + return [self._substitute(i) for i in value] + elif isinstance(value, str): + tmpl = string.Template(value) + return tmpl.safe_substitute(self.StrSubWrapper(self)) + else: + return value + + def _get_group(self, group_or_name, autocreate=False): + """Looks up a OptGroup object. 
+ + Helper function to return an OptGroup given a parameter which can + either be the group's name or an OptGroup object. + + The OptGroup object returned is from the internal dict of OptGroup + objects, which will be a copy of any OptGroup object that users of + the API have access to. + + :param group_or_name: the group's name or the OptGroup object itself + :param autocreate: whether to auto-create the group if it's not found + :raises: NoSuchGroupError + """ + group = group_or_name if isinstance(group_or_name, OptGroup) else None + group_name = group.name if group else group_or_name + + if not group_name in self._groups: + if not group is None or not autocreate: + raise NoSuchGroupError(group_name) + + self.register_group(OptGroup(name=group_name)) + + return self._groups[group_name] + + def _get_opt_info(self, opt_name, group=None): + """Return the (opt, override, default) dict for an opt. + + :param opt_name: an opt name/dest + :param group: an optional group name or OptGroup object + :raises: NoSuchOptError, NoSuchGroupError + """ + if group is None: + opts = self._opts + else: + group = self._get_group(group) + opts = group._opts + + if not opt_name in opts: + raise NoSuchOptError(opt_name, group) + + return opts[opt_name] + + def _parse_config_files(self): + """Parse the config files from --config-file and --config-dir. 
+ + :raises: ConfigFilesNotFoundError, ConfigFileParseError + """ + config_files = list(self.config_file) + + if self.config_dir: + config_dir_glob = os.path.join(self.config_dir, '*.conf') + config_files += sorted(glob.glob(config_dir_glob)) + + config_files = [_fixpath(p) for p in config_files] + + self._cparser = MultiConfigParser() + + try: + read_ok = self._cparser.read(config_files) + except iniparser.ParseError as pe: + raise ConfigFileParseError(pe.filename, str(pe)) + + if read_ok != config_files: + not_read_ok = filter(lambda f: f not in read_ok, config_files) + raise ConfigFilesNotFoundError(not_read_ok) + + def _check_required_opts(self): + """Check that all opts marked as required have values specified. + + :raises: RequiredOptError + """ + for info, group in self._all_opt_infos(): + opt = info['opt'] + + if opt.required: + if ('default' in info or 'override' in info): + continue + + if self._get(opt.dest, group) is None: + raise RequiredOptError(opt.name, group) + + def _parse_cli_opts(self, args): + """Parse command line options. + + Initializes the command line option parser and parses the supplied + command line arguments. + + :param args: the command line arguments + :returns: a dict of parsed option values + :raises: SystemExit, DuplicateOptError + + """ + self._args = args + + for opt, group in self._all_cli_opts(): + opt._add_to_cli(self._oparser, group) + + return vars(self._oparser.parse_args(args)) + + class GroupAttr(collections.Mapping): + + """ + A helper class representing the option values of a group as a mapping + and attributes. + """ + + def __init__(self, conf, group): + """Construct a GroupAttr object. 
+ + :param conf: a ConfigOpts object + :param group: an OptGroup object + """ + self._conf = conf + self._group = group + + def __getattr__(self, name): + """Look up an option value and perform template substitution.""" + return self._conf._get(name, self._group) + + def __getitem__(self, key): + """Look up an option value and perform string substitution.""" + return self.__getattr__(key) + + def __contains__(self, key): + """Return True if key is the name of a registered opt or group.""" + return key in self._group._opts + + def __iter__(self): + """Iterate over all registered opt and group names.""" + for key in self._group._opts.keys(): + yield key + + def __len__(self): + """Return the number of options and option groups.""" + return len(self._group._opts) + + class SubCommandAttr(object): + + """ + A helper class representing the name and arguments of an argparse + sub-parser. + """ + + def __init__(self, conf, group, dest): + """Construct a SubCommandAttr object. + + :param conf: a ConfigOpts object + :param group: an OptGroup object + :param dest: the name of the sub-parser + """ + self._conf = conf + self._group = group + self._dest = dest + + def __getattr__(self, name): + """Look up a sub-parser name or argument value.""" + if name == 'name': + name = self._dest + if self._group is not None: + name = self._group.name + '_' + name + return self._conf._cli_values[name] + + if name in self._conf: + raise DuplicateOptError(name) + + try: + return self._conf._cli_values[name] + except KeyError: + raise NoSuchOptError(name) + + class StrSubWrapper(object): + + """ + A helper class exposing opt values as a dict for string substitution. + """ + + def __init__(self, conf): + """Construct a StrSubWrapper object. + + :param conf: a ConfigOpts object + """ + self.conf = conf + + def __getitem__(self, key): + """Look up an opt value from the ConfigOpts object. 
+ + :param key: an opt name + :returns: an opt value + :raises: TemplateSubstitutionError if attribute is a group + """ + value = getattr(self.conf, key) + if isinstance(value, self.conf.GroupAttr): + raise TemplateSubstitutionError( + 'substituting group %s not supported' % key) + return value + + +class CommonConfigOpts(ConfigOpts): + + DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" + DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + + common_cli_opts = [ + BoolOpt('debug', + short='d', + default=False, + help='Print debugging output'), + BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output'), + ] + + logging_cli_opts = [ + StrOpt('log-config', + metavar='PATH', + help='If this option is specified, the logging configuration ' + 'file specified is used and overrides any other logging ' + 'options specified. Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + StrOpt('log-format', + default=DEFAULT_LOG_FORMAT, + metavar='FORMAT', + help='A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'Default: %(default)s'), + StrOpt('log-date-format', + default=DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s'), + StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. 
' + 'If not set, logging will go to stdout.'), + StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The directory to keep log files in ' + '(will be prepended to --log-file)'), + BoolOpt('use-syslog', + default=False, + help='Use syslog for logging.'), + StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') + ] + + def __init__(self): + super(CommonConfigOpts, self).__init__() + self.register_cli_opts(self.common_cli_opts) + self.register_cli_opts(self.logging_cli_opts) + + +CONF = CommonConfigOpts() diff --git a/reddwarf/openstack/common/context.py b/reddwarf/openstack/common/context.py index a9a16f8e5c..dd7dd04c38 100644 --- a/reddwarf/openstack/common/context.py +++ b/reddwarf/openstack/common/context.py @@ -22,6 +22,13 @@ Projects should subclass this class if they wish to enhance the request context or provide additional information in their specific WSGI pipeline. """ +import itertools +import uuid + + +def generate_request_id(): + return 'req-' + str(uuid.uuid4()) + class RequestContext(object): @@ -31,10 +38,44 @@ class RequestContext(object): """ def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False, - read_only=False, show_deleted=False): + read_only=False, show_deleted=False, request_id=None): self.auth_tok = auth_tok self.user = user self.tenant = tenant self.is_admin = is_admin self.read_only = read_only self.show_deleted = show_deleted + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + + def to_dict(self): + return {'user': self.user, + 'tenant': self.tenant, + 'is_admin': self.is_admin, + 'read_only': self.read_only, + 'show_deleted': self.show_deleted, + 'auth_token': self.auth_tok, + 'request_id': self.request_id} + + +def get_admin_context(show_deleted="no"): + context = RequestContext(None, + tenant=None, + is_admin=True, + show_deleted=show_deleted) + return context + + +def get_context_from_function_and_args(function, args, 
kwargs): + """Find an arg of type RequestContext and return it. + + This is useful in a couple of decorators where we don't + know much about the function we're wrapping. + """ + + for arg in itertools.chain(kwargs.values(), args): + if isinstance(arg, RequestContext): + return arg + + return None diff --git a/reddwarf/openstack/common/eventlet_backdoor.py b/reddwarf/openstack/common/eventlet_backdoor.py new file mode 100644 index 0000000000..48d36b9a21 --- /dev/null +++ b/reddwarf/openstack/common/eventlet_backdoor.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Openstack, LLC. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import gc +import pprint +import sys +import traceback + +import eventlet +import eventlet.backdoor +import greenlet + +from reddwarf.openstack.common import cfg + +eventlet_backdoor_opts = [ + cfg.IntOpt('backdoor_port', + default=None, + help='port for eventlet backdoor to listen') +] + +CONF = cfg.CONF +CONF.register_opts(eventlet_backdoor_opts) + + +def _dont_use_this(): + print "Don't use this, just disconnect instead" + + +def _find_objects(t): + return filter(lambda o: isinstance(o, t), gc.get_objects()) + + +def _print_greenthreads(): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): + print i, gt + traceback.print_stack(gt.gr_frame) + print + + +def initialize_if_enabled(): + backdoor_locals = { + 'exit': _dont_use_this, # So we don't exit the entire process + 'quit': _dont_use_this, # So we don't exit the entire process + 'fo': _find_objects, + 'pgt': _print_greenthreads, + } + + if CONF.backdoor_port is None: + return None + + # NOTE(johannes): The standard sys.displayhook will print the value of + # the last expression and set it to __builtin__._, which overwrites + # the __builtin__._ that gettext sets. Let's switch to using pprint + # since it won't interact poorly with gettext, and it's easier to + # read the output too. 
+ def displayhook(val): + if val is not None: + pprint.pprint(val) + sys.displayhook = displayhook + + sock = eventlet.listen(('localhost', CONF.backdoor_port)) + port = sock.getsockname()[1] + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/reddwarf/openstack/common/exception.py b/reddwarf/openstack/common/exception.py index ba32da550b..5e42a91c9f 100644 --- a/reddwarf/openstack/common/exception.py +++ b/reddwarf/openstack/common/exception.py @@ -21,17 +21,7 @@ Exceptions common to OpenStack projects import logging - -class ProcessExecutionError(IOError): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, - description=None): - if description is None: - description = "Unexpected error while running command." - if exit_code is None: - exit_code = '-' - message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % ( - description, cmd, exit_code, stdout, stderr) - IOError.__init__(self, message) +from reddwarf.openstack.common.gettextutils import _ class Error(Exception): @@ -109,7 +99,7 @@ def wrap_exception(f): except Exception, e: if not isinstance(e, Error): #exc_type, exc_value, exc_traceback = sys.exc_info() - logging.exception('Uncaught exception') + logging.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) raise diff --git a/reddwarf/openstack/common/excutils.py b/reddwarf/openstack/common/excutils.py index 67c9fa9511..e6fbcdc3e7 100644 --- a/reddwarf/openstack/common/excutils.py +++ b/reddwarf/openstack/common/excutils.py @@ -24,26 +24,28 @@ import logging import sys import traceback +from reddwarf.openstack.common.gettextutils import _ + @contextlib.contextmanager def save_and_reraise_exception(): """Save current exception, run some code and then re-raise. In some cases the exception context can be cleared, resulting in None - being attempted to be reraised after an exception handler is run. 
This + being attempted to be re-raised after an exception handler is run. This can happen when eventlet switches greenthreads or when running an exception handler, code raises and catches an exception. In both cases the exception context will be cleared. To work around this, we save the exception state, run handler code, and then re-raise the original exception. If another exception occurs, the - saved exception is logged and the new exception is reraised. + saved exception is logged and the new exception is re-raised. """ type_, value, tb = sys.exc_info() try: yield except Exception: - logging.error('Original exception being dropped: %s' % - (traceback.format_exception(type_, value, tb))) + logging.error(_('Original exception being dropped: %s'), + traceback.format_exception(type_, value, tb)) raise raise type_, value, tb diff --git a/reddwarf/openstack/common/fileutils.py b/reddwarf/openstack/common/fileutils.py new file mode 100644 index 0000000000..4746ad4981 --- /dev/null +++ b/reddwarf/openstack/common/fileutils.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import errno +import os + + +def ensure_tree(path): + """Create a directory (and any ancestor directories required) + + :param path: Directory to create + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + if not os.path.isdir(path): + raise + else: + raise diff --git a/reddwarf/openstack/common/gettextutils.py b/reddwarf/openstack/common/gettextutils.py new file mode 100644 index 0000000000..989e9c0ae6 --- /dev/null +++ b/reddwarf/openstack/common/gettextutils.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. 
+ +Usual usage in an openstack.common module: + + from reddwarf.openstack.common.gettextutils import _ +""" + +import gettext + + +t = gettext.translation('openstack-common', 'locale', fallback=True) + + +def _(msg): + return t.ugettext(msg) diff --git a/reddwarf/openstack/common/importutils.py b/reddwarf/openstack/common/importutils.py index 2fbb0291a0..2a28b455e8 100644 --- a/reddwarf/openstack/common/importutils.py +++ b/reddwarf/openstack/common/importutils.py @@ -29,7 +29,7 @@ def import_class(import_str): try: __import__(mod_str) return getattr(sys.modules[mod_str], class_str) - except (ImportError, ValueError, AttributeError), exc: + except (ValueError, AttributeError): raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info()))) diff --git a/reddwarf/openstack/common/iniparser.py b/reddwarf/openstack/common/iniparser.py new file mode 100644 index 0000000000..241284449e --- /dev/null +++ b/reddwarf/openstack/common/iniparser.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +class ParseError(Exception): + def __init__(self, message, lineno, line): + self.msg = message + self.line = line + self.lineno = lineno + + def __str__(self): + return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line) + + +class BaseParser(object): + lineno = 0 + parse_exc = ParseError + + def _assignment(self, key, value): + self.assignment(key, value) + return None, [] + + def _get_section(self, line): + if line[-1] != ']': + return self.error_no_section_end_bracket(line) + if len(line) <= 2: + return self.error_no_section_name(line) + + return line[1:-1] + + def _split_key_value(self, line): + colon = line.find(':') + equal = line.find('=') + if colon < 0 and equal < 0: + return self.error_invalid_assignment(line) + + if colon < 0 or (equal >= 0 and equal < colon): + key, value = line[:equal], line[equal + 1:] + else: + key, value = line[:colon], line[colon + 1:] + + value = value.strip() + if ((value and value[0] == value[-1]) and + (value[0] == "\"" or value[0] == "'")): + value = value[1:-1] + return key.strip(), [value] + + def parse(self, lineiter): + key = None + value = [] + + for line in lineiter: + self.lineno += 1 + + line = line.rstrip() + if not line: + # Blank line, ends multi-line values + if key: + key, value = self._assignment(key, value) + continue + elif line[0] in (' ', '\t'): + # Continuation of previous assignment + if key is None: + self.error_unexpected_continuation(line) + else: + value.append(line.lstrip()) + continue + + if key: + # Flush previous assignment, if any + key, value = self._assignment(key, value) + + if line[0] == '[': + # Section start + section = self._get_section(line) + if section: + self.new_section(section) + elif line[0] in '#;': + self.comment(line[1:].lstrip()) + else: + key, value = self._split_key_value(line) + if not key: + return self.error_empty_key(line) + + if key: + # Flush previous assignment, if any + self._assignment(key, value) + + def assignment(self, key, value): + """Called when a full 
assignment is parsed""" + raise NotImplementedError() + + def new_section(self, section): + """Called when a new section is started""" + raise NotImplementedError() + + def comment(self, comment): + """Called when a comment is parsed""" + pass + + def error_invalid_assignment(self, line): + raise self.parse_exc("No ':' or '=' found in assignment", + self.lineno, line) + + def error_empty_key(self, line): + raise self.parse_exc('Key cannot be empty', self.lineno, line) + + def error_unexpected_continuation(self, line): + raise self.parse_exc('Unexpected continuation line', + self.lineno, line) + + def error_no_section_end_bracket(self, line): + raise self.parse_exc('Invalid section (must end with ])', + self.lineno, line) + + def error_no_section_name(self, line): + raise self.parse_exc('Empty section name', self.lineno, line) diff --git a/reddwarf/openstack/common/jsonutils.py b/reddwarf/openstack/common/jsonutils.py index 7379d4d4a3..e2612da7e6 100644 --- a/reddwarf/openstack/common/jsonutils.py +++ b/reddwarf/openstack/common/jsonutils.py @@ -120,7 +120,7 @@ def to_primitive(value, convert_instances=False, level=0): level=level + 1) else: return value - except TypeError, e: + except TypeError: # Class objects are tricky since they may define something like # __iter__ defined but it isn't callable as list(). return unicode(value) diff --git a/reddwarf/openstack/common/lockutils.py b/reddwarf/openstack/common/lockutils.py new file mode 100644 index 0000000000..90a9b83342 --- /dev/null +++ b/reddwarf/openstack/common/lockutils.py @@ -0,0 +1,233 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import errno +import functools +import os +import shutil +import tempfile +import time +import weakref + +from eventlet import semaphore + +from reddwarf.openstack.common import cfg +from reddwarf.openstack.common import fileutils +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +util_opts = [ + cfg.BoolOpt('disable_process_locking', default=False, + help='Whether to disable inter-process locks'), + cfg.StrOpt('lock_path', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), + '../')), + help='Directory to use for lock files') +] + + +CONF = cfg.CONF +CONF.register_opts(util_opts) + + +class _InterProcessLock(object): + """Lock implementation which allows multiple locks, working around + issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does + not require any cleanup. Since the lock is always held on a file + descriptor rather than outside of the process, the lock gets dropped + automatically if the process crashes, even if __exit__ is not executed. + + There are no guarantees regarding usage by multiple green threads in a + single process here. This lock works only between processes. Exclusive + access between local threads should be achieved using the semaphores + in the @synchronized decorator. + + Note these locks are released when the descriptor is closed, so it's not + safe to close the file descriptor while another green thread holds the + lock. 
Just opening and closing the lock file can break synchronisation, + so lock files must be accessed only using this abstraction. + """ + + def __init__(self, name): + self.lockfile = None + self.fname = name + + def __enter__(self): + self.lockfile = open(self.fname, 'w') + + while True: + try: + # Using non-blocking locks since green threads are not + # patched to deal with blocking locking calls. + # Also upon reading the MSDN docs for locking(), it seems + # to have a laughable 10 attempts "blocking" mechanism. + self.trylock() + return self + except IOError, e: + if e.errno in (errno.EACCES, errno.EAGAIN): + # external locks synchronise things like iptables + # updates - give it some time to prevent busy spinning + time.sleep(0.01) + else: + raise + + def __exit__(self, exc_type, exc_val, exc_tb): + try: + self.unlock() + self.lockfile.close() + except IOError: + LOG.exception(_("Could not release the acquired lock `%s`"), + self.fname) + + def trylock(self): + raise NotImplementedError() + + def unlock(self): + raise NotImplementedError() + + +class _WindowsLock(_InterProcessLock): + def trylock(self): + msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1) + + def unlock(self): + msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1) + + +class _PosixLock(_InterProcessLock): + def trylock(self): + fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) + + def unlock(self): + fcntl.lockf(self.lockfile, fcntl.LOCK_UN) + + +if os.name == 'nt': + import msvcrt + InterProcessLock = _WindowsLock +else: + import fcntl + InterProcessLock = _PosixLock + +_semaphores = weakref.WeakValueDictionary() + + +def synchronized(name, lock_file_prefix, external=False, lock_path=None): + """Synchronization decorator. + + Decorating a method like so:: + + @synchronized('mylock') + def foo(self, *args): + ... + + ensures that only one thread will execute the bar method at a time. + + Different methods can share the same lock:: + + @synchronized('mylock') + def foo(self, *args): + ... 
+ + @synchronized('mylock') + def bar(self, *args): + ... + + This way only one of either foo or bar can be executing at a time. + + The lock_file_prefix argument is used to provide lock files on disk with a + meaningful prefix. The prefix should end with a hyphen ('-') if specified. + + The external keyword argument denotes whether this lock should work across + multiple processes. This means that if two different workers both run a + a method decorated with @synchronized('mylock', external=True), only one + of them will execute at a time. + + The lock_path keyword argument is used to specify a special location for + external lock files to live. If nothing is set, then CONF.lock_path is + used as a default. + """ + + def wrap(f): + @functools.wraps(f) + def inner(*args, **kwargs): + # NOTE(soren): If we ever go natively threaded, this will be racy. + # See http://stackoverflow.com/questions/5390569/dyn + # amically-allocating-and-destroying-mutexes + sem = _semaphores.get(name, semaphore.Semaphore()) + if name not in _semaphores: + # this check is not racy - we're already holding ref locally + # so GC won't remove the item and there was no IO switch + # (only valid in greenthreads) + _semaphores[name] = sem + + with sem: + LOG.debug(_('Got semaphore "%(lock)s" for method ' + '"%(method)s"...'), {'lock': name, + 'method': f.__name__}) + if external and not CONF.disable_process_locking: + LOG.debug(_('Attempting to grab file lock "%(lock)s" for ' + 'method "%(method)s"...'), + {'lock': name, 'method': f.__name__}) + cleanup_dir = False + + # We need a copy of lock_path because it is non-local + local_lock_path = lock_path + if not local_lock_path: + local_lock_path = CONF.lock_path + + if not local_lock_path: + cleanup_dir = True + local_lock_path = tempfile.mkdtemp() + + if not os.path.exists(local_lock_path): + cleanup_dir = True + fileutils.ensure_tree(local_lock_path) + + # NOTE(mikal): the lock name cannot contain directory + # separators + safe_name = 
name.replace(os.sep, '_') + lock_file_name = '%s%s' % (lock_file_prefix, safe_name) + lock_file_path = os.path.join(local_lock_path, + lock_file_name) + + try: + lock = InterProcessLock(lock_file_path) + with lock: + LOG.debug(_('Got file lock "%(lock)s" at %(path)s ' + 'for method "%(method)s"...'), + {'lock': name, + 'path': lock_file_path, + 'method': f.__name__}) + retval = f(*args, **kwargs) + finally: + # NOTE(vish): This removes the tempdir if we needed + # to create one. This is used to cleanup + # the locks left behind by unit tests. + if cleanup_dir: + shutil.rmtree(local_lock_path) + else: + retval = f(*args, **kwargs) + + return retval + return inner + return wrap diff --git a/reddwarf/openstack/common/log.py b/reddwarf/openstack/common/log.py new file mode 100644 index 0000000000..09f220dcab --- /dev/null +++ b/reddwarf/openstack/common/log.py @@ -0,0 +1,476 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Openstack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. 
Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. + +""" + +import cStringIO +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import stat +import sys +import traceback + +from reddwarf.openstack.common import cfg +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import jsonutils +from reddwarf.openstack.common import local +from reddwarf.openstack.common import notifier + + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s %(levelname)s %(name)s [%(request_id)s ' + '%(user)s %(tenant)s] %(instance)s' + '%(message)s', + help='format string to use for log messages with context'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s %(process)d %(levelname)s %(name)s [-]' + ' %(instance)s%(message)s', + help='format string to use for log messages without context'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='data to append to log format when level is DEBUG'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s %(process)d TRACE %(name)s %(instance)s', + help='prefix each line of exception output with this format'), + cfg.ListOpt('default_log_levels', + default=[ + 'amqplib=WARN', + 'sqlalchemy=WARN', + 'boto=WARN', + 'suds=INFO', + 'keystone=INFO', + 'eventlet.wsgi.server=WARN' + ], + help='list of logger=LEVEL pairs'), + cfg.BoolOpt('publish_errors', + default=False, + help='publish error events'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='make deprecations fatal'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the 
instance. + cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='If an instance is passed with the log message, format ' + 'it like this'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='If an instance UUID is passed with the log message, ' + 'format it like this'), +] + + +generic_log_opts = [ + cfg.StrOpt('logdir', + default=None, + help='Log output to a per-service log file in named directory'), + cfg.StrOpt('logfile', + default=None, + help='Log output to a named file'), + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error'), + cfg.StrOpt('logfile_mode', + default='0644', + help='Default file mode used when creating log files'), +] + + +CONF = cfg.CONF +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. +logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file or CONF.logfile + logdir = CONF.log_dir or CONF.logdir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + +class ContextAdapter(logging.LoggerAdapter): + warn = 
logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + def deprecated(self, msg, *args, **kwargs): + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + else: + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + else: + instance_uuid = kwargs.pop('instance_uuid', None) + if instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra.update({'instance': instance_extra}) + + extra.update({"project": self.project}) + extra.update({"version": self.version}) + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. 
+ self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [itertools.ifilter( + lambda x: x, + line.rstrip().splitlines()) for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return jsonutils.dumps(message) + + +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + if ('reddwarf.openstack.common.notifier.log_notifier' in + CONF.notification_driver): + return + notifier.api.notify(None, 'error.publisher', + 'error_notification', + notifier.api.ERROR, + dict(error=record.msg)) + + +def _create_logging_excepthook(product_name): + def logging_excepthook(type, value, tb): + extra = {} + if CONF.verbose: + extra['exc_info'] = (type, value, tb) + getLogger(product_name).critical(str(value), **extra) + return logging_excepthook + + +def setup(product_name): + """Setup logging.""" + sys.excepthook = _create_logging_excepthook(product_name) + + if CONF.log_config: + try: + logging.config.fileConfig(CONF.log_config) + except Exception: + traceback.print_exc() + raise + else: + _setup_logging_from_conf(product_name) + + +def 
set_defaults(logging_context_format_string): + cfg.set_defaults(log_opts, + logging_context_format_string= + logging_context_format_string) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +def _setup_logging_from_conf(product_name): + log_root = getLogger(product_name).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + if CONF.use_syslog: + facility = _find_facility_from_conf() + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + mode = int(CONF.logfile_mode, 8) + st = os.stat(logpath) + if st.st_mode != (stat.S_IFREG | mode): + os.chmod(logpath, mode) + + if CONF.use_stderr: + streamlog = ColorHandler() + log_root.addHandler(streamlog) + + elif not CONF.log_file: + # pass sys.stdout as a positional argument + # python2.6 calls the argument strm, in 2.7 it's stream + streamlog = logging.StreamHandler(sys.stdout) + log_root.addHandler(streamlog) + + if CONF.publish_errors: + log_root.addHandler(PublishErrorsHandler(logging.ERROR)) + + for 
handler in log_root.handlers: + datefmt = CONF.log_date_format + if CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + handler.setFormatter(LegacyFormatter(datefmt=datefmt)) + + if CONF.verbose or CONF.debug: + log_root.setLevel(logging.DEBUG) + else: + log_root.setLevel(logging.INFO) + + level = logging.NOTSET + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + level = logging.getLevelName(level_name) + logger = logging.getLogger(mod) + logger.setLevel(level) + for handler in log_root.handlers: + logger.addHandler(handler) + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg) + + +class LegacyFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. 
+ + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + """ + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + # NOTE(sdague): default the fancier formating params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id', None): + self._fmt = CONF.logging_context_format_string + else: + self._fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + self._fmt += " " + CONF.logging_debug_format_suffix + + # Cache this on the record, Logger will respect our formated copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = cStringIO.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + 
+ def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/reddwarf/openstack/common/loopingcall.py b/reddwarf/openstack/common/loopingcall.py new file mode 100644 index 0000000000..1f18cfc3ea --- /dev/null +++ b/reddwarf/openstack/common/loopingcall.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from eventlet import event +from eventlet import greenthread + +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. 
+ + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCall.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCall.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCall(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = timeutils.utcnow() + self.f(*self.args, **self.kw) + end = timeutils.utcnow() + if not self._running: + break + delay = interval - timeutils.delta_seconds(start, end) + if delay <= 0: + LOG.warn(_('task run outlasted interval by %s sec') % + -delay) + greenthread.sleep(delay if delay > 0 else 0) + except LoopingCallDone, e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() diff --git a/reddwarf/openstack/common/middleware/__init__.py b/reddwarf/openstack/common/middleware/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/reddwarf/openstack/common/middleware/context.py b/reddwarf/openstack/common/middleware/context.py new file mode 100644 index 0000000000..05294d48a6 --- /dev/null +++ b/reddwarf/openstack/common/middleware/context.py @@ -0,0 +1,64 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Middleware that attaches a context to the WSGI request +""" + +from reddwarf.openstack.common import context +from reddwarf.openstack.common import importutils +from reddwarf.openstack.common import wsgi + + +class ContextMiddleware(wsgi.Middleware): + def __init__(self, app, options): + self.options = options + super(ContextMiddleware, self).__init__(app) + + def make_context(self, *args, **kwargs): + """ + Create a context with the given arguments. + """ + + # Determine the context class to use + ctxcls = context.RequestContext + if 'context_class' in self.options: + ctxcls = importutils.import_class(self.options['context_class']) + + return ctxcls(*args, **kwargs) + + def process_request(self, req): + """ + Extract any authentication information in the request and + construct an appropriate context from it. + """ + # Use the default empty context, with admin turned on for + # backwards compatibility + req.context = self.make_context(is_admin=True) + + +def filter_factory(global_conf, **local_conf): + """ + Factory method for paste.deploy + """ + conf = global_conf.copy() + conf.update(local_conf) + + def filter(app): + return ContextMiddleware(app, conf) + + return filter diff --git a/reddwarf/openstack/common/network_utils.py b/reddwarf/openstack/common/network_utils.py new file mode 100644 index 0000000000..69f6732163 --- /dev/null +++ b/reddwarf/openstack/common/network_utils.py @@ -0,0 +1,68 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Network-related utilities and helper functions. +""" + +import logging + +LOG = logging.getLogger(__name__) + + +def parse_host_port(address, default_port=None): + """ + Interpret a string as a host:port pair. + An IPv6 address MUST be escaped if accompanied by a port, + because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 + means both [2001:db8:85a3::8a2e:370:7334] and + [2001:db8:85a3::8a2e:370]:7334. + + >>> parse_host_port('server01:80') + ('server01', 80) + >>> parse_host_port('server01') + ('server01', None) + >>> parse_host_port('server01', default_port=1234) + ('server01', 1234) + >>> parse_host_port('[::1]:80') + ('::1', 80) + >>> parse_host_port('[::1]') + ('::1', None) + >>> parse_host_port('[::1]', default_port=1234) + ('::1', 1234) + >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) + ('2001:db8:85a3::8a2e:370:7334', 1234) + + """ + if address[0] == '[': + # Escaped ipv6 + _host, _port = address[1:].split(']') + host = _host + if ':' in _port: + port = _port.split(':')[1] + else: + port = default_port + else: + if address.count(':') == 1: + host, port = address.split(':') + else: + # 0 means ipv4, >1 means ipv6. + # We prohibit unescaped ipv6 addresses with port. 
+ host = address + port = default_port + + return (host, None if port is None else int(port)) diff --git a/reddwarf/openstack/common/notifier/__init__.py b/reddwarf/openstack/common/notifier/__init__.py new file mode 100644 index 0000000000..482d54e4fd --- /dev/null +++ b/reddwarf/openstack/common/notifier/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/reddwarf/openstack/common/notifier/api.py b/reddwarf/openstack/common/notifier/api.py new file mode 100644 index 0000000000..1ac0906b95 --- /dev/null +++ b/reddwarf/openstack/common/notifier/api.py @@ -0,0 +1,182 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from reddwarf.openstack.common import cfg +from reddwarf.openstack.common import context +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import importutils +from reddwarf.openstack.common import jsonutils +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + +notifier_opts = [ + cfg.MultiStrOpt('notification_driver', + default=[], + deprecated_name='list_notifier_drivers', + help='Driver or drivers to handle sending notifications'), + cfg.StrOpt('default_notification_level', + default='INFO', + help='Default notification level for outgoing notifications'), + cfg.StrOpt('default_publisher_id', + default='$host', + help='Default publisher_id for outgoing notifications'), +] + +CONF = cfg.CONF +CONF.register_opts(notifier_opts) + +WARN = 'WARN' +INFO = 'INFO' +ERROR = 'ERROR' +CRITICAL = 'CRITICAL' +DEBUG = 'DEBUG' + +log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + + +class BadPriorityException(Exception): + pass + + +def notify_decorator(name, fn): + """ decorator for notify which is used from utils.monkey_patch() + + :param name: name of the function + :param function: - object of the function + :returns: function -- decorated function + + """ + def wrapped_func(*args, **kwarg): + body = {} + body['args'] = [] + body['kwarg'] = {} + for arg in args: + body['args'].append(arg) + for key in kwarg: + body['kwarg'][key] = kwarg[key] + + ctxt = context.get_context_from_function_and_args(fn, args, kwarg) + notify(ctxt, + CONF.default_publisher_id, + name, + CONF.default_notification_level, + body) + return fn(*args, **kwarg) + return wrapped_func + + +def publisher_id(service, host=None): + if not host: + host = CONF.host + return "%s.%s" % (service, host) + + +def notify(context, publisher_id, event_type, priority, payload): + """Sends a notification using the specified driver + + :param publisher_id: the source 
worker_type.host of the message + :param event_type: the literal type of event (ex. Instance Creation) + :param priority: patterned after the enumeration of Python logging + levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) + :param payload: A python dictionary of attributes + + Outgoing message format includes the above parameters, and appends the + following: + + message_id + a UUID representing the id for this notification + + timestamp + the GMT timestamp the notification was sent at + + The composite message will be constructed as a dictionary of the above + attributes, which will then be sent via the transport mechanism defined + by the driver. + + Message example:: + + {'message_id': str(uuid.uuid4()), + 'publisher_id': 'compute.host1', + 'timestamp': timeutils.utcnow(), + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... }} + + """ + if priority not in log_levels: + raise BadPriorityException( + _('%s not in valid priorities') % priority) + + # Ensure everything is JSON serializable. + payload = jsonutils.to_primitive(payload, convert_instances=True) + + msg = dict(message_id=str(uuid.uuid4()), + publisher_id=publisher_id, + event_type=event_type, + priority=priority, + payload=payload, + timestamp=str(timeutils.utcnow())) + + for driver in _get_drivers(): + try: + driver.notify(context, msg) + except Exception as e: + LOG.exception(_("Problem '%(e)s' attempting to " + "send to notification system. " + "Payload=%(payload)s") + % dict(e=e, payload=payload)) + + +_drivers = None + + +def _get_drivers(): + """Instantiate, cache, and return drivers based on the CONF.""" + global _drivers + if _drivers is None: + _drivers = {} + for notification_driver in CONF.notification_driver: + add_driver(notification_driver) + + return _drivers.values() + + +def add_driver(notification_driver): + """Add a notification driver at runtime.""" + # Make sure the driver list is initialized. 
+ _get_drivers() + if isinstance(notification_driver, basestring): + # Load and add + try: + driver = importutils.import_module(notification_driver) + _drivers[notification_driver] = driver + except ImportError: + LOG.exception(_("Failed to load notifier %s. " + "These notifications will not be sent.") % + notification_driver) + else: + # Driver is already loaded; just add the object. + _drivers[notification_driver] = notification_driver + + +def _reset_drivers(): + """Used by unit tests to reset the drivers.""" + global _drivers + _drivers = None diff --git a/reddwarf/openstack/common/notifier/log_notifier.py b/reddwarf/openstack/common/notifier/log_notifier.py new file mode 100644 index 0000000000..24919e1db5 --- /dev/null +++ b/reddwarf/openstack/common/notifier/log_notifier.py @@ -0,0 +1,35 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from reddwarf.openstack.common import cfg +from reddwarf.openstack.common import jsonutils +from reddwarf.openstack.common import log as logging + + +CONF = cfg.CONF + + +def notify(_context, message): + """Notifies the recipient of the desired event given the model. 
+ Log notifications using openstack's default logging system""" + + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + logger = logging.getLogger( + 'reddwarf.openstack.common.notification.%s' % + message['event_type']) + getattr(logger, priority)(jsonutils.dumps(message)) diff --git a/reddwarf/guestagent/agent.py b/reddwarf/openstack/common/notifier/no_op_notifier.py similarity index 83% rename from reddwarf/guestagent/agent.py rename to reddwarf/openstack/common/notifier/no_op_notifier.py index a34cd64126..ee1ddbdcac 100644 --- a/reddwarf/guestagent/agent.py +++ b/reddwarf/openstack/common/notifier/no_op_notifier.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack LLC. # All Rights Reserved. # @@ -15,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -import subprocess -print "This is only a test!" -subprocess.call(["touch", "FOO.txt"]) + +def notify(_context, message): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/reddwarf/openstack/common/notifier/rabbit_notifier.py b/reddwarf/openstack/common/notifier/rabbit_notifier.py new file mode 100644 index 0000000000..7859903306 --- /dev/null +++ b/reddwarf/openstack/common/notifier/rabbit_notifier.py @@ -0,0 +1,29 @@ +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.notifier import rpc_notifier + +LOG = logging.getLogger(__name__) + + +def notify(context, message): + """Deprecated in Grizzly. Please use rpc_notifier instead.""" + + LOG.deprecated(_("The rabbit_notifier is now deprecated." + " Please use rpc_notifier instead.")) + rpc_notifier.notify(context, message) diff --git a/reddwarf/openstack/common/notifier/rpc_notifier.py b/reddwarf/openstack/common/notifier/rpc_notifier.py new file mode 100644 index 0000000000..873e20c748 --- /dev/null +++ b/reddwarf/openstack/common/notifier/rpc_notifier.py @@ -0,0 +1,46 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from reddwarf.openstack.common import cfg +from reddwarf.openstack.common import context as req_context +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'notification_topics', default=['notifications', ], + help='AMQP topic used for openstack notifications') + +CONF = cfg.CONF +CONF.register_opt(notification_topic_opt) + + +def notify(context, message): + """Sends a notification via RPC""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.notification_topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), locals()) diff --git a/reddwarf/openstack/common/notifier/test_notifier.py b/reddwarf/openstack/common/notifier/test_notifier.py new file mode 100644 index 0000000000..5e348803dc --- /dev/null +++ b/reddwarf/openstack/common/notifier/test_notifier.py @@ -0,0 +1,22 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +NOTIFICATIONS = [] + + +def notify(_context, message): + """Test notifier, stores notifications in memory for unittests.""" + NOTIFICATIONS.append(message) diff --git a/reddwarf/openstack/common/pastedeploy.py b/reddwarf/openstack/common/pastedeploy.py new file mode 100644 index 0000000000..8932d13ea7 --- /dev/null +++ b/reddwarf/openstack/common/pastedeploy.py @@ -0,0 +1,164 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from paste import deploy + +from reddwarf.openstack.common import local + + +class BasePasteFactory(object): + + """A base class for paste app and filter factories. + + Sub-classes must override the KEY class attribute and provide + a __call__ method. + """ + + KEY = None + + def __init__(self, data): + self.data = data + + def _import_factory(self, local_conf): + """Import an app/filter class. + + Lookup the KEY from the PasteDeploy local conf and import the + class named there. This class can then be used as an app or + filter factory. + + Note we support the : format. + + Note also that if you do e.g. + + key = + value + + then ConfigParser returns a value with a leading newline, so + we strip() the value before using it. 
+ """ + mod_str, _sep, class_str = local_conf[self.KEY].strip().rpartition(':') + del local_conf[self.KEY] + + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + + +class AppFactory(BasePasteFactory): + + """A Generic paste.deploy app factory. + + This requires openstack.app_factory to be set to a callable which returns a + WSGI app when invoked. The format of the name is : e.g. + + [app:myfooapp] + paste.app_factory = openstack.common.pastedeploy:app_factory + openstack.app_factory = myapp:Foo + + The WSGI app constructor must accept a data object and a local config + dict as its two arguments. + """ + + KEY = 'openstack.app_factory' + + def __call__(self, global_conf, **local_conf): + """The actual paste.app_factory protocol method.""" + factory = self._import_factory(local_conf) + return factory(self.data, **local_conf) + + +class FilterFactory(AppFactory): + + """A Generic paste.deploy filter factory. + + This requires openstack.filter_factory to be set to a callable which + returns a WSGI filter when invoked. The format is : e.g. + + [filter:myfoofilter] + paste.filter_factory = openstack.common.pastedeploy:filter_factory + openstack.filter_factory = myfilter:Foo + + The WSGI filter constructor must accept a WSGI app, a data object and + a local config dict as its three arguments. 
+ """ + + KEY = 'openstack.filter_factory' + + def __call__(self, global_conf, **local_conf): + """The actual paste.filter_factory protocol method.""" + factory = self._import_factory(local_conf) + + def filter(app): + return factory(app, self.data, **local_conf) + + return filter + + +def app_factory(global_conf, **local_conf): + """A paste app factory used with paste_deploy_app().""" + return local.store.app_factory(global_conf, **local_conf) + + +def filter_factory(global_conf, **local_conf): + """A paste filter factory used with paste_deploy_app().""" + return local.store.filter_factory(global_conf, **local_conf) + + +def paste_deploy_app(paste_config_file, app_name, data): + """Load a WSGI app from a PasteDeploy configuration. + + Use deploy.loadapp() to load the app from the PasteDeploy configuration, + ensuring that the supplied data object is passed to the app and filter + factories defined in this module. + + To use these factories and the data object, the configuration should look + like this: + + [app:myapp] + paste.app_factory = openstack.common.pastedeploy:app_factory + openstack.app_factory = myapp:App + ... + [filter:myfilter] + paste.filter_factory = openstack.common.pastedeploy:filter_factory + openstack.filter_factory = myapp:Filter + + and then: + + myapp.py: + + class App(object): + def __init__(self, data): + ... + + class Filter(object): + def __init__(self, app, data): + ... 
+ + :param paste_config_file: a PasteDeploy config file + :param app_name: the name of the app/pipeline to load from the file + :param data: a data object to supply to the app and its filters + :returns: the WSGI app + """ + (af, ff) = (AppFactory(data), FilterFactory(data)) + + local.store.app_factory = af + local.store.filter_factory = ff + try: + return deploy.loadapp("config:%s" % paste_config_file, name=app_name) + finally: + del local.store.app_factory + del local.store.filter_factory diff --git a/reddwarf/openstack/common/periodic_task.py b/reddwarf/openstack/common/periodic_task.py new file mode 100644 index 0000000000..3dc9728b6f --- /dev/null +++ b/reddwarf/openstack/common/periodic_task.py @@ -0,0 +1,115 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def periodic_task(*args, **kwargs): + """Decorator to indicate that a method is a periodic task. + + This decorator can be used in two ways: + + 1. Without arguments '@periodic_task', this will be run on every tick + of the periodic scheduler. + + 2. With arguments, @periodic_task(ticks_between_runs=N), this will be + run on every N ticks of the periodic scheduler. 
+ """ + def decorator(f): + f._periodic_task = True + f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0) + return f + + # NOTE(sirp): The `if` is necessary to allow the decorator to be used with + # and without parens. + # + # In the 'with-parens' case (with kwargs present), this function needs to + # return a decorator function since the interpreter will invoke it like: + # + # periodic_task(*args, **kwargs)(f) + # + # In the 'without-parens' case, the original function will be passed + # in as the first argument, like: + # + # periodic_task(f) + if kwargs: + return decorator + else: + return decorator(args[0]) + + +class _PeriodicTasksMeta(type): + def __init__(cls, names, bases, dict_): + """Metaclass that allows us to collect decorated periodic tasks.""" + super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) + + # NOTE(sirp): if the attribute is not present then we must be the base + # class, so, go ahead and initialize it. If the attribute is present, + # then we're a subclass so make a copy of it so we don't step on our + # parent's toes. + try: + cls._periodic_tasks = cls._periodic_tasks[:] + except AttributeError: + cls._periodic_tasks = [] + + try: + cls._ticks_to_skip = cls._ticks_to_skip.copy() + except AttributeError: + cls._ticks_to_skip = {} + + # This uses __dict__ instead of + # inspect.getmembers(cls, inspect.ismethod) so only the methods of the + # current class are added when this class is scanned, and base classes + # are not added redundantly. 
+ for value in cls.__dict__.values(): + if getattr(value, '_periodic_task', False): + task = value + name = task.__name__ + cls._periodic_tasks.append((name, task)) + cls._ticks_to_skip[name] = task._ticks_between_runs + + +class PeriodicTasks(object): + __metaclass__ = _PeriodicTasksMeta + + def run_periodic_tasks(self, context, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + for task_name, task in self._periodic_tasks: + full_task_name = '.'.join([self.__class__.__name__, task_name]) + + ticks_to_skip = self._ticks_to_skip[task_name] + if ticks_to_skip > 0: + LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s" + " ticks left until next run"), + dict(full_task_name=full_task_name, + ticks_to_skip=ticks_to_skip)) + self._ticks_to_skip[task_name] -= 1 + continue + + self._ticks_to_skip[task_name] = task._ticks_between_runs + LOG.debug(_("Running periodic task %(full_task_name)s"), + dict(full_task_name=full_task_name)) + + try: + task(self, context) + except Exception as e: + if raise_on_error: + raise + LOG.exception(_("Error during %(full_task_name)s:" + " %(e)s"), + dict(e=e, full_task_name=full_task_name)) diff --git a/reddwarf/openstack/common/policy.py b/reddwarf/openstack/common/policy.py new file mode 100644 index 0000000000..5fa0d831e4 --- /dev/null +++ b/reddwarf/openstack/common/policy.py @@ -0,0 +1,779 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common Policy Engine Implementation + +Policies can be expressed in one of two forms: A list of lists, or a +string written in the new policy language. + +In the list-of-lists representation, each check inside the innermost +list is combined as with an "and" conjunction--for that check to pass, +all the specified checks must pass. These innermost lists are then +combined as with an "or" conjunction. This is the original way of +expressing policies, but there now exists a new way: the policy +language. + +In the policy language, each check is specified the same way as in the +list-of-lists representation: a simple "a:b" pair that is matched to +the correct code to perform that check. However, conjunction +operators are available, allowing for more expressiveness in crafting +policies. + +As an example, take the following rule, expressed in the list-of-lists +representation:: + + [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]] + +In the policy language, this becomes:: + + role:admin or (project_id:%(project_id)s and role:projectadmin) + +The policy language also has the "not" operator, allowing a richer +policy rule:: + + project_id:%(project_id)s and not role:dunce + +Finally, two special policy checks should be mentioned; the policy +check "@" will always accept an access, and the policy check "!" will +always reject an access. (Note that if a rule is either the empty +list ("[]") or the empty string, this is equivalent to the "@" policy +check.) Of these, the "!" policy check is probably the most useful, +as it allows particular rules to be explicitly disabled. 
+""" + +import abc +import logging +import re +import urllib + +import urllib2 + +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import jsonutils + + +LOG = logging.getLogger(__name__) + + +_rules = None +_checks = {} + + +class Rules(dict): + """ + A store for rules. Handles the default_rule setting directly. + """ + + @classmethod + def load_json(cls, data, default_rule=None): + """ + Allow loading of JSON rule data. + """ + + # Suck in the JSON data and parse the rules + rules = dict((k, parse_rule(v)) for k, v in + jsonutils.loads(data).items()) + + return cls(rules, default_rule) + + def __init__(self, rules=None, default_rule=None): + """Initialize the Rules store.""" + + super(Rules, self).__init__(rules or {}) + self.default_rule = default_rule + + def __missing__(self, key): + """Implements the default rule handling.""" + + # If the default rule isn't actually defined, do something + # reasonably intelligent + if not self.default_rule or self.default_rule not in self: + raise KeyError(key) + + return self[self.default_rule] + + def __str__(self): + """Dumps a string representation of the rules.""" + + # Start by building the canonical strings for the rules + out_rules = {} + for key, value in self.items(): + # Use empty string for singleton TrueCheck instances + if isinstance(value, TrueCheck): + out_rules[key] = '' + else: + out_rules[key] = str(value) + + # Dump a pretty-printed JSON representation + return jsonutils.dumps(out_rules, indent=4) + + +# Really have to figure out a way to deprecate this +def set_rules(rules): + """Set the rules in use for policy checks.""" + + global _rules + + _rules = rules + + +# Ditto +def reset(): + """Clear the rules used for policy checks.""" + + global _rules + + _rules = None + + +def check(rule, target, creds, exc=None, *args, **kwargs): + """ + Checks authorization of a rule against the target and credentials. + + :param rule: The rule to evaluate. 
+ :param target: As much information about the object being operated + on as possible, as a dictionary. + :param creds: As much information about the user performing the + action as possible, as a dictionary. + :param exc: Class of the exception to raise if the check fails. + Any remaining arguments passed to check() (both + positional and keyword arguments) will be passed to + the exception class. If exc is not provided, returns + False. + + :return: Returns False if the policy does not allow the action and + exc is not provided; otherwise, returns a value that + evaluates to True. Note: for rules using the "case" + expression, this True value will be the specified string + from the expression. + """ + + # Allow the rule to be a Check tree + if isinstance(rule, BaseCheck): + result = rule(target, creds) + elif not _rules: + # No rules to reference means we're going to fail closed + result = False + else: + try: + # Evaluate the rule + result = _rules[rule](target, creds) + except KeyError: + # If the rule doesn't exist, fail closed + result = False + + # If it is False, raise the exception if requested + if exc and result is False: + raise exc(*args, **kwargs) + + return result + + +class BaseCheck(object): + """ + Abstract base class for Check classes. + """ + + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def __str__(self): + """ + Retrieve a string representation of the Check tree rooted at + this node. + """ + + pass + + @abc.abstractmethod + def __call__(self, target, cred): + """ + Perform the check. Returns False to reject the access or a + true value (not necessary True) to accept the access. + """ + + pass + + +class FalseCheck(BaseCheck): + """ + A policy check that always returns False (disallow). + """ + + def __str__(self): + """Return a string representation of this check.""" + + return "!" 
+ + def __call__(self, target, cred): + """Check the policy.""" + + return False + + +class TrueCheck(BaseCheck): + """ + A policy check that always returns True (allow). + """ + + def __str__(self): + """Return a string representation of this check.""" + + return "@" + + def __call__(self, target, cred): + """Check the policy.""" + + return True + + +class Check(BaseCheck): + """ + A base class to allow for user-defined policy checks. + """ + + def __init__(self, kind, match): + """ + :param kind: The kind of the check, i.e., the field before the + ':'. + :param match: The match of the check, i.e., the field after + the ':'. + """ + + self.kind = kind + self.match = match + + def __str__(self): + """Return a string representation of this check.""" + + return "%s:%s" % (self.kind, self.match) + + +class NotCheck(BaseCheck): + """ + A policy check that inverts the result of another policy check. + Implements the "not" operator. + """ + + def __init__(self, rule): + """ + Initialize the 'not' check. + + :param rule: The rule to negate. Must be a Check. + """ + + self.rule = rule + + def __str__(self): + """Return a string representation of this check.""" + + return "not %s" % self.rule + + def __call__(self, target, cred): + """ + Check the policy. Returns the logical inverse of the wrapped + check. + """ + + return not self.rule(target, cred) + + +class AndCheck(BaseCheck): + """ + A policy check that requires that a list of other checks all + return True. Implements the "and" operator. + """ + + def __init__(self, rules): + """ + Initialize the 'and' check. + + :param rules: A list of rules that will be tested. + """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' and '.join(str(r) for r in self.rules) + + def __call__(self, target, cred): + """ + Check the policy. Requires that all rules accept in order to + return True. 
+ """ + + for rule in self.rules: + if not rule(target, cred): + return False + + return True + + def add_check(self, rule): + """ + Allows addition of another rule to the list of rules that will + be tested. Returns the AndCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +class OrCheck(BaseCheck): + """ + A policy check that requires that at least one of a list of other + checks returns True. Implements the "or" operator. + """ + + def __init__(self, rules): + """ + Initialize the 'or' check. + + :param rules: A list of rules that will be tested. + """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' or '.join(str(r) for r in self.rules) + + def __call__(self, target, cred): + """ + Check the policy. Requires that at least one rule accept in + order to return True. + """ + + for rule in self.rules: + if rule(target, cred): + return True + + return False + + def add_check(self, rule): + """ + Allows addition of another rule to the list of rules that will + be tested. Returns the OrCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +def _parse_check(rule): + """ + Parse a single base check rule into an appropriate Check object. + """ + + # Handle the special checks + if rule == '!': + return FalseCheck() + elif rule == '@': + return TrueCheck() + + try: + kind, match = rule.split(':', 1) + except Exception: + LOG.exception(_("Failed to understand rule %(rule)s") % locals()) + # If the rule is invalid, we'll fail closed + return FalseCheck() + + # Find what implements the check + if kind in _checks: + return _checks[kind](kind, match) + elif None in _checks: + return _checks[None](kind, match) + else: + LOG.error(_("No handler for matches of kind %s") % kind) + return FalseCheck() + + +def _parse_list_rule(rule): + """ + Provided for backwards compatibility. Translates the old + list-of-lists syntax into a tree of Check objects. 
+ """ + + # Empty rule defaults to True + if not rule: + return TrueCheck() + + # Outer list is joined by "or"; inner list by "and" + or_list = [] + for inner_rule in rule: + # Elide empty inner lists + if not inner_rule: + continue + + # Handle bare strings + if isinstance(inner_rule, basestring): + inner_rule = [inner_rule] + + # Parse the inner rules into Check objects + and_list = [_parse_check(r) for r in inner_rule] + + # Append the appropriate check to the or_list + if len(and_list) == 1: + or_list.append(and_list[0]) + else: + or_list.append(AndCheck(and_list)) + + # If we have only one check, omit the "or" + if len(or_list) == 0: + return FalseCheck() + elif len(or_list) == 1: + return or_list[0] + + return OrCheck(or_list) + + +# Used for tokenizing the policy language +_tokenize_re = re.compile(r'\s+') + + +def _parse_tokenize(rule): + """ + Tokenizer for the policy language. + + Most of the single-character tokens are specified in the + _tokenize_re; however, parentheses need to be handled specially, + because they can appear inside a check string. Thankfully, those + parentheses that appear inside a check string can never occur at + the very beginning or end ("%(variable)s" is the correct syntax). 
+ """ + + for tok in _tokenize_re.split(rule): + # Skip empty tokens + if not tok or tok.isspace(): + continue + + # Handle leading parens on the token + clean = tok.lstrip('(') + for i in range(len(tok) - len(clean)): + yield '(', '(' + + # If it was only parentheses, continue + if not clean: + continue + else: + tok = clean + + # Handle trailing parens on the token + clean = tok.rstrip(')') + trail = len(tok) - len(clean) + + # Yield the cleaned token + lowered = clean.lower() + if lowered in ('and', 'or', 'not'): + # Special tokens + yield lowered, clean + elif clean: + # Not a special token, but not composed solely of ')' + if len(tok) >= 2 and ((tok[0], tok[-1]) in + [('"', '"'), ("'", "'")]): + # It's a quoted string + yield 'string', tok[1:-1] + else: + yield 'check', _parse_check(clean) + + # Yield the trailing parens + for i in range(trail): + yield ')', ')' + + +class ParseStateMeta(type): + """ + Metaclass for the ParseState class. Facilitates identifying + reduction methods. + """ + + def __new__(mcs, name, bases, cls_dict): + """ + Create the class. Injects the 'reducers' list, a list of + tuples matching token sequences to the names of the + corresponding reduction methods. + """ + + reducers = [] + + for key, value in cls_dict.items(): + if not hasattr(value, 'reducers'): + continue + for reduction in value.reducers: + reducers.append((reduction, key)) + + cls_dict['reducers'] = reducers + + return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict) + + +def reducer(*tokens): + """ + Decorator for reduction methods. Arguments are a sequence of + tokens, in order, which should trigger running this reduction + method. 
+ """ + + def decorator(func): + # Make sure we have a list of reducer sequences + if not hasattr(func, 'reducers'): + func.reducers = [] + + # Add the tokens to the list of reducer sequences + func.reducers.append(list(tokens)) + + return func + + return decorator + + +class ParseState(object): + """ + Implement the core of parsing the policy language. Uses a greedy + reduction algorithm to reduce a sequence of tokens into a single + terminal, the value of which will be the root of the Check tree. + + Note: error reporting is rather lacking. The best we can get with + this parser formulation is an overall "parse failed" error. + Fortunately, the policy language is simple enough that this + shouldn't be that big a problem. + """ + + __metaclass__ = ParseStateMeta + + def __init__(self): + """Initialize the ParseState.""" + + self.tokens = [] + self.values = [] + + def reduce(self): + """ + Perform a greedy reduction of the token stream. If a reducer + method matches, it will be executed, then the reduce() method + will be called recursively to search for any more possible + reductions. + """ + + for reduction, methname in self.reducers: + if (len(self.tokens) >= len(reduction) and + self.tokens[-len(reduction):] == reduction): + # Get the reduction method + meth = getattr(self, methname) + + # Reduce the token stream + results = meth(*self.values[-len(reduction):]) + + # Update the tokens and values + self.tokens[-len(reduction):] = [r[0] for r in results] + self.values[-len(reduction):] = [r[1] for r in results] + + # Check for any more reductions + return self.reduce() + + def shift(self, tok, value): + """Adds one more token to the state. Calls reduce().""" + + self.tokens.append(tok) + self.values.append(value) + + # Do a greedy reduce... + self.reduce() + + @property + def result(self): + """ + Obtain the final result of the parse. Raises ValueError if + the parse failed to reduce to a single result. 
+ """ + + if len(self.values) != 1: + raise ValueError("Could not parse rule") + return self.values[0] + + @reducer('(', 'check', ')') + @reducer('(', 'and_expr', ')') + @reducer('(', 'or_expr', ')') + def _wrap_check(self, _p1, check, _p2): + """Turn parenthesized expressions into a 'check' token.""" + + return [('check', check)] + + @reducer('check', 'and', 'check') + def _make_and_expr(self, check1, _and, check2): + """ + Create an 'and_expr' from two checks joined by the 'and' + operator. + """ + + return [('and_expr', AndCheck([check1, check2]))] + + @reducer('and_expr', 'and', 'check') + def _extend_and_expr(self, and_expr, _and, check): + """ + Extend an 'and_expr' by adding one more check. + """ + + return [('and_expr', and_expr.add_check(check))] + + @reducer('check', 'or', 'check') + def _make_or_expr(self, check1, _or, check2): + """ + Create an 'or_expr' from two checks joined by the 'or' + operator. + """ + + return [('or_expr', OrCheck([check1, check2]))] + + @reducer('or_expr', 'or', 'check') + def _extend_or_expr(self, or_expr, _or, check): + """ + Extend an 'or_expr' by adding one more check. + """ + + return [('or_expr', or_expr.add_check(check))] + + @reducer('not', 'check') + def _make_not_expr(self, _not, check): + """Invert the result of another check.""" + + return [('check', NotCheck(check))] + + +def _parse_text_rule(rule): + """ + Translates a policy written in the policy language into a tree of + Check objects. + """ + + # Empty rule means always accept + if not rule: + return TrueCheck() + + # Parse the token stream + state = ParseState() + for tok, value in _parse_tokenize(rule): + state.shift(tok, value) + + try: + return state.result + except ValueError: + # Couldn't parse the rule + LOG.exception(_("Failed to understand rule %(rule)r") % locals()) + + # Fail closed + return FalseCheck() + + +def parse_rule(rule): + """ + Parses a policy rule into a tree of Check objects. 
+ """ + + # If the rule is a string, it's in the policy language + if isinstance(rule, basestring): + return _parse_text_rule(rule) + return _parse_list_rule(rule) + + +def register(name, func=None): + """ + Register a function or Check class as a policy check. + + :param name: Gives the name of the check type, e.g., 'rule', + 'role', etc. If name is None, a default check type + will be registered. + :param func: If given, provides the function or class to register. + If not given, returns a function taking one argument + to specify the function or class to register, + allowing use as a decorator. + """ + + # Perform the actual decoration by registering the function or + # class. Returns the function or class for compliance with the + # decorator interface. + def decorator(func): + _checks[name] = func + return func + + # If the function or class is given, do the registration + if func: + return decorator(func) + + return decorator + + +@register("rule") +class RuleCheck(Check): + def __call__(self, target, creds): + """ + Recursively checks credentials based on the defined rules. + """ + + try: + return _rules[self.match](target, creds) + except KeyError: + # We don't have any matching rule; fail closed + return False + + +@register("role") +class RoleCheck(Check): + def __call__(self, target, creds): + """Check that there is a matching role in the cred dict.""" + + return self.match.lower() in [x.lower() for x in creds['roles']] + + +@register('http') +class HttpCheck(Check): + def __call__(self, target, creds): + """ + Check http: rules by calling to a remote server. + + This example implementation simply verifies that the response + is exactly 'True'. 
+ """ + + url = ('http:' + self.match) % target + data = {'target': jsonutils.dumps(target), + 'credentials': jsonutils.dumps(creds)} + post_data = urllib.urlencode(data) + f = urllib2.urlopen(url, post_data) + return f.read() == "True" + + +@register(None) +class GenericCheck(Check): + def __call__(self, target, creds): + """ + Check an individual match. + + Matches look like: + + tenant:%(tenant_id)s + role:compute:admin + """ + + # TODO(termie): do dict inspection via dot syntax + match = self.match % target + if self.kind in creds: + return match == unicode(creds[self.kind]) + return False diff --git a/reddwarf/openstack/common/processutils.py b/reddwarf/openstack/common/processutils.py new file mode 100644 index 0000000000..2c3f8f62fa --- /dev/null +++ b/reddwarf/openstack/common/processutils.py @@ -0,0 +1,135 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. 
+""" + +import logging +import random +import shlex + +from eventlet.green import subprocess +from eventlet import greenthread + +from reddwarf.openstack.common.gettextutils import _ + + +LOG = logging.getLogger(__name__) + + +class UnknownArgumentError(Exception): + def __init__(self, message=None): + super(UnknownArgumentError, self).__init__(message) + + +class ProcessExecutionError(Exception): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + if description is None: + description = "Unexpected error while running command." + if exit_code is None: + exit_code = '-' + message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" + % (description, cmd, exit_code, stdout, stderr)) + super(ProcessExecutionError, self).__init__(message) + + +def execute(*cmd, **kwargs): + """ + Helper method to shell out and execute a command through subprocess with + optional retry. + + :param cmd: Passed to subprocess.Popen. + :type cmd: string + :param process_input: Send to opened process. + :type proces_input: string + :param check_exit_code: Defaults to 0. Will raise + :class:`ProcessExecutionError` + if the command exits without returning this value + as a returncode + :type check_exit_code: int + :param delay_on_retry: True | False. Defaults to True. If set to True, + wait a short amount of time before retrying. + :type delay_on_retry: boolean + :param attempts: How many times to retry cmd. + :type attempts: int + :param run_as_root: True | False. Defaults to False. If set to True, + the command is prefixed by the command specified + in the root_helper kwarg. 
+ :type run_as_root: boolean + :param root_helper: command to prefix all cmd's with + :type root_helper: string + :returns: (stdout, stderr) from process execution + :raises: :class:`UnknownArgumentError` on + receiving unknown arguments + :raises: :class:`ProcessExecutionError` + """ + + process_input = kwargs.pop('process_input', None) + check_exit_code = kwargs.pop('check_exit_code', 0) + delay_on_retry = kwargs.pop('delay_on_retry', True) + attempts = kwargs.pop('attempts', 1) + run_as_root = kwargs.pop('run_as_root', False) + root_helper = kwargs.pop('root_helper', '') + if len(kwargs): + raise UnknownArgumentError(_('Got unknown keyword args ' + 'to utils.execute: %r') % kwargs) + if run_as_root: + cmd = shlex.split(root_helper) + list(cmd) + cmd = map(str, cmd) + + while attempts > 0: + attempts -= 1 + try: + LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) + _PIPE = subprocess.PIPE # pylint: disable=E1101 + obj = subprocess.Popen(cmd, + stdin=_PIPE, + stdout=_PIPE, + stderr=_PIPE, + close_fds=True) + result = None + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + obj.stdin.close() # pylint: disable=E1101 + _returncode = obj.returncode # pylint: disable=E1101 + if _returncode: + LOG.debug(_('Result was %s') % _returncode) + if (isinstance(check_exit_code, int) and + not isinstance(check_exit_code, bool) and + _returncode != check_exit_code): + (stdout, stderr) = result + raise ProcessExecutionError(exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + return result + except ProcessExecutionError: + if not attempts: + raise + else: + LOG.debug(_('%r failed. 
Retrying.'), cmd) + if delay_on_retry: + greenthread.sleep(random.randint(20, 200) / 100.0) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) diff --git a/reddwarf/openstack/common/rpc/__init__.py b/reddwarf/openstack/common/rpc/__init__.py index fd2e3279e4..07294ea813 100644 --- a/reddwarf/openstack/common/rpc/__init__.py +++ b/reddwarf/openstack/common/rpc/__init__.py @@ -25,12 +25,7 @@ For some wrappers that add message versioning to rpc, see: rpc.proxy """ -#TODO(tim.simpson): Doing this as we aren't yet using the real cfg module. -from reddwarf.common.config import OsCommonModule -cfg = OsCommonModule() - - -#from openstack.common import cfg +from reddwarf.openstack.common import cfg from reddwarf.openstack.common import importutils @@ -52,17 +47,24 @@ rpc_opts = [ help='Seconds to wait before a cast expires (TTL). ' 'Only supported by impl_zmq.'), cfg.ListOpt('allowed_rpc_exception_modules', - default=['openstack.common.exception', + default=['reddwarf.openstack.common.exception', 'nova.exception', + 'cinder.exception', + 'exceptions', ], help='Modules of exceptions that are permitted to be recreated' 'upon receiving exception data from an rpc call.'), - cfg.StrOpt('control_exchange', - default='nova', - help='AMQP exchange to connect to if using RabbitMQ or Qpid'), cfg.BoolOpt('fake_rabbit', default=False, help='If passed, use a fake RabbitMQ provider'), + # + # The following options are not registered here, but are expected to be + # present. The project using this library must register these options with + # the configuration so that project-specific defaults may be defined. 
+ # + #cfg.StrOpt('control_exchange', + # default='nova', + # help='AMQP exchange to connect to if using RabbitMQ or Qpid'), ] cfg.CONF.register_opts(rpc_opts) @@ -125,28 +127,6 @@ def cast(context, topic, msg): return _get_impl().cast(cfg.CONF, context, topic, msg) -def cast_with_consumer(context, topic, msg): - """Invoke a remote method that does not return anything. - - :param context: Information that identifies the user that has made this - request. - :param topic: The topic to send the rpc message to. This correlates to the - topic argument of - nova.rpc.common.Connection.create_consumer() and only applies - when the consumer was created with fanout=False. - :param msg: This is a dict in the form { "method" : "method_to_invoke", - "args" : dict_of_kwargs } - - :returns: None - """ - return _get_impl().cast_with_consumer(cfg.CONF, context, topic, msg) - - -def delete_queue(context, topic): - """Deletes the queue.""" - return _get_impl().delete_queue(cfg.CONF, context, topic) - - def fanout_cast(context, topic, msg): """Broadcast a remote method invocation with no return. @@ -271,7 +251,7 @@ def queue_get_for(context, topic, host): Messages sent to the 'foo.' topic are sent to the nova-foo service on . """ - return '%s.%s' % (topic, host) + return '%s.%s' % (topic, host) if host else topic _RPCIMPL = None diff --git a/reddwarf/openstack/common/rpc/amqp.py b/reddwarf/openstack/common/rpc/amqp.py index e7ab71cf99..bf864b7716 100644 --- a/reddwarf/openstack/common/rpc/amqp.py +++ b/reddwarf/openstack/common/rpc/amqp.py @@ -26,7 +26,6 @@ AMQP, but is deprecated and predates this code. """ import inspect -import logging import sys import uuid @@ -34,11 +33,11 @@ from eventlet import greenpool from eventlet import pools from eventlet import semaphore +from reddwarf.openstack.common import cfg from reddwarf.openstack.common import excutils -#TODO(tim.simpson): Import the true version of Mr. Underscore. 
-#from reddwarf.openstack.common.gettextutils import _ - +from reddwarf.openstack.common.gettextutils import _ from reddwarf.openstack.common import local +from reddwarf.openstack.common import log as logging from reddwarf.openstack.common.rpc import common as rpc_common @@ -56,7 +55,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): - LOG.debug('Pool creating new connection') + LOG.debug(_('Pool creating new connection')) return self.connection_cls(self.conf) def empty(self): @@ -151,7 +150,7 @@ class ConnectionContext(rpc_common.Connection): def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None, - ending=False): + ending=False, log_failure=True): """Sends a reply or an error on the channel signified by msg_id. Failure should be a sys.exc_info() tuple. @@ -159,7 +158,8 @@ def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None, """ with ConnectionContext(conf, connection_pool) as conn: if failure: - failure = rpc_common.serialize_remote_exception(failure) + failure = rpc_common.serialize_remote_exception(failure, + log_failure) try: msg = {'result': reply, 'failure': failure} @@ -186,10 +186,10 @@ class RpcContext(rpc_common.CommonRpcContext): return self.__class__(**values) def reply(self, reply=None, failure=None, ending=False, - connection_pool=None): + connection_pool=None, log_failure=True): if self.msg_id: msg_reply(self.conf, self.msg_id, connection_pool, reply, failure, - ending) + ending, log_failure) if ending: self.msg_id = None @@ -283,8 +283,14 @@ class ProxyCallback(object): ctxt.reply(rval, None, connection_pool=self.connection_pool) # This final None tells multicall that it is done. 
ctxt.reply(ending=True, connection_pool=self.connection_pool) - except Exception as e: - LOG.exception('Exception during message handling') + except rpc_common.ClientException as e: + LOG.debug(_('Expected exception during message handling (%s)') % + e._exc_info[1]) + ctxt.reply(None, e._exc_info, + connection_pool=self.connection_pool, + log_failure=False) + except Exception: + LOG.exception(_('Exception during message handling')) ctxt.reply(None, sys.exc_info(), connection_pool=self.connection_pool) @@ -365,8 +371,6 @@ def multicall(conf, context, topic, msg, timeout, connection_pool): def call(conf, context, topic, msg, timeout, connection_pool): """Sends a message on a topic and wait for a response.""" - with ConnectionContext(conf, connection_pool) as conn: - consumer = conn.declare_topic_consumer(topic=topic) rv = multicall(conf, context, topic, msg, timeout, connection_pool) # NOTE(vish): return the last result from the multicall rv = list(rv) @@ -377,23 +381,12 @@ def call(conf, context, topic, msg, timeout, connection_pool): def cast(conf, context, topic, msg, connection_pool): """Sends a message on a topic without waiting for a response.""" - with ConnectionContext(conf, connection_pool) as conn: - consumer = conn.declare_topic_consumer(topic=topic) LOG.debug(_('Making asynchronous cast on %s...'), topic) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: conn.topic_send(topic, msg) -def cast_with_consumer(conf, context, topic, msg, connection_pool): - """Sends a message on a topic without waiting for a response.""" - LOG.debug(_('Making asynchronous cast on %s...'), topic) - pack_context(msg, context) - with ConnectionContext(conf, connection_pool) as conn: - consumer = conn.declare_topic_consumer(topic=topic) - conn.topic_send(topic, msg) - - def fanout_cast(conf, context, topic, msg, connection_pool): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout 
cast...')) @@ -421,8 +414,9 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg, def notify(conf, context, topic, msg, connection_pool): """Sends a notification event on a topic.""" - event_type = msg.get('event_type') - LOG.debug(_('Sending %(event_type)s on %(topic)s'), locals()) + LOG.debug(_('Sending %(event_type)s on %(topic)s'), + dict(event_type=msg.get('event_type'), + topic=topic)) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: conn.notify_send(topic, msg) @@ -431,3 +425,10 @@ def notify(conf, context, topic, msg, connection_pool): def cleanup(connection_pool): if connection_pool: connection_pool.empty() + + +def get_control_exchange(conf): + try: + return conf.control_exchange + except cfg.NoSuchOptError: + return 'openstack' diff --git a/reddwarf/openstack/common/rpc/common.py b/reddwarf/openstack/common/rpc/common.py index 5fde45afb7..3ad57332f7 100644 --- a/reddwarf/openstack/common/rpc/common.py +++ b/reddwarf/openstack/common/rpc/common.py @@ -18,21 +18,14 @@ # under the License. import copy -import logging import sys import traceback -#from reddwarf.openstack.common import cfg -#TODO(tim.simpson): Doing this as we aren't yet using the real cfg module. -from reddwarf.common.config import OsCommonModule -cfg = OsCommonModule() - - -#TODO(tim.simpson): Import the true version of Mr. Underscore. 
-#from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common.gettextutils import _ from reddwarf.openstack.common import importutils from reddwarf.openstack.common import jsonutils from reddwarf.openstack.common import local +from reddwarf.openstack.common import log as logging LOG = logging.getLogger(__name__) @@ -48,7 +41,7 @@ class RPCException(Exception): try: message = self.message % kwargs - except Exception as e: + except Exception: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation')) @@ -114,7 +107,7 @@ class Connection(object): """ raise NotImplementedError() - def create_consumer(self, conf, topic, proxy, fanout=False): + def create_consumer(self, topic, proxy, fanout=False): """Create a consumer on this connection. A consumer is associated with a message queue on the backend message @@ -123,7 +116,6 @@ class Connection(object): off of the queue will determine which method gets called on the proxy object. - :param conf: An openstack.common.cfg configuration object. :param topic: This is a name associated with what to consume from. Multiple instances of a service may consume from the same topic. For example, all instances of nova-compute consume @@ -139,7 +131,7 @@ class Connection(object): """ raise NotImplementedError() - def create_worker(self, conf, topic, proxy, pool_name): + def create_worker(self, topic, proxy, pool_name): """Create a worker on this connection. A worker is like a regular consumer of messages directed to a @@ -149,7 +141,6 @@ class Connection(object): be asked to process it. Load is distributed across the members of the pool in round-robin fashion. - :param conf: An openstack.common.cfg configuration object. :param topic: This is a name associated with what to consume from. Multiple instances of a service may consume from the same topic. 
@@ -205,7 +196,7 @@ def _safe_log(log_func, msg, msg_data): return log_func(msg, msg_data) -def serialize_remote_exception(failure_info): +def serialize_remote_exception(failure_info, log_failure=True): """Prepares exception data to be sent over rpc. Failure_info should be a sys.exc_info() tuple. @@ -213,8 +204,9 @@ def serialize_remote_exception(failure_info): """ tb = traceback.format_exception(*failure_info) failure = failure_info[1] - LOG.error(_("Returning exception %s to caller"), unicode(failure)) - LOG.error(tb) + if log_failure: + LOG.error(_("Returning exception %s to caller"), unicode(failure)) + LOG.error(tb) kwargs = {} if hasattr(failure, 'kwargs'): @@ -268,7 +260,7 @@ def deserialize_remote_exception(conf, data): # we cannot necessarily change an exception message so we must override # the __str__ method. failure.__class__ = new_ex_type - except TypeError as e: + except TypeError: # NOTE(ameade): If a core exception then just add the traceback to the # first exception argument. failure.args = (message,) + failure.args[1:] @@ -319,3 +311,36 @@ class CommonRpcContext(object): context.values['read_deleted'] = read_deleted return context + + +class ClientException(Exception): + """This encapsulates some actual exception that is expected to be + hit by an RPC proxy object. Merely instantiating it records the + current exception information, which will be passed back to the + RPC client without exceptional logging.""" + def __init__(self): + self._exc_info = sys.exc_info() + + +def catch_client_exception(exceptions, func, *args, **kwargs): + try: + return func(*args, **kwargs) + except Exception, e: + if type(e) in exceptions: + raise ClientException() + else: + raise + + +def client_exceptions(*exceptions): + """Decorator for manager methods that raise expected exceptions. 
+ Marking a Manager method with this decorator allows the declaration + of expected exceptions that the RPC layer should not consider fatal, + and not log as if they were generated in a real error scenario. Note + that this will cause listed exceptions to be wrapped in a + ClientException, which is used internally by the RPC layer.""" + def outer(func): + def inner(*args, **kwargs): + return catch_client_exception(exceptions, func, *args, **kwargs) + return inner + return outer diff --git a/reddwarf/openstack/common/rpc/dispatcher.py b/reddwarf/openstack/common/rpc/dispatcher.py index 6c7c34c807..f935e5287a 100644 --- a/reddwarf/openstack/common/rpc/dispatcher.py +++ b/reddwarf/openstack/common/rpc/dispatcher.py @@ -41,8 +41,8 @@ server side of the API at the same time. However, as the code stands today, there can be both versioned and unversioned APIs implemented in the same code base. - -EXAMPLES: +EXAMPLES +======== Nova was the first project to use versioned rpc APIs. Consider the compute rpc API as an example. The client side is in nova/compute/rpcapi.py and the server @@ -50,12 +50,13 @@ side is in nova/compute/manager.py. Example 1) Adding a new method. +------------------------------- Adding a new method is a backwards compatible change. It should be added to nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should have a specific version specified to indicate the minimum API version that must -be implemented for the method to be supported. For example: +be implemented for the method to be supported. For example:: def get_host_uptime(self, ctxt, host): topic = _compute_topic(self.topic, ctxt, host, None) @@ -67,10 +68,11 @@ get_host_uptime() method. Example 2) Adding a new parameter. +---------------------------------- Adding a new parameter to an rpc method can be made backwards compatible. 
The RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped. -The implementation of the method must not expect the parameter to be present. +The implementation of the method must not expect the parameter to be present.:: def some_remote_method(self, arg1, arg2, newarg=None): # The code needs to deal with newarg=None for cases diff --git a/reddwarf/openstack/common/rpc/impl_fake.py b/reddwarf/openstack/common/rpc/impl_fake.py index ef76d2310f..dbe661a5b1 100644 --- a/reddwarf/openstack/common/rpc/impl_fake.py +++ b/reddwarf/openstack/common/rpc/impl_fake.py @@ -18,11 +18,15 @@ queues. Casts will block, but this is very useful for tests. """ import inspect +# NOTE(russellb): We specifically want to use json, not our own jsonutils. +# jsonutils has some extra logic to automatically convert objects to primitive +# types so that they can be serialized. We want to catch all cases where +# non-primitive types make it into this code and treat it as an error. +import json import time import eventlet -from reddwarf.openstack.common import jsonutils from reddwarf.openstack.common.rpc import common as rpc_common CONSUMERS = {} @@ -75,6 +79,8 @@ class Consumer(object): else: res.append(rval) done.send(res) + except rpc_common.ClientException as e: + done.send_exception(e._exc_info[1]) except Exception as e: done.send_exception(e) @@ -121,7 +127,7 @@ def create_connection(conf, new=True): def check_serialize(msg): """Make sure a message intended for rpc can be serialized.""" - jsonutils.dumps(msg) + json.dumps(msg) def multicall(conf, context, topic, msg, timeout=None): @@ -154,6 +160,7 @@ def call(conf, context, topic, msg, timeout=None): def cast(conf, context, topic, msg): + check_serialize(msg) try: call(conf, context, topic, msg) except Exception: diff --git a/reddwarf/openstack/common/rpc/impl_kombu.py b/reddwarf/openstack/common/rpc/impl_kombu.py index f210a5d7cd..9fc099fa74 100644 --- a/reddwarf/openstack/common/rpc/impl_kombu.py +++ 
b/reddwarf/openstack/common/rpc/impl_kombu.py @@ -29,14 +29,9 @@ import kombu.connection import kombu.entity import kombu.messaging -#from reddwarf.openstack.common import cfg -#TODO(tim.simpson): Doing this as we aren't yet using the real cfg module. -from reddwarf.common.config import OsCommonModule -cfg = OsCommonModule() - -#TODO(tim.simpson): Import the true version of Mr. Underscore. -#from reddwarf.openstack.common.gettextutils import _ - +from reddwarf.openstack.common import cfg +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import network_utils from reddwarf.openstack.common.rpc import amqp as rpc_amqp from reddwarf.openstack.common.rpc import common as rpc_common @@ -56,10 +51,13 @@ kombu_opts = [ '(valid only if SSL enabled)')), cfg.StrOpt('rabbit_host', default='localhost', - help='the RabbitMQ host'), + help='The RabbitMQ broker address where a single node is used'), cfg.IntOpt('rabbit_port', default=5672, - help='the RabbitMQ port'), + help='The RabbitMQ broker port where a single node is used'), + cfg.ListOpt('rabbit_hosts', + default=['$rabbit_host:$rabbit_port'], + help='RabbitMQ HA cluster host:port pairs'), cfg.BoolOpt('rabbit_use_ssl', default=False, help='connect over SSL for RabbitMQ'), @@ -86,6 +84,11 @@ kombu_opts = [ cfg.BoolOpt('rabbit_durable_queues', default=False, help='use durable queues in RabbitMQ'), + cfg.BoolOpt('rabbit_ha_queues', + default=False, + help='use H/A queues in RabbitMQ (x-ha-policy: all).' + 'You need to wipe RabbitMQ database when ' + 'changing this option.'), ] @@ -94,6 +97,20 @@ cfg.CONF.register_opts(kombu_opts) LOG = rpc_common.LOG +def _get_queue_arguments(conf): + """Construct the arguments for declaring a queue. + + If the rabbit_ha_queues option is set, we declare a mirrored queue + as described here: + + http://www.rabbitmq.com/ha.html + + Setting x-ha-policy to all means that the queue will be mirrored + to all nodes in the cluster. 
+ """ + return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {} + + class ConsumerBase(object): """Consumer base class.""" @@ -198,7 +215,7 @@ class TopicConsumer(ConsumerBase): """Consumer class for 'topic'""" def __init__(self, conf, channel, topic, callback, tag, name=None, - **kwargs): + exchange_name=None, **kwargs): """Init a 'topic' queue. :param channel: the amqp channel to use @@ -213,10 +230,12 @@ class TopicConsumer(ConsumerBase): """ # Default options options = {'durable': conf.rabbit_durable_queues, + 'queue_arguments': _get_queue_arguments(conf), 'auto_delete': False, 'exclusive': False} options.update(kwargs) - exchange = kombu.entity.Exchange(name=conf.control_exchange, + exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) + exchange = kombu.entity.Exchange(name=exchange_name, type='topic', durable=options['durable'], auto_delete=options['auto_delete']) @@ -248,6 +267,7 @@ class FanoutConsumer(ConsumerBase): # Default options options = {'durable': False, + 'queue_arguments': _get_queue_arguments(conf), 'auto_delete': True, 'exclusive': True} options.update(kwargs) @@ -313,8 +333,12 @@ class TopicPublisher(Publisher): 'auto_delete': False, 'exclusive': False} options.update(kwargs) - super(TopicPublisher, self).__init__(channel, conf.control_exchange, - topic, type='topic', **options) + exchange_name = rpc_amqp.get_control_exchange(conf) + super(TopicPublisher, self).__init__(channel, + exchange_name, + topic, + type='topic', + **options) class FanoutPublisher(Publisher): @@ -337,6 +361,7 @@ class NotifyPublisher(TopicPublisher): def __init__(self, conf, channel, topic, **kwargs): self.durable = kwargs.pop('durable', conf.rabbit_durable_queues) + self.queue_arguments = _get_queue_arguments(conf) super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) def reconnect(self, channel): @@ -349,7 +374,8 @@ class NotifyPublisher(TopicPublisher): exchange=self.exchange, durable=self.durable, name=self.routing_key, - 
routing_key=self.routing_key) + routing_key=self.routing_key, + queue_arguments=self.queue_arguments) queue.declare() @@ -374,31 +400,37 @@ class Connection(object): if server_params is None: server_params = {} - # Keys to translate from server_params to kombu params server_params_to_kombu_params = {'username': 'userid'} - params = {} - for sp_key, value in server_params.iteritems(): - p_key = server_params_to_kombu_params.get(sp_key, sp_key) - params[p_key] = value + ssl_params = self._fetch_ssl_params() + params_list = [] + for adr in self.conf.rabbit_hosts: + hostname, port = network_utils.parse_host_port( + adr, default_port=self.conf.rabbit_port) - params.setdefault('hostname', self.conf.rabbit_host) - params.setdefault('port', self.conf.rabbit_port) - params.setdefault('userid', self.conf.rabbit_userid) - params.setdefault('password', self.conf.rabbit_password) - params.setdefault('virtual_host', self.conf.rabbit_virtual_host) + params = { + 'hostname': hostname, + 'port': port, + 'userid': self.conf.rabbit_userid, + 'password': self.conf.rabbit_password, + 'virtual_host': self.conf.rabbit_virtual_host, + } - self.params = params + for sp_key, value in server_params.iteritems(): + p_key = server_params_to_kombu_params.get(sp_key, sp_key) + params[p_key] = value - if self.conf.fake_rabbit: - self.params['transport'] = 'memory' - self.memory_transport = True - else: - self.memory_transport = False + if self.conf.fake_rabbit: + params['transport'] = 'memory' + if self.conf.rabbit_use_ssl: + params['ssl'] = ssl_params - if self.conf.rabbit_use_ssl: - self.params['ssl'] = self._fetch_ssl_params() + params_list.append(params) + + self.params_list = params_list + + self.memory_transport = self.conf.fake_rabbit self.connection = None self.reconnect() @@ -428,14 +460,14 @@ class Connection(object): # Return the extended behavior return ssl_params - def _connect(self): + def _connect(self, params): """Connect to rabbit. 
Re-establish any queues that may have been declared before if we are reconnecting. Exceptions should be handled by the caller. """ if self.connection: LOG.info(_("Reconnecting to AMQP server on " - "%(hostname)s:%(port)d") % self.params) + "%(hostname)s:%(port)d") % params) try: self.connection.close() except self.connection_errors: @@ -443,7 +475,7 @@ class Connection(object): # Setting this in case the next statement fails, though # it shouldn't be doing any network operations, yet. self.connection = None - self.connection = kombu.connection.BrokerConnection(**self.params) + self.connection = kombu.connection.BrokerConnection(**params) self.connection_errors = self.connection.connection_errors if self.memory_transport: # Kludge to speed up tests. @@ -456,8 +488,8 @@ class Connection(object): self.channel._new_queue('ae.undeliver') for consumer in self.consumers: consumer.reconnect(self.channel) - LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'), - self.params) + LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') % + params) def reconnect(self): """Handles reconnecting and re-establishing queues. 
@@ -470,11 +502,12 @@ class Connection(object): attempt = 0 while True: + params = self.params_list[attempt % len(self.params_list)] attempt += 1 try: - self._connect() + self._connect(params) return - except (self.connection_errors, IOError), e: + except (IOError, self.connection_errors) as e: pass except Exception, e: # NOTE(comstud): Unfortunately it's possible for amqplib @@ -489,12 +522,12 @@ class Connection(object): log_info = {} log_info['err_str'] = str(e) log_info['max_retries'] = self.max_retries - log_info.update(self.params) + log_info.update(params) if self.max_retries and attempt == self.max_retries: - LOG.exception(_('Unable to connect to AMQP server on ' - '%(hostname)s:%(port)d after %(max_retries)d ' - 'tries: %(err_str)s') % log_info) + LOG.error(_('Unable to connect to AMQP server on ' + '%(hostname)s:%(port)d after %(max_retries)d ' + 'tries: %(err_str)s') % log_info) # NOTE(comstud): Copied from original code. There's # really no better recourse because if this was a queue we # need to consume on, we have no way to consume anymore. @@ -508,9 +541,9 @@ class Connection(object): sleep_time = min(sleep_time, self.interval_max) log_info['sleep_time'] = sleep_time - LOG.exception(_('AMQP server on %(hostname)s:%(port)d is' - ' unreachable: %(err_str)s. Trying again in ' - '%(sleep_time)d seconds.') % log_info) + LOG.error(_('AMQP server on %(hostname)s:%(port)d is ' + 'unreachable: %(err_str)s. Trying again in ' + '%(sleep_time)d seconds.') % log_info) time.sleep(sleep_time) def ensure(self, error_callback, method, *args, **kwargs): @@ -518,7 +551,8 @@ class Connection(object): try: return method(*args, **kwargs) except (self.connection_errors, socket.timeout, IOError), e: - pass + if error_callback: + error_callback(e) except Exception, e: # NOTE(comstud): Unfortunately it's possible for amqplib # to return an error not covered by its transport @@ -528,8 +562,8 @@ class Connection(object): # and try to reconnect in this case. 
if 'timeout' not in str(e): raise - if error_callback: - error_callback(e) + if error_callback: + error_callback(e) self.reconnect() def get_channel(self): @@ -631,10 +665,12 @@ class Connection(object): """ self.declare_consumer(DirectConsumer, topic, callback) - def declare_topic_consumer(self, topic, callback=None, queue_name=None): + def declare_topic_consumer(self, topic, callback=None, queue_name=None, + exchange_name=None): """Create a 'topic' consumer.""" self.declare_consumer(functools.partial(TopicConsumer, name=queue_name, + exchange_name=exchange_name, ), topic, callback) @@ -725,23 +761,6 @@ def cast(conf, context, topic, msg): rpc_amqp.get_connection_pool(conf, Connection)) -def cast_with_consumer(conf, context, topic, msg): - """Sends a message on a topic without waiting for a response.""" - return rpc_amqp.cast_with_consumer(conf, context, topic, msg, - Connection.pool) - - -def delete_queue(conf, context, topic): - LOG.debug("Deleting queue with name %s." % topic) - with rpc_amqp.ConnectionContext(conf, Connection.pool) as conn: - channel = conn.channel - durable = conf.rabbit_durable_queues - queue = kombu.entity.Queue(name=topic, channel=channel, - auto_delete=False, exclusive=False, - durable=durable) - queue.delete() - - def fanout_cast(conf, context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" return rpc_amqp.fanout_cast( @@ -758,7 +777,7 @@ def cast_to_server(conf, context, server_params, topic, msg): def fanout_cast_to_server(conf, context, server_params, topic, msg): """Sends a message on a fanout exchange to a specific server.""" - return rpc_amqp.cast_to_server( + return rpc_amqp.fanout_cast_to_server( conf, context, server_params, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) diff --git a/reddwarf/openstack/common/rpc/impl_qpid.py b/reddwarf/openstack/common/rpc/impl_qpid.py index 78d1ceffca..c8435d63ce 100644 --- a/reddwarf/openstack/common/rpc/impl_qpid.py +++ 
b/reddwarf/openstack/common/rpc/impl_qpid.py @@ -17,7 +17,6 @@ import functools import itertools -import logging import time import uuid @@ -29,6 +28,7 @@ import qpid.messaging.exceptions from reddwarf.openstack.common import cfg from reddwarf.openstack.common.gettextutils import _ from reddwarf.openstack.common import jsonutils +from reddwarf.openstack.common import log as logging from reddwarf.openstack.common.rpc import amqp as rpc_amqp from reddwarf.openstack.common.rpc import common as rpc_common @@ -41,6 +41,9 @@ qpid_opts = [ cfg.StrOpt('qpid_port', default='5672', help='Qpid broker port'), + cfg.ListOpt('qpid_hosts', + default=['$qpid_hostname:$qpid_port'], + help='Qpid HA cluster host:port pairs'), cfg.StrOpt('qpid_username', default='', help='Username for qpid connection'), @@ -50,26 +53,8 @@ qpid_opts = [ cfg.StrOpt('qpid_sasl_mechanisms', default='', help='Space separated list of SASL mechanisms to use for auth'), - cfg.BoolOpt('qpid_reconnect', - default=True, - help='Automatically reconnect'), - cfg.IntOpt('qpid_reconnect_timeout', - default=0, - help='Reconnection timeout in seconds'), - cfg.IntOpt('qpid_reconnect_limit', - default=0, - help='Max reconnections before giving up'), - cfg.IntOpt('qpid_reconnect_interval_min', - default=0, - help='Minimum seconds between reconnection attempts'), - cfg.IntOpt('qpid_reconnect_interval_max', - default=0, - help='Maximum seconds between reconnection attempts'), - cfg.IntOpt('qpid_reconnect_interval', - default=0, - help='Equivalent to setting max and min to the same value'), cfg.IntOpt('qpid_heartbeat', - default=5, + default=60, help='Seconds between connection keepalive heartbeats'), cfg.StrOpt('qpid_protocol', default='tcp', @@ -170,7 +155,8 @@ class DirectConsumer(ConsumerBase): class TopicConsumer(ConsumerBase): """Consumer class for 'topic'""" - def __init__(self, conf, session, topic, callback, name=None): + def __init__(self, conf, session, topic, callback, name=None, + exchange_name=None): """Init a 
'topic' queue. :param session: the amqp session to use @@ -180,9 +166,9 @@ class TopicConsumer(ConsumerBase): :param name: optional queue name, defaults to topic """ + exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) super(TopicConsumer, self).__init__(session, callback, - "%s/%s" % (conf.control_exchange, - topic), + "%s/%s" % (exchange_name, topic), {}, name or topic, {}) @@ -256,9 +242,9 @@ class TopicPublisher(Publisher): def __init__(self, conf, session, topic): """init a 'topic' publisher. """ - super(TopicPublisher, self).__init__( - session, - "%s/%s" % (conf.control_exchange, topic)) + exchange_name = rpc_amqp.get_control_exchange(conf) + super(TopicPublisher, self).__init__(session, + "%s/%s" % (exchange_name, topic)) class FanoutPublisher(Publisher): @@ -276,10 +262,10 @@ class NotifyPublisher(Publisher): def __init__(self, conf, session, topic): """init a 'topic' publisher. """ - super(NotifyPublisher, self).__init__( - session, - "%s/%s" % (conf.control_exchange, topic), - {"durable": True}) + exchange_name = rpc_amqp.get_control_exchange(conf) + super(NotifyPublisher, self).__init__(session, + "%s/%s" % (exchange_name, topic), + {"durable": True}) class Connection(object): @@ -293,50 +279,42 @@ class Connection(object): self.consumer_thread = None self.conf = conf - if server_params is None: - server_params = {} + if server_params and 'hostname' in server_params: + # NOTE(russellb) This enables support for cast_to_server. 
+ server_params['qpid_hosts'] = [ + '%s:%d' % (server_params['hostname'], + server_params.get('port', 5672)) + ] - default_params = dict(hostname=self.conf.qpid_hostname, - port=self.conf.qpid_port, - username=self.conf.qpid_username, - password=self.conf.qpid_password) + params = { + 'qpid_hosts': self.conf.qpid_hosts, + 'username': self.conf.qpid_username, + 'password': self.conf.qpid_password, + } + params.update(server_params or {}) - params = server_params - for key in default_params.keys(): - params.setdefault(key, default_params[key]) + self.brokers = params['qpid_hosts'] + self.username = params['username'] + self.password = params['password'] + self.connection_create(self.brokers[0]) + self.reconnect() - self.broker = params['hostname'] + ":" + str(params['port']) + def connection_create(self, broker): # Create the connection - this does not open the connection - self.connection = qpid.messaging.Connection(self.broker) + self.connection = qpid.messaging.Connection(broker) # Check if flags are set and if so set them for the connection # before we call open - self.connection.username = params['username'] - self.connection.password = params['password'] + self.connection.username = self.username + self.connection.password = self.password + self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms - self.connection.reconnect = self.conf.qpid_reconnect - if self.conf.qpid_reconnect_timeout: - self.connection.reconnect_timeout = ( - self.conf.qpid_reconnect_timeout) - if self.conf.qpid_reconnect_limit: - self.connection.reconnect_limit = self.conf.qpid_reconnect_limit - if self.conf.qpid_reconnect_interval_max: - self.connection.reconnect_interval_max = ( - self.conf.qpid_reconnect_interval_max) - if self.conf.qpid_reconnect_interval_min: - self.connection.reconnect_interval_min = ( - self.conf.qpid_reconnect_interval_min) - if self.conf.qpid_reconnect_interval: - self.connection.reconnect_interval = ( - self.conf.qpid_reconnect_interval) - 
self.connection.hearbeat = self.conf.qpid_heartbeat + # Reconnection is done by self.reconnect() + self.connection.reconnect = False + self.connection.heartbeat = self.conf.qpid_heartbeat self.connection.protocol = self.conf.qpid_protocol self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay - # Open is part of reconnect - - # NOTE(WGH) not sure we need this with the reconnect flags - self.reconnect() - def _register_consumer(self, consumer): self.consumers[str(consumer.get_receiver())] = consumer @@ -351,23 +329,36 @@ class Connection(object): except qpid.messaging.exceptions.ConnectionError: pass + attempt = 0 + delay = 1 while True: + broker = self.brokers[attempt % len(self.brokers)] + attempt += 1 + try: + self.connection_create(broker) self.connection.open() except qpid.messaging.exceptions.ConnectionError, e: - LOG.error(_('Unable to connect to AMQP server: %s'), e) - time.sleep(self.conf.qpid_reconnect_interval or 1) + msg_dict = dict(e=e, delay=delay) + msg = _("Unable to connect to AMQP server: %(e)s. 
" + "Sleeping %(delay)s seconds") % msg_dict + LOG.error(msg) + time.sleep(delay) + delay = min(2 * delay, 60) else: + LOG.info(_('Connected to AMQP server on %s'), broker) break - LOG.info(_('Connected to AMQP server on %s'), self.broker) - self.session = self.connection.session() - for consumer in self.consumers.itervalues(): - consumer.reconnect(self.session) - if self.consumers: + consumers = self.consumers + self.consumers = {} + + for consumer in consumers.itervalues(): + consumer.reconnect(self.session) + self._register_consumer(consumer) + LOG.debug(_("Re-established AMQP queues")) def ensure(self, error_callback, method, *args, **kwargs): @@ -464,10 +455,12 @@ class Connection(object): """ self.declare_consumer(DirectConsumer, topic, callback) - def declare_topic_consumer(self, topic, callback=None, queue_name=None): + def declare_topic_consumer(self, topic, callback=None, queue_name=None, + exchange_name=None): """Create a 'topic' consumer.""" self.declare_consumer(functools.partial(TopicConsumer, name=queue_name, + exchange_name=exchange_name, ), topic, callback) diff --git a/reddwarf/openstack/common/rpc/impl_zmq.py b/reddwarf/openstack/common/rpc/impl_zmq.py index 0d4008aa8b..3344888c34 100644 --- a/reddwarf/openstack/common/rpc/impl_zmq.py +++ b/reddwarf/openstack/common/rpc/impl_zmq.py @@ -49,7 +49,7 @@ zmq_opts = [ # The module.Class to use for matchmaking. cfg.StrOpt( 'rpc_zmq_matchmaker', - default=('openstack.common.rpc.' + default=('reddwarf.openstack.common.rpc.' 'matchmaker.MatchMakerLocalhost'), help='MatchMaker driver', ), @@ -72,7 +72,7 @@ zmq_opts = [ # These globals are defined in register_opts(conf), # a mandatory initialization call -FLAGS = None +CONF = None ZMQ_CTX = None # ZeroMQ Context, must be global. 
matchmaker = None # memoized matchmaker object @@ -259,7 +259,14 @@ class InternalContext(object): except greenlet.GreenletExit: # ignore these since they are just from shutdowns pass + except rpc_common.ClientException, e: + LOG.debug(_("Expected exception during message handling (%s)") % + e._exc_info[1]) + return {'exc': + rpc_common.serialize_remote_exception(e._exc_info, + log_failure=False)} except Exception: + LOG.error(_("Exception during message handling")) return {'exc': rpc_common.serialize_remote_exception(sys.exc_info())} @@ -274,7 +281,7 @@ class InternalContext(object): ctx.replies) LOG.debug(_("Sending reply")) - cast(FLAGS, ctx, topic, { + cast(CONF, ctx, topic, { 'method': '-process_reply', 'args': { 'msg_id': msg_id, @@ -329,7 +336,6 @@ class ZmqBaseReactor(ConsumerBase): def __init__(self, conf): super(ZmqBaseReactor, self).__init__() - self.conf = conf self.mapping = {} self.proxies = {} self.threads = [] @@ -405,7 +411,7 @@ class ZmqProxy(ZmqBaseReactor): super(ZmqProxy, self).__init__(conf) self.topic_proxy = {} - ipc_dir = conf.rpc_zmq_ipc_dir + ipc_dir = CONF.rpc_zmq_ipc_dir self.topic_proxy['zmq_replies'] = \ ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ), @@ -413,7 +419,7 @@ class ZmqProxy(ZmqBaseReactor): self.sockets.append(self.topic_proxy['zmq_replies']) def consume(self, sock): - ipc_dir = self.conf.rpc_zmq_ipc_dir + ipc_dir = CONF.rpc_zmq_ipc_dir #TODO(ewindisch): use zero-copy (i.e. 
references, not copying) data = sock.recv() @@ -487,7 +493,6 @@ class Connection(rpc_common.Connection): """Manages connections and threads.""" def __init__(self, conf): - self.conf = conf self.reactor = ZmqReactor(conf) def create_consumer(self, topic, proxy, fanout=False): @@ -508,7 +513,7 @@ class Connection(rpc_common.Connection): # Receive messages from (local) proxy inaddr = "ipc://%s/zmq_topic_%s" % \ - (self.conf.rpc_zmq_ipc_dir, topic) + (CONF.rpc_zmq_ipc_dir, topic) LOG.debug(_("Consumer is a zmq.%s"), ['PULL', 'SUB'][sock_type == zmq.SUB]) @@ -527,7 +532,7 @@ class Connection(rpc_common.Connection): def _cast(addr, context, msg_id, topic, msg, timeout=None): - timeout_cast = timeout or FLAGS.rpc_cast_timeout + timeout_cast = timeout or CONF.rpc_cast_timeout payload = [RpcContext.marshal(context), msg] with Timeout(timeout_cast, exception=rpc_common.Timeout): @@ -545,13 +550,13 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None): def _call(addr, context, msg_id, topic, msg, timeout=None): # timeout_response is how long we wait for a response - timeout = timeout or FLAGS.rpc_response_timeout + timeout = timeout or CONF.rpc_response_timeout # The msg_id is used to track replies. - msg_id = str(uuid.uuid4().hex) + msg_id = uuid.uuid4().hex # Replies always come into the reply service. - reply_topic = "zmq_replies.%s" % FLAGS.rpc_zmq_host + reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host LOG.debug(_("Creating payload")) # Curry the original request into a reply method. @@ -573,7 +578,7 @@ def _call(addr, context, msg_id, topic, msg, timeout=None): with Timeout(timeout, exception=rpc_common.Timeout): try: msg_waiter = ZmqSocket( - "ipc://%s/zmq_topic_zmq_replies" % FLAGS.rpc_zmq_ipc_dir, + "ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir, zmq.SUB, subscribe=msg_id, bind=False ) @@ -599,7 +604,7 @@ def _call(addr, context, msg_id, topic, msg, timeout=None): # responses for Exceptions. 
for resp in responses: if isinstance(resp, types.DictType) and 'exc' in resp: - raise rpc_common.deserialize_remote_exception(FLAGS, resp['exc']) + raise rpc_common.deserialize_remote_exception(CONF, resp['exc']) return responses[-1] @@ -610,7 +615,7 @@ def _multi_send(method, context, topic, msg, timeout=None): dispatches to the matchmaker and sends message to all relevant hosts. """ - conf = FLAGS + conf = CONF LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) queues = matchmaker.queues(topic) @@ -641,26 +646,22 @@ def create_connection(conf, new=True): def multicall(conf, *args, **kwargs): """Multiple calls.""" - register_opts(conf) return _multi_send(_call, *args, **kwargs) def call(conf, *args, **kwargs): """Send a message, expect a response.""" - register_opts(conf) data = _multi_send(_call, *args, **kwargs) return data[-1] def cast(conf, *args, **kwargs): """Send a message expecting no reply.""" - register_opts(conf) _multi_send(_cast, *args, **kwargs) def fanout_cast(conf, context, topic, msg, **kwargs): """Send a message to all listening and expect no reply.""" - register_opts(conf) # NOTE(ewindisch): fanout~ is used because it avoid splitting on . # and acts as a non-subtle hint to the matchmaker and ZmqProxy. _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) @@ -672,7 +673,6 @@ def notify(conf, context, topic, msg, **kwargs): Notifications are sent to topic-priority. This differs from the AMQP drivers which send to topic.priority. """ - register_opts(conf) # NOTE(ewindisch): dot-priority in rpc notifier does not # work with our assumptions. 
topic.replace('.', '-') @@ -684,7 +684,7 @@ def cleanup(): global ZMQ_CTX global matchmaker matchmaker = None - ZMQ_CTX.destroy() + ZMQ_CTX.term() ZMQ_CTX = None @@ -697,11 +697,11 @@ def register_opts(conf): # We memoize through these globals global ZMQ_CTX global matchmaker - global FLAGS + global CONF - if not FLAGS: + if not CONF: conf.register_opts(zmq_opts) - FLAGS = conf + CONF = conf # Don't re-set, if this method is called twice. if not ZMQ_CTX: ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts) diff --git a/reddwarf/openstack/common/rpc/matchmaker.py b/reddwarf/openstack/common/rpc/matchmaker.py index 92041a71aa..b3a0ceb5ac 100644 --- a/reddwarf/openstack/common/rpc/matchmaker.py +++ b/reddwarf/openstack/common/rpc/matchmaker.py @@ -21,10 +21,10 @@ return keys for direct exchanges, per (approximate) AMQP parlance. import contextlib import itertools import json -import logging from reddwarf.openstack.common import cfg from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import log as logging matchmaker_opts = [ diff --git a/reddwarf/openstack/common/rpc/service.py b/reddwarf/openstack/common/rpc/service.py new file mode 100644 index 0000000000..3301c2785d --- /dev/null +++ b/reddwarf/openstack/common/rpc/service.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import rpc +from reddwarf.openstack.common.rpc import dispatcher as rpc_dispatcher +from reddwarf.openstack.common import service + + +LOG = logging.getLogger(__name__) + + +class Service(service.Service): + """Service object for binaries running on hosts. + + A service enables rpc by listening to queues based on topic and host.""" + def __init__(self, host, topic, manager=None): + super(Service, self).__init__() + self.host = host + self.topic = topic + if manager is None: + self.manager = self + else: + self.manager = manager + + def start(self): + super(Service, self).start() + + self.conn = rpc.create_connection(new=True) + LOG.debug(_("Creating Consumer connection for Service %s") % + self.topic) + + dispatcher = rpc_dispatcher.RpcDispatcher([self.manager]) + + # Share this same connection for these Consumers + self.conn.create_consumer(self.topic, dispatcher, fanout=False) + + node_topic = '%s.%s' % (self.topic, self.host) + self.conn.create_consumer(node_topic, dispatcher, fanout=False) + + self.conn.create_consumer(self.topic, dispatcher, fanout=True) + + # Hook to allow the manager to do other initializations after + # the rpc connection is created. + if callable(getattr(self.manager, 'initialize_service_hook', None)): + self.manager.initialize_service_hook(self) + + # Consume from all consumers in a thread + self.conn.consume_in_thread() + + def stop(self): + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. 
as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass + super(Service, self).stop() diff --git a/reddwarf/openstack/common/service.py b/reddwarf/openstack/common/service.py new file mode 100644 index 0000000000..ca475080a1 --- /dev/null +++ b/reddwarf/openstack/common/service.py @@ -0,0 +1,325 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import os +import random +import signal +import sys +import time + +import eventlet +import extras +import logging as std_logging + +from reddwarf.openstack.common import cfg +from reddwarf.openstack.common import eventlet_backdoor +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import threadgroup + + +rpc = extras.try_import('reddwarf.openstack.common.rpc') +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. 
+ + :returns: None + + """ + self._services = threadgroup.ThreadGroup('launcher') + eventlet_backdoor.initialize_if_enabled() + + @staticmethod + def run_service(service): + """Start and wait for a service to finish. + + :param service: service to run and wait for. + :returns: None + + """ + service.start() + service.wait() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + self._services.add_thread(self.run_service, service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self._services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + self._services.wait() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_DFL) + + raise SignalExit(signo) + + def wait(self): + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + status = None + try: + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[exc.signo] + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + except SystemExit as exc: + status = exc.code + finally: + if rpc: + rpc.cleanup() + self.stop() + return status + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def 
__init__(self): + self.children = {} + self.sigcaught = None + self.running = True + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process(self, service): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + signal.signal(signal.SIGTERM, _sigterm) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn_n(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.run_service(service) + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. 
+ if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. + status = 0 + try: + self._child_process(wrap.service) + except SignalExit as exc: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[exc.signo] + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_('Unhandled exception')) + status = 2 + finally: + wrap.service.stop() + + os._exit(status) + + LOG.info(_('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + pid, status = os.wait() + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def wait(self): + """Loop waiting on children to die and respawning as necessary""" + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + while self.running: + wrap = self._wait_child() + if not wrap: + continue + + while 
self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + if self.sigcaught: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[self.sigcaught] + LOG.info(_('Caught %s, stopping children'), signame) + + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup('service', threads) + + def start(self): + pass + + def stop(self): + self.tg.stop() + + def wait(self): + self.tg.wait() + + +def launch(service, workers=None): + if workers: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + else: + launcher = ServiceLauncher() + launcher.launch_service(service) + return launcher diff --git a/reddwarf/openstack/common/setup.py b/reddwarf/openstack/common/setup.py index 9eabfcca3f..e6f72f034e 100644 --- a/reddwarf/openstack/common/setup.py +++ b/reddwarf/openstack/common/setup.py @@ -19,26 +19,31 @@ Utilities with minimum-depends for use in setup.py """ +import datetime import os import re import subprocess +import sys + +from setuptools.command import sdist def parse_mailmap(mailmap='.mailmap'): mapping = {} if os.path.exists(mailmap): - fp = open(mailmap, 'r') - for l in fp: - l = l.strip() - if not l.startswith('#') and ' ' in l: - canonical_email, alias = l.split(' ') - mapping[alias] = canonical_email + with open(mailmap, 'r') as fp: + for l in fp: + l = l.strip() + if not l.startswith('#') and ' ' in l: + canonical_email, alias = [x for x in l.split(' ') + if x.startswith('<')] + mapping[alias] = canonical_email return mapping def canonicalize_emails(changelog, mapping): - """ Takes in a string and an email alias 
mapping and replaces all - instances of the aliases in the string with their real email + """Takes in a string and an email alias mapping and replaces all + instances of the aliases in the string with their real email. """ for alias, email in mapping.iteritems(): changelog = changelog.replace(alias, email) @@ -47,10 +52,10 @@ def canonicalize_emails(changelog, mapping): # Get requirements from the first file that exists def get_reqs_from_files(requirements_files): - reqs_in = [] for requirements_file in requirements_files: if os.path.exists(requirements_file): - return open(requirements_file, 'r').read().split('\n') + with open(requirements_file, 'r') as fil: + return fil.read().split('\n') return [] @@ -58,11 +63,25 @@ def parse_requirements(requirements_files=['requirements.txt', 'tools/pip-requires']): requirements = [] for line in get_reqs_from_files(requirements_files): + # For the requirements list, we need to inject only the portion + # after egg= so that distutils knows the package it's looking for + # such as: + # -e git://github.com/openstack/nova/master#egg=nova if re.match(r'\s*-e\s+', line): requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', line)) + # such as: + # http://github.com/openstack/nova/zipball/master#egg=nova + elif re.match(r'\s*https?:', line): + requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1', + line)) + # -f lines are for index locations, and don't get used here elif re.match(r'\s*-f\s+', line): pass + # argparse is part of the standard library starting with 2.7 + # adding it to the requirements list screws distro installs + elif line == 'argparse' and sys.version_info >= (2, 7): + pass else: requirements.append(line) @@ -72,11 +91,18 @@ def parse_requirements(requirements_files=['requirements.txt', def parse_dependency_links(requirements_files=['requirements.txt', 'tools/pip-requires']): dependency_links = [] + # dependency_links inject alternate locations to find packages listed + # in requirements for line in 
get_reqs_from_files(requirements_files): + # skip comments and blank lines if re.match(r'(\s*#)|(\s*$)', line): continue + # lines with -e or -f need the whole line, minus the flag if re.match(r'\s*-[ef]\s+', line): dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line)) + # lines that are only urls can go in unmolested + elif re.match(r'\s*https?:', line): + dependency_links.append(line) return dependency_links @@ -91,37 +117,250 @@ def write_requirements(): def _run_shell_command(cmd): - output = subprocess.Popen(["/bin/sh", "-c", cmd], - stdout=subprocess.PIPE) - return output.communicate()[0].strip() + if os.name == 'nt': + output = subprocess.Popen(["cmd.exe", "/C", cmd], + stdout=subprocess.PIPE) + else: + output = subprocess.Popen(["/bin/sh", "-c", cmd], + stdout=subprocess.PIPE) + out = output.communicate() + if len(out) == 0: + return None + if len(out[0].strip()) == 0: + return None + return out[0].strip() -def write_vcsversion(location): - """ Produce a vcsversion dict that mimics the old one produced by bzr - """ - if os.path.isdir('.git'): - branch_nick_cmd = 'git branch | grep -Ei "\* (.*)" | cut -f2 -d" "' - branch_nick = _run_shell_command(branch_nick_cmd) - revid_cmd = "git rev-parse HEAD" - revid = _run_shell_command(revid_cmd).split()[0] - revno_cmd = "git log --oneline | wc -l" - revno = _run_shell_command(revno_cmd) - with open(location, 'w') as version_file: - version_file.write(""" -# This file is automatically generated by setup.py, So don't edit it. 
:) -version_info = { - 'branch_nick': '%s', - 'revision_id': '%s', - 'revno': %s -} -""" % (branch_nick, revid, revno)) +def _get_git_next_version_suffix(branch_name): + datestamp = datetime.datetime.now().strftime('%Y%m%d') + if branch_name == 'milestone-proposed': + revno_prefix = "r" + else: + revno_prefix = "" + _run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*") + milestone_cmd = "git show meta/openstack/release:%s" % branch_name + milestonever = _run_shell_command(milestone_cmd) + if milestonever: + first_half = "%s~%s" % (milestonever, datestamp) + else: + first_half = datestamp + + post_version = _get_git_post_version() + # post version should look like: + # 0.1.1.4.gcc9e28a + # where the bit after the last . is the short sha, and the bit between + # the last and second to last is the revno count + (revno, sha) = post_version.split(".")[-2:] + second_half = "%s%s.%s" % (revno_prefix, revno, sha) + return ".".join((first_half, second_half)) + + +def _get_git_current_tag(): + return _run_shell_command("git tag --contains HEAD") + + +def _get_git_tag_info(): + return _run_shell_command("git describe --tags") + + +def _get_git_post_version(): + current_tag = _get_git_current_tag() + if current_tag is not None: + return current_tag + else: + tag_info = _get_git_tag_info() + if tag_info is None: + base_version = "0.0" + cmd = "git --no-pager log --oneline" + out = _run_shell_command(cmd) + revno = len(out.split("\n")) + sha = _run_shell_command("git describe --always") + else: + tag_infos = tag_info.split("-") + base_version = "-".join(tag_infos[:-2]) + (revno, sha) = tag_infos[-2:] + return "%s.%s.%s" % (base_version, revno, sha) def write_git_changelog(): - """ Write a changelog based on the git changelog """ + """Write a changelog based on the git changelog.""" + new_changelog = 'ChangeLog' + if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'): + if os.path.isdir('.git'): + git_log_cmd = 'git log --stat' + changelog = 
_run_shell_command(git_log_cmd) + mailmap = parse_mailmap() + with open(new_changelog, "w") as changelog_file: + changelog_file.write(canonicalize_emails(changelog, mailmap)) + else: + open(new_changelog, 'w').close() + + +def generate_authors(): + """Create AUTHORS file using git commits.""" + jenkins_email = 'jenkins@review.(openstack|stackforge).org' + old_authors = 'AUTHORS.in' + new_authors = 'AUTHORS' + if not os.getenv('SKIP_GENERATE_AUTHORS'): + if os.path.isdir('.git'): + # don't include jenkins email address in AUTHORS file + git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | " + "egrep -v '" + jenkins_email + "'") + changelog = _run_shell_command(git_log_cmd) + mailmap = parse_mailmap() + with open(new_authors, 'w') as new_authors_fh: + new_authors_fh.write(canonicalize_emails(changelog, mailmap)) + if os.path.exists(old_authors): + with open(old_authors, "r") as old_authors_fh: + new_authors_fh.write('\n' + old_authors_fh.read()) + else: + open(new_authors, 'w').close() + + +_rst_template = """%(heading)s +%(underline)s + +.. automodule:: %(module)s + :members: + :undoc-members: + :show-inheritance: +""" + + +def read_versioninfo(project): + """Read the versioninfo file. 
If it doesn't exist, we're in a github + zipball, and there's really no way to know what version we really + are, but that should be ok, because the utility of that should be + just about nil if this code path is in use in the first place.""" + versioninfo_path = os.path.join(project, 'versioninfo') + if os.path.exists(versioninfo_path): + with open(versioninfo_path, 'r') as vinfo: + version = vinfo.read().strip() + else: + version = "0.0.0" + return version + + +def write_versioninfo(project, version): + """Write a simple file containing the version of the package.""" + with open(os.path.join(project, 'versioninfo'), 'w') as fil: + fil.write("%s\n" % version) + + +def get_cmdclass(): + """Return dict of commands to run from setup.py.""" + + cmdclass = dict() + + def _find_modules(arg, dirname, files): + for filename in files: + if filename.endswith('.py') and filename != '__init__.py': + arg["%s.%s" % (dirname.replace('/', '.'), + filename[:-3])] = True + + class LocalSDist(sdist.sdist): + """Builds the ChangeLog and Authors files from VC first.""" + + def run(self): + write_git_changelog() + generate_authors() + # sdist.sdist is an old style class, can't use super() + sdist.sdist.run(self) + + cmdclass['sdist'] = LocalSDist + + # If Sphinx is installed on the box running setup.py, + # enable setup.py to build the documentation, otherwise, + # just ignore it + try: + from sphinx.setup_command import BuildDoc + + class LocalBuildDoc(BuildDoc): + def generate_autoindex(self): + print "**Autodocumenting from %s" % os.path.abspath(os.curdir) + modules = {} + option_dict = self.distribution.get_option_dict('build_sphinx') + source_dir = os.path.join(option_dict['source_dir'][1], 'api') + if not os.path.exists(source_dir): + os.makedirs(source_dir) + for pkg in self.distribution.packages: + if '.' 
not in pkg: + os.path.walk(pkg, _find_modules, modules) + module_list = modules.keys() + module_list.sort() + autoindex_filename = os.path.join(source_dir, 'autoindex.rst') + with open(autoindex_filename, 'w') as autoindex: + autoindex.write(""".. toctree:: + :maxdepth: 1 + +""") + for module in module_list: + output_filename = os.path.join(source_dir, + "%s.rst" % module) + heading = "The :mod:`%s` Module" % module + underline = "=" * len(heading) + values = dict(module=module, heading=heading, + underline=underline) + + print "Generating %s" % output_filename + with open(output_filename, 'w') as output_file: + output_file.write(_rst_template % values) + autoindex.write(" %s.rst\n" % module) + + def run(self): + if not os.getenv('SPHINX_DEBUG'): + self.generate_autoindex() + + for builder in ['html', 'man']: + self.builder = builder + self.finalize_options() + self.project = self.distribution.get_name() + self.version = self.distribution.get_version() + self.release = self.distribution.get_version() + BuildDoc.run(self) + cmdclass['build_sphinx'] = LocalBuildDoc + except ImportError: + pass + + return cmdclass + + +def get_git_branchname(): + for branch in _run_shell_command("git branch --color=never").split("\n"): + if branch.startswith('*'): + _branch_name = branch.split()[1].strip() + if _branch_name == "(no": + _branch_name = "no-branch" + return _branch_name + + +def get_pre_version(projectname, base_version): + """Return a version which is leading up to a version that will + be released in the future.""" if os.path.isdir('.git'): - git_log_cmd = 'git log --stat' - changelog = _run_shell_command(git_log_cmd) - mailmap = parse_mailmap() - with open("ChangeLog", "w") as changelog_file: - changelog_file.write(canonicalize_emails(changelog, mailmap)) + current_tag = _get_git_current_tag() + if current_tag is not None: + version = current_tag + else: + branch_name = os.getenv('BRANCHNAME', + os.getenv('GERRIT_REFNAME', + get_git_branchname())) + version_suffix = 
_get_git_next_version_suffix(branch_name) + version = "%s~%s" % (base_version, version_suffix) + write_versioninfo(projectname, version) + return version + else: + version = read_versioninfo(projectname) + return version + + +def get_post_version(projectname): + """Return a version which is equal to the tag that's on the current + revision if there is one, or tag plus number of additional revisions + if the current revision has no tag.""" + + if os.path.isdir('.git'): + version = _get_git_post_version() + write_versioninfo(projectname, version) + return version + return read_versioninfo(projectname) diff --git a/reddwarf/openstack/common/testutils.py b/reddwarf/openstack/common/testutils.py new file mode 100644 index 0000000000..5c438e3c7b --- /dev/null +++ b/reddwarf/openstack/common/testutils.py @@ -0,0 +1,68 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Utilities for unit tests.""" + +import functools +import nose + + +class skip_test(object): + """Decorator that skips a test.""" + # TODO(tr3buchet): remember forever what comstud did here + def __init__(self, msg): + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + raise nose.SkipTest(self.message) + return _skipper + + +class skip_if(object): + """Decorator that skips a test if condition is true.""" + def __init__(self, condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) + return _skipper + + +class skip_unless(object): + """Decorator that skips a test if condition is not true.""" + def __init__(self, condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if not self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) + return _skipper diff --git a/reddwarf/openstack/common/threadgroup.py b/reddwarf/openstack/common/threadgroup.py new file mode 100644 index 0000000000..afa272357d --- /dev/null +++ b/reddwarf/openstack/common/threadgroup.py @@ -0,0 +1,116 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from eventlet import greenlet +from eventlet import greenpool +from eventlet import greenthread + +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import loopingcall + + +LOG = logging.getLogger(__name__) + + +def _thread_done(gt, *args, **kwargs): + """ Callback function to be passed to GreenThread.link() when we spawn() + Calls the :class:`ThreadGroup` to notify if. + + """ + kwargs['group'].thread_done(kwargs['thread']) + + +class Thread(object): + """ Wrapper around a greenthread, that holds a reference to the + :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when + it has done so it can be removed from the threads list. + """ + def __init__(self, name, thread, group): + self.name = name + self.thread = thread + self.thread.link(_thread_done, group=group, thread=self) + + def stop(self): + self.thread.kill() + + def wait(self): + return self.thread.wait() + + +class ThreadGroup(object): + """ The point of the ThreadGroup classis to: + + * keep track of timers and greenthreads (making it easier to stop them + when need be). + * provide an easy API to add timers. 
+ """ + def __init__(self, name, thread_pool_size=10): + self.name = name + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.LoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(callback.__name__, gt, self) + self.threads.append(th) + + def thread_done(self, thread): + self.threads.remove(thread) + + def stop(self): + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + # don't kill the current thread. + continue + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def wait(self): + for x in self.timers: + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + continue + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/reddwarf/openstack/common/timeutils.py b/reddwarf/openstack/common/timeutils.py index 5eeaf70aa4..ea69164284 100644 --- a/reddwarf/openstack/common/timeutils.py +++ b/reddwarf/openstack/common/timeutils.py @@ -21,7 +21,6 @@ Time related utilities and helper functions. 
import calendar import datetime -import time import iso8601 @@ -63,9 +62,11 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): def normalize_time(timestamp): - """Normalize time in arbitrary timezone to UTC""" + """Normalize time in arbitrary timezone to UTC naive object""" offset = timestamp.utcoffset() - return timestamp.replace(tzinfo=None) - offset if offset else timestamp + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset def is_older_than(before, seconds): @@ -73,6 +74,11 @@ def is_older_than(before, seconds): return utcnow() - before > datetime.timedelta(seconds=seconds) +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + return after - utcnow() > datetime.timedelta(seconds=seconds) + + def utcnow_ts(): """Timestamp version of our utcnow function.""" return calendar.timegm(utcnow().timetuple()) @@ -81,7 +87,10 @@ def utcnow_ts(): def utcnow(): """Overridable version of utils.utcnow.""" if utcnow.override_time: - return utcnow.override_time + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time return datetime.datetime.utcnow() @@ -89,21 +98,63 @@ utcnow.override_time = None def set_time_override(override_time=datetime.datetime.utcnow()): - """Override utils.utcnow to return a constant time.""" + """ + Override utils.utcnow to return a constant time or a list thereof, + one at a time. 
+ """ utcnow.override_time = override_time def advance_time_delta(timedelta): - """Advance overriden time using a datetime.timedelta.""" + """Advance overridden time using a datetime.timedelta.""" assert(not utcnow.override_time is None) - utcnow.override_time += timedelta + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta def advance_time_seconds(seconds): - """Advance overriden time by seconds.""" + """Advance overridden time by seconds.""" advance_time_delta(datetime.timedelta(0, seconds)) def clear_time_override(): """Remove the overridden time.""" utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times.""" + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """ + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). + """ + delta = after - before + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) diff --git a/reddwarf/openstack/common/utils.py b/reddwarf/openstack/common/utils.py index f383c6fadb..05f0e9f7be 100644 --- a/reddwarf/openstack/common/utils.py +++ b/reddwarf/openstack/common/utils.py @@ -19,21 +19,8 @@ System-level utilities and helper functions. 
""" -import datetime import logging -import os -import random -import shlex -import sys -from eventlet import greenthread -from eventlet.green import subprocess -import iso8601 - -from reddwarf.openstack.common import exception - - -TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" LOG = logging.getLogger(__name__) @@ -42,7 +29,9 @@ def int_from_bool_as_string(subject): Interpret a string as a boolean and return either 1 or 0. Any string value in: + ('True', 'true', 'On', 'on', '1') + is interpreted as a boolean True. Useful for JSON-decoded stuff and config file parsing @@ -55,7 +44,9 @@ def bool_from_string(subject): Interpret a string as a boolean. Any string value in: - ('True', 'true', 'On', 'on', '1') + + ('True', 'true', 'On', 'on', 'Yes', 'yes', '1') + is interpreted as a boolean True. Useful for JSON-decoded stuff and config file parsing @@ -63,151 +54,6 @@ def bool_from_string(subject): if isinstance(subject, bool): return subject if isinstance(subject, basestring): - if subject.strip().lower() in ('true', 'on', '1'): + if subject.strip().lower() in ('true', 'on', 'yes', '1'): return True return False - - -def execute(*cmd, **kwargs): - """ - Helper method to execute command with optional retry. - - :cmd Passed to subprocess.Popen. - :process_input Send to opened process. - :check_exit_code Defaults to 0. Raise exception.ProcessExecutionError - unless program exits with this code. - :delay_on_retry True | False. Defaults to True. If set to True, wait a - short amount of time before retrying. - :attempts How many times to retry cmd. - :run_as_root True | False. Defaults to False. If set to True, - the command is prefixed by the command specified - in the root_helper kwarg. 
- :root_helper command to prefix all cmd's with - - :raises exception.Error on receiving unknown arguments - :raises exception.ProcessExecutionError - """ - - process_input = kwargs.pop('process_input', None) - check_exit_code = kwargs.pop('check_exit_code', 0) - delay_on_retry = kwargs.pop('delay_on_retry', True) - attempts = kwargs.pop('attempts', 1) - run_as_root = kwargs.pop('run_as_root', False) - root_helper = kwargs.pop('root_helper', '') - if len(kwargs): - raise exception.Error(_('Got unknown keyword args ' - 'to utils.execute: %r') % kwargs) - if run_as_root: - cmd = shlex.split(root_helper) + list(cmd) - cmd = map(str, cmd) - - while attempts > 0: - attempts -= 1 - try: - LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) - _PIPE = subprocess.PIPE # pylint: disable=E1101 - obj = subprocess.Popen(cmd, - stdin=_PIPE, - stdout=_PIPE, - stderr=_PIPE, - close_fds=True) - result = None - if process_input is not None: - result = obj.communicate(process_input) - else: - result = obj.communicate() - obj.stdin.close() # pylint: disable=E1101 - _returncode = obj.returncode # pylint: disable=E1101 - if _returncode: - LOG.debug(_('Result was %s') % _returncode) - if (isinstance(check_exit_code, int) and - not isinstance(check_exit_code, bool) and - _returncode != check_exit_code): - (stdout, stderr) = result - raise exception.ProcessExecutionError( - exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) - return result - except exception.ProcessExecutionError: - if not attempts: - raise - else: - LOG.debug(_('%r failed. 
Retrying.'), cmd) - if delay_on_retry: - greenthread.sleep(random.randint(20, 200) / 100.0) - finally: - # NOTE(termie): this appears to be necessary to let the subprocess - # call clean something up in between calls, without - # it two execute calls in a row hangs the second one - greenthread.sleep(0) - - -def import_class(import_str): - """Returns a class from a string including module and class""" - mod_str, _sep, class_str = import_str.rpartition('.') - try: - __import__(mod_str) - return getattr(sys.modules[mod_str], class_str) - except (ImportError, ValueError, AttributeError): - raise exception.NotFound('Class from %s import %s cannot be found' - % (mod_str, class_str)) - - -def import_object(import_str): - """Returns an object including a module or module and class""" - try: - __import__(import_str) - return sys.modules[import_str] - except ImportError as ie: - try: - return import_class(import_str) - except exception.NotFound: - raise ie - - -def isotime(at=None): - """Stringify time in ISO 8601 format""" - if not at: - at = datetime.datetime.utcnow() - str = at.strftime(TIME_FORMAT) - tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' - str += ('Z' if tz == 'UTC' else tz) - return str - - -def parse_isotime(timestr): - """Parse time from ISO 8601 format""" - try: - return iso8601.parse_date(timestr) - except iso8601.ParseError as e: - raise ValueError(e.message) - except TypeError as e: - raise ValueError(e.message) - - -def normalize_time(timestamp): - """Normalize time in arbitrary timezone to UTC""" - offset = timestamp.utcoffset() - return timestamp.replace(tzinfo=None) - offset if offset else timestamp - - -def utcnow(): - """Overridable version of utils.utcnow.""" - if utcnow.override_time: - return utcnow.override_time - return datetime.datetime.utcnow() - - -utcnow.override_time = None - - -def set_time_override(override_time=datetime.datetime.utcnow()): - """Override utils.utcnow to return a constant time.""" - utcnow.override_time = 
override_time - - -def clear_time_override(): - """Remove the overridden time.""" - utcnow.override_time = None diff --git a/reddwarf/openstack/common/uuidutils.py b/reddwarf/openstack/common/uuidutils.py new file mode 100644 index 0000000000..7608acb942 --- /dev/null +++ b/reddwarf/openstack/common/uuidutils.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +UUID related utilities and helper functions. +""" + +import uuid + + +def generate_uuid(): + return str(uuid.uuid4()) + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. + + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False diff --git a/reddwarf/openstack/common/version.py b/reddwarf/openstack/common/version.py new file mode 100644 index 0000000000..dae88e37b5 --- /dev/null +++ b/reddwarf/openstack/common/version.py @@ -0,0 +1,168 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Utilities for consuming the auto-generated versioninfo files. +""" + +import datetime +import pkg_resources + +import setup + + +class _deferred_version_string(str): + """Internal helper class which provides delayed version calculation.""" + + def __new__(cls, version_info, prefix): + new_obj = str.__new__(cls, "") + new_obj._version_info = version_info + new_obj._prefix = prefix + new_obj._cached_version = None + return new_obj + + def _get_cached_version(self): + if not self._cached_version: + self._cached_version = \ + "%s%s" % (self._prefix, + self._version_info.version_string()) + return self._cached_version + + def __len__(self): + return self._get_cached_version().__len__() + + def __contains__(self, item): + return self._get_cached_version().__contains__(item) + + def __getslice__(self, i, j): + return self._get_cached_version().__getslice__(i, j) + + def __str__(self): + return self._get_cached_version() + + def __repr__(self): + return self._get_cached_version() + + +class VersionInfo(object): + + def __init__(self, package, python_package=None, pre_version=None): + """Object that understands versioning for a package + :param package: name of the top level python namespace. For glance, + this would be "glance" for python-glanceclient, it + would be "glanceclient" + :param python_package: optional name of the project name. For + glance this can be left unset. 
For + python-glanceclient, this would be + "python-glanceclient" + :param pre_version: optional version that the project is working to + """ + self.package = package + if python_package is None: + self.python_package = package + else: + self.python_package = python_package + self.pre_version = pre_version + self.version = None + + def _generate_version(self): + """Defer to the openstack.common.setup routines for making a + version from git.""" + if self.pre_version is None: + return setup.get_post_version(self.python_package) + else: + return setup.get_pre_version(self.python_package, self.pre_version) + + def _newer_version(self, pending_version): + """Check to see if we're working with a stale version or not. + We expect a version string that either looks like: + 2012.2~f3~20120708.10.4426392 + which is an unreleased version of a pre-version, or: + 0.1.1.4.gcc9e28a + which is an unreleased version of a post-version, or: + 0.1.1 + Which is a release and which should match tag. + For now, if we have a date-embedded version, check to see if it's + old, and if so re-generate. Otherwise, just deal with it. + """ + try: + version_date = int(self.version.split("~")[-1].split('.')[0]) + if version_date < int(datetime.date.today().strftime('%Y%m%d')): + return self._generate_version() + else: + return pending_version + except Exception: + return pending_version + + def version_string_with_vcs(self, always=False): + """Return the full version of the package including suffixes indicating + VCS status. + + For instance, if we are working towards the 2012.2 release, + canonical_version_string should return 2012.2 if this is a final + release, or else something like 2012.2~f1~20120705.20 if it's not. 
+ + :param always: if true, skip all version caching + """ + if always: + self.version = self._generate_version() + + if self.version is None: + + requirement = pkg_resources.Requirement.parse(self.python_package) + versioninfo = "%s/versioninfo" % self.package + try: + raw_version = pkg_resources.resource_string(requirement, + versioninfo) + self.version = self._newer_version(raw_version.strip()) + except (IOError, pkg_resources.DistributionNotFound): + self.version = self._generate_version() + + return self.version + + def canonical_version_string(self, always=False): + """Return the simple version of the package excluding any suffixes. + + For instance, if we are working towards the 2012.2 release, + canonical_version_string should return 2012.2 in all cases. + + :param always: if true, skip all version caching + """ + return self.version_string_with_vcs(always).split('~')[0] + + def version_string(self, always=False): + """Return the base version of the package. + + For instance, if we are working towards the 2012.2 release, + version_string should return 2012.2 if this is a final release, or + 2012.2-dev if it is not. + + :param always: if true, skip all version caching + """ + version_parts = self.version_string_with_vcs(always).split('~') + if len(version_parts) == 1: + return version_parts[0] + else: + return '%s-dev' % (version_parts[0],) + + def deferred_version_string(self, prefix=""): + """Generate an object which will expand in a string context to + the results of version_string(). 
We do this so that don't + call into pkg_resources every time we start up a program when + passing version information into the CONF constructor, but + rather only do the calculation when and if a version is requested + """ + return _deferred_version_string(self, prefix) diff --git a/reddwarf/openstack/common/wsgi.py b/reddwarf/openstack/common/wsgi.py index f31df1c47e..f3ff899013 100644 --- a/reddwarf/openstack/common/wsgi.py +++ b/reddwarf/openstack/common/wsgi.py @@ -23,31 +23,22 @@ import eventlet.wsgi eventlet.patcher.monkey_patch(all=False, socket=True) -import json -import logging -import sys import routes import routes.middleware +import sys import webob.dec import webob.exc from xml.dom import minidom from xml.parsers import expat from reddwarf.openstack.common import exception +from reddwarf.openstack.common.gettextutils import _ +from reddwarf.openstack.common import jsonutils +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import service -LOG = logging.getLogger('wsgi') - - -class WritableLogger(object): - """A thin wrapper that responds to `write` and logs.""" - - def __init__(self, logger, level=logging.DEBUG): - self.logger = logger - self.level = level - - def write(self, msg): - self.logger.log(self.level, msg.strip("\n")) +LOG = logging.getLogger(__name__) def run_server(application, port): @@ -56,29 +47,54 @@ def run_server(application, port): eventlet.wsgi.server(sock, application) -class Server(object): - """Server class to manage multiple WSGI sockets and applications.""" +class Service(service.Service): + """ + Provides a Service API for wsgi servers. - def __init__(self, threads=1000): - self.pool = eventlet.GreenPool(threads) + This gives us the ability to launch wsgi servers with the + Launcher classes in service.py. 
+ """ - def start(self, application, port, host='0.0.0.0', backlog=128): - """Run a WSGI server with the given application.""" - socket = eventlet.listen((host, port), backlog=backlog) - self.pool.spawn_n(self._run, application, socket) + def __init__(self, application, port, + host='0.0.0.0', backlog=128, threads=1000): + self.application = application + self._port = port + self._host = host + self.backlog = backlog + super(Service, self).__init__(threads) - def wait(self): - """Wait until all servers have completed running.""" - try: - self.pool.waitall() - except KeyboardInterrupt: - pass + def start(self): + """Start serving this service using the provided server instance. + + :returns: None + + """ + super(Service, self).start() + self._socket = eventlet.listen((self._host, self._port), + backlog=self.backlog) + self.tg.add_thread(self._run, self.application, self._socket) + + @property + def host(self): + return self._socket.getsockname()[0] if self._socket else self._host + + @property + def port(self): + return self._socket.getsockname()[1] if self._socket else self._port + + def stop(self): + """Stop serving this API. 
+ + :returns: None + + """ + super(Service, self).stop() def _run(self, application, socket): """Start a WSGI server in a new green thread.""" - logger = logging.getLogger('eventlet.wsgi.server') - eventlet.wsgi.server(socket, application, custom_pool=self.pool, - log=WritableLogger(logger)) + logger = logging.getLogger('eventlet.wsgi') + eventlet.wsgi.server(socket, application, custom_pool=self.tg.pool, + log=logging.WritableLogger(logger)) class Middleware(object): @@ -372,7 +388,7 @@ class JSONDictSerializer(DictSerializer): _dtime = obj - datetime.timedelta(microseconds=obj.microsecond) return _dtime.isoformat() return obj - return json.dumps(data, default=sanitizer) + return jsonutils.dumps(data, default=sanitizer) class XMLDictSerializer(DictSerializer): @@ -557,9 +573,9 @@ class RequestDeserializer(object): """Extract necessary pieces of the request. :param request: Request object - :returns tuple of expected controller action name, dictionary of - keyword arguments to pass to the controller, the expected - content type of the response + :returns: tuple of (expected controller action name, dictionary of + keyword arguments to pass to the controller, the expected + content type of the response) """ action_args = self.get_action_args(request.environ) @@ -578,7 +594,7 @@ class RequestDeserializer(object): def deserialize_body(self, request, action): if not len(request.body) > 0: LOG.debug(_("Empty body provided in request")) - return self._return_empty_body(action) + return {} try: content_type = request.get_content_type() @@ -588,7 +604,7 @@ class RequestDeserializer(object): if content_type is None: LOG.debug(_("No Content-Type provided in request")) - return self._return_empty_body(action) + return {} try: deserializer = self.get_body_deserializer(content_type) @@ -598,12 +614,6 @@ class RequestDeserializer(object): return deserializer.deserialize(request.body, action) - def _return_empty_body(self, action): - if action in ["create", "update", "action"]: - 
return {'body': None} - else: - return {} - def get_body_deserializer(self, content_type): try: return self.body_deserializers[content_type] @@ -647,7 +657,7 @@ class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: - return json.loads(datastring) + return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) diff --git a/reddwarf/taskmanager/api.py b/reddwarf/taskmanager/api.py index fac6726f23..6e060eceed 100644 --- a/reddwarf/taskmanager/api.py +++ b/reddwarf/taskmanager/api.py @@ -19,15 +19,15 @@ Routes all the requests to the task manager. """ -import logging import traceback import sys -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common.manager import ManagerAPI +from reddwarf.openstack.common import log as logging -CONFIG = config.Config +CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -36,24 +36,23 @@ class API(ManagerAPI): def _fake_cast(self, method_name, **kwargs): from reddwarf.tests.fakes.common import get_event_spawer - from reddwarf.taskmanager.manager import TaskManager - instance = TaskManager() - method = getattr(instance, method_name) + from reddwarf.taskmanager.manager import Manager + method = getattr(Manager(), method_name) def func(): try: method(self.context, **kwargs) except Exception as ex: type_, value, tb = sys.exc_info() - logging.error("Error running async task:") - logging.error((traceback.format_exception(type_, value, tb))) + LOG.error("Error running async task:") + LOG.error((traceback.format_exception(type_, value, tb))) raise type_, value, tb get_event_spawer()(0, func) def _get_routing_key(self): """Create the routing key for the taskmanager""" - return CONFIG.get('taskmanager_queue', 'taskmanager') + return CONF.taskmanager_queue def resize_volume(self, new_size, instance_id): LOG.debug("Making async call to resize volume for instance: %s" diff --git a/reddwarf/taskmanager/manager.py 
b/reddwarf/taskmanager/manager.py index 3ad4066817..503412b144 100644 --- a/reddwarf/taskmanager/manager.py +++ b/reddwarf/taskmanager/manager.py @@ -15,29 +15,27 @@ # License for the specific language governing permissions and limitations # under the License. -import logging import traceback from eventlet import greenthread from reddwarf.common import exception from reddwarf.common import service +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common import periodic_task +from reddwarf.openstack.common.rpc.common import UnsupportedRpcVersion +from reddwarf.openstack.common.gettextutils import _ from reddwarf.taskmanager import models from reddwarf.taskmanager.models import BuiltInstanceTasks from reddwarf.taskmanager.models import FreshInstanceTasks -from reddwarf.openstack.common.rpc.common import UnsupportedRpcVersion + LOG = logging.getLogger(__name__) +RPC_API_VERSION = "1.0" -class TaskManager(service.Manager): - """Task manager impl""" - RPC_API_VERSION = "1.0" - - def __init__(self, *args, **kwargs): - super(TaskManager, self).__init__(*args, **kwargs) - LOG.info(_("TaskManager init %s %s") % (args, kwargs)) +class Manager(periodic_task.PeriodicTasks): def resize_volume(self, context, instance_id, new_size): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) diff --git a/reddwarf/taskmanager/models.py b/reddwarf/taskmanager/models.py index 15ba8693a1..c04a0dd336 100644 --- a/reddwarf/taskmanager/models.py +++ b/reddwarf/taskmanager/models.py @@ -12,13 +12,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging - from eventlet import greenthread from datetime import datetime import traceback from novaclient import exceptions as nova_exceptions -from reddwarf.common import config +from reddwarf.common import cfg from reddwarf.common import remote from reddwarf.common import utils from reddwarf.common.exception import GuestError @@ -42,12 +40,13 @@ from reddwarf.instance.models import InstanceServiceStatus from reddwarf.instance.models import ServiceStatuses from reddwarf.instance.tasks import InstanceTasks from reddwarf.instance.views import get_ip_address +from reddwarf.openstack.common import log as logging +from reddwarf.openstack.common.gettextutils import _ LOG = logging.getLogger(__name__) - -use_nova_server_volume = config.Config.get_bool('use_nova_server_volume', - default=False) +CONF = cfg.CONF +use_nova_server_volume = CONF.use_nova_server_volume class FreshInstanceTasks(FreshInstance): @@ -85,8 +84,9 @@ class FreshInstanceTasks(FreshInstance): server = None try: nova_client = create_nova_client(self.context) - files = {"/etc/guest_info": "--guest_id=%s\n--service_type=%s\n" % - (self.id, service_type)} + files = {"/etc/guest_info": ("[DEFAULT]\n--guest_id=" + "%s\n--service_type=%s\n" % + (self.id, service_type))} name = self.hostname or self.name volume_desc = ("mysql volume for %s" % self.id) volume_name = ("mysql-%s" % self.id) @@ -110,8 +110,8 @@ class FreshInstanceTasks(FreshInstance): err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER self._log_and_raise(e, msg, err) - device_path = config.Config.get('device_path', '/dev/vdb') - mount_point = config.Config.get('mount_point', '/var/lib/mysql') + device_path = CONF.device_path + mount_point = CONF.mount_point volume_info = {'device_path': device_path, 'mount_point': mount_point} return server, volume_info @@ -152,10 +152,10 @@ class FreshInstanceTasks(FreshInstance): LOG.info("Entering create_volume") LOG.debug(_("Starting to create the volume for the instance")) - volume_support = 
config.Config.get("reddwarf_volume_support", 'False') + volume_support = CONF.reddwarf_volume_support LOG.debug(_("reddwarf volume support = %s") % volume_support) if (volume_size is None or - utils.bool_from_string(volume_support) is False): + volume_support is False): volume_info = { 'block_device': None, 'device_path': None, @@ -188,15 +188,15 @@ class FreshInstanceTasks(FreshInstance): # :[]:[]:[] # setting the delete_on_terminate instance to true=1 mapping = "%s:%s:%s:%s" % (v_ref.id, '', v_ref.size, 1) - bdm = config.Config.get('block_device_mapping', 'vdb') + bdm = CONF.block_device_mapping block_device = {bdm: mapping} volumes = [{'id': v_ref.id, 'size': v_ref.size}] LOG.debug("block_device = %s" % block_device) LOG.debug("volume = %s" % volumes) - device_path = config.Config.get('device_path', '/dev/vdb') - mount_point = config.Config.get('mount_point', '/var/lib/mysql') + device_path = CONF.device_path + mount_point = CONF.mount_point LOG.debug(_("device_path = %s") % device_path) LOG.debug(_("mount_point = %s") % mount_point) @@ -209,8 +209,9 @@ class FreshInstanceTasks(FreshInstance): def _create_server(self, flavor_id, image_id, service_type, block_device_mapping): nova_client = create_nova_client(self.context) - files = {"/etc/guest_info": "guest_id=%s\nservice_type=%s\n" % - (self.id, service_type)} + files = {"/etc/guest_info": ("[DEFAULT]\nguest_id=%s\n" + "service_type=%s\n" % + (self.id, service_type))} name = self.hostname or self.name bdmap = block_device_mapping server = nova_client.servers.create(name, image_id, flavor_id, @@ -230,10 +231,10 @@ class FreshInstanceTasks(FreshInstance): def _create_dns_entry(self): LOG.debug("%s: Creating dns entry for instance: %s" % (greenthread.getcurrent(), self.id)) - dns_support = config.Config.get("reddwarf_dns_support", 'False') + dns_support = CONF.reddwarf_dns_support LOG.debug(_("reddwarf dns support = %s") % dns_support) - if utils.bool_from_string(dns_support): + if dns_support: nova_client = 
create_nova_client(self.context) dns_client = create_dns_client(self.context) @@ -286,9 +287,9 @@ class BuiltInstanceTasks(BuiltInstance): % self.server.id) LOG.error(ex) try: - dns_support = config.Config.get("reddwarf_dns_support", 'False') + dns_support = CONF.reddwarf_dns_support LOG.debug(_("reddwarf dns support = %s") % dns_support) - if utils.bool_from_string(dns_support): + if dns_support: dns_api = create_dns_client(self.context) dns_api.delete_instance_entry(instance_id=self.db_info.id) except Exception as ex: @@ -310,7 +311,7 @@ class BuiltInstanceTasks(BuiltInstance): return True poll_until(server_is_finished, sleep_time=2, - time_out=int(config.Config.get('server_delete_time_out'))) + time_out=CONF.server_delete_time_out) def resize_volume(self, new_size): LOG.debug("%s: Resizing volume for instance: %s to %r GB" @@ -321,7 +322,7 @@ class BuiltInstanceTasks(BuiltInstance): lambda: self.volume_client.volumes.get(self.volume_id), lambda volume: volume.status == 'in-use', sleep_time=2, - time_out=int(config.Config.get('volume_time_out'))) + time_out=CONF.volume_time_out) volume = self.volume_client.volumes.get(self.volume_id) self.update_db(volume_size=volume.size) self.nova_client.volumes.rescan_server_volume(self.server, @@ -331,6 +332,7 @@ class BuiltInstanceTasks(BuiltInstance): LOG.error("Timeout trying to rescan or resize the attached volume " "filesystem for volume: %s" % self.volume_id) except Exception as e: + LOG.error(e) LOG.error("Error encountered trying to rescan or resize the " "attached volume filesystem for volume: %s" % self.volume_id) @@ -430,7 +432,7 @@ class BuiltInstanceTasks(BuiltInstance): self.server.reboot() # Poll nova until instance is active - reboot_time_out = int(config.Config.get("reboot_time_out", 60 * 2)) + reboot_time_out = CONF.reboot_time_out def update_server_info(): self._refresh_compute_server_info() diff --git a/reddwarf/taskmanager/service.py b/reddwarf/taskmanager/service.py index 50a8bd209a..3a6a152950 100644 
--- a/reddwarf/taskmanager/service.py +++ b/reddwarf/taskmanager/service.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -import logging +from reddwarf.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/reddwarf/tests/fakes/common.py b/reddwarf/tests/fakes/common.py index 5703d0981a..ec955606c4 100644 --- a/reddwarf/tests/fakes/common.py +++ b/reddwarf/tests/fakes/common.py @@ -18,12 +18,16 @@ """Common code to help in faking the models.""" import time +import traceback +import sys from novaclient import exceptions as nova_exceptions -from reddwarf.common import config +from reddwarf.common import cfg +from reddwarf.openstack.common import log as logging -CONFIG = config.Config +CONF = cfg.CONF +LOG = logging.getLogger(__name__) def authorize(context): @@ -32,7 +36,7 @@ def authorize(context): def get_event_spawer(): - if CONFIG.get('fake_mode_events') == "simulated": + if CONF.fake_mode_events == "simulated": return event_simulator else: return eventlet_spawner @@ -61,6 +65,7 @@ def event_simulator_sleep(time_to_sleep): global pending_events while time_to_sleep > 0: itr_sleep = 0.5 + print pending_events for i in range(len(pending_events)): event = pending_events[i] event["time"] = event["time"] - itr_sleep @@ -71,7 +76,10 @@ def event_simulator_sleep(time_to_sleep): event["func"] = None try: func() - except Exception: + except Exception as e: + type_, value, tb = sys.exc_info() + LOG.info("Simulated event error.") + LOG.info((traceback.format_exception(type_, value, tb))) pass # Ignore exceptions, which can potentially occur. time_to_sleep -= itr_sleep diff --git a/reddwarf/tests/fakes/guestagent.py b/reddwarf/tests/fakes/guestagent.py index a243fabf92..f2e7080f24 100644 --- a/reddwarf/tests/fakes/guestagent.py +++ b/reddwarf/tests/fakes/guestagent.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging +from reddwarf.openstack.common import log as logging import time from reddwarf.tests.fakes.common import get_event_spawer diff --git a/reddwarf/tests/fakes/nova.py b/reddwarf/tests/fakes/nova.py index 0955719dec..20c95ba73b 100644 --- a/reddwarf/tests/fakes/nova.py +++ b/reddwarf/tests/fakes/nova.py @@ -16,7 +16,7 @@ # under the License. import eventlet -import logging +from reddwarf.openstack.common import log as logging from novaclient.v1_1.client import Client from novaclient import exceptions as nova_exceptions import uuid diff --git a/rsdns/client/dns_client.py b/rsdns/client/dns_client.py index a43511636a..54b50c985e 100644 --- a/rsdns/client/dns_client.py +++ b/rsdns/client/dns_client.py @@ -19,7 +19,7 @@ We have to duplicate a lot of code from the OpenStack client since so much is different here. """ -import logging +from reddwarf.openstack.common import log as logging import exceptions diff --git a/run_tests.py b/run_tests.py index 07dcc82796..5257c76a8c 100644 --- a/run_tests.py +++ b/run_tests.py @@ -3,12 +3,15 @@ import os import urllib import sys +from reddwarf.common import cfg +from reddwarf.openstack.common import log as logging from reddwarf.tests.config import CONFIG from wsgi_intercept.httplib2_intercept import install as wsgi_install import proboscis from eventlet import greenthread import wsgi_intercept +CONF = cfg.CONF def add_support_for_localization(): @@ -28,51 +31,30 @@ def add_support_for_localization(): def initialize_reddwarf(config_file): - # The test version of poll_until doesn't utilize LoopingCall. - import optparse - from reddwarf.db import get_db_api - from reddwarf.common import config as rd_config - from reddwarf.common import wsgi - from reddwarf import version + from reddwarf.openstack.common import pastedeploy - db_api = get_db_api() - - def create_options(parser): - parser.add_option('-p', '--port', dest="port", metavar="PORT", - type=int, default=9898, - help="Port the Reddwarf API host listens on. 
" - "Default: %default") - rd_config.add_common_options(parser) - rd_config.add_log_options(parser) - - def usage(): - usage = "" - - oparser = optparse.OptionParser(version="%%prog %s" - % version.version_string(), - usage=usage()) - create_options(oparser) - (options, args) = rd_config.parse_options(oparser, cli_args=[config_file]) - rd_config.Config.load_paste_config('reddwarf', options, args) - # Modify these values by hand - rd_config.Config.instance['fake_mode_events'] = 'simulated' - rd_config.Config.instance['log_file'] = 'rdtest.log' - conf, app = rd_config.Config.load_paste_app('reddwarf', options, args) - rd_config.setup_logging(options, conf) - return conf, app + cfg.CONF(args=[], + project='reddwarf', + default_config_files=[config_file]) + CONF.use_stderr = False + CONF.log_file = 'rdtest.log' + logging.setup(None) + CONF.bind_port = 8779 + CONF.fake_mode_events = 'simulated' + return pastedeploy.paste_deploy_app(config_file, 'reddwarf', {}) -def initialize_database(rd_conf): +def initialize_database(): from reddwarf.db import get_db_api from reddwarf.instance import models from reddwarf.db.sqlalchemy import session db_api = get_db_api() - db_api.drop_db(rd_conf) # Destroys the database, if it exists. - db_api.db_sync(rd_conf) - session.configure_db(rd_conf) + db_api.drop_db(CONF) # Destroys the database, if it exists. + db_api.db_sync(CONF) + session.configure_db(CONF) # Adds the image for mysql (needed to make most calls work). models.ServiceImage.create(service_name="mysql", image_id="fake") - db_api.configure_db(rd_conf) + db_api.configure_db(CONF) def initialize_fakes(app): @@ -89,7 +71,9 @@ def initialize_fakes(app): return call_back - wsgi_intercept.add_wsgi_intercept('localhost', 8779, wsgi_interceptor) + wsgi_intercept.add_wsgi_intercept('localhost', + CONF.bind_port, + wsgi_interceptor) # Finally, engage in some truly evil monkey business. 
We want # to change anything which spawns threads with eventlet to instead simply @@ -114,14 +98,17 @@ if __name__=="__main__": wsgi_install() add_support_for_localization() replace_poll_until() - # Load Reddwarf config file. - conf, app = initialize_reddwarf("etc/reddwarf/reddwarf.conf.test") + # Load Reddwarf app + # Paste file needs absolute path + config_file = os.path.realpath('etc/reddwarf/reddwarf.conf.test') + # 'etc/reddwarf/test-api-paste.ini' + app = initialize_reddwarf(config_file) # Initialize sqlite database. - initialize_database(conf) + initialize_database() # Swap out WSGI, httplib, and several sleep functions with test doubles. initialize_fakes(app) # Initialize the test configuration. - CONFIG.load_from_file("etc/tests/localhost.test.conf") + CONFIG.load_from_file('etc/tests/localhost.test.conf') from reddwarf.tests.api import flavors from reddwarf.tests.api import versions diff --git a/setup.py b/setup.py index 72cd77ece2..473034e4dc 100644 --- a/setup.py +++ b/setup.py @@ -14,72 +14,31 @@ import gettext import os +import setuptools import subprocess -from setuptools import find_packages -from setuptools.command.sdist import sdist -from setuptools import setup - gettext.install('reddwarf', unicode=1) -from reddwarf.openstack.common.setup import parse_requirements -from reddwarf.openstack.common.setup import parse_dependency_links -from reddwarf.openstack.common.setup import write_requirements -from reddwarf.openstack.common.setup import write_vcsversion +from reddwarf import version +from reddwarf.openstack.common import setup from reddwarf.openstack.common.setup import write_git_changelog -from reddwarf import version +requires = setup.parse_requirements() +depend_links = setup.parse_dependency_links() -class local_sdist(sdist): - """Customized sdist hook - builds the ChangeLog file from VC first""" - def run(self): - write_git_changelog() - sdist.run(self) -cmdclass = {'sdist': local_sdist} - - -try: - from sphinx.setup_command import BuildDoc 
- - class local_BuildDoc(BuildDoc): - def run(self): - for builder in ['html', 'man']: - self.builder = builder - self.finalize_options() - BuildDoc.run(self) - cmdclass['build_sphinx'] = local_BuildDoc - -except: - pass - - -try: - from babel.messages import frontend as babel - cmdclass['compile_catalog'] = babel.compile_catalog - cmdclass['extract_messages'] = babel.extract_messages - cmdclass['init_catalog'] = babel.init_catalog - cmdclass['update_catalog'] = babel.update_catalog -except: - pass - -requires = parse_requirements() -depend_links = parse_dependency_links() - -write_requirements() -write_vcsversion('reddwarf/vcsversion.py') - -setup(name='reddwarf', - version=version.canonical_version_string(), - description='PaaS services for Openstack', +setuptools.setup(name='reddwarf', + version=setup.get_post_version('reddwarf'), + description='DBaaS services for Openstack', author='OpenStack', author_email='openstack@lists.launchpad.net', - url='http://www.openstack.org/', - cmdclass=cmdclass, - packages=find_packages(exclude=['bin']), + url='https://github.com/stackforge/reddwarf', + cmdclass=setup.get_cmdclass(), + packages=setuptools.find_packages(exclude=['bin']), include_package_data=True, install_requires=requires, dependency_links=depend_links, + setup_requires=['setuptools-git>=0.4'], test_suite='nose.collector', classifiers=[ 'Development Status :: 4 - Beta', @@ -88,8 +47,8 @@ setup(name='reddwarf', 'Programming Language :: Python :: 2.6', 'Environment :: No Input/Output (Daemon)', ], - scripts=['bin/reddwarf-server', - 'bin/reddwarf-api', + scripts=['bin/reddwarf-api', + 'bin/reddwarf-server', 'bin/reddwarf-taskmanager', 'bin/reddwarf-mgmt-taskmanager', 'bin/reddwarf-manage', diff --git a/tools/pip-requires b/tools/pip-requires index 59b5ba1c4e..0839e2f033 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,5 +1,6 @@ SQLAlchemy>=0.7.8,<=0.7.9 eventlet +extras kombu==1.5.1 routes WebOb