Update oslo codebase within reddwarf.

* Updated logging, cfg, and setup to new oslo
* Split out the paste ini from the conf files
* Modified reddwarf-api/server to use new modules
* Modified reddwarf-manage to use new cfg
* Added rpc helper for rpc services (sketched below)
* Modified reddwarf-taskmanager to use rpc helper
* Modified reddwarf-guestagent to use new rpc helper
* Fixed guestagent api to use rpc proxy
* Fixed taskmanager module to conform to new rpc
* Updated guestagent manager/pkg to use new rpc
* Updated api paste to use keystoneclient auth_token
* Updated managers to use periodic tasks
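
A minimal sketch of the resulting service entry-point pattern (condensed
from the reddwarf-taskmanager changes below; not a verbatim file):

    import sys

    from reddwarf.common import cfg
    from reddwarf.common import rpc
    from reddwarf.openstack.common import log as logging
    from reddwarf.openstack.common import service

    CONF = cfg.CONF

    if __name__ == '__main__':
        # The shared cfg helper parses CLI args and config files.
        cfg.parse_args(sys.argv)
        logging.setup(None)
        # RpcService imports the manager class named in the config and
        # runs its periodic tasks on a timer (see reddwarf/common/rpc.py).
        server = rpc.RpcService(manager=CONF.taskmanager_manager)
        launcher = service.launch(server)
        launcher.wait()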

Implements: blueprint reddwarf/upgrade-oslo

Change-Id: I9ad1b441eca855a4304454014ae746ec51bef8f3
Author: Michael Basnight, 2012-12-03 16:21:29 -06:00
parent 9e946727d2
commit c8a5bc39dc
117 changed files with 6982 additions and 1620 deletions

.gitignore

@@ -15,3 +15,6 @@ host-syslog.log
tags
.tox
rdtest.log
reddwarf/versioninfo
AUTHORS
Changelog

@@ -33,40 +33,24 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')):
sys.path.insert(0, possible_topdir)
from reddwarf import version
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.openstack.common import service
from reddwarf.openstack.common import log as logging
from reddwarf.common import wsgi
from reddwarf.db import get_db_api
def create_options(parser):
"""Sets up the CLI and config-file options
:param parser: The option parser
:returns: None
"""
parser.add_option('-p', '--port', dest="port", metavar="PORT",
type=int,
help="Port the Reddwarf API host listens on. "
"Default: %default")
config.add_common_options(parser)
config.add_log_options(parser)
CONF = cfg.CONF
if __name__ == '__main__':
oparser = optparse.OptionParser(version="%%prog %s"
% version.version_string())
create_options(oparser)
(options, args) = config.parse_options(oparser)
cfg.parse_args(sys.argv)
logging.setup(None)
try:
config.Config.load_paste_config('reddwarf', options, args)
conf, app = config.Config.load_paste_app('reddwarf', options, args)
get_db_api().configure_db(conf)
server = wsgi.Server()
server.start(app, int(options.get('port') or conf['bind_port']),
conf['bind_host'])
server.wait()
get_db_api().configure_db(CONF)
conf_file = CONF.find_file(CONF.api_paste_config)
launcher = wsgi.launch('reddwarf', CONF.bind_port or 8779, conf_file)
launcher.wait()
except RuntimeError as error:
import traceback
print traceback.format_exc()

@@ -36,37 +36,28 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')):
sys.path.insert(0, possible_topdir)
from reddwarf import version
from reddwarf.common import config
from reddwarf.common import service
# TODO(hub-cap): find out why the db api isn't being imported properly
from reddwarf.common import cfg
from reddwarf.common import rpc
from reddwarf.openstack.common import cfg as openstack_cfg
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import service
from reddwarf.db import get_db_api
CONF = cfg.CONF
CONF.register_opts([openstack_cfg.StrOpt('guestagent_manager'),
openstack_cfg.StrOpt('guest_id')])
if __name__ == '__main__':
parser = optparse.OptionParser(version="%%prog %s"
% version.version_string())
config.add_common_options(parser)
config.add_log_options(parser)
cfg.parse_args(sys.argv)
logging.setup(None)
(options, args) = config.parse_options(parser)
try:
conf, app = config.Config.load_paste_app('reddwarf-guestagent',
options, args)
# Use the config file location for putting the new config values
conf_loc = '%s/%s' % (config.Config.get('here'), 'conf.d/guest_info')
config.Config.append_to_config_values('reddwarf-guestagent',
{'config_file': conf_loc}, None)
# Now do the same for the /etc/guest_info file
# that is injected into the VM
config.Config.append_to_config_values('reddwarf-guestagent',
{'config_file': '/etc/guest_info'}, None)
get_db_api().configure_db(conf)
server = service.Service.create(binary='reddwarf-guestagent',
host=config.Config.get('guest_id'))
service.serve(server)
service.wait()
get_db_api().configure_db(CONF)
server = rpc.RpcService(manager=CONF.guestagent_manager,
host=CONF.guest_id)
launcher = service.launch(server)
launcher.wait()
except RuntimeError as error:
import traceback
print traceback.format_exc()
sys.exit("ERROR: %s" % error)
sys.exit("ERROR: %s" % error)

@@ -17,6 +17,7 @@
# under the License.
import gettext
import inspect
import optparse
import os
import sys
@@ -34,47 +35,41 @@ if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')):
sys.path.insert(0, possible_topdir)
from reddwarf import version
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.common import utils
from reddwarf.db import get_db_api
from reddwarf.openstack.common import log as logging
from reddwarf.instance import models as instance_models
def create_options(parser):
"""Sets up the CLI and config-file options.
:param parser: The option parser
:returns: None
"""
parser.add_option('-p', '--port', dest="port", metavar="PORT",
type=int, default=9898,
help="Port the Reddwarf API host listens on. "
"Default: %default")
config.add_common_options(parser)
config.add_log_options(parser)
CONF = cfg.CONF
class Commands(object):
def __init__(self, conf):
def __init__(self):
self.db_api = get_db_api()
self.conf = conf
def db_sync(self):
self.db_api.db_sync(self.conf, repo_path=None)
def db_sync(self, repo_path=None):
self.db_api.db_sync(CONF, repo_path=repo_path)
def db_upgrade(self, version=None, repo_path=None):
self.db_api.db_upgrade(self.conf, version, repo_path=None)
self.db_api.db_upgrade(CONF, version, repo_path=None)
def db_downgrade(self, version, repo_path=None):
self.db_api.db_downgrade(self.conf, version, repo_path=None)
self.db_api.db_downgrade(CONF, version, repo_path=None)
def execute(self, command_name, *args):
if self.has(command_name):
return getattr(self, command_name)(*args)
def execute(self):
exec_method = getattr(self, CONF.action.name)
args = inspect.getargspec(exec_method)
args.args.remove('self')
kwargs = {}
for arg in args.args:
kwargs[arg] = getattr(CONF.action, arg)
exec_method(**kwargs)
def image_update(self, service_name, image_id):
self.db_api.configure_db(self.conf)
self.db_api.configure_db(CONF)
image = self.db_api.find_by(instance_models.ServiceImage,
service_name=service_name)
if image is None:
@@ -89,63 +84,47 @@ class Commands(object):
"""Drops the database and recreates it."""
from reddwarf.instance import models
from reddwarf.db.sqlalchemy import session
self.db_api.drop_db(self.conf)
self.db_api.drop_db(CONF)
self.db_sync()
# Sets up database engine, so the next line will work...
session.configure_db(self.conf)
session.configure_db(CONF)
models.ServiceImage.create(service_name=service_name,
image_id=image_id)
_commands = ['db_sync', 'db_upgrade', 'db_downgrade', 'db_wipe',
'image_update']
@classmethod
def has(cls, command_name):
return (command_name in cls._commands)
@classmethod
def all(cls):
return cls._commands
def params_of(self, command_name):
if Commands.has(command_name):
return utils.MethodInspector(getattr(self, command_name))
def usage():
usage = """
%prog action [args] [options]
Available actions:
"""
for action in Commands.all():
usage = usage + ("\t%s\n" % action)
return usage.strip()
if __name__ == '__main__':
oparser = optparse.OptionParser(version="%%prog %s"
% version.version_string(),
usage=usage())
create_options(oparser)
(options, args) = config.parse_options(oparser)
if len(args) < 1 or not Commands.has(args[0]):
oparser.print_usage()
sys.exit(2)
def actions(subparser):
parser = subparser.add_parser('db_sync')
parser.add_argument('--repo_path')
parser = subparser.add_parser('db_upgrade')
parser.add_argument('--version')
parser.add_argument('--repo_path')
parser = subparser.add_parser('db_downgrade')
parser.add_argument('version')
parser.add_argument('--repo_path')
parser = subparser.add_parser('image_update')
parser.add_argument('service_name')
parser.add_argument('image_id')
parser = subparser.add_parser('db_wipe')
parser.add_argument('repo_path')
parser.add_argument('service_name')
parser.add_argument('image_id')
cfg.custom_parser('action', actions)
cfg.parse_args(sys.argv)
try:
conf = config.Config.load_paste_config('reddwarf', options, args)
config.setup_logging(options, conf)
command_name = args.pop(0)
Commands(conf).execute(command_name, *args)
logging.setup(None)
Commands().execute()
sys.exit(0)
except TypeError:
print _("Possible wrong number of arguments supplied")
command_params = Commands(conf).params_of(command_name)
print "Usage: reddwarf-manage %s" % command_params
except TypeError as e:
print _("Possible wrong number of arguments supplied %s" % e)
sys.exit(2)
except Exception:
print _("Command failed, please check log for more info")

@@ -33,35 +33,32 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')):
sys.path.insert(0, possible_topdir)
from reddwarf import version
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.openstack.common import cfg as openstack_cfg
from reddwarf.openstack.common import service
from reddwarf.openstack.common import log as logging
from reddwarf.common import wsgi
from reddwarf.db import get_db_api
extra_opts = [
openstack_cfg.BoolOpt('fork',
short='f',
default=False,
dest='fork'),
openstack_cfg.StrOpt('pid-file',
default='.pid'),
]
def create_options(parser):
"""Sets up the CLI and config-file options
:param parser: The option parser
:returns: None
"""
parser.add_option('-p', '--port', dest="port", metavar="PORT",
type=int,
help="Port the Reddwarf API host listens on. "
"Default: %default")
parser.add_option("-f", '--fork', action="store_true", dest="fork")
parser.add_option('--pid_file', dest="pid_file", default=".pid")
config.add_common_options(parser)
config.add_log_options(parser)
CONF = cfg.CONF
CONF.register_cli_opts(extra_opts)
def run_server(app, port):
def run_server():
try:
server = wsgi.Server()
server.start(app, int(options.get('port') or conf['bind_port']),
conf['bind_host'])
server.wait()
get_db_api().configure_db(CONF)
server = wsgi.WSGIService('reddwarf', CONF.bind_port or 8779)
launcher = service.launch(server)
launcher.wait()
except RuntimeError as error:
import traceback
print traceback.format_exc()
@@ -69,23 +66,18 @@ def run_server(app, port):
if __name__ == '__main__':
oparser = optparse.OptionParser(version="%%prog %s"
% version.version_string())
create_options(oparser)
(options, args) = config.parse_options(oparser)
config.Config.load_paste_config('reddwarf', options, args)
conf, app = config.Config.load_paste_app('reddwarf', options, args)
get_db_api().configure_db(conf)
port = int(options.get('port') or conf['bind_port'])
if options['fork']:
cfg.parse_args(sys.argv)
logging.setup(None)
if CONF.fork:
pid = os.fork()
if pid == 0:
run_server(app, port)
run_server()
else:
print("Starting server:%s" % pid)
pid_file = options.get('pid_file', '.pid')
pid_file = CONF.pid_file
with open(pid_file, 'w') as f:
f.write(str(pid))
else:
run_server(app, port)
run_server()

@@ -36,26 +36,25 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')):
sys.path.insert(0, possible_topdir)
from reddwarf import version
from reddwarf.common import config
from reddwarf.common import service
from reddwarf.common import cfg
from reddwarf.common import rpc
from reddwarf.openstack.common import cfg as openstack_cfg
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import service
from reddwarf.db import get_db_api
CONF = cfg.CONF
CONF.register_opts([openstack_cfg.StrOpt('taskmanager_manager')])
if __name__ == '__main__':
parser = optparse.OptionParser(version="%%prog %s"
% version.version_string())
config.add_common_options(parser)
config.add_log_options(parser)
cfg.parse_args(sys.argv)
logging.setup(None)
(options, args) = config.parse_options(parser)
try:
conf, app = config.Config.load_paste_app('reddwarf-taskmanager',
options, args)
get_db_api().configure_db(conf)
server = service.Service.create(binary='reddwarf-taskmanager')
service.serve(server)
service.wait()
get_db_api().configure_db(CONF)
server = rpc.RpcService(manager=CONF.taskmanager_manager)
launcher = service.launch(server)
launcher.wait()
except RuntimeError as error:
import traceback
print traceback.format_exc()

@@ -0,0 +1,42 @@
[composite:reddwarf]
use = call:reddwarf.common.wsgi:versioned_urlmap
/: versions
/v1.0: reddwarfapi
[app:versions]
paste.app_factory = reddwarf.versions:app_factory
[pipeline:reddwarfapi]
pipeline = faultwrapper tokenauth authorization contextwrapper extensions reddwarfapp
#pipeline = debug extensions reddwarfapp
[filter:extensions]
paste.filter_factory = reddwarf.common.extensions:factory
[filter:tokenauth]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
service_protocol = http
service_host = 127.0.0.1
service_port = 5000
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
auth_uri = http://127.0.0.1:5000/
admin_token = be19c524ddc92109a224
signing_dir = /tmp/keystone-signing-reddwarf
[filter:authorization]
paste.filter_factory = reddwarf.common.auth:AuthorizationMiddleware.factory
[filter:contextwrapper]
paste.filter_factory = reddwarf.common.wsgi:ContextMiddleware.factory
[filter:faultwrapper]
paste.filter_factory = reddwarf.common.wsgi:FaultWrapper.factory
[app:reddwarfapp]
paste.app_factory = reddwarf.common.api:app_factory
#Add this filter to log request and response for debugging
[filter:debug]
paste.filter_factory = reddwarf.common.wsgi:Debug
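
With the pipeline split into its own ini file, the API server locates and
launches it as shown in the reddwarf-api changes above; condensed:

    from reddwarf.common import cfg
    from reddwarf.common import wsgi

    CONF = cfg.CONF
    cfg.parse_args(['reddwarf-api'])

    # find_file searches the registered config dirs for api-paste.ini.
    conf_file = CONF.find_file(CONF.api_paste_config)
    launcher = wsgi.launch('reddwarf', CONF.bind_port or 8779, conf_file)
    launcher.wait()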

@@ -46,7 +46,7 @@ reddwarf_proxy_admin_tenant_name = admin
reddwarf_auth_url = http://0.0.0.0:5000/v2.0
# Manager impl for the taskmanager
guestagent_manager=reddwarf.guestagent.manager.GuestManager
guestagent_manager=reddwarf.guestagent.manager.Manager
# ============ kombu connection options ========================
@@ -55,39 +55,3 @@ rabbit_host=10.0.0.1
# ============ Logging information =============================
log_dir = /tmp/
log_file = logfile.txt
[composite:reddwarf-guestagent]
use = call:reddwarf.common.wsgi:versioned_urlmap
/: versions
/v0.1: reddwarf-guestagent-app
[app:versions]
paste.app_factory = reddwarf.versions:app_factory
[pipeline:reddwarf-guestagent-app]
pipeline = guestagent-app
#pipeline = debug extensions reddwarfapp
[filter:extensions]
paste.filter_factory = reddwarf.common.extensions:factory
[filter:tokenauth]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 127.0.0.1
service_port = 5000
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
auth_uri = http://127.0.0.1:5000/
admin_token = be19c524ddc92109a224
[filter:authorization]
paste.filter_factory = reddwarf.common.auth:AuthorizationMiddleware.factory
[app:guestagent-app]
paste.app_factory = reddwarf.guestagent.service:app_factory
#Add this filter to log request and response for debugging
[filter:debug]
paste.filter_factory = reddwarf.common.wsgi:Debug

@@ -49,7 +49,7 @@ reddwarf_proxy_admin_tenant_name = admin
reddwarf_auth_url = http://0.0.0.0:5000/v2.0
# Manager impl for the taskmanager
taskmanager_manager=reddwarf.taskmanager.manager.TaskManager
taskmanager_manager=reddwarf.taskmanager.manager.Manager
# Reddwarf DNS
reddwarf_dns_support = False
@@ -75,6 +75,3 @@ notifier_queue_transport = memory
# ============ Logging information =============================
#log_dir = /integration/report
#log_file = reddwarf-taskmanager.log
[app:reddwarf-taskmanager]
paste.app_factory = reddwarf.taskmanager.service:app_factory

@@ -88,44 +88,3 @@ notifier_queue_transport = memory
#log_dir = /integration/report
#log_file = reddwarf-api.log
[composite:reddwarf]
use = call:reddwarf.common.wsgi:versioned_urlmap
/: versions
/v1.0: reddwarfapi
[app:versions]
paste.app_factory = reddwarf.versions:app_factory
[pipeline:reddwarfapi]
pipeline = faultwrapper tokenauth authorization contextwrapper extensions reddwarfapp
#pipeline = debug extensions reddwarfapp
[filter:extensions]
paste.filter_factory = reddwarf.common.extensions:factory
[filter:tokenauth]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 127.0.0.1
service_port = 5000
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
auth_uri = http://127.0.0.1:5000/
admin_token = be19c524ddc92109a224
[filter:authorization]
paste.filter_factory = reddwarf.common.auth:AuthorizationMiddleware.factory
[filter:contextwrapper]
paste.filter_factory = reddwarf.common.wsgi:ContextMiddleware.factory
[filter:faultwrapper]
paste.filter_factory = reddwarf.common.wsgi:FaultWrapper.factory
[app:reddwarfapp]
paste.app_factory = reddwarf.common.api:app_factory
#Add this filter to log request and response for debugging
[filter:debug]
paste.filter_factory = reddwarf.common.wsgi:Debug

@@ -102,7 +102,7 @@ paste.app_factory = reddwarf.versions:app_factory
[pipeline:reddwarfapi]
pipeline = faultwrapper tokenauth authorization contextwrapper extensions reddwarfapp
#pipeline = debug extensions reddwarfapp
# pipeline = debug reddwarfapp
[filter:extensions]
paste.filter_factory = reddwarf.common.extensions:factory

@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
modules=config,context,exception,extensions,utils,wsgi,setup
modules=middleware,notifier,rpc,authutils,cfg,context,eventlet_backdoor,exception,excutils,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,loopingcall,network_utils,pastedeploy,periodic_task,policy,processutils,service,setup,testutils,threadgroup,timeutils,utils,uuidutils,version,wsgi
# The base module to hold the copy of openstack.common
base=reddwarf

@@ -12,17 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import routes
from reddwarf.openstack.common import rpc
from reddwarf.common import config
from reddwarf.common import exception
from reddwarf.common import wsgi
from reddwarf.versions import VersionsController
from reddwarf.extensions.mgmt.host.instance import service as hostservice
from reddwarf.flavor.service import FlavorController
from reddwarf.instance.service import InstanceController
from reddwarf.extensions.mgmt.host.instance import service as hostservice
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import rpc
from reddwarf.versions import VersionsController
class API(wsgi.Router):

@@ -16,12 +16,13 @@
# under the License.
import httplib2
import logging
import re
import webob.exc
import wsgi
from reddwarf.common import exception
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)

reddwarf/common/cfg.py (new file)

@@ -0,0 +1,105 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Routines for configuring Reddwarf."""
from reddwarf.openstack.common import cfg
common_opts = [
cfg.StrOpt('sql_connection',
default='sqlite:///reddwarf_test.sqlite',
help='SQL Connection'),
cfg.IntOpt('sql_idle_timeout', default=3600),
cfg.BoolOpt('sql_query_log', default=False),
cfg.IntOpt('bind_port', default=8779),
cfg.StrOpt('api_extensions_path', default='',
help='Path to extensions'),
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for reddwarf-api'),
cfg.BoolOpt('add_addresses',
default=False,
help='Whether to add IP addresses to the list operations'),
cfg.BoolOpt('reddwarf_volume_support',
default=False,
help='Whether volume support is enabled for instances'),
cfg.BoolOpt('reddwarf_must_use_volume', default=False),
cfg.ListOpt('admin_roles', default=[]),
cfg.StrOpt('remote_implementation',
default="real",
help='Remote implementation for using fake integration code'),
cfg.StrOpt('nova_compute_url', default='http://localhost:8774/v2'),
cfg.StrOpt('nova_volume_url', default='http://localhost:8776/v2'),
cfg.StrOpt('reddwarf_auth_url', default='http://0.0.0.0:5000/v2.0'),
cfg.StrOpt('host', default='0.0.0.0'),
cfg.IntOpt('report_interval', default=10),
cfg.IntOpt('periodic_interval', default=60),
cfg.BoolOpt('reddwarf_dns_support', default=False),
cfg.StrOpt('db_api_implementation', default='reddwarf.db.sqlalchemy.api'),
cfg.StrOpt('dns_driver', default='reddwarf.dns.driver.DnsDriver'),
cfg.StrOpt('dns_instance_entry_factory',
default='reddwarf.dns.driver.DnsInstanceEntryFactory'),
cfg.StrOpt('dns_hostname', default=""),
cfg.IntOpt('dns_account_id', default=0),
cfg.StrOpt('dns_auth_url', default=""),
cfg.StrOpt('dns_domain_name', default=""),
cfg.StrOpt('dns_username', default=""),
cfg.StrOpt('dns_passkey', default=""),
cfg.StrOpt('dns_management_base_url', default=""),
cfg.IntOpt('dns_ttl', default=300),
cfg.IntOpt('dns_domain_id', default=1),
cfg.IntOpt('users_page_size', default=20),
cfg.IntOpt('databases_page_size', default=20),
cfg.IntOpt('instances_page_size', default=20),
cfg.ListOpt('ignore_users', default=[]),
cfg.ListOpt('ignore_dbs', default=[]),
cfg.IntOpt('agent_call_low_timeout', default=5),
cfg.IntOpt('agent_call_high_timeout', default=60),
cfg.StrOpt('guest_id', default=None),
cfg.IntOpt('state_change_wait_time', default=2 * 60),
cfg.IntOpt('agent_heartbeat_time', default=10),
cfg.IntOpt('num_tries', default=3),
cfg.StrOpt('volume_fstype', default='ext3'),
cfg.StrOpt('format_options', default='-m 5'),
cfg.IntOpt('volume_format_timeout', default=120),
cfg.StrOpt('mount_options', default='defaults,noatime'),
cfg.IntOpt('max_instances_per_user', default=5),
cfg.IntOpt('max_accepted_volume_size', default=5),
cfg.StrOpt('taskmanager_queue', default='taskmanager'),
cfg.BoolOpt('use_nova_server_volume', default=False),
cfg.StrOpt('fake_mode_events', default='simulated'),
cfg.StrOpt('device_path', default='/dev/vdb'),
cfg.StrOpt('mount_point', default='/var/lib/mysql'),
cfg.StrOpt('service_type', default='mysql'),
cfg.StrOpt('block_device_mapping', default='vdb'),
cfg.IntOpt('server_delete_time_out', default=2),
cfg.IntOpt('volume_time_out', default=2),
cfg.IntOpt('reboot_time_out', default=60 * 2),
]
CONF = cfg.CONF
CONF.register_opts(common_opts)
def custom_parser(parsername, parser):
CONF.register_cli_opt(cfg.SubCommandOpt(parsername, handler=parser))
def parse_args(argv, default_config_files=None):
cfg.CONF(args=argv[1:],
project='reddwarf',
default_config_files=default_config_files)
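
Service binaries layer their own options on top of common_opts before
parsing; a minimal sketch following the reddwarf-guestagent entry point
above (the config-file path is illustrative):

    from reddwarf.common import cfg
    from reddwarf.openstack.common import cfg as openstack_cfg

    CONF = cfg.CONF
    # Each service registers its own opts next to the shared common_opts.
    CONF.register_opts([openstack_cfg.StrOpt('guestagent_manager'),
                        openstack_cfg.StrOpt('guest_id')])

    cfg.parse_args(['reddwarf-guestagent',
                    '--config-file=/etc/reddwarf/reddwarf-guestagent.conf'])
    print(CONF.guestagent_manager)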

@@ -1,153 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Routines for configuring Reddwarf."""
import re
from reddwarf.openstack.common import config as openstack_config
parse_options = openstack_config.parse_options
add_log_options = openstack_config.add_log_options
add_common_options = openstack_config.add_common_options
setup_logging = openstack_config.setup_logging
def _to_list(value):
items = value.split(',')
trimmed_list = [item.strip() for item in items]
return trimmed_list
def get_option(options, option, **kwargs):
if option in options and kwargs.get('type', 'str') == 'list':
value = options[option]
return _to_list(value)
else:
return openstack_config.get_option(options, option, **kwargs)
class Config(object):
instance = {}
@classmethod
def load_paste_app(cls, *args, **kwargs):
conf, app = openstack_config.load_paste_app(*args, **kwargs)
cls.instance.update(conf)
return conf, app
@classmethod
def load_paste_config(cls, *args, **kwargs):
conf_file, conf = openstack_config.load_paste_config(*args, **kwargs)
cls.instance.update(conf)
return conf
@classmethod
def append_to_config_values(cls, *args):
config_file = openstack_config.find_config_file(*args)
if not config_file:
raise RuntimeError("Unable to locate any configuration file. "
"Cannot load application %s" % app_name)
# Now take the conf file values and append them to the current conf
with open(config_file, 'r') as conf:
for line in conf.readlines():
m = re.match("\s*([^#]\S+)\s*=\s*(\S+)\s*", line)
if m:
cls.instance[m.group(1)] = m.group(2)
@classmethod
def write_config_values(cls, *args, **kwargs):
# Pass in empty kwargs so it doesn't mess up the config find
config_file = openstack_config.find_config_file(*args)
if not config_file:
raise RuntimeError("Unable to locate any configuration file. "
"Cannot load application %s" % app_name)
with open(config_file, 'a') as conf:
for k, v in kwargs.items():
# Start with a newline to be sure it's on a new line
conf.write("\n%s=%s" % (k, v))
# Now append them to the cls instance
cls.append_to_config_values(*args)
@classmethod
def get(cls, key, default=None, **kwargs):
# We always use a default, even if it's None.
kwargs['default'] = default
return get_option(cls.instance, key, **kwargs)
def create_type_func(type):
@classmethod
def get(cls, key, default=None, **kwargs):
kwargs['type'] = type
return cls.get(key, default, **kwargs)
return get
Config.get_bool = create_type_func('bool')
Config.get_float = create_type_func('float')
Config.get_int = create_type_func('int')
Config.get_list = create_type_func('list')
Config.get_str = create_type_func('str')
del create_type_func
class ConfigFacade(object):
"""This class presents an interface usable by OpenStack Common modules.
OpenStack common uses a new config interface where the values are
accessed as attributes directly. This presents the same interface
so we can interface with OS common modules while we change our config
stuff.
"""
value_info = {}
def __init__(self, conf):
self.conf = conf
def __getattr__(self, name):
if name == "register_opts":
def f(*args, **kwargs):
pass
return f
if name in self.value_info:
v = self.value_info[name]
return self.conf.get(name, **v)
return self.conf.get(name)
class OsCommonModule(object):
"""Emulates the OpenStack Common cfg module."""
@property
def CONF(self):
return ConfigFacade(Config())
def create_type_func(type):
@classmethod
def func(cls, name, default, help):
ConfigFacade.value_info[name] = {'default': default, 'type': type}
return func
OsCommonModule.BoolOpt = create_type_func('bool')
OsCommonModule.IntOpt = create_type_func('int')
OsCommonModule.ListOpt = create_type_func('list')
OsCommonModule.StrOpt = create_type_func('str')
del create_type_func

@@ -16,13 +16,15 @@
# under the License.
"""I totally stole most of this from melange, thx guys!!!"""
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import exception as openstack_exception
from reddwarf.openstack.common import processutils
from reddwarf.openstack.common.gettextutils import _
from webob import exc
ClientConnectionError = openstack_exception.ClientConnectionError
ProcessExecutionError = openstack_exception.ProcessExecutionError
ProcessExecutionError = processutils.ProcessExecutionError
DatabaseMigrationError = openstack_exception.DatabaseMigrationError
LOG = logging.getLogger(__name__)
wrap_exception = openstack_exception.wrap_exception
@@ -179,3 +181,13 @@ class ModelNotFoundError(NotFound):
class UpdateGuestError(ReddwarfError):
message = _("Failed to update instances")
class ConfigNotFound(NotFound):
message = _("Config file not found")
class PasteAppNotFound(NotFound):
message = _("Paste app not found.")

@@ -19,7 +19,7 @@ Exception related utilities.
"""
import contextlib
import logging
from reddwarf.openstack.common import log as logging
import sys
import traceback

@@ -17,9 +17,11 @@
import routes
import webob.dec
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import extensions
from reddwarf.openstack.common.gettextutils import _
from reddwarf.common import cfg
from reddwarf.common import wsgi
LOG = logging.getLogger(__name__)
@@ -27,12 +29,14 @@ LOG = logging.getLogger(__name__)
ExtensionsDescriptor = extensions.ExtensionDescriptor
ResourceExtension = extensions.ResourceExtension
CONF = cfg.CONF
class ReddwarfExtensionMiddleware(extensions.ExtensionMiddleware):
def __init__(self, application, config, ext_mgr=None):
def __init__(self, application, ext_mgr=None):
ext_mgr = (ext_mgr or
ExtensionManager(config['api_extensions_path']))
ExtensionManager(CONF.api_extensions_path))
mapper = routes.Mapper()
# extended resources
@@ -84,7 +88,6 @@ def factory(global_config, **local_config):
"""Paste factory."""
def _factory(app):
extensions.DEFAULT_XMLNS = "http://docs.openstack.org/reddwarf"
ext_mgr = extensions.ExtensionManager(
global_config.get('api_extensions_path', ''))
return ReddwarfExtensionMiddleware(app, global_config, ext_mgr)
ext_mgr = extensions.ExtensionManager(CONF.api_extensions_path)
return ReddwarfExtensionMiddleware(app, ext_mgr)
return _factory

@@ -12,17 +12,18 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import rpc
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.common import exception
CONFIG = config.Config
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(hub_cap): upgrade this to use rpc.proxy.RpcProxy
class ManagerAPI(object):
"""Extend this API for interacting with the common methods of managers"""
@@ -30,7 +31,7 @@ class ManagerAPI(object):
self.context = context
def _cast(self, method_name, **kwargs):
if CONFIG.get("remote_implementation", "real") == "fake":
if CONF.remote_implementation == "fake":
self._fake_cast(method_name, **kwargs)
else:
self._real_cast(method_name, **kwargs)

@@ -17,7 +17,7 @@
"""Model classes that form the core of instances functionality."""
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.common import remote

@@ -15,11 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
from reddwarf.common import config
from reddwarf.common import cfg
from novaclient.v1_1.client import Client
CONFIG = config.Config
CONF = cfg.CONF
def create_dns_client(context):
@@ -33,9 +33,8 @@ def create_guest_client(context, id):
def create_nova_client(context):
COMPUTE_URL = CONFIG.get('nova_compute_url', 'http://localhost:8774/v2')
PROXY_AUTH_URL = CONFIG.get('reddwarf_auth_url',
'http://0.0.0.0:5000/v2.0')
COMPUTE_URL = CONF.nova_compute_url
PROXY_AUTH_URL = CONF.reddwarf_auth_url
client = Client(context.user, context.auth_tok, project_id=context.tenant,
auth_url=PROXY_AUTH_URL)
client.client.auth_token = context.auth_tok
@@ -47,9 +46,8 @@ def create_nova_client(context):
def create_nova_volume_client(context):
# Quite annoying but due to a paste config loading bug.
# TODO(hub-cap): talk to the openstack-common people about this
VOLUME_URL = CONFIG.get('nova_volume_url', 'http://localhost:8776/v2')
PROXY_AUTH_URL = CONFIG.get('reddwarf_auth_url',
'http://0.0.0.0:5000/v2.0')
VOLUME_URL = CONF.nova_volume_url
PROXY_AUTH_URL = CONF.reddwarf_auth_url
client = Client(context.user, context.auth_tok,
project_id=context.tenant, auth_url=PROXY_AUTH_URL)
client.client.auth_token = context.auth_tok
@@ -58,7 +56,7 @@ def create_nova_volume_client(context):
return client
if CONFIG.get("remote_implementation", "real") == "fake":
if CONF.remote_implementation == "fake":
# Override the functions above with fakes.
from reddwarf.tests.fakes.nova import fake_create_nova_client

reddwarf/common/rpc.py (new file)

@@ -0,0 +1,49 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RPC helper for launching a rpc service."""
import inspect
import os
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import loopingcall
from reddwarf.openstack.common.rpc import service as rpc_service
from reddwarf.common import cfg
CONF = cfg.CONF
class RpcService(rpc_service.Service):
def __init__(self, host=None, binary=None, topic=None, manager=None):
host = host or CONF.host
binary = binary or os.path.basename(inspect.stack()[-1][1])
topic = topic or binary.rpartition('reddwarf-')[2]
self.manager_impl = importutils.import_object(manager)
self.report_interval = CONF.report_interval
super(RpcService, self).__init__(host, topic,
manager=self.manager_impl)
def start(self):
super(RpcService, self).start()
# TODO(hub-cap): Currently the context is none... do we _need_ it here?
pulse = loopingcall.LoopingCall(self.manager_impl.run_periodic_tasks,
context=None)
pulse.start(interval=self.report_interval,
initial_delay=self.report_interval)
pulse.wait()
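
Direct use mirrors the service scripts above; topic and host fall back to
the calling binary's name (minus its 'reddwarf-' prefix) and CONF.host
when omitted:

    from reddwarf.common import rpc
    from reddwarf.openstack.common import service

    # The manager path matches the sample taskmanager conf in this commit.
    server = rpc.RpcService(manager='reddwarf.taskmanager.manager.Manager')
    launcher = service.launch(server)
    launcher.wait()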

@@ -20,7 +20,6 @@
import functools
import inspect
import os
import logging
import socket
import traceback
import weakref
@@ -29,13 +28,17 @@ import eventlet
import greenlet
from eventlet import greenthread
from reddwarf.common import config
from reddwarf.openstack.common import rpc
from reddwarf.common import utils
from reddwarf import version
from reddwarf.common import cfg
from reddwarf.common import utils
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import rpc
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
@@ -160,17 +163,17 @@ class Service(object):
"""
if not host:
host = config.Config.get('host')
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary.rpartition('reddwarf-')[2]
if not manager:
manager = config.Config.get('%s_manager' % topic, None)
manager = CONF._get('%s_manager' % topic)
if not report_interval:
report_interval = config.Config.get('report_interval', 10)
report_interval = CONF.report_interval
if not periodic_interval:
periodic_interval = config.Config.get('periodic_interval', 60)
periodic_interval = CONF.periodic_interval
service_obj = cls(host, binary, topic, manager, report_interval,
periodic_interval)

@@ -18,7 +18,6 @@
import datetime
import inspect
import logging
import re
import signal
import sys
@@ -32,15 +31,21 @@ from eventlet import semaphore
from eventlet.green import subprocess
from eventlet.timeout import Timeout
from reddwarf.openstack.common import utils as openstack_utils
from reddwarf.common import exception
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import processutils
from reddwarf.openstack.common import timeutils
from reddwarf.openstack.common import utils as openstack_utils
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
import_class = openstack_utils.import_class
import_object = openstack_utils.import_object
import_class = importutils.import_class
import_object = importutils.import_object
import_module = importutils.import_module
bool_from_string = openstack_utils.bool_from_string
execute = openstack_utils.execute
isotime = openstack_utils.isotime
execute = processutils.execute
isotime = timeutils.isotime
def create_method_args_string(*args, **kwargs):

@@ -14,27 +14,32 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""I totally stole most of this from melange, thx guys!!!"""
"""Wsgi helper utilities for reddwarf"""
import eventlet.wsgi
import logging
import os
import paste.urlmap
import re
import traceback
import webob
import webob.dec
import webob.exc
from paste import deploy
from xml.dom import minidom
from reddwarf.common import context as rd_context
from reddwarf.common import config
from reddwarf.common import exception
from reddwarf.common import utils
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import pastedeploy
from reddwarf.openstack.common import service
from reddwarf.openstack.common import wsgi as openstack_wsgi
from reddwarf.openstack.common import log as logging
from reddwarf.common import cfg
CONTEXT_KEY = 'reddwarf.context'
Router = openstack_wsgi.Router
Server = openstack_wsgi.Server
Debug = openstack_wsgi.Debug
Middleware = openstack_wsgi.Middleware
JSONDictSerializer = openstack_wsgi.JSONDictSerializer
@@ -46,6 +51,8 @@ eventlet.patcher.monkey_patch(all=False, socket=True)
LOG = logging.getLogger('reddwarf.common.wsgi')
CONF = cfg.CONF
XMLNS = 'http://docs.openstack.org/database/api/v1.0'
CUSTOM_PLURALS_METADATA = {'databases': '', 'users': ''}
CUSTOM_SERIALIZER_METADATA = {
@@ -100,6 +107,28 @@ def versioned_urlmap(*args, **kwargs):
return VersionedURLMap(urlmap)
def launch(app_name, port, paste_config_file, data={},
host='0.0.0.0', backlog=128, threads=1000):
"""Launches a wsgi server based on the passed in paste_config_file.
Launch provides an easy way to create a paste app from the config
file and launch it via the service launcher. It takes care of
all of the plumbing. The only caveat is that the paste_config_file
must be a file that paste.deploy can find and handle. There is
a helper method in cfg.py that finds files.
Example:
conf_file = CONF.find_file(CONF.api_paste_config)
launcher = wsgi.launch('myapp', CONF.bind_port, conf_file)
launcher.wait()
"""
app = pastedeploy.paste_deploy_app(paste_config_file, app_name, data)
server = openstack_wsgi.Service(app, port, host=host,
backlog=backlog, threads=threads)
return service.launch(server)
class VersionedURLMap(object):
def __init__(self, urlmap):
@@ -303,10 +332,8 @@ class Controller(object):
}
def __init__(self):
self.add_addresses = utils.bool_from_string(
config.Config.get('add_addresses', 'False'))
self.add_volumes = utils.bool_from_string(
config.Config.get('reddwarf_volume_support', 'False'))
self.add_addresses = CONF.add_addresses
self.add_volumes = CONF.reddwarf_volume_support
def create_resource(self):
serializer = ReddwarfResponseSerializer(
@@ -505,7 +532,7 @@ class Fault(webob.exc.HTTPException):
class ContextMiddleware(openstack_wsgi.Middleware):
def __init__(self, application):
self.admin_roles = config.Config.get_list('admin_roles', [])
self.admin_roles = CONF.admin_roles
super(ContextMiddleware, self).__init__(application)
def _extract_limits(self, params):

@@ -18,15 +18,15 @@
import optparse
from reddwarf.common import utils
from reddwarf.common import config
from reddwarf.common import cfg
CONF = cfg.CONF
db_api_opt = config.Config.get("db_api_implementation",
"reddwarf.db.sqlalchemy.api")
db_api_opt = CONF.db_api_implementation
def get_db_api():
return utils.import_object(db_api_opt)
return utils.import_module(db_api_opt)
class Query(object):

@@ -12,15 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.db import get_db_api
from reddwarf.db import db_query
from reddwarf.common import exception
from reddwarf.common import models
from reddwarf.common import pagination
from reddwarf.common import utils
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)

@@ -17,7 +17,7 @@
"""Various conveniences used for migration scripts."""
import logging
from reddwarf.openstack.common import log as logging
import sqlalchemy.types

@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
import os
from migrate.versioning import api as versioning_api

@@ -16,12 +16,13 @@
# under the License.
import contextlib
import logging
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy.orm import sessionmaker
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
from reddwarf.db.sqlalchemy import mappers
_ENGINE = None
@@ -30,9 +31,10 @@ _MAKER = None
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def configure_db(options, models_mapper=None):
configure_sqlalchemy_log(options)
global _ENGINE
if not _ENGINE:
_ENGINE = _create_engine(options)
@@ -57,26 +59,10 @@ def configure_db(options, models_mapper=None):
mappers.map(_ENGINE, models)
def configure_sqlalchemy_log(options):
debug = config.get_option(options, 'debug', type='bool', default=False)
verbose = config.get_option(options, 'verbose', type='bool', default=False)
logger = logging.getLogger('sqlalchemy.engine')
if debug:
logger.setLevel(logging.DEBUG)
elif verbose:
logger.setLevel(logging.INFO)
def _create_engine(options):
engine_args = {
"pool_recycle": config.get_option(options,
'sql_idle_timeout',
type='int',
default=3600),
"echo": config.get_option(options,
'sql_query_log',
type='bool',
default=False),
"pool_recycle": CONF.sql_idle_timeout,
"echo": CONF.sql_query_log
}
LOG.info(_("Creating SQLAlchemy engine with args: %s") % engine_args)
return create_engine(options['sql_connection'], **engine_args)

@@ -18,13 +18,15 @@
"""
Dns manager.
"""
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.common import utils
from reddwarf.common import config
from reddwarf.common import cfg
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class DnsManager(object):
"""Handles associating DNS to and from IPs."""
@@ -32,16 +34,12 @@ class DnsManager(object):
def __init__(self, dns_driver=None, dns_instance_entry_factory=None,
*args, **kwargs):
if not dns_driver:
dns_driver = config.Config.get(
"dns_driver",
"reddwarf.dns.driver.DnsDriver")
dns_driver = CONF.dns_driver
dns_driver = utils.import_object(dns_driver)
self.driver = dns_driver()
if not dns_instance_entry_factory:
dns_instance_entry_factory = config.Config.get(
'dns_instance_entry_factory',
'reddwarf.dns.driver.DnsInstanceEntryFactory')
dns_instance_entry_factory = CONF.dns_instance_entry_factory
entry_factory = utils.import_object(dns_instance_entry_factory)
self.entry_factory = entry_factory()

@@ -19,11 +19,12 @@
Model classes that map instance Ip to dns record.
"""
import logging
from reddwarf.db import get_db_api
from reddwarf.common import exception
from reddwarf.common.models import ModelBase
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)

@@ -23,8 +23,8 @@ __version__ = '2.4'
import hashlib
import logging
from reddwarf.common import config
from reddwarf.openstack.common import log as logging
from reddwarf.common import cfg
from reddwarf.common import exception
from reddwarf.common.exception import NotFound
from reddwarf.dns.models import DnsRecord
@@ -33,15 +33,17 @@ from rsdns.client.future import RsDnsError
from reddwarf.dns.driver import DnsEntry
DNS_HOSTNAME = config.Config.get("dns_hostname", "")
DNS_ACCOUNT_ID = config.Config.get("dns_account_id", 0)
DNS_AUTH_URL = config.Config.get("dns_auth_url", "")
DNS_DOMAIN_NAME = config.Config.get("dns_domain_name", "")
DNS_USERNAME = config.Config.get("dns_username", "")
DNS_PASSKEY = config.Config.get("dns_passkey", "")
DNS_MANAGEMENT_BASE_URL = config.Config.get("dns_management_base_url", "")
DNS_TTL = config.Config.get("dns_ttl", 300)
DNS_DOMAIN_ID = config.Config.get("dns_domain_id", 1)
CONF = cfg.CONF
DNS_HOSTNAME = CONF.dns_hostname
DNS_ACCOUNT_ID = CONF.dns_account_id
DNS_AUTH_URL = CONF.dns_auth_url
DNS_DOMAIN_NAME = CONF.dns_domain_name
DNS_USERNAME = CONF.dns_username
DNS_PASSKEY = CONF.dns_passkey
DNS_MANAGEMENT_BASE_URL = CONF.dns_management_base_url
DNS_TTL = CONF.dns_ttl
DNS_DOMAIN_ID = CONF.dns_domain_id
LOG = logging.getLogger(__name__)

@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.common import extensions
from reddwarf.common import wsgi

@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.common.remote import create_nova_client
from reddwarf.instance.models import DBInstance

@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
from novaclient import exceptions as nova_exceptions
@@ -25,7 +25,7 @@ from reddwarf.common.remote import create_nova_client
from reddwarf.extensions.account import models
from reddwarf.extensions.account import views
from reddwarf.instance.models import DBInstance
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)

@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.common import extensions
from reddwarf.common import wsgi

@@ -15,11 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.common import exception
from reddwarf.common import wsgi
from reddwarf.extensions.mgmt.host import models
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)

@@ -19,11 +19,10 @@
Model classes that extend the instances functionality for MySQL instances.
"""
import logging
from reddwarf.openstack.common import log as logging
from reddwarf import db
from reddwarf.common import config
from reddwarf.common import exception
from reddwarf.common import utils
from reddwarf.instance.models import DBInstance
@@ -35,7 +34,6 @@ from reddwarf.common.remote import create_nova_client
from novaclient import exceptions as nova_exceptions
CONFIG = config.Config
LOG = logging.getLogger(__name__)

@@ -15,7 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import webob.exc
from reddwarf.common import exception
@@ -25,6 +24,8 @@ from reddwarf.extensions.mgmt.host import models
from reddwarf.extensions.mgmt.host import views
from reddwarf.extensions.mysql import models as mysql_models
from reddwarf.instance.service import InstanceController
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)

@@ -12,9 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.common import config
from reddwarf.common.remote import create_nova_client
from reddwarf.common.remote import create_nova_volume_client
from reddwarf.instance import models as imodels
@@ -23,7 +22,6 @@ from reddwarf.instance import models as instance_models
from reddwarf.extensions.mysql import models as mysql_models
CONFIG = config.Config
LOG = logging.getLogger(__name__)

@@ -15,22 +15,23 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import webob.exc
from novaclient import exceptions as nova_exceptions
from reddwarf.common import exception
from reddwarf.common import wsgi
from reddwarf.extensions.mgmt.instances import models
from reddwarf.extensions.mgmt.instances.views import DiagnosticsView
from reddwarf.extensions.mgmt.instances.views import HwInfoView
from reddwarf.instance import models as instance_models
from reddwarf.extensions.mgmt.instances import views
from reddwarf.extensions.mysql import models as mysql_models
from reddwarf.instance.service import InstanceController
from reddwarf.common.auth import admin_context
from reddwarf.common.remote import create_nova_client
from reddwarf.instance import models as instance_models
from reddwarf.extensions.mgmt.instances import models
from reddwarf.extensions.mgmt.instances import views
from reddwarf.extensions.mgmt.instances.views import DiagnosticsView
from reddwarf.extensions.mgmt.instances.views import HwInfoView
from reddwarf.extensions.mysql import models as mysql_models
from reddwarf.instance.service import InstanceController
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)

@@ -19,7 +19,7 @@
Model classes that extend the instances functionality for volumes.
"""
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.common.remote import create_nova_volume_client

@@ -15,14 +15,15 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import webob.exc
from reddwarf.common.auth import admin_context
from reddwarf.common import exception
from reddwarf.common import wsgi
from reddwarf.common.auth import admin_context
from reddwarf.extensions.mgmt.volume import models
from reddwarf.extensions.mgmt.volume import views
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)

@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
from reddwarf.common import extensions
from reddwarf.common import wsgi

@@ -19,17 +19,17 @@
Model classes that extend the instances functionality for MySQL instances.
"""
import logging
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.common import exception
from reddwarf.common import utils
from reddwarf.db import get_db_api
from reddwarf.instance import models as base_models
from reddwarf.guestagent.db import models as guest_models
from reddwarf.common.remote import create_guest_client
from reddwarf.db import get_db_api
from reddwarf.guestagent.db import models as guest_models
from reddwarf.instance import models as base_models
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
CONFIG = config.Config
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -150,7 +150,7 @@ def load_via_context(cls, context, instance_id):
class Users(object):
DEFAULT_LIMIT = int(CONFIG.get('users_page_size', '20'))
DEFAULT_LIMIT = CONF.users_page_size
@classmethod
def load(cls, context, instance_id):
@@ -163,7 +163,7 @@ class Users(object):
marker=marker,
include_marker=include_marker)
model_users = []
ignore_users = CONFIG.get_list('ignore_users', [])
ignore_users = CONF.ignore_users
for user in user_list:
mysql_user = guest_models.MySQLUser()
mysql_user.deserialize(user)
@@ -213,7 +213,7 @@ class Schema(object):
class Schemas(object):
DEFAULT_LIMIT = int(CONFIG.get('databases_page_size', '20'))
DEFAULT_LIMIT = CONF.databases_page_size
@classmethod
def load(cls, context, instance_id):
@@ -226,7 +226,7 @@ class Schemas(object):
marker=marker,
include_marker=include_marker)
model_schemas = []
ignore_dbs = CONFIG.get_list('ignore_dbs', [])
ignore_dbs = CONF.ignore_dbs
for schema in schemas:
mysql_schema = guest_models.MySQLDatabase()
mysql_schema.deserialize(schema)

@@ -15,17 +15,18 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import webob.exc
from reddwarf.common import exception
from reddwarf.common import pagination
from reddwarf.common import wsgi
from reddwarf.guestagent.db import models as guest_models
from reddwarf.extensions.mysql.common import populate_databases
from reddwarf.extensions.mysql.common import populate_users
from reddwarf.extensions.mysql import models
from reddwarf.extensions.mysql import views
from reddwarf.guestagent.db import models as guest_models
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
@@ -42,7 +43,7 @@ class RootController(wsgi.Controller):
is_root_enabled = models.Root.load(context, instance_id)
return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)
def create(self, req, body, tenant_id, instance_id):
def create(self, req, tenant_id, instance_id):
""" Enable the root user for the db instance """
LOG.info(_("Enabling root for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)

@@ -19,36 +19,40 @@
Handles all request to the Platform or Guest VM
"""
import logging
from eventlet import Timeout
from reddwarf.openstack.common import rpc
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.common import exception
from reddwarf.common import utils
from reddwarf.guestagent import models as agent_models
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import rpc
from reddwarf.openstack.common.rpc import proxy
from reddwarf.openstack.common.gettextutils import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
AGENT_LOW_TIMEOUT = int(config.Config.get('agent_call_low_timeout', 5))
AGENT_HIGH_TIMEOUT = int(config.Config.get('agent_call_high_timeout', 60))
AGENT_LOW_TIMEOUT = CONF.agent_call_low_timeout
AGENT_HIGH_TIMEOUT = CONF.agent_call_high_timeout
RPC_API_VERSION = "1.0"
class API(object):
class API(proxy.RpcProxy):
"""API for interacting with the guest manager."""
def __init__(self, context, id):
self.context = context
self.id = id
super(API, self).__init__(self._get_routing_key(),
RPC_API_VERSION)
def _call(self, method_name, timeout_sec, **kwargs):
LOG.debug("Calling %s" % method_name)
timeout = Timeout(timeout_sec)
try:
result = rpc.call(self.context, self._get_routing_key(),
{'method': method_name, 'args': kwargs})
result = self.call(self.context,
self.make_msg(method_name, **kwargs),
timeout=timeout_sec)
LOG.debug("Result is %s" % result)
return result
except Exception as e:
@@ -59,24 +63,30 @@ class API(object):
raise
else:
raise exception.GuestTimeout()
finally:
timeout.cancel()
def _cast(self, method_name, **kwargs):
LOG.debug("Casting %s" % method_name)
try:
rpc.cast(self.context, self._get_routing_key(),
{'method': method_name, 'args': kwargs})
self.cast(self.context, self.make_msg(method_name, **kwargs),
topic=kwargs.get('topic'),
version=kwargs.get('version'))
except Exception as e:
LOG.error(e)
raise exception.GuestError(original_message=str(e))
def _cast_with_consumer(self, method_name, **kwargs):
try:
rpc.cast_with_consumer(self.context, self._get_routing_key(),
{'method': method_name, 'args': kwargs})
conn = rpc.create_connection(new=True)
conn.create_consumer(self._get_routing_key(), None, fanout=False)
except Exception as e:
LOG.error(e)
raise exception.GuestError(original_message=str(e))
finally:
if conn:
conn.close()
# leave the cast call out of the hackity consumer create
self._cast(method_name, **kwargs)
def delete_queue(self):
"""Deletes the queue."""
@ -164,9 +174,8 @@ class API(object):
as a database container"""
LOG.debug(_("Sending the call to prepare the Guest"))
self._cast_with_consumer(
"prepare", databases=databases,
memory_mb=memory_mb, users=users, device_path=device_path,
mount_point=mount_point)
"prepare", databases=databases, memory_mb=memory_mb,
users=users, device_path=device_path, mount_point=mount_point)
def restart(self):
"""Restart the MySQL server."""


@ -18,7 +18,9 @@
import re
import string
from reddwarf.common import config
from reddwarf.common import cfg
CONF = cfg.CONF
class Base(object):
@ -32,7 +34,7 @@ class Base(object):
class MySQLDatabase(Base):
"""Represents a Database and its properties"""
_ignore_dbs = config.Config.get_list("ignore_dbs", [])
_ignore_dbs = CONF.ignore_dbs
# Defaults
__charset__ = "utf8"
@ -343,7 +345,7 @@ class MySQLUser(Base):
"""Represents a MySQL User and its associated properties"""
not_supported_chars = re.compile("^\s|\s$|'|\"|;|`|,|/|\\\\")
_ignore_users = config.Config.get_list("ignore_users", [])
_ignore_users = CONF.ignore_users
def __init__(self):
self._name = None


@ -25,8 +25,6 @@ handles RPC calls relating to Platform specific operations.
"""
import logging
import os
import pexpect
import re
@ -43,12 +41,15 @@ from sqlalchemy.sql.expression import text
from reddwarf import db
from reddwarf.common.exception import GuestError
from reddwarf.common.exception import ProcessExecutionError
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.common import utils
from reddwarf.guestagent.db import models
from reddwarf.guestagent.volume import VolumeDevice
from reddwarf.guestagent.query import Query
from reddwarf.guestagent import pkg
from reddwarf.instance import models as rd_models
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
ADMIN_USER_NAME = "os_admin"
@ -66,7 +67,7 @@ TMP_MYCNF = "/tmp/my.cnf.tmp"
DBAAS_MYCNF = "/etc/dbaas/my.cnf/my.cnf.%dM"
MYSQL_BASE_DIR = "/var/lib/mysql"
CONFIG = config.Config
CONF = cfg.CONF
INCLUDE_MARKER_OPERATORS = {
True: ">=",
False: ">"
@ -229,7 +230,7 @@ class MySqlAppStatus(object):
@staticmethod
def _load_status():
"""Loads the status from the database."""
id = config.Config.get('guest_id')
id = CONF.guest_id
return rd_models.InstanceServiceStatus.find_by(instance_id=id)
def set_status(self, status):
@ -497,90 +498,6 @@ class MySqlAdmin(object):
return users, next_marker
class DBaaSAgent(object):
""" Database as a Service Agent Controller """
def __init__(self):
self.status = MySqlAppStatus.get()
def begin_mysql_restart(self):
self.restart_mode = True
def create_database(self, databases):
return MySqlAdmin().create_database(databases)
def create_user(self, users):
MySqlAdmin().create_user(users)
def delete_database(self, database):
return MySqlAdmin().delete_database(database)
def delete_user(self, user):
MySqlAdmin().delete_user(user)
def list_databases(self, limit=None, marker=None, include_marker=False):
return MySqlAdmin().list_databases(limit, marker, include_marker)
def list_users(self, limit=None, marker=None, include_marker=False):
return MySqlAdmin().list_users(limit, marker, include_marker)
def enable_root(self):
return MySqlAdmin().enable_root()
def is_root_enabled(self):
return MySqlAdmin().is_root_enabled()
def prepare(self, databases, memory_mb, users, device_path=None,
mount_point=None):
"""Makes ready DBAAS on a Guest container."""
from reddwarf.guestagent.pkg import PkgAgent
if not isinstance(self, PkgAgent):
raise TypeError("This must also be an instance of Pkg agent.")
pkg = self # Python cast.
self.status.begin_mysql_install()
# status end_mysql_install set with install_and_secure()
app = MySqlApp(self.status)
restart_mysql = False
if device_path:
device = VolumeDevice(device_path)
device.format()
if app.is_installed(pkg):
#stop and do not update database
app.stop_mysql()
restart_mysql = True
#rsync existing data
device.migrate_data(MYSQL_BASE_DIR)
#mount the volume
device.mount(mount_point)
LOG.debug(_("Mounted the volume."))
#check mysql was installed and stopped
if restart_mysql:
app.start_mysql()
app.install_and_secure(pkg, memory_mb)
LOG.info("Creating initial databases and users following successful "
"prepare.")
self.create_database(databases)
self.create_user(users)
LOG.info('"prepare" call has finished.')
def restart(self):
app = MySqlApp(self.status)
app.restart()
def start_mysql_with_conf_changes(self, updated_memory_size):
app = MySqlApp(self.status)
pkg = self # Python cast.
app.start_mysql_with_conf_changes(pkg, updated_memory_size)
def stop_mysql(self):
app = MySqlApp(self.status)
app.stop_mysql()
def update_status(self):
"""Update the status of the MySQL service"""
MySqlAppStatus.get().update()
class KeepAliveConnection(interfaces.PoolListener):
"""
A connection pool listener that ensures live connections are returned
@ -610,8 +527,7 @@ class MySqlApp(object):
def __init__(self, status):
""" By default login with root no password for initial setup. """
self.state_change_wait_time = int(config.Config.get(
'state_change_wait_time', 2 * 60))
self.state_change_wait_time = CONF.state_change_wait_time
self.status = status
def _create_admin_user(self, client, password):
@ -639,13 +555,13 @@ class MySqlApp(object):
WHERE User='root';""")
client.execute(t, pwd=generate_random_password())
def install_and_secure(self, pkg, memory_mb):
def install_and_secure(self, memory_mb):
"""Prepare the guest machine with a secure mysql server installation"""
LOG.info(_("Preparing Guest as MySQL Server"))
#TODO(tim.simpson): Check that MySQL is not already installed.
self.status.begin_mysql_install()
self._install_mysql(pkg)
self._install_mysql()
LOG.info(_("Generating root password..."))
admin_password = generate_random_password()
@ -658,13 +574,13 @@ class MySqlApp(object):
self._create_admin_user(client, admin_password)
self.stop_mysql()
self._write_mycnf(pkg, memory_mb, admin_password)
self._write_mycnf(memory_mb, admin_password)
self.start_mysql()
self.status.end_install_or_restart()
LOG.info(_("Dbaas install_and_secure complete."))
def _install_mysql(self, pkg):
def _install_mysql(self):
"""Install mysql server. The current version is 5.1"""
LOG.debug(_("Installing mysql server"))
pkg.pkg_install(self.MYSQL_PACKAGE_VERSION, self.TIME_OUT)
@ -749,7 +665,7 @@ class MySqlApp(object):
if "No such file or directory" not in str(pe):
raise
def _write_mycnf(self, pkg, update_memory_mb, admin_password):
def _write_mycnf(self, update_memory_mb, admin_password):
"""
Install the set of mysql my.cnf templates from dbaas-mycnf package.
The package generates a template suited for the current
@ -812,17 +728,17 @@ class MySqlApp(object):
self.status.end_install_or_restart()
raise RuntimeError("Could not start MySQL!")
def start_mysql_with_conf_changes(self, pkg, updated_memory_mb):
def start_mysql_with_conf_changes(self, updated_memory_mb):
LOG.info(_("Starting mysql with conf changes..."))
if self.status.is_mysql_running:
LOG.error(_("Cannot execute start_mysql_with_conf_changes because "
"MySQL state == %s!") % self.status)
raise RuntimeError("MySQL not stopped.")
LOG.info(_("Initiating config."))
self._write_mycnf(pkg, updated_memory_mb, None)
self._write_mycnf(updated_memory_mb, None)
self.start_mysql(True)
def is_installed(self, pkg):
def is_installed(self):
#(cp16net) could raise an exception, does it need to be handled here?
version = pkg.pkg_version(self.MYSQL_PACKAGE_VERSION)
return version is not None


@ -1,119 +1,83 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all processes within the Guest VM, treating it as a Platform
The :py:class:`GuestManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to Platform specific operations.
"""
import functools
import logging
import traceback
from reddwarf.common import config
from reddwarf.common import exception
from reddwarf.common import utils
from reddwarf.common import service
from reddwarf.guestagent import dbaas
from reddwarf.guestagent import volume
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import periodic_task
LOG = logging.getLogger(__name__)
CONFIG = config.Config
GUEST_SERVICES = {'mysql': 'reddwarf.guestagent.dbaas.DBaaSAgent'}
class GuestManager(service.Manager):
class Manager(periodic_task.PeriodicTasks):
"""Manages the tasks within a Guest VM."""
RPC_API_VERSION = "1.0"
@periodic_task.periodic_task(ticks_between_runs=10)
def update_status(self, context):
"""Update the status of the MySQL service"""
dbaas.MySqlAppStatus.get().update()
def __init__(self, guest_drivers=None, *args, **kwargs):
service_type = CONFIG.get('service_type')
try:
service_impl = GUEST_SERVICES[service_type]
except KeyError as e:
LOG.error(_("Could not create guest, no impl for key - %s") %
service_type)
raise e
LOG.info("Create guest driver %s" % service_impl)
self.create_guest_driver(service_impl)
super(GuestManager, self).__init__(*args, **kwargs)
def create_database(self, context, databases):
return dbaas.MySqlAdmin().create_database(databases)
def create_guest_driver(self, service_impl):
guest_drivers = [service_impl,
'reddwarf.guestagent.pkg.PkgAgent']
classes = []
for guest_driver in guest_drivers:
LOG.info(guest_driver)
driver = utils.import_class(guest_driver)
classes.append(driver)
try:
cls = type("GuestDriver", tuple(set(classes)), {})
self.driver = cls()
except TypeError as te:
msg = "An issue occurred instantiating the GuestDriver as the " \
"following classes: " + str(classes) + \
" Exception=" + str(te)
raise TypeError(msg)
def create_user(self, context, users):
dbaas.MySqlAdmin().create_user(users)
def init_host(self):
"""Method for any service initialization"""
pass
def delete_database(self, context, database):
return dbaas.MySqlAdmin().delete_database(database)
def periodic_tasks(self, raise_on_error=False):
"""Method for running any periodic tasks.
def delete_user(self, context, user):
dbaas.MySqlAdmin().delete_user(user)
Right now does the status updates"""
status_method = "update_status"
try:
method = getattr(self.driver, status_method)
except AttributeError as ae:
LOG.error(_("Method %s not found for driver %s"), status_method,
self.driver)
if raise_on_error:
raise ae
try:
method()
except Exception as e:
LOG.error("Got an error during periodic tasks!")
LOG.debug(traceback.format_exc())
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
return dbaas.MySqlAdmin().list_databases(limit, marker,
include_marker)
def upgrade(self, context):
"""Upgrade the guest agent and restart the agent"""
LOG.debug(_("Self upgrade of guest agent issued"))
def list_users(self, context, limit=None, marker=None,
include_marker=False):
return dbaas.MySqlAdmin().list_users(limit, marker,
include_marker)
def __getattr__(self, key):
"""Converts all method calls and direct it at the driver"""
return functools.partial(self._mapper, key)
def enable_root(self, context):
return dbaas.MySqlAdmin().enable_root()
def _mapper(self, method, context, *args, **kwargs):
""" Tries to call the respective driver method """
try:
func = getattr(self.driver, method)
except AttributeError:
LOG.error(_("Method %s not found for driver %s"), method,
self.driver)
raise exception.NotFound("Method %s is not available for the "
"chosen driver.")
try:
return func(*args, **kwargs)
except Exception as e:
LOG.error("Got an error running %s!" % method)
LOG.debug(traceback.format_exc())
def is_root_enabled(self, context):
return dbaas.MySqlAdmin().is_root_enabled()
def prepare(self, context, databases, memory_mb, users, device_path=None,
mount_point=None):
"""Makes ready DBAAS on a Guest container."""
dbaas.MySqlAppStatus.get().begin_mysql_install()
# status end_mysql_install set with install_and_secure()
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
restart_mysql = False
if device_path:
device = volume.VolumeDevice(device_path)
device.format()
if app.is_installed():
#stop and do not update database
app.stop_mysql()
restart_mysql = True
#rsync existing data
device.migrate_data(dbaas.MYSQL_BASE_DIR)
#mount the volume
device.mount(mount_point)
LOG.debug(_("Mounted the volume."))
#check mysql was installed and stopped
if restart_mysql:
app.start_mysql()
app.install_and_secure(memory_mb)
LOG.info("Creating initial databases and users following successful "
"prepare.")
self.create_database(context, databases)
self.create_user(context, users)
LOG.info('"prepare" call has finished.')
def restart(self, context):
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
app.restart()
def start_mysql_with_conf_changes(self, context, updated_memory_size):
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
app.start_mysql_with_conf_changes(updated_memory_size)
def stop_mysql(self, context):
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
app.stop_mysql()


@ -12,20 +12,23 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from datetime import datetime
from datetime import timedelta
from reddwarf.db import get_db_api
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.common import exception
from reddwarf.common import utils
from reddwarf.db import get_db_api
from reddwarf.db import models as dbmodels
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
AGENT_HEARTBEAT = int(config.Config.get('agent_heartbeat_time', '10'))
CONF = cfg.CONF
AGENT_HEARTBEAT = CONF.agent_heartbeat_time
def persisted_models():


@ -19,13 +19,14 @@
Manages packages on the Guest VM.
"""
import commands
import logging
import pexpect
import re
from reddwarf.common import exception
from reddwarf.common.exception import ProcessExecutionError
from reddwarf.common import utils
from reddwarf.common.exception import ProcessExecutionError
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
@ -67,160 +68,162 @@ def wait_and_close_proc(child, time_out=-1):
child.close()
class PkgAgent(object):
""" Agent Controller which can maintain package installs on a guest."""
def _fix(time_out):
"""Sometimes you have to run this command before a pkg will install."""
#sudo dpkg --configure -a
child = pexpect.spawn("sudo -E dpkg --configure -a")
wait_and_close_proc(child, time_out)
def _fix(self, time_out):
"""Sometimes you have to run this command before a pkg will install."""
#sudo dpkg --configure -a
child = pexpect.spawn("sudo -E dpkg --configure -a")
wait_and_close_proc(child, time_out)
def _install(self, package_name, time_out):
"""Attempts to install a package.
def _install(package_name, time_out):
"""Attempts to install a package.
Returns OK if the package installs fine or a result code if a
recoverable-error occurred.
Raises an exception if a non-recoverable error or time out occurs.
Returns OK if the package installs fine or a result code if a
recoverable-error occurred.
Raises an exception if a non-recoverable error or time out occurs.
"""
child = pexpect.spawn("sudo -E DEBIAN_FRONTEND=noninteractive "
"apt-get -y --allow-unauthenticated install %s"
% package_name)
try:
i = child.expect(['.*password*',
'E: Unable to locate package %s' % package_name,
"Couldn't find package % s" % package_name,
("dpkg was interrupted, you must manually run "
"'sudo dpkg --configure -a'"),
"Unable to lock the administration directory",
"Setting up %s*" % package_name,
"is already the newest version"],
timeout=time_out)
if i == 0:
raise PkgPermissionError("Invalid permissions.")
elif i == 1 or i == 2:
raise PkgNotFoundError("Could not find apt %s" % package_name)
elif i == 3:
return RUN_DPKG_FIRST
elif i == 4:
raise PkgAdminLockError()
except pexpect.TIMEOUT:
kill_proc(child)
raise PkgTimeout("Process timeout after %i seconds." % time_out)
try:
wait_and_close_proc(child)
except pexpect.TIMEOUT as e:
LOG.error("wait_and_close_proc failed: %s" % e)
#TODO(tim.simpson): As of RDL, and on my machine exclusively (in
# both Virtual Box and VmWare!) this fails, but
# the package is installed.
return OK
"""
child = pexpect.spawn("sudo -E DEBIAN_FRONTEND=noninteractive "
"apt-get -y --allow-unauthenticated install %s"
% package_name)
try:
i = child.expect(['.*password*',
'E: Unable to locate package %s' % package_name,
"Couldn't find package % s" % package_name,
("dpkg was interrupted, you must manually run "
"'sudo dpkg --configure -a'"),
"Unable to lock the administration directory",
"Setting up %s*" % package_name,
"is already the newest version"],
timeout=time_out)
if i == 0:
raise PkgPermissionError("Invalid permissions.")
elif i == 1 or i == 2:
raise PkgNotFoundError("Could not find apt %s" % package_name)
elif i == 3:
return RUN_DPKG_FIRST
elif i == 4:
raise PkgAdminLockError()
except pexpect.TIMEOUT:
kill_proc(child)
raise PkgTimeout("Process timeout after %i seconds." % time_out)
try:
wait_and_close_proc(child)
except pexpect.TIMEOUT as e:
LOG.error("wait_and_close_proc failed: %s" % e)
#TODO(tim.simpson): As of RDL, and on my machine exclusively (in
# both Virtual Box and VmWare!) this fails, but
# the package is installed.
return OK
def _remove(self, package_name, time_out):
"""Removes a package.
Returns OK if the package is removed successfully or a result code if a
recoverable-error occurs.
Raises an exception if a non-recoverable error or time out occurs.
def _remove(package_name, time_out):
"""Removes a package.
"""
child = pexpect.spawn("sudo -E apt-get -y --allow-unauthenticated "
"remove %s" % package_name)
try:
i = child.expect(['.*password*',
'E: Unable to locate package %s' % package_name,
'Package is in a very bad inconsistent state',
("Sub-process /usr/bin/dpkg returned an error "
"code"),
("dpkg was interrupted, you must manually run "
"'sudo dpkg --configure -a'"),
"Unable to lock the administration directory",
#'The following packages will be REMOVED',
"Removing %s*" % package_name],
timeout=time_out)
if i == 0:
raise PkgPermissionError("Invalid permissions.")
elif i == 1:
raise PkgNotFoundError("Could not find pkg %s" % package_name)
elif i == 2 or i == 3:
return REINSTALL_FIRST
elif i == 4:
return RUN_DPKG_FIRST
elif i == 5:
raise PkgAdminLockError()
wait_and_close_proc(child)
except pexpect.TIMEOUT:
kill_proc(child)
raise PkgTimeout("Process timeout after %i seconds." % time_out)
return OK
Returns OK if the package is removed successfully or a result code if a
recoverable-error occurs.
Raises an exception if a non-recoverable error or time out occurs.
def pkg_install(self, package_name, time_out):
"""Installs a package."""
try:
utils.execute("apt-get", "update", run_as_root=True,
root_helper="sudo")
except ProcessExecutionError as e:
LOG.error(_("Error updating the apt sources"))
"""
child = pexpect.spawn("sudo -E apt-get -y --allow-unauthenticated "
"remove %s" % package_name)
try:
i = child.expect(['.*password*',
'E: Unable to locate package %s' % package_name,
'Package is in a very bad inconsistent state',
("Sub-process /usr/bin/dpkg returned an error "
"code"),
("dpkg was interrupted, you must manually run "
"'sudo dpkg --configure -a'"),
"Unable to lock the administration directory",
#'The following packages will be REMOVED',
"Removing %s*" % package_name],
timeout=time_out)
if i == 0:
raise PkgPermissionError("Invalid permissions.")
elif i == 1:
raise PkgNotFoundError("Could not find pkg %s" % package_name)
elif i == 2 or i == 3:
return REINSTALL_FIRST
elif i == 4:
return RUN_DPKG_FIRST
elif i == 5:
raise PkgAdminLockError()
wait_and_close_proc(child)
except pexpect.TIMEOUT:
kill_proc(child)
raise PkgTimeout("Process timeout after %i seconds." % time_out)
return OK
result = self._install(package_name, time_out)
def pkg_install(package_name, time_out):
"""Installs a package."""
try:
utils.execute("apt-get", "update", run_as_root=True,
root_helper="sudo")
except ProcessExecutionError as e:
LOG.error(_("Error updating the apt sources"))
result = _install(package_name, time_out)
if result != OK:
if result == RUN_DPKG_FIRST:
_fix(time_out)
result = _install(package_name, time_out)
if result != OK:
if result == RUN_DPKG_FIRST:
self._fix(time_out)
result = self._install(package_name, time_out)
if result != OK:
raise PkgPackageStateError("Package %s is in a bad state."
% package_name)
raise PkgPackageStateError("Package %s is in a bad state."
% package_name)
def pkg_version(self, package_name):
cmd_list = ["dpkg", "-l", package_name]
p = commands.getstatusoutput(' '.join(cmd_list))
# check the command status code
if not p[0] == 0:
return None
# Need to capture the version string
# check the command output
std_out = p[1]
patterns = ['.*No packages found matching.*',
"\w\w\s+(\S+)\s+(\S+)\s+(.*)$"]
for line in std_out.split("\n"):
for p in patterns:
regex = re.compile(p)
matches = regex.match(line)
if matches:
line = matches.group()
parts = line.split()
if not parts:
msg = _("returned nothing")
LOG.error(msg)
raise exception.GuestError(msg)
if len(parts) <= 2:
msg = _("Unexpected output.")
LOG.error(msg)
raise exception.GuestError(msg)
if parts[1] != package_name:
msg = _("Unexpected output:[1] = %s" % str(parts[1]))
LOG.error(msg)
raise exception.GuestError(msg)
if parts[0] == 'un' or parts[2] == '<none>':
return None
return parts[2]
msg = _("version() saw unexpected output from dpkg!")
LOG.error(msg)
raise exception.GuestError(msg)
def pkg_remove(self, package_name, time_out):
"""Removes a package."""
if self.pkg_version(package_name) is None:
return
result = self._remove(package_name, time_out)
def pkg_version(package_name):
cmd_list = ["dpkg", "-l", package_name]
p = commands.getstatusoutput(' '.join(cmd_list))
# check the command status code
if not p[0] == 0:
return None
# Need to capture the version string
# check the command output
std_out = p[1]
patterns = ['.*No packages found matching.*',
"\w\w\s+(\S+)\s+(\S+)\s+(.*)$"]
for line in std_out.split("\n"):
for p in patterns:
regex = re.compile(p)
matches = regex.match(line)
if matches:
line = matches.group()
parts = line.split()
if not parts:
msg = _("returned nothing")
LOG.error(msg)
raise exception.GuestError(msg)
if len(parts) <= 2:
msg = _("Unexpected output.")
LOG.error(msg)
raise exception.GuestError(msg)
if parts[1] != package_name:
msg = _("Unexpected output:[1] = %s" % str(parts[1]))
LOG.error(msg)
raise exception.GuestError(msg)
if parts[0] == 'un' or parts[2] == '<none>':
return None
return parts[2]
msg = _("version() saw unexpected output from dpkg!")
LOG.error(msg)
raise exception.GuestError(msg)
def pkg_remove(package_name, time_out):
"""Removes a package."""
if pkg_version(package_name) is None:
return
result = _remove(package_name, time_out)
if result != OK:
if result == REINSTALL_FIRST:
_install(package_name, time_out)
elif result == RUN_DPKG_FIRST:
_fix(time_out)
result = _remove(package_name, time_out)
if result != OK:
if result == REINSTALL_FIRST:
self._install(package_name, time_out)
elif result == RUN_DPKG_FIRST:
self._fix(time_out)
result = self._remove(package_name, time_out)
if result != OK:
raise PkgPackageStateError("Package %s is in a bad state."
% package_name)
raise PkgPackageStateError("Package %s is in a bad state."
% package_name)
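Since the agent-facing helpers are now plain module-level functions, callers (such as MySqlApp in dbaas.py above) use them directly. A usage sketch; the package name and timeout are illustrative:

from reddwarf.guestagent import pkg

pkg.pkg_install("mysql-server-5.1", 120)    # raises PkgTimeout and friends
if pkg.pkg_version("mysql-server-5.1"):     # returns None when not installed
    pkg.pkg_remove("mysql-server-5.1", 120)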


@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
import routes
import webob.exc


@ -15,11 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.openstack.common import log as logging
import os
import pexpect
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.common import utils
from reddwarf.common.exception import GuestError
from reddwarf.common.exception import ProcessExecutionError
@ -27,7 +27,7 @@ from reddwarf.common.exception import ProcessExecutionError
TMP_MOUNT_POINT = "/mnt/volume"
LOG = logging.getLogger(__name__)
CONFIG = config.Config
CONF = cfg.CONF
class VolumeDevice(object):
@ -55,7 +55,7 @@ class VolumeDevice(object):
num_tries to account for the time lag.
"""
try:
num_tries = CONFIG.get('num_tries', 3)
num_tries = CONF.num_tries
utils.execute('sudo', 'blockdev', '--getsize64', self.device_path,
attempts=num_tries)
except ProcessExecutionError:
@ -68,7 +68,7 @@ class VolumeDevice(object):
i = child.expect(['has_journal', 'Wrong magic number'])
if i == 0:
return
volume_fstype = CONFIG.get('volume_fstype', 'ext3')
volume_fstype = CONF.volume_fstype
raise IOError('Device path at %s did not seem to be %s.' %
(self.device_path, volume_fstype))
except pexpect.EOF:
@ -77,11 +77,11 @@ class VolumeDevice(object):
def _format(self):
"""Calls mkfs to format the device at device_path."""
volume_fstype = CONFIG.get('volume_fstype', 'ext3')
format_options = CONFIG.get('format_options', '-m 5')
volume_fstype = CONF.volume_fstype
format_options = CONF.format_options
cmd = "sudo mkfs -t %s %s %s" % (volume_fstype,
format_options, self.device_path)
volume_format_timeout = CONFIG.get('volume_format_timeout', 120)
volume_format_timeout = CONF.volume_format_timeout
child = pexpect.spawn(cmd, timeout=volume_format_timeout)
# child.expect("(y,n)")
# child.sendline('y')
@ -127,8 +127,8 @@ class VolumeMountPoint(object):
def __init__(self, device_path, mount_point):
self.device_path = device_path
self.mount_point = mount_point
self.volume_fstype = CONFIG.get('volume_fstype', 'ext3')
self.mount_options = CONFIG.get('mount_options', 'defaults,noatime')
self.volume_fstype = CONF.volume_fstype
self.mount_options = CONF.mount_options
def mount(self):
if not os.path.exists(self.mount_point):


@ -18,12 +18,11 @@
"""Model classes that form the core of instances functionality."""
import eventlet
import logging
import netaddr
from datetime import datetime
from novaclient import exceptions as nova_exceptions
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.common import exception
from reddwarf.common import utils
from reddwarf.common.remote import create_dns_client
@ -35,12 +34,14 @@ from reddwarf.instance.tasks import InstanceTask
from reddwarf.instance.tasks import InstanceTasks
from reddwarf.guestagent import models as agent_models
from reddwarf.taskmanager import api as task_api
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
from eventlet import greenthread
CONFIG = config.Config
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -412,8 +413,8 @@ class Instance(BuiltInstance):
instance_id=db_info.id,
status=ServiceStatuses.NEW)
dns_support = config.Config.get("reddwarf_dns_support", 'False')
if utils.bool_from_string(dns_support):
dns_support = CONF.reddwarf_dns_support
if dns_support:
dns_client = create_dns_client(context)
hostname = dns_client.determine_hostname(db_info.id)
db_info.hostname = hostname
@ -523,7 +524,7 @@ def create_server_list_matcher(server_list):
class Instances(object):
DEFAULT_LIMIT = int(config.Config.get('instances_page_size', '20'))
DEFAULT_LIMIT = CONF.instances_page_size
@staticmethod
def load(context):


@ -15,11 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import routes
import webob.exc
from reddwarf.common import config
from reddwarf.common import cfg
from reddwarf.common import exception
from reddwarf.common import pagination
from reddwarf.common import utils
@ -27,9 +26,11 @@ from reddwarf.common import wsgi
from reddwarf.extensions.mysql.common import populate_databases
from reddwarf.extensions.mysql.common import populate_users
from reddwarf.instance import models, views
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
CONFIG = config.Config
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -202,7 +203,7 @@ class InstanceController(wsgi.Controller):
else:
volume_size = None
instance_max = int(config.Config.get('max_instances_per_user', 5))
instance_max = CONF.max_instances_per_user
number_instances = models.DBInstance.find_all(tenant_id=tenant_id,
deleted=False).count()
@ -252,7 +253,7 @@ class InstanceController(wsgi.Controller):
"integer value, %s cannot be accepted."
% volume_size)
raise exception.ReddwarfError(msg)
max_size = int(config.Config.get('max_accepted_volume_size', 1))
max_size = CONF.max_accepted_volume_size
if int(volume_size) > max_size:
msg = ("Volume 'size' cannot exceed maximum "
"of %d Gb, %s cannot be accepted."
@ -270,10 +271,8 @@ class InstanceController(wsgi.Controller):
name = body['instance'].get('name', '').strip()
if not name:
raise exception.MissingKey(key='name')
vol_enabled = utils.bool_from_string(
config.Config.get('reddwarf_volume_support', 'True'))
must_have_vol = utils.bool_from_string(
config.Config.get('reddwarf_must_use_volume', 'False'))
vol_enabled = CONF.reddwarf_volume_support
must_have_vol = CONF.reddwarf_must_use_volume
if vol_enabled:
if body['instance'].get('volume', None):
if body['instance']['volume'].get('size', None):


@ -15,14 +15,16 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reddwarf.common import config
from reddwarf.openstack.common import log as logging
from reddwarf.common import cfg
from reddwarf.common import utils
from reddwarf.common.views import create_links
from reddwarf.instance import models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def get_ip_address(addresses):
if (addresses is not None and
@ -91,8 +93,8 @@ class InstanceDetailView(InstanceView):
result['instance']['created'] = self.instance.created
result['instance']['updated'] = self.instance.updated
dns_support = config.Config.get("reddwarf_dns_support", 'False')
if utils.bool_from_string(dns_support):
dns_support = CONF.reddwarf_dns_support
if dns_support:
result['instance']['hostname'] = self.instance.hostname
if self.add_addresses:


@ -0,0 +1,44 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Authentication related utilities and helper functions.
"""
def auth_str_equal(provided, known):
"""Constant-time string comparison.
:param provided: the first string
:param known: the second string
:return: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks. When using the function for this purpose, always
provide the user-provided password as the first argument. The time this
function will take is always a factor of the length of this string.
"""
result = 0
p_len = len(provided)
k_len = len(known)
for i in xrange(p_len):
a = ord(provided[i]) if i < p_len else 0
b = ord(known[i]) if i < k_len else 0
result |= a ^ b
return (p_len == k_len) & (result == 0)
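As an illustration (made-up values), the function behaves like ==, but the work done scales only with the length of the user-provided first argument:

assert auth_str_equal('secret', 'secret')
assert not auth_str_equal('secret', 'secreX')      # same length, differs
assert not auth_str_equal('short', 'much-longer')  # length mismatch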

File diff suppressed because it is too large.


@ -22,6 +22,13 @@ Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
import uuid
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
@ -31,10 +38,44 @@ class RequestContext(object):
"""
def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False):
read_only=False, show_deleted=False, request_id=None):
self.auth_tok = auth_tok
self.user = user
self.tenant = tenant
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_tok,
'request_id': self.request_id}
def get_admin_context(show_deleted="no"):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
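A short sketch of how the new request-id plumbing behaves; the field values are made up:

ctx = RequestContext(auth_tok='token', user='u1', tenant='t1')
assert ctx.request_id.startswith('req-')  # generated when not supplied
payload = ctx.to_dict()                   # dict form, e.g. for rpc envelopes
assert payload['auth_token'] == 'token'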


@ -0,0 +1,80 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gc
import pprint
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from reddwarf.openstack.common import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
default=None,
help='port for eventlet backdoor to listen')
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
def _dont_use_this():
print "Don't use this, just disconnect instead"
def _find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
}
if CONF.backdoor_port is None:
return None
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = eventlet.listen(('localhost', CONF.backdoor_port))
port = sock.getsockname()[1]
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
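A usage sketch for services that opt in; this assumes backdoor_port has been set in the configuration (e.g. backdoor_port = 0 to let the OS pick a free port):

from reddwarf.openstack.common import eventlet_backdoor

port = eventlet_backdoor.initialize_if_enabled()
if port is not None:
    print 'eventlet backdoor listening on port %d' % port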


@ -21,17 +21,7 @@ Exceptions common to OpenStack projects
import logging
class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % (
description, cmd, exit_code, stdout, stderr)
IOError.__init__(self, message)
from reddwarf.openstack.common.gettextutils import _
class Error(Exception):
@ -109,7 +99,7 @@ def wrap_exception(f):
except Exception, e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception('Uncaught exception')
logging.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise


@ -24,26 +24,28 @@ import logging
import sys
import traceback
from reddwarf.openstack.common.gettextutils import _
@contextlib.contextmanager
def save_and_reraise_exception():
"""Save current exception, run some code and then re-raise.
In some cases the exception context can be cleared, resulting in None
being attempted to be reraised after an exception handler is run. This
being attempted to be re-raised after an exception handler is run. This
can happen when eventlet switches greenthreads or when running an
exception handler, code raises and catches an exception. In both
cases the exception context will be cleared.
To work around this, we save the exception state, run handler code, and
then re-raise the original exception. If another exception occurs, the
saved exception is logged and the new exception is reraised.
saved exception is logged and the new exception is re-raised.
"""
type_, value, tb = sys.exc_info()
try:
yield
except Exception:
logging.error('Original exception being dropped: %s' %
(traceback.format_exception(type_, value, tb)))
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(type_, value, tb))
raise
raise type_, value, tb
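A usage sketch; service and notify_failure() are hypothetical names, not part of this module:

def stop(service):
    try:
        service.stop()
    except Exception:
        with save_and_reraise_exception():
            # If this cleanup raises too, the original exception is
            # logged instead of being silently replaced.
            notify_failure(service)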


@ -0,0 +1,35 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
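Illustrative use (the path is made up); a second call on an existing directory is a no-op:

from reddwarf.openstack.common import fileutils

fileutils.ensure_tree('/var/lib/reddwarf/locks')  # creates ancestors too
fileutils.ensure_tree('/var/lib/reddwarf/locks')  # already exists: no error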


@ -0,0 +1,33 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from reddwarf.openstack.common.gettextutils import _
"""
import gettext
t = gettext.translation('openstack-common', 'locale', fallback=True)
def _(msg):
return t.ugettext(msg)
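Illustrative use: any string passed through _() is looked up in the 'openstack-common' catalog and falls back to the original text when no translation is installed:

from reddwarf.openstack.common.gettextutils import _

print _('Mounted the volume.')  # translated if a catalog exists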


@ -29,7 +29,7 @@ def import_class(import_str):
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), exc:
except (ValueError, AttributeError):
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))


@ -0,0 +1,130 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ParseError(Exception):
def __init__(self, message, lineno, line):
self.msg = message
self.line = line
self.lineno = lineno
def __str__(self):
return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line)
class BaseParser(object):
lineno = 0
parse_exc = ParseError
def _assignment(self, key, value):
self.assignment(key, value)
return None, []
def _get_section(self, line):
if line[-1] != ']':
return self.error_no_section_end_bracket(line)
if len(line) <= 2:
return self.error_no_section_name(line)
return line[1:-1]
def _split_key_value(self, line):
colon = line.find(':')
equal = line.find('=')
if colon < 0 and equal < 0:
return self.error_invalid_assignment(line)
if colon < 0 or (equal >= 0 and equal < colon):
key, value = line[:equal], line[equal + 1:]
else:
key, value = line[:colon], line[colon + 1:]
value = value.strip()
if ((value and value[0] == value[-1]) and
(value[0] == "\"" or value[0] == "'")):
value = value[1:-1]
return key.strip(), [value]
def parse(self, lineiter):
key = None
value = []
for line in lineiter:
self.lineno += 1
line = line.rstrip()
if not line:
# Blank line, ends multi-line values
if key:
key, value = self._assignment(key, value)
continue
elif line[0] in (' ', '\t'):
# Continuation of previous assignment
if key is None:
self.error_unexpected_continuation(line)
else:
value.append(line.lstrip())
continue
if key:
# Flush previous assignment, if any
key, value = self._assignment(key, value)
if line[0] == '[':
# Section start
section = self._get_section(line)
if section:
self.new_section(section)
elif line[0] in '#;':
self.comment(line[1:].lstrip())
else:
key, value = self._split_key_value(line)
if not key:
return self.error_empty_key(line)
if key:
# Flush previous assignment, if any
self._assignment(key, value)
def assignment(self, key, value):
"""Called when a full assignment is parsed"""
raise NotImplementedError()
def new_section(self, section):
"""Called when a new section is started"""
raise NotImplementedError()
def comment(self, comment):
"""Called when a comment is parsed"""
pass
def error_invalid_assignment(self, line):
raise self.parse_exc("No ':' or '=' found in assignment",
self.lineno, line)
def error_empty_key(self, line):
raise self.parse_exc('Key cannot be empty', self.lineno, line)
def error_unexpected_continuation(self, line):
raise self.parse_exc('Unexpected continuation line',
self.lineno, line)
def error_no_section_end_bracket(self, line):
raise self.parse_exc('Invalid section (must end with ])',
self.lineno, line)
def error_no_section_name(self, line):
raise self.parse_exc('Empty section name', self.lineno, line)
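A sketch of a concrete subclass, to show how the callbacks fit together; DictParser is illustrative and not part of the module. Note that values arrive as lists, since assignments may span continuation lines:

class DictParser(BaseParser):
    def __init__(self):
        self.sections = {}
        self.section = None

    def new_section(self, section):
        self.section = section
        self.sections.setdefault(section, {})

    def assignment(self, key, value):
        self.sections.setdefault(self.section, {})[key] = value

parser = DictParser()
parser.parse(['[DEFAULT]', 'bind_port = 8779'])
assert parser.sections == {'DEFAULT': {'bind_port': ['8779']}}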


@ -120,7 +120,7 @@ def to_primitive(value, convert_instances=False, level=0):
level=level + 1)
else:
return value
except TypeError, e:
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return unicode(value)


@ -0,0 +1,233 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common import fileutils
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory to use for lock files')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError, e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the bar method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run
a method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
The lock_path keyword argument is used to specify a special location for
external lock files to live. If nothing is set, then CONF.lock_path is
used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
'method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
cleanup_dir = True
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
'for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to cleanup
# the locks left behind by unit tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
return retval
return inner
return wrap
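Usage sketch for the decorator; the lock name and prefix are illustrative:

from reddwarf.openstack.common import lockutils

@lockutils.synchronized('status-update', 'reddwarf-')
def update_status():
    pass  # one greenthread at a time within this process

@lockutils.synchronized('status-update', 'reddwarf-', external=True)
def update_status_everywhere():
    pass  # additionally serialized across processes via a lock file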


@ -0,0 +1,476 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import cStringIO
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import stat
import sys
import traceback
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import jsonutils
from reddwarf.openstack.common import local
from reddwarf.openstack.common import notifier
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
'%(user)s %(tenant)s] %(instance)s'
'%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s %(process)d %(levelname)s %(name)s [-]'
' %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s %(process)d TRACE %(name)s %(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=WARN'
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
generic_log_opts = [
cfg.StrOpt('logdir',
default=None,
help='Log output to a per-service log file in named directory'),
cfg.StrOpt('logfile',
default=None,
help='Log output to a named file'),
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error'),
cfg.StrOpt('logfile_mode',
default='0644',
help='Default file mode used when creating log files'),
]
CONF = cfg.CONF
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file or CONF.logfile
logdir = CONF.log_dir or CONF.logdir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
class ContextAdapter(logging.LoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
else:
instance_uuid = kwargs.pop('instance_uuid', None)
if instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
if ('reddwarf.openstack.common.notifier.log_notifier' in
CONF.notification_driver):
return
notifier.api.notify(None, 'error.publisher',
'error_notification',
notifier.api.ERROR,
dict(error=record.msg))
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
def setup(product_name):
"""Setup logging."""
sys.excepthook = _create_logging_excepthook(product_name)
if CONF.log_config:
try:
logging.config.fileConfig(CONF.log_config)
except Exception:
traceback.print_exc()
raise
else:
_setup_logging_from_conf(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf(product_name):
log_root = getLogger(product_name).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
mode = int(CONF.logfile_mode, 8)
st = os.stat(logpath)
if st.st_mode != (stat.S_IFREG | mode):
os.chmod(logpath, mode)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
log_root.addHandler(PublishErrorsHandler(logging.ERROR))
for handler in log_root.handlers:
datefmt = CONF.log_date_format
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
else:
handler.setFormatter(LegacyFormatter(datefmt=datefmt))
if CONF.verbose or CONF.debug:
log_root.setLevel(logging.DEBUG)
else:
log_root.setLevel(logging.INFO)
level = logging.NOTSET
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
for handler in log_root.handlers:
logger.addHandler(handler)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class LegacyFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(sdague): default the fancier formating params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record; Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
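A minimal usage sketch for this module, assuming options have already been parsed as the reddwarf binaries do; the product name, message, and UUID below are invented:

# Sketch only: assumes cfg.parse_args() has already run.
from reddwarf.openstack.common import log as logging

logging.setup('reddwarf')            # install handlers/formatters from CONF
LOG = logging.getLogger(__name__)
LOG.audit('instance created', instance_uuid='0000-fake-uuid')
LOG.deprecated('old option in use')  # warns, or raises if fatal_deprecations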

@@ -0,0 +1,95 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from eventlet import event
from eventlet import greenthread
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCall(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
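A sketch of the intended pattern, with an invented poll function that stops itself after five ticks and hands a value back through wait():

import itertools

_ticks = itertools.count()

def _poll():
    # Raising LoopingCallDone ends the loop; wait() returns its retvalue.
    if next(_ticks) >= 5:
        raise LoopingCallDone(retvalue='finished')

timer = LoopingCall(_poll)
print timer.start(interval=0.1, initial_delay=0.5).wait()  # -> 'finished'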

@@ -0,0 +1,64 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Middleware that attaches a context to the WSGI request
"""
from reddwarf.openstack.common import context
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import wsgi
class ContextMiddleware(wsgi.Middleware):
def __init__(self, app, options):
self.options = options
super(ContextMiddleware, self).__init__(app)
def make_context(self, *args, **kwargs):
"""
Create a context with the given arguments.
"""
# Determine the context class to use
ctxcls = context.RequestContext
if 'context_class' in self.options:
ctxcls = importutils.import_class(self.options['context_class'])
return ctxcls(*args, **kwargs)
def process_request(self, req):
"""
Extract any authentication information in the request and
construct an appropriate context from it.
"""
# Use the default empty context, with admin turned on for
# backwards compatibility
req.context = self.make_context(is_admin=True)
def filter_factory(global_conf, **local_conf):
"""
Factory method for paste.deploy
"""
conf = global_conf.copy()
conf.update(local_conf)
def filter(app):
return ContextMiddleware(app, conf)
return filter
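A sketch of wiring the filter by hand; the stand-in WSGI app is invented, and real deployments reach filter_factory through api-paste.ini instead:

def application(environ, start_response):
    # Stand-in WSGI app for illustration.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['ok']

make_filter = filter_factory({})   # no context_class: RequestContext is used
app = make_filter(application)     # handlers can now read req.context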

@@ -0,0 +1,68 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Network-related utilities and helper functions.
"""
import logging
LOG = logging.getLogger(__name__)
def parse_host_port(address, default_port=None):
"""
Interpret a string as a host:port pair.
An IPv6 address MUST be escaped if accompanied by a port,
because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
means both [2001:db8:85a3::8a2e:370:7334] and
[2001:db8:85a3::8a2e:370]:7334.
>>> parse_host_port('server01:80')
('server01', 80)
>>> parse_host_port('server01')
('server01', None)
>>> parse_host_port('server01', default_port=1234)
('server01', 1234)
>>> parse_host_port('[::1]:80')
('::1', 80)
>>> parse_host_port('[::1]')
('::1', None)
>>> parse_host_port('[::1]', default_port=1234)
('::1', 1234)
>>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
('2001:db8:85a3::8a2e:370:7334', 1234)
"""
if address[0] == '[':
# Escaped ipv6
_host, _port = address[1:].split(']')
host = _host
if ':' in _port:
port = _port.split(':')[1]
else:
port = default_port
else:
if address.count(':') == 1:
host, port = address.split(':')
else:
# 0 means ipv4, >1 means ipv6.
# We prohibit unescaped ipv6 addresses with port.
host = address
port = default_port
return (host, None if port is None else int(port))

@@ -0,0 +1,14 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@@ -0,0 +1,182 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common import context
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import jsonutils
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import timeutils
LOG = logging.getLogger(__name__)
notifier_opts = [
cfg.MultiStrOpt('notification_driver',
default=[],
deprecated_name='list_notifier_drivers',
help='Driver or drivers to handle sending notifications'),
cfg.StrOpt('default_notification_level',
default='INFO',
help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id',
default='$host',
help='Default publisher_id for outgoing notifications'),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
class BadPriorityException(Exception):
pass
def notify_decorator(name, fn):
""" decorator for notify which is used from utils.monkey_patch()
:param name: name of the function
:param function: - object of the function
:returns: function -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
notify(ctxt,
CONF.default_publisher_id,
name,
CONF.default_notification_level,
body)
return fn(*args, **kwarg)
return wrapped_func
def publisher_id(service, host=None):
if not host:
host = CONF.host
return "%s.%s" % (service, host)
def notify(context, publisher_id, event_type, priority, payload):
"""Sends a notification using the specified driver
:param publisher_id: the source worker_type.host of the message
:param event_type: the literal type of event (ex. Instance Creation)
:param priority: patterned after the enumeration of Python logging
levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
:param payload: A python dictionary of attributes
Outgoing message format includes the above parameters, and appends the
following:
message_id
a UUID representing the id for this notification
timestamp
the GMT timestamp the notification was sent at
The composite message will be constructed as a dictionary of the above
attributes, which will then be sent via the transport mechanism defined
by the driver.
Message example::
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
"""
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities') % priority)
# Ensure everything is JSON serializable.
payload = jsonutils.to_primitive(payload, convert_instances=True)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(timeutils.utcnow()))
for driver in _get_drivers():
try:
driver.notify(context, msg)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
% dict(e=e, payload=payload))
_drivers = None
def _get_drivers():
"""Instantiate, cache, and return drivers based on the CONF."""
global _drivers
if _drivers is None:
_drivers = {}
for notification_driver in CONF.notification_driver:
add_driver(notification_driver)
return _drivers.values()
def add_driver(notification_driver):
"""Add a notification driver at runtime."""
# Make sure the driver list is initialized.
_get_drivers()
if isinstance(notification_driver, basestring):
# Load and add
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
else:
# Driver is already loaded; just add the object.
_drivers[notification_driver] = notification_driver
def _reset_drivers():
"""Used by unit tests to reset the drivers."""
global _drivers
_drivers = None
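A sketch of emitting a notification through whichever drivers the notification_driver option configured; the event type and payload are invented, and publisher_id() assumes the project registered a 'host' option (reddwarf's common config does):

from reddwarf.openstack.common.notifier import api as notifier_api

notifier_api.notify(None,                                      # no request context
                    notifier_api.publisher_id('taskmanager'),  # reads CONF.host
                    'instance.create',
                    notifier_api.INFO,
                    {'instance_id': 12})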

@@ -0,0 +1,35 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common import jsonutils
from reddwarf.openstack.common import log as logging
CONF = cfg.CONF
def notify(_context, message):
"""Notifies the recipient of the desired event given the model.
Log notifications using openstack's default logging system"""
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
logger = logging.getLogger(
'reddwarf.openstack.common.notification.%s' %
message['event_type'])
getattr(logger, priority)(jsonutils.dumps(message))

@@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
@@ -15,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
def notify(_context, message):
"""Notifies the recipient of the desired event given the model"""
pass

@@ -0,0 +1,29 @@
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.notifier import rpc_notifier
LOG = logging.getLogger(__name__)
def notify(context, message):
"""Deprecated in Grizzly. Please use rpc_notifier instead."""
LOG.deprecated(_("The rabbit_notifier is now deprecated."
" Please use rpc_notifier instead."))
rpc_notifier.notify(context, message)

@@ -0,0 +1,46 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common import context as req_context
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'notification_topics', default=['notifications', ],
help='AMQP topic used for openstack notifications')
CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)
def notify(context, message):
"""Sends a notification via RPC"""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.notification_topics:
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())

@@ -0,0 +1,22 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
NOTIFICATIONS = []
def notify(_context, message):
"""Test notifier, stores notifications in memory for unittests."""
NOTIFICATIONS.append(message)

@@ -0,0 +1,164 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from paste import deploy
from reddwarf.openstack.common import local
class BasePasteFactory(object):
"""A base class for paste app and filter factories.
Sub-classes must override the KEY class attribute and provide
a __call__ method.
"""
KEY = None
def __init__(self, data):
self.data = data
def _import_factory(self, local_conf):
"""Import an app/filter class.
Lookup the KEY from the PasteDeploy local conf and import the
class named there. This class can then be used as an app or
filter factory.
Note we support the <module>:<class> format.
Note also that if you do e.g.
key =
value
then ConfigParser returns a value with a leading newline, so
we strip() the value before using it.
"""
mod_str, _sep, class_str = local_conf[self.KEY].strip().rpartition(':')
del local_conf[self.KEY]
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
class AppFactory(BasePasteFactory):
"""A Generic paste.deploy app factory.
This requires openstack.app_factory to be set to a callable which returns a
WSGI app when invoked. The format of the name is <module>:<callable> e.g.
[app:myfooapp]
paste.app_factory = openstack.common.pastedeploy:app_factory
openstack.app_factory = myapp:Foo
The WSGI app constructor must accept a data object and a local config
dict as its two arguments.
"""
KEY = 'openstack.app_factory'
def __call__(self, global_conf, **local_conf):
"""The actual paste.app_factory protocol method."""
factory = self._import_factory(local_conf)
return factory(self.data, **local_conf)
class FilterFactory(AppFactory):
"""A Generic paste.deploy filter factory.
This requires openstack.filter_factory to be set to a callable which
returns a WSGI filter when invoked. The format is <module>:<callable> e.g.
[filter:myfoofilter]
paste.filter_factory = openstack.common.pastedeploy:filter_factory
openstack.filter_factory = myfilter:Foo
The WSGI filter constructor must accept a WSGI app, a data object and
a local config dict as its three arguments.
"""
KEY = 'openstack.filter_factory'
def __call__(self, global_conf, **local_conf):
"""The actual paste.filter_factory protocol method."""
factory = self._import_factory(local_conf)
def filter(app):
return factory(app, self.data, **local_conf)
return filter
def app_factory(global_conf, **local_conf):
"""A paste app factory used with paste_deploy_app()."""
return local.store.app_factory(global_conf, **local_conf)
def filter_factory(global_conf, **local_conf):
"""A paste filter factory used with paste_deploy_app()."""
return local.store.filter_factory(global_conf, **local_conf)
def paste_deploy_app(paste_config_file, app_name, data):
"""Load a WSGI app from a PasteDeploy configuration.
Use deploy.loadapp() to load the app from the PasteDeploy configuration,
ensuring that the supplied data object is passed to the app and filter
factories defined in this module.
To use these factories and the data object, the configuration should look
like this:
[app:myapp]
paste.app_factory = openstack.common.pastedeploy:app_factory
openstack.app_factory = myapp:App
...
[filter:myfilter]
paste.filter_factory = openstack.common.pastedeploy:filter_factory
openstack.filter_factory = myapp:Filter
and then:
myapp.py:
class App(object):
def __init__(self, data):
...
class Filter(object):
def __init__(self, app, data):
...
:param paste_config_file: a PasteDeploy config file
:param app_name: the name of the app/pipeline to load from the file
:param data: a data object to supply to the app and its filters
:returns: the WSGI app
"""
(af, ff) = (AppFactory(data), FilterFactory(data))
local.store.app_factory = af
local.store.filter_factory = ff
try:
return deploy.loadapp("config:%s" % paste_config_file, name=app_name)
finally:
del local.store.app_factory
del local.store.filter_factory
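For instance, a hypothetical loading call (the path, pipeline name, and data object are invented):

data = {'shared': 'state handed to every factory'}
app = paste_deploy_app('/etc/reddwarf/api-paste.ini', 'reddwarf', data)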

@@ -0,0 +1,115 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def periodic_task(*args, **kwargs):
"""Decorator to indicate that a method is a periodic task.
This decorator can be used in two ways:
1. Without arguments '@periodic_task', this will be run on every tick
of the periodic scheduler.
2. With arguments, @periodic_task(ticks_between_runs=N), this will be
run on every N ticks of the periodic scheduler.
"""
def decorator(f):
f._periodic_task = True
f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0)
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
# and without parens.
#
# In the 'with-parens' case (with kwargs present), this function needs to
# return a decorator function since the interpreter will invoke it like:
#
# periodic_task(*args, **kwargs)(f)
#
# In the 'without-parens' case, the original function will be passed
# in as the first argument, like:
#
# periodic_task(f)
if kwargs:
return decorator
else:
return decorator(args[0])
class _PeriodicTasksMeta(type):
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to collect decorated periodic tasks."""
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
# NOTE(sirp): if the attribute is not present then we must be the base
# class, so, go ahead and initialize it. If the attribute is present,
# then we're a subclass so make a copy of it so we don't step on our
# parent's toes.
try:
cls._periodic_tasks = cls._periodic_tasks[:]
except AttributeError:
cls._periodic_tasks = []
try:
cls._ticks_to_skip = cls._ticks_to_skip.copy()
except AttributeError:
cls._ticks_to_skip = {}
# This uses __dict__ instead of
# inspect.getmembers(cls, inspect.ismethod) so only the methods of the
# current class are added when this class is scanned, and base classes
# are not added redundantly.
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
task = value
name = task.__name__
cls._periodic_tasks.append((name, task))
cls._ticks_to_skip[name] = task._ticks_between_runs
class PeriodicTasks(object):
__metaclass__ = _PeriodicTasksMeta
def run_periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
ticks_to_skip = self._ticks_to_skip[task_name]
if ticks_to_skip > 0:
LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s"
" ticks left until next run"),
dict(full_task_name=full_task_name,
ticks_to_skip=ticks_to_skip))
self._ticks_to_skip[task_name] -= 1
continue
self._ticks_to_skip[task_name] = task._ticks_between_runs
LOG.debug(_("Running periodic task %(full_task_name)s"),
dict(full_task_name=full_task_name))
try:
task(self, context)
except Exception as e:
if raise_on_error:
raise
LOG.exception(_("Error during %(full_task_name)s:"
" %(e)s"),
dict(e=e, full_task_name=full_task_name))
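A sketch of a manager using both decorator forms (the class and task names are invented):

class ExampleManager(PeriodicTasks):
    @periodic_task
    def _every_tick(self, context):
        LOG.debug('runs on every scheduler tick')

    @periodic_task(ticks_between_runs=2)
    def _every_third_tick(self, context):
        LOG.debug('runs every third tick')

manager = ExampleManager()
manager.run_periodic_tasks(context=None)   # one scheduler tick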

@@ -0,0 +1,779 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import logging
import re
import urllib
import urllib2
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
_rules = None
_checks = {}
class Rules(dict):
"""
A store for rules. Handles the default_rule setting directly.
"""
@classmethod
def load_json(cls, data, default_rule=None):
"""
Allow loading of JSON rule data.
"""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
raise KeyError(key)
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
# Really have to figure out a way to deprecate this
def set_rules(rules):
"""Set the rules in use for policy checks."""
global _rules
_rules = rules
# Ditto
def reset():
"""Clear the rules used for policy checks."""
global _rules
_rules = None
def check(rule, target, creds, exc=None, *args, **kwargs):
"""
Checks authorization of a rule against the target and credentials.
:param rule: The rule to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If exc is not provided, returns
False.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds)
elif not _rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = _rules[rule](target, creds)
except KeyError:
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if exc and result is False:
raise exc(*args, **kwargs)
return result
class BaseCheck(object):
"""
Abstract base class for Check classes.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""
Retrieve a string representation of the Check tree rooted at
this node.
"""
pass
@abc.abstractmethod
def __call__(self, target, cred):
"""
Perform the check. Returns False to reject the access or a
true value (not necessarily True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""
A policy check that always returns False (disallow).
"""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""
A policy check that always returns True (allow).
"""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred):
"""Check the policy."""
return True
class Check(BaseCheck):
"""
A base class to allow for user-defined policy checks.
"""
def __init__(self, kind, match):
"""
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""
A policy check that inverts the result of another policy check.
Implements the "not" operator.
"""
def __init__(self, rule):
"""
Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred):
"""
Check the policy. Returns the logical inverse of the wrapped
check.
"""
return not self.rule(target, cred)
class AndCheck(BaseCheck):
"""
A policy check that requires that a list of other checks all
return True. Implements the "and" operator.
"""
def __init__(self, rules):
"""
Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""
Check the policy. Requires that all rules accept in order to
return True.
"""
for rule in self.rules:
if not rule(target, cred):
return False
return True
def add_check(self, rule):
"""
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""
A policy check that requires that at least one of a list of other
checks returns True. Implements the "or" operator.
"""
def __init__(self, rules):
"""
Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""
Check the policy. Requires that at least one rule accept in
order to return True.
"""
for rule in self.rules:
if rule(target, cred):
return True
return False
def add_check(self, rule):
"""
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""
Parse a single base check rule into an appropriate Check object.
"""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %(rule)s") % locals())
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""
Provided for backwards compatibility. Translates the old
list-of-lists syntax into a tree of Check objects.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if len(or_list) == 0:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""
Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
"""
Metaclass for the ParseState class. Facilitates identifying
reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""
Create the class. Injects the 'reducers' list, a list of
tuples matching token sequences to the names of the
corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""
Decorator for reduction methods. Arguments are a sequence of
tokens, in order, which should trigger running this reduction
method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
class ParseState(object):
"""
Implement the core of parsing the policy language. Uses a greedy
reduction algorithm to reduce a sequence of tokens into a single
terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
__metaclass__ = ParseStateMeta
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""
Perform a greedy reduction of the token stream. If a reducer
method matches, it will be executed, then the reduce() method
will be called recursively to search for any more possible
reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""
Obtain the final result of the parse. Raises ValueError if
the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""
Create an 'and_expr' from two checks joined by the 'and'
operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""
Extend an 'and_expr' by adding one more check.
"""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""
Create an 'or_expr' from two checks joined by the 'or'
operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""
Extend an 'or_expr' by adding one more check.
"""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %(rule)r") % locals())
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""
Parses a policy rule into a tree of Check objects.
"""
# If the rule is a string, it's in the policy language
if isinstance(rule, basestring):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""
Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds):
"""
Recursively checks credentials based on the defined rules.
"""
try:
return _rules[self.match](target, creds)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds):
"""
Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urllib.urlencode(data)
f = urllib2.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds):
"""
Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
"""
# TODO(termie): do dict inspection via dot syntax
match = self.match % target
if self.kind in creds:
return match == unicode(creds[self.kind])
return False
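A sketch of the two-step flow against the code above (the rule, target, and credentials are invented):

rules = Rules.load_json(
    '{"create": "role:admin or project_id:%(project_id)s"}')
set_rules(rules)

target = {'project_id': 'p1'}
creds = {'roles': ['member'], 'project_id': 'p1'}
print check('create', target, creds)   # True, via the project_id match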

@@ -0,0 +1,135 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import logging
import random
import shlex
from eventlet.green import subprocess
from eventlet import greenthread
from reddwarf.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
def execute(*cmd, **kwargs):
"""
Helper method to shell out and execute a command through subprocess with
optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
:type process_input: string
:param check_exit_code: Defaults to 0. Will raise
:class:`ProcessExecutionError`
if the command exits without returning this value
as a returncode
:type check_exit_code: int
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix all cmd's with
:type root_helper: string
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', 0)
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
if len(kwargs):
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root:
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=True)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if (isinstance(check_exit_code, int) and
not isinstance(check_exit_code, bool) and
_returncode != check_exit_code):
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
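A sketch of a call with retries (the command is arbitrary): any exit status other than 0 raises ProcessExecutionError, and up to three attempts are made:

stdout, stderr = execute('ls', '-l', '/tmp',
                         attempts=3, check_exit_code=0)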

@@ -25,12 +25,7 @@ For some wrappers that add message versioning to rpc, see:
rpc.proxy
"""
#TODO(tim.simpson): Doing this as we aren't yet using the real cfg module.
from reddwarf.common.config import OsCommonModule
cfg = OsCommonModule()
#from openstack.common import cfg
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common import importutils
@@ -52,17 +47,24 @@ rpc_opts = [
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['openstack.common.exception',
default=['reddwarf.openstack.common.exception',
'nova.exception',
'cinder.exception',
'exceptions',
],
help='Modules of exceptions that are permitted to be recreated '
'upon receiving exception data from an rpc call.'),
cfg.StrOpt('control_exchange',
default='nova',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
#
# The following options are not registered here, but are expected to be
# present. The project using this library must register these options with
# the configuration so that project-specific defaults may be defined.
#
#cfg.StrOpt('control_exchange',
# default='nova',
# help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
cfg.CONF.register_opts(rpc_opts)
@@ -125,28 +127,6 @@ def cast(context, topic, msg):
return _get_impl().cast(cfg.CONF, context, topic, msg)
def cast_with_consumer(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
nova.rpc.common.Connection.create_consumer() and only applies
when the consumer was created with fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_with_consumer(cfg.CONF, context, topic, msg)
def delete_queue(context, topic):
"""Deletes the queue."""
return _get_impl().delete_queue(cfg.CONF, context, topic)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
@@ -271,7 +251,7 @@ def queue_get_for(context, topic, host):
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host)
return '%s.%s' % (topic, host) if host else topic
_RPCIMPL = None

@@ -26,7 +26,6 @@ AMQP, but is deprecated and predates this code.
"""
import inspect
import logging
import sys
import uuid
@@ -34,11 +33,11 @@ from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common import excutils
#TODO(tim.simpson): Import the true version of Mr. Underscore.
#from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import local
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.rpc import common as rpc_common
@@ -56,7 +55,7 @@ class Pool(pools.Pool):
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug('Pool creating new connection')
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
@@ -151,7 +150,7 @@ class ConnectionContext(rpc_common.Connection):
def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
ending=False):
ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
@@ -159,7 +158,8 @@ def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure)
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
try:
msg = {'result': reply, 'failure': failure}
@ -186,10 +186,10 @@ class RpcContext(rpc_common.CommonRpcContext):
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None):
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
ending)
ending, log_failure)
if ending:
self.msg_id = None
@ -283,8 +283,14 @@ class ProxyCallback(object):
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except Exception as e:
LOG.exception('Exception during message handling')
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
LOG.exception(_('Exception during message handling'))
ctxt.reply(None, sys.exc_info(),
connection_pool=self.connection_pool)
@ -365,8 +371,6 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
with ConnectionContext(conf, connection_pool) as conn:
consumer = conn.declare_topic_consumer(topic=topic)
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
@ -377,23 +381,12 @@ def call(conf, context, topic, msg, timeout, connection_pool):
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
with ConnectionContext(conf, connection_pool) as conn:
consumer = conn.declare_topic_consumer(topic=topic)
LOG.debug(_('Making asynchronous cast on %s...'), topic)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, msg)
def cast_with_consumer(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
consumer = conn.declare_topic_consumer(topic=topic)
conn.topic_send(topic, msg)
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
@ -421,8 +414,9 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg,
def notify(conf, context, topic, msg, connection_pool):
"""Sends a notification event on a topic."""
event_type = msg.get('event_type')
LOG.debug(_('Sending %(event_type)s on %(topic)s'), locals())
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.notify_send(topic, msg)
@ -431,3 +425,10 @@ def notify(conf, context, topic, msg, connection_pool):
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
try:
return conf.control_exchange
except cfg.NoSuchOptError:
return 'openstack'

View File

@ -18,21 +18,14 @@
# under the License.
import copy
import logging
import sys
import traceback
#from reddwarf.openstack.common import cfg
#TODO(tim.simpson): Doing this as we aren't yet using the real cfg module.
from reddwarf.common.config import OsCommonModule
cfg = OsCommonModule()
#TODO(tim.simpson): Import the true version of Mr. Underscore.
#from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import jsonutils
from reddwarf.openstack.common import local
from reddwarf.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@ -48,7 +41,7 @@ class RPCException(Exception):
try:
message = self.message % kwargs
except Exception as e:
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
@ -114,7 +107,7 @@ class Connection(object):
"""
raise NotImplementedError()
def create_consumer(self, conf, topic, proxy, fanout=False):
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer on this connection.
A consumer is associated with a message queue on the backend message
@ -123,7 +116,6 @@ class Connection(object):
off of the queue will determine which method gets called on the proxy
object.
:param conf: An openstack.common.cfg configuration object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic. For example, all instances of nova-compute consume
@ -139,7 +131,7 @@ class Connection(object):
"""
raise NotImplementedError()
def create_worker(self, conf, topic, proxy, pool_name):
def create_worker(self, topic, proxy, pool_name):
"""Create a worker on this connection.
A worker is like a regular consumer of messages directed to a
@ -149,7 +141,6 @@ class Connection(object):
be asked to process it. Load is distributed across the members
of the pool in round-robin fashion.
:param conf: An openstack.common.cfg configuration object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic.
@ -205,7 +196,7 @@ def _safe_log(log_func, msg, msg_data):
return log_func(msg, msg_data)
def serialize_remote_exception(failure_info):
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
@ -213,8 +204,9 @@ def serialize_remote_exception(failure_info):
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
LOG.error(_("Returning exception %s to caller"), unicode(failure))
LOG.error(tb)
if log_failure:
LOG.error(_("Returning exception %s to caller"), unicode(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
@ -268,7 +260,7 @@ def deserialize_remote_exception(conf, data):
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError as e:
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
@ -319,3 +311,36 @@ class CommonRpcContext(object):
context.values['read_deleted'] = read_deleted
return context
class ClientException(Exception):
"""This encapsulates some actual exception that is expected to be
hit by an RPC proxy object. Merely instantiating it records the
current exception information, which will be passed back to the
RPC client without exceptional logging."""
def __init__(self):
self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception, e:
if type(e) in exceptions:
raise ClientException()
else:
raise
def client_exceptions(*exceptions):
"""Decorator for manager methods that raise expected exceptions.
Marking a Manager method with this decorator allows the declaration
of expected exceptions that the RPC layer should not consider fatal,
and not log as if they were generated in a real error scenario. Note
that this will cause listed exceptions to be wrapped in a
ClientException, which is used internally by the RPC layer."""
def outer(func):
def inner(*args, **kwargs):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer
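A sketch of the decorator in use on a manager; GuestError is a hypothetical exception, not part of this commit::

    from reddwarf.openstack.common.rpc import common as rpc_common

    class GuestError(Exception):
        pass

    class Manager(object):

        @rpc_common.client_exceptions(GuestError)
        def prepare(self, context, databases):
            # GuestError is wrapped in ClientException and returned to
            # the caller without the server logging a full traceback;
            # anything else is still logged via LOG.exception().
            raise GuestError()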

View File

@ -41,8 +41,8 @@ server side of the API at the same time. However, as the code stands today,
there can be both versioned and unversioned APIs implemented in the same code
base.
EXAMPLES:
EXAMPLES
========
Nova was the first project to use versioned rpc APIs. Consider the compute rpc
API as an example. The client side is in nova/compute/rpcapi.py and the server
@ -50,12 +50,13 @@ side is in nova/compute/manager.py.
Example 1) Adding a new method.
-------------------------------
Adding a new method is a backwards compatible change. It should be added to
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
have a specific version specified to indicate the minimum API version that must
be implemented for the method to be supported. For example:
be implemented for the method to be supported. For example::
def get_host_uptime(self, ctxt, host):
topic = _compute_topic(self.topic, ctxt, host, None)
@ -67,10 +68,11 @@ get_host_uptime() method.
Example 2) Adding a new parameter.
----------------------------------
Adding a new parameter to an rpc method can be made backwards compatible. The
RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
The implementation of the method must not expect the parameter to be present.
The implementation of the method must not expect the parameter to be present.::
def some_remote_method(self, arg1, arg2, newarg=None):
# The code needs to deal with newarg=None for cases
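The client half of the same change passes the new argument along with the minimum version that understands it. A sketch following the rpc proxy conventions (names are illustrative)::

    def some_remote_method(self, ctxt, arg1, arg2, newarg):
        self.cast(ctxt,
                  self.make_msg('some_remote_method',
                                arg1=arg1, arg2=arg2, newarg=newarg),
                  version='1.1')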

View File

@ -18,11 +18,15 @@ queues. Casts will block, but this is very useful for tests.
"""
import inspect
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized. We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time
import eventlet
from reddwarf.openstack.common import jsonutils
from reddwarf.openstack.common.rpc import common as rpc_common
CONSUMERS = {}
@ -75,6 +79,8 @@ class Consumer(object):
else:
res.append(rval)
done.send(res)
except rpc_common.ClientException as e:
done.send_exception(e._exc_info[1])
except Exception as e:
done.send_exception(e)
@ -121,7 +127,7 @@ def create_connection(conf, new=True):
def check_serialize(msg):
"""Make sure a message intended for rpc can be serialized."""
jsonutils.dumps(msg)
json.dumps(msg)
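Anything the plain json module cannot serialize now fails loudly, which is exactly how non-primitive types are surfaced in tests. A small illustration of the stdlib behaviour::

    import datetime
    import json

    json.dumps({'method': 'ping', 'args': {}})        # fine
    json.dumps({'when': datetime.datetime.utcnow()})  # raises TypeError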
def multicall(conf, context, topic, msg, timeout=None):
@ -154,6 +160,7 @@ def call(conf, context, topic, msg, timeout=None):
def cast(conf, context, topic, msg):
check_serialize(msg)
try:
call(conf, context, topic, msg)
except Exception:

View File

@ -29,14 +29,9 @@ import kombu.connection
import kombu.entity
import kombu.messaging
#from reddwarf.openstack.common import cfg
#TODO(tim.simpson): Doing this as we aren't yet using the real cfg module.
from reddwarf.common.config import OsCommonModule
cfg = OsCommonModule()
#TODO(tim.simpson): Import the true version of Mr. Underscore.
#from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import network_utils
from reddwarf.openstack.common.rpc import amqp as rpc_amqp
from reddwarf.openstack.common.rpc import common as rpc_common
@ -56,10 +51,13 @@ kombu_opts = [
'(valid only if SSL enabled)')),
cfg.StrOpt('rabbit_host',
default='localhost',
help='the RabbitMQ host'),
help='The RabbitMQ broker address where a single node is used'),
cfg.IntOpt('rabbit_port',
default=5672,
help='the RabbitMQ port'),
help='The RabbitMQ broker port where a single node is used'),
cfg.ListOpt('rabbit_hosts',
default=['$rabbit_host:$rabbit_port'],
help='RabbitMQ HA cluster host:port pairs'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
help='connect over SSL for RabbitMQ'),
@ -86,6 +84,11 @@ kombu_opts = [
cfg.BoolOpt('rabbit_durable_queues',
default=False,
help='use durable queues in RabbitMQ'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
help='use H/A queues in RabbitMQ (x-ha-policy: all). '
'You need to wipe the RabbitMQ database when '
'changing this option.'),
]
@ -94,6 +97,20 @@ cfg.CONF.register_opts(kombu_opts)
LOG = rpc_common.LOG
def _get_queue_arguments(conf):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
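Queues declared by this driver pick up the mirroring argument whenever rabbit_ha_queues is set. A minimal illustration using a stand-in conf object (FakeConf is not part of this commit)::

    class FakeConf(object):
        rabbit_ha_queues = True

    _get_queue_arguments(FakeConf())   # -> {'x-ha-policy': 'all'}

    FakeConf.rabbit_ha_queues = False
    _get_queue_arguments(FakeConf())   # -> {}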
class ConsumerBase(object):
"""Consumer base class."""
@ -198,7 +215,7 @@ class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'"""
def __init__(self, conf, channel, topic, callback, tag, name=None,
**kwargs):
exchange_name=None, **kwargs):
"""Init a 'topic' queue.
:param channel: the amqp channel to use
@ -213,10 +230,12 @@ class TopicConsumer(ConsumerBase):
"""
# Default options
options = {'durable': conf.rabbit_durable_queues,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': False,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=conf.control_exchange,
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
exchange = kombu.entity.Exchange(name=exchange_name,
type='topic',
durable=options['durable'],
auto_delete=options['auto_delete'])
@ -248,6 +267,7 @@ class FanoutConsumer(ConsumerBase):
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': True}
options.update(kwargs)
@ -313,8 +333,12 @@ class TopicPublisher(Publisher):
'auto_delete': False,
'exclusive': False}
options.update(kwargs)
super(TopicPublisher, self).__init__(channel, conf.control_exchange,
topic, type='topic', **options)
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(channel,
exchange_name,
topic,
type='topic',
**options)
class FanoutPublisher(Publisher):
@ -337,6 +361,7 @@ class NotifyPublisher(TopicPublisher):
def __init__(self, conf, channel, topic, **kwargs):
self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
self.queue_arguments = _get_queue_arguments(conf)
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
def reconnect(self, channel):
@ -349,7 +374,8 @@ class NotifyPublisher(TopicPublisher):
exchange=self.exchange,
durable=self.durable,
name=self.routing_key,
routing_key=self.routing_key)
routing_key=self.routing_key,
queue_arguments=self.queue_arguments)
queue.declare()
@ -374,31 +400,37 @@ class Connection(object):
if server_params is None:
server_params = {}
# Keys to translate from server_params to kombu params
server_params_to_kombu_params = {'username': 'userid'}
params = {}
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
ssl_params = self._fetch_ssl_params()
params_list = []
for adr in self.conf.rabbit_hosts:
hostname, port = network_utils.parse_host_port(
adr, default_port=self.conf.rabbit_port)
params.setdefault('hostname', self.conf.rabbit_host)
params.setdefault('port', self.conf.rabbit_port)
params.setdefault('userid', self.conf.rabbit_userid)
params.setdefault('password', self.conf.rabbit_password)
params.setdefault('virtual_host', self.conf.rabbit_virtual_host)
params = {
'hostname': hostname,
'port': port,
'userid': self.conf.rabbit_userid,
'password': self.conf.rabbit_password,
'virtual_host': self.conf.rabbit_virtual_host,
}
self.params = params
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
if self.conf.fake_rabbit:
self.params['transport'] = 'memory'
self.memory_transport = True
else:
self.memory_transport = False
if self.conf.fake_rabbit:
params['transport'] = 'memory'
if self.conf.rabbit_use_ssl:
params['ssl'] = ssl_params
if self.conf.rabbit_use_ssl:
self.params['ssl'] = self._fetch_ssl_params()
params_list.append(params)
self.params_list = params_list
self.memory_transport = self.conf.fake_rabbit
self.connection = None
self.reconnect()
@ -428,14 +460,14 @@ class Connection(object):
# Return the extended behavior
return ssl_params
def _connect(self):
def _connect(self, params):
"""Connect to rabbit. Re-establish any queues that may have
been declared before if we are reconnecting. Exceptions should
be handled by the caller.
"""
if self.connection:
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % self.params)
"%(hostname)s:%(port)d") % params)
try:
self.connection.close()
except self.connection_errors:
@ -443,7 +475,7 @@ class Connection(object):
# Setting this in case the next statement fails, though
# it shouldn't be doing any network operations, yet.
self.connection = None
self.connection = kombu.connection.BrokerConnection(**self.params)
self.connection = kombu.connection.BrokerConnection(**params)
self.connection_errors = self.connection.connection_errors
if self.memory_transport:
# Kludge to speed up tests.
@ -456,8 +488,8 @@ class Connection(object):
self.channel._new_queue('ae.undeliver')
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'),
self.params)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
params)
def reconnect(self):
"""Handles reconnecting and re-establishing queues.
@ -470,11 +502,12 @@ class Connection(object):
attempt = 0
while True:
params = self.params_list[attempt % len(self.params_list)]
attempt += 1
try:
self._connect()
self._connect(params)
return
except (self.connection_errors, IOError), e:
except (IOError, self.connection_errors) as e:
pass
except Exception, e:
# NOTE(comstud): Unfortunately it's possible for amqplib
@ -489,12 +522,12 @@ class Connection(object):
log_info = {}
log_info['err_str'] = str(e)
log_info['max_retries'] = self.max_retries
log_info.update(self.params)
log_info.update(params)
if self.max_retries and attempt == self.max_retries:
LOG.exception(_('Unable to connect to AMQP server on '
'%(hostname)s:%(port)d after %(max_retries)d '
'tries: %(err_str)s') % log_info)
LOG.error(_('Unable to connect to AMQP server on '
'%(hostname)s:%(port)d after %(max_retries)d '
'tries: %(err_str)s') % log_info)
# NOTE(comstud): Copied from original code. There's
# really no better recourse because if this was a queue we
# need to consume on, we have no way to consume anymore.
@ -508,9 +541,9 @@ class Connection(object):
sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time
LOG.exception(_('AMQP server on %(hostname)s:%(port)d is'
' unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time)
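Since params_list is built from rabbit_hosts, each retry in the loop above targets the next broker in the list. A toy sketch of that rotation (addresses are made up)::

    params_list = ['rabbit1:5672', 'rabbit2:5672']
    for attempt in range(4):
        print params_list[attempt % len(params_list)]
    # prints: rabbit1:5672, rabbit2:5672, rabbit1:5672, rabbit2:5672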
def ensure(self, error_callback, method, *args, **kwargs):
@ -518,7 +551,8 @@ class Connection(object):
try:
return method(*args, **kwargs)
except (self.connection_errors, socket.timeout, IOError), e:
pass
if error_callback:
error_callback(e)
except Exception, e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
@ -528,8 +562,8 @@ class Connection(object):
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
if error_callback:
error_callback(e)
if error_callback:
error_callback(e)
self.reconnect()
def get_channel(self):
@ -631,10 +665,12 @@ class Connection(object):
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None):
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
@ -725,23 +761,6 @@ def cast(conf, context, topic, msg):
rpc_amqp.get_connection_pool(conf, Connection))
def cast_with_consumer(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast_with_consumer(conf, context, topic, msg,
Connection.pool)
def delete_queue(conf, context, topic):
LOG.debug("Deleting queue with name %s." % topic)
with rpc_amqp.ConnectionContext(conf, Connection.pool) as conn:
channel = conn.channel
durable = conf.rabbit_durable_queues
queue = kombu.entity.Queue(name=topic, channel=channel,
auto_delete=False, exclusive=False,
durable=durable)
queue.delete()
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
@ -758,7 +777,7 @@ def cast_to_server(conf, context, server_params, topic, msg):
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.cast_to_server(
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))

View File

@ -17,7 +17,6 @@
import functools
import itertools
import logging
import time
import uuid
@ -29,6 +28,7 @@ import qpid.messaging.exceptions
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import jsonutils
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.rpc import amqp as rpc_amqp
from reddwarf.openstack.common.rpc import common as rpc_common
@ -41,6 +41,9 @@ qpid_opts = [
cfg.StrOpt('qpid_port',
default='5672',
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
@ -50,26 +53,8 @@ qpid_opts = [
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.BoolOpt('qpid_reconnect',
default=True,
help='Automatically reconnect'),
cfg.IntOpt('qpid_reconnect_timeout',
default=0,
help='Reconnection timeout in seconds'),
cfg.IntOpt('qpid_reconnect_limit',
default=0,
help='Max reconnections before giving up'),
cfg.IntOpt('qpid_reconnect_interval_min',
default=0,
help='Minimum seconds between reconnection attempts'),
cfg.IntOpt('qpid_reconnect_interval_max',
default=0,
help='Maximum seconds between reconnection attempts'),
cfg.IntOpt('qpid_reconnect_interval',
default=0,
help='Equivalent to setting max and min to the same value'),
cfg.IntOpt('qpid_heartbeat',
default=5,
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
@ -170,7 +155,8 @@ class DirectConsumer(ConsumerBase):
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'"""
def __init__(self, conf, session, topic, callback, name=None):
def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
@ -180,9 +166,9 @@ class TopicConsumer(ConsumerBase):
:param name: optional queue name, defaults to topic
"""
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
super(TopicConsumer, self).__init__(session, callback,
"%s/%s" % (conf.control_exchange,
topic),
"%s/%s" % (exchange_name, topic),
{}, name or topic, {})
@ -256,9 +242,9 @@ class TopicPublisher(Publisher):
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
super(TopicPublisher, self).__init__(
session,
"%s/%s" % (conf.control_exchange, topic))
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic))
class FanoutPublisher(Publisher):
@ -276,10 +262,10 @@ class NotifyPublisher(Publisher):
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
super(NotifyPublisher, self).__init__(
session,
"%s/%s" % (conf.control_exchange, topic),
{"durable": True})
exchange_name = rpc_amqp.get_control_exchange(conf)
super(NotifyPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic),
{"durable": True})
class Connection(object):
@ -293,50 +279,42 @@ class Connection(object):
self.consumer_thread = None
self.conf = conf
if server_params is None:
server_params = {}
if server_params and 'hostname' in server_params:
# NOTE(russellb) This enables support for cast_to_server.
server_params['qpid_hosts'] = [
'%s:%d' % (server_params['hostname'],
server_params.get('port', 5672))
]
default_params = dict(hostname=self.conf.qpid_hostname,
port=self.conf.qpid_port,
username=self.conf.qpid_username,
password=self.conf.qpid_password)
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
params.update(server_params or {})
params = server_params
for key in default_params.keys():
params.setdefault(key, default_params[key])
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
self.connection_create(self.brokers[0])
self.reconnect()
self.broker = params['hostname'] + ":" + str(params['port'])
def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid.messaging.Connection(self.broker)
self.connection = qpid.messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = params['username']
self.connection.password = params['password']
self.connection.username = self.username
self.connection.password = self.password
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
self.connection.reconnect = self.conf.qpid_reconnect
if self.conf.qpid_reconnect_timeout:
self.connection.reconnect_timeout = (
self.conf.qpid_reconnect_timeout)
if self.conf.qpid_reconnect_limit:
self.connection.reconnect_limit = self.conf.qpid_reconnect_limit
if self.conf.qpid_reconnect_interval_max:
self.connection.reconnect_interval_max = (
self.conf.qpid_reconnect_interval_max)
if self.conf.qpid_reconnect_interval_min:
self.connection.reconnect_interval_min = (
self.conf.qpid_reconnect_interval_min)
if self.conf.qpid_reconnect_interval:
self.connection.reconnect_interval = (
self.conf.qpid_reconnect_interval)
self.connection.hearbeat = self.conf.qpid_heartbeat
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.protocol = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
# Open is part of reconnect -
# NOTE(WGH) not sure we need this with the reconnect flags
self.reconnect()
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
@ -351,23 +329,36 @@ class Connection(object):
except qpid.messaging.exceptions.ConnectionError:
pass
attempt = 0
delay = 1
while True:
broker = self.brokers[attempt % len(self.brokers)]
attempt += 1
try:
self.connection_create(broker)
self.connection.open()
except qpid.messaging.exceptions.ConnectionError, e:
LOG.error(_('Unable to connect to AMQP server: %s'), e)
time.sleep(self.conf.qpid_reconnect_interval or 1)
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
LOG.info(_('Connected to AMQP server on %s'), self.broker)
self.session = self.connection.session()
for consumer in self.consumers.itervalues():
consumer.reconnect(self.session)
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in consumers.itervalues():
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
@ -464,10 +455,12 @@ class Connection(object):
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None):
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)

View File

@ -49,7 +49,7 @@ zmq_opts = [
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('openstack.common.rpc.'
default=('reddwarf.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
@ -72,7 +72,7 @@ zmq_opts = [
# These globals are defined in register_opts(conf),
# a mandatory initialization call
FLAGS = None
CONF = None
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
@ -259,7 +259,14 @@ class InternalContext(object):
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException, e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
@ -274,7 +281,7 @@ class InternalContext(object):
ctx.replies)
LOG.debug(_("Sending reply"))
cast(FLAGS, ctx, topic, {
cast(CONF, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id,
@ -329,7 +336,6 @@ class ZmqBaseReactor(ConsumerBase):
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.conf = conf
self.mapping = {}
self.proxies = {}
self.threads = []
@ -405,7 +411,7 @@ class ZmqProxy(ZmqBaseReactor):
super(ZmqProxy, self).__init__(conf)
self.topic_proxy = {}
ipc_dir = conf.rpc_zmq_ipc_dir
ipc_dir = CONF.rpc_zmq_ipc_dir
self.topic_proxy['zmq_replies'] = \
ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ),
@ -413,7 +419,7 @@ class ZmqProxy(ZmqBaseReactor):
self.sockets.append(self.topic_proxy['zmq_replies'])
def consume(self, sock):
ipc_dir = self.conf.rpc_zmq_ipc_dir
ipc_dir = CONF.rpc_zmq_ipc_dir
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
@ -487,7 +493,6 @@ class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.conf = conf
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
@ -508,7 +513,7 @@ class Connection(rpc_common.Connection):
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(self.conf.rpc_zmq_ipc_dir, topic)
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
@ -527,7 +532,7 @@ class Connection(rpc_common.Connection):
def _cast(addr, context, msg_id, topic, msg, timeout=None):
timeout_cast = timeout or FLAGS.rpc_cast_timeout
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
@ -545,13 +550,13 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None):
def _call(addr, context, msg_id, topic, msg, timeout=None):
# timeout_response is how long we wait for a response
timeout = timeout or FLAGS.rpc_response_timeout
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = str(uuid.uuid4().hex)
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % FLAGS.rpc_zmq_host
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
@ -573,7 +578,7 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies" % FLAGS.rpc_zmq_ipc_dir,
"ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir,
zmq.SUB, subscribe=msg_id, bind=False
)
@ -599,7 +604,7 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(FLAGS, resp['exc'])
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
@ -610,7 +615,7 @@ def _multi_send(method, context, topic, msg, timeout=None):
dispatches to the matchmaker and sends
message to all relevant hosts.
"""
conf = FLAGS
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = matchmaker.queues(topic)
@ -641,26 +646,22 @@ def create_connection(conf, new=True):
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
register_opts(conf)
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
register_opts(conf)
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
register_opts(conf)
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
register_opts(conf)
# NOTE(ewindisch): fanout~ is used because it avoid splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
@ -672,7 +673,6 @@ def notify(conf, context, topic, msg, **kwargs):
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
register_opts(conf)
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic.replace('.', '-')
@ -684,7 +684,7 @@ def cleanup():
global ZMQ_CTX
global matchmaker
matchmaker = None
ZMQ_CTX.destroy()
ZMQ_CTX.term()
ZMQ_CTX = None
@ -697,11 +697,11 @@ def register_opts(conf):
# We memoize through these globals
global ZMQ_CTX
global matchmaker
global FLAGS
global CONF
if not FLAGS:
if not CONF:
conf.register_opts(zmq_opts)
FLAGS = conf
CONF = conf
# Don't re-set, if this method is called twice.
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)

View File

@ -21,10 +21,10 @@ return keys for direct exchanges, per (approximate) AMQP parlance.
import contextlib
import itertools
import json
import logging
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import log as logging
matchmaker_opts = [

View File

@ -0,0 +1,75 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import rpc
from reddwarf.openstack.common.rpc import dispatcher as rpc_dispatcher
from reddwarf.openstack.common import service
LOG = logging.getLogger(__name__)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host."""
def __init__(self, host, topic, manager=None):
super(Service, self).__init__()
self.host = host
self.topic = topic
if manager is None:
self.manager = self
else:
self.manager = manager
def start(self):
super(Service, self).start()
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
self.topic)
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager])
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, dispatcher, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, dispatcher, fanout=False)
self.conn.create_consumer(self.topic, dispatcher, fanout=True)
# Hook to allow the manager to do other initializations after
# the rpc connection is created.
if callable(getattr(self.manager, 'initialize_service_hook', None)):
self.manager.initialize_service_hook(self)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
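A sketch of how a binary could wire a manager into this service class; the topic and Manager are illustrative stand-ins, not the actual reddwarf binaries::

    from reddwarf.openstack.common import service
    from reddwarf.openstack.common.rpc import service as rpc_service

    class Manager(object):
        def ping(self, context):
            # invoked via the dispatcher for messages on the topic
            return 'pong'

    server = rpc_service.Service(host='myhost',
                                 topic='taskmanager',
                                 manager=Manager())
    launcher = service.launch(server)
    launcher.wait()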

View File

@ -0,0 +1,325 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import os
import random
import signal
import sys
import time
import eventlet
import extras
import logging as std_logging
from reddwarf.openstack.common import cfg
from reddwarf.openstack.common import eventlet_backdoor
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import threadgroup
rpc = extras.try_import('reddwarf.openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self._services = threadgroup.ThreadGroup('launcher')
eventlet_backdoor.initialize_if_enabled()
@staticmethod
def run_service(service):
"""Start and wait for a service to finish.
:param service: service to run and wait for.
:returns: None
"""
service.start()
service.wait()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
self._services.add_thread(self.run_service, service)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
self._services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self._services.wait()
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
raise SignalExit(signo)
def wait(self):
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
LOG.debug(_('Full set of CONF:'))
CONF.log_opt_values(LOG, std_logging.DEBUG)
status = None
try:
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[exc.signo]
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
except SystemExit as exc:
status = exc.code
finally:
if rpc:
rpc.cleanup()
self.stop()
return status
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self):
self.children = {}
self.sigcaught = None
self.running = True
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process(self, service):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
signal.signal(signal.SIGTERM, _sigterm)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.run_service(service)
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This will allow workers to
# start up quickly but ensure we don't fork off children that
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
status = 0
try:
self._child_process(wrap.service)
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[exc.signo]
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
status = 2
finally:
wrap.service.stop()
os._exit(status)
LOG.info(_('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
pid, status = os.wait()
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def wait(self):
"""Loop waiting on children to die and respawning as necessary"""
LOG.debug(_('Full set of CONF:'))
CONF.log_opt_values(LOG, std_logging.DEBUG)
while self.running:
wrap = self._wait_child()
if not wrap:
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
if self.sigcaught:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[self.sigcaught]
LOG.info(_('Caught %s, stopping children'), signame)
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts."""
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup('service', threads)
def start(self):
pass
def stop(self):
self.tg.stop()
def wait(self):
self.tg.wait()
def launch(service, workers=None):
if workers:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
else:
launcher = ServiceLauncher()
launcher.launch_service(service)
return launcher
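launch() picks the launcher from the workers argument: a count forks that many children via ProcessLauncher, while None keeps the service in-process under ServiceLauncher. For example (svc stands for any Service instance)::

    launcher = launch(svc)             # in-process ServiceLauncher
    launcher = launch(svc, workers=4)  # ProcessLauncher with 4 children
    launcher.wait()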

View File

@ -19,26 +19,31 @@
Utilities with minimum-depends for use in setup.py
"""
import datetime
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
with open(mailmap, 'r') as fp:
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = [x for x in l.split(' ')
if x.startswith('<')]
mapping[alias] = canonical_email
return mapping
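The rewritten parser keys on the angle brackets, so a standard two-address .mailmap line maps the alias to the canonical address. For example (addresses are made up)::

    # .mailmap line:
    #   Jane Doe <jane@example.com> <jdoe@old.example.com>
    parse_mailmap()
    # -> {'<jdoe@old.example.com>': '<jane@example.com>'}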
def canonicalize_emails(changelog, mapping):
""" Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email in mapping.iteritems():
changelog = changelog.replace(alias, email)
@ -47,10 +52,10 @@ def canonicalize_emails(changelog, mapping):
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
reqs_in = []
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
return open(requirements_file, 'r').read().split('\n')
with open(requirements_file, 'r') as fil:
return fil.read().split('\n')
return []
@ -58,11 +63,25 @@ def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
# argparse is part of the standard library starting with 2.7
# adding it to the requirements list screws distro installs
elif line == 'argparse' and sys.version_info >= (2, 7):
pass
else:
requirements.append(line)
@ -72,11 +91,18 @@ def parse_requirements(requirements_files=['requirements.txt',
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
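Together the two parsers split a pip-style line between the install_requires list and dependency_links. A sketch of how a few made-up lines are handled::

    # '-e git://github.com/openstack/nova/master#egg=nova'
    #   parse_requirements      -> 'nova'
    #   parse_dependency_links  -> 'git://github.com/openstack/nova/master#egg=nova'
    #
    # 'http://example.com/nova.zip#egg=nova'
    #   parse_requirements      -> 'nova'
    #   parse_dependency_links  -> 'http://example.com/nova.zip#egg=nova'
    #
    # '-f http://example.com/simple/'
    #   parse_requirements      -> (dropped)
    #   parse_dependency_links  -> 'http://example.com/simple/'
    #
    # 'argparse'  (on Python >= 2.7)
    #   parse_requirements      -> (dropped)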
@ -91,37 +117,250 @@ def write_requirements():
def _run_shell_command(cmd):
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
return output.communicate()[0].strip()
if os.name == 'nt':
output = subprocess.Popen(["cmd.exe", "/C", cmd],
stdout=subprocess.PIPE)
else:
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def write_vcsversion(location):
""" Produce a vcsversion dict that mimics the old one produced by bzr
"""
if os.path.isdir('.git'):
branch_nick_cmd = 'git branch | grep -Ei "\* (.*)" | cut -f2 -d" "'
branch_nick = _run_shell_command(branch_nick_cmd)
revid_cmd = "git rev-parse HEAD"
revid = _run_shell_command(revid_cmd).split()[0]
revno_cmd = "git log --oneline | wc -l"
revno = _run_shell_command(revno_cmd)
with open(location, 'w') as version_file:
version_file.write("""
# This file is automatically generated by setup.py, So don't edit it. :)
version_info = {
'branch_nick': '%s',
'revision_id': '%s',
'revno': %s
}
""" % (branch_nick, revid, revno))
def _get_git_next_version_suffix(branch_name):
datestamp = datetime.datetime.now().strftime('%Y%m%d')
if branch_name == 'milestone-proposed':
revno_prefix = "r"
else:
revno_prefix = ""
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
milestonever = _run_shell_command(milestone_cmd)
if milestonever:
first_half = "%s~%s" % (milestonever, datestamp)
else:
first_half = datestamp
post_version = _get_git_post_version()
# post version should look like:
# 0.1.1.4.gcc9e28a
# where the bit after the last . is the short sha, and the bit between
# the last and second to last is the revno count
(revno, sha) = post_version.split(".")[-2:]
second_half = "%s%s.%s" % (revno_prefix, revno, sha)
return ".".join((first_half, second_half))
def _get_git_current_tag():
return _run_shell_command("git tag --contains HEAD")
def _get_git_tag_info():
return _run_shell_command("git describe --tags")
def _get_git_post_version():
current_tag = _get_git_current_tag()
if current_tag is not None:
return current_tag
else:
tag_info = _get_git_tag_info()
if tag_info is None:
base_version = "0.0"
cmd = "git --no-pager log --oneline"
out = _run_shell_command(cmd)
revno = len(out.split("\n"))
sha = _run_shell_command("git describe --always")
else:
tag_infos = tag_info.split("-")
base_version = "-".join(tag_infos[:-2])
(revno, sha) = tag_infos[-2:]
return "%s.%s.%s" % (base_version, revno, sha)
def write_git_changelog():
""" Write a changelog based on the git changelog """
"""Write a changelog based on the git changelog."""
new_changelog = 'ChangeLog'
if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_changelog, "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
else:
open(new_changelog, 'w').close()
def generate_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = 'jenkins@review.(openstack|stackforge).org'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
if not os.getenv('SKIP_GENERATE_AUTHORS'):
if os.path.isdir('.git'):
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
"egrep -v '" + jenkins_email + "'")
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
else:
open(new_authors, 'w').close()
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def read_versioninfo(project):
"""Read the versioninfo file. If it doesn't exist, we're in a github
zipball, and there's really no way to know what version we really
are, but that should be ok, because the utility of that should be
just about nil if this code path is in use in the first place."""
versioninfo_path = os.path.join(project, 'versioninfo')
if os.path.exists(versioninfo_path):
with open(versioninfo_path, 'r') as vinfo:
version = vinfo.read().strip()
else:
version = "0.0.0"
return version
def write_versioninfo(project, version):
"""Write a simple file containing the version of the package."""
with open(os.path.join(project, 'versioninfo'), 'w') as fil:
fil.write("%s\n" % version)
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
option_dict = self.distribution.get_option_dict('build_sphinx')
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
if not os.path.exists(source_dir):
os.makedirs(source_dir)
for pkg in self.distribution.packages:
if '.' not in pkg:
os.path.walk(pkg, _find_modules, modules)
module_list = modules.keys()
module_list.sort()
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
print "Generating %s" % output_filename
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def run(self):
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
for builder in ['html', 'man']:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
cmdclass['build_sphinx'] = LocalBuildDoc
except ImportError:
pass
return cmdclass
def get_git_branchname():
for branch in _run_shell_command("git branch --color=never").split("\n"):
if branch.startswith('*'):
_branch_name = branch.split()[1].strip()
if _branch_name == "(no":
_branch_name = "no-branch"
return _branch_name
def get_pre_version(projectname, base_version):
"""Return a version which is leading up to a version that will
be released in the future."""
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
current_tag = _get_git_current_tag()
if current_tag is not None:
version = current_tag
else:
branch_name = os.getenv('BRANCHNAME',
os.getenv('GERRIT_REFNAME',
get_git_branchname()))
version_suffix = _get_git_next_version_suffix(branch_name)
version = "%s~%s" % (base_version, version_suffix)
write_versioninfo(projectname, version)
return version
else:
version = read_versioninfo(projectname)
return version
def get_post_version(projectname):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
version = _get_git_post_version()
write_versioninfo(projectname, version)
return version
return read_versioninfo(projectname)

View File

@ -0,0 +1,68 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for unit tests."""
import functools
import nose
class skip_test(object):
"""Decorator that skips a test."""
# TODO(tr3buchet): remember forever what comstud did here
def __init__(self, msg):
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
raise nose.SkipTest(self.message)
return _skipper
class skip_if(object):
"""Decorator that skips a test if condition is true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
class skip_unless(object):
"""Decorator that skips a test if condition is not true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if not self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
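A sketch of the decorators in use; the test class and condition are illustrative, and the import path assumes the module lands under reddwarf.openstack.common::

    import os
    import unittest

    from reddwarf.openstack.common import testutils

    class RabbitTestCase(unittest.TestCase):

        @testutils.skip_test('not ready yet')
        def test_always_skipped(self):
            pass

        @testutils.skip_unless(os.getenv('RABBIT_HOST'),
                               'needs a running RabbitMQ')
        def test_needs_rabbit(self):
            pass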

Some files were not shown because too many files have changed in this diff.