trunk merge

This commit is contained in:
Sandy Walsh
2011-06-29 10:47:30 -07:00
51 changed files with 2163 additions and 304 deletions

View File

@@ -47,3 +47,7 @@
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
<reldan@oscloud.ru> <enugaev@griddynamics.com>
<kshileev@gmail.com> <kshileev@griddynamics.com>

View File

@@ -22,14 +22,14 @@ David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eldar Nugaev <reldan@oscloud.ru>
Eric Day <eday@oddments.org>
Eric Windisch <eric@cloudscaling.com>
Ewan Mellor <ewan.mellor@citrix.com>
Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
Ilya Alekseyev <ilyaalekseyev@acm.org>
Isaku Yamahata <yamahata@valinux.co.jp>
Jason Cannavale <jason.cannavale@rackspace.com>
Jason Koelker <jason@koelker.net>
@@ -53,6 +53,7 @@ Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Kirill Shileev <kshileev@gmail.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Lvov Maxim <usrleon@gmail.com>

116
bin/instance-usage-audit Executable file
View File

@@ -0,0 +1,116 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cron script to generate usage notifications for instances neither created
nor destroyed in a given time period.
Together with the notifications generated by compute on instance
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate instance usage
for each tenant.
Time periods are specified like so:
<number>[mdy]
1m = previous month. If the script is run April 1, it will generate usages
for March 1 thru March 31.
3m = 3 previous months.
90d = previous 90 days.
1y = previous year. If run on Jan 1, it generates usages for
Jan 1 thru Dec 31 of the previous year.
"""
import datetime
import gettext
import os
import sys
import time
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.notifier import api as notifier_api
FLAGS = flags.FLAGS
flags.DEFINE_string('instance_usage_audit_period', '1m',
'time period to generate instance usages for.')
def time_period(period):
    """Parse a time period spec and return a (begin, end) datetime pair.

    *period* is "<number><unit>" where unit is one of:
      m - previous calendar months (e.g. '1m' = all of last month)
      d - previous days, ending at the start of today
      y - previous calendar years

    Raises ValueError if the spec is empty, has an unknown unit, or has
    a non-integer count.
    """
    today = datetime.date.today()
    if not period:
        # An empty spec would otherwise raise IndexError on period[-1];
        # report it as the same malformed-spec error as a bad unit.
        raise ValueError('Time period must be m, d, or y')
    unit = period[-1]
    if unit not in 'mdy':
        raise ValueError('Time period must be m, d, or y')
    n = int(period[:-1])
    if unit == 'm':
        year = today.year - (n // 12)
        n = n % 12
        if n >= today.month:
            # The window start wraps past January into the previous year.
            year -= 1
            month = 12 + (today.month - n)
        else:
            month = today.month - n
        begin = datetime.datetime(day=1, month=month, year=year)
        end = datetime.datetime(day=1, month=today.month, year=today.year)
    elif unit == 'y':
        begin = datetime.datetime(day=1, month=1, year=today.year - n)
        end = datetime.datetime(day=1, month=1, year=today.year)
    elif unit == 'd':
        b = today - datetime.timedelta(days=n)
        begin = datetime.datetime(day=b.day, month=b.month, year=b.year)
        end = datetime.datetime(day=today.day,
                                month=today.month,
                                year=today.year)
    return (begin, end)
if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
begin, end = time_period(FLAGS.instance_usage_audit_period)
print "Creating usages for %s until %s" % (str(begin), str(end))
instances = db.instance_get_active_by_window(context.get_admin_context(),
begin,
end)
print "%s instances" % len(instances)
for instance_ref in instances:
usage_info = utils.usage_from_instance(instance_ref,
audit_period_begining=str(begin),
audit_period_ending=str(end))
notifier_api.notify('compute.%s' % FLAGS.host,
'compute.instance.exists',
notifier_api.INFO,
usage_info)

View File

@@ -137,8 +137,9 @@ if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
server = wsgi.Server()
acp_port = FLAGS.ajax_console_proxy_port
acp = AjaxConsoleProxy()
acp.register_listeners()
server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
server.start()
server.wait()

View File

@@ -1,5 +1,4 @@
#!/usr/bin/env python
# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -18,44 +17,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova API."""
"""Starter script for Nova API.
Starts both the EC2 and OpenStack APIs in separate processes.
"""
import gettext
import os
import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
sys.argv[0]), os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
from nova import version
from nova import wsgi
import nova.service
import nova.utils
LOG = logging.getLogger('nova.api')
def main():
"""Launch EC2 and OSAPI services."""
nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
ec2 = nova.service.WSGIService("ec2")
osapi = nova.service.WSGIService("osapi")
launcher = nova.service.Launcher()
launcher.launch_service(ec2)
launcher.launch_service(osapi)
try:
launcher.wait()
except KeyboardInterrupt:
launcher.stop()
FLAGS = flags.FLAGS
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
LOG.audit(_("Starting nova-api node (version %s)"),
version.version_string_with_vcs())
LOG.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
flag_get = FLAGS.get(flag, None)
LOG.debug("%(flag)s : %(flag_get)s" % locals())
service = service.serve_wsgi(service.ApiService)
service.wait()
sys.exit(main())

View File

@@ -93,6 +93,9 @@ if __name__ == '__main__':
with_req = direct.PostParamsMiddleware(with_json)
with_auth = direct.DelegatedAuthMiddleware(with_req)
server = wsgi.Server()
server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host)
server = wsgi.Server("Direct API",
with_auth,
host=FLAGS.direct_host,
port=FLAGS.direct_port)
server.start()
server.wait()

View File

@@ -50,6 +50,9 @@ if __name__ == '__main__':
FLAGS(sys.argv)
logging.setup()
router = s3server.S3Application(FLAGS.buckets_path)
server = wsgi.Server()
server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
server = wsgi.Server("S3 Objectstore",
router,
port=FLAGS.s3_port,
host=FLAGS.s3_host)
server.start()
server.wait()

View File

@@ -96,6 +96,9 @@ if __name__ == "__main__":
service.serve()
server = wsgi.Server()
server.start(with_auth, FLAGS.vncproxy_port, host=FLAGS.vncproxy_host)
server = wsgi.Server("VNC Proxy",
with_auth,
host=FLAGS.vncproxy_host,
port=FLAGS.vncproxy_port)
server.start()
server.wait()

View File

@@ -30,3 +30,8 @@
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
import gettext
gettext.install("nova", unicode=1)

View File

@@ -0,0 +1,126 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" The instance type extra specs extension"""
from webob import exc
from nova import db
from nova import quota
from nova.api.openstack import extensions
from nova.api.openstack import faults
from nova.api.openstack import wsgi
class FlavorExtraSpecsController(object):
    """The flavor extra specs API controller for the Openstack API."""

    def _get_extra_specs(self, context, flavor_id):
        """Return {'extra_specs': {...}} for the given flavor."""
        extra_specs = db.api.instance_type_extra_specs_get(context, flavor_id)
        specs_dict = {}
        for key, value in extra_specs.iteritems():
            specs_dict[key] = value
        return dict(extra_specs=specs_dict)

    def _check_body(self, body):
        """Reject requests that arrive with no body at all."""
        # 'is None' instead of '== None'; an empty dict is still accepted,
        # matching the original behavior.
        if body is None or body == "":
            expl = _('No Request Body')
            raise exc.HTTPBadRequest(explanation=expl)

    def index(self, req, flavor_id):
        """Return the list of extra specs for a given flavor."""
        context = req.environ['nova.context']
        return self._get_extra_specs(context, flavor_id)

    def create(self, req, flavor_id, body):
        """Create/update the extra specs given in the request body."""
        self._check_body(body)
        context = req.environ['nova.context']
        specs = body.get('extra_specs')
        try:
            db.api.instance_type_extra_specs_update_or_create(context,
                                                              flavor_id,
                                                              specs)
        except quota.QuotaError as error:
            self._handle_quota_error(error)
        return body

    def update(self, req, flavor_id, id, body):
        """Update one extra spec; the body must be exactly {id: value}."""
        self._check_body(body)
        context = req.environ['nova.context']
        if id not in body:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)
        if len(body) > 1:
            expl = _('Request body contains too many items')
            raise exc.HTTPBadRequest(explanation=expl)
        try:
            db.api.instance_type_extra_specs_update_or_create(context,
                                                              flavor_id,
                                                              body)
        except quota.QuotaError as error:
            self._handle_quota_error(error)
        return body

    def show(self, req, flavor_id, id):
        """Return a single extra spec item, or 404 if the key is absent."""
        context = req.environ['nova.context']
        specs = self._get_extra_specs(context, flavor_id)
        if id in specs['extra_specs']:
            return {id: specs['extra_specs'][id]}
        else:
            return faults.Fault(exc.HTTPNotFound())

    def delete(self, req, flavor_id, id):
        """Delete an existing extra spec."""
        context = req.environ['nova.context']
        db.api.instance_type_extra_specs_delete(context, flavor_id, id)

    def _handle_quota_error(self, error):
        """Reraise quota errors as api-specific http exceptions."""
        if error.code == "MetadataLimitExceeded":
            raise exc.HTTPBadRequest(explanation=error.message)
        raise error
class Flavorextraspecs(extensions.ExtensionDescriptor):
    """Extension descriptor for instance type (flavor) extra specs."""

    def get_name(self):
        return "FlavorExtraSpecs"

    def get_alias(self):
        return "os-flavor-extra-specs"

    def get_description(self):
        return "Instance type (flavor) extra specs"

    def get_namespace(self):
        return "http://docs.openstack.org/ext/flavor_extra_specs/api/v1.1"

    def get_updated(self):
        return "2011-06-23T00:00:00+00:00"

    def get_resources(self):
        # Nest the extra-specs resource under /flavors/{flavor_id}/.
        parent = dict(member_name='flavor', collection_name='flavors')
        resource = extensions.ResourceExtension(
            'os-extra_specs',
            FlavorExtraSpecsController(),
            parent=parent)
        return [resource]

View File

@@ -0,0 +1,172 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from webob import exc
from nova import exception
from nova import network
from nova import rpc
from nova.api.openstack import faults
from nova.api.openstack import extensions
def _translate_floating_ip_view(floating_ip):
result = {'id': floating_ip['id'],
'ip': floating_ip['address']}
if 'fixed_ip' in floating_ip:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
else:
result['fixed_ip'] = None
if 'instance' in floating_ip:
result['instance_id'] = floating_ip['instance']['id']
else:
result['instance_id'] = None
return {'floating_ip': result}
def _translate_floating_ips_view(floating_ips):
    """Convert a list of floating-ip records into the API list view."""
    views = [_translate_floating_ip_view(ip) for ip in floating_ips]
    return {'floating_ips': views}
class FloatingIPController(object):
    """The Floating IPs API controller for the OpenStack API."""

    _serialization_metadata = {
        'application/xml': {
            "attributes": {
                "floating_ip": [
                    "id",
                    "ip",
                    "instance_id",
                    "fixed_ip",
                    ]}}}

    def __init__(self):
        self.network_api = network.API()
        super(FloatingIPController, self).__init__()

    def show(self, req, id):
        """Return data about the given floating ip."""
        context = req.environ['nova.context']
        try:
            floating_ip = self.network_api.get_floating_ip(context, id)
        except exception.NotFound:
            return faults.Fault(exc.HTTPNotFound())
        return _translate_floating_ip_view(floating_ip)

    def index(self, req):
        """List all floating ips visible in this context."""
        context = req.environ['nova.context']
        floating_ips = self.network_api.list_floating_ips(context)
        return _translate_floating_ips_view(floating_ips)

    def create(self, req, body):
        """Allocate a floating ip and return its id and address."""
        context = req.environ['nova.context']
        try:
            address = self.network_api.allocate_floating_ip(context)
            ip = self.network_api.get_floating_ip_by_ip(context, address)
        except rpc.RemoteError as ex:
            # Translate pool exhaustion into the nova-level exception;
            # any other remote error propagates unchanged.
            if ex.exc_type == 'NoMoreAddresses':
                raise exception.NoMoreFloatingIps()
            else:
                raise
        return {'allocated': {
            "id": ip['id'],
            "floating_ip": ip['address']}}

    def delete(self, req, id):
        """Release the given floating ip back to the pool."""
        context = req.environ['nova.context']
        ip = self.network_api.get_floating_ip(context, id)
        # NOTE(review): the whole ip record (not ip['address']) is passed
        # as 'address' -- confirm release_floating_ip accepts a record.
        self.network_api.release_floating_ip(context, address=ip)
        return {'released': {
            "id": ip['id'],
            "floating_ip": ip['address']}}

    def associate(self, req, id, body):
        """ /floating_ips/{id}/associate fixed ip in body """
        context = req.environ['nova.context']
        floating_ip = self._get_ip_by_id(context, id)
        fixed_ip = body['associate_address']['fixed_ip']
        # The original wrapped this in a try/except rpc.RemoteError that
        # immediately re-raised; that no-op handler is dropped.
        self.network_api.associate_floating_ip(context,
                                               floating_ip, fixed_ip)
        return {'associated':
                {
                    "floating_ip_id": id,
                    "floating_ip": floating_ip,
                    "fixed_ip": fixed_ip}}

    def disassociate(self, req, id, body):
        """ POST /floating_ips/{id}/disassociate """
        context = req.environ['nova.context']
        floating_ip = self.network_api.get_floating_ip(context, id)
        address = floating_ip['address']
        fixed_ip = floating_ip['fixed_ip']['address']
        # As in associate(), the pointless catch-and-reraise of
        # rpc.RemoteError is removed; errors propagate identically.
        self.network_api.disassociate_floating_ip(context, address)
        return {'disassociated': {'floating_ip': address,
                                  'fixed_ip': fixed_ip}}

    def _get_ip_by_id(self, context, value):
        """Checks that value is id and then returns its address."""
        return self.network_api.get_floating_ip(context, value)['address']
class Floating_ips(extensions.ExtensionDescriptor):
    """Extension descriptor for the floating IP API resources."""

    def get_name(self):
        return "Floating_ips"

    def get_alias(self):
        return "os-floating-ips"

    def get_description(self):
        return "Floating IPs support"

    def get_namespace(self):
        return "http://docs.openstack.org/ext/floating_ips/api/v1.1"

    def get_updated(self):
        return "2011-06-16T00:00:00+00:00"

    def get_resources(self):
        # Expose associate/disassociate as POST member actions.
        member_actions = {'associate': 'POST',
                          'disassociate': 'POST'}
        resource = extensions.ResourceExtension(
            'os-floating-ips',
            FloatingIPController(),
            member_actions=member_actions)
        return [resource]

View File

@@ -358,7 +358,7 @@ class Resource(wsgi.Application):
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.debug("%(method)s %(url)s" % {"method": request.method,
LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
try:
@@ -386,7 +386,7 @@ class Resource(wsgi.Application):
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
LOG.debug(msg)
LOG.info(msg)
return response

View File

@@ -100,6 +100,11 @@ class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
pass
class SERVER_DOWN(Exception): # pylint: disable=C0103
"""Duplicate exception class from real LDAP module."""
pass
def initialize(_uri):
"""Opens a fake connection with an LDAP server."""
return FakeLDAP()
@@ -202,25 +207,38 @@ def _to_json(unencoded):
return json.dumps(list(unencoded))
server_fail = False
class FakeLDAP(object):
"""Fake LDAP connection."""
def simple_bind_s(self, dn, password):
"""This method is ignored, but provided for compatibility."""
if server_fail:
raise SERVER_DOWN
pass
def unbind_s(self):
"""This method is ignored, but provided for compatibility."""
if server_fail:
raise SERVER_DOWN
pass
def add_s(self, dn, attr):
"""Add an object with the specified attributes at dn."""
if server_fail:
raise SERVER_DOWN
key = "%s%s" % (self.__prefix, dn)
value_dict = dict([(k, _to_json(v)) for k, v in attr])
Store.instance().hmset(key, value_dict)
def delete_s(self, dn):
"""Remove the ldap object at specified dn."""
if server_fail:
raise SERVER_DOWN
Store.instance().delete("%s%s" % (self.__prefix, dn))
def modify_s(self, dn, attrs):
@@ -232,6 +250,9 @@ class FakeLDAP(object):
([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)
"""
if server_fail:
raise SERVER_DOWN
store = Store.instance()
key = "%s%s" % (self.__prefix, dn)
@@ -255,6 +276,9 @@ class FakeLDAP(object):
fields -- fields to return. Returns all fields if not specified
"""
if server_fail:
raise SERVER_DOWN
if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
store = Store.instance()

View File

@@ -101,6 +101,41 @@ def sanitize(fn):
return _wrapped
class LDAPWrapper(object):
    """Lazy, self-reconnecting proxy around an ldap connection.

    The connection is created on first use and re-established (with a
    single retry of the failed call) whenever the ldap module raises
    SERVER_DOWN.
    """

    def __init__(self, ldap, url, user, password):
        self.ldap = ldap
        self.url = url
        self.user = user
        self.password = password
        self.conn = None  # bound lazily by connect()

    def __wrap_reconnect(f):
        # f maps a live connection to the bound method to invoke.
        def inner(self, *args, **kwargs):
            if self.conn is None:
                self.connect()
                return f(self.conn)(*args, **kwargs)
            try:
                return f(self.conn)(*args, **kwargs)
            except self.ldap.SERVER_DOWN:
                # Stale connection: rebind once and retry the call.
                self.connect()
                return f(self.conn)(*args, **kwargs)
        return inner

    def connect(self):
        """(Re)initialize and bind the connection; clear it on failure."""
        try:
            self.conn = self.ldap.initialize(self.url)
            self.conn.simple_bind_s(self.user, self.password)
        except self.ldap.SERVER_DOWN:
            self.conn = None
            raise

    search_s = __wrap_reconnect(lambda conn: conn.search_s)
    add_s = __wrap_reconnect(lambda conn: conn.add_s)
    delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
    modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
class LdapDriver(object):
"""Ldap Auth driver
@@ -124,8 +159,8 @@ class LdapDriver(object):
LdapDriver.project_objectclass = 'novaProject'
self.__cache = None
if LdapDriver.conn is None:
LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
FLAGS.ldap_user_dn,
FLAGS.ldap_password)
if LdapDriver.mc is None:
LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

View File

@@ -53,8 +53,8 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
from nova.notifier import api as notifier_api
from nova.compute.utils import terminate_volumes
from nova.notifier import api as notifier
from nova.virt import driver
@@ -113,7 +113,7 @@ def checks_instance_lock(function):
def publisher_id(host=None):
return notifier.publisher_id("compute", host)
return notifier_api.publisher_id("compute", host)
class ComputeManager(manager.SchedulerDependentManager):
@@ -200,7 +200,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def refresh_security_group_rules(self, context, security_group_id,
**kwargs):
"""Tell the virtualization driver to refresh security group rules.
@@ -210,7 +210,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_rules(security_group_id)
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def refresh_security_group_members(self, context,
security_group_id, **kwargs):
"""Tell the virtualization driver to refresh security group members.
@@ -220,7 +220,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def refresh_provider_fw_rules(self, context, **_kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
@@ -348,6 +348,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.create',
notifier_api.INFO,
usage_info)
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
@@ -355,11 +360,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# be fixed once we have no-db-messaging
pass
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def run_instance(self, context, instance_id, **kwargs):
self._run_instance(context, instance_id, **kwargs)
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def start_instance(self, context, instance_id):
"""Starting an instance on this host."""
@@ -421,23 +426,29 @@ class ComputeManager(manager.SchedulerDependentManager):
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
instance_ref = self.db.instance_get(context.elevated(), instance_id)
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.delete',
notifier_api.INFO,
usage_info)
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def stop_instance(self, context, instance_id):
"""Stopping an instance on this host."""
self._shutdown_instance(context, instance_id, 'Stopping')
# instance state will be updated to stopped by _poll_instance_states()
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def rebuild_instance(self, context, instance_id, **kwargs):
"""Destroy and re-make this instance.
@@ -465,8 +476,14 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance_ref,
image_ref=image_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.rebuild',
notifier_api.INFO,
usage_info)
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this host."""
@@ -491,7 +508,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.reboot(instance_ref)
self._update_state(context, instance_id)
@exception.wrap_exception(publisher_id())
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def snapshot_instance(self, context, instance_id, image_id):
"""Snapshot an instance on this host."""
context = context.elevated()
@@ -513,7 +530,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.snapshot(instance_ref, image_id)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
"""Set the root/admin password for an instance on this host.
@@ -561,7 +578,7 @@ class ComputeManager(manager.SchedulerDependentManager):
time.sleep(1)
continue
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def inject_file(self, context, instance_id, path, file_contents):
"""Write a file to the specified path in an instance on this host."""
@@ -579,7 +596,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.audit(msg)
self.driver.inject_file(instance_ref, path, file_contents)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def agent_update(self, context, instance_id, url, md5hash):
"""Update agent running on an instance on this host."""
@@ -597,7 +614,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.audit(msg)
self.driver.agent_update(instance_ref, url, md5hash)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
@@ -614,7 +631,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.rescue(instance_ref, _update_state)
self._update_state(context, instance_id)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def unrescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
@@ -635,15 +652,20 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Update instance state when async task completes."""
self._update_state(context, instance_id)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def confirm_resize(self, context, instance_id, migration_id):
"""Destroys the source instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.destroy(instance_ref)
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.resize.confirm',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def revert_resize(self, context, instance_id, migration_id):
"""Destroys the new instance on the destination machine.
@@ -665,7 +687,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'instance_id': instance_id, },
})
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def finish_revert_resize(self, context, instance_id, migration_id):
"""Finishes the second half of reverting a resize.
@@ -689,8 +711,13 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.revert_resize(instance_ref)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.resize.revert',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def prep_resize(self, context, instance_id, flavor_id):
"""Initiates the process of moving a running instance to another host.
@@ -725,8 +752,15 @@ class ComputeManager(manager.SchedulerDependentManager):
'migration_id': migration_ref['id'],
'instance_id': instance_id, },
})
usage_info = utils.usage_from_instance(instance_ref,
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
notifier_api.notify('compute.%s' % self.host,
'compute.instance.resize.prep',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def resize_instance(self, context, instance_id, migration_id):
"""Starts the migration of a running instance to another host."""
@@ -752,7 +786,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'instance_id': instance_id,
'disk_info': disk_info}})
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def finish_resize(self, context, instance_id, migration_id, disk_info):
"""Completes the migration process.
@@ -782,7 +816,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this host."""
@@ -799,7 +833,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
result))
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def unpause_instance(self, context, instance_id):
"""Unpause a paused instance on this host."""
@@ -816,7 +850,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
result))
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
@@ -825,7 +859,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context=context)
return self.driver.get_diagnostics(instance_ref)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def suspend_instance(self, context, instance_id):
"""Suspend the given instance."""
@@ -841,7 +875,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
result))
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
@checks_instance_lock
def resume_instance(self, context, instance_id):
"""Resume the given suspended instance."""
@@ -857,7 +891,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
result))
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def lock_instance(self, context, instance_id):
"""Lock the given instance."""
context = context.elevated()
@@ -865,7 +899,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_('instance %s: locking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': True})
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def unlock_instance(self, context, instance_id):
"""Unlock the given instance."""
context = context.elevated()
@@ -873,7 +907,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': False})
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def get_lock(self, context, instance_id):
"""Return the boolean state of the given instance's lock."""
context = context.elevated()
@@ -900,7 +934,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context=context)
self.driver.inject_network_info(instance_ref)
@exception.wrap_exception
@exception.wrap_exception(notifier=notifier_api, publisher_id=publisher_id())
def get_console_output(self, context, instance_id):
"""Send the console output for the given instance."""
context = context.elevated()

View File

@@ -223,6 +223,9 @@ def certificate_update(context, certificate_id, values):
###################
def floating_ip_get(context, floating_ip_id):
    """Get a floating ip record by its id (delegates to the backend IMPL)."""
    return IMPL.floating_ip_get(context, floating_ip_id)
def floating_ip_allocate_address(context, host, project_id):
"""Allocate free floating ip and return the address.
@@ -289,6 +292,11 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
def floating_ip_get_by_ip(context, ip):
    """Get a floating ip record by its floating (public) address."""
    return IMPL.floating_ip_get_by_ip(context, ip)
def floating_ip_update(context, address, values):
    """Update a floating ip by address or raise if it doesn't exist."""
    return IMPL.floating_ip_update(context, address, values)
@@ -434,6 +442,11 @@ def instance_get_all(context):
return IMPL.instance_get_all(context)
def instance_get_active_by_window(context, begin, end=None):
    """Get instances active during a certain time window.

    An instance counts as active when it launched before ``begin`` and was
    either never terminated or terminated after ``end`` (see the backend
    implementation).
    """
    return IMPL.instance_get_active_by_window(context, begin, end)
def instance_get_all_by_user(context, user_id):
    """Get all instances owned by the given user."""
    return IMPL.instance_get_all_by_user(context, user_id)
@@ -1339,3 +1352,24 @@ def agent_build_destroy(context, agent_update_id):
def agent_build_update(context, agent_build_id, values):
"""Update agent build entry."""
IMPL.agent_build_update(context, agent_build_id, values)
####################
def instance_type_extra_specs_get(context, instance_type_id):
    """Get all extra specs for an instance type as a {key: value} dict."""
    return IMPL.instance_type_extra_specs_get(context, instance_type_id)
def instance_type_extra_specs_delete(context, instance_type_id, key):
    """Delete the given extra specs item (no return value)."""
    IMPL.instance_type_extra_specs_delete(context, instance_type_id, key)
def instance_type_extra_specs_update_or_create(context, instance_type_id,
                                               extra_specs):
    """Create or update instance type extra specs. This adds or modifies the
    key/value pairs specified in the extra specs dict argument"""
    IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
                                                    extra_specs)

View File

@@ -428,6 +428,29 @@ def certificate_update(context, certificate_id, values):
###################
@require_context
def floating_ip_get(context, id):
    """Fetch a floating ip row by primary key id.

    Admin contexts may see any row (including soft-deleted rows when the
    context allows reading deleted records); user contexts only see
    non-deleted rows belonging to their own project.  The related fixed_ip
    and its instance are eagerly loaded.
    """
    session = get_session()
    result = None
    if is_admin_context(context):
        result = session.query(models.FloatingIp).\
                         options(joinedload('fixed_ip')).\
                         options(joinedload_all('fixed_ip.instance')).\
                         filter_by(id=id).\
                         filter_by(deleted=can_read_deleted(context)).\
                         first()
    elif is_user_context(context):
        result = session.query(models.FloatingIp).\
                         options(joinedload('fixed_ip')).\
                         options(joinedload_all('fixed_ip.instance')).\
                         filter_by(project_id=context.project_id).\
                         filter_by(id=id).\
                         filter_by(deleted=False).\
                         first()
    if not result:
        # NOTE(review): this is a lookup by id, yet it raises
        # FloatingIpNotFoundForFixedAddress without the fixed_ip kwarg its
        # message template expects -- looks like it should be
        # FloatingIpNotFound(floating_ip=id); confirm with callers before
        # changing the exception type.
        raise exception.FloatingIpNotFoundForFixedAddress()
    return result
@require_context
@@ -582,7 +605,23 @@ def floating_ip_get_by_address(context, address, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.FloatingIpNotFound(fixed_ip=address)
raise exception.FloatingIpNotFoundForFixedAddress(fixed_ip=address)
return result
@require_context
def floating_ip_get_by_ip(context, ip, session=None):
    """Fetch a floating ip row by its floating address.

    :param ip: floating (public) address to look up
    :param session: optional existing session to reuse
    :raises: FloatingIpNotFound if no matching row exists
    """
    if not session:
        session = get_session()

    result = session.query(models.FloatingIp).\
                filter_by(address=ip).\
                filter_by(deleted=can_read_deleted(context)).\
                first()

    if not result:
        raise exception.FloatingIpNotFound(floating_ip=ip)

    return result
@@ -722,7 +761,7 @@ def fixed_ip_get_by_address(context, address, session=None):
options(joinedload('instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(fixed_ip=address)
raise exception.FloatingIpNotFoundForFixedAddress(fixed_ip=address)
if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
@@ -916,6 +955,24 @@ def instance_get_all(context):
all()
@require_admin_context
def instance_get_active_by_window(context, begin, end=None):
    """Return instances that were continuously active over the given window"""
    session = get_session()
    # Eager-load the relations the usage-audit consumers read, so they do
    # not trigger per-instance lazy loads.
    query = session.query(models.Instance).\
                    options(joinedload_all('fixed_ip.floating_ips')).\
                    options(joinedload('security_groups')).\
                    options(joinedload_all('fixed_ip.network')).\
                    options(joinedload('instance_type')).\
                    filter(models.Instance.launched_at < begin)
    if end:
        # Active through the window: never terminated, or terminated only
        # after the window closed.
        query = query.filter(or_(models.Instance.terminated_at == None,
                                 models.Instance.terminated_at > end))
    else:
        query = query.filter(models.Instance.terminated_at == None)
    return query.all()
@require_admin_context
def instance_get_all_by_user(context, user_id):
session = get_session()
@@ -2597,7 +2654,22 @@ def console_get(context, console_id, instance_id=None):
@require_admin_context
def instance_type_create(_context, values):
"""Create a new instance type. In order to pass in extra specs,
the values dict should contain a 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
try:
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.iteritems():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
instance_type_ref.save()
@@ -2606,6 +2678,25 @@ def instance_type_create(_context, values):
return instance_type_ref
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance type query returned by sqlalchemy
and returns it as a dictionary, converting the extra_specs
entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value']) for x in \
inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
@require_context
def instance_type_get_all(context, inactive=False):
"""
@@ -2614,17 +2705,19 @@ def instance_type_get_all(context, inactive=False):
session = get_session()
if inactive:
inst_types = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
order_by("name").\
all()
else:
inst_types = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
filter_by(deleted=False).\
order_by("name").\
all()
if inst_types:
inst_dict = {}
for i in inst_types:
inst_dict[i['name']] = dict(i)
inst_dict[i['name']] = _dict_with_extra_specs(i)
return inst_dict
else:
raise exception.NoInstanceTypesFound()
@@ -2635,12 +2728,14 @@ def instance_type_get_by_id(context, id):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
if not inst_type:
raise exception.InstanceTypeNotFound(instance_type=id)
else:
return dict(inst_type)
return _dict_with_extra_specs(inst_type)
@require_context
@@ -2648,12 +2743,13 @@ def instance_type_get_by_name(context, name):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not inst_type:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return dict(inst_type)
return _dict_with_extra_specs(inst_type)
@require_context
@@ -2661,12 +2757,13 @@ def instance_type_get_by_flavor_id(context, id):
"""Returns a dict describing specific flavor_id"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
filter_by(flavorid=int(id)).\
first()
if not inst_type:
raise exception.FlavorNotFound(flavor_id=id)
else:
return dict(inst_type)
return _dict_with_extra_specs(inst_type)
@require_admin_context
@@ -2834,6 +2931,9 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
return metadata
####################
@require_admin_context
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
@@ -2883,3 +2983,70 @@ def agent_build_update(context, agent_build_id, values):
first()
agent_build_ref.update(values)
agent_build_ref.save(session=session)
####################
@require_context
def instance_type_extra_specs_get(context, instance_type_id):
    """Return all live extra specs for an instance type.

    Soft-deleted rows are excluded; the result is a flat {key: value} dict.
    """
    session = get_session()
    spec_results = session.query(models.InstanceTypeExtraSpecs).\
                   filter_by(instance_type_id=instance_type_id).\
                   filter_by(deleted=False).\
                   all()
    spec_dict = {}
    for i in spec_results:
        spec_dict[i['key']] = i['value']
    return spec_dict
@require_context
def instance_type_extra_specs_delete(context, instance_type_id, key):
    """Soft-delete one extra-specs row for the given instance type and key.

    A no-op when no matching live row exists.
    """
    session = get_session()
    session.query(models.InstanceTypeExtraSpecs).\
        filter_by(instance_type_id=instance_type_id).\
        filter_by(key=key).\
        filter_by(deleted=False).\
        update({'deleted': True,
                'deleted_at': utils.utcnow(),
                # literal_column keeps updated_at at its current value
                # instead of letting the session touch it.
                'updated_at': literal_column('updated_at')})
@require_context
def instance_type_extra_specs_get_item(context, instance_type_id, key,
                                       session=None):
    """Return the single live extra-specs row for (instance_type_id, key).

    :param session: optional existing session to reuse; a new one is opened
        when omitted (callers such as
        instance_type_extra_specs_update_or_create pass their session in).
    :raises: InstanceTypeExtraSpecsNotFound when no matching row exists.
    """
    # Fix: original assigned to a misspelled name (`sppec_result`) and then
    # tested `spec_result`, raising NameError on every call; it also lacked
    # the `session` parameter its caller supplies.
    if not session:
        session = get_session()
    spec_result = session.query(models.InstanceTypeExtraSpecs).\
                  filter_by(instance_type_id=instance_type_id).\
                  filter_by(key=key).\
                  filter_by(deleted=False).\
                  first()
    if not spec_result:
        raise exception.\
           InstanceTypeExtraSpecsNotFound(extra_specs_key=key,
                                          instance_type_id=instance_type_id)
    return spec_result
@require_context
def instance_type_extra_specs_update_or_create(context, instance_type_id,
                                               specs):
    """Create or update extra specs for an instance type.

    For each (key, value) in ``specs``, update the existing live row or
    create a new one when it is missing.  Returns the ``specs`` dict.
    """
    session = get_session()
    spec_ref = None
    for key, value in specs.iteritems():
        try:
            spec_ref = instance_type_extra_specs_get_item(context,
                                                          instance_type_id,
                                                          key)
        # Fix: the original used a bare `except:`, which hid real errors
        # (including the TypeError from passing an unexpected `session`
        # argument) and silently created duplicate rows; only a missing
        # row should fall through to creation.
        except exception.InstanceTypeExtraSpecsNotFound:
            spec_ref = models.InstanceTypeExtraSpecs()
        spec_ref.update({"key": key, "value": value,
                         "instance_type_id": instance_type_id,
                         "deleted": 0})
        spec_ref.save(session=session)
    return specs

View File

@@ -58,8 +58,7 @@ provider_fw_rules = Table('provider_fw_rules', meta,
Column('to_port', Integer()),
Column('cidr',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
)
unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):

View File

@@ -0,0 +1,67 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instance_types = Table('instance_types', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
instance_type_extra_specs_table = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_type_id',
Integer(),
ForeignKey('instance_types.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    for table in (instance_type_extra_specs_table, ):
        try:
            table.create()
        except Exception:
            # Log the failing table definition for diagnosis, then re-raise
            # so the migration aborts rather than continuing half-applied.
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            raise
def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    # Fix: bind the engine before dropping. The original relied on
    # upgrade() having already bound the shared `meta`, so a downgrade run
    # in a fresh process failed with an unbound MetaData error.
    meta.bind = migrate_engine
    for table in (instance_type_extra_specs_table, ):
        table.drop()

View File

@@ -716,6 +716,21 @@ class InstanceMetadata(BASE, NovaBase):
'InstanceMetadata.deleted == False)')
class InstanceTypeExtraSpecs(BASE, NovaBase):
    """Represents additional specs as key/value pairs for an instance_type"""
    __tablename__ = 'instance_type_extra_specs'
    id = Column(Integer, primary_key=True)
    # Free-form spec key and its string value (both capped at 255 chars).
    key = Column(String(255))
    value = Column(String(255))
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
                              nullable=False)
    # Relationship back to the owning InstanceTypes row; the primaryjoin
    # filters out soft-deleted spec rows, so InstanceTypes.extra_specs
    # (via the backref) only lists live entries.
    instance_type = relationship(InstanceTypes, backref="extra_specs",
                   foreign_keys=instance_type_id,
                   primaryjoin='and_('
                   'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
                   'InstanceTypeExtraSpecs.deleted == False)')
class Zone(BASE, NovaBase):
"""Represents a child zone of this zone."""
__tablename__ = 'zones'
@@ -750,7 +765,7 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
AgentBuild, InstanceMetadata, Migration)
AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)

View File

@@ -25,7 +25,6 @@ SHOULD include dedicated exception logging.
"""
from nova import log as logging
from nova.notifier import api as notifier
LOG = logging.getLogger('nova.exception')
@@ -82,14 +81,18 @@ def wrap_db_error(f):
_wrap.func_name = f.func_name
def wrap_exception(f, event_type=None, publisher_id=None, level=notifier.ERROR):
def wrap_exception(f, notifier=None, publisher_id=None, event_type=None, level=None):
def _wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception, e:
if event_type and publisher_id:
if notifier:
payload = dict(args=args, exception=e)
payload.update(kw)
if not level:
level = notifier.ERROR
notifier.safe_notify(publisher_id, event_type, level, payload)
if not isinstance(e, Error):
@@ -367,6 +370,10 @@ class NoFixedIpsFoundForInstance(NotFound):
class FloatingIpNotFound(NotFound):
message = _("Floating ip %(floating_ip)s not found")
class FloatingIpNotFoundForFixedAddress(NotFound):
message = _("Floating ip not found for fixed address %(fixed_ip)s.")
@@ -510,6 +517,11 @@ class InstanceMetadataNotFound(NotFound):
"key %(metadata_key)s.")
class InstanceTypeExtraSpecsNotFound(NotFound):
message = _("Instance Type %(instance_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class LDAPObjectNotFound(NotFound):
message = _("LDAP object could not be found")
@@ -595,3 +607,11 @@ class MigrationError(NovaException):
class MalformedRequestBody(NovaException):
message = _("Malformed message body: %(reason)s")
class PasteConfigNotFound(NotFound):
message = _("Could not find paste config at %(path)s")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")

View File

@@ -314,3 +314,14 @@ logging.setLoggerClass(NovaLogger)
def audit(msg, *args, **kwargs):
    """Shortcut for logging to root log with severity 'AUDIT'."""
    logging.root.log(AUDIT, msg, *args, **kwargs)
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        # Target logger, and the level every write() is emitted at.
        self.logger = logger
        self.level = level

    def write(self, msg):
        # File-like hook: implementing write() lets this object stand in
        # wherever a writable stream is expected.
        self.logger.log(self.level, msg)

View File

@@ -34,6 +34,19 @@ LOG = logging.getLogger('nova.network')
class API(base.Base):
"""API for interacting with the network manager."""
    def get_floating_ip(self, context, id):
        """Return the floating ip identified by id as a plain dict."""
        rv = self.db.floating_ip_get(context, id)
        return dict(rv.iteritems())
    def get_floating_ip_by_ip(self, context, address):
        """Return the floating ip with the given floating address as a dict."""
        res = self.db.floating_ip_get_by_ip(context, address)
        return dict(res.iteritems())
    def list_floating_ips(self, context):
        """Return all floating ips belonging to the context's project."""
        ips = self.db.floating_ip_get_all_by_project(context,
                                                     context.project_id)
        return ips
def allocate_floating_ip(self, context):
if quota.allowed_floating_ips(context, 1) < 1:
LOG.warn(_('Quota exceeeded for %s, tried to allocate '

View File

@@ -46,7 +46,7 @@ def publisher_id(service, host=None):
def safe_notify(publisher_id, event_type, priority, payload):
try:
notify(publisher_id, event_type, notification_level, payload)
exception Exception, e:
except Exception, e:
LOG.exception(_("Problem '%(e)' attempting to "
"send to notification system." % locals()))

View File

@@ -0,0 +1,28 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from nova import flags
from nova import log as logging
FLAGS = flags.FLAGS
NOTIFICATIONS = []
def notify(message):
    """Test notifier, stores notifications in memory for unittests."""
    # NOTE(review): NOTIFICATIONS is a module-level list; tests presumably
    # clear it between cases -- confirm with callers.
    NOTIFICATIONS.append(message)

View File

@@ -27,10 +27,11 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import notifier
from nova import rpc
from nova import utils
from nova.compute import power_state
from nova.notifier import api as notifier_api
FLAGS = flags.FLAGS
flags.DEFINE_integer('service_down_time', 60,
@@ -49,12 +50,7 @@ class WillNotSchedule(exception.Error):
def publisher_id(host=None):
return notifier.publisher_id("scheduler", host)
def notifier(instance_id, event_type, level, host=None):
return dict(instance_id=instance_id, event_type=event_type,
notification_level=level, host=publisher_id(host))
return notifier_api.publisher_id("scheduler", host)
class Scheduler(object):

View File

@@ -93,6 +93,26 @@ class InstanceTypeFilter(HostFilter):
"""Use instance_type to filter hosts."""
return (self._full_name(), instance_type)
    def _satisfies_extra_specs(self, capabilities, instance_type):
        """Check that the capabilities provided by the compute service
        satisfy the extra specs associated with the instance type"""
        # An instance type with no extra specs places no extra constraints.
        if 'extra_specs' not in instance_type:
            return True

        # Note(lorinh): For now, we are just checking exact matching on the
        # values. Later on, we want to handle numerical
        # values so we can represent things like number of GPU cards
        try:
            for key, value in instance_type['extra_specs'].iteritems():
                if capabilities[key] != value:
                    return False
        except KeyError:
            # A spec key the host never reported counts as unsatisfied.
            return False
        return True
def filter_hosts(self, zone_manager, query):
"""Return a list of hosts that can create instance_type."""
instance_type = query
@@ -103,7 +123,11 @@ class InstanceTypeFilter(HostFilter):
disk_bytes = capabilities['disk_available']
spec_ram = instance_type['memory_mb']
spec_disk = instance_type['local_gb']
if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
extra_specs = instance_type['extra_specs']
if host_ram_mb >= spec_ram and \
disk_bytes >= spec_disk and \
self._satisfies_extra_specs(capabilities, instance_type):
selected_hosts.append((host, capabilities))
return selected_hosts

View File

@@ -19,10 +19,12 @@
"""Generic Node baseclass for all workers that run on hosts."""
import greenlet
import inspect
import multiprocessing
import os
import greenlet
from eventlet import greenthread
from nova import context
@@ -36,6 +38,8 @@ from nova import version
from nova import wsgi
LOG = logging.getLogger('nova.service')
FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to datastore',
@@ -53,6 +57,63 @@ flags.DEFINE_string('api_paste_config', "api-paste.ini",
'File name for the paste.deploy config for nova-api')
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        # Child processes created by launch_service(); stop() and wait()
        # iterate over this list.
        self._services = []

    @staticmethod
    def run_service(service):
        """Start and wait for a service to finish.

        :param service: Service to run and wait for.
        :returns: None

        """
        service.start()
        try:
            service.wait()
        except KeyboardInterrupt:
            service.stop()

    def launch_service(self, service):
        """Load and start the given service.

        :param service: The service you would like to start.
        :returns: None

        """
        # Each service runs in its own child process, so one crashing
        # service does not take down the others.
        process = multiprocessing.Process(target=self.run_service,
                                          args=(service,))
        process.start()
        self._services.append(process)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        for service in self._services:
            if service.is_alive():
                service.terminate()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        for service in self._services:
            service.join()
class Service(object):
"""Base class for workers that run on hosts."""
@@ -232,45 +293,54 @@ class Service(object):
logging.exception(_('model server went away'))
class WsgiService(object):
"""Base class for WSGI based services.
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
For each api you define, you must also define these flags:
:<api>_listen: The address on which to listen
:<api>_listen_port: The port on which to listen
def __init__(self, name, loader=None):
"""Initialize, but do not start the WSGI service.
"""
:param name: The name of the WSGI service given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
def __init__(self, conf, apis):
self.conf = conf
self.apis = apis
self.wsgi_app = None
"""
self.name = name
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port)
def start(self):
self.wsgi_app = _run_wsgi(self.conf, self.apis)
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
self.server.start()
self.port = self.server.port
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
self.wsgi_app.wait()
"""Wait for the service to stop serving this API.
def get_socket_info(self, api_name):
"""Returns the (host, port) that an API was started on."""
return self.wsgi_app.socket_info[api_name]
:returns: None
class ApiService(WsgiService):
"""Class for our nova-api service."""
@classmethod
def create(cls, conf=None):
if not conf:
conf = wsgi.paste_config_file(FLAGS.api_paste_config)
if not conf:
message = (_('No paste configuration found for: %s'),
FLAGS.api_paste_config)
raise exception.Error(message)
api_endpoints = ['ec2', 'osapi']
service = cls(conf, api_endpoints)
return service
"""
self.server.wait()
def serve(*services):
@@ -302,48 +372,3 @@ def serve(*services):
def wait():
while True:
greenthread.sleep(5)
def serve_wsgi(cls, conf=None):
try:
service = cls.create(conf)
except Exception:
logging.exception('in WsgiService.create()')
raise
finally:
# After we've loaded up all our dynamic bits, check
# whether we should print help
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
FLAGS.ParseNewFlags()
service.start()
return service
def _run_wsgi(paste_config_file, apis):
logging.debug(_('Using paste.deploy config at: %s'), paste_config_file)
apps = []
for api in apis:
config = wsgi.load_paste_configuration(paste_config_file, api)
if config is None:
logging.debug(_('No paste configuration for app: %s'), api)
continue
logging.debug(_('App Config: %(api)s\n%(config)r') % locals())
logging.info(_('Running %s API'), api)
app = wsgi.load_paste_app(paste_config_file, api)
apps.append((app,
getattr(FLAGS, '%s_listen_port' % api),
getattr(FLAGS, '%s_listen' % api),
api))
if len(apps) == 0:
logging.error(_('No known API applications configured in %s.'),
paste_config_file)
return
server = wsgi.Server()
for app in apps:
server.start(*app)
return server

View File

@@ -38,7 +38,6 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import service
from nova import wsgi
from nova.virt import fake
@@ -81,7 +80,6 @@ class TestCase(unittest.TestCase):
self.injected = []
self._services = []
self._monkey_patch_attach()
self._monkey_patch_wsgi()
self._original_flags = FLAGS.FlagValuesDict()
rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size)
@@ -107,7 +105,6 @@ class TestCase(unittest.TestCase):
# Reset our monkey-patches
rpc.Consumer.attach_to_eventlet = self.original_attach
wsgi.Server.start = self.original_start
# Stop any timers
for x in self.injected:
@@ -163,26 +160,6 @@ class TestCase(unittest.TestCase):
_wrapped.func_name = self.original_attach.func_name
rpc.Consumer.attach_to_eventlet = _wrapped
def _monkey_patch_wsgi(self):
"""Allow us to kill servers spawned by wsgi.Server."""
self.original_start = wsgi.Server.start
@functools.wraps(self.original_start)
def _wrapped_start(inner_self, *args, **kwargs):
original_spawn_n = inner_self.pool.spawn_n
@functools.wraps(original_spawn_n)
def _wrapped_spawn_n(*args, **kwargs):
rv = greenthread.spawn(*args, **kwargs)
self._services.append(rv)
inner_self.pool.spawn_n = _wrapped_spawn_n
self.original_start(inner_self, *args, **kwargs)
inner_self.pool.spawn_n = original_spawn_n
_wrapped_start.func_name = self.original_start.func_name
wsgi.Server.start = _wrapped_start
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.

View File

@@ -50,7 +50,7 @@ def setup():
testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
if os.path.exists(testdb):
os.unlink(testdb)
return
migration.db_sync()
ctxt = context.get_admin_context()
network_manager.VlanManager().create_networks(ctxt,

View File

@@ -0,0 +1,15 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -0,0 +1,186 @@
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import webob
from nova import context
from nova import db
from nova import test
from nova import network
from nova.tests.api.openstack import fakes
from nova.api.openstack.contrib.floating_ips import FloatingIPController
from nova.api.openstack.contrib.floating_ips import _translate_floating_ip_view
def network_api_get_floating_ip(self, context, id):
    """Stub for network.api.API.get_floating_ip returning a fixed record."""
    floating_ip = {'id': 1, 'address': '10.10.10.10'}
    floating_ip['fixed_ip'] = {'address': '11.0.0.1'}
    return floating_ip
def network_api_list_floating_ips(self, context):
    """Stub for network.api.API.list_floating_ips.

    Returns one floating ip that is associated with an instance and one
    that is unassociated.
    """
    associated = {'id': 1,
                  'address': '10.10.10.10',
                  'instance': {'id': 11},
                  'fixed_ip': {'address': '10.0.0.1'}}
    unassociated = {'id': 2,
                    'address': '10.10.10.11'}
    return [associated, unassociated]
def network_api_allocate(self, context):
    """Stub for network.api.API.allocate_floating_ip; returns a fixed address."""
    return '10.10.10.10'
def network_api_release(self, context, address):
    """Stub for network.api.API.release_floating_ip; does nothing."""
    pass
def network_api_associate(self, context, floating_ip, fixed_ip):
    """Stub for network.api.API.associate_floating_ip; does nothing."""
    pass
def network_api_disassociate(self, context, floating_address):
    """Stub for network.api.API.disassociate_floating_ip; does nothing."""
    pass
class FloatingIpTest(test.TestCase):
    """Tests for the os-floating-ips extension.

    All network API calls are replaced by the module-level network_api_*
    fakes, so these tests exercise only the WSGI routing, the controller,
    and the floating-ip view serialization.
    """

    # Floating address used for the fixture row created in setUp().
    address = "10.10.10.10"

    def _create_floating_ip(self):
        """Create a floating ip object."""
        host = "fake_host"
        return db.floating_ip_create(self.context,
                                     {'address': self.address,
                                      'host': host})

    def _delete_floating_ip(self):
        # Remove the fixture row created by _create_floating_ip().
        db.floating_ip_destroy(self.context, self.address)

    def setUp(self):
        super(FloatingIpTest, self).setUp()
        self.controller = FloatingIPController()
        self.stubs = stubout.StubOutForTesting()
        fakes.FakeAuthManager.reset_fake_data()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)
        # Route every network API call the controller makes to the
        # module-level fakes defined above.
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "list_floating_ips",
                       network_api_list_floating_ips)
        self.stubs.Set(network.api.API, "allocate_floating_ip",
                       network_api_allocate)
        self.stubs.Set(network.api.API, "release_floating_ip",
                       network_api_release)
        self.stubs.Set(network.api.API, "associate_floating_ip",
                       network_api_associate)
        self.stubs.Set(network.api.API, "disassociate_floating_ip",
                       network_api_disassociate)
        self.context = context.get_admin_context()
        self._create_floating_ip()

    def tearDown(self):
        self.stubs.UnsetAll()
        self._delete_floating_ip()
        super(FloatingIpTest, self).tearDown()

    def test_translate_floating_ip_view(self):
        # Exercises the view helper directly against a DB record, without
        # going through the HTTP layer.
        floating_ip_address = self._create_floating_ip()
        floating_ip = db.floating_ip_get_by_address(self.context,
                                                    floating_ip_address)
        view = _translate_floating_ip_view(floating_ip)
        self.assertTrue('floating_ip' in view)
        self.assertTrue(view['floating_ip']['id'])
        self.assertEqual(view['floating_ip']['ip'], self.address)
        self.assertEqual(view['floating_ip']['fixed_ip'], None)
        self.assertEqual(view['floating_ip']['instance_id'], None)

    def test_floating_ips_list(self):
        # GET the collection; expects the two ips from the list fake.
        req = webob.Request.blank('/v1.1/os-floating-ips')
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        res_dict = json.loads(res.body)
        response = {'floating_ips': [{'floating_ip': {'instance_id': 11,
                                                      'ip': '10.10.10.10',
                                                      'fixed_ip': '10.0.0.1',
                                                      'id': 1}},
                                     {'floating_ip': {'instance_id': None,
                                                      'ip': '10.10.10.11',
                                                      'fixed_ip': None,
                                                      'id': 2}}]}
        self.assertEqual(res_dict, response)

    def test_floating_ip_show(self):
        # GET a single ip; values come from network_api_get_floating_ip.
        req = webob.Request.blank('/v1.1/os-floating-ips/1')
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        res_dict = json.loads(res.body)
        self.assertEqual(res_dict['floating_ip']['id'], 1)
        self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
        self.assertEqual(res_dict['floating_ip']['fixed_ip'], '11.0.0.1')
        self.assertEqual(res_dict['floating_ip']['instance_id'], None)

    def test_floating_ip_allocate(self):
        # POST to the collection allocates a new floating ip.
        req = webob.Request.blank('/v1.1/os-floating-ips')
        req.method = 'POST'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        ip = json.loads(res.body)['allocated']
        expected = {
            "id": 1,
            "floating_ip": '10.10.10.10'}
        self.assertEqual(ip, expected)

    def test_floating_ip_release(self):
        # DELETE on a member releases the floating ip.
        req = webob.Request.blank('/v1.1/os-floating-ips/1')
        req.method = 'DELETE'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        actual = json.loads(res.body)['released']
        expected = {
            "id": 1,
            "floating_ip": '10.10.10.10'}
        self.assertEqual(actual, expected)

    def test_floating_ip_associate(self):
        # POST /associate links the floating ip to a fixed ip.
        body = dict(associate_address=dict(fixed_ip='1.2.3.4'))
        req = webob.Request.blank('/v1.1/os-floating-ips/1/associate')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers["content-type"] = "application/json"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        actual = json.loads(res.body)['associated']
        expected = {
            "floating_ip_id": '1',
            "floating_ip": "10.10.10.10",
            "fixed_ip": "1.2.3.4"}
        self.assertEqual(actual, expected)

    def test_floating_ip_disassociate(self):
        # POST /disassociate unlinks the floating ip from its fixed ip.
        req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate')
        req.method = 'POST'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        ip = json.loads(res.body)['disassociated']
        expected = {
            "floating_ip": '10.10.10.10',
            "fixed_ip": '11.0.0.1'}
        self.assertEqual(ip, expected)

View File

@@ -0,0 +1,198 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import unittest
import webob
import os.path
from nova import flags
from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import extensions
from nova.tests.api.openstack import fakes
import nova.wsgi
FLAGS = flags.FLAGS
def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
    """Fake for instance_type_extra_specs_update_or_create."""
    return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
    """Fake for instance_type_extra_specs_get: return the canned specs.

    NOTE(review): this function was previously defined twice back to
    back with identical bodies; the redundant second definition (which
    simply shadowed the first) has been removed.
    """
    return stub_flavor_extra_specs()
def return_empty_flavor_extra_specs(context, flavor_id):
    """Fake extra-specs lookup for a flavor that has none."""
    return dict()
def delete_flavor_extra_specs(context, flavor_id, key):
    """Fake for instance_type_extra_specs_delete: succeed silently."""
    pass
def stub_flavor_extra_specs():
    """Canned extra_specs dict shared by the fake DB calls above."""
    return dict(key1="value1",
                key2="value2",
                key3="value3",
                key4="value4",
                key5="value5")
class FlavorsExtraSpecsTest(unittest.TestCase):
    """Tests for the os-extra_specs flavor extension resource."""

    def setUp(self):
        super(FlavorsExtraSpecsTest, self).setUp()
        FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
                                                   "extensions")
        self.stubs = stubout.StubOutForTesting()
        fakes.FakeAuthManager.auth_data = {}
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_auth(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        # Full middleware stack: auth on top of the extension router.
        self.mware = auth.AuthMiddleware(
            extensions.ExtensionMiddleware(
                openstack.APIRouterV11()))

    def tearDown(self):
        self.stubs.UnsetAll()
        super(FlavorsExtraSpecsTest, self).tearDown()

    def _make_request(self, url, method='GET', body=None):
        """Send a request through the middleware stack and return the
        response.  POST/PUT requests are sent as JSON."""
        req = webob.Request.blank(url)
        req.method = method
        if method in ('POST', 'PUT'):
            req.headers["content-type"] = "application/json"
        if body is not None:
            req.body = body
        return req.get_response(self.mware)

    def test_index(self):
        self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
                       return_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs')
        self.assertEqual(200, res.status_int)
        self.assertEqual('application/json', res.headers['Content-Type'])
        self.assertEqual('value1', json.loads(res.body)['extra_specs']['key1'])

    def test_index_no_data(self):
        self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
                       return_empty_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs')
        self.assertEqual(200, res.status_int)
        self.assertEqual('application/json', res.headers['Content-Type'])
        self.assertEqual(0, len(json.loads(res.body)['extra_specs']))

    def test_show(self):
        self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
                       return_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs/key5')
        self.assertEqual(200, res.status_int)
        self.assertEqual('application/json', res.headers['Content-Type'])
        self.assertEqual('value5', json.loads(res.body)['key5'])

    def test_show_spec_not_found(self):
        self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
                       return_empty_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs/key6')
        self.assertEqual(404, res.status_int)

    def test_delete(self):
        self.stubs.Set(nova.db.api, 'instance_type_extra_specs_delete',
                       delete_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs/key5',
                                 method='DELETE')
        self.assertEqual(200, res.status_int)

    def test_create(self):
        self.stubs.Set(nova.db.api,
                       'instance_type_extra_specs_update_or_create',
                       return_create_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs',
                                 method='POST',
                                 body='{"extra_specs": {"key1": "value1"}}')
        self.assertEqual(200, res.status_int)
        self.assertEqual('application/json', res.headers['Content-Type'])
        self.assertEqual('value1', json.loads(res.body)['extra_specs']['key1'])

    def test_create_empty_body(self):
        self.stubs.Set(nova.db.api,
                       'instance_type_extra_specs_update_or_create',
                       return_create_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs', method='POST')
        self.assertEqual(400, res.status_int)

    def test_update_item(self):
        self.stubs.Set(nova.db.api,
                       'instance_type_extra_specs_update_or_create',
                       return_create_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs/key1',
                                 method='PUT', body='{"key1": "value1"}')
        self.assertEqual(200, res.status_int)
        self.assertEqual('application/json', res.headers['Content-Type'])
        self.assertEqual('value1', json.loads(res.body)['key1'])

    def test_update_item_empty_body(self):
        self.stubs.Set(nova.db.api,
                       'instance_type_extra_specs_update_or_create',
                       return_create_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs/key1',
                                 method='PUT')
        self.assertEqual(400, res.status_int)

    def test_update_item_too_many_keys(self):
        self.stubs.Set(nova.db.api,
                       'instance_type_extra_specs_update_or_create',
                       return_create_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs/key1',
                                 method='PUT',
                                 body='{"key1": "value1", "key2": "value2"}')
        self.assertEqual(400, res.status_int)

    def test_update_item_body_uri_mismatch(self):
        self.stubs.Set(nova.db.api,
                       'instance_type_extra_specs_update_or_create',
                       return_create_flavor_extra_specs)
        res = self._make_request('/flavors/1/os-extra_specs/bad',
                                 method='PUT', body='{"key1": "value1"}')
        self.assertEqual(400, res.status_int)

View File

@@ -16,7 +16,6 @@
# under the License.
import copy
import json
import random
import string
@@ -29,11 +28,11 @@ from glance.common import exception as glance_exc
from nova import context
from nova import exception as exc
from nova import flags
from nova import utils
import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import extensions
from nova.api.openstack import versions
from nova.api.openstack import limits
from nova.auth.manager import User, Project
@@ -82,7 +81,8 @@ def wsgi_app(inner_app10=None, inner_app11=None):
api10 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app10)))
api11 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app11)))
limits.RateLimitingMiddleware(
extensions.ExtensionMiddleware(inner_app11))))
mapper['/v1.0'] = api10
mapper['/v1.1'] = api11
mapper['/'] = openstack.FaultWrapper(versions.Versions())

View File

@@ -171,16 +171,10 @@ class _IntegratedTestBase(test.TestCase):
self.api = self.user.openstack_api
def _start_api_service(self):
api_service = service.ApiService.create()
api_service.start()
if not api_service:
raise Exception("API Service was None")
self.api_service = api_service
host, port = api_service.get_socket_info('osapi')
self.auth_url = 'http://%s:%s/v1.1' % (host, port)
osapi = service.WSGIService("osapi")
osapi.start()
self.auth_url = 'http://%s:%s/v1.1' % (osapi.host, osapi.port)
LOG.warn(self.auth_url)
def tearDown(self):
self.context.cleanup()

View File

@@ -67,7 +67,18 @@ class HostFilterTestCase(test.TestCase):
flavorid=1,
swap=500,
rxtx_quota=30000,
rxtx_cap=200)
rxtx_cap=200,
extra_specs={})
self.gpu_instance_type = dict(name='tiny.gpu',
memory_mb=50,
vcpus=10,
local_gb=500,
flavorid=2,
swap=500,
rxtx_quota=30000,
rxtx_cap=200,
extra_specs={'xpu_arch': 'fermi',
'xpu_info': 'Tesla 2050'})
self.zone_manager = FakeZoneManager()
states = {}
@@ -75,6 +86,18 @@ class HostFilterTestCase(test.TestCase):
states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
self.zone_manager.service_states = states
# Add some extra capabilities to some hosts
host07 = self.zone_manager.service_states['host07']['compute']
host07['xpu_arch'] = 'fermi'
host07['xpu_info'] = 'Tesla 2050'
host08 = self.zone_manager.service_states['host08']['compute']
host08['xpu_arch'] = 'radeon'
host09 = self.zone_manager.service_states['host09']['compute']
host09['xpu_arch'] = 'fermi'
host09['xpu_info'] = 'Tesla 2150'
def tearDown(self):
FLAGS.default_host_filter = self.old_flag
@@ -116,6 +139,17 @@ class HostFilterTestCase(test.TestCase):
self.assertEquals('host05', just_hosts[0])
self.assertEquals('host10', just_hosts[5])
def test_instance_type_filter_extra_specs(self):
hf = host_filter.InstanceTypeFilter()
# filter all hosts that can support 50 ram and 500 disk
name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
name)
hosts = hf.filter_hosts(self.zone_manager, cooked)
self.assertEquals(1, len(hosts))
just_hosts = [host for host, caps in hosts]
self.assertEquals('host07', just_hosts[0])
def test_json_filter(self):
hf = host_filter.JsonFilter()
# filter all hosts that can support 50 ram and 500 disk

View File

@@ -25,6 +25,7 @@ from nova import log as logging
from nova import test
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.auth import fakeldap
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.auth_unittest')
@@ -369,6 +370,15 @@ class _AuthManagerBaseTestCase(test.TestCase):
class AuthManagerLdapTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
    def test_reconnect_on_server_failure(self):
        # Simulate an LDAP outage: while server_fail is set the driver
        # must raise SERVER_DOWN; once the flag clears, the manager must
        # transparently reconnect and serve requests again.
        self.manager.get_users()
        fakeldap.server_fail = True
        try:
            self.assertRaises(fakeldap.SERVER_DOWN, self.manager.get_users)
        finally:
            fakeldap.server_fail = False
        self.manager.get_users()
class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'

View File

@@ -37,6 +37,7 @@ from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova.notifier import test_notifier
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
@@ -62,6 +63,7 @@ class ComputeTestCase(test.TestCase):
super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute.API()
@@ -69,6 +71,7 @@ class ComputeTestCase(test.TestCase):
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
@@ -327,6 +330,50 @@ class ComputeTestCase(test.TestCase):
self.assert_(console)
self.compute.terminate_instance(self.context, instance_id)
    def test_run_instance_usage_notification(self):
        """Ensure run instance generates appropriate usage notification"""
        instance_id = self._create_instance()
        self.compute.run_instance(self.context, instance_id)
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
        msg = test_notifier.NOTIFICATIONS[0]
        self.assertEquals(msg['priority'], 'INFO')
        self.assertEquals(msg['event_type'], 'compute.instance.create')
        payload = msg['payload']
        self.assertEquals(payload['tenant_id'], self.project.id)
        self.assertEquals(payload['user_id'], self.user.id)
        self.assertEquals(payload['instance_id'], instance_id)
        self.assertEquals(payload['instance_type'], 'm1.tiny')
        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
        self.assertEquals(str(payload['instance_type_id']), str(type_id))
        self.assertTrue('display_name' in payload)
        self.assertTrue('created_at' in payload)
        self.assertTrue('launched_at' in payload)
        self.assertEquals(payload['image_ref'], '1')
        self.compute.terminate_instance(self.context, instance_id)
    def test_terminate_usage_notification(self):
        """Ensure terminate_instance generates appropriate usage
        notification"""
        instance_id = self._create_instance()
        self.compute.run_instance(self.context, instance_id)
        # Discard the notification emitted by run_instance so only the
        # terminate notification is inspected below.
        test_notifier.NOTIFICATIONS = []
        self.compute.terminate_instance(self.context, instance_id)
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
        msg = test_notifier.NOTIFICATIONS[0]
        self.assertEquals(msg['priority'], 'INFO')
        self.assertEquals(msg['event_type'], 'compute.instance.delete')
        payload = msg['payload']
        self.assertEquals(payload['tenant_id'], self.project.id)
        self.assertEquals(payload['user_id'], self.user.id)
        self.assertEquals(payload['instance_id'], instance_id)
        self.assertEquals(payload['instance_type'], 'm1.tiny')
        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
        self.assertEquals(str(payload['instance_type_id']), str(type_id))
        self.assertTrue('display_name' in payload)
        self.assertTrue('created_at' in payload)
        self.assertTrue('launched_at' in payload)
        self.assertEquals(payload['image_ref'], '1')
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
instance_id = self._create_instance()
@@ -378,6 +425,36 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
def test_resize_instance_notification(self):
"""Ensure notifications on instance migrate/resize"""
instance_id = self._create_instance()
context = self.context.elevated()
self.compute.run_instance(self.context, instance_id)
test_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance_id, {'host': 'foo'})
self.compute.prep_resize(context, instance_id, 1)
migration_ref = db.migration_get_by_instance_and_status(context,
instance_id, 'pre-migrating')
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.resize.prep')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project.id)
self.assertEquals(payload['user_id'], self.user.id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertEquals(payload['image_ref'], '1')
self.compute.terminate_instance(context, instance_id)
def test_resize_instance(self):
"""Ensure instance can be migrated/resized"""
instance_id = self._create_instance()

View File

@@ -67,7 +67,8 @@ class HostFilterTestCase(test.TestCase):
flavorid=1,
swap=500,
rxtx_quota=30000,
rxtx_cap=200)
rxtx_cap=200,
extra_specs={})
self.zone_manager = FakeZoneManager()
states = {}

View File

@@ -0,0 +1,165 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for instance types extra specs code
"""
from nova import context
from nova import db
from nova import test
from nova.db.sqlalchemy.session import get_session
from nova.db.sqlalchemy import models
class InstanceTypeExtraSpecsTestCase(test.TestCase):
    """Round-trip tests for the instance-type extra_specs DB API.

    NOTE(review): the last flavor-id test was previously named
    test_instance_type_get_by_id_with_extra_specs, duplicating the name
    of the get_by_id test and silently shadowing it so the get_by_id
    test never ran.  It is renamed here to match what it exercises.
    """

    def setUp(self):
        super(InstanceTypeExtraSpecsTestCase, self).setUp()
        self.context = context.get_admin_context()
        # Create a flavor with extra_specs attached; the DB stores spec
        # values as strings, so xpus=2 comes back as "2" in the getters.
        values = dict(name="cg1.4xlarge",
                      memory_mb=22000,
                      vcpus=8,
                      local_gb=1690,
                      flavorid=105)
        specs = dict(cpu_arch="x86_64",
                     cpu_model="Nehalem",
                     xpu_arch="fermi",
                     xpus=2,
                     xpu_model="Tesla 2050")
        values['extra_specs'] = specs
        ref = db.api.instance_type_create(self.context, values)
        self.instance_type_id = ref.id

    def tearDown(self):
        # Remove the instance type from the database
        db.api.instance_type_purge(context.get_admin_context(), "cg1.4xlarge")
        super(InstanceTypeExtraSpecsTestCase, self).tearDown()

    def test_instance_type_specs_get(self):
        expected_specs = dict(cpu_arch="x86_64",
                              cpu_model="Nehalem",
                              xpu_arch="fermi",
                              xpus="2",
                              xpu_model="Tesla 2050")
        actual_specs = db.api.instance_type_extra_specs_get(
            context.get_admin_context(),
            self.instance_type_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_instance_type_extra_specs_delete(self):
        expected_specs = dict(cpu_arch="x86_64",
                              cpu_model="Nehalem",
                              xpu_arch="fermi",
                              xpus="2")
        db.api.instance_type_extra_specs_delete(context.get_admin_context(),
                                                self.instance_type_id,
                                                "xpu_model")
        actual_specs = db.api.instance_type_extra_specs_get(
            context.get_admin_context(),
            self.instance_type_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_instance_type_extra_specs_update(self):
        expected_specs = dict(cpu_arch="x86_64",
                              cpu_model="Sandy Bridge",
                              xpu_arch="fermi",
                              xpus="2",
                              xpu_model="Tesla 2050")
        db.api.instance_type_extra_specs_update_or_create(
            context.get_admin_context(),
            self.instance_type_id,
            dict(cpu_model="Sandy Bridge"))
        actual_specs = db.api.instance_type_extra_specs_get(
            context.get_admin_context(),
            self.instance_type_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_instance_type_extra_specs_create(self):
        expected_specs = dict(cpu_arch="x86_64",
                              cpu_model="Nehalem",
                              xpu_arch="fermi",
                              xpus="2",
                              xpu_model="Tesla 2050",
                              net_arch="ethernet",
                              net_mbps="10000")
        db.api.instance_type_extra_specs_update_or_create(
            context.get_admin_context(),
            self.instance_type_id,
            dict(net_arch="ethernet",
                 net_mbps=10000))
        actual_specs = db.api.instance_type_extra_specs_get(
            context.get_admin_context(),
            self.instance_type_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_instance_type_get_by_id_with_extra_specs(self):
        instance_type = db.api.instance_type_get_by_id(
            context.get_admin_context(),
            self.instance_type_id)
        self.assertEquals(instance_type['extra_specs'],
                          dict(cpu_arch="x86_64",
                               cpu_model="Nehalem",
                               xpu_arch="fermi",
                               xpus="2",
                               xpu_model="Tesla 2050"))
        instance_type = db.api.instance_type_get_by_id(
            context.get_admin_context(),
            5)
        self.assertEquals(instance_type['extra_specs'], {})

    def test_instance_type_get_by_name_with_extra_specs(self):
        instance_type = db.api.instance_type_get_by_name(
            context.get_admin_context(),
            "cg1.4xlarge")
        self.assertEquals(instance_type['extra_specs'],
                          dict(cpu_arch="x86_64",
                               cpu_model="Nehalem",
                               xpu_arch="fermi",
                               xpus="2",
                               xpu_model="Tesla 2050"))
        instance_type = db.api.instance_type_get_by_name(
            context.get_admin_context(),
            "m1.small")
        self.assertEquals(instance_type['extra_specs'], {})

    def test_instance_type_get_by_flavor_id_with_extra_specs(self):
        # Renamed from test_instance_type_get_by_id_with_extra_specs to
        # remove the duplicate method name (see class docstring).
        instance_type = db.api.instance_type_get_by_flavor_id(
            context.get_admin_context(),
            105)
        self.assertEquals(instance_type['extra_specs'],
                          dict(cpu_arch="x86_64",
                               cpu_model="Nehalem",
                               xpu_arch="fermi",
                               xpus="2",
                               xpu_model="Tesla 2050"))
        instance_type = db.api.instance_type_get_by_flavor_id(
            context.get_admin_context(),
            2)
        self.assertEquals(instance_type['extra_specs'], {})

    def test_instance_type_get_all(self):
        specs = dict(cpu_arch="x86_64",
                     cpu_model="Nehalem",
                     xpu_arch="fermi",
                     xpus='2',
                     xpu_model="Tesla 2050")
        types = db.api.instance_type_get_all(context.get_admin_context())
        self.assertEquals(types['cg1.4xlarge']['extra_specs'], specs)
        self.assertEquals(types['m1.small']['extra_specs'], {})

View File

@@ -70,11 +70,15 @@ class S3APITestCase(test.TestCase):
os.mkdir(FLAGS.buckets_path)
router = s3server.S3Application(FLAGS.buckets_path)
server = wsgi.Server()
server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
self.server = wsgi.Server("S3 Objectstore",
router,
host=FLAGS.s3_host,
port=FLAGS.s3_port)
self.server.start()
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')
conn = s3.S3Connection(aws_access_key_id=self.admin_user.access,
aws_secret_access_key=self.admin_user.secret,
@@ -145,4 +149,5 @@ class S3APITestCase(test.TestCase):
"""Tear down auth and test server."""
self.auth_manager.delete_user('admin')
self.auth_manager.delete_project('admin')
self.server.stop()
super(S3APITestCase, self).tearDown()

View File

@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
from nova import wsgi
from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
@@ -349,3 +350,32 @@ class ServiceTestCase(test.TestCase):
serv.stop()
db.service_destroy(ctxt, service_ref['id'])
class TestWSGIService(test.TestCase):
    """Exercise service.WSGIService start/stop with a stubbed app loader."""
    def setUp(self):
        super(TestWSGIService, self).setUp()
        # Avoid loading a real paste config; any mock app object will do.
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
    def test_service_random_port(self):
        # Port 0 asks the OS for a free port; start() must record the
        # actual bound port on the service.
        test_service = service.WSGIService("test_service")
        self.assertEquals(0, test_service.port)
        test_service.start()
        self.assertNotEqual(0, test_service.port)
        test_service.stop()
class TestLauncher(test.TestCase):
    """Exercise service.Launcher with a WSGIService and stubbed loader."""
    def setUp(self):
        super(TestLauncher, self).setUp()
        # Avoid loading a real paste config; any mock app object will do.
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
        self.service = service.WSGIService("test_service")
    def test_launch_app(self):
        # launch_service() runs the server in the launcher's own green
        # thread, so the service object's recorded port stays 0 here.
        self.assertEquals(0, self.service.port)
        launcher = service.Launcher()
        launcher.launch_service(self.service)
        self.assertEquals(0, self.service.port)
        launcher.stop()

95
nova/tests/test_wsgi.py Normal file
View File

@@ -0,0 +1,95 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for `nova.wsgi`."""
import os.path
import tempfile
import unittest
import nova.exception
import nova.test
import nova.wsgi
class TestLoaderNothingExists(unittest.TestCase):
    """Loader tests where os.path.exists always returns False."""

    def setUp(self):
        # Monkey-patch os.path.exists so no config path can be found.
        self._os_path_exists = os.path.exists
        os.path.exists = lambda _: False

    def tearDown(self):
        os.path.exists = self._os_path_exists

    def test_config_not_found(self):
        self.assertRaises(nova.exception.PasteConfigNotFound,
                          nova.wsgi.Loader)
class TestLoaderNormalFilesystem(unittest.TestCase):
    """Loader tests with normal filesystem (unmodified os.path module)."""

    _paste_config = """
[app:test_app]
use = egg:Paste#static
document_root = /tmp
"""

    def setUp(self):
        # Write the paste config to a real temporary file so the Loader
        # exercises its normal filesystem path.
        self.config = tempfile.NamedTemporaryFile(mode="w+t")
        self.config.write(self._paste_config.lstrip())
        self.config.seek(0)
        self.config.flush()
        self.loader = nova.wsgi.Loader(self.config.name)

    def tearDown(self):
        self.config.close()

    def test_config_found(self):
        self.assertEquals(self.config.name, self.loader.config_path)

    def test_app_not_found(self):
        self.assertRaises(nova.exception.PasteAppNotFound,
                          self.loader.load_app, "non-existant app")

    def test_app_found(self):
        static_app = self.loader.load_app("test_app")
        self.assertEquals("/tmp", static_app.directory)
class TestWSGIServer(unittest.TestCase):
    """WSGI server tests."""

    def test_no_app(self):
        """A server can be constructed without an application."""
        srv = nova.wsgi.Server("test_app", None)
        self.assertEquals("test_app", srv.name)

    def test_start_random_port(self):
        """Starting with port 0 binds an OS-assigned port."""
        srv = nova.wsgi.Server("test_random_port", None, host="127.0.0.1")
        self.assertEqual(0, srv.port)
        srv.start()
        self.assertNotEqual(0, srv.port)
        srv.stop()
        srv.wait()

View File

@@ -46,6 +46,7 @@ from eventlet.green import subprocess
from nova import exception
from nova import flags
from nova import log as logging
from nova import version
LOG = logging.getLogger("nova.utils")
@@ -226,8 +227,10 @@ def novadir():
return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0]
def default_flagfile(filename='nova.conf'):
for arg in sys.argv:
def default_flagfile(filename='nova.conf', args=None):
if args is None:
args = sys.argv
for arg in args:
if arg.find('flagfile') != -1:
break
else:
@@ -239,8 +242,8 @@ def default_flagfile(filename='nova.conf'):
filename = "./nova.conf"
if not os.path.exists(filename):
filename = '/etc/nova/nova.conf'
flagfile = ['--flagfile=%s' % filename]
sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
flagfile = '--flagfile=%s' % filename
args.insert(1, flagfile)
def debug(arg):
@@ -279,6 +282,22 @@ EASIER_PASSWORD_SYMBOLS = ('23456789' # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def usage_from_instance(instance_ref, **kw):
    """Build the usage-notification payload for an instance.

    Extra keyword arguments are merged into (and may override) the
    generated payload.
    """
    launched_at = instance_ref['launched_at']
    usage_info = {
        'tenant_id': instance_ref['project_id'],
        'user_id': instance_ref['user_id'],
        'instance_id': instance_ref['id'],
        'instance_type': instance_ref['instance_type']['name'],
        'instance_type_id': instance_ref['instance_type_id'],
        'display_name': instance_ref['display_name'],
        'created_at': str(instance_ref['created_at']),
        # Empty string, not 'None', for instances never launched.
        'launched_at': str(launched_at) if launched_at else '',
        'image_ref': instance_ref['image_ref']}
    usage_info.update(kw)
    return usage_info
def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbols.
@@ -751,3 +770,39 @@ def is_uuid_like(val):
if not isinstance(val, basestring):
return False
return (len(val) == 36) and (val.count('-') == 4)
class Bootstrapper(object):
    """Provides environment bootstrapping capabilities for entry points."""

    @staticmethod
    def bootstrap_binary(argv):
        """Initialize the Nova environment using command line arguments."""
        Bootstrapper.setup_flags(argv)
        Bootstrapper.setup_logging()
        Bootstrapper.log_flags()

    @staticmethod
    def setup_logging():
        """Initialize logging and log a message indicating the Nova version."""
        logging.setup()
        logging.audit(_("Nova Version (%s)") %
                      version.version_string_with_vcs())

    @staticmethod
    def setup_flags(input_flags):
        """Initialize flags, load flag file, and print help if needed."""
        default_flagfile(args=input_flags)
        FLAGS(input_flags or [])
        flags.DEFINE_flag(flags.HelpFlag())
        flags.DEFINE_flag(flags.HelpshortFlag())
        flags.DEFINE_flag(flags.HelpXMLFlag())
        FLAGS.ParseNewFlags()

    @staticmethod
    def log_flags():
        """Log the list of all active flags being used."""
        logging.audit(_("Currently active flags:"))
        for key in FLAGS:
            value = FLAGS.get(key, None)
            # BUG FIX: translate the constant template first, then
            # interpolate.  The old code passed the already-interpolated
            # string to _(), so the gettext catalog lookup could never
            # match a message id.
            logging.audit(_("%(key)s : %(value)s") % locals())

View File

@@ -185,6 +185,7 @@ class LibvirtConnection(driver.ComputeDriver):
if state != power_state.RUNNING:
continue
self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self.firewall_driver.apply_instance_filter(instance)

View File

@@ -21,16 +21,16 @@
import os
import sys
from xml.dom import minidom
import eventlet
import eventlet.wsgi
eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
import routes
import greenlet
import routes.middleware
import webob
import webob.dec
import webob.exc
from paste import deploy
from nova import exception
@@ -39,49 +39,86 @@ from nova import log as logging
from nova import utils
eventlet.patcher.monkey_patch(socket=True, time=True)
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.wsgi')
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.DEBUG):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
"""Server class to manage a WSGI server, serving a WSGI application."""
def __init__(self, threads=1000):
self.pool = eventlet.GreenPool(threads)
self.socket_info = {}
default_pool_size = 1000
def start(self, application, port, host='0.0.0.0', key=None, backlog=128):
"""Run a WSGI server with the given application."""
arg0 = sys.argv[0]
logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals())
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
if key:
self.socket_info[key] = socket.getsockname()
def __init__(self, name, app, host=None, port=None, pool_size=None):
"""Initialize, but do not start, a WSGI server.
:param name: Pretty name for logging.
:param app: The WSGI application to serve.
:param host: IP address to serve the application.
:param port: Port number to server the application.
:param pool_size: Maximum number of eventlets to spawn concurrently.
:returns: None
"""
self.name = name
self.app = app
self.host = host or "0.0.0.0"
self.port = port or 0
self._server = None
self._socket = None
self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
self._logger = logging.getLogger("eventlet.wsgi.server")
self._wsgi_logger = logging.WritableLogger(self._logger)
def _start(self):
"""Run the blocking eventlet WSGI server.
:returns: None
"""
eventlet.wsgi.server(self._socket,
self.app,
custom_pool=self._pool,
log=self._wsgi_logger)
def start(self, backlog=128):
"""Start serving a WSGI application.
:param backlog: Maximum number of queued connections.
:returns: None
"""
self._socket = eventlet.listen((self.host, self.port), backlog=backlog)
self._server = eventlet.spawn(self._start)
(self.host, self.port) = self._socket.getsockname()
LOG.info(_("Started %(name)s on %(host)s:%(port)s") % self.__dict__)
def stop(self):
"""Stop this server.
This is not a very nice action, as currently the method by which a
server is stopped is by killing it's eventlet.
:returns: None
"""
LOG.info(_("Stopping WSGI server."))
self._server.kill()
def wait(self):
"""Wait until all servers have completed running."""
try:
self.pool.waitall()
except KeyboardInterrupt:
pass
"""Block, until the server has stopped.
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
logger = logging.getLogger('eventlet.wsgi.server')
eventlet.wsgi.server(socket, application, custom_pool=self.pool,
log=WritableLogger(logger))
Waits on the server's eventlet to finish, then returns.
:returns: None
"""
try:
self._server.wait()
except greenlet.GreenletExit:
LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
@@ -309,55 +346,51 @@ class Router(object):
return app
def paste_config_file(basename):
"""Find the best location in the system for a paste config file.
class Loader(object):
"""Used to load WSGI applications from paste configurations."""
Search Order
------------
def __init__(self, config_path=None):
"""Initialize the loader, and attempt to find the config.
The search for a paste config file honors `FLAGS.state_path`, which in a
version checked out from bzr will be the `nova` directory in the top level
of the checkout, and in an installation for a package for your distribution
will likely point to someplace like /etc/nova.
:param config_path: Full or relative path to the paste config.
:returns: None
This method tries to load places likely to be used in development or
experimentation before falling back to the system-wide configuration
in `/etc/nova/`.
"""
config_path = config_path or FLAGS.api_paste_config
self.config_path = self._find_config(config_path)
* Current working directory
* the `etc` directory under state_path, because when working on a checkout
from bzr this will point to the default
* top level of FLAGS.state_path, for distributions
* /etc/nova, which may not be diffrerent from state_path on your distro
def _find_config(self, config_path):
"""Find the paste configuration file using the given hint.
"""
configfiles = [basename,
os.path.join(FLAGS.state_path, 'etc', 'nova', basename),
os.path.join(FLAGS.state_path, 'etc', basename),
os.path.join(FLAGS.state_path, basename),
'/etc/nova/%s' % basename]
for configfile in configfiles:
if os.path.exists(configfile):
return configfile
:param config_path: Full or relative path to the paste config.
:returns: Full path of the paste config, if it exists.
:raises: `nova.exception.PasteConfigNotFound`
"""
possible_locations = [
config_path,
os.path.join(FLAGS.state_path, "etc", "nova", config_path),
os.path.join(FLAGS.state_path, "etc", config_path),
os.path.join(FLAGS.state_path, config_path),
"/etc/nova/%s" % config_path,
]
def load_paste_configuration(filename, appname):
"""Returns a paste configuration dict, or None."""
filename = os.path.abspath(filename)
config = None
try:
config = deploy.appconfig('config:%s' % filename, name=appname)
except LookupError:
pass
return config
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise exception.PasteConfigNotFound(path=os.path.abspath(config_path))
def load_paste_app(filename, appname):
"""Builds a wsgi app from a paste config, None if app not configured."""
filename = os.path.abspath(filename)
app = None
try:
app = deploy.loadapp('config:%s' % filename, name=appname)
except LookupError:
pass
return app
def load_app(self, name):
"""Return the paste URLMap wrapped WSGI application.
:param name: Name of the application to load.
:returns: Paste URLMap object wrapping the requested application.
:raises: `nova.exception.PasteAppNotFound`
"""
try:
return deploy.loadapp("config:%s" % self.config_path, name=name)
except LookupError as err:
LOG.error(err)
raise exception.PasteAppNotFound(name=name, path=self.config_path)

View File

@@ -0,0 +1,20 @@
#!/bin/bash
PACKAGE=openstack-xen-plugins
RPMBUILD_DIR=$PWD/rpmbuild
if [ ! -d $RPMBUILD_DIR ]; then
echo $RPMBUILD_DIR is missing
exit 1
fi
for dir in BUILD BUILDROOT SRPMS RPMS SOURCES; do
rm -rf $RPMBUILD_DIR/$dir
mkdir -p $RPMBUILD_DIR/$dir
done
rm -rf /tmp/$PACKAGE
mkdir /tmp/$PACKAGE
cp -r ../etc/xapi.d /tmp/$PACKAGE
tar czf $RPMBUILD_DIR/SOURCES/$PACKAGE.tar.gz -C /tmp $PACKAGE
rpmbuild -ba --nodeps --define "_topdir $RPMBUILD_DIR" \
$RPMBUILD_DIR/SPECS/$PACKAGE.spec

View File

@@ -0,0 +1,36 @@
Name: openstack-xen-plugins
Version: 2011.3
Release: 1
Summary: Files for XenAPI support.
License: ASL 2.0
Group: Applications/Utilities
Source0: openstack-xen-plugins.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
Requires: parted
%define debug_package %{nil}
%description
This package contains files that are required for XenAPI support for OpenStack.
%prep
%setup -q -n openstack-xen-plugins
%install
rm -rf $RPM_BUILD_ROOT
mkdir -p $RPM_BUILD_ROOT/etc
cp -r xapi.d $RPM_BUILD_ROOT/etc
chmod a+x $RPM_BUILD_ROOT/etc/xapi.d/plugins/*
%clean
rm -rf $RPM_BUILD_ROOT
%files
%defattr(-,root,root,-)
/etc/xapi.d/plugins/agent
/etc/xapi.d/plugins/glance
/etc/xapi.d/plugins/migration
/etc/xapi.d/plugins/objectstore
/etc/xapi.d/plugins/pluginlib_nova.py
/etc/xapi.d/plugins/xenhost
/etc/xapi.d/plugins/xenstore.py

View File

@@ -69,7 +69,6 @@ from nose import core
from nose import result
from nova import log as logging
from nova.tests import fake_flags
class _AnsiColorizer(object):
@@ -211,11 +210,11 @@ class NovaTestResult(result.TextTestResult):
break
sys.stdout = stdout
# NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
# error results in it failing to be initialized later. Otherwise,
# NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
# error results in it failing to be initialized later. Otherwise,
# _handleElapsedTime will fail, causing the wrong error message to
# be outputted.
self.start_time = time.time()
self.start_time = time.time()
def getDescription(self, test):
return str(test)

View File

@@ -6,6 +6,7 @@ function usage {
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -r, --recreate-db Recreate the test database."
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run pep8"
@@ -23,6 +24,7 @@ function process_option {
-h|--help) usage;;
-V|--virtual-env) let always_venv=1; let never_venv=0;;
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
-r|--recreate-db) let recreate_db=1;;
-f|--force) let force=1;;
-p|--pep8) let just_pep8=1;;
-*) noseopts="$noseopts $1";;
@@ -39,6 +41,7 @@ noseargs=
noseopts=
wrapper=""
just_pep8=0
recreate_db=0
for arg in "$@"; do
process_option $arg
@@ -108,6 +111,10 @@ if [ $just_pep8 -eq 1 ]; then
exit
fi
if [ $recreate_db -eq 1 ]; then
rm tests.sqlite
fi
run_tests || exit
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,

View File

@@ -1,5 +1,5 @@
SQLAlchemy==0.6.3
pep8==0.5.0
pep8==0.6.1
pylint==0.19
Cheetah==2.4.4
M2Crypto==0.20.2
@@ -7,7 +7,7 @@ amqplib==0.6.1
anyjson==0.2.4
boto==1.9b
carrot==0.10.5
eventlet==0.9.12
eventlet
lockfile==0.8
python-novaclient==2.5.3
python-daemon==1.5.5