remerged trunk
.mailmap
@@ -47,3 +47,7 @@
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
<reldan@oscloud.ru> <enugaev@griddynamics.com>
<kshileev@gmail.com> <kshileev@griddynamics.com>
Authors
@@ -1,4 +1,5 @@
Alex Meade <alex.meade@rackspace.com>
Andrey Brindeyev <abrindeyev@griddynamics.com>
Andy Smith <code@term.ie>
Andy Southgate <andy.southgate@citrix.com>
Anne Gentle <anne@openstack.org>
@@ -16,18 +17,21 @@ Christian Berendt <berendt@b1-systems.de>
Chuck Short <zulcss@ubuntu.com>
Cory Wright <corywright@gmail.com>
Dan Prince <dan.prince@rackspace.com>
Dave Walker <DaveWalker@ubuntu.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eldar Nugaev <reldan@oscloud.ru>
Eric Day <eday@oddments.org>
Eric Windisch <eric@cloudscaling.com>
Ewan Mellor <ewan.mellor@citrix.com>
Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
Ilya Alekseyev <ilyaalekseyev@acm.org>
Isaku Yamahata <yamahata@valinux.co.jp>
Jason Cannavale <jason.cannavale@rackspace.com>
Jason Koelker <jason@koelker.net>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
@@ -49,6 +53,7 @@ Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Kirill Shileev <kshileev@gmail.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Lvov Maxim <usrleon@gmail.com>
@@ -56,6 +61,7 @@ Mark Washenberger <mark.washenberger@rackspace.com>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Mike Scherbakov <mihgen@gmail.com>
Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
@@ -64,6 +70,7 @@ Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
Naveed Massjouni <naveedm9@gmail.com>
Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
Paul Voccio <paul@openstack.org>
Renuka Apte <renuka.apte@citrix.com>
Ricardo Carrillo Cruz <emaildericky@gmail.com>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
@@ -80,6 +87,7 @@ Trey Morris <trey.morris@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
Vivek Y S <vivek.ys@gmail.com>
William Wolf <throughnothing@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
@@ -35,6 +35,7 @@ include nova/tests/bundle/1mb.manifest.xml
include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
include nova/tests/bundle/1mb.part.0
include nova/tests/bundle/1mb.part.1
include nova/tests/public_key/*
include nova/tests/db/nova.austin.sqlite
include plugins/xenapi/README
include plugins/xenapi/etc/xapi.d/plugins/objectstore
@@ -108,6 +108,13 @@ def main():
    interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface)
    if int(os.environ.get('TESTING', '0')):
        from nova.tests import fake_flags

    #if FLAGS.fake_rabbit:
    #    LOG.debug(_("leasing ip"))
    #    network_manager = utils.import_object(FLAGS.network_manager)
    ##    reload(fake_flags)
    #    from nova.tests import fake_flags

    action = argv[1]
    if action in ['add', 'del', 'old']:
        mac = argv[2]
bin/nova-manage
@@ -53,15 +53,14 @@
CLI interface for nova management.
"""

import datetime
import gettext
import glob
import json
import netaddr
import os
import sys
import time

import IPy

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -78,6 +77,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import quota
from nova import rpc
@@ -96,8 +96,8 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn')
flags.DECLARE('gateway_v6', 'nova.network.manager')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -257,6 +257,11 @@ class RoleCommands(object):
        """adds role to user
        if project is specified, adds project specific role
        arguments: user, role [project]"""
        if project:
            projobj = self.manager.get_project(project)
            if not projobj.has_member(user):
                print "%s not a member of %s" % (user, project)
                return
        self.manager.add_role(user, role, project)

    def has(self, user, role, project=None):
@@ -362,27 +367,47 @@ class ProjectCommands(object):
    def add(self, project_id, user_id):
        """Adds user to project
        arguments: project_id user_id"""
        self.manager.add_to_project(user_id, project_id)
        try:
            self.manager.add_to_project(user_id, project_id)
        except exception.UserNotFound as ex:
            print ex
            raise

    def create(self, name, project_manager, description=None):
        """Creates a new project
        arguments: name project_manager [description]"""
        self.manager.create_project(name, project_manager, description)
        try:
            self.manager.create_project(name, project_manager, description)
        except exception.UserNotFound as ex:
            print ex
            raise

    def modify(self, name, project_manager, description=None):
        """Modifies a project
        arguments: name project_manager [description]"""
        self.manager.modify_project(name, project_manager, description)
        try:
            self.manager.modify_project(name, project_manager, description)
        except exception.UserNotFound as ex:
            print ex
            raise

    def delete(self, name):
        """Deletes an existing project
        arguments: name"""
        self.manager.delete_project(name)
        try:
            self.manager.delete_project(name)
        except exception.ProjectNotFound as ex:
            print ex
            raise

    def environment(self, project_id, user_id, filename='novarc'):
        """Exports environment variables to a sourceable file
        arguments: project_id user_id [filename='novarc']"""
        rc = self.manager.get_environment_rc(user_id, project_id)
        try:
            rc = self.manager.get_environment_rc(user_id, project_id)
        except (exception.UserNotFound, exception.ProjectNotFound) as ex:
            print ex
            raise
        with open(filename, 'w') as f:
            f.write(rc)

@@ -397,18 +422,26 @@ class ProjectCommands(object):
        arguments: project_id [key] [value]"""
        ctxt = context.get_admin_context()
        if key:
            if value.lower() == 'unlimited':
                value = None
            try:
                db.quota_update(ctxt, project_id, key, value)
            except exception.NotFound:
            except exception.ProjectQuotaNotFound:
                db.quota_create(ctxt, project_id, key, value)
        project_quota = quota.get_quota(ctxt, project_id)
        project_quota = quota.get_project_quotas(ctxt, project_id)
        for key, value in project_quota.iteritems():
            if value is None:
                value = 'unlimited'
            print '%s: %s' % (key, value)

    def remove(self, project_id, user_id):
        """Removes user from project
        arguments: project_id user_id"""
        self.manager.remove_from_project(user_id, project_id)
        try:
            self.manager.remove_from_project(user_id, project_id)
        except (exception.UserNotFound, exception.ProjectNotFound) as ex:
            print ex
            raise

    def scrub(self, project_id):
        """Deletes data associated with project
@@ -427,6 +460,9 @@ class ProjectCommands(object):
            zip_file = self.manager.get_credentials(user_id, project_id)
            with open(filename, 'w') as f:
                f.write(zip_file)
        except (exception.UserNotFound, exception.ProjectNotFound) as ex:
            print ex
            raise
        except db.api.NoMoreNetworks:
            print _('No more networks available. If this is a new '
                    'installation, you need\nto call something like this:\n\n'
@@ -482,7 +518,7 @@ class FloatingIpCommands(object):
    def create(self, host, range):
        """Creates floating ips for host by range
        arguments: host ip_range"""
        for address in IPy.IP(range):
        for address in netaddr.IPNetwork(range):
            db.floating_ip_create(context.get_admin_context(),
                                  {'address': str(address),
                                   'host': host})
@@ -490,7 +526,7 @@ class FloatingIpCommands(object):
    def delete(self, ip_range):
        """Deletes floating ips by range
        arguments: range"""
        for address in IPy.IP(ip_range):
        for address in netaddr.IPNetwork(ip_range):
            db.floating_ip_destroy(context.get_admin_context(),
                                   str(address))

@@ -505,7 +541,7 @@ class FloatingIpCommands(object):
        for floating_ip in floating_ips:
            instance = None
            if floating_ip['fixed_ip']:
                instance = floating_ip['fixed_ip']['instance']['ec2_id']
                instance = floating_ip['fixed_ip']['instance']['hostname']
            print "%s\t%s\t%s" % (floating_ip['host'],
                                  floating_ip['address'],
                                  instance)
@@ -514,16 +550,15 @@ class FloatingIpCommands(object):
class NetworkCommands(object):
    """Class for managing networks."""

    def create(self, fixed_range=None, num_networks=None,
               network_size=None, vlan_start=None,
               vpn_start=None, fixed_range_v6=None, label='public'):
        """Creates fixed ips for host by range
        arguments: fixed_range=FLAG, [num_networks=FLAG],
                   [network_size=FLAG], [vlan_start=FLAG],
                   [vpn_start=FLAG], [fixed_range_v6=FLAG]"""
    def create(self, fixed_range=None, num_networks=None, network_size=None,
               vlan_start=None, vpn_start=None, fixed_range_v6=None,
               gateway_v6=None, label='public'):
        """Creates fixed ips for host by range"""
        if not fixed_range:
            raise TypeError(_('Fixed range in the form of 10.0.0.0/8 is '
                              'required to create networks.'))
            msg = _('Fixed range in the form of 10.0.0.0/8 is '
                    'required to create networks.')
            print msg
            raise TypeError(msg)
        if not num_networks:
            num_networks = FLAGS.num_networks
        if not network_size:
@@ -534,15 +569,22 @@ class NetworkCommands(object):
            vpn_start = FLAGS.vpn_start
        if not fixed_range_v6:
            fixed_range_v6 = FLAGS.fixed_range_v6
        if not gateway_v6:
            gateway_v6 = FLAGS.gateway_v6
        net_manager = utils.import_object(FLAGS.network_manager)
        net_manager.create_networks(context.get_admin_context(),
                                    cidr=fixed_range,
                                    num_networks=int(num_networks),
                                    network_size=int(network_size),
                                    vlan_start=int(vlan_start),
                                    vpn_start=int(vpn_start),
                                    cidr_v6=fixed_range_v6,
                                    label=label)
        try:
            net_manager.create_networks(context.get_admin_context(),
                                        cidr=fixed_range,
                                        num_networks=int(num_networks),
                                        network_size=int(network_size),
                                        vlan_start=int(vlan_start),
                                        vpn_start=int(vpn_start),
                                        cidr_v6=fixed_range_v6,
                                        gateway_v6=gateway_v6,
                                        label=label)
        except ValueError, e:
            print e
            raise e

    def list(self):
        """List all created networks"""
@@ -652,7 +694,7 @@ class ServiceCommands(object):
        """Show a list of all running services. Filter by host & service name.
        args: [host] [service]"""
        ctxt = context.get_admin_context()
        now = datetime.datetime.utcnow()
        now = utils.utcnow()
        services = db.service_get_all(ctxt)
        if host:
            services = [s for s in services if s['host'] == host]
@@ -836,7 +878,7 @@ class InstanceTypeCommands(object):
        try:
            instance_types.create(name, memory, vcpus, local_gb,
                                  flavorid, swap, rxtx_quota, rxtx_cap)
        except exception.InvalidInputException:
        except exception.InvalidInput:
            print "Must supply valid parameters to create instance_type"
            print e
            sys.exit(1)
@@ -899,7 +941,7 @@ class ImageCommands(object):
    """Methods for dealing with a cloud in an odd state"""

    def __init__(self, *args, **kwargs):
        self.image_service = utils.import_object(FLAGS.image_service)
        self.image_service = image.get_default_image_service()

    def _register(self, container_format, disk_format,
                  path, owner, name=None, is_public='T',
@@ -1018,16 +1060,6 @@ class ImageCommands(object):
        machine_images = {}
        other_images = {}
        directory = os.path.abspath(directory)
        # NOTE(vish): If we're importing from the images path dir, attempt
        #             to move the files out of the way before importing
        #             so we aren't writing to the same directory. This
        #             may fail if the dir was a mountpoint.
        if (FLAGS.image_service == 'nova.image.local.LocalImageService'
                and directory == os.path.abspath(FLAGS.images_path)):
            new_dir = "%s_bak" % directory
            os.rename(directory, new_dir)
            os.mkdir(directory)
            directory = new_dir
        for fn in glob.glob("%s/*/info.json" % directory):
            try:
                image_path = os.path.join(fn.rpartition('/')[0], 'image')
@@ -1044,24 +1076,100 @@ class ImageCommands(object):
        self._convert_images(machine_images)


class AgentBuildCommands(object):
    """Class for managing agent builds."""

    def create(self, os, architecture, version, url, md5hash,
               hypervisor='xen'):
        """Creates a new agent build.
        arguments: os architecture version url md5hash [hypervisor='xen']"""
        ctxt = context.get_admin_context()
        agent_build = db.agent_build_create(ctxt,
                                            {'hypervisor': hypervisor,
                                             'os': os,
                                             'architecture': architecture,
                                             'version': version,
                                             'url': url,
                                             'md5hash': md5hash})

    def delete(self, os, architecture, hypervisor='xen'):
        """Deletes an existing agent build.
        arguments: os architecture [hypervisor='xen']"""
        ctxt = context.get_admin_context()
        agent_build_ref = db.agent_build_get_by_triple(ctxt,
                                                       hypervisor, os, architecture)
        db.agent_build_destroy(ctxt, agent_build_ref['id'])

    def list(self, hypervisor=None):
        """Lists all agent builds.
        arguments: <none>"""
        fmt = "%-10s %-8s %12s %s"
        ctxt = context.get_admin_context()
        by_hypervisor = {}
        for agent_build in db.agent_build_get_all(ctxt):
            buildlist = by_hypervisor.get(agent_build.hypervisor)
            if not buildlist:
                buildlist = by_hypervisor[agent_build.hypervisor] = []

            buildlist.append(agent_build)

        for key, buildlist in by_hypervisor.iteritems():
            if hypervisor and key != hypervisor:
                continue

            print "Hypervisor: %s" % key
            print fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32)
            for agent_build in buildlist:
                print fmt % (agent_build.os, agent_build.architecture,
                             agent_build.version, agent_build.md5hash)
                print ' %s' % agent_build.url

            print

    def modify(self, os, architecture, version, url, md5hash,
               hypervisor='xen'):
        """Update an existing agent build.
        arguments: os architecture version url md5hash [hypervisor='xen']
        """
        ctxt = context.get_admin_context()
        agent_build_ref = db.agent_build_get_by_triple(ctxt,
                                                       hypervisor, os, architecture)
        db.agent_build_update(ctxt, agent_build_ref['id'],
                              {'version': version,
                               'url': url,
                               'md5hash': md5hash})


class ConfigCommands(object):
    """Class for exposing the flags defined by flag_file(s)."""

    def __init__(self):
        pass

    def list(self):
        print FLAGS.FlagsIntoString()


CATEGORIES = [
    ('user', UserCommands),
    ('account', AccountCommands),
    ('project', ProjectCommands),
    ('role', RoleCommands),
    ('shell', ShellCommands),
    ('vpn', VpnCommands),
    ('fixed', FixedIpCommands),
    ('floating', FloatingIpCommands),
    ('network', NetworkCommands),
    ('vm', VmCommands),
    ('service', ServiceCommands),
    ('agent', AgentBuildCommands),
    ('config', ConfigCommands),
    ('db', DbCommands),
    ('volume', VolumeCommands),
    ('fixed', FixedIpCommands),
    ('flavor', InstanceTypeCommands),
    ('floating', FloatingIpCommands),
    ('instance_type', InstanceTypeCommands),
    ('image', ImageCommands),
    ('flavor', InstanceTypeCommands),
    ('version', VersionCommands)]
    ('network', NetworkCommands),
    ('project', ProjectCommands),
    ('role', RoleCommands),
    ('service', ServiceCommands),
    ('shell', ShellCommands),
    ('user', UserCommands),
    ('version', VersionCommands),
    ('vm', VmCommands),
    ('volume', VolumeCommands),
    ('vpn', VpnCommands)]


def lazy_match(name, key_value_tuples):
@@ -65,7 +65,7 @@ def format_help(d):
    indent = MAX_INDENT - 6

    out = []
    for k, v in d.iteritems():
    for k, v in sorted(d.iteritems()):
        if (len(k) + 6) > MAX_INDENT:
            out.extend([' %s' % k])
            initial_indent = ' ' * (indent + 6)
@@ -1,283 +0,0 @@
|
||||
source/api/nova..adminclient.rst
|
||||
source/api/nova..api.direct.rst
|
||||
source/api/nova..api.ec2.admin.rst
|
||||
source/api/nova..api.ec2.apirequest.rst
|
||||
source/api/nova..api.ec2.cloud.rst
|
||||
source/api/nova..api.ec2.metadatarequesthandler.rst
|
||||
source/api/nova..api.openstack.auth.rst
|
||||
source/api/nova..api.openstack.backup_schedules.rst
|
||||
source/api/nova..api.openstack.common.rst
|
||||
source/api/nova..api.openstack.consoles.rst
|
||||
source/api/nova..api.openstack.faults.rst
|
||||
source/api/nova..api.openstack.flavors.rst
|
||||
source/api/nova..api.openstack.images.rst
|
||||
source/api/nova..api.openstack.servers.rst
|
||||
source/api/nova..api.openstack.shared_ip_groups.rst
|
||||
source/api/nova..api.openstack.zones.rst
|
||||
source/api/nova..auth.dbdriver.rst
|
||||
source/api/nova..auth.fakeldap.rst
|
||||
source/api/nova..auth.ldapdriver.rst
|
||||
source/api/nova..auth.manager.rst
|
||||
source/api/nova..auth.signer.rst
|
||||
source/api/nova..cloudpipe.pipelib.rst
|
||||
source/api/nova..compute.api.rst
|
||||
source/api/nova..compute.instance_types.rst
|
||||
source/api/nova..compute.manager.rst
|
||||
source/api/nova..compute.monitor.rst
|
||||
source/api/nova..compute.power_state.rst
|
||||
source/api/nova..console.api.rst
|
||||
source/api/nova..console.fake.rst
|
||||
source/api/nova..console.manager.rst
|
||||
source/api/nova..console.xvp.rst
|
||||
source/api/nova..context.rst
|
||||
source/api/nova..crypto.rst
|
||||
source/api/nova..db.api.rst
|
||||
source/api/nova..db.base.rst
|
||||
source/api/nova..db.migration.rst
|
||||
source/api/nova..db.sqlalchemy.api.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
|
||||
source/api/nova..db.sqlalchemy.migration.rst
|
||||
source/api/nova..db.sqlalchemy.models.rst
|
||||
source/api/nova..db.sqlalchemy.session.rst
|
||||
source/api/nova..exception.rst
|
||||
source/api/nova..fakememcache.rst
|
||||
source/api/nova..fakerabbit.rst
|
||||
source/api/nova..flags.rst
|
||||
source/api/nova..image.glance.rst
|
||||
source/api/nova..image.local.rst
|
||||
source/api/nova..image.s3.rst
|
||||
source/api/nova..image.service.rst
|
||||
source/api/nova..log.rst
|
||||
source/api/nova..manager.rst
|
||||
source/api/nova..network.api.rst
|
||||
source/api/nova..network.linux_net.rst
|
||||
source/api/nova..network.manager.rst
|
||||
source/api/nova..objectstore.bucket.rst
|
||||
source/api/nova..objectstore.handler.rst
|
||||
source/api/nova..objectstore.image.rst
|
||||
source/api/nova..objectstore.stored.rst
|
||||
source/api/nova..quota.rst
|
||||
source/api/nova..rpc.rst
|
||||
source/api/nova..scheduler.chance.rst
|
||||
source/api/nova..scheduler.driver.rst
|
||||
source/api/nova..scheduler.manager.rst
|
||||
source/api/nova..scheduler.simple.rst
|
||||
source/api/nova..scheduler.zone.rst
|
||||
source/api/nova..service.rst
|
||||
source/api/nova..test.rst
|
||||
source/api/nova..tests.api.openstack.fakes.rst
|
||||
source/api/nova..tests.api.openstack.test_adminapi.rst
|
||||
source/api/nova..tests.api.openstack.test_api.rst
|
||||
source/api/nova..tests.api.openstack.test_auth.rst
|
||||
source/api/nova..tests.api.openstack.test_common.rst
|
||||
source/api/nova..tests.api.openstack.test_faults.rst
|
||||
source/api/nova..tests.api.openstack.test_flavors.rst
|
||||
source/api/nova..tests.api.openstack.test_images.rst
|
||||
source/api/nova..tests.api.openstack.test_ratelimiting.rst
|
||||
source/api/nova..tests.api.openstack.test_servers.rst
|
||||
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
|
||||
source/api/nova..tests.api.openstack.test_zones.rst
|
||||
source/api/nova..tests.api.test_wsgi.rst
|
||||
source/api/nova..tests.db.fakes.rst
|
||||
source/api/nova..tests.declare_flags.rst
|
||||
source/api/nova..tests.fake_flags.rst
|
||||
source/api/nova..tests.glance.stubs.rst
|
||||
source/api/nova..tests.hyperv_unittest.rst
|
||||
source/api/nova..tests.objectstore_unittest.rst
|
||||
source/api/nova..tests.real_flags.rst
|
||||
source/api/nova..tests.runtime_flags.rst
|
||||
source/api/nova..tests.test_access.rst
|
||||
source/api/nova..tests.test_api.rst
|
||||
source/api/nova..tests.test_auth.rst
|
||||
source/api/nova..tests.test_cloud.rst
|
||||
source/api/nova..tests.test_compute.rst
|
||||
source/api/nova..tests.test_console.rst
|
||||
source/api/nova..tests.test_direct.rst
|
||||
source/api/nova..tests.test_flags.rst
|
||||
source/api/nova..tests.test_instance_types.rst
|
||||
source/api/nova..tests.test_localization.rst
|
||||
source/api/nova..tests.test_log.rst
|
||||
source/api/nova..tests.test_middleware.rst
|
||||
source/api/nova..tests.test_misc.rst
|
||||
source/api/nova..tests.test_network.rst
|
||||
source/api/nova..tests.test_quota.rst
|
||||
source/api/nova..tests.test_rpc.rst
|
||||
source/api/nova..tests.test_scheduler.rst
|
||||
source/api/nova..tests.test_service.rst
|
||||
source/api/nova..tests.test_test.rst
|
||||
source/api/nova..tests.test_twistd.rst
|
||||
source/api/nova..tests.test_utils.rst
|
||||
source/api/nova..tests.test_virt.rst
|
||||
source/api/nova..tests.test_volume.rst
|
||||
source/api/nova..tests.test_xenapi.rst
|
||||
source/api/nova..tests.xenapi.stubs.rst
|
||||
source/api/nova..twistd.rst
|
||||
source/api/nova..utils.rst
|
||||
source/api/nova..version.rst
|
||||
source/api/nova..virt.connection.rst
|
||||
source/api/nova..virt.disk.rst
|
||||
source/api/nova..virt.fake.rst
|
||||
source/api/nova..virt.hyperv.rst
|
||||
source/api/nova..virt.images.rst
|
||||
source/api/nova..virt.libvirt_conn.rst
|
||||
source/api/nova..virt.xenapi.fake.rst
|
||||
source/api/nova..virt.xenapi.network_utils.rst
|
||||
source/api/nova..virt.xenapi.vm_utils.rst
|
||||
source/api/nova..virt.xenapi.vmops.rst
|
||||
source/api/nova..virt.xenapi.volume_utils.rst
|
||||
source/api/nova..virt.xenapi.volumeops.rst
|
||||
source/api/nova..virt.xenapi_conn.rst
|
||||
source/api/nova..volume.api.rst
|
||||
source/api/nova..volume.driver.rst
|
||||
source/api/nova..volume.manager.rst
|
||||
source/api/nova..volume.san.rst
|
||||
source/api/nova..wsgi.rst
|
||||
source/api/autoindex.rst
|
||||
source/api/nova..adminclient.rst
|
||||
source/api/nova..api.direct.rst
|
||||
source/api/nova..api.ec2.admin.rst
|
||||
source/api/nova..api.ec2.apirequest.rst
|
||||
source/api/nova..api.ec2.cloud.rst
|
||||
source/api/nova..api.ec2.metadatarequesthandler.rst
|
||||
source/api/nova..api.openstack.auth.rst
|
||||
source/api/nova..api.openstack.backup_schedules.rst
|
||||
source/api/nova..api.openstack.common.rst
|
||||
source/api/nova..api.openstack.consoles.rst
|
||||
source/api/nova..api.openstack.faults.rst
|
||||
source/api/nova..api.openstack.flavors.rst
|
||||
source/api/nova..api.openstack.images.rst
|
||||
source/api/nova..api.openstack.servers.rst
|
||||
source/api/nova..api.openstack.shared_ip_groups.rst
|
||||
source/api/nova..api.openstack.zones.rst
|
||||
source/api/nova..auth.dbdriver.rst
|
||||
source/api/nova..auth.fakeldap.rst
|
||||
source/api/nova..auth.ldapdriver.rst
|
||||
source/api/nova..auth.manager.rst
|
||||
source/api/nova..auth.signer.rst
|
||||
source/api/nova..cloudpipe.pipelib.rst
|
||||
source/api/nova..compute.api.rst
|
||||
source/api/nova..compute.instance_types.rst
|
||||
source/api/nova..compute.manager.rst
|
||||
source/api/nova..compute.monitor.rst
|
||||
source/api/nova..compute.power_state.rst
|
||||
source/api/nova..console.api.rst
|
||||
source/api/nova..console.fake.rst
|
||||
source/api/nova..console.manager.rst
|
||||
source/api/nova..console.xvp.rst
|
||||
source/api/nova..context.rst
|
||||
source/api/nova..crypto.rst
|
||||
source/api/nova..db.api.rst
|
||||
source/api/nova..db.base.rst
|
||||
source/api/nova..db.migration.rst
|
||||
source/api/nova..db.sqlalchemy.api.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
|
||||
source/api/nova..db.sqlalchemy.migration.rst
|
||||
source/api/nova..db.sqlalchemy.models.rst
|
||||
source/api/nova..db.sqlalchemy.session.rst
|
||||
source/api/nova..exception.rst
|
||||
source/api/nova..fakememcache.rst
|
||||
source/api/nova..fakerabbit.rst
|
||||
source/api/nova..flags.rst
|
||||
source/api/nova..image.glance.rst
|
||||
source/api/nova..image.local.rst
|
||||
source/api/nova..image.s3.rst
|
||||
source/api/nova..image.service.rst
|
||||
source/api/nova..log.rst
|
||||
source/api/nova..manager.rst
|
||||
source/api/nova..network.api.rst
|
||||
source/api/nova..network.linux_net.rst
|
||||
source/api/nova..network.manager.rst
|
||||
source/api/nova..objectstore.bucket.rst
|
||||
source/api/nova..objectstore.handler.rst
|
||||
source/api/nova..objectstore.image.rst
|
||||
source/api/nova..objectstore.stored.rst
|
||||
source/api/nova..quota.rst
|
||||
source/api/nova..rpc.rst
|
||||
source/api/nova..scheduler.chance.rst
|
||||
source/api/nova..scheduler.driver.rst
|
||||
source/api/nova..scheduler.manager.rst
|
||||
source/api/nova..scheduler.simple.rst
|
||||
source/api/nova..scheduler.zone.rst
|
||||
source/api/nova..service.rst
|
||||
source/api/nova..test.rst
|
||||
source/api/nova..tests.api.openstack.fakes.rst
|
||||
source/api/nova..tests.api.openstack.test_adminapi.rst
|
||||
source/api/nova..tests.api.openstack.test_api.rst
|
||||
source/api/nova..tests.api.openstack.test_auth.rst
|
||||
source/api/nova..tests.api.openstack.test_common.rst
|
||||
source/api/nova..tests.api.openstack.test_faults.rst
|
||||
source/api/nova..tests.api.openstack.test_flavors.rst
|
||||
source/api/nova..tests.api.openstack.test_images.rst
|
||||
source/api/nova..tests.api.openstack.test_ratelimiting.rst
|
||||
source/api/nova..tests.api.openstack.test_servers.rst
|
||||
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
|
||||
source/api/nova..tests.api.openstack.test_zones.rst
|
||||
source/api/nova..tests.api.test_wsgi.rst
|
||||
source/api/nova..tests.db.fakes.rst
|
||||
source/api/nova..tests.declare_flags.rst
|
||||
source/api/nova..tests.fake_flags.rst
|
||||
source/api/nova..tests.glance.stubs.rst
|
||||
source/api/nova..tests.hyperv_unittest.rst
|
||||
source/api/nova..tests.objectstore_unittest.rst
|
||||
source/api/nova..tests.real_flags.rst
|
||||
source/api/nova..tests.runtime_flags.rst
|
||||
source/api/nova..tests.test_access.rst
|
||||
source/api/nova..tests.test_api.rst
|
||||
source/api/nova..tests.test_auth.rst
|
||||
source/api/nova..tests.test_cloud.rst
|
||||
source/api/nova..tests.test_compute.rst
|
||||
source/api/nova..tests.test_console.rst
|
||||
source/api/nova..tests.test_direct.rst
|
||||
source/api/nova..tests.test_flags.rst
|
||||
source/api/nova..tests.test_instance_types.rst
|
||||
source/api/nova..tests.test_localization.rst
|
||||
source/api/nova..tests.test_log.rst
|
||||
source/api/nova..tests.test_middleware.rst
|
||||
source/api/nova..tests.test_misc.rst
|
||||
source/api/nova..tests.test_network.rst
|
||||
source/api/nova..tests.test_quota.rst
|
||||
source/api/nova..tests.test_rpc.rst
|
||||
source/api/nova..tests.test_scheduler.rst
|
||||
source/api/nova..tests.test_service.rst
|
||||
source/api/nova..tests.test_test.rst
|
||||
source/api/nova..tests.test_twistd.rst
|
||||
source/api/nova..tests.test_utils.rst
|
||||
source/api/nova..tests.test_virt.rst
|
||||
source/api/nova..tests.test_volume.rst
|
||||
source/api/nova..tests.test_xenapi.rst
|
||||
source/api/nova..tests.xenapi.stubs.rst
|
||||
source/api/nova..twistd.rst
|
||||
source/api/nova..utils.rst
|
||||
source/api/nova..version.rst
|
||||
source/api/nova..virt.connection.rst
|
||||
source/api/nova..virt.disk.rst
|
||||
source/api/nova..virt.fake.rst
|
||||
source/api/nova..virt.hyperv.rst
|
||||
source/api/nova..virt.images.rst
|
||||
source/api/nova..virt.libvirt_conn.rst
|
||||
source/api/nova..virt.xenapi.fake.rst
|
||||
source/api/nova..virt.xenapi.network_utils.rst
|
||||
source/api/nova..virt.xenapi.vm_utils.rst
|
||||
source/api/nova..virt.xenapi.vmops.rst
|
||||
source/api/nova..virt.xenapi.volume_utils.rst
|
||||
source/api/nova..virt.xenapi.volumeops.rst
|
||||
source/api/nova..virt.xenapi_conn.rst
|
||||
source/api/nova..volume.api.rst
|
||||
source/api/nova..volume.driver.rst
|
||||
source/api/nova..volume.manager.rst
|
||||
source/api/nova..volume.san.rst
|
||||
source/api/nova..wsgi.rst
|
||||
doc/source/devref/distributed_scheduler.rst
@@ -0,0 +1,188 @@
..
    Copyright 2011 OpenStack LLC
    All Rights Reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    License for the specific language governing permissions and limitations
    under the License.

    Source for illustrations in doc/source/image_src/zone_distsched_illustrations.odp
    (OpenOffice Impress format). Illustrations are "exported" to png and then scaled
    to 400x300 or 640x480 as needed and placed in the doc/source/images directory.

Distributed Scheduler
=====================

The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler, which randomly selects a Host from the available pool. Or, if you need something a little more fancy, you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).

.. image:: /images/dating_service.png

But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understands how to pass instance requests from Zone to Zone.

This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capabilities of a Zone and its component services to make informed decisions on where a new instance should be created. When making this decision it consults not only all the Compute nodes in the current Zone, but the Compute nodes in each Child Zone. This continues recursively until the ideal host is found.

So, how does this all work?

This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the :doc:`devguide/zones` documentation before reading this.

.. image:: /images/zone_aware_scheduler.png

Costs & Weights
---------------
When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.

Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance owned by the requesting user (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost.

An example of some other costs might include selecting:
 * a GPU-based host over a standard CPU
 * a host with fast ethernet over a 10 Mbps line
 * a host that can run Windows instances
 * a host in the EU vs. North America
 * etc.

This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly.
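As a rough, purely illustrative sketch of the idea (the cost function, the `free_ram_mb`/`memory_mb` fields and the constants are invented for this example, not taken from the Nova code), a single cost and the resulting weight might look like::

    def free_ram_cost(host_info, instance_type):
        # Hosts with less RAM left over after the instance lands are
        # "more expensive"; 0 is the best possible score.
        free = host_info['free_ram_mb'] - instance_type['memory_mb']
        return max(0, 10000 - free)

    def weigh(host_info, instance_type):
        # The weight is currently just the sum of the individual costs.
        costs = [free_ram_cost(host_info, instance_type)]
        return sum(costs)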

.. image:: /images/costs_weights.png

nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler
------------------------------------------------------
As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions.

Here is how it works:

1. The compute nodes are filtered and the nodes remaining are weighed.
2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, disk, etc.) to fulfill the request.
3. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
4. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
5. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
6. The build plan is then executed. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child Zone. Child Zones may forward the requests to their child Zones as needed.

.. image:: /images/zone_aware_overview.png

`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used.

Filtering and Weighing
----------------------
The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible.

.. image:: /images/filtering.png

Requesting a new instance
-------------------------
Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.

`nova.compute.api.create()` performed the following actions:
1. it validated all the fields passed into it.
2. it created an entry in the `Instance` table for each instance requested.
3. it put one `run_instance` message in the scheduler queue for each instance requested.
4. the schedulers picked off the messages and decided which compute node should handle the request.
5. the `run_instance` message was forwarded to the compute node for processing and the instance was created.
6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_ids` are valid.

.. image:: /images/nova.compute.api.create.png

Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.

The problem with this approach is that each request is scattered amongst the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possibility of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.

For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method, `nova.compute.api.create_all_at_once()`. This method does things a little differently:
1. it validates all the fields passed into it.
2. it creates a single `reservation_id` for all of the instances created. This is a UUID.
3. it creates a single `run_instance` request in the scheduler queue.
4. a scheduler picks the message off the queue and works on it.
5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`. A rough sketch of the `select` exchange follows.
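The wire-level details of the `select` call are not spelled out here, so the following is only a sketch of the exchange; the URL suffix, payload shape and field names are assumptions made for illustration::

    import json
    import urllib2

    def zone_select(child_api_url, request_spec):
        """Ask a child Zone for weighted host estimates (illustrative only)."""
        req = urllib2.Request(child_api_url + '/zones/select',
                              data=json.dumps({'request_spec': request_spec}),
                              headers={'Content-Type': 'application/json'})
        # Each entry pairs a weight with an opaque, encrypted build-plan blob,
        # e.g. [{'weight': 42, 'blob': '<encrypted host details>'}, ...]
        return json.loads(urllib2.urlopen(req).read())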

.. image:: /images/nova.compute.api.create_all_at_once.png

The Catch
---------
This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons, Zones should never leak information about their internal architectural layout. That means Zones cannot leak information about hostnames or service IP addresses outside of their world.

When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access, and then we would have to clean up all these entries periodically. Recall that there are going to be many, many `select` calls issued to child Zones asking for estimates.

Instead, we take a rather innovative approach to the problem. We encrypt all the child Zone's internal details and pass them back to the parent Zone. If the parent Zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`.

In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent.

Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.

Reservation IDs
---------------

NOTE: The features described in this section are related to the upcoming 'merge-4' branch.

The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since the OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.

NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.

We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return type. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches.

Finally, we need to give the user a way to get information on each of the instances created under this `reservation_id`. Fortunately, this is still possible with the existing `GET /servers` command, so long as we add a new optional `reservation_id` parameter.

`python-novaclient` will be extended to support both of these changes.

Host Filter
-----------

As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To facilitate this, the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.

The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter`, which simply returns all available hosts. But there are others:

* `nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.

* `nova.scheduler.host_filter.JSONFilter` filters hosts based on a simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples, and the sketch below.
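For instance, a hypothetical query for hosts with at least 8 GB of free RAM and 200 GB of free disk might look something like this; the `$...` variable names are illustrative only, and the tests referenced above show the grammar actually supported::

    query = ['and',
             ['>=', '$compute.host_memory_free', 8 * 1024],
             ['>=', '$compute.disk_available', 200]]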

To create your own `HostFilter`, simply derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts`, which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call, as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be.
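A minimal filter, sketched under a few assumptions (the `host_memory_free` capability key and the exact layout of `zone_manager.service_states` are illustrative, and the real base class may expect slightly different return shapes), might look like::

    from nova.scheduler import host_filter

    class RamHostFilter(host_filter.HostFilter):
        """Only keep hosts with enough free RAM for the requested flavor."""

        def instance_type_to_filter(self, instance_type):
            # Reduce the InstanceType to the single value this filter needs.
            return instance_type['memory_mb']

        def filter_hosts(self, zone_manager, query):
            required_ram = query
            hosts = []
            # Assumed layout: {hostname: {service_name: capability_dict}}
            for hostname, services in zone_manager.service_states.iteritems():
                caps = services.get('compute', {})
                if caps.get('host_memory_free', 0) >= required_ram:
                    hosts.append((hostname, caps))
            return hosts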

Cost Scheduler Weighing
-----------------------
Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname`, where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted; this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled.
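As a sketch only (the argument list is abbreviated and the `running_vms` capability key is invented for the example), a trivial weighing that prefers the least-loaded hosts could look like::

    def weigh_hosts(self, request_spec, hosts):
        # `hosts` is the filtered list of (hostname, capabilities) tuples.
        # Lower weights win, so hosts running fewer instances are preferred.
        return [{'weight': caps.get('running_vms', 0), 'hostname': hostname}
                for hostname, caps in hosts]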

Simple Zone Aware Scheduling
----------------------------
The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter, and its `weigh_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.

The `--scheduler_driver` flag is how you specify the scheduler class name.

Flags
-----

All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file:

::

    --allow_admin_api=true
    --enable_zone_routing=true
    --zone_name=zone1
    --build_plan_encryption_key=c286696d887c9aa0611bbb3e2025a45b
    --scheduler_driver=nova.scheduler.host_filter.HostFilterScheduler
    --default_host_filter=nova.scheduler.host_filter.AllHostsFilter

`--allow_admin_api` must be set for the OS API to enable the new `/zones/*` commands.
`--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances.
`--zone_name` is only required in child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue.
`--build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys.
`--scheduler_driver` is the real workhorse of the operation. For the Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler`.
`--default_host_filter` is the host filter to be used for filtering candidate Compute nodes.

Some optional flags which are handy for debugging are:

::

    --connection_type=fake
    --verbose

Using the `Fake` virtualization driver is handy when you're setting this stuff up so you're not dealing with a million possible issues at once. When things seem to be working correctly, switch back to whatever hypervisor your deployment uses.
@@ -35,6 +35,7 @@ Programming Concepts
.. toctree::
   :maxdepth: 3

   zone
   rabbit

API Reference

@@ -17,11 +17,11 @@
Zones
=====

A Nova deployment is called a Zone. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further, a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution.
A Nova deployment is called a Zone. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further, a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers.

The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion.

Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones.
Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones and, in those cases, the grandchild's internal structure would not be known to the grand-parent.

Zones share nothing. They communicate via the public OpenStack API only. No database, queue, user or project definition is shared between Zones.

@@ -34,7 +34,7 @@ Routing between Zones is based on the Capabilities of that Zone. Capabilities ar

    key=value;value;value, key=value;value;value

Zones have Capabilities which are general to the Zone and are set via the `--zone-capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag.
Zones have Capabilities which are general to the Zone and are set via the `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag.
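A minimal sketch of a service reporting such capabilities (the class, method name and capability keys are illustrative only, and it is assumed here that `update_service_capabilities()` accepts the capability dict directly)::

    from nova import manager

    class ExampleComputeManager(manager.SchedulerDependentManager):

        def _report_capabilities(self):
            # Periodically picked up and forwarded to the Scheduler service.
            self.update_service_capabilities({'host_memory_free': 2048,
                                              'disk_available': 100})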

Flow within a Zone
------------------
@@ -47,7 +47,7 @@ Inter-service communication within a Zone is done with RabbitMQ. Each class of S

These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing.

The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone-name` flag (and defaults to "nova").
The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova").

Zone administrative functions
-----------------------------
@@ -99,7 +99,7 @@ You can get the `child zone api url`, `nova api key` and `username` from the `no
    export NOVA_URL="http://192.168.2.120:8774/v1.0/"


This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.
This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done with this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.

Getting a list of child Zones
-----------------------------

BIN doc/source/image_src/zones_distsched_illustrations.odp (new executable file)
BIN doc/source/images/costs_weights.png (new file, 35 KiB)
BIN doc/source/images/dating_service.png (new file, 31 KiB)
BIN doc/source/images/filtering.png (new file, 18 KiB)
BIN doc/source/images/nova.compute.api.create.png (new executable file, 49 KiB)
BIN doc/source/images/nova.compute.api.create_all_at_once.png (new executable file, 61 KiB)
BIN doc/source/images/zone_aware_overview.png (new executable file, 55 KiB)
BIN doc/source/images/zone_aware_scheduler.png (new file, 20 KiB)
@@ -6,7 +6,7 @@ nova-manage

control and manage cloud computer instances and images
------------------------------------------------------

:Author: nova@lists.launchpad.net
:Author: openstack@lists.launchpad.net
:Date: 2010-11-16
:Copyright: OpenStack LLC
:Version: 0.1

@@ -121,7 +121,7 @@ Nova Role

nova-manage role <action> [<argument>]

``nova-manage role add <username> <rolename> <(optional) projectname>``

Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.

Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: cloudadmin, itsec, sysadmin, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.

``nova-manage role has <username> <projectname>``

Checks the user or project and responds with True if the user has a global role with a particular project.
@@ -38,11 +38,11 @@ Role-based access control (RBAC) is an approach to restricting system access to

Nova’s rights management system employs the RBAC model and currently supports the following five roles:

* **Cloud Administrator.** (admin) Users of this class enjoy complete system access.
* **Cloud Administrator.** (cloudadmin) Users of this class enjoy complete system access.
* **IT Security.** (itsec) This role is limited to IT security personnel. It permits role holders to quarantine instances.
* **Project Manager.** (projectmanager)The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances.
* **System Administrator.** (sysadmin) The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances.
* **Network Administrator.** (netadmin) Users with this role are permitted to allocate and assign publicly accessible IP addresses as well as create and modify firewall rules.
* **Developer.** This is a general purpose role that is assigned to users by default.
* **Developer.** (developer) This is a general purpose role that is assigned to users by default.

RBAC management is exposed through the dashboard for simplified user management.
@@ -24,6 +24,7 @@ other backends by creating another class that exposes the same
|
||||
public methods.
|
||||
"""
|
||||
|
||||
import functools
|
||||
import sys
|
||||
|
||||
from nova import exception
|
||||
@@ -68,6 +69,12 @@ flags.DEFINE_string('ldap_developer',
|
||||
LOG = logging.getLogger("nova.ldapdriver")
|
||||
|
||||
|
||||
if FLAGS.memcached_servers:
|
||||
import memcache
|
||||
else:
|
||||
from nova import fakememcache as memcache
|
||||
|
||||
|
||||
# TODO(vish): make an abstract base class with the same public methods
|
||||
# to define a set interface for AuthDrivers. I'm delaying
|
||||
# creating this now because I'm expecting an auth refactor
|
||||
@@ -85,6 +92,7 @@ def _clean(attr):
|
||||
|
||||
def sanitize(fn):
|
||||
"""Decorator to sanitize all args"""
|
||||
@functools.wraps(fn)
|
||||
def _wrapped(self, *args, **kwargs):
|
||||
args = [_clean(x) for x in args]
|
||||
kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
|
||||
@@ -103,29 +111,56 @@ class LdapDriver(object):
|
||||
isadmin_attribute = 'isNovaAdmin'
|
||||
project_attribute = 'owner'
|
||||
project_objectclass = 'groupOfNames'
|
||||
conn = None
|
||||
mc = None
|
||||
|
||||
def __init__(self):
|
||||
"""Imports the LDAP module"""
|
||||
self.ldap = __import__('ldap')
|
||||
self.conn = None
|
||||
if FLAGS.ldap_schema_version == 1:
|
||||
LdapDriver.project_pattern = '(objectclass=novaProject)'
|
||||
LdapDriver.isadmin_attribute = 'isAdmin'
|
||||
LdapDriver.project_attribute = 'projectManager'
|
||||
LdapDriver.project_objectclass = 'novaProject'
|
||||
self.__cache = None
|
||||
if LdapDriver.conn is None:
|
||||
LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
|
||||
LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
|
||||
FLAGS.ldap_password)
|
||||
if LdapDriver.mc is None:
|
||||
LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
|
||||
|
||||
def __enter__(self):
|
||||
"""Creates the connection to LDAP"""
|
||||
self.conn = self.ldap.initialize(FLAGS.ldap_url)
|
||||
self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
|
||||
# TODO(yorik-sar): Should be per-request cache, not per-driver-request
|
||||
self.__cache = {}
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
"""Destroys the connection to LDAP"""
|
||||
self.conn.unbind_s()
|
||||
self.__cache = None
|
||||
return False
|
||||
|
||||
def __local_cache(key_fmt): # pylint: disable=E0213
|
||||
"""Wrap function to cache it's result in self.__cache.
|
||||
Works only with functions with one fixed argument.
|
||||
"""
|
||||
def do_wrap(fn):
|
||||
@functools.wraps(fn)
|
||||
def inner(self, arg, **kwargs):
|
||||
cache_key = key_fmt % (arg,)
|
||||
try:
|
||||
res = self.__cache[cache_key]
|
||||
LOG.debug('Local cache hit for %s by key %s' %
|
||||
(fn.__name__, cache_key))
|
||||
return res
|
||||
except KeyError:
|
||||
res = fn(self, arg, **kwargs)
|
||||
self.__cache[cache_key] = res
|
||||
return res
|
||||
return inner
|
||||
return do_wrap
|
||||
|
||||
@sanitize
|
||||
@__local_cache('uid_user-%s')
|
||||
def get_user(self, uid):
|
||||
"""Retrieve user by id"""
|
||||
attr = self.__get_ldap_user(uid)
|
||||
@@ -134,15 +169,31 @@ class LdapDriver(object):
|
||||
@sanitize
|
||||
def get_user_from_access_key(self, access):
|
||||
"""Retrieve user by access key"""
|
||||
cache_key = 'uak_dn_%s' % (access,)
|
||||
user_dn = self.mc.get(cache_key)
|
||||
if user_dn:
|
||||
user = self.__to_user(
|
||||
self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE))
|
||||
if user:
|
||||
if user['access'] == access:
|
||||
return user
|
||||
else:
|
||||
self.mc.set(cache_key, None)
|
||||
query = '(accessKey=%s)' % access
|
||||
dn = FLAGS.ldap_user_subtree
|
||||
return self.__to_user(self.__find_object(dn, query))
|
||||
user_obj = self.__find_object(dn, query)
|
||||
user = self.__to_user(user_obj)
|
||||
if user:
|
||||
self.mc.set(cache_key, user_obj['dn'][0])
|
||||
return user
|
||||
|
||||
@sanitize
|
||||
@__local_cache('pid_project-%s')
|
||||
def get_project(self, pid):
|
||||
"""Retrieve project by id"""
|
||||
dn = self.__project_to_dn(pid)
|
||||
attr = self.__find_object(dn, LdapDriver.project_pattern)
|
||||
dn = self.__project_to_dn(pid, search=False)
|
||||
attr = self.__find_object(dn, LdapDriver.project_pattern,
|
||||
scope=self.ldap.SCOPE_BASE)
|
||||
return self.__to_project(attr)
|
||||
|
||||
@sanitize
|
||||
@@ -395,6 +446,7 @@ class LdapDriver(object):
|
||||
"""Check if project exists"""
|
||||
return self.get_project(project_id) is not None
|
||||
|
||||
@__local_cache('uid_attrs-%s')
|
||||
def __get_ldap_user(self, uid):
|
||||
"""Retrieve LDAP user entry by id"""
|
||||
dn = FLAGS.ldap_user_subtree
|
||||
@@ -426,12 +478,20 @@ class LdapDriver(object):
|
||||
if scope is None:
|
||||
# One of the flags is 0!
|
||||
scope = self.ldap.SCOPE_SUBTREE
|
||||
if query is None:
|
||||
query = "(objectClass=*)"
|
||||
try:
|
||||
res = self.conn.search_s(dn, scope, query)
|
||||
except self.ldap.NO_SUCH_OBJECT:
|
||||
return []
|
||||
# Just return the attributes
|
||||
return [attributes for dn, attributes in res]
|
||||
# FIXME(yorik-sar): Whole driver should be refactored to
|
||||
# prevent this hack
|
||||
res1 = []
|
||||
for dn, attrs in res:
|
||||
attrs['dn'] = [dn]
|
||||
res1.append(attrs)
|
||||
return res1
|
||||
|
||||
def __find_role_dns(self, tree):
|
||||
"""Find dns of role objects in given tree"""
|
||||
@@ -564,6 +624,7 @@ class LdapDriver(object):
|
||||
'description': attr.get('description', [None])[0],
|
||||
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
|
||||
|
||||
@__local_cache('uid_dn-%s')
|
||||
def __uid_to_dn(self, uid, search=True):
|
||||
"""Convert uid to dn"""
|
||||
# By default return a generated DN
|
||||
@@ -576,6 +637,7 @@ class LdapDriver(object):
|
||||
userdn = user[0]
|
||||
return userdn
|
||||
|
||||
@__local_cache('pid_dn-%s')
|
||||
def __project_to_dn(self, pid, search=True):
|
||||
"""Convert pid to dn"""
|
||||
# By default return a generated DN
|
||||
@@ -603,16 +665,18 @@ class LdapDriver(object):
|
||||
else:
|
||||
return None
|
||||
|
||||
@__local_cache('dn_uid-%s')
|
||||
def __dn_to_uid(self, dn):
|
||||
"""Convert user dn to uid"""
|
||||
query = '(objectclass=novaUser)'
|
||||
user = self.__find_object(dn, query)
|
||||
user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE)
|
||||
return user[FLAGS.ldap_user_id_attribute][0]
|
||||
|
||||
|
||||
class FakeLdapDriver(LdapDriver):
|
||||
"""Fake Ldap Auth driver"""
|
||||
|
||||
def __init__(self): # pylint: disable=W0231
|
||||
__import__('nova.auth.fakeldap')
|
||||
self.ldap = sys.modules['nova.auth.fakeldap']
|
||||
def __init__(self):
|
||||
import nova.auth.fakeldap
|
||||
sys.modules['ldap'] = nova.auth.fakeldap
|
||||
super(FakeLdapDriver, self).__init__()
|
||||
|
||||
@@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
|
||||
LOG = logging.getLogger('nova.auth.manager')
|
||||
|
||||
|
||||
if FLAGS.memcached_servers:
|
||||
import memcache
|
||||
else:
|
||||
from nova import fakememcache as memcache
|
||||
|
||||
|
||||
class AuthBase(object):
|
||||
"""Base class for objects relating to auth
|
||||
|
||||
@@ -206,6 +212,7 @@ class AuthManager(object):
|
||||
"""
|
||||
|
||||
_instance = None
|
||||
mc = None
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
"""Returns the AuthManager singleton"""
|
||||
@@ -222,13 +229,8 @@ class AuthManager(object):
|
||||
self.network_manager = utils.import_object(FLAGS.network_manager)
|
||||
if driver or not getattr(self, 'driver', None):
|
||||
self.driver = utils.import_class(driver or FLAGS.auth_driver)
|
||||
|
||||
if FLAGS.memcached_servers:
|
||||
import memcache
|
||||
else:
|
||||
from nova import fakememcache as memcache
|
||||
self.mc = memcache.Client(FLAGS.memcached_servers,
|
||||
debug=0)
|
||||
if AuthManager.mc is None:
|
||||
AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
|
||||
|
||||
def authenticate(self, access, signature, params, verb='GET',
|
||||
server_string='127.0.0.1:8773', path='/',
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null)
|
||||
NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) ||
|
||||
NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}")
|
||||
NOVA_KEY_DIR=${NOVARC%%/*}
|
||||
export EC2_ACCESS_KEY="%(access)s:%(project)s"
|
||||
export EC2_SECRET_KEY="%(secret)s"
|
||||
export EC2_URL="%(ec2)s"
|
||||
@@ -12,4 +14,5 @@ alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_P
|
||||
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
|
||||
export NOVA_API_KEY="%(access)s"
|
||||
export NOVA_USERNAME="%(user)s"
|
||||
export NOVA_PROJECT_ID="%(project)s"
|
||||
export NOVA_URL="%(os)s"
|
||||
|
||||
@@ -65,7 +65,7 @@ class BuildInProgress(Error):
|
||||
|
||||
class DBError(Error):
|
||||
"""Wraps an implementation specific exception."""
|
||||
def __init__(self, inner_exception):
|
||||
def __init__(self, inner_exception=None):
|
||||
self.inner_exception = inner_exception
|
||||
super(DBError, self).__init__(str(inner_exception))
|
||||
|
||||
@@ -122,7 +122,7 @@ class NotAuthorized(NovaException):
|
||||
message = _("Not authorized.")
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NotFound, self).__init__(**kwargs)
|
||||
super(NotAuthorized, self).__init__(**kwargs)
|
||||
|
||||
|
||||
class AdminRequired(NotAuthorized):
|
||||
@@ -255,6 +255,10 @@ class NotFound(NovaException):
|
||||
super(NotFound, self).__init__(**kwargs)
|
||||
|
||||
|
||||
class FlagNotSet(NotFound):
|
||||
message = _("Required flag %(flag)s not set.")
|
||||
|
||||
|
||||
class InstanceNotFound(NotFound):
|
||||
message = _("Instance %(instance_id)s could not be found.")
|
||||
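The `%(...)s` placeholders in these messages are filled from the keyword arguments passed when the exception is raised, so a minimal illustrative use (the instance id is made up) looks like::

    from nova import exception

    # instance_id is interpolated into the message template defined above.
    raise exception.InstanceNotFound(instance_id='i-00000001')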
|
||||
@@ -267,6 +271,14 @@ class VolumeNotFoundForInstance(VolumeNotFound):
|
||||
message = _("Volume not found for instance %(instance_id)s.")
|
||||
|
||||
|
||||
class SnapshotNotFound(NotFound):
|
||||
message = _("Snapshot %(snapshot_id)s could not be found.")
|
||||
|
||||
|
||||
class VolumeIsBusy(Error):
|
||||
message = _("deleting volume %(volume_name)s that has snapshot")
|
||||
|
||||
|
||||
class ExportDeviceNotFoundForVolume(NotFound):
|
||||
message = _("No export device found for volume %(volume_id)s.")
|
||||
|
||||
@@ -279,6 +291,15 @@ class DiskNotFound(NotFound):
|
||||
message = _("No disk at %(location)s")
|
||||
|
||||
|
||||
class InvalidImageRef(Invalid):
|
||||
message = _("Invalid image href %(image_href)s.")
|
||||
|
||||
|
||||
class ListingImageRefsNotSupported(Invalid):
|
||||
message = _("Some images have been stored via hrefs."
|
||||
+ " This version of the api does not support displaying image hrefs.")
|
||||
|
||||
|
||||
class ImageNotFound(NotFound):
|
||||
message = _("Image %(image_id)s could not be found.")
|
||||
|
||||
@@ -340,6 +361,10 @@ class NoFixedIpsFoundForInstance(NotFound):
|
||||
|
||||
|
||||
class FloatingIpNotFound(NotFound):
|
||||
message = _("Floating ip %(floating_ip)s not found")
|
||||
|
||||
|
||||
class FloatingIpNotFoundForFixedAddress(NotFound):
|
||||
message = _("Floating ip not found for fixed address %(fixed_ip)s.")
|
||||
|
||||
|
||||
@@ -355,6 +380,10 @@ class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined):
|
||||
message = _("Zero floating ips defined for instance %(instance_id)s.")
|
||||
|
||||
|
||||
class NoMoreFloatingIps(NotFound):
|
||||
message = _("Zero floating ips available.")
|
||||
|
||||
|
||||
class KeypairNotFound(NotFound):
|
||||
message = _("Keypair %(keypair_name)s not found for user %(user_id)s")
|
||||
|
||||
@@ -461,11 +490,19 @@ class ZoneNotFound(NotFound):
|
||||
message = _("Zone %(zone_id)s could not be found.")
|
||||
|
||||
|
||||
class SchedulerHostFilterDriverNotFound(NotFound):
|
||||
message = _("Scheduler Host Filter Driver %(driver_name)s could"
|
||||
class SchedulerHostFilterNotFound(NotFound):
|
||||
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
|
||||
|
||||
|
||||
class SchedulerCostFunctionNotFound(NotFound):
|
||||
message = _("Scheduler cost function %(cost_fn_str)s could"
|
||||
" not be found.")
|
||||
|
||||
|
||||
class SchedulerWeightFlagNotFound(NotFound):
|
||||
message = _("Scheduler weight flag not found: %(flag_name)s")
|
||||
|
||||
|
||||
class InstanceMetadataNotFound(NotFound):
|
||||
message = _("Instance %(instance_id)s has no metadata with "
|
||||
"key %(metadata_key)s.")
|
||||
@@ -552,3 +589,7 @@ class InstanceExists(Duplicate):
|
||||
|
||||
class MigrationError(NovaException):
|
||||
message = _("Migration error") + ": %(reason)s"
|
||||
|
||||
|
||||
class MalformedRequestBody(NovaException):
|
||||
message = _("Malformed message body: %(reason)s")
|
||||
|
||||
@@ -31,6 +31,7 @@ LOG = logging.getLogger("nova.fakerabbit")
|
||||
|
||||
EXCHANGES = {}
|
||||
QUEUES = {}
|
||||
CONSUMERS = {}
|
||||
|
||||
|
||||
class Message(base.BaseMessage):
|
||||
@@ -96,17 +97,29 @@ class Backend(base.BaseBackend):
|
||||
' key %(routing_key)s') % locals())
|
||||
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
|
||||
|
||||
def declare_consumer(self, queue, callback, *args, **kwargs):
|
||||
self.current_queue = queue
|
||||
self.current_callback = callback
|
||||
def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs):
|
||||
global CONSUMERS
|
||||
LOG.debug("Adding consumer %s", consumer_tag)
|
||||
CONSUMERS[consumer_tag] = (queue, callback)
|
||||
|
||||
def cancel(self, consumer_tag):
|
||||
global CONSUMERS
|
||||
LOG.debug("Removing consumer %s", consumer_tag)
|
||||
del CONSUMERS[consumer_tag]
|
||||
|
||||
def consume(self, limit=None):
|
||||
global CONSUMERS
|
||||
num = 0
|
||||
while True:
|
||||
item = self.get(self.current_queue)
|
||||
if item:
|
||||
self.current_callback(item)
|
||||
raise StopIteration()
|
||||
greenthread.sleep(0)
|
||||
for (queue, callback) in CONSUMERS.itervalues():
|
||||
item = self.get(queue)
|
||||
if item:
|
||||
callback(item)
|
||||
num += 1
|
||||
yield
|
||||
if limit and num == limit:
|
||||
raise StopIteration()
|
||||
greenthread.sleep(0.1)
|
||||
|
||||
def get(self, queue, no_ack=False):
|
||||
global QUEUES
|
||||
@@ -134,5 +147,7 @@ class Backend(base.BaseBackend):
|
||||
def reset_all():
|
||||
global EXCHANGES
|
||||
global QUEUES
|
||||
global CONSUMERS
|
||||
EXCHANGES = {}
|
||||
QUEUES = {}
|
||||
CONSUMERS = {}
|
||||
|
||||
@@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues):
|
||||
return name in self.__dict__['__dirty']
|
||||
|
||||
def ClearDirty(self):
|
||||
self.__dict__['__is_dirty'] = []
|
||||
self.__dict__['__dirty'] = []
|
||||
|
||||
def WasAlreadyParsed(self):
|
||||
return self.__dict__['__was_already_parsed']
|
||||
@@ -119,11 +119,12 @@ class FlagValues(gflags.FlagValues):
|
||||
if '__stored_argv' not in self.__dict__:
|
||||
return
|
||||
new_flags = FlagValues(self)
|
||||
for k in self.__dict__['__dirty']:
|
||||
for k in self.FlagDict().iterkeys():
|
||||
new_flags[k] = gflags.FlagValues.__getitem__(self, k)
|
||||
|
||||
new_flags.Reset()
|
||||
new_flags(self.__dict__['__stored_argv'])
|
||||
for k in self.__dict__['__dirty']:
|
||||
for k in new_flags.FlagDict().iterkeys():
|
||||
setattr(self, k, getattr(new_flags, k))
|
||||
self.ClearDirty()
|
||||
|
||||
@@ -269,8 +270,10 @@ DEFINE_list('region_list',
|
||||
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
|
||||
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
|
||||
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
|
||||
DEFINE_integer('glance_port', 9292, 'glance port')
|
||||
DEFINE_string('glance_host', '$my_ip', 'glance host')
|
||||
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
|
||||
DEFINE_list('glance_api_servers',
|
||||
['%s:9292' % _get_my_ip()],
|
||||
'list of glance api servers available to nova (host:port)')
|
||||
DEFINE_integer('s3_port', 3333, 's3 port')
|
||||
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
|
||||
DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)')
|
||||
@@ -295,6 +298,7 @@ DEFINE_bool('fake_network', False,
|
||||
'should we use fake network devices and addresses')
|
||||
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
|
||||
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
|
||||
DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL')
|
||||
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
|
||||
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
|
||||
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
|
||||
@@ -360,7 +364,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
|
||||
'Manager for scheduler')
|
||||
|
||||
# The service to use for image search and retrieval
|
||||
DEFINE_string('image_service', 'nova.image.local.LocalImageService',
|
||||
DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
|
||||
'The service to use for retrieving and searching for images.')
|
||||
|
||||
DEFINE_string('host', socket.gethostname(),
|
||||
@@ -379,3 +383,5 @@ DEFINE_string('zone_name', 'nova', 'name of this zone')
|
||||
DEFINE_list('zone_capabilities',
|
||||
['hypervisor=xenserver;kvm', 'os=linux;windows'],
|
||||
'Key/Multi-value list representing capabilities of this zone')
|
||||
DEFINE_string('build_plan_encryption_key', None,
|
||||
'128bit (hex) encryption key for scheduler build plans.')
|
||||
|
||||
10
nova/log.py
@@ -35,6 +35,7 @@ import os
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
import nova
|
||||
from nova import flags
|
||||
from nova import version
|
||||
|
||||
@@ -63,6 +64,7 @@ flags.DEFINE_list('default_log_levels',
|
||||
'eventlet.wsgi.server=WARN'],
|
||||
'list of logger=LEVEL pairs')
|
||||
flags.DEFINE_bool('use_syslog', False, 'output to syslog')
|
||||
flags.DEFINE_bool('publish_errors', False, 'publish error events')
|
||||
flags.DEFINE_string('logfile', None, 'output to named file')
|
||||
|
||||
|
||||
@@ -258,12 +260,20 @@ class NovaRootLogger(NovaLogger):
|
||||
else:
|
||||
self.removeHandler(self.filelog)
|
||||
self.addHandler(self.streamlog)
|
||||
if FLAGS.publish_errors:
|
||||
self.addHandler(PublishErrorsHandler(ERROR))
|
||||
if FLAGS.verbose:
|
||||
self.setLevel(DEBUG)
|
||||
else:
|
||||
self.setLevel(INFO)
|
||||
|
||||
|
||||
class PublishErrorsHandler(logging.Handler):
|
||||
def emit(self, record):
|
||||
nova.notifier.api.notify('nova.error.publisher', 'error_notification',
|
||||
nova.notifier.api.ERROR, dict(error=record.msg))
|
||||
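In other words, when the `--publish_errors` flag is set, any ERROR-level record is also pushed out through the notifier; a small sketch of the effect (logger name invented)::

    from nova import log as logging

    LOG = logging.getLogger('nova.example')
    # With --publish_errors enabled, this both logs locally and emits an
    # 'error_notification' event via nova.notifier.api.notify().
    LOG.error('something went wrong')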
|
||||
|
||||
def handle_exception(type, value, tb):
|
||||
extra = {}
|
||||
if FLAGS.verbose:
|
||||
|
||||
@@ -11,14 +11,14 @@
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.import datetime
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
import uuid
|
||||
|
||||
from nova import flags
|
||||
from nova import utils
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
flags.DEFINE_string('default_notification_level', 'INFO',
|
||||
@@ -63,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload):
|
||||
|
||||
{'message_id': str(uuid.uuid4()),
|
||||
'publisher_id': 'compute.host1',
|
||||
'timestamp': datetime.datetime.utcnow(),
|
||||
'timestamp': utils.utcnow(),
|
||||
'priority': 'WARN',
|
||||
'event_type': 'compute.create_instance',
|
||||
'payload': {'instance_id': 12, ... }}
|
||||
@@ -78,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload):
|
||||
event_type=event_type,
|
||||
priority=priority,
|
||||
payload=payload,
|
||||
timestamp=str(datetime.datetime.utcnow()))
|
||||
timestamp=str(utils.utcnow()))
|
||||
driver.notify(msg)
|
||||
|
||||
@@ -18,6 +18,7 @@ import json
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ import nova.context
|
||||
from nova import flags
|
||||
from nova import rpc
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
flags.DEFINE_string('notification_topic', 'notifications',
|
||||
|
||||
108
nova/quota.py
@@ -28,6 +28,8 @@ flags.DEFINE_integer('quota_instances', 10,
|
||||
'number of instances allowed per project')
|
||||
flags.DEFINE_integer('quota_cores', 20,
|
||||
'number of instance cores allowed per project')
|
||||
flags.DEFINE_integer('quota_ram', 50 * 1024,
|
||||
'megabytes of instance ram allowed per project')
|
||||
flags.DEFINE_integer('quota_volumes', 10,
|
||||
'number of volumes allowed per project')
|
||||
flags.DEFINE_integer('quota_gigabytes', 1000,
|
||||
@@ -44,14 +46,28 @@ flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255,
|
||||
'number of bytes allowed per injected file path')
|
||||
|
||||
|
||||
def get_quota(context, project_id):
|
||||
rval = {'instances': FLAGS.quota_instances,
|
||||
'cores': FLAGS.quota_cores,
|
||||
'volumes': FLAGS.quota_volumes,
|
||||
'gigabytes': FLAGS.quota_gigabytes,
|
||||
'floating_ips': FLAGS.quota_floating_ips,
|
||||
'metadata_items': FLAGS.quota_metadata_items}
|
||||
def _get_default_quotas():
|
||||
defaults = {
|
||||
'instances': FLAGS.quota_instances,
|
||||
'cores': FLAGS.quota_cores,
|
||||
'ram': FLAGS.quota_ram,
|
||||
'volumes': FLAGS.quota_volumes,
|
||||
'gigabytes': FLAGS.quota_gigabytes,
|
||||
'floating_ips': FLAGS.quota_floating_ips,
|
||||
'metadata_items': FLAGS.quota_metadata_items,
|
||||
'injected_files': FLAGS.quota_max_injected_files,
|
||||
'injected_file_content_bytes':
|
||||
FLAGS.quota_max_injected_file_content_bytes,
|
||||
}
|
||||
# -1 in the quota flags means unlimited
|
||||
for key in defaults.keys():
|
||||
if defaults[key] == -1:
|
||||
defaults[key] = None
|
||||
return defaults
|
||||
|
||||
|
||||
def get_project_quotas(context, project_id):
|
||||
rval = _get_default_quotas()
|
||||
quota = db.quota_get_all_by_project(context, project_id)
|
||||
for key in rval.keys():
|
||||
if key in quota:
|
||||
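As a small, made-up illustration of the convention noted above: a quota flag set to -1 comes back as None, i.e. no limit for that resource::

    # Illustrative only -- with instances unlimited, the defaults become:
    FLAGS.quota_instances = -1
    defaults = _get_default_quotas()
    # {'instances': None, 'cores': 20, 'ram': 51200, ...}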
@@ -65,71 +81,81 @@ def _get_request_allotment(requested, used, quota):
|
||||
return quota - used
|
||||
|
||||
|
||||
def allowed_instances(context, num_instances, instance_type):
|
||||
"""Check quota and return min(num_instances, allowed_instances)."""
|
||||
def allowed_instances(context, requested_instances, instance_type):
|
||||
"""Check quota and return min(requested_instances, allowed_instances)."""
|
||||
project_id = context.project_id
|
||||
context = context.elevated()
|
||||
num_cores = num_instances * instance_type['vcpus']
|
||||
used_instances, used_cores = db.instance_data_get_for_project(context,
|
||||
project_id)
|
||||
quota = get_quota(context, project_id)
|
||||
allowed_instances = _get_request_allotment(num_instances, used_instances,
|
||||
requested_cores = requested_instances * instance_type['vcpus']
|
||||
requested_ram = requested_instances * instance_type['memory_mb']
|
||||
usage = db.instance_data_get_for_project(context, project_id)
|
||||
used_instances, used_cores, used_ram = usage
|
||||
quota = get_project_quotas(context, project_id)
|
||||
allowed_instances = _get_request_allotment(requested_instances,
|
||||
used_instances,
|
||||
quota['instances'])
|
||||
allowed_cores = _get_request_allotment(num_cores, used_cores,
|
||||
allowed_cores = _get_request_allotment(requested_cores, used_cores,
|
||||
quota['cores'])
|
||||
allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram'])
|
||||
allowed_instances = min(allowed_instances,
|
||||
int(allowed_cores // instance_type['vcpus']))
|
||||
return min(num_instances, allowed_instances)
|
||||
allowed_cores // instance_type['vcpus'],
|
||||
allowed_ram // instance_type['memory_mb'])
|
||||
return min(requested_instances, allowed_instances)
|
||||
|
||||
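As a worked example under the default flags above (quota_instances=10, quota_cores=20, quota_ram=51200) and with nothing consumed yet: for an instance type with 2 vcpus and 4096 MB of RAM, a request for 12 instances is trimmed to min(10, 20 // 2, 51200 // 4096) = 10, so the caller gets back 10 rather than 12.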
|
||||
def allowed_volumes(context, num_volumes, size):
|
||||
"""Check quota and return min(num_volumes, allowed_volumes)."""
|
||||
def allowed_volumes(context, requested_volumes, size):
|
||||
"""Check quota and return min(requested_volumes, allowed_volumes)."""
|
||||
project_id = context.project_id
|
||||
context = context.elevated()
|
||||
size = int(size)
|
||||
num_gigabytes = num_volumes * size
|
||||
requested_gigabytes = requested_volumes * size
|
||||
used_volumes, used_gigabytes = db.volume_data_get_for_project(context,
|
||||
project_id)
|
||||
quota = get_quota(context, project_id)
|
||||
allowed_volumes = _get_request_allotment(num_volumes, used_volumes,
|
||||
quota = get_project_quotas(context, project_id)
|
||||
allowed_volumes = _get_request_allotment(requested_volumes, used_volumes,
|
||||
quota['volumes'])
|
||||
allowed_gigabytes = _get_request_allotment(num_gigabytes, used_gigabytes,
|
||||
allowed_gigabytes = _get_request_allotment(requested_gigabytes,
|
||||
used_gigabytes,
|
||||
quota['gigabytes'])
|
||||
allowed_volumes = min(allowed_volumes,
|
||||
int(allowed_gigabytes // size))
|
||||
return min(num_volumes, allowed_volumes)
|
||||
return min(requested_volumes, allowed_volumes)
|
||||
|
||||
|
||||
def allowed_floating_ips(context, num_floating_ips):
|
||||
"""Check quota and return min(num_floating_ips, allowed_floating_ips)."""
|
||||
def allowed_floating_ips(context, requested_floating_ips):
|
||||
"""Check quota and return min(requested, allowed) floating ips."""
|
||||
project_id = context.project_id
|
||||
context = context.elevated()
|
||||
used_floating_ips = db.floating_ip_count_by_project(context, project_id)
|
||||
quota = get_quota(context, project_id)
|
||||
allowed_floating_ips = _get_request_allotment(num_floating_ips,
|
||||
quota = get_project_quotas(context, project_id)
|
||||
allowed_floating_ips = _get_request_allotment(requested_floating_ips,
|
||||
used_floating_ips,
|
||||
quota['floating_ips'])
|
||||
return min(num_floating_ips, allowed_floating_ips)
|
||||
return min(requested_floating_ips, allowed_floating_ips)
|
||||
|
||||
|
||||
def allowed_metadata_items(context, num_metadata_items):
|
||||
"""Check quota; return min(num_metadata_items,allowed_metadata_items)."""
|
||||
project_id = context.project_id
|
||||
context = context.elevated()
|
||||
quota = get_quota(context, project_id)
|
||||
allowed_metadata_items = _get_request_allotment(num_metadata_items, 0,
|
||||
quota['metadata_items'])
|
||||
return min(num_metadata_items, allowed_metadata_items)
|
||||
def _calculate_simple_quota(context, resource, requested):
|
||||
"""Check quota for resource; return min(requested, allowed)."""
|
||||
quota = get_project_quotas(context, context.project_id)
|
||||
allowed = _get_request_allotment(requested, 0, quota[resource])
|
||||
return min(requested, allowed)
|
||||
|
||||
|
||||
def allowed_injected_files(context):
|
||||
def allowed_metadata_items(context, requested_metadata_items):
|
||||
"""Return the number of metadata items allowed."""
|
||||
return _calculate_simple_quota(context, 'metadata_items',
|
||||
requested_metadata_items)
|
||||
|
||||
|
||||
def allowed_injected_files(context, requested_injected_files):
|
||||
"""Return the number of injected files allowed."""
|
||||
return FLAGS.quota_max_injected_files
|
||||
return _calculate_simple_quota(context, 'injected_files',
|
||||
requested_injected_files)
|
||||
|
||||
|
||||
def allowed_injected_file_content_bytes(context):
|
||||
def allowed_injected_file_content_bytes(context, requested_bytes):
|
||||
"""Return the number of bytes allowed per injected file content."""
|
||||
return FLAGS.quota_max_injected_file_content_bytes
|
||||
resource = 'injected_file_content_bytes'
|
||||
return _calculate_simple_quota(context, resource, requested_bytes)
|
||||
|
||||
|
||||
def allowed_injected_file_path_bytes(context):
|
||||
|
||||
272
nova/rpc.py
@@ -28,12 +28,15 @@ import json
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import types
|
||||
import uuid
|
||||
|
||||
from carrot import connection as carrot_connection
|
||||
from carrot import messaging
|
||||
from eventlet import greenpool
|
||||
from eventlet import greenthread
|
||||
from eventlet import pools
|
||||
from eventlet import queue
|
||||
import greenlet
|
||||
|
||||
from nova import context
|
||||
from nova import exception
|
||||
@@ -47,7 +50,10 @@ LOG = logging.getLogger('nova.rpc')
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool')
|
||||
flags.DEFINE_integer('rpc_thread_pool_size', 1024,
|
||||
'Size of RPC thread pool')
|
||||
flags.DEFINE_integer('rpc_conn_pool_size', 30,
|
||||
'Size of RPC connection pool')
|
||||
|
||||
|
||||
class Connection(carrot_connection.BrokerConnection):
|
||||
@@ -59,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection):
|
||||
if new or not hasattr(cls, '_instance'):
|
||||
params = dict(hostname=FLAGS.rabbit_host,
|
||||
port=FLAGS.rabbit_port,
|
||||
ssl=FLAGS.rabbit_use_ssl,
|
||||
userid=FLAGS.rabbit_userid,
|
||||
password=FLAGS.rabbit_password,
|
||||
virtual_host=FLAGS.rabbit_virtual_host)
|
||||
@@ -90,6 +97,22 @@ class Connection(carrot_connection.BrokerConnection):
|
||||
return cls.instance()
|
||||
|
||||
|
||||
class Pool(pools.Pool):
|
||||
"""Class that implements a Pool of Connections."""
|
||||
|
||||
# TODO(comstud): Timeout connections not used in a while
|
||||
def create(self):
|
||||
LOG.debug('Creating new connection')
|
||||
return Connection.instance(new=True)
|
||||
|
||||
# Create a ConnectionPool to use for RPC calls. We'll order the
|
||||
# pool as a stack (LIFO), so that we can potentially loop through and
|
||||
# timeout old unused connections at some point
|
||||
ConnectionPool = Pool(
|
||||
max_size=FLAGS.rpc_conn_pool_size,
|
||||
order_as_stack=True)
|
||||
|
||||
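The pool is consumed later in this change through the context-manager interface that eventlet pools expose; the topic and payload below are placeholders::

    # Borrow a connection from the pool, use it, and return it on exit.
    with ConnectionPool.item() as conn:
        publisher = TopicPublisher(connection=conn, topic='scheduler')
        publisher.send({'method': 'ping', 'args': {}})
        publisher.close()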
|
||||
class Consumer(messaging.Consumer):
|
||||
"""Consumer base class.
|
||||
|
||||
@@ -131,7 +154,9 @@ class Consumer(messaging.Consumer):
|
||||
self.connection = Connection.recreate()
|
||||
self.backend = self.connection.create_backend()
|
||||
self.declare()
|
||||
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
|
||||
return super(Consumer, self).fetch(no_ack,
|
||||
auto_ack,
|
||||
enable_callbacks)
|
||||
if self.failed_connection:
|
||||
LOG.error(_('Reconnected to queue'))
|
||||
self.failed_connection = False
|
||||
@@ -159,13 +184,13 @@ class AdapterConsumer(Consumer):
|
||||
self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
|
||||
super(AdapterConsumer, self).__init__(connection=connection,
|
||||
topic=topic)
|
||||
self.register_callback(self.process_data)
|
||||
|
||||
def receive(self, *args, **kwargs):
|
||||
self.pool.spawn_n(self._receive, *args, **kwargs)
|
||||
def process_data(self, message_data, message):
|
||||
"""Consumer callback to call a method on a proxy object.
|
||||
|
||||
@exception.wrap_exception
|
||||
def _receive(self, message_data, message):
|
||||
"""Magically looks for a method on the proxy object and calls it.
|
||||
Parses the message for validity and fires off a thread to call the
|
||||
proxy object method.
|
||||
|
||||
Message data should be a dictionary with two keys:
|
||||
method: string representing the method to call
|
||||
@@ -175,8 +200,8 @@ class AdapterConsumer(Consumer):
|
||||
|
||||
"""
|
||||
LOG.debug(_('received %s') % message_data)
|
||||
msg_id = message_data.pop('_msg_id', None)
|
||||
|
||||
# This will be popped off in _unpack_context
|
||||
msg_id = message_data.get('_msg_id', None)
|
||||
ctxt = _unpack_context(message_data)
|
||||
|
||||
method = message_data.get('method')
|
||||
@@ -188,8 +213,17 @@ class AdapterConsumer(Consumer):
|
||||
# we just log the message and send an error string
|
||||
# back to the caller
|
||||
LOG.warn(_('no method for message: %s') % message_data)
|
||||
msg_reply(msg_id, _('No method for message: %s') % message_data)
|
||||
if msg_id:
|
||||
msg_reply(msg_id,
|
||||
_('No method for message: %s') % message_data)
|
||||
return
|
||||
self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args)
|
||||
|
||||
@exception.wrap_exception
|
||||
def _process_data(self, msg_id, ctxt, method, args):
|
||||
"""Thread that maigcally looks for a method on the proxy
|
||||
object and calls it.
|
||||
"""
|
||||
|
||||
node_func = getattr(self.proxy, str(method))
|
||||
node_args = dict((str(k), v) for k, v in args.iteritems())
|
||||
@@ -197,7 +231,18 @@ class AdapterConsumer(Consumer):
|
||||
try:
|
||||
rval = node_func(context=ctxt, **node_args)
|
||||
if msg_id:
|
||||
msg_reply(msg_id, rval, None)
|
||||
# Check if the result was a generator
|
||||
if isinstance(rval, types.GeneratorType):
|
||||
for x in rval:
|
||||
msg_reply(msg_id, x, None)
|
||||
else:
|
||||
msg_reply(msg_id, rval, None)
|
||||
|
||||
# This final None tells multicall that it is done.
|
||||
msg_reply(msg_id, None, None)
|
||||
elif isinstance(rval, types.GeneratorType):
|
||||
# NOTE(vish): this iterates through the generator
|
||||
list(rval)
|
||||
except Exception as e:
|
||||
logging.exception('Exception during message handling')
|
||||
if msg_id:
|
||||
@@ -205,11 +250,6 @@ class AdapterConsumer(Consumer):
|
||||
return
|
||||
|
||||
|
||||
class Publisher(messaging.Publisher):
|
||||
"""Publisher base class."""
|
||||
pass
|
||||
|
||||
|
||||
class TopicAdapterConsumer(AdapterConsumer):
|
||||
"""Consumes messages on a specific topic."""
|
||||
|
||||
@@ -242,6 +282,58 @@ class FanoutAdapterConsumer(AdapterConsumer):
|
||||
topic=topic, proxy=proxy)
|
||||
|
||||
|
||||
class ConsumerSet(object):
|
||||
"""Groups consumers to listen on together on a single connection."""
|
||||
|
||||
def __init__(self, connection, consumer_list):
|
||||
self.consumer_list = set(consumer_list)
|
||||
self.consumer_set = None
|
||||
self.enabled = True
|
||||
self.init(connection)
|
||||
|
||||
def init(self, conn):
|
||||
if not conn:
|
||||
conn = Connection.instance(new=True)
|
||||
if self.consumer_set:
|
||||
self.consumer_set.close()
|
||||
self.consumer_set = messaging.ConsumerSet(conn)
|
||||
for consumer in self.consumer_list:
|
||||
consumer.connection = conn
|
||||
# consumer.backend is set for us
|
||||
self.consumer_set.add_consumer(consumer)
|
||||
|
||||
def reconnect(self):
|
||||
self.init(None)
|
||||
|
||||
def wait(self, limit=None):
|
||||
running = True
|
||||
while running:
|
||||
it = self.consumer_set.iterconsume(limit=limit)
|
||||
if not it:
|
||||
break
|
||||
while True:
|
||||
try:
|
||||
it.next()
|
||||
except StopIteration:
|
||||
return
|
||||
except greenlet.GreenletExit:
|
||||
running = False
|
||||
break
|
||||
except Exception as e:
|
||||
LOG.exception(_("Exception while processing consumer"))
|
||||
self.reconnect()
|
||||
# Break to outer loop
|
||||
break
|
||||
|
||||
def close(self):
|
||||
self.consumer_set.close()
|
||||
|
||||
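A rough sketch of how a service might group its consumers; the topic names and the `service_manager` proxy object are placeholders::

    conn = Connection.instance(new=True)
    consumers = [
        TopicAdapterConsumer(connection=conn, topic='compute',
                             proxy=service_manager),
        TopicAdapterConsumer(connection=conn, topic='compute.host1',
                             proxy=service_manager),
    ]
    consumer_set = ConsumerSet(conn, consumers)
    # Blocks, dispatching incoming messages to the proxy's methods and
    # reconnecting on errors as shown in wait() above.
    consumer_set.wait()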
|
||||
class Publisher(messaging.Publisher):
|
||||
"""Publisher base class."""
|
||||
pass
|
||||
|
||||
|
||||
class TopicPublisher(Publisher):
|
||||
"""Publishes messages on a specific topic."""
|
||||
|
||||
@@ -306,16 +398,18 @@ def msg_reply(msg_id, reply=None, failure=None):
|
||||
LOG.error(_("Returning exception %s to caller"), message)
|
||||
LOG.error(tb)
|
||||
failure = (failure[0].__name__, str(failure[1]), tb)
|
||||
conn = Connection.instance()
|
||||
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
|
||||
try:
|
||||
publisher.send({'result': reply, 'failure': failure})
|
||||
except TypeError:
|
||||
publisher.send(
|
||||
{'result': dict((k, repr(v))
|
||||
for k, v in reply.__dict__.iteritems()),
|
||||
'failure': failure})
|
||||
publisher.close()
|
||||
|
||||
with ConnectionPool.item() as conn:
|
||||
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
|
||||
try:
|
||||
publisher.send({'result': reply, 'failure': failure})
|
||||
except TypeError:
|
||||
publisher.send(
|
||||
{'result': dict((k, repr(v))
|
||||
for k, v in reply.__dict__.iteritems()),
|
||||
'failure': failure})
|
||||
|
||||
publisher.close()
|
||||
|
||||
|
||||
class RemoteError(exception.Error):
|
||||
@@ -347,8 +441,9 @@ def _unpack_context(msg):
|
||||
if key.startswith('_context_'):
|
||||
value = msg.pop(key)
|
||||
context_dict[key[9:]] = value
|
||||
context_dict['msg_id'] = msg.pop('_msg_id', None)
|
||||
LOG.debug(_('unpacked context: %s'), context_dict)
|
||||
return context.RequestContext.from_dict(context_dict)
|
||||
return RpcContext.from_dict(context_dict)
|
||||
|
||||
|
||||
def _pack_context(msg, context):
|
||||
@@ -360,70 +455,112 @@ def _pack_context(msg, context):
|
||||
for args at some point.
|
||||
|
||||
"""
|
||||
context = dict([('_context_%s' % key, value)
|
||||
for (key, value) in context.to_dict().iteritems()])
|
||||
msg.update(context)
|
||||
context_d = dict([('_context_%s' % key, value)
|
||||
for (key, value) in context.to_dict().iteritems()])
|
||||
msg.update(context_d)
|
||||
|
||||
|
||||
def call(context, topic, msg):
|
||||
"""Sends a message on a topic and wait for a response."""
|
||||
class RpcContext(context.RequestContext):
|
||||
def __init__(self, *args, **kwargs):
|
||||
msg_id = kwargs.pop('msg_id', None)
|
||||
self.msg_id = msg_id
|
||||
super(RpcContext, self).__init__(*args, **kwargs)
|
||||
|
||||
def reply(self, *args, **kwargs):
|
||||
msg_reply(self.msg_id, *args, **kwargs)
|
||||
|
||||
|
||||
def multicall(context, topic, msg):
|
||||
"""Make a call that returns multiple times."""
|
||||
LOG.debug(_('Making asynchronous call on %s ...'), topic)
|
||||
msg_id = uuid.uuid4().hex
|
||||
msg.update({'_msg_id': msg_id})
|
||||
LOG.debug(_('MSG_ID is %s') % (msg_id))
|
||||
_pack_context(msg, context)
|
||||
|
||||
class WaitMessage(object):
|
||||
def __call__(self, data, message):
|
||||
"""Acks message and sets result."""
|
||||
message.ack()
|
||||
if data['failure']:
|
||||
self.result = RemoteError(*data['failure'])
|
||||
else:
|
||||
self.result = data['result']
|
||||
|
||||
wait_msg = WaitMessage()
|
||||
conn = Connection.instance()
|
||||
consumer = DirectConsumer(connection=conn, msg_id=msg_id)
|
||||
con_conn = ConnectionPool.get()
|
||||
consumer = DirectConsumer(connection=con_conn, msg_id=msg_id)
|
||||
wait_msg = MulticallWaiter(consumer)
|
||||
consumer.register_callback(wait_msg)
|
||||
|
||||
conn = Connection.instance()
|
||||
publisher = TopicPublisher(connection=conn, topic=topic)
|
||||
publisher = TopicPublisher(connection=con_conn, topic=topic)
|
||||
publisher.send(msg)
|
||||
publisher.close()
|
||||
|
||||
try:
|
||||
consumer.wait(limit=1)
|
||||
except StopIteration:
|
||||
pass
|
||||
consumer.close()
|
||||
# NOTE(termie): this is a little bit of a change from the original
|
||||
# non-eventlet code where returning a Failure
|
||||
# instance from a deferred call is very similar to
|
||||
# raising an exception
|
||||
if isinstance(wait_msg.result, Exception):
|
||||
raise wait_msg.result
|
||||
return wait_msg.result
|
||||
return wait_msg
|
||||
|
||||
|
||||
class MulticallWaiter(object):
|
||||
def __init__(self, consumer):
|
||||
self._consumer = consumer
|
||||
self._results = queue.Queue()
|
||||
self._closed = False
|
||||
|
||||
def close(self):
|
||||
self._closed = True
|
||||
self._consumer.close()
|
||||
ConnectionPool.put(self._consumer.connection)
|
||||
|
||||
def __call__(self, data, message):
|
||||
"""Acks message and sets result."""
|
||||
message.ack()
|
||||
if data['failure']:
|
||||
self._results.put(RemoteError(*data['failure']))
|
||||
else:
|
||||
self._results.put(data['result'])
|
||||
|
||||
def __iter__(self):
|
||||
return self.wait()
|
||||
|
||||
def wait(self):
|
||||
while True:
|
||||
rv = None
|
||||
while rv is None and not self._closed:
|
||||
try:
|
||||
rv = self._consumer.fetch(enable_callbacks=True)
|
||||
except Exception:
|
||||
self.close()
|
||||
raise
|
||||
time.sleep(0.01)
|
||||
|
||||
result = self._results.get()
|
||||
if isinstance(result, Exception):
|
||||
self.close()
|
||||
raise result
|
||||
if result == None:
|
||||
self.close()
|
||||
raise StopIteration
|
||||
yield result
|
||||
|
||||
|
||||
def call(context, topic, msg):
|
||||
"""Sends a message on a topic and wait for a response."""
|
||||
rv = multicall(context, topic, msg)
|
||||
# NOTE(vish): return the last result from the multicall
|
||||
rv = list(rv)
|
||||
if not rv:
|
||||
return
|
||||
return rv[-1]
|
||||
|
||||
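To make the control flow concrete (the proxy class, topic and context below are invented for the example): a worker-side method implemented as a generator yields one reply per item, and the caller consumes them with `multicall()` or keeps only the last value with `call()`::

    class ExampleProxy(object):
        def list_disks(self, context):
            # Each yielded value is sent back as a separate reply; the final
            # None reply added by _process_data ends the iteration.
            yield 'vda'
            yield 'vdb'

    # Caller side:
    for name in multicall(ctxt, 'example_topic',
                          {'method': 'list_disks', 'args': {}}):
        print name
    last = call(ctxt, 'example_topic', {'method': 'list_disks', 'args': {}})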
|
||||
def cast(context, topic, msg):
|
||||
"""Sends a message on a topic without waiting for a response."""
|
||||
LOG.debug(_('Making asynchronous cast on %s...'), topic)
|
||||
_pack_context(msg, context)
|
||||
conn = Connection.instance()
|
||||
publisher = TopicPublisher(connection=conn, topic=topic)
|
||||
publisher.send(msg)
|
||||
publisher.close()
|
||||
with ConnectionPool.item() as conn:
|
||||
publisher = TopicPublisher(connection=conn, topic=topic)
|
||||
publisher.send(msg)
|
||||
publisher.close()
|
||||
|
||||
|
||||
def fanout_cast(context, topic, msg):
|
||||
"""Sends a message on a fanout exchange without waiting for a response."""
|
||||
LOG.debug(_('Making asynchronous fanout cast...'))
|
||||
_pack_context(msg, context)
|
||||
conn = Connection.instance()
|
||||
publisher = FanoutPublisher(topic, connection=conn)
|
||||
publisher.send(msg)
|
||||
publisher.close()
|
||||
with ConnectionPool.item() as conn:
|
||||
publisher = FanoutPublisher(topic, connection=conn)
|
||||
publisher.send(msg)
|
||||
publisher.close()
|
||||
|
||||
|
||||
def generic_response(message_data, message):
|
||||
@@ -459,6 +596,7 @@ def send_message(topic, message, wait=True):
|
||||
|
||||
if wait:
|
||||
consumer.wait()
|
||||
consumer.close()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -24,6 +24,7 @@ from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova import utils
|
||||
|
||||
from eventlet import greenpool
|
||||
|
||||
@@ -81,6 +82,12 @@ def get_zone_capabilities(context):
|
||||
return _call_scheduler('get_zone_capabilities', context=context)
|
||||
|
||||
|
||||
def select(context, specs=None):
|
||||
"""Returns a list of hosts."""
|
||||
return _call_scheduler('select', context=context,
|
||||
params={"request_spec": specs})
|
||||
|
||||
|
||||
def update_service_capabilities(context, service_name, host, capabilities):
|
||||
"""Send an update to all the scheduler services informing them
|
||||
of the capabilities of this service."""
|
||||
@@ -100,11 +107,50 @@ def _wrap_method(function, self):
|
||||
def _process(func, zone):
|
||||
"""Worker stub for green thread pool. Give the worker
|
||||
an authenticated nova client and zone info."""
|
||||
nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
|
||||
nova = novaclient.OpenStack(zone.username, zone.password, None,
|
||||
zone.api_url)
|
||||
nova.authenticate()
|
||||
return func(nova, zone)
|
||||
|
||||
|
||||
def call_zone_method(context, method_name, errors_to_ignore=None,
|
||||
novaclient_collection_name='zones', *args, **kwargs):
|
||||
"""Returns a list of (zone, call_result) objects."""
|
||||
if not isinstance(errors_to_ignore, (list, tuple)):
|
||||
# This will also handle the default None
|
||||
errors_to_ignore = [errors_to_ignore]
|
||||
|
||||
pool = greenpool.GreenPool()
|
||||
results = []
|
||||
for zone in db.zone_get_all(context):
|
||||
try:
|
||||
nova = novaclient.OpenStack(zone.username, zone.password, None,
|
||||
zone.api_url)
|
||||
nova.authenticate()
|
||||
except novaclient.exceptions.BadRequest, e:
|
||||
url = zone.api_url
|
||||
LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s")
|
||||
% locals())
|
||||
#TODO (dabo) - add logic for failure counts per zone,
|
||||
# with escalation after a given number of failures.
|
||||
continue
|
||||
novaclient_collection = getattr(nova, novaclient_collection_name)
|
||||
collection_method = getattr(novaclient_collection, method_name)
|
||||
|
||||
def _error_trap(*args, **kwargs):
|
||||
try:
|
||||
return collection_method(*args, **kwargs)
|
||||
except Exception as e:
|
||||
if type(e) in errors_to_ignore:
|
||||
return None
|
||||
raise
|
||||
|
||||
res = pool.spawn(_error_trap, *args, **kwargs)
|
||||
results.append((zone, res))
|
||||
pool.waitall()
|
||||
return [(zone.id, res.wait()) for zone, res in results]
|
||||
|
||||
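An illustrative use, assuming 'info' is a valid method on the novaclient zones collection::

    # Fan out to every registered child zone and collect (zone_id, result).
    results = call_zone_method(context, 'info')
    for zone_id, info in results:
        LOG.debug(_("Child zone %(zone_id)s reported: %(info)s") % locals())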
|
||||
def child_zone_helper(zone_list, func):
|
||||
"""Fire off a command to each zone in the list.
|
||||
The return is [novaclient return objects] from each child zone.
|
||||
@@ -156,38 +202,78 @@ class RedirectResult(exception.Error):
|
||||
|
||||
|
||||
class reroute_compute(object):
|
||||
"""Decorator used to indicate that the method should
|
||||
delegate the call the child zones if the db query
|
||||
can't find anything."""
|
||||
"""
|
||||
reroute_compute is responsible for trying to lookup a resource in the
|
||||
current zone and if it's not found there, delegating the call to the
|
||||
child zones.
|
||||
|
||||
Since reroute_compute will be making 'cross-zone' calls, the ID for the
|
||||
object must come in as a UUID-- if we receive an integer ID, we bail.
|
||||
|
||||
The steps involved are:
|
||||
|
||||
1. Validate that item_id is UUID like
|
||||
|
||||
2. Lookup item by UUID in the zone local database
|
||||
|
||||
3. If the item was found, then extract integer ID, and pass that to
|
||||
the wrapped method. (This ensures that zone-local code can
|
||||
continue to use integer IDs).
|
||||
|
||||
4. If the item was not found, we delegate the call to a child zone
|
||||
using the UUID.
|
||||
"""
|
||||
def __init__(self, method_name):
|
||||
self.method_name = method_name
|
||||
|
||||
def _route_to_child_zones(self, context, collection, item_uuid):
|
||||
if not FLAGS.enable_zone_routing:
|
||||
raise exception.InstanceNotFound(instance_id=item_uuid)
|
||||
|
||||
zones = db.zone_get_all(context)
|
||||
if not zones:
|
||||
raise exception.InstanceNotFound(instance_id=item_uuid)
|
||||
|
||||
# Ask the children to provide an answer ...
|
||||
LOG.debug(_("Asking child zones ..."))
|
||||
result = self._call_child_zones(zones,
|
||||
wrap_novaclient_function(_issue_novaclient_command,
|
||||
collection, self.method_name, item_uuid))
|
||||
# Scrub the results and raise another exception
|
||||
# so the API layers can bail out gracefully ...
|
||||
raise RedirectResult(self.unmarshall_result(result))
|
||||
|
||||
def __call__(self, f):
|
||||
def wrapped_f(*args, **kwargs):
|
||||
collection, context, item_id = \
|
||||
collection, context, item_id_or_uuid = \
|
||||
self.get_collection_context_and_id(args, kwargs)
|
||||
try:
|
||||
# Call the original function ...
|
||||
|
||||
attempt_reroute = False
|
||||
if utils.is_uuid_like(item_id_or_uuid):
|
||||
item_uuid = item_id_or_uuid
|
||||
try:
|
||||
instance = db.instance_get_by_uuid(context, item_uuid)
|
||||
except exception.InstanceNotFound, e:
|
||||
# NOTE(sirp): since a UUID was passed in, we can attempt
|
||||
# to reroute to a child zone
|
||||
attempt_reroute = True
|
||||
LOG.debug(_("Instance %(item_uuid)s not found "
|
||||
"locally: '%(e)s'" % locals()))
|
||||
else:
|
||||
# NOTE(sirp): since we're not re-routing in this case, and
|
||||
# we were passed a UUID, we need to replace that UUID
|
||||
# with an integer ID in the argument list so that the
|
||||
# zone-local code can continue to use integer IDs.
|
||||
item_id = instance['id']
|
||||
args = list(args) # needs to be mutable to replace
|
||||
self.replace_uuid_with_id(args, kwargs, item_id)
|
||||
|
||||
if attempt_reroute:
|
||||
return self._route_to_child_zones(context, collection,
|
||||
item_uuid)
|
||||
else:
|
||||
return f(*args, **kwargs)
|
||||
except exception.InstanceNotFound, e:
|
||||
LOG.debug(_("Instance %(item_id)s not found "
|
||||
"locally: '%(e)s'" % locals()))
|
||||
|
||||
if not FLAGS.enable_zone_routing:
|
||||
raise
|
||||
|
||||
zones = db.zone_get_all(context)
|
||||
if not zones:
|
||||
raise
|
||||
|
||||
# Ask the children to provide an answer ...
|
||||
LOG.debug(_("Asking child zones ..."))
|
||||
result = self._call_child_zones(zones,
|
||||
wrap_novaclient_function(_issue_novaclient_command,
|
||||
collection, self.method_name, item_id))
|
||||
# Scrub the results and raise another exception
|
||||
# so the API layers can bail out gracefully ...
|
||||
raise RedirectResult(self.unmarshall_result(result))
|
||||
return wrapped_f
|
||||
|
||||
def _call_child_zones(self, zones, function):
|
||||
@@ -206,6 +292,18 @@ class reroute_compute(object):
|
||||
instance_id = args[2]
|
||||
return ("servers", context, instance_id)
|
||||
|
||||
@staticmethod
|
||||
def replace_uuid_with_id(args, kwargs, replacement_id):
|
||||
"""
|
||||
Extracts the UUID parameter from the arg or kwarg list and replaces
|
||||
it with an integer ID.
|
||||
"""
|
||||
if 'instance_id' in kwargs:
|
||||
kwargs['instance_id'] = replacement_id
|
||||
elif len(args) > 1:
|
||||
args.pop(2)
|
||||
args.insert(2, replacement_id)
|
||||
|
||||
def unmarshall_result(self, zone_responses):
|
||||
"""Result is a list of responses from each child zone.
|
||||
Each decorator derivation is responsible for turning this
|
||||
|
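For context, the decorator is meant to wrap zone-local API methods; a simplified, hypothetical use (class and method name are stand-ins) looks roughly like::

    class ComputeAPIStandIn(object):
        @reroute_compute("get")
        def get(self, context, instance_id):
            # Runs with an integer id when the instance exists locally; if a
            # UUID was passed and no local row is found, the decorator
            # reroutes the lookup to the child zones instead.
            return db.instance_get(context, instance_id)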
||||
@@ -28,6 +28,7 @@ from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova import utils
|
||||
from nova.compute import power_state
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
@@ -61,7 +62,7 @@ class Scheduler(object):
|
||||
"""Check whether a service is up based on last heartbeat."""
|
||||
last_heartbeat = service['updated_at'] or service['created_at']
|
||||
# Timestamps in DB are UTC.
|
||||
elapsed = datetime.datetime.utcnow() - last_heartbeat
|
||||
elapsed = utils.utcnow() - last_heartbeat
|
||||
return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time)
|
||||
|
||||
def hosts_up(self, context, topic):
|
||||
|
||||
@@ -14,8 +14,8 @@
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Host Filter is a driver mechanism for requesting instance resources.
|
||||
Three drivers are included: AllHosts, Flavor & JSON. AllHosts just
|
||||
Host Filter is a mechanism for requesting instance resources.
|
||||
Three filters are included: AllHosts, Flavor & JSON. AllHosts just
|
||||
returns the full, unfiltered list of hosts. Flavor is a hard coded
|
||||
matching mechanism based on flavor criteria and JSON is an ad-hoc
|
||||
filter grammar.
|
||||
@@ -41,18 +41,20 @@ import json
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova.scheduler import zone_aware_scheduler
|
||||
from nova import utils
|
||||
from nova.scheduler import zone_aware_scheduler
|
||||
|
||||
LOG = logging.getLogger('nova.scheduler.host_filter')
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_string('default_host_filter_driver',
|
||||
flags.DEFINE_string('default_host_filter',
|
||||
'nova.scheduler.host_filter.AllHostsFilter',
|
||||
'Which driver to use for filtering hosts.')
|
||||
'Which filter to use for filtering hosts.')
|
||||
|
||||
|
||||
class HostFilter(object):
|
||||
"""Base class for host filter drivers."""
|
||||
"""Base class for host filters."""
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Convert instance_type into a filter for most common use-case."""
|
||||
@@ -63,14 +65,15 @@ class HostFilter(object):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _full_name(self):
|
||||
"""module.classname of the filter driver"""
|
||||
"""module.classname of the filter."""
|
||||
return "%s.%s" % (self.__module__, self.__class__.__name__)
|
||||
|
||||
|
||||
class AllHostsFilter(HostFilter):
|
||||
"""NOP host filter driver. Returns all hosts in ZoneManager.
|
||||
""" NOP host filter. Returns all hosts in ZoneManager.
|
||||
This essentially does what the old Scheduler+Chance used
|
||||
to give us."""
|
||||
to give us.
|
||||
"""
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Return anything to prevent base-class from raising
|
||||
@@ -83,8 +86,8 @@ class AllHostsFilter(HostFilter):
|
||||
for host, services in zone_manager.service_states.iteritems()]
|
||||
|
||||
|
||||
class FlavorFilter(HostFilter):
|
||||
"""HostFilter driver hard-coded to work with flavors."""
|
||||
class InstanceTypeFilter(HostFilter):
|
||||
"""HostFilter hard-coded to work with InstanceType records."""
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Use instance_type to filter hosts."""
|
||||
@@ -98,9 +101,10 @@ class FlavorFilter(HostFilter):
|
||||
capabilities = services.get('compute', {})
|
||||
host_ram_mb = capabilities['host_memory_free']
|
||||
disk_bytes = capabilities['disk_available']
|
||||
if host_ram_mb >= instance_type['memory_mb'] and \
|
||||
disk_bytes >= instance_type['local_gb']:
|
||||
selected_hosts.append((host, capabilities))
|
||||
spec_ram = instance_type['memory_mb']
|
||||
spec_disk = instance_type['local_gb']
|
||||
if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
|
||||
selected_hosts.append((host, capabilities))
|
||||
return selected_hosts
|
||||
|
||||
# host entries (currently) are like:
|
||||
@@ -109,15 +113,15 @@ class FlavorFilter(HostFilter):
|
||||
# 'host_memory_total': 8244539392,
|
||||
# 'host_memory_overhead': 184225792,
|
||||
# 'host_memory_free': 3868327936,
|
||||
# 'host_memory_free_computed': 3840843776},
|
||||
# 'host_other-config': {},
|
||||
# 'host_memory_free_computed': 3840843776,
|
||||
# 'host_other_config': {},
|
||||
# 'host_ip_address': '192.168.1.109',
|
||||
# 'host_cpu_info': {},
|
||||
# 'disk_available': 32954957824,
|
||||
# 'disk_total': 50394562560,
|
||||
# 'disk_used': 17439604736},
|
||||
# 'disk_used': 17439604736,
|
||||
# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
|
||||
# 'host_name-label': 'xs-mini'}
|
||||
# 'host_name_label': 'xs-mini'}
|
||||
|
||||
# instance_type table has:
|
||||
#name = Column(String(255), unique=True)
|
||||
@@ -131,8 +135,9 @@ class FlavorFilter(HostFilter):
|
||||
|
||||
|
||||
class JsonFilter(HostFilter):
|
||||
"""Host Filter driver to allow simple JSON-based grammar for
|
||||
selecting hosts."""
|
||||
"""Host Filter to allow simple JSON-based grammar for
|
||||
selecting hosts.
|
||||
"""
|
||||
|
||||
def _equals(self, args):
|
||||
"""First term is == all the other terms."""
|
||||
@@ -222,13 +227,14 @@ class JsonFilter(HostFilter):
|
||||
required_disk = instance_type['local_gb']
|
||||
query = ['and',
|
||||
['>=', '$compute.host_memory_free', required_ram],
|
||||
['>=', '$compute.disk_available', required_disk]
|
||||
['>=', '$compute.disk_available', required_disk],
|
||||
]
|
||||
return (self._full_name(), json.dumps(query))
|
||||
|
||||
def _parse_string(self, string, host, services):
|
||||
"""Strings prefixed with $ are capability lookups in the
|
||||
form '$service.capability[.subcap*]'"""
|
||||
form '$service.capability[.subcap*]'
|
||||
"""
|
||||
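# For example, '$compute.host_memory_free' is looked up as
# services['compute']['host_memory_free'] for the host under consideration.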
if not string:
|
||||
return None
|
||||
if string[0] != '$':
|
||||
@@ -271,18 +277,48 @@ class JsonFilter(HostFilter):
|
||||
return hosts
|
||||
|
||||
|
||||
DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter]
|
||||
FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter]
|
||||
|
||||
|
||||
def choose_driver(driver_name=None):
|
||||
"""Since the caller may specify which driver to use we need
|
||||
to have an authoritative list of what is permissible. This
|
||||
function checks the driver name against a predefined set
|
||||
of acceptable drivers."""
|
||||
def choose_host_filter(filter_name=None):
|
||||
"""Since the caller may specify which filter to use we need
|
||||
to have an authoritative list of what is permissible. This
|
||||
function checks the filter name against a predefined set
|
||||
of acceptable filters.
|
||||
"""
|
||||
|
||||
if not driver_name:
|
||||
driver_name = FLAGS.default_host_filter_driver
|
||||
for driver in DRIVERS:
|
||||
if "%s.%s" % (driver.__module__, driver.__name__) == driver_name:
|
||||
return driver()
|
||||
raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name)
|
||||
if not filter_name:
|
||||
filter_name = FLAGS.default_host_filter
|
||||
for filter_class in FILTERS:
|
||||
host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__)
|
||||
if host_match == filter_name:
|
||||
return filter_class()
|
||||
raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
|
||||
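As a usage sketch (mirroring the host filter tests added later in this diff), callers either rely on the flag default or name a filter explicitly:

from nova.scheduler import host_filter

# Uses FLAGS.default_host_filter when no name is supplied.
hf = host_filter.choose_host_filter()
# Or request a specific filter by its module.classname:
hf = host_filter.choose_host_filter(
    'nova.scheduler.host_filter.InstanceTypeFilter')
# An unrecognized name raises exception.SchedulerHostFilterNotFound.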
|
||||
|
||||
class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
|
||||
"""The HostFilterScheduler uses the HostFilter to filter
|
||||
hosts for weighing. The particular filter used may be passed in
|
||||
as an argument or the default will be used.
|
||||
|
||||
request_spec = {'filter': <Filter name>,
|
||||
'instance_type': <InstanceType dict>}
|
||||
"""
|
||||
|
||||
def filter_hosts(self, num, request_spec):
|
||||
"""Filter the full host list (from the ZoneManager)"""
|
||||
filter_name = request_spec.get('filter', None)
|
||||
host_filter = choose_host_filter(filter_name)
|
||||
|
||||
# TODO(sandy): We're only using InstanceType-based specs
|
||||
# currently. Later we'll need to snoop for more detailed
|
||||
# host filter requests.
|
||||
instance_type = request_spec['instance_type']
|
||||
name, query = host_filter.instance_type_to_filter(instance_type)
|
||||
return host_filter.filter_hosts(self.zone_manager, query)
|
||||
|
||||
def weigh_hosts(self, num, request_spec, hosts):
|
||||
"""Derived classes must override this method and return
|
||||
a list of hosts in [{weight, hostname}] format.
|
||||
"""
|
||||
return [dict(weight=1, hostname=host) for host, caps in hosts]
|
||||
|
||||
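For example, a request_spec matching the template in the HostFilterScheduler docstring above might look like the following; the field values are borrowed from the host filter tests below, and only the keys filter_hosts() actually reads are shown:

# Hypothetical request_spec handed to HostFilterScheduler.filter_hosts():
request_spec = {'filter': 'nova.scheduler.host_filter.InstanceTypeFilter',
                'instance_type': {'memory_mb': 50, 'local_gb': 500}}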
156
nova/scheduler/least_cost.py
Normal file
@@ -0,0 +1,156 @@
|
||||
# Copyright (c) 2011 Openstack, LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Least Cost Scheduler is a mechanism for choosing which host machines to
|
||||
provision a set of resources to. The input of the least-cost-scheduler is a
|
||||
set of objective-functions, called the 'cost-functions', a weight for each
|
||||
cost-function, and a list of candidate hosts (gathered via FilterHosts).
|
||||
|
||||
The cost-function and weights are tabulated, and the host with the least cost
|
||||
is then selected for provisioning.
|
||||
"""
|
||||
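A small worked example of the tabulation, assuming the weighted_sum() helper defined later in this file (the host data and weights are illustrative; per-function scores are normalized, so each contribution is weight * score / max_score):

# Two hosts, two cost functions: fill-first (free RAM is a cost, weight 1)
# and an I/O penalty (weight 2).
hosts = [('host1', {'free_ram': 512, 'io': 100}),
         ('host2', {'free_ram': 256, 'io': 400})]
weighted_fns = [(1, lambda h: h[1]['free_ram']),
                (2, lambda h: h[1]['io'])]
# weighted_sum(domain=hosts, weighted_fns=weighted_fns) yields
# [1 * 512/512 + 2 * 100/400, 1 * 256/512 + 2 * 400/400] == [1.5, 2.5],
# so host1 (cost 1.5) would be provisioned first.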
|
||||
import collections
|
||||
|
||||
from nova import exception
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova.scheduler import zone_aware_scheduler
|
||||
from nova import utils
|
||||
|
||||
LOG = logging.getLogger('nova.scheduler.least_cost')
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_list('least_cost_scheduler_cost_functions',
|
||||
['nova.scheduler.least_cost.noop_cost_fn'],
|
||||
'Which cost functions the LeastCostScheduler should use.')
|
||||
|
||||
|
||||
# TODO(sirp): Once we have enough of these rules, we can break them out into a
|
||||
# cost_functions.py file (perhaps in a least_cost_scheduler directory)
|
||||
flags.DEFINE_integer('noop_cost_fn_weight', 1,
|
||||
'How much weight to give the noop cost function')
|
||||
|
||||
|
||||
def noop_cost_fn(host):
|
||||
"""Return a pre-weight cost of 1 for each host"""
|
||||
return 1
|
||||
|
||||
|
||||
flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
|
||||
'How much weight to give the fill-first cost function')
|
||||
|
||||
|
||||
def fill_first_cost_fn(host):
|
||||
"""Prefer hosts that have less ram available, filter_hosts will exclude
|
||||
hosts that don't have enough ram"""
|
||||
hostname, caps = host
|
||||
free_mem = caps['compute']['host_memory_free']
|
||||
return free_mem
|
||||
|
||||
|
||||
class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
|
||||
def get_cost_fns(self):
|
||||
"""Returns a list of tuples containing weights and cost functions to
|
||||
use for weighing hosts
|
||||
"""
|
||||
cost_fns = []
|
||||
for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
|
||||
|
||||
try:
|
||||
# NOTE(sirp): import_class is somewhat misnamed since it can
|
||||
# return any callable from a module
|
||||
cost_fn = utils.import_class(cost_fn_str)
|
||||
except exception.ClassNotFound:
|
||||
raise exception.SchedulerCostFunctionNotFound(
|
||||
cost_fn_str=cost_fn_str)
|
||||
|
||||
try:
|
||||
flag_name = "%s_weight" % cost_fn.__name__
weight = getattr(FLAGS, flag_name)
|
||||
except AttributeError:
|
||||
raise exception.SchedulerWeightFlagNotFound(
|
||||
flag_name=flag_name)
|
||||
|
||||
cost_fns.append((weight, cost_fn))
|
||||
|
||||
return cost_fns
|
||||
|
||||
def weigh_hosts(self, num, request_spec, hosts):
|
||||
"""Returns a list of dictionaries of form:
|
||||
[ {weight: weight, hostname: hostname} ]"""
|
||||
|
||||
# FIXME(sirp): weigh_hosts should handle more than just instances
|
||||
hostnames = [hostname for hostname, caps in hosts]
|
||||
|
||||
cost_fns = self.get_cost_fns()
|
||||
costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
|
||||
|
||||
weighted = []
|
||||
weight_log = []
|
||||
for cost, hostname in zip(costs, hostnames):
|
||||
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
|
||||
weight_dict = dict(weight=cost, hostname=hostname)
|
||||
weighted.append(weight_dict)
|
||||
|
||||
LOG.debug(_("Weighted Costs => %s") % weight_log)
|
||||
return weighted
|
||||
|
||||
|
||||
def normalize_list(L):
|
||||
"""Normalize an array of numbers such that each element satisfies:
|
||||
0 <= e <= 1"""
|
||||
if not L:
|
||||
return L
|
||||
max_ = max(L)
|
||||
if max_ > 0:
|
||||
return [(float(e) / max_) for e in L]
|
||||
return L
|
||||
|
||||
|
||||
def weighted_sum(domain, weighted_fns, normalize=True):
|
||||
"""Use the weighted-sum method to compute a score for an array of objects.
|
||||
Normalize the results of the objective-functions so that the weights are
|
||||
meaningful regardless of the objective-function's range.
|
||||
|
||||
domain - input to be scored
|
||||
weighted_fns - list of weights and functions like:
|
||||
[(weight, objective-functions)]
|
||||
|
||||
Returns an unsorted list of scores. To pair with hosts do: zip(scores, hosts)
|
||||
"""
|
||||
# Table of form:
|
||||
# { domain1: [score1, score2, ..., scoreM]
|
||||
# ...
|
||||
# domainN: [score1, score2, ..., scoreM] }
|
||||
score_table = collections.defaultdict(list)
|
||||
for weight, fn in weighted_fns:
|
||||
scores = [fn(elem) for elem in domain]
|
||||
|
||||
if normalize:
|
||||
norm_scores = normalize_list(scores)
|
||||
else:
|
||||
norm_scores = scores
|
||||
|
||||
for idx, score in enumerate(norm_scores):
|
||||
weighted_score = score * weight
|
||||
score_table[idx].append(weighted_score)
|
||||
|
||||
# Sum rows in table to compute score for each element in domain
|
||||
domain_scores = []
|
||||
for idx in sorted(score_table):
|
||||
elem_score = sum(score_table[idx])
|
||||
elem = domain[idx]
|
||||
domain_scores.append(elem_score)
|
||||
|
||||
return domain_scores
|
||||
@@ -70,6 +70,14 @@ class SchedulerManager(manager.Manager):
|
||||
self.zone_manager.update_service_capabilities(service_name,
|
||||
host, capabilities)
|
||||
|
||||
def select(self, context=None, *args, **kwargs):
|
||||
"""Select a list of hosts best matching the provided specs."""
|
||||
return self.driver.select(context, *args, **kwargs)
|
||||
|
||||
def get_scheduler_rules(self, context=None, *args, **kwargs):
|
||||
"""Ask the driver how requests should be made of it."""
|
||||
return self.driver.get_scheduler_rules(context, *args, **kwargs)
|
||||
|
||||
def _schedule(self, method, context, topic, *args, **kwargs):
|
||||
"""Tries to call schedule_* method on the driver to retrieve host.
|
||||
|
||||
@@ -80,14 +88,21 @@ class SchedulerManager(manager.Manager):
|
||||
try:
|
||||
host = getattr(self.driver, driver_method)(elevated, *args,
|
||||
**kwargs)
|
||||
except AttributeError:
|
||||
except AttributeError, e:
|
||||
LOG.warning(_("Driver Method %(driver_method)s missing: %(e)s."
|
||||
"Reverting to schedule()") % locals())
|
||||
host = self.driver.schedule(elevated, topic, *args, **kwargs)
|
||||
|
||||
if not host:
|
||||
LOG.debug(_("%(topic)s %(method)s handled in Scheduler")
|
||||
% locals())
|
||||
return
|
||||
|
||||
rpc.cast(context,
|
||||
db.queue_get_for(context, topic, host),
|
||||
{"method": method,
|
||||
"args": kwargs})
|
||||
LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
|
||||
LOG.debug(_("Casted to %(topic)s %(host)s for %(method)s") % locals())
|
||||
|
||||
# NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
|
||||
# Based on bexar design summit discussion,
|
||||
|
||||
@@ -21,10 +21,9 @@
|
||||
Simple Scheduler
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
||||
from nova import db
|
||||
from nova import flags
|
||||
from nova import utils
|
||||
from nova.scheduler import driver
|
||||
from nova.scheduler import chance
|
||||
|
||||
@@ -40,7 +39,7 @@ flags.DEFINE_integer("max_networks", 1000,
|
||||
class SimpleScheduler(chance.ChanceScheduler):
|
||||
"""Implements Naive Scheduler that tries to find least loaded host."""
|
||||
|
||||
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
|
||||
def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
|
||||
"""Picks a host that is up and has the fewest running instances."""
|
||||
instance_ref = db.instance_get(context, instance_id)
|
||||
if (instance_ref['availability_zone']
|
||||
@@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler):
|
||||
|
||||
# TODO(vish): this probably belongs in the manager, if we
|
||||
# can generalize this somehow
|
||||
now = datetime.datetime.utcnow()
|
||||
now = utils.utcnow()
|
||||
db.instance_update(context, instance_id, {'host': host,
|
||||
'scheduled_at': now})
|
||||
return host
|
||||
@@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler):
|
||||
if self.service_is_up(service):
|
||||
# NOTE(vish): this probably belongs in the manager, if we
|
||||
# can generalize this somehow
|
||||
now = datetime.datetime.utcnow()
|
||||
now = utils.utcnow()
|
||||
db.instance_update(context,
|
||||
instance_id,
|
||||
{'host': service['host'],
|
||||
@@ -76,6 +75,12 @@ class SimpleScheduler(chance.ChanceScheduler):
|
||||
" for this request. Is the appropriate"
|
||||
" service running?"))
|
||||
|
||||
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
|
||||
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
|
||||
|
||||
def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
|
||||
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
|
||||
|
||||
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
|
||||
"""Picks a host that is up and has the fewest volumes."""
|
||||
volume_ref = db.volume_get(context, volume_id)
|
||||
@@ -90,7 +95,7 @@ class SimpleScheduler(chance.ChanceScheduler):
|
||||
|
||||
# TODO(vish): this probably belongs in the manager, if we
|
||||
# can generalize this somehow
|
||||
now = datetime.datetime.utcnow()
|
||||
now = utils.utcnow()
|
||||
db.volume_update(context, volume_id, {'host': host,
|
||||
'scheduled_at': now})
|
||||
return host
|
||||
@@ -103,7 +108,7 @@ class SimpleScheduler(chance.ChanceScheduler):
|
||||
if self.service_is_up(service):
|
||||
# NOTE(vish): this probably belongs in the manager, if we
|
||||
# can generalize this somehow
|
||||
now = datetime.datetime.utcnow()
|
||||
now = utils.utcnow()
|
||||
db.volume_update(context,
|
||||
volume_id,
|
||||
{'host': service['host'],
|
||||
|
||||
271
nova/scheduler/zone_aware_scheduler.py
Normal file
@@ -0,0 +1,271 @@
|
||||
# Copyright (c) 2011 Openstack, LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
The Zone Aware Scheduler is a base class Scheduler for creating instances
|
||||
across zones. There are two expansion points in this class:
|
||||
1. Assigning Weights to hosts for requested instances
|
||||
2. Filtering Hosts based on required instance capabilities
|
||||
"""
|
||||
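A minimal sketch of a subclass exercising both expansion points (the class name is illustrative; the filter body matches the default behaviour defined below and the weighing mirrors the fill-first cost function from least_cost.py):

class FillFirstZoneScheduler(ZoneAwareScheduler):
    def filter_hosts(self, num, request_spec):
        # Keep every host the ZoneManager knows about.
        return [(host, services) for host, services
                in self.zone_manager.service_states.iteritems()]

    def weigh_hosts(self, num, request_spec, hosts):
        # Prefer hosts with less free RAM so hosts fill up first.
        return [dict(weight=caps['compute']['host_memory_free'],
                     hostname=host)
                for host, caps in hosts]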
|
||||
import operator
|
||||
import json
|
||||
|
||||
import M2Crypto
|
||||
import novaclient
|
||||
|
||||
from nova import crypto
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
|
||||
from nova.scheduler import api
|
||||
from nova.scheduler import driver
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler')
|
||||
|
||||
|
||||
class InvalidBlob(exception.NovaException):
|
||||
message = _("Ill-formed or incorrectly routed 'blob' data sent "
|
||||
"to instance create request.")
|
||||
|
||||
|
||||
class ZoneAwareScheduler(driver.Scheduler):
|
||||
"""Base class for creating Zone Aware Schedulers."""
|
||||
|
||||
def _call_zone_method(self, context, method, specs):
|
||||
"""Call novaclient zone method. Broken out for testing."""
|
||||
return api.call_zone_method(context, method, specs=specs)
|
||||
|
||||
def _provision_resource_locally(self, context, item, instance_id, kwargs):
|
||||
"""Create the requested resource in this Zone."""
|
||||
host = item['hostname']
|
||||
kwargs['instance_id'] = instance_id
|
||||
rpc.cast(context,
|
||||
db.queue_get_for(context, "compute", host),
|
||||
{"method": "run_instance",
|
||||
"args": kwargs})
|
||||
LOG.debug(_("Provisioning locally via compute node %(host)s")
|
||||
% locals())
|
||||
|
||||
def _decrypt_blob(self, blob):
|
||||
"""Returns the decrypted blob or None if invalid. Broken out
|
||||
for testing."""
|
||||
decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
|
||||
try:
|
||||
json_entry = decryptor(blob)
|
||||
return json.loads(json_entry)
|
||||
except M2Crypto.EVP.EVPError:
|
||||
pass
|
||||
return None
|
||||
|
||||
def _ask_child_zone_to_create_instance(self, context, zone_info,
|
||||
request_spec, kwargs):
|
||||
"""Once we have determined that the request should go to one
|
||||
of our children, we need to fabricate a new POST /servers/
|
||||
call with the same parameters that were passed into us.
|
||||
|
||||
Note that we have to reverse engineer from our args to get back the
|
||||
image, flavor, ipgroup, etc. since the original call could have
|
||||
come in from EC2 (which doesn't use these things)."""
|
||||
|
||||
instance_type = request_spec['instance_type']
|
||||
instance_properties = request_spec['instance_properties']
|
||||
|
||||
name = instance_properties['display_name']
|
||||
image_ref = instance_properties['image_ref']
|
||||
meta = instance_properties['metadata']
|
||||
flavor_id = instance_type['flavorid']
|
||||
reservation_id = instance_properties['reservation_id']
|
||||
|
||||
files = kwargs['injected_files']
|
||||
ipgroup = None # Not supported in OS API ... yet
|
||||
|
||||
child_zone = zone_info['child_zone']
|
||||
child_blob = zone_info['child_blob']
|
||||
zone = db.zone_get(context, child_zone)
|
||||
url = zone.api_url
|
||||
LOG.debug(_("Forwarding instance create call to child zone %(url)s"
|
||||
". ReservationID=%(reservation_id)s")
|
||||
% locals())
|
||||
nova = None
|
||||
try:
|
||||
nova = novaclient.OpenStack(zone.username, zone.password, None,
|
||||
url)
|
||||
nova.authenticate()
|
||||
except novaclient.exceptions.BadRequest, e:
|
||||
raise exception.NotAuthorized(_("Bad credentials attempting "
|
||||
"to talk to zone at %(url)s.") % locals())
|
||||
|
||||
nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
|
||||
child_blob, reservation_id=reservation_id)
|
||||
|
||||
def _provision_resource_from_blob(self, context, item, instance_id,
|
||||
request_spec, kwargs):
|
||||
"""Create the requested resource locally or in a child zone
|
||||
based on what is stored in the zone blob info.
|
||||
|
||||
Attempt to decrypt the blob to see if this request is:
|
||||
1. valid, and
|
||||
2. intended for this zone or a child zone.
|
||||
|
||||
Note: If we have "blob" that means the request was passed
|
||||
into us from a parent zone. If we have "child_blob" that
|
||||
means we gathered the info from one of our children.
|
||||
It's possible that, when we decrypt the 'blob' field, it
|
||||
contains "child_blob" data. In which case we forward the
|
||||
request."""
|
||||
|
||||
host_info = None
|
||||
if "blob" in item:
|
||||
# Request was passed in from above. Is it for us?
|
||||
host_info = self._decrypt_blob(item['blob'])
|
||||
elif "child_blob" in item:
|
||||
# Our immediate child zone provided this info ...
|
||||
host_info = item
|
||||
|
||||
if not host_info:
|
||||
raise InvalidBlob()
|
||||
|
||||
# Valid data ... is it for us?
|
||||
if 'child_zone' in host_info and 'child_blob' in host_info:
|
||||
self._ask_child_zone_to_create_instance(context, host_info,
|
||||
request_spec, kwargs)
|
||||
else:
|
||||
self._provision_resource_locally(context, host_info,
|
||||
instance_id, kwargs)
|
||||
|
||||
def _provision_resource(self, context, item, instance_id, request_spec,
|
||||
kwargs):
|
||||
"""Create the requested resource in this Zone or a child zone."""
|
||||
if "hostname" in item:
|
||||
self._provision_resource_locally(context, item, instance_id,
|
||||
kwargs)
|
||||
return
|
||||
|
||||
self._provision_resource_from_blob(context, item, instance_id,
|
||||
request_spec, kwargs)
|
||||
|
||||
def schedule_run_instance(self, context, instance_id, request_spec,
|
||||
*args, **kwargs):
|
||||
"""This method is called from nova.compute.api to provision
|
||||
an instance. However we need to look at the parameters being
|
||||
passed in to see if this is a request to:
|
||||
1. Create a Build Plan and then provision, or
|
||||
2. Use the Build Plan information in the request parameters
|
||||
to simply create the instance (either in this zone or
|
||||
a child zone).
|
||||
"""
|
||||
|
||||
# TODO(sandy): We'll have to look for richer specs at some point.
|
||||
|
||||
blob = request_spec.get('blob')
|
||||
if blob:
|
||||
self._provision_resource(context, request_spec, instance_id,
|
||||
request_spec, kwargs)
|
||||
return None
|
||||
|
||||
# Create build plan and provision ...
|
||||
build_plan = self.select(context, request_spec)
|
||||
if not build_plan:
|
||||
raise driver.NoValidHost(_('No hosts were available'))
|
||||
|
||||
for num in xrange(request_spec['num_instances']):
|
||||
if not build_plan:
|
||||
break
|
||||
|
||||
item = build_plan.pop(0)
|
||||
self._provision_resource(context, item, instance_id, request_spec,
|
||||
kwargs)
|
||||
|
||||
# Returning None short-circuits the routing to Compute (since
|
||||
# we've already done it here)
|
||||
return None
|
||||
|
||||
def select(self, context, request_spec, *args, **kwargs):
|
||||
"""Select returns a list of weights and zone/host information
|
||||
corresponding to the best hosts to service the request. Any
|
||||
child zone information has been encrypted so as not to reveal
|
||||
anything about the children.
|
||||
"""
|
||||
return self._schedule(context, "compute", request_spec,
|
||||
*args, **kwargs)
|
||||
|
||||
# TODO(sandy): We're only focused on compute instances right now,
|
||||
# so we don't implement the default "schedule()" method required
|
||||
# of Schedulers.
|
||||
def schedule(self, context, topic, request_spec, *args, **kwargs):
|
||||
"""The schedule() contract requires we return the one
|
||||
best-suited host for this request.
|
||||
"""
|
||||
raise driver.NoValidHost(_('No hosts were available'))
|
||||
|
||||
def _schedule(self, context, topic, request_spec, *args, **kwargs):
|
||||
"""Returns a list of hosts that meet the required specs,
|
||||
ordered by their fitness.
|
||||
"""
|
||||
|
||||
if topic != "compute":
|
||||
raise NotImplementedError(_("Zone Aware Scheduler only understands "
|
||||
"Compute nodes (for now)"))
|
||||
|
||||
#TODO(sandy): how to infer this from OS API params?
|
||||
num_instances = 1
|
||||
|
||||
# Filter local hosts based on requirements ...
|
||||
host_list = self.filter_hosts(num_instances, request_spec)
|
||||
|
||||
# TODO(sirp): weigh_hosts should also be a function of 'topic' or
|
||||
# resources, so that we can apply different objective functions to it
|
||||
|
||||
# then weigh the selected hosts.
|
||||
# weighted = [{weight=weight, hostname=hostname}, ...]
|
||||
weighted = self.weigh_hosts(num_instances, request_spec, host_list)
|
||||
|
||||
# Next, tack on the best weights from the child zones ...
|
||||
json_spec = json.dumps(request_spec)
|
||||
child_results = self._call_zone_method(context, "select",
|
||||
specs=json_spec)
|
||||
for child_zone, result in child_results:
|
||||
for weighting in result:
|
||||
# Remember the child_zone so we can get back to
|
||||
# it later if needed. This implicitly builds a zone
|
||||
# path structure.
|
||||
host_dict = {"weight": weighting["weight"],
|
||||
"child_zone": child_zone,
|
||||
"child_blob": weighting["blob"]}
|
||||
weighted.append(host_dict)
|
||||
|
||||
weighted.sort(key=operator.itemgetter('weight'))
|
||||
return weighted
|
||||
|
||||
def filter_hosts(self, num, request_spec):
|
||||
"""Derived classes must override this method and return
|
||||
a list of hosts in [(hostname, capability_dict)] format.
|
||||
"""
|
||||
# NOTE(sirp): The default logic is the equivalent to AllHostsFilter
|
||||
service_states = self.zone_manager.service_states
|
||||
return [(host, services)
|
||||
for host, services in service_states.iteritems()]
|
||||
|
||||
def weigh_hosts(self, num, request_spec, hosts):
|
||||
"""Derived classes may override this to provide more sophisticated
|
||||
scheduling objectives
|
||||
"""
|
||||
# NOTE(sirp): The default logic is the same as the NoopCostFunction
|
||||
return [dict(weight=1, hostname=host) for host, caps in hosts]
|
||||
@@ -17,16 +17,17 @@
|
||||
ZoneManager oversees all communications with child Zones.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import novaclient
|
||||
import thread
|
||||
import traceback
|
||||
|
||||
from datetime import datetime
|
||||
from eventlet import greenpool
|
||||
|
||||
from nova import db
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_integer('zone_db_check_interval', 60,
|
||||
@@ -42,7 +43,7 @@ class ZoneState(object):
|
||||
self.name = None
|
||||
self.capabilities = None
|
||||
self.attempt = 0
|
||||
self.last_seen = datetime.min
|
||||
self.last_seen = datetime.datetime.min
|
||||
self.last_exception = None
|
||||
self.last_exception_time = None
|
||||
|
||||
@@ -56,7 +57,7 @@ class ZoneState(object):
|
||||
def update_metadata(self, zone_metadata):
|
||||
"""Update zone metadata after successful communications with
|
||||
child zone."""
|
||||
self.last_seen = datetime.now()
|
||||
self.last_seen = utils.utcnow()
|
||||
self.attempt = 0
|
||||
self.name = zone_metadata.get("name", "n/a")
|
||||
self.capabilities = ", ".join(["%s=%s" % (k, v)
|
||||
@@ -72,7 +73,7 @@ class ZoneState(object):
|
||||
"""Something went wrong. Check to see if zone should be
|
||||
marked as offline."""
|
||||
self.last_exception = exception
|
||||
self.last_exception_time = datetime.now()
|
||||
self.last_exception_time = utils.utcnow()
|
||||
api_url = self.api_url
|
||||
logging.warning(_("'%(exception)s' error talking to "
|
||||
"zone %(api_url)s") % locals())
|
||||
@@ -88,7 +89,8 @@ class ZoneState(object):
|
||||
|
||||
def _call_novaclient(zone):
|
||||
"""Call novaclient. Broken out for testing purposes."""
|
||||
client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
|
||||
client = novaclient.OpenStack(zone.username, zone.password, None,
|
||||
zone.api_url)
|
||||
return client.zones.info()._info
|
||||
|
||||
|
||||
@@ -104,7 +106,7 @@ def _poll_zone(zone):
|
||||
class ZoneManager(object):
|
||||
"""Keeps the zone states updated."""
|
||||
def __init__(self):
|
||||
self.last_zone_db_check = datetime.min
|
||||
self.last_zone_db_check = datetime.datetime.min
|
||||
self.zone_states = {} # { <zone_id> : ZoneState }
|
||||
self.service_states = {} # { <host> : { <service> : { cap k : v }}}
|
||||
self.green_pool = greenpool.GreenPool()
|
||||
@@ -158,10 +160,10 @@ class ZoneManager(object):
|
||||
|
||||
def ping(self, context=None):
|
||||
"""Ping should be called periodically to update zone status."""
|
||||
diff = datetime.now() - self.last_zone_db_check
|
||||
diff = utils.utcnow() - self.last_zone_db_check
|
||||
if diff.seconds >= FLAGS.zone_db_check_interval:
|
||||
logging.debug(_("Updating zone cache from db."))
|
||||
self.last_zone_db_check = datetime.now()
|
||||
self.last_zone_db_check = utils.utcnow()
|
||||
self._refresh_from_db(context)
|
||||
self._poll_zones(context)
|
||||
|
||||
|
||||
@@ -19,14 +19,11 @@
|
||||
|
||||
"""Generic Node baseclass for all workers that run on hosts."""
|
||||
|
||||
import greenlet
|
||||
import inspect
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
from eventlet import event
|
||||
from eventlet import greenthread
|
||||
from eventlet import greenpool
|
||||
|
||||
from nova import context
|
||||
from nova import db
|
||||
@@ -91,27 +88,37 @@ class Service(object):
|
||||
if 'nova-compute' == self.binary:
|
||||
self.manager.update_available_resource(ctxt)
|
||||
|
||||
conn1 = rpc.Connection.instance(new=True)
|
||||
conn2 = rpc.Connection.instance(new=True)
|
||||
conn3 = rpc.Connection.instance(new=True)
|
||||
self.conn = rpc.Connection.instance(new=True)
|
||||
logging.debug("Creating Consumer connection for Service %s" %
|
||||
self.topic)
|
||||
|
||||
# Share this same connection for these Consumers
|
||||
consumer_all = rpc.TopicAdapterConsumer(
|
||||
connection=self.conn,
|
||||
topic=self.topic,
|
||||
proxy=self)
|
||||
consumer_node = rpc.TopicAdapterConsumer(
|
||||
connection=self.conn,
|
||||
topic='%s.%s' % (self.topic, self.host),
|
||||
proxy=self)
|
||||
fanout = rpc.FanoutAdapterConsumer(
|
||||
connection=self.conn,
|
||||
topic=self.topic,
|
||||
proxy=self)
|
||||
consumer_set = rpc.ConsumerSet(
|
||||
connection=self.conn,
|
||||
consumer_list=[consumer_all, consumer_node, fanout])
|
||||
|
||||
# Wait forever, processing these consumers
|
||||
def _wait():
|
||||
try:
|
||||
consumer_set.wait()
|
||||
finally:
|
||||
consumer_set.close()
|
||||
|
||||
self.consumer_set_thread = greenthread.spawn(_wait)
|
||||
|
||||
if self.report_interval:
|
||||
consumer_all = rpc.TopicAdapterConsumer(
|
||||
connection=conn1,
|
||||
topic=self.topic,
|
||||
proxy=self)
|
||||
consumer_node = rpc.TopicAdapterConsumer(
|
||||
connection=conn2,
|
||||
topic='%s.%s' % (self.topic, self.host),
|
||||
proxy=self)
|
||||
fanout = rpc.FanoutAdapterConsumer(
|
||||
connection=conn3,
|
||||
topic=self.topic,
|
||||
proxy=self)
|
||||
|
||||
self.timers.append(consumer_all.attach_to_eventlet())
|
||||
self.timers.append(consumer_node.attach_to_eventlet())
|
||||
self.timers.append(fanout.attach_to_eventlet())
|
||||
|
||||
pulse = utils.LoopingCall(self.report_state)
|
||||
pulse.start(interval=self.report_interval, now=False)
|
||||
self.timers.append(pulse)
|
||||
@@ -174,6 +181,11 @@ class Service(object):
|
||||
logging.warn(_('Service killed that has no database entry'))
|
||||
|
||||
def stop(self):
|
||||
self.consumer_set_thread.kill()
|
||||
try:
|
||||
self.consumer_set_thread.wait()
|
||||
except greenlet.GreenletExit:
|
||||
pass
|
||||
for x in self.timers:
|
||||
try:
|
||||
x.stop()
|
||||
@@ -240,6 +252,10 @@ class WsgiService(object):
|
||||
def wait(self):
|
||||
self.wsgi_app.wait()
|
||||
|
||||
def get_socket_info(self, api_name):
|
||||
"""Returns the (host, port) that an API was started on."""
|
||||
return self.wsgi_app.socket_info[api_name]
|
||||
|
||||
|
||||
class ApiService(WsgiService):
|
||||
"""Class for our nova-api service."""
|
||||
@@ -318,8 +334,10 @@ def _run_wsgi(paste_config_file, apis):
|
||||
logging.debug(_('App Config: %(api)s\n%(config)r') % locals())
|
||||
logging.info(_('Running %s API'), api)
|
||||
app = wsgi.load_paste_app(paste_config_file, api)
|
||||
apps.append((app, getattr(FLAGS, '%s_listen_port' % api),
|
||||
getattr(FLAGS, '%s_listen' % api)))
|
||||
apps.append((app,
|
||||
getattr(FLAGS, '%s_listen_port' % api),
|
||||
getattr(FLAGS, '%s_listen' % api),
|
||||
api))
|
||||
if len(apps) == 0:
|
||||
logging.error(_('No known API applications configured in %s.'),
|
||||
paste_config_file)
|
||||
|
||||
31
nova/test.py
@@ -23,7 +23,6 @@ inline callbacks.
|
||||
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import functools
|
||||
import os
|
||||
import shutil
|
||||
@@ -31,17 +30,16 @@ import uuid
|
||||
import unittest
|
||||
|
||||
import mox
|
||||
import shutil
|
||||
import stubout
|
||||
from eventlet import greenthread
|
||||
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import fakerabbit
|
||||
from nova import flags
|
||||
from nova import rpc
|
||||
from nova import utils
|
||||
from nova import service
|
||||
from nova import wsgi
|
||||
from nova.virt import fake
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
@@ -71,7 +69,7 @@ class TestCase(unittest.TestCase):
|
||||
# NOTE(vish): We need a better method for creating fixtures for tests
|
||||
# now that we have some required db setup for the system
|
||||
# to work properly.
|
||||
self.start = datetime.datetime.utcnow()
|
||||
self.start = utils.utcnow()
|
||||
shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db),
|
||||
os.path.join(FLAGS.state_path, FLAGS.sqlite_db))
|
||||
|
||||
@@ -85,6 +83,7 @@ class TestCase(unittest.TestCase):
|
||||
self._monkey_patch_attach()
|
||||
self._monkey_patch_wsgi()
|
||||
self._original_flags = FLAGS.FlagValuesDict()
|
||||
rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size)
|
||||
|
||||
def tearDown(self):
|
||||
"""Runs after each test method to tear down test environment."""
|
||||
@@ -99,6 +98,10 @@ class TestCase(unittest.TestCase):
|
||||
if FLAGS.fake_rabbit:
|
||||
fakerabbit.reset_all()
|
||||
|
||||
if FLAGS.connection_type == 'fake':
|
||||
if hasattr(fake.FakeConnection, '_instance'):
|
||||
del fake.FakeConnection._instance
|
||||
|
||||
# Reset any overriden flags
|
||||
self.reset_flags()
|
||||
|
||||
@@ -181,7 +184,7 @@ class TestCase(unittest.TestCase):
|
||||
wsgi.Server.start = _wrapped_start
|
||||
|
||||
# Useful assertions
|
||||
def assertDictMatch(self, d1, d2):
|
||||
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
|
||||
"""Assert two dicts are equivalent.
|
||||
|
||||
This is a 'deep' match in the sense that it handles nested
|
||||
@@ -212,15 +215,26 @@ class TestCase(unittest.TestCase):
|
||||
for key in d1keys:
|
||||
d1value = d1[key]
|
||||
d2value = d2[key]
|
||||
try:
|
||||
error = abs(float(d1value) - float(d2value))
|
||||
within_tolerance = error <= tolerance
|
||||
except (ValueError, TypeError):
|
||||
# If both values aren't convertible to float, just ignore
|
||||
# ValueError if arg is a str, TypeError if it's something else
|
||||
# (like None)
|
||||
within_tolerance = False
|
||||
|
||||
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
|
||||
self.assertDictMatch(d1value, d2value)
|
||||
elif 'DONTCARE' in (d1value, d2value):
|
||||
continue
|
||||
elif approx_equal and within_tolerance:
|
||||
continue
|
||||
elif d1value != d2value:
|
||||
raise_assertion("d1['%(key)s']=%(d1value)s != "
|
||||
"d2['%(key)s']=%(d2value)s" % locals())
|
||||
|
||||
def assertDictListMatch(self, L1, L2):
|
||||
def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
|
||||
"""Assert a list of dicts are equivalent."""
|
||||
def raise_assertion(msg):
|
||||
L1str = str(L1)
|
||||
@@ -236,4 +250,5 @@ class TestCase(unittest.TestCase):
|
||||
'len(L2)=%(L2count)d' % locals())
|
||||
|
||||
for d1, d2 in zip(L1, L2):
|
||||
self.assertDictMatch(d1, d2)
|
||||
self.assertDictMatch(d1, d2, approx_equal=approx_equal,
|
||||
tolerance=tolerance)
|
||||
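A short usage sketch of the new keyword arguments, as they would appear inside a nova.test.TestCase method (values are illustrative):

# Passes: the values differ by 0.0002, under the default tolerance of 0.001.
self.assertDictMatch({'weight': 0.30}, {'weight': 0.3002}, approx_equal=True)
# assertDictListMatch forwards the same options to each pair of dicts.
self.assertDictListMatch([{'weight': 0.1}], [{'weight': 0.1004}],
                         approx_equal=True, tolerance=0.001)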
|
||||
@@ -50,7 +50,7 @@ def setup():
|
||||
|
||||
testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
|
||||
if os.path.exists(testdb):
|
||||
os.unlink(testdb)
|
||||
return
|
||||
migration.db_sync()
|
||||
ctxt = context.get_admin_context()
|
||||
network_manager.VlanManager().create_networks(ctxt,
|
||||
|
||||
@@ -21,24 +21,24 @@ from nova import flags
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
flags.DECLARE('volume_driver', 'nova.volume.manager')
|
||||
FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
|
||||
FLAGS.connection_type = 'fake'
|
||||
FLAGS.fake_rabbit = True
|
||||
FLAGS['volume_driver'].SetDefault('nova.volume.driver.FakeISCSIDriver')
|
||||
FLAGS['connection_type'].SetDefault('fake')
|
||||
FLAGS['fake_rabbit'].SetDefault(True)
|
||||
flags.DECLARE('auth_driver', 'nova.auth.manager')
|
||||
FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
|
||||
FLAGS['auth_driver'].SetDefault('nova.auth.dbdriver.DbDriver')
|
||||
flags.DECLARE('network_size', 'nova.network.manager')
|
||||
flags.DECLARE('num_networks', 'nova.network.manager')
|
||||
flags.DECLARE('fake_network', 'nova.network.manager')
|
||||
FLAGS.network_size = 8
|
||||
FLAGS.num_networks = 2
|
||||
FLAGS.fake_network = True
|
||||
FLAGS.image_service = 'nova.image.local.LocalImageService'
|
||||
FLAGS['network_size'].SetDefault(8)
|
||||
FLAGS['num_networks'].SetDefault(2)
|
||||
FLAGS['fake_network'].SetDefault(True)
|
||||
FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService')
|
||||
flags.DECLARE('num_shelves', 'nova.volume.driver')
|
||||
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
|
||||
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
|
||||
FLAGS.num_shelves = 2
|
||||
FLAGS.blades_per_shelf = 4
|
||||
FLAGS.iscsi_num_targets = 8
|
||||
FLAGS.verbose = True
|
||||
FLAGS.sqlite_db = "tests.sqlite"
|
||||
FLAGS.use_ipv6 = True
|
||||
FLAGS['num_shelves'].SetDefault(2)
|
||||
FLAGS['blades_per_shelf'].SetDefault(4)
|
||||
FLAGS['iscsi_num_targets'].SetDefault(8)
|
||||
FLAGS['verbose'].SetDefault(True)
|
||||
FLAGS['sqlite_db'].SetDefault("tests.sqlite")
|
||||
FLAGS['use_ipv6'].SetDefault(True)
|
||||
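The switch from direct assignment to SetDefault() changes only the flag's default rather than forcing its value, so a flag set explicitly on the command line (or overridden by an individual test) is presumably still honored:

FLAGS['fake_rabbit'].SetDefault(True)   # adjust the default only
FLAGS.fake_rabbit = True                # old style: hard override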
|
||||
1
nova/tests/public_key/dummy.fingerprint
Normal file
@@ -0,0 +1 @@
|
||||
1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df
|
||||
1
nova/tests/public_key/dummy.pub
Normal file
@@ -0,0 +1 @@
|
||||
ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk
|
||||
@@ -1,26 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova import flags
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
FLAGS.connection_type = 'libvirt'
|
||||
FLAGS.fake_rabbit = False
|
||||
FLAGS.fake_network = False
|
||||
FLAGS.verbose = False
|
||||
0
nova/tests/scheduler/__init__.py
Normal file
206
nova/tests/scheduler/test_host_filter.py
Normal file
@@ -0,0 +1,206 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Scheduler Host Filters.
|
||||
"""
|
||||
|
||||
import json
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import test
|
||||
from nova.scheduler import host_filter
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class FakeZoneManager:
|
||||
pass
|
||||
|
||||
|
||||
class HostFilterTestCase(test.TestCase):
|
||||
"""Test case for host filters."""
|
||||
|
||||
def _host_caps(self, multiplier):
|
||||
# Returns host capabilities in the following way:
|
||||
# host1 = memory:free 10 (100max)
|
||||
# disk:available 100 (1000max)
|
||||
# hostN = memory:free 10 + 10N
|
||||
# disk:available 100 + 100N
|
||||
# in other words: hostN has more resources than host0
|
||||
# which means ... don't go above 10 hosts.
|
||||
return {'host_name-description': 'XenServer %s' % multiplier,
|
||||
'host_hostname': 'xs-%s' % multiplier,
|
||||
'host_memory_total': 100,
|
||||
'host_memory_overhead': 10,
|
||||
'host_memory_free': 10 + multiplier * 10,
|
||||
'host_memory_free-computed': 10 + multiplier * 10,
|
||||
'host_other-config': {},
|
||||
'host_ip_address': '192.168.1.%d' % (100 + multiplier),
|
||||
'host_cpu_info': {},
|
||||
'disk_available': 100 + multiplier * 100,
|
||||
'disk_total': 1000,
|
||||
'disk_used': 0,
|
||||
'host_uuid': 'xxx-%d' % multiplier,
|
||||
'host_name-label': 'xs-%s' % multiplier}
|
||||
|
||||
def setUp(self):
|
||||
self.old_flag = FLAGS.default_host_filter
|
||||
FLAGS.default_host_filter = \
|
||||
'nova.scheduler.host_filter.AllHostsFilter'
|
||||
self.instance_type = dict(name='tiny',
|
||||
memory_mb=50,
|
||||
vcpus=10,
|
||||
local_gb=500,
|
||||
flavorid=1,
|
||||
swap=500,
|
||||
rxtx_quota=30000,
|
||||
rxtx_cap=200)
|
||||
|
||||
self.zone_manager = FakeZoneManager()
|
||||
states = {}
|
||||
for x in xrange(10):
|
||||
states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
|
||||
self.zone_manager.service_states = states
|
||||
|
||||
def tearDown(self):
|
||||
FLAGS.default_host_filter = self.old_flag
|
||||
|
||||
def test_choose_filter(self):
|
||||
# Test default filter ...
|
||||
hf = host_filter.choose_host_filter()
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.AllHostsFilter')
|
||||
# Test valid filter ...
|
||||
hf = host_filter.choose_host_filter(
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
# Test invalid filter ...
|
||||
try:
|
||||
host_filter.choose_host_filter('does not exist')
|
||||
self.fail("Should not find host filter.")
|
||||
except exception.SchedulerHostFilterNotFound:
|
||||
pass
|
||||
|
||||
def test_all_host_filter(self):
|
||||
hf = host_filter.AllHostsFilter()
|
||||
cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(10, len(hosts))
|
||||
for host, capabilities in hosts:
|
||||
self.assertTrue(host.startswith('host'))
|
||||
|
||||
def test_instance_type_filter(self):
|
||||
hf = host_filter.InstanceTypeFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
|
||||
name)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
self.assertEquals('host05', just_hosts[0])
|
||||
self.assertEquals('host10', just_hosts[5])
|
||||
|
||||
def test_json_filter(self):
|
||||
hf = host_filter.JsonFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
self.assertEquals('host05', just_hosts[0])
|
||||
self.assertEquals('host10', just_hosts[5])
|
||||
|
||||
# Try some custom queries
|
||||
|
||||
raw = ['or',
|
||||
['and',
|
||||
['<', '$compute.host_memory_free', 30],
|
||||
['<', '$compute.disk_available', 300],
|
||||
],
|
||||
['and',
|
||||
['>', '$compute.host_memory_free', 70],
|
||||
['>', '$compute.disk_available', 700],
|
||||
]
|
||||
]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
for index, host in zip([1, 2, 8, 9, 10], just_hosts):
|
||||
self.assertEquals('host%02d' % index, host)
|
||||
|
||||
raw = ['not',
|
||||
['=', '$compute.host_memory_free', 30],
|
||||
]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(9, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
|
||||
self.assertEquals('host%02d' % index, host)
|
||||
|
||||
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
for index, host in zip([2, 4, 6, 8, 10], just_hosts):
|
||||
self.assertEquals('host%02d' % index, host)
|
||||
|
||||
# Try some bogus input ...
|
||||
raw = ['unknown command', ]
|
||||
cooked = json.dumps(raw)
|
||||
try:
|
||||
hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
['not', True, False, True, False],
|
||||
)))
|
||||
|
||||
try:
|
||||
hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
'not', True, False, True, False,
|
||||
))
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$foo', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$.....', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(
|
||||
['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
|
||||
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', {}, ['>', '$missing....foo']])))
|
||||
144
nova/tests/scheduler/test_least_cost_scheduler.py
Normal file
@@ -0,0 +1,144 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Least Cost Scheduler
|
||||
"""
|
||||
|
||||
from nova import flags
|
||||
from nova import test
|
||||
from nova.scheduler import least_cost
|
||||
from nova.tests.scheduler import test_zone_aware_scheduler
|
||||
|
||||
MB = 1024 * 1024
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class FakeHost(object):
|
||||
def __init__(self, host_id, free_ram, io):
|
||||
self.id = host_id
|
||||
self.free_ram = free_ram
|
||||
self.io = io
|
||||
|
||||
|
||||
class WeightedSumTestCase(test.TestCase):
|
||||
def test_empty_domain(self):
|
||||
domain = []
|
||||
weighted_fns = []
|
||||
result = least_cost.weighted_sum(domain, weighted_fns)
|
||||
expected = []
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_basic_costing(self):
|
||||
hosts = [
|
||||
FakeHost(1, 512 * MB, 100),
|
||||
FakeHost(2, 256 * MB, 400),
|
||||
FakeHost(3, 512 * MB, 100),
|
||||
]
|
||||
|
||||
weighted_fns = [
|
||||
(1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost*
|
||||
(2, lambda h: h.io), # Avoid high I/O
|
||||
]
|
||||
|
||||
costs = least_cost.weighted_sum(
|
||||
domain=hosts, weighted_fns=weighted_fns)
|
||||
|
||||
# Each 256 MB unit of free-ram contributes 0.5 points by way of:
|
||||
# cost = weight * (score/max_score) = 1 * (256/512) = 0.5
|
||||
# Each 100 iops of IO adds 0.5 points by way of:
|
||||
# cost = 2 * (100/400) = 2 * 0.25 = 0.5
|
||||
expected = [1.5, 2.5, 1.5]
|
||||
self.assertEqual(expected, costs)
|
||||
|
||||
|
||||
class LeastCostSchedulerTestCase(test.TestCase):
|
||||
def setUp(self):
|
||||
super(LeastCostSchedulerTestCase, self).setUp()
|
||||
|
||||
class FakeZoneManager:
|
||||
pass
|
||||
|
||||
zone_manager = FakeZoneManager()
|
||||
|
||||
states = test_zone_aware_scheduler.fake_zone_manager_service_states(
|
||||
num_hosts=10)
|
||||
zone_manager.service_states = states
|
||||
|
||||
self.sched = least_cost.LeastCostScheduler()
|
||||
self.sched.zone_manager = zone_manager
|
||||
|
||||
def tearDown(self):
|
||||
super(LeastCostSchedulerTestCase, self).tearDown()
|
||||
|
||||
def assertWeights(self, expected, num, request_spec, hosts):
|
||||
weighted = self.sched.weigh_hosts(num, request_spec, hosts)
|
||||
self.assertDictListMatch(weighted, expected, approx_equal=True)
|
||||
|
||||
def test_no_hosts(self):
|
||||
num = 1
|
||||
request_spec = {}
|
||||
hosts = []
|
||||
|
||||
expected = []
|
||||
self.assertWeights(expected, num, request_spec, hosts)
|
||||
|
||||
def test_noop_cost_fn(self):
|
||||
FLAGS.least_cost_scheduler_cost_functions = [
|
||||
'nova.scheduler.least_cost.noop_cost_fn',
|
||||
]
|
||||
FLAGS.noop_cost_fn_weight = 1
|
||||
|
||||
num = 1
|
||||
request_spec = {}
|
||||
hosts = self.sched.filter_hosts(num, request_spec)
|
||||
|
||||
expected = [dict(weight=1, hostname=hostname)
|
||||
for hostname, caps in hosts]
|
||||
self.assertWeights(expected, num, request_spec, hosts)
|
||||
|
||||
def test_cost_fn_weights(self):
|
||||
FLAGS.least_cost_scheduler_cost_functions = [
|
||||
'nova.scheduler.least_cost.noop_cost_fn',
|
||||
]
|
||||
FLAGS.noop_cost_fn_weight = 2
|
||||
|
||||
num = 1
|
||||
request_spec = {}
|
||||
hosts = self.sched.filter_hosts(num, request_spec)
|
||||
|
||||
expected = [dict(weight=2, hostname=hostname)
|
||||
for hostname, caps in hosts]
|
||||
self.assertWeights(expected, num, request_spec, hosts)
|
||||
|
||||
def test_fill_first_cost_fn(self):
|
||||
FLAGS.least_cost_scheduler_cost_functions = [
|
||||
'nova.scheduler.least_cost.fill_first_cost_fn',
|
||||
]
|
||||
FLAGS.fill_first_cost_fn_weight = 1
|
||||
|
||||
num = 1
|
||||
request_spec = {}
|
||||
hosts = self.sched.filter_hosts(num, request_spec)
|
||||
|
||||
expected = []
|
||||
for idx, (hostname, caps) in enumerate(hosts):
|
||||
# Costs are normalized so over 10 hosts, each host with increasing
|
||||
# free ram will cost 1/N more. Since the lowest cost host has some
|
||||
# free ram, we add in the 1/N for the base_cost
|
||||
weight = 0.1 + (0.1 * idx)
|
||||
weight_dict = dict(weight=weight, hostname=hostname)
|
||||
expected.append(weight_dict)
|
||||
|
||||
self.assertWeights(expected, num, request_spec, hosts)
|
||||
@@ -48,6 +48,10 @@ flags.DECLARE('stub_network', 'nova.compute.manager')
|
||||
flags.DECLARE('instances_path', 'nova.compute.manager')
|
||||
|
||||
|
||||
FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
|
||||
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
|
||||
|
||||
|
||||
class TestDriver(driver.Scheduler):
|
||||
"""Scheduler Driver for Tests"""
|
||||
def schedule(context, topic, *args, **kwargs):
|
||||
@@ -61,7 +65,8 @@ class SchedulerTestCase(test.TestCase):
|
||||
"""Test case for scheduler"""
|
||||
def setUp(self):
|
||||
super(SchedulerTestCase, self).setUp()
|
||||
self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
|
||||
driver = 'nova.tests.scheduler.test_scheduler.TestDriver'
|
||||
self.flags(scheduler_driver=driver)
|
||||
|
||||
def _create_compute_service(self):
|
||||
"""Create compute-manager(ComputeNode and Service record)."""
|
||||
@@ -196,7 +201,7 @@ class ZoneSchedulerTestCase(test.TestCase):
|
||||
service.topic = 'compute'
|
||||
service.id = kwargs['id']
|
||||
service.availability_zone = kwargs['zone']
|
||||
service.created_at = datetime.datetime.utcnow()
|
||||
service.created_at = utils.utcnow()
|
||||
return service
|
||||
|
||||
def test_with_two_zones(self):
|
||||
@@ -290,7 +295,7 @@ class SimpleDriverTestCase(test.TestCase):
|
||||
dic['host'] = kwargs.get('host', 'dummy')
|
||||
s_ref = db.service_create(self.context, dic)
|
||||
if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
|
||||
t = datetime.datetime.utcnow() - datetime.timedelta(0)
|
||||
t = utils.utcnow() - datetime.timedelta(0)
|
||||
dic['created_at'] = kwargs.get('created_at', t)
|
||||
dic['updated_at'] = kwargs.get('updated_at', t)
|
||||
db.service_update(self.context, s_ref['id'], dic)
|
||||
@@ -401,7 +406,7 @@ class SimpleDriverTestCase(test.TestCase):
|
||||
FLAGS.compute_manager)
|
||||
compute1.start()
|
||||
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
|
||||
now = datetime.datetime.utcnow()
|
||||
now = utils.utcnow()
|
||||
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
|
||||
past = now - delta
|
||||
db.service_update(self.context, s1['id'], {'updated_at': past})
|
||||
@@ -542,7 +547,7 @@ class SimpleDriverTestCase(test.TestCase):
|
||||
def test_wont_sechedule_if_specified_host_is_down(self):
|
||||
compute1 = self.start_service('compute', host='host1')
|
||||
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
|
||||
now = datetime.datetime.utcnow()
|
||||
now = utils.utcnow()
|
||||
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
|
||||
past = now - delta
|
||||
db.service_update(self.context, s1['id'], {'updated_at': past})
|
||||
@@ -692,7 +697,7 @@ class SimpleDriverTestCase(test.TestCase):
|
||||
dic = {'instance_id': instance_id, 'size': 1}
|
||||
v_ref = db.volume_create(self.context, {'instance_id': instance_id,
|
||||
'size': 1})
|
||||
t1 = datetime.datetime.utcnow() - datetime.timedelta(1)
|
||||
t1 = utils.utcnow() - datetime.timedelta(1)
|
||||
dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
|
||||
'topic': 'volume', 'report_count': 0}
|
||||
s_ref = db.service_create(self.context, dic)
|
||||
@@ -709,7 +714,7 @@ class SimpleDriverTestCase(test.TestCase):
|
||||
"""Confirms src-compute node is alive."""
|
||||
instance_id = self._create_instance()
|
||||
i_ref = db.instance_get(self.context, instance_id)
|
||||
t = datetime.datetime.utcnow() - datetime.timedelta(10)
|
||||
t = utils.utcnow() - datetime.timedelta(10)
|
||||
s_ref = self._create_compute_service(created_at=t, updated_at=t,
|
||||
host=i_ref['host'])
|
||||
|
||||
@@ -737,7 +742,7 @@ class SimpleDriverTestCase(test.TestCase):
"""Confirms exception raises in case dest host does not exist."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
t = datetime.datetime.utcnow() - datetime.timedelta(10)
t = utils.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t, updated_at=t,
host=i_ref['host'])

@@ -796,7 +801,7 @@ class SimpleDriverTestCase(test.TestCase):
# mocks for live_migration_common_check()
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
t1 = utils.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
host=dest)

@@ -912,7 +917,8 @@ class SimpleDriverTestCase(test.TestCase):


class FakeZone(object):
def __init__(self, api_url, username, password):
def __init__(self, id, api_url, username, password):
self.id = id
self.api_url = api_url
self.username = username
self.password = password
@@ -920,16 +926,27 @@ class FakeZone(object):
|
||||
|
||||
def zone_get_all(context):
|
||||
return [
|
||||
FakeZone('http://example.com', 'bob', 'xxx'),
|
||||
FakeZone(1, 'http://example.com', 'bob', 'xxx'),
|
||||
]
|
||||
|
||||
|
||||
def fake_instance_get_by_uuid(context, uuid):
|
||||
if FAKE_UUID_NOT_FOUND:
|
||||
raise exception.InstanceNotFound(instance_id=uuid)
|
||||
else:
|
||||
return {'id': 1}
|
||||
|
||||
|
||||
class FakeRerouteCompute(api.reroute_compute):
|
||||
def __init__(self, method_name, id_to_return=1):
|
||||
super(FakeRerouteCompute, self).__init__(method_name)
|
||||
self.id_to_return = id_to_return
|
||||
|
||||
def _call_child_zones(self, zones, function):
|
||||
return []
|
||||
|
||||
def get_collection_context_and_id(self, args, kwargs):
|
||||
return ("servers", None, 1)
|
||||
return ("servers", None, self.id_to_return)
|
||||
|
||||
def unmarshall_result(self, zone_responses):
|
||||
return dict(magic="found me")
|
||||
@@ -958,6 +975,8 @@ class ZoneRedirectTest(test.TestCase):
|
||||
self.stubs = stubout.StubOutForTesting()
|
||||
|
||||
self.stubs.Set(db, 'zone_get_all', zone_get_all)
|
||||
self.stubs.Set(db, 'instance_get_by_uuid',
|
||||
fake_instance_get_by_uuid)
|
||||
|
||||
self.enable_zone_routing = FLAGS.enable_zone_routing
|
||||
FLAGS.enable_zone_routing = True
|
||||
@@ -974,8 +993,19 @@ class ZoneRedirectTest(test.TestCase):
|
||||
except api.RedirectResult, e:
|
||||
self.fail(_("Successful database hit should succeed"))
|
||||
|
||||
def test_trap_not_found_locally(self):
|
||||
def test_trap_not_found_locally_id_passed(self):
|
||||
"""When an integer ID is not found locally, we cannot reroute to
|
||||
another zone, so just return InstanceNotFound exception
|
||||
"""
|
||||
decorator = FakeRerouteCompute("foo")
|
||||
self.assertRaises(exception.InstanceNotFound,
|
||||
decorator(go_boom), None, None, 1)
|
||||
|
||||
def test_trap_not_found_locally_uuid_passed(self):
|
||||
"""When a UUID is found, if the item isn't found locally, we should
|
||||
try to reroute to a child zone to see if they have it
|
||||
"""
|
||||
decorator = FakeRerouteCompute("foo", id_to_return=FAKE_UUID_NOT_FOUND)
|
||||
try:
|
||||
result = decorator(go_boom)(None, None, 1)
|
||||
self.fail(_("Should have rerouted."))
|
||||
@@ -1037,7 +1067,7 @@ class FakeNovaClient(object):
|
||||
|
||||
class DynamicNovaClientTest(test.TestCase):
|
||||
def test_issue_novaclient_command_found(self):
|
||||
zone = FakeZone('http://example.com', 'bob', 'xxx')
|
||||
zone = FakeZone(1, 'http://example.com', 'bob', 'xxx')
|
||||
self.assertEquals(api._issue_novaclient_command(
|
||||
FakeNovaClient(FakeServerCollection()),
|
||||
zone, "servers", "get", 100).a, 10)
|
||||
@@ -1051,7 +1081,7 @@ class DynamicNovaClientTest(test.TestCase):
|
||||
zone, "servers", "pause", 100), None)
|
||||
|
||||
def test_issue_novaclient_command_not_found(self):
|
||||
zone = FakeZone('http://example.com', 'bob', 'xxx')
|
||||
zone = FakeZone(1, 'http://example.com', 'bob', 'xxx')
|
||||
self.assertEquals(api._issue_novaclient_command(
|
||||
FakeNovaClient(FakeEmptyServerCollection()),
|
||||
zone, "servers", "get", 100), None)
|
||||
@@ -1063,3 +1093,49 @@ class DynamicNovaClientTest(test.TestCase):
|
||||
self.assertEquals(api._issue_novaclient_command(
|
||||
FakeNovaClient(FakeEmptyServerCollection()),
|
||||
zone, "servers", "any", "name"), None)
|
||||
|
||||
|
||||
class FakeZonesProxy(object):
|
||||
def do_something(*args, **kwargs):
|
||||
return 42
|
||||
|
||||
def raises_exception(*args, **kwargs):
|
||||
raise Exception('testing')
|
||||
|
||||
|
||||
class FakeNovaClientOpenStack(object):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.zones = FakeZonesProxy()
|
||||
|
||||
def authenticate(self):
|
||||
pass
|
||||
|
||||
|
||||
class CallZoneMethodTest(test.TestCase):
|
||||
def setUp(self):
|
||||
super(CallZoneMethodTest, self).setUp()
|
||||
self.stubs = stubout.StubOutForTesting()
|
||||
self.stubs.Set(db, 'zone_get_all', zone_get_all)
|
||||
self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack)
|
||||
|
||||
def tearDown(self):
|
||||
self.stubs.UnsetAll()
|
||||
super(CallZoneMethodTest, self).tearDown()
|
||||
|
||||
def test_call_zone_method(self):
|
||||
context = {}
|
||||
method = 'do_something'
|
||||
results = api.call_zone_method(context, method)
|
||||
expected = [(1, 42)]
|
||||
self.assertEqual(expected, results)
|
||||
|
||||
def test_call_zone_method_not_present(self):
|
||||
context = {}
|
||||
method = 'not_present'
|
||||
self.assertRaises(AttributeError, api.call_zone_method,
|
||||
context, method)
|
||||
|
||||
def test_call_zone_method_generates_exception(self):
|
||||
context = {}
|
||||
method = 'raises_exception'
|
||||
self.assertRaises(Exception, api.call_zone_method, context, method)
|
||||
296
nova/tests/scheduler/test_zone_aware_scheduler.py (new file)
@@ -0,0 +1,296 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Zone Aware Scheduler.
|
||||
"""
|
||||
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova.scheduler import driver
|
||||
from nova.scheduler import zone_aware_scheduler
|
||||
from nova.scheduler import zone_manager
|
||||
|
||||
|
||||
def _host_caps(multiplier):
|
||||
# Returns host capabilities in the following way:
|
||||
# host1 = memory:free 10 (100max)
|
||||
# disk:available 100 (1000max)
|
||||
# hostN = memory:free 10 + 10N
|
||||
# disk:available 100 + 100N
|
||||
# in other words: hostN has more resources than host0
|
||||
# which means ... don't go above 10 hosts.
|
||||
return {'host_name-description': 'XenServer %s' % multiplier,
|
||||
'host_hostname': 'xs-%s' % multiplier,
|
||||
'host_memory_total': 100,
|
||||
'host_memory_overhead': 10,
|
||||
'host_memory_free': 10 + multiplier * 10,
|
||||
'host_memory_free-computed': 10 + multiplier * 10,
|
||||
'host_other-config': {},
|
||||
'host_ip_address': '192.168.1.%d' % (100 + multiplier),
|
||||
'host_cpu_info': {},
|
||||
'disk_available': 100 + multiplier * 100,
|
||||
'disk_total': 1000,
|
||||
'disk_used': 0,
|
||||
'host_uuid': 'xxx-%d' % multiplier,
|
||||
'host_name-label': 'xs-%s' % multiplier}
|
||||
|
||||
|
||||
def fake_zone_manager_service_states(num_hosts):
|
||||
states = {}
|
||||
for x in xrange(num_hosts):
|
||||
states['host%02d' % (x + 1)] = {'compute': _host_caps(x)}
|
||||
return states
|
||||
|
||||
|
||||
class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
|
||||
def filter_hosts(self, num, specs):
|
||||
# NOTE(sirp): this is returning [(hostname, services)]
|
||||
return self.zone_manager.service_states.items()
|
||||
|
||||
def weigh_hosts(self, num, specs, hosts):
|
||||
fake_weight = 99
|
||||
weighted = []
|
||||
for hostname, caps in hosts:
|
||||
weighted.append(dict(weight=fake_weight, name=hostname))
|
||||
return weighted
|
||||
|
||||
|
||||
class FakeZoneManager(zone_manager.ZoneManager):
|
||||
def __init__(self):
|
||||
self.service_states = {
|
||||
'host1': {
|
||||
'compute': {'ram': 1000},
|
||||
},
|
||||
'host2': {
|
||||
'compute': {'ram': 2000},
|
||||
},
|
||||
'host3': {
|
||||
'compute': {'ram': 3000},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class FakeEmptyZoneManager(zone_manager.ZoneManager):
|
||||
def __init__(self):
|
||||
self.service_states = {}
|
||||
|
||||
|
||||
def fake_empty_call_zone_method(context, method, specs):
|
||||
return []
|
||||
|
||||
|
||||
# Hmm, I should probably be using mox for this.
|
||||
was_called = False
|
||||
|
||||
|
||||
def fake_provision_resource(context, item, instance_id, request_spec, kwargs):
|
||||
global was_called
|
||||
was_called = True
|
||||
|
||||
|
||||
def fake_ask_child_zone_to_create_instance(context, zone_info,
|
||||
request_spec, kwargs):
|
||||
global was_called
|
||||
was_called = True
|
||||
|
||||
|
||||
def fake_provision_resource_locally(context, item, instance_id, kwargs):
|
||||
global was_called
|
||||
was_called = True
|
||||
|
||||
|
||||
def fake_provision_resource_from_blob(context, item, instance_id,
|
||||
request_spec, kwargs):
|
||||
global was_called
|
||||
was_called = True
|
||||
|
||||
|
||||
def fake_decrypt_blob_returns_local_info(blob):
|
||||
return {'foo': True} # values aren't important.
|
||||
|
||||
|
||||
def fake_decrypt_blob_returns_child_info(blob):
|
||||
return {'child_zone': True,
|
||||
'child_blob': True} # values aren't important. Keys are.
|
||||
|
||||
|
||||
def fake_call_zone_method(context, method, specs):
|
||||
return [
|
||||
('zone1', [
|
||||
dict(weight=1, blob='AAAAAAA'),
|
||||
dict(weight=111, blob='BBBBBBB'),
|
||||
dict(weight=112, blob='CCCCCCC'),
|
||||
dict(weight=113, blob='DDDDDDD'),
|
||||
]),
|
||||
('zone2', [
|
||||
dict(weight=120, blob='EEEEEEE'),
|
||||
dict(weight=2, blob='FFFFFFF'),
|
||||
dict(weight=122, blob='GGGGGGG'),
|
||||
dict(weight=123, blob='HHHHHHH'),
|
||||
]),
|
||||
('zone3', [
|
||||
dict(weight=130, blob='IIIIIII'),
|
||||
dict(weight=131, blob='JJJJJJJ'),
|
||||
dict(weight=132, blob='KKKKKKK'),
|
||||
dict(weight=3, blob='LLLLLLL'),
|
||||
]),
|
||||
]
|
||||
|
||||
|
||||
class ZoneAwareSchedulerTestCase(test.TestCase):
|
||||
"""Test case for Zone Aware Scheduler."""
|
||||
|
||||
def test_zone_aware_scheduler(self):
|
||||
"""
|
||||
Create a nested set of FakeZones, ensure that a select call returns the
|
||||
appropriate build plan.
|
||||
"""
|
||||
sched = FakeZoneAwareScheduler()
|
||||
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
|
||||
|
||||
zm = FakeZoneManager()
|
||||
sched.set_zone_manager(zm)
|
||||
|
||||
fake_context = {}
|
||||
build_plan = sched.select(fake_context, {})
|
||||
|
||||
self.assertEqual(15, len(build_plan))
|
||||
|
||||
hostnames = [plan_item['name']
|
||||
for plan_item in build_plan if 'name' in plan_item]
|
||||
self.assertEqual(3, len(hostnames))
|
||||
|
||||
def test_empty_zone_aware_scheduler(self):
|
||||
"""
|
||||
Ensure empty hosts & child_zones result in NoValidHost exception.
|
||||
"""
|
||||
sched = FakeZoneAwareScheduler()
|
||||
self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
|
||||
|
||||
zm = FakeEmptyZoneManager()
|
||||
sched.set_zone_manager(zm)
|
||||
|
||||
fake_context = {}
|
||||
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
|
||||
fake_context, 1,
|
||||
dict(host_filter=None,
|
||||
request_spec={'instance_type': {}}))
|
||||
|
||||
def test_schedule_do_not_schedule_with_hint(self):
|
||||
"""
|
||||
Check the local/child zone routing in the run_instance() call.
|
||||
If the zone_blob hint was passed in, don't re-schedule.
|
||||
"""
|
||||
global was_called
|
||||
sched = FakeZoneAwareScheduler()
|
||||
was_called = False
|
||||
self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
|
||||
request_spec = {
|
||||
'instance_properties': {},
|
||||
'instance_type': {},
|
||||
'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
|
||||
'blob': "Non-None blob data",
|
||||
}
|
||||
|
||||
result = sched.schedule_run_instance(None, 1, request_spec)
|
||||
self.assertEquals(None, result)
|
||||
self.assertTrue(was_called)
|
||||
|
||||
def test_provision_resource_local(self):
|
||||
"""Provision a resource locally or remotely."""
|
||||
global was_called
|
||||
sched = FakeZoneAwareScheduler()
|
||||
was_called = False
|
||||
self.stubs.Set(sched, '_provision_resource_locally',
|
||||
fake_provision_resource_locally)
|
||||
|
||||
request_spec = {'hostname': "foo"}
|
||||
sched._provision_resource(None, request_spec, 1, request_spec, {})
|
||||
self.assertTrue(was_called)
|
||||
|
||||
def test_provision_resource_remote(self):
|
||||
"""Provision a resource locally or remotely."""
|
||||
global was_called
|
||||
sched = FakeZoneAwareScheduler()
|
||||
was_called = False
|
||||
self.stubs.Set(sched, '_provision_resource_from_blob',
|
||||
fake_provision_resource_from_blob)
|
||||
|
||||
request_spec = {}
|
||||
sched._provision_resource(None, request_spec, 1, request_spec, {})
|
||||
self.assertTrue(was_called)
|
||||
|
||||
def test_provision_resource_from_blob_empty(self):
|
||||
"""Provision a resource locally or remotely given no hints."""
|
||||
global was_called
|
||||
sched = FakeZoneAwareScheduler()
|
||||
request_spec = {}
|
||||
self.assertRaises(zone_aware_scheduler.InvalidBlob,
|
||||
sched._provision_resource_from_blob,
|
||||
None, {}, 1, {}, {})
|
||||
|
||||
def test_provision_resource_from_blob_with_local_blob(self):
|
||||
"""
|
||||
Provision a resource locally or remotely when blob hint passed in.
|
||||
"""
|
||||
global was_called
|
||||
sched = FakeZoneAwareScheduler()
|
||||
was_called = False
|
||||
self.stubs.Set(sched, '_decrypt_blob',
|
||||
fake_decrypt_blob_returns_local_info)
|
||||
self.stubs.Set(sched, '_provision_resource_locally',
|
||||
fake_provision_resource_locally)
|
||||
|
||||
request_spec = {'blob': "Non-None blob data"}
|
||||
|
||||
sched._provision_resource_from_blob(None, request_spec, 1,
|
||||
request_spec, {})
|
||||
self.assertTrue(was_called)
|
||||
|
||||
def test_provision_resource_from_blob_with_child_blob(self):
|
||||
"""
|
||||
Provision a resource locally or remotely when child blob hint
|
||||
passed in.
|
||||
"""
|
||||
global was_called
|
||||
sched = FakeZoneAwareScheduler()
|
||||
self.stubs.Set(sched, '_decrypt_blob',
|
||||
fake_decrypt_blob_returns_child_info)
|
||||
was_called = False
|
||||
self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
|
||||
fake_ask_child_zone_to_create_instance)
|
||||
|
||||
request_spec = {'blob': "Non-None blob data"}
|
||||
|
||||
sched._provision_resource_from_blob(None, request_spec, 1,
|
||||
request_spec, {})
|
||||
self.assertTrue(was_called)
|
||||
|
||||
def test_provision_resource_from_blob_with_immediate_child_blob(self):
|
||||
"""
|
||||
Provision a resource locally or remotely when blob hint passed in
|
||||
from an immediate child.
|
||||
"""
|
||||
global was_called
|
||||
sched = FakeZoneAwareScheduler()
|
||||
was_called = False
|
||||
self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
|
||||
fake_ask_child_zone_to_create_instance)
|
||||
|
||||
request_spec = {'child_blob': True, 'child_zone': True}
|
||||
|
||||
sched._provision_resource_from_blob(None, request_spec, 1,
|
||||
request_spec, {})
|
||||
self.assertTrue(was_called)
|
||||
111
nova/tests/test_adminapi.py (new file)
@@ -0,0 +1,111 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from eventlet import greenthread
|
||||
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova import test
|
||||
from nova import utils
|
||||
from nova.auth import manager
|
||||
from nova.api.ec2 import admin
|
||||
from nova.image import fake
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.tests.adminapi')
|
||||
|
||||
|
||||
class AdminApiTestCase(test.TestCase):
|
||||
def setUp(self):
|
||||
super(AdminApiTestCase, self).setUp()
|
||||
self.flags(connection_type='fake')
|
||||
|
||||
self.conn = rpc.Connection.instance()
|
||||
|
||||
# set up our cloud
|
||||
self.api = admin.AdminController()
|
||||
|
||||
# set up services
|
||||
self.compute = self.start_service('compute')
|
||||
self.scheduler = self.start_service('scheduler')
|
||||
self.network = self.start_service('network')
|
||||
self.volume = self.start_service('volume')
|
||||
self.image_service = utils.import_object(FLAGS.image_service)
|
||||
|
||||
self.manager = manager.AuthManager()
|
||||
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
|
||||
self.project = self.manager.create_project('proj', 'admin', 'proj')
|
||||
self.context = context.RequestContext(user=self.user,
|
||||
project=self.project)
|
||||
host = self.network.get_network_host(self.context.elevated())
|
||||
|
||||
def fake_show(meh, context, id):
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
|
||||
'type': 'machine', 'image_state': 'available'}}
|
||||
|
||||
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
|
||||
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
|
||||
|
||||
# NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
|
||||
rpc_cast = rpc.cast
|
||||
|
||||
def finish_cast(*args, **kwargs):
|
||||
rpc_cast(*args, **kwargs)
|
||||
greenthread.sleep(0.2)
|
||||
|
||||
self.stubs.Set(rpc, 'cast', finish_cast)
|
||||
|
||||
def tearDown(self):
|
||||
network_ref = db.project_get_network(self.context,
|
||||
self.project.id)
|
||||
db.network_disassociate(self.context, network_ref['id'])
|
||||
self.manager.delete_project(self.project)
|
||||
self.manager.delete_user(self.user)
|
||||
super(AdminApiTestCase, self).tearDown()
|
||||
|
||||
def test_block_external_ips(self):
|
||||
"""Make sure provider firewall rules are created."""
|
||||
result = self.api.block_external_addresses(self.context, '1.1.1.1/32')
|
||||
self.api.remove_external_address_block(self.context, '1.1.1.1/32')
|
||||
self.assertEqual('OK', result['status'])
|
||||
self.assertEqual('Added 3 rules', result['message'])
|
||||
|
||||
def test_list_blocked_ips(self):
|
||||
"""Make sure we can see the external blocks that exist."""
|
||||
self.api.block_external_addresses(self.context, '1.1.1.2/32')
|
||||
result = self.api.describe_external_address_blocks(self.context)
|
||||
num = len(db.provider_fw_rule_get_all(self.context))
|
||||
self.api.remove_external_address_block(self.context, '1.1.1.2/32')
|
||||
# we only list IP, not tcp/udp/icmp rules
|
||||
self.assertEqual(num / 3, len(result['externalIpBlockInfo']))
|
||||
|
||||
def test_remove_ip_block(self):
|
||||
"""Remove ip blocks."""
|
||||
result = self.api.block_external_addresses(self.context, '1.1.1.3/32')
|
||||
self.assertEqual('OK', result['status'])
|
||||
num0 = len(db.provider_fw_rule_get_all(self.context))
|
||||
result = self.api.remove_external_address_block(self.context,
|
||||
'1.1.1.3/32')
|
||||
self.assertEqual('OK', result['status'])
|
||||
self.assertEqual('Deleted 3 rules', result['message'])
|
||||
num1 = len(db.provider_fw_rule_get_all(self.context))
|
||||
self.assert_(num1 < num0)
|
||||
@@ -89,7 +89,7 @@ class FakeHttplibConnection(object):
|
||||
class XmlConversionTestCase(test.TestCase):
|
||||
"""Unit test api xml conversion"""
|
||||
def test_number_conversion(self):
|
||||
conv = apirequest._try_convert
|
||||
conv = ec2utils._try_convert
|
||||
self.assertEqual(conv('None'), None)
|
||||
self.assertEqual(conv('True'), True)
|
||||
self.assertEqual(conv('False'), False)
|
||||
@@ -224,6 +224,29 @@ class ApiEc2TestCase(test.TestCase):
|
||||
self.manager.delete_project(project)
|
||||
self.manager.delete_user(user)
|
||||
|
||||
def test_create_duplicate_key_pair(self):
|
||||
"""Test that, after successfully generating a keypair,
|
||||
requesting a second keypair with the same name fails sanely"""
|
||||
self.expect_http()
|
||||
self.mox.ReplayAll()
|
||||
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
|
||||
for x in range(random.randint(4, 8)))
|
||||
user = self.manager.create_user('fake', 'fake', 'fake')
|
||||
project = self.manager.create_project('fake', 'fake', 'fake')
|
||||
# NOTE(vish): create depends on pool, so call helper directly
|
||||
self.ec2.create_key_pair('test')
|
||||
|
||||
try:
|
||||
self.ec2.create_key_pair('test')
|
||||
except EC2ResponseError, e:
|
||||
if e.code == 'KeyPairExists':
|
||||
pass
|
||||
else:
|
||||
self.fail("Unexpected EC2ResponseError: %s "
|
||||
"(expected KeyPairExists)" % e.code)
|
||||
else:
|
||||
self.fail('Exception not raised.')
|
||||
|
||||
def test_get_all_security_groups(self):
|
||||
"""Test that we can retrieve security groups"""
|
||||
self.expect_http()
|
||||
|
||||
@@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
|
||||
super(_AuthManagerBaseTestCase, self).setUp()
|
||||
self.flags(connection_type='fake')
|
||||
self.manager = manager.AuthManager(new=True)
|
||||
self.manager.mc.cache = {}
|
||||
|
||||
def test_create_and_find_user(self):
|
||||
with user_generator(self.manager):
|
||||
|
||||
@@ -17,32 +17,25 @@
|
||||
# under the License.
|
||||
|
||||
from base64 import b64decode
|
||||
import json
|
||||
from M2Crypto import BIO
|
||||
from M2Crypto import RSA
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
from eventlet import greenthread
|
||||
|
||||
from nova import context
|
||||
from nova import crypto
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova import service
|
||||
from nova import test
|
||||
from nova import utils
|
||||
from nova import exception
|
||||
from nova.auth import manager
|
||||
from nova.compute import power_state
|
||||
from nova.api.ec2 import cloud
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova.image import local
|
||||
from nova.exception import NotFound
|
||||
from nova.image import fake
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
@@ -63,6 +56,7 @@ class CloudTestCase(test.TestCase):
|
||||
self.compute = self.start_service('compute')
|
||||
self.scheduler = self.start_service('scheduler')
|
||||
self.network = self.start_service('network')
|
||||
self.volume = self.start_service('volume')
|
||||
self.image_service = utils.import_object(FLAGS.image_service)
|
||||
|
||||
self.manager = manager.AuthManager()
|
||||
@@ -74,10 +68,19 @@ class CloudTestCase(test.TestCase):
|
||||
|
||||
def fake_show(meh, context, id):
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
|
||||
'type': 'machine'}}
|
||||
'type': 'machine', 'image_state': 'available'}}
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'show', fake_show)
|
||||
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
|
||||
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
|
||||
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
|
||||
|
||||
# NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
|
||||
rpc_cast = rpc.cast
|
||||
|
||||
def finish_cast(*args, **kwargs):
|
||||
rpc_cast(*args, **kwargs)
|
||||
greenthread.sleep(0.2)
|
||||
|
||||
self.stubs.Set(rpc, 'cast', finish_cast)
|
||||
|
||||
def tearDown(self):
|
||||
network_ref = db.project_get_network(self.context,
|
||||
@@ -85,8 +88,6 @@ class CloudTestCase(test.TestCase):
|
||||
db.network_disassociate(self.context, network_ref['id'])
|
||||
self.manager.delete_project(self.project)
|
||||
self.manager.delete_user(self.user)
|
||||
self.compute.kill()
|
||||
self.network.kill()
|
||||
super(CloudTestCase, self).tearDown()
|
||||
|
||||
def _create_key(self, name):
|
||||
@@ -113,9 +114,20 @@ class CloudTestCase(test.TestCase):
|
||||
self.cloud.describe_addresses(self.context)
|
||||
self.cloud.release_address(self.context,
|
||||
public_ip=address)
|
||||
greenthread.sleep(0.3)
|
||||
db.floating_ip_destroy(self.context, address)
|
||||
|
||||
def test_allocate_address(self):
|
||||
address = "10.10.10.10"
|
||||
allocate = self.cloud.allocate_address
|
||||
db.floating_ip_create(self.context,
|
||||
{'address': address,
|
||||
'host': self.network.host})
|
||||
self.assertEqual(allocate(self.context)['publicIp'], address)
|
||||
db.floating_ip_destroy(self.context, address)
|
||||
self.assertRaises(exception.NoMoreFloatingIps,
|
||||
allocate,
|
||||
self.context)
|
||||
|
||||
def test_associate_disassociate_address(self):
|
||||
"""Verifies associate runs cleanly without raising an exception"""
|
||||
address = "10.10.10.10"
|
||||
@@ -129,12 +141,10 @@ class CloudTestCase(test.TestCase):
|
||||
self.cloud.associate_address(self.context,
|
||||
instance_id=ec2_id,
|
||||
public_ip=address)
|
||||
greenthread.sleep(0.3)
|
||||
self.cloud.disassociate_address(self.context,
|
||||
public_ip=address)
|
||||
self.cloud.release_address(self.context,
|
||||
public_ip=address)
|
||||
greenthread.sleep(0.3)
|
||||
self.network.deallocate_fixed_ip(self.context, fixed)
|
||||
db.instance_destroy(self.context, inst['id'])
|
||||
db.floating_ip_destroy(self.context, address)
|
||||
@@ -171,6 +181,25 @@ class CloudTestCase(test.TestCase):
|
||||
db.volume_destroy(self.context, vol1['id'])
|
||||
db.volume_destroy(self.context, vol2['id'])
|
||||
|
||||
def test_create_volume_from_snapshot(self):
|
||||
"""Makes sure create_volume works when we specify a snapshot."""
|
||||
vol = db.volume_create(self.context, {'size': 1})
|
||||
snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
|
||||
'volume_size': vol['size'],
|
||||
'status': "available"})
|
||||
snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
|
||||
|
||||
result = self.cloud.create_volume(self.context,
|
||||
snapshot_id=snapshot_id)
|
||||
volume_id = result['volumeId']
|
||||
result = self.cloud.describe_volumes(self.context)
|
||||
self.assertEqual(len(result['volumeSet']), 2)
|
||||
self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id)
|
||||
|
||||
db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id))
|
||||
db.snapshot_destroy(self.context, snap['id'])
|
||||
db.volume_destroy(self.context, vol['id'])
|
||||
|
||||
def test_describe_availability_zones(self):
|
||||
"""Makes sure describe_availability_zones works and filters results."""
|
||||
service1 = db.service_create(self.context, {'host': 'host1_zones',
|
||||
@@ -188,13 +217,59 @@ class CloudTestCase(test.TestCase):
|
||||
db.service_destroy(self.context, service1['id'])
|
||||
db.service_destroy(self.context, service2['id'])
|
||||
|
||||
def test_describe_snapshots(self):
|
||||
"""Makes sure describe_snapshots works and filters results."""
|
||||
vol = db.volume_create(self.context, {})
|
||||
snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']})
|
||||
snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']})
|
||||
result = self.cloud.describe_snapshots(self.context)
|
||||
self.assertEqual(len(result['snapshotSet']), 2)
|
||||
snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x')
|
||||
result = self.cloud.describe_snapshots(self.context,
|
||||
snapshot_id=[snapshot_id])
|
||||
self.assertEqual(len(result['snapshotSet']), 1)
|
||||
self.assertEqual(
|
||||
ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']),
|
||||
snap2['id'])
|
||||
db.snapshot_destroy(self.context, snap1['id'])
|
||||
db.snapshot_destroy(self.context, snap2['id'])
|
||||
db.volume_destroy(self.context, vol['id'])
|
||||
|
||||
def test_create_snapshot(self):
|
||||
"""Makes sure create_snapshot works."""
|
||||
vol = db.volume_create(self.context, {'status': "available"})
|
||||
volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
|
||||
|
||||
result = self.cloud.create_snapshot(self.context,
|
||||
volume_id=volume_id)
|
||||
snapshot_id = result['snapshotId']
|
||||
result = self.cloud.describe_snapshots(self.context)
|
||||
self.assertEqual(len(result['snapshotSet']), 1)
|
||||
self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
|
||||
|
||||
db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id))
|
||||
db.volume_destroy(self.context, vol['id'])
|
||||
|
||||
def test_delete_snapshot(self):
|
||||
"""Makes sure delete_snapshot works."""
|
||||
vol = db.volume_create(self.context, {'status': "available"})
|
||||
snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
|
||||
'status': "available"})
|
||||
snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
|
||||
|
||||
result = self.cloud.delete_snapshot(self.context,
|
||||
snapshot_id=snapshot_id)
|
||||
self.assertTrue(result)
|
||||
|
||||
db.volume_destroy(self.context, vol['id'])
|
||||
|
||||
def test_describe_instances(self):
|
||||
"""Makes sure describe_instances works and filters results."""
|
||||
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
|
||||
'image_id': 1,
|
||||
'image_ref': 1,
|
||||
'host': 'host1'})
|
||||
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
|
||||
'image_id': 1,
|
||||
'image_ref': 1,
|
||||
'host': 'host2'})
|
||||
comp1 = db.service_create(self.context, {'host': 'host1',
|
||||
'availability_zone': 'zone1',
|
||||
@@ -227,9 +302,9 @@ class CloudTestCase(test.TestCase):
|
||||
'type': 'machine'}}]
|
||||
|
||||
def fake_show_none(meh, context, id):
|
||||
raise NotFound
|
||||
raise exception.ImageNotFound(image_id='bad_image_id')
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
|
||||
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
|
||||
# list all
|
||||
result1 = describe_images(self.context)
|
||||
result1 = result1['imagesSet'][0]
|
||||
@@ -243,9 +318,9 @@ class CloudTestCase(test.TestCase):
|
||||
self.assertEqual(2, len(result3['imagesSet']))
|
||||
# provide a non-existent image_id
|
||||
self.stubs.UnsetAll()
|
||||
self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
|
||||
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
|
||||
self.assertRaises(NotFound, describe_images,
|
||||
self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
|
||||
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none)
|
||||
self.assertRaises(exception.ImageNotFound, describe_images,
|
||||
self.context, ['ami-fake'])
|
||||
|
||||
def test_describe_image_attribute(self):
|
||||
@@ -255,8 +330,8 @@ class CloudTestCase(test.TestCase):
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
|
||||
'type': 'machine'}, 'is_public': True}
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'show', fake_show)
|
||||
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
|
||||
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
|
||||
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
|
||||
result = describe_image_attribute(self.context, 'ami-00000001',
|
||||
'launchPermission')
|
||||
self.assertEqual([{'group': 'all'}], result['launchPermission'])
|
||||
@@ -271,9 +346,9 @@ class CloudTestCase(test.TestCase):
|
||||
def fake_update(meh, context, image_id, metadata, data=None):
|
||||
return metadata
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'show', fake_show)
|
||||
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
|
||||
self.stubs.Set(local.LocalImageService, 'update', fake_update)
|
||||
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
|
||||
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
|
||||
self.stubs.Set(fake._FakeImageService, 'update', fake_update)
|
||||
result = modify_image_attribute(self.context, 'ami-00000001',
|
||||
'launchPermission', 'add',
|
||||
user_group=['all'])
|
||||
@@ -285,7 +360,7 @@ class CloudTestCase(test.TestCase):
|
||||
def fake_delete(self, context, id):
|
||||
return None
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'delete', fake_delete)
|
||||
self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
|
||||
# valid image
|
||||
result = deregister_image(self.context, 'ami-00000001')
|
||||
self.assertEqual(result['imageId'], 'ami-00000001')
|
||||
@@ -295,42 +370,41 @@ class CloudTestCase(test.TestCase):
|
||||
def fake_detail_empty(self, context):
|
||||
return []
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty)
|
||||
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
|
||||
self.assertRaises(exception.ImageNotFound, deregister_image,
|
||||
self.context, 'ami-bad001')
|
||||
|
||||
def test_console_output(self):
|
||||
instance_type = FLAGS.default_instance_type
|
||||
max_count = 1
|
||||
kwargs = {'image_id': 'ami-1',
|
||||
'instance_type': instance_type,
|
||||
'max_count': max_count}
|
||||
def _run_instance(self, **kwargs):
|
||||
rv = self.cloud.run_instances(self.context, **kwargs)
|
||||
greenthread.sleep(0.3)
|
||||
instance_id = rv['instancesSet'][0]['instanceId']
|
||||
return instance_id
|
||||
|
||||
def _run_instance_wait(self, **kwargs):
|
||||
ec2_instance_id = self._run_instance(**kwargs)
|
||||
self._wait_for_running(ec2_instance_id)
|
||||
return ec2_instance_id
|
||||
|
||||
def test_console_output(self):
|
||||
instance_id = self._run_instance(
|
||||
image_id='ami-1',
|
||||
instance_type=FLAGS.default_instance_type,
|
||||
max_count=1)
|
||||
output = self.cloud.get_console_output(context=self.context,
|
||||
instance_id=[instance_id])
|
||||
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
|
||||
# TODO(soren): We need this until we can stop polling in the rpc code
|
||||
# for unit tests.
|
||||
greenthread.sleep(0.3)
|
||||
rv = self.cloud.terminate_instances(self.context, [instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
|
||||
def test_ajax_console(self):
|
||||
kwargs = {'image_id': 'ami-1'}
|
||||
rv = self.cloud.run_instances(self.context, **kwargs)
|
||||
instance_id = rv['instancesSet'][0]['instanceId']
|
||||
greenthread.sleep(0.3)
|
||||
instance_id = self._run_instance(image_id='ami-1')
|
||||
output = self.cloud.get_ajax_console(context=self.context,
|
||||
instance_id=[instance_id])
|
||||
self.assertEquals(output['url'],
|
||||
'%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url)
|
||||
# TODO(soren): We need this until we can stop polling in the rpc code
|
||||
# for unit tests.
|
||||
greenthread.sleep(0.3)
|
||||
rv = self.cloud.terminate_instances(self.context, [instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
|
||||
def test_key_generation(self):
|
||||
result = self._create_key('test')
|
||||
@@ -354,13 +428,107 @@ class CloudTestCase(test.TestCase):
|
||||
self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
|
||||
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
|
||||
|
||||
def test_import_public_key(self):
|
||||
# test when user provides all values
|
||||
result1 = self.cloud.import_public_key(self.context,
|
||||
'testimportkey1',
|
||||
'mytestpubkey',
|
||||
'mytestfprint')
|
||||
self.assertTrue(result1)
|
||||
keydata = db.key_pair_get(self.context,
|
||||
self.context.user.id,
|
||||
'testimportkey1')
|
||||
self.assertEqual('mytestpubkey', keydata['public_key'])
|
||||
self.assertEqual('mytestfprint', keydata['fingerprint'])
|
||||
# test when user omits fingerprint
|
||||
pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
|
||||
f = open(pubkey_path + '/dummy.pub', 'r')
|
||||
dummypub = f.readline().rstrip()
|
||||
f.close()
|
||||
f = open(pubkey_path + '/dummy.fingerprint', 'r')
|
||||
dummyfprint = f.readline().rstrip()
|
||||
f.close()
|
||||
result2 = self.cloud.import_public_key(self.context,
|
||||
'testimportkey2',
|
||||
dummypub)
|
||||
self.assertTrue(result2)
|
||||
keydata = db.key_pair_get(self.context,
|
||||
self.context.user.id,
|
||||
'testimportkey2')
|
||||
self.assertEqual(dummypub, keydata['public_key'])
|
||||
self.assertEqual(dummyfprint, keydata['fingerprint'])
|
||||
|
||||
def test_delete_key_pair(self):
|
||||
self._create_key('test')
|
||||
self.cloud.delete_key_pair(self.context, 'test')
|
||||
|
||||
def test_run_instances(self):
|
||||
# stub out the rpc call
|
||||
def stub_cast(*args, **kwargs):
|
||||
pass
|
||||
|
||||
self.stubs.Set(rpc, 'cast', stub_cast)
|
||||
|
||||
kwargs = {'image_id': FLAGS.default_image,
|
||||
'instance_type': FLAGS.default_instance_type,
|
||||
'max_count': 1}
|
||||
run_instances = self.cloud.run_instances
|
||||
result = run_instances(self.context, **kwargs)
|
||||
instance = result['instancesSet'][0]
|
||||
self.assertEqual(instance['imageId'], 'ami-00000001')
|
||||
self.assertEqual(instance['displayName'], 'Server 1')
|
||||
self.assertEqual(instance['instanceId'], 'i-00000001')
|
||||
self.assertEqual(instance['instanceState']['name'], 'scheduling')
|
||||
self.assertEqual(instance['instanceType'], 'm1.small')
|
||||
|
||||
def test_run_instances_image_state_none(self):
|
||||
kwargs = {'image_id': FLAGS.default_image,
|
||||
'instance_type': FLAGS.default_instance_type,
|
||||
'max_count': 1}
|
||||
run_instances = self.cloud.run_instances
|
||||
|
||||
def fake_show_no_state(self, context, id):
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
|
||||
'type': 'machine'}}
|
||||
|
||||
self.stubs.UnsetAll()
|
||||
self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
|
||||
self.assertRaises(exception.ApiError, run_instances,
|
||||
self.context, **kwargs)
|
||||
|
||||
def test_run_instances_image_state_invalid(self):
|
||||
kwargs = {'image_id': FLAGS.default_image,
|
||||
'instance_type': FLAGS.default_instance_type,
|
||||
'max_count': 1}
|
||||
run_instances = self.cloud.run_instances
|
||||
|
||||
def fake_show_decrypt(self, context, id):
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
|
||||
'type': 'machine', 'image_state': 'decrypting'}}
|
||||
|
||||
self.stubs.UnsetAll()
|
||||
self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
|
||||
self.assertRaises(exception.ApiError, run_instances,
|
||||
self.context, **kwargs)
|
||||
|
||||
def test_run_instances_image_status_active(self):
|
||||
kwargs = {'image_id': FLAGS.default_image,
|
||||
'instance_type': FLAGS.default_instance_type,
|
||||
'max_count': 1}
|
||||
run_instances = self.cloud.run_instances
|
||||
|
||||
def fake_show_stat_active(self, context, id):
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
|
||||
'type': 'machine'}, 'status': 'active'}
|
||||
|
||||
self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
|
||||
|
||||
result = run_instances(self.context, **kwargs)
|
||||
self.assertEqual(len(result['instancesSet']), 1)
|
||||
|
||||
def test_terminate_instances(self):
|
||||
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
|
||||
'image_id': 1,
|
||||
'image_ref': 1,
|
||||
'host': 'host1'})
|
||||
terminate_instances = self.cloud.terminate_instances
|
||||
# valid instance_id
|
||||
@@ -382,7 +550,9 @@ class CloudTestCase(test.TestCase):
|
||||
|
||||
def test_update_of_instance_wont_update_private_fields(self):
|
||||
inst = db.instance_create(self.context, {})
|
||||
self.cloud.update_instance(self.context, inst['id'],
|
||||
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
|
||||
self.cloud.update_instance(self.context, ec2_id,
|
||||
display_name='c00l 1m4g3',
|
||||
mac_address='DE:AD:BE:EF')
|
||||
inst = db.instance_get(self.context, inst['id'])
|
||||
self.assertEqual(None, inst['mac_address'])
|
||||
@@ -405,3 +575,299 @@ class CloudTestCase(test.TestCase):
|
||||
vol = db.volume_get(self.context, vol['id'])
|
||||
self.assertEqual(None, vol['mountpoint'])
|
||||
db.volume_destroy(self.context, vol['id'])
|
||||
|
||||
def _restart_compute_service(self, periodic_interval=None):
|
||||
"""restart compute service. NOTE: fake driver forgets all instances."""
|
||||
self.compute.kill()
|
||||
if periodic_interval:
|
||||
self.compute = self.start_service(
|
||||
'compute', periodic_interval=periodic_interval)
|
||||
else:
|
||||
self.compute = self.start_service('compute')
|
||||
|
||||
def _wait_for_state(self, ctxt, instance_id, predicate):
|
||||
"""Wait for an stopping instance to be a given state"""
|
||||
id = ec2utils.ec2_id_to_id(instance_id)
|
||||
while True:
|
||||
info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
|
||||
LOG.debug(info)
|
||||
if predicate(info):
|
||||
break
|
||||
greenthread.sleep(1)
|
||||
|
||||
def _wait_for_running(self, instance_id):
|
||||
def is_running(info):
|
||||
return info['state_description'] == 'running'
|
||||
self._wait_for_state(self.context, instance_id, is_running)
|
||||
|
||||
def _wait_for_stopped(self, instance_id):
|
||||
def is_stopped(info):
|
||||
return info['state_description'] == 'stopped'
|
||||
self._wait_for_state(self.context, instance_id, is_stopped)
|
||||
|
||||
def _wait_for_terminate(self, instance_id):
|
||||
def is_deleted(info):
|
||||
return info['deleted']
|
||||
elevated = self.context.elevated(read_deleted=True)
|
||||
self._wait_for_state(elevated, instance_id, is_deleted)
|
||||
|
||||
def test_stop_start_instance(self):
|
||||
"""Makes sure stop/start instance works"""
|
||||
# enforce periodic tasks run in short time to avoid wait for 60s.
|
||||
self._restart_compute_service(periodic_interval=0.3)
|
||||
|
||||
kwargs = {'image_id': 'ami-1',
|
||||
'instance_type': FLAGS.default_instance_type,
|
||||
'max_count': 1, }
|
||||
instance_id = self._run_instance_wait(**kwargs)
|
||||
|
||||
# a running instance can't be started. It is just ignored.
|
||||
result = self.cloud.start_instances(self.context, [instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
self.assertTrue(result)
|
||||
|
||||
result = self.cloud.stop_instances(self.context, [instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
self.assertTrue(result)
|
||||
self._wait_for_stopped(instance_id)
|
||||
|
||||
result = self.cloud.start_instances(self.context, [instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
self.assertTrue(result)
|
||||
self._wait_for_running(instance_id)
|
||||
|
||||
result = self.cloud.stop_instances(self.context, [instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
self.assertTrue(result)
|
||||
self._wait_for_stopped(instance_id)
|
||||
|
||||
result = self.cloud.terminate_instances(self.context, [instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
self.assertTrue(result)
|
||||
|
||||
self._restart_compute_service()
|
||||
|
||||
def _volume_create(self):
|
||||
kwargs = {'status': 'available',
|
||||
'host': self.volume.host,
|
||||
'size': 1,
|
||||
'attach_status': 'detached', }
|
||||
return db.volume_create(self.context, kwargs)
|
||||
|
||||
def _assert_volume_attached(self, vol, instance_id, mountpoint):
|
||||
self.assertEqual(vol['instance_id'], instance_id)
|
||||
self.assertEqual(vol['mountpoint'], mountpoint)
|
||||
self.assertEqual(vol['status'], "in-use")
|
||||
self.assertEqual(vol['attach_status'], "attached")
|
||||
|
||||
def _assert_volume_detached(self, vol):
|
||||
self.assertEqual(vol['instance_id'], None)
|
||||
self.assertEqual(vol['mountpoint'], None)
|
||||
self.assertEqual(vol['status'], "available")
|
||||
self.assertEqual(vol['attach_status'], "detached")
|
||||
|
||||
def test_stop_start_with_volume(self):
|
||||
"""Make sure run instance with block device mapping works"""
|
||||
|
||||
# enforce periodic tasks run in short time to avoid wait for 60s.
|
||||
self._restart_compute_service(periodic_interval=0.3)
|
||||
|
||||
vol1 = self._volume_create()
|
||||
vol2 = self._volume_create()
|
||||
kwargs = {'image_id': 'ami-1',
|
||||
'instance_type': FLAGS.default_instance_type,
|
||||
'max_count': 1,
|
||||
'block_device_mapping': [{'device_name': '/dev/vdb',
|
||||
'volume_id': vol1['id'],
|
||||
'delete_on_termination': False, },
|
||||
{'device_name': '/dev/vdc',
|
||||
'volume_id': vol2['id'],
|
||||
'delete_on_termination': True, },
|
||||
]}
|
||||
ec2_instance_id = self._run_instance_wait(**kwargs)
|
||||
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
|
||||
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
self.assertEqual(len(vols), 2)
|
||||
for vol in vols:
|
||||
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
|
||||
|
||||
vol = db.volume_get(self.context, vol1['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
|
||||
|
||||
vol = db.volume_get(self.context, vol2['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
|
||||
|
||||
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
|
||||
self.assertTrue(result)
|
||||
self._wait_for_stopped(ec2_instance_id)
|
||||
|
||||
vol = db.volume_get(self.context, vol1['id'])
|
||||
self._assert_volume_detached(vol)
|
||||
vol = db.volume_get(self.context, vol2['id'])
|
||||
self._assert_volume_detached(vol)
|
||||
|
||||
self.cloud.start_instances(self.context, [ec2_instance_id])
|
||||
self._wait_for_running(ec2_instance_id)
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
self.assertEqual(len(vols), 2)
|
||||
for vol in vols:
|
||||
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
|
||||
self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
|
||||
vol['mountpoint'] == '/dev/vdc')
|
||||
self.assertEqual(vol['instance_id'], instance_id)
|
||||
self.assertEqual(vol['status'], "in-use")
|
||||
self.assertEqual(vol['attach_status'], "attached")
|
||||
|
||||
self.cloud.terminate_instances(self.context, [ec2_instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
|
||||
admin_ctxt = context.get_admin_context(read_deleted=False)
|
||||
vol = db.volume_get(admin_ctxt, vol1['id'])
|
||||
self.assertFalse(vol['deleted'])
|
||||
db.volume_destroy(self.context, vol1['id'])
|
||||
|
||||
greenthread.sleep(0.3)
|
||||
admin_ctxt = context.get_admin_context(read_deleted=True)
|
||||
vol = db.volume_get(admin_ctxt, vol2['id'])
|
||||
self.assertTrue(vol['deleted'])
|
||||
|
||||
self._restart_compute_service()
|
||||
|
||||
def test_stop_with_attached_volume(self):
|
||||
"""Make sure attach info is reflected to block device mapping"""
|
||||
# enforce periodic tasks run in short time to avoid wait for 60s.
|
||||
self._restart_compute_service(periodic_interval=0.3)
|
||||
|
||||
vol1 = self._volume_create()
|
||||
vol2 = self._volume_create()
|
||||
kwargs = {'image_id': 'ami-1',
|
||||
'instance_type': FLAGS.default_instance_type,
|
||||
'max_count': 1,
|
||||
'block_device_mapping': [{'device_name': '/dev/vdb',
|
||||
'volume_id': vol1['id'],
|
||||
'delete_on_termination': True}]}
|
||||
ec2_instance_id = self._run_instance_wait(**kwargs)
|
||||
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
|
||||
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
self.assertEqual(len(vols), 1)
|
||||
for vol in vols:
|
||||
self.assertEqual(vol['id'], vol1['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
|
||||
|
||||
vol = db.volume_get(self.context, vol2['id'])
|
||||
self._assert_volume_detached(vol)
|
||||
|
||||
self.cloud.compute_api.attach_volume(self.context,
|
||||
instance_id=instance_id,
|
||||
volume_id=vol2['id'],
|
||||
device='/dev/vdc')
|
||||
greenthread.sleep(0.3)
|
||||
vol = db.volume_get(self.context, vol2['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
|
||||
|
||||
self.cloud.compute_api.detach_volume(self.context,
|
||||
volume_id=vol1['id'])
|
||||
greenthread.sleep(0.3)
|
||||
vol = db.volume_get(self.context, vol1['id'])
|
||||
self._assert_volume_detached(vol)
|
||||
|
||||
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
|
||||
self.assertTrue(result)
|
||||
self._wait_for_stopped(ec2_instance_id)
|
||||
|
||||
for vol_id in (vol1['id'], vol2['id']):
|
||||
vol = db.volume_get(self.context, vol_id)
|
||||
self._assert_volume_detached(vol)
|
||||
|
||||
self.cloud.start_instances(self.context, [ec2_instance_id])
|
||||
self._wait_for_running(ec2_instance_id)
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
self.assertEqual(len(vols), 1)
|
||||
for vol in vols:
|
||||
self.assertEqual(vol['id'], vol2['id'])
|
||||
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
|
||||
|
||||
vol = db.volume_get(self.context, vol1['id'])
|
||||
self._assert_volume_detached(vol)
|
||||
|
||||
self.cloud.terminate_instances(self.context, [ec2_instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
|
||||
for vol_id in (vol1['id'], vol2['id']):
|
||||
vol = db.volume_get(self.context, vol_id)
|
||||
self.assertEqual(vol['id'], vol_id)
|
||||
self._assert_volume_detached(vol)
|
||||
db.volume_destroy(self.context, vol_id)
|
||||
|
||||
self._restart_compute_service()
|
||||
|
||||
def _create_snapshot(self, ec2_volume_id):
|
||||
result = self.cloud.create_snapshot(self.context,
|
||||
volume_id=ec2_volume_id)
|
||||
greenthread.sleep(0.3)
|
||||
return result['snapshotId']
|
||||
|
||||
def test_run_with_snapshot(self):
|
||||
"""Makes sure run/stop/start instance with snapshot works."""
|
||||
vol = self._volume_create()
|
||||
ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
|
||||
|
||||
ec2_snapshot1_id = self._create_snapshot(ec2_volume_id)
|
||||
snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id)
|
||||
ec2_snapshot2_id = self._create_snapshot(ec2_volume_id)
|
||||
snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id)
|
||||
|
||||
kwargs = {'image_id': 'ami-1',
|
||||
'instance_type': FLAGS.default_instance_type,
|
||||
'max_count': 1,
|
||||
'block_device_mapping': [{'device_name': '/dev/vdb',
|
||||
'snapshot_id': snapshot1_id,
|
||||
'delete_on_termination': False, },
|
||||
{'device_name': '/dev/vdc',
|
||||
'snapshot_id': snapshot2_id,
|
||||
'delete_on_termination': True}]}
|
||||
ec2_instance_id = self._run_instance_wait(**kwargs)
|
||||
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
|
||||
|
||||
vols = db.volume_get_all_by_instance(self.context, instance_id)
|
||||
self.assertEqual(len(vols), 2)
|
||||
vol1_id = None
|
||||
vol2_id = None
|
||||
for vol in vols:
|
||||
snapshot_id = vol['snapshot_id']
|
||||
if snapshot_id == snapshot1_id:
|
||||
vol1_id = vol['id']
|
||||
mountpoint = '/dev/vdb'
|
||||
elif snapshot_id == snapshot2_id:
|
||||
vol2_id = vol['id']
|
||||
mountpoint = '/dev/vdc'
|
||||
else:
|
||||
self.fail()
|
||||
|
||||
self._assert_volume_attached(vol, instance_id, mountpoint)
|
||||
|
||||
self.assertTrue(vol1_id)
|
||||
self.assertTrue(vol2_id)
|
||||
|
||||
self.cloud.terminate_instances(self.context, [ec2_instance_id])
|
||||
greenthread.sleep(0.3)
|
||||
self._wait_for_terminate(ec2_instance_id)
|
||||
|
||||
greenthread.sleep(0.3)
|
||||
admin_ctxt = context.get_admin_context(read_deleted=False)
|
||||
vol = db.volume_get(admin_ctxt, vol1_id)
|
||||
self._assert_volume_detached(vol)
|
||||
self.assertFalse(vol['deleted'])
|
||||
db.volume_destroy(self.context, vol1_id)
|
||||
|
||||
greenthread.sleep(0.3)
|
||||
admin_ctxt = context.get_admin_context(read_deleted=True)
|
||||
vol = db.volume_get(admin_ctxt, vol2_id)
|
||||
self.assertTrue(vol['deleted'])
|
||||
|
||||
for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
|
||||
self.cloud.delete_snapshot(self.context, snapshot_id)
|
||||
greenthread.sleep(0.3)
|
||||
db.volume_destroy(self.context, vol['id'])
|
||||
|
||||
@@ -19,25 +19,24 @@
|
||||
Tests For Compute
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import mox
|
||||
import stubout
|
||||
|
||||
from nova.auth import manager
|
||||
from nova import compute
|
||||
from nova.compute import instance_types
|
||||
from nova.compute import manager as compute_manager
|
||||
from nova.compute import power_state
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova.db.sqlalchemy import models
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
import nova.image.fake
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova import test
|
||||
from nova import utils
|
||||
from nova.auth import manager
|
||||
from nova.compute import instance_types
|
||||
from nova.compute import manager as compute_manager
|
||||
from nova.compute import power_state
|
||||
from nova.db.sqlalchemy import models
|
||||
from nova.image import local
|
||||
from nova.notifier import test_notifier
|
||||
|
||||
LOG = logging.getLogger('nova.tests.compute')
|
||||
@@ -77,7 +76,7 @@ class ComputeTestCase(test.TestCase):
|
||||
def fake_show(meh, context, id):
|
||||
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
|
||||
|
||||
self.stubs.Set(local.LocalImageService, 'show', fake_show)
|
||||
self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
|
||||
|
||||
def tearDown(self):
|
||||
self.manager.delete_user(self.user)
|
||||
@@ -87,7 +86,7 @@ class ComputeTestCase(test.TestCase):
|
||||
def _create_instance(self, params={}):
|
||||
"""Create a test instance"""
|
||||
inst = {}
|
||||
inst['image_id'] = 1
|
||||
inst['image_ref'] = 1
|
||||
inst['reservation_id'] = 'r-fakeres'
|
||||
inst['launch_time'] = '10'
|
||||
inst['user_id'] = self.user.id
|
||||
@@ -153,7 +152,7 @@ class ComputeTestCase(test.TestCase):
|
||||
ref = self.compute_api.create(
|
||||
self.context,
|
||||
instance_type=instance_types.get_default_instance_type(),
|
||||
image_id=None,
|
||||
image_href=None,
|
||||
security_group=['testgroup'])
|
||||
try:
|
||||
self.assertEqual(len(db.security_group_get_by_instance(
|
||||
@@ -171,7 +170,7 @@ class ComputeTestCase(test.TestCase):
|
||||
ref = self.compute_api.create(
|
||||
self.context,
|
||||
instance_type=instance_types.get_default_instance_type(),
|
||||
image_id=None,
|
||||
image_href=None,
|
||||
security_group=['testgroup'])
|
||||
try:
|
||||
db.instance_destroy(self.context, ref[0]['id'])
|
||||
@@ -187,7 +186,7 @@ class ComputeTestCase(test.TestCase):
|
||||
ref = self.compute_api.create(
|
||||
self.context,
|
||||
instance_type=instance_types.get_default_instance_type(),
|
||||
image_id=None,
|
||||
image_href=None,
|
||||
security_group=['testgroup'])
|
||||
|
||||
try:
|
||||
@@ -220,18 +219,33 @@ class ComputeTestCase(test.TestCase):
|
||||
instance_ref = db.instance_get(self.context, instance_id)
|
||||
self.assertEqual(instance_ref['launched_at'], None)
|
||||
self.assertEqual(instance_ref['deleted_at'], None)
|
||||
launch = datetime.datetime.utcnow()
|
||||
launch = utils.utcnow()
|
||||
self.compute.run_instance(self.context, instance_id)
|
||||
instance_ref = db.instance_get(self.context, instance_id)
|
||||
self.assert_(instance_ref['launched_at'] > launch)
|
||||
self.assertEqual(instance_ref['deleted_at'], None)
|
||||
terminate = datetime.datetime.utcnow()
|
||||
terminate = utils.utcnow()
|
||||
self.compute.terminate_instance(self.context, instance_id)
|
||||
self.context = self.context.elevated(True)
|
||||
instance_ref = db.instance_get(self.context, instance_id)
|
||||
self.assert_(instance_ref['launched_at'] < terminate)
|
||||
self.assert_(instance_ref['deleted_at'] > terminate)
|
||||
|
||||
def test_stop(self):
|
||||
"""Ensure instance can be stopped"""
|
||||
instance_id = self._create_instance()
|
||||
self.compute.run_instance(self.context, instance_id)
|
||||
self.compute.stop_instance(self.context, instance_id)
|
||||
self.compute.terminate_instance(self.context, instance_id)
|
||||
|
||||
def test_start(self):
|
||||
"""Ensure instance can be started"""
|
||||
instance_id = self._create_instance()
|
||||
self.compute.run_instance(self.context, instance_id)
|
||||
self.compute.stop_instance(self.context, instance_id)
|
||||
self.compute.start_instance(self.context, instance_id)
|
||||
self.compute.terminate_instance(self.context, instance_id)
|
||||
|
||||
def test_pause(self):
|
||||
"""Ensure instance can be paused"""
|
||||
instance_id = self._create_instance()
|
||||
@@ -270,6 +284,14 @@ class ComputeTestCase(test.TestCase):
|
||||
"File Contents")
|
||||
self.compute.terminate_instance(self.context, instance_id)
|
||||
|
||||
def test_agent_update(self):
|
||||
"""Ensure instance can have its agent updated"""
|
||||
instance_id = self._create_instance()
|
||||
self.compute.run_instance(self.context, instance_id)
|
||||
self.compute.agent_update(self.context, instance_id,
|
||||
'http://127.0.0.1/agent', '00112233445566778899aabbccddeeff')
|
||||
self.compute.terminate_instance(self.context, instance_id)
|
||||
|
||||
def test_snapshot(self):
|
||||
"""Ensure instance can be snapshotted"""
|
||||
instance_id = self._create_instance()
|
||||
@@ -381,8 +403,30 @@ class ComputeTestCase(test.TestCase):
|
||||
|
||||
self.compute.terminate_instance(self.context, instance_id)
|
||||
|
||||
def test_finish_resize(self):
|
||||
"""Contrived test to ensure finish_resize doesn't raise anything"""
|
||||
|
||||
def fake(*args, **kwargs):
|
||||
pass
|
||||
|
||||
self.stubs.Set(self.compute.driver, 'finish_resize', fake)
|
||||
context = self.context.elevated()
|
||||
instance_id = self._create_instance()
|
||||
self.compute.prep_resize(context, instance_id, 1)
|
||||
migration_ref = db.migration_get_by_instance_and_status(context,
|
||||
instance_id, 'pre-migrating')
|
||||
try:
|
||||
self.compute.finish_resize(context, instance_id,
|
||||
int(migration_ref['id']), {})
|
||||
except KeyError, e:
|
||||
# Only catch KeyError; let any other failure propagate so we do not
# obscure the real reason the test failed
|
||||
self.fail()
|
||||
|
||||
self.compute.terminate_instance(self.context, instance_id)
|
||||
|
||||
def test_resize_instance_notification(self):
|
||||
"""Ensure instance can be migrated/resized"""
|
||||
"""Ensure notifications on instance migrate/resize"""
|
||||
instance_id = self._create_instance()
|
||||
context = self.context.elevated()
|
||||
|
||||
|
||||
@@ -20,8 +20,6 @@
|
||||
Tests For Console proxy.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import exception
|
||||
|
||||
@@ -91,6 +91,20 @@ class FlagsTestCase(test.TestCase):
|
||||
self.assert_('runtime_answer' in self.global_FLAGS)
|
||||
self.assertEqual(self.global_FLAGS.runtime_answer, 60)
|
||||
|
||||
def test_long_vs_short_flags(self):
|
||||
flags.DEFINE_string('duplicate_answer_long', 'val', 'desc',
|
||||
flag_values=self.global_FLAGS)
|
||||
argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
|
||||
args = self.global_FLAGS(argv)
|
||||
|
||||
self.assert_('duplicate_answer' not in self.global_FLAGS)
|
||||
self.assert_(self.global_FLAGS.duplicate_answer_long, 60)
|
||||
|
||||
flags.DEFINE_integer('duplicate_answer', 60, 'desc',
|
||||
flag_values=self.global_FLAGS)
|
||||
self.assertEqual(self.global_FLAGS.duplicate_answer, 60)
|
||||
self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val')
|
||||
|
||||
def test_flag_leak_left(self):
|
||||
self.assertEqual(FLAGS.flags_unittest, 'foo')
|
||||
FLAGS.flags_unittest = 'bar'
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
"""
|
||||
Unit Tests for flat network code
|
||||
"""
|
||||
import IPy
|
||||
import netaddr
|
||||
import os
|
||||
import unittest
|
||||
|
||||
@@ -45,8 +45,8 @@ class FlatNetworkTestCase(base.NetworkTestCase):
|
||||
|
||||
self.context._project = self.projects[0]
|
||||
self.context.project_id = self.projects[0].id
|
||||
pubnet = IPy.IP(flags.FLAGS.floating_range)
|
||||
address = str(pubnet[0])
|
||||
pubnet = netaddr.IPRange(flags.FLAGS.floating_range)
|
||||
address = str(list(pubnet)[0])
|
||||
try:
|
||||
db.floating_ip_get_by_address(context.get_admin_context(), address)
|
||||
except exception.NotFound:
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Scheduler Host Filter Drivers.
|
||||
Tests For Scheduler Host Filters.
|
||||
"""
|
||||
|
||||
import json
|
||||
@@ -31,7 +31,7 @@ class FakeZoneManager:
|
||||
|
||||
|
||||
class HostFilterTestCase(test.TestCase):
|
||||
"""Test case for host filter drivers."""
|
||||
"""Test case for host filters."""
|
||||
|
||||
def _host_caps(self, multiplier):
|
||||
# Returns host capabilities in the following way:
|
||||
@@ -57,8 +57,8 @@ class HostFilterTestCase(test.TestCase):
|
||||
'host_name-label': 'xs-%s' % multiplier}
|
||||
|
||||
def setUp(self):
|
||||
self.old_flag = FLAGS.default_host_filter_driver
|
||||
FLAGS.default_host_filter_driver = \
|
||||
self.old_flag = FLAGS.default_host_filter
|
||||
FLAGS.default_host_filter = \
|
||||
'nova.scheduler.host_filter.AllHostsFilter'
|
||||
self.instance_type = dict(name='tiny',
|
||||
memory_mb=50,
|
||||
@@ -76,51 +76,52 @@ class HostFilterTestCase(test.TestCase):
|
||||
self.zone_manager.service_states = states
|
||||
|
||||
def tearDown(self):
|
||||
FLAGS.default_host_filter_driver = self.old_flag
|
||||
FLAGS.default_host_filter = self.old_flag
|
||||
|
||||
def test_choose_driver(self):
|
||||
# Test default driver ...
|
||||
driver = host_filter.choose_driver()
|
||||
self.assertEquals(driver._full_name(),
|
||||
def test_choose_filter(self):
|
||||
# Test default filter ...
|
||||
hf = host_filter.choose_host_filter()
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.AllHostsFilter')
|
||||
# Test valid driver ...
|
||||
driver = host_filter.choose_driver(
|
||||
'nova.scheduler.host_filter.FlavorFilter')
|
||||
self.assertEquals(driver._full_name(),
|
||||
'nova.scheduler.host_filter.FlavorFilter')
|
||||
# Test invalid driver ...
|
||||
# Test valid filter ...
|
||||
hf = host_filter.choose_host_filter(
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
# Test invalid filter ...
|
||||
try:
|
||||
host_filter.choose_driver('does not exist')
|
||||
self.fail("Should not find driver")
|
||||
except exception.SchedulerHostFilterDriverNotFound:
|
||||
host_filter.choose_host_filter('does not exist')
|
||||
self.fail("Should not find host filter.")
|
||||
except exception.SchedulerHostFilterNotFound:
|
||||
pass
|
||||
|
||||
def test_all_host_driver(self):
|
||||
driver = host_filter.AllHostsFilter()
|
||||
cooked = driver.instance_type_to_filter(self.instance_type)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
def test_all_host_filter(self):
|
||||
hf = host_filter.AllHostsFilter()
|
||||
cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(10, len(hosts))
|
||||
for host, capabilities in hosts:
|
||||
self.assertTrue(host.startswith('host'))
|
||||
|
||||
def test_flavor_driver(self):
|
||||
driver = host_filter.FlavorFilter()
|
||||
def test_instance_type_filter(self):
|
||||
hf = host_filter.InstanceTypeFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = driver.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
|
||||
name)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
self.assertEquals('host05', just_hosts[0])
|
||||
self.assertEquals('host10', just_hosts[5])
|
||||
|
||||
def test_json_driver(self):
|
||||
driver = host_filter.JsonFilter()
|
||||
def test_json_filter(self):
|
||||
hf = host_filter.JsonFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = driver.instance_type_to_filter(self.instance_type)
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
@@ -132,15 +133,16 @@ class HostFilterTestCase(test.TestCase):
|
||||
raw = ['or',
|
||||
['and',
|
||||
['<', '$compute.host_memory_free', 30],
|
||||
['<', '$compute.disk_available', 300]
|
||||
['<', '$compute.disk_available', 300],
|
||||
],
|
||||
['and',
|
||||
['>', '$compute.host_memory_free', 70],
|
||||
['>', '$compute.disk_available', 700]
|
||||
]
|
||||
['>', '$compute.disk_available', 700],
|
||||
],
|
||||
]
|
||||
|
||||
cooked = json.dumps(raw)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
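The JsonFilter tests above feed prefix-notation queries such as the 'raw' list to filter_hosts(). Below is a minimal, self-contained evaluator for that style of query against a single host's capabilities; it is an illustration of the grammar, not nova's implementation, and the operator set shown is an assumption.

def _resolve(token, caps):
    # '$compute.host_memory_free' -> caps['compute']['host_memory_free']
    if hasattr(token, 'startswith') and token.startswith('$'):
        value = caps
        for part in token[1:].split('.'):
            value = value[part]
        return value
    return token


def evaluate(node, caps):
    """Recursively evaluate a ['op', arg, ...] expression for one host."""
    if not isinstance(node, list):
        return _resolve(node, caps)
    op = node[0]
    args = [evaluate(arg, caps) for arg in node[1:]]
    if op == 'and':
        return all(args)
    if op == 'or':
        return any(args)
    if op == 'not':
        return not any(args)
    if op == 'in':
        return args[0] in args[1:]
    compare = {'=': lambda a, b: a == b,
               '<': lambda a, b: a < b,
               '>': lambda a, b: a > b,
               '<=': lambda a, b: a <= b,
               '>=': lambda a, b: a >= b}[op]   # unknown ops raise KeyError
    return all(compare(args[i], args[i + 1]) for i in range(len(args) - 1))


caps = {'compute': {'host_memory_free': 80, 'disk_available': 800}}
query = ['or',
         ['and', ['<', '$compute.host_memory_free', 30],
                 ['<', '$compute.disk_available', 300]],
         ['and', ['>', '$compute.host_memory_free', 70],
                 ['>', '$compute.disk_available', 700]]]
assert evaluate(query, caps)       # this host passes the second branch
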
@@ -152,7 +154,7 @@ class HostFilterTestCase(test.TestCase):
|
||||
['=', '$compute.host_memory_free', 30],
|
||||
]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(9, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@@ -162,7 +164,7 @@ class HostFilterTestCase(test.TestCase):
|
||||
|
||||
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@@ -174,35 +176,30 @@ class HostFilterTestCase(test.TestCase):
|
||||
raw = ['unknown command', ]
|
||||
cooked = json.dumps(raw)
|
||||
try:
|
||||
driver.filter_hosts(self.zone_manager, cooked)
|
||||
hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['not', True, False, True, False]
|
||||
)))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
['not', True, False, True, False])))
|
||||
|
||||
try:
|
||||
driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
'not', True, False, True, False
|
||||
))
|
||||
hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
'not', True, False, True, False))
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['=', '$foo', 100]
|
||||
)))
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['=', '$.....', 100]
|
||||
)))
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
|
||||
)))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$foo', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$.....', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(
|
||||
['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
|
||||
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['=', {}, ['>', '$missing....foo']]
|
||||
)))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', {}, ['>', '$missing....foo']])))
|
||||
|
||||
@@ -14,10 +14,12 @@
# License for the specific language governing permissions and limitations
# under the License.

import copy
import eventlet
import mox
import os
import re
import shutil
import sys

from xml.etree.ElementTree import fromstring as xml_to_tree
@@ -32,7 +34,8 @@ from nova import utils
|
||||
from nova.api.ec2 import cloud
|
||||
from nova.auth import manager
|
||||
from nova.compute import power_state
|
||||
from nova.virt import libvirt_conn
|
||||
from nova.virt.libvirt import connection
|
||||
from nova.virt.libvirt import firewall
|
||||
|
||||
libvirt = None
|
||||
FLAGS = flags.FLAGS
|
||||
@@ -83,7 +86,7 @@ class CacheConcurrencyTestCase(test.TestCase):
|
||||
|
||||
def test_same_fname_concurrency(self):
|
||||
"""Ensures that the same fname cache runs at a sequentially"""
|
||||
conn = libvirt_conn.LibvirtConnection
|
||||
conn = connection.LibvirtConnection
|
||||
wait1 = eventlet.event.Event()
|
||||
done1 = eventlet.event.Event()
|
||||
eventlet.spawn(conn._cache_image, _concurrency,
|
||||
@@ -104,7 +107,7 @@ class CacheConcurrencyTestCase(test.TestCase):
|
||||
|
||||
def test_different_fname_concurrency(self):
|
||||
"""Ensures that two different fname caches are concurrent"""
|
||||
conn = libvirt_conn.LibvirtConnection
|
||||
conn = connection.LibvirtConnection
|
||||
wait1 = eventlet.event.Event()
|
||||
done1 = eventlet.event.Event()
|
||||
eventlet.spawn(conn._cache_image, _concurrency,
|
||||
@@ -123,9 +126,10 @@ class CacheConcurrencyTestCase(test.TestCase):
|
||||
|
||||
|
||||
class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(LibvirtConnTestCase, self).setUp()
|
||||
libvirt_conn._late_load_cheetah()
|
||||
connection._late_load_cheetah()
|
||||
self.flags(fake_call=True)
|
||||
self.manager = manager.AuthManager()
|
||||
|
||||
@@ -159,6 +163,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
'vcpus': 2,
|
||||
'project_id': 'fake',
|
||||
'bridge': 'br101',
|
||||
'image_ref': '123456',
|
||||
'instance_type_id': '5'} # m1.small
|
||||
|
||||
def lazy_load_library_exists(self):
|
||||
@@ -171,8 +176,8 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
return False
|
||||
global libvirt
|
||||
libvirt = __import__('libvirt')
|
||||
libvirt_conn.libvirt = __import__('libvirt')
|
||||
libvirt_conn.libxml2 = __import__('libxml2')
|
||||
connection.libvirt = __import__('libvirt')
|
||||
connection.libxml2 = __import__('libxml2')
|
||||
return True
|
||||
|
||||
def create_fake_libvirt_mock(self, **kwargs):
|
||||
@@ -182,7 +187,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
class FakeLibvirtConnection(object):
|
||||
pass
|
||||
|
||||
# A fake libvirt_conn.IptablesFirewallDriver
|
||||
# A fake connection.IptablesFirewallDriver
|
||||
class FakeIptablesFirewallDriver(object):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
@@ -198,11 +203,34 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
for key, val in kwargs.items():
|
||||
fake.__setattr__(key, val)
|
||||
|
||||
# Inevitable mocks for libvirt_conn.LibvirtConnection
|
||||
self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
|
||||
libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
|
||||
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
|
||||
libvirt_conn.LibvirtConnection._conn = fake
|
||||
# Inevitable mocks for connection.LibvirtConnection
|
||||
self.mox.StubOutWithMock(connection.utils, 'import_class')
|
||||
connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
|
||||
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
|
||||
connection.LibvirtConnection._conn = fake
|
||||
|
||||
def fake_lookup(self, instance_name):
|
||||
|
||||
class FakeVirtDomain(object):
|
||||
|
||||
def snapshotCreateXML(self, *args):
|
||||
return None
|
||||
|
||||
def XMLDesc(self, *args):
|
||||
return """
|
||||
<domain type='kvm'>
|
||||
<devices>
|
||||
<disk type='file'>
|
||||
<source file='filename'/>
|
||||
</disk>
|
||||
</devices>
|
||||
</domain>
|
||||
"""
|
||||
|
||||
return FakeVirtDomain()
|
||||
|
||||
def fake_execute(self, *args):
|
||||
open(args[-1], "a").close()
|
||||
|
||||
def create_service(self, **kwargs):
|
||||
service_ref = {'host': kwargs.get('host', 'dummy'),
|
||||
@@ -214,7 +242,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
return db.service_create(context.get_admin_context(), service_ref)
|
||||
|
||||
def test_preparing_xml_info(self):
|
||||
conn = libvirt_conn.LibvirtConnection(True)
|
||||
conn = connection.LibvirtConnection(True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
result = conn._prepare_xml_info(instance_ref, False)
|
||||
@@ -229,7 +257,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self.assertTrue(len(result['nics']) == 2)
|
||||
|
||||
def test_get_nic_for_xml_v4(self):
|
||||
conn = libvirt_conn.LibvirtConnection(True)
|
||||
conn = connection.LibvirtConnection(True)
|
||||
network, mapping = _create_network_info()[0]
|
||||
self.flags(use_ipv6=False)
|
||||
params = conn._get_nic_for_xml(network, mapping)['extra_params']
|
||||
@@ -237,7 +265,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self.assertTrue(params.find('PROJMASKV6') == -1)
|
||||
|
||||
def test_get_nic_for_xml_v6(self):
|
||||
conn = libvirt_conn.LibvirtConnection(True)
|
||||
conn = connection.LibvirtConnection(True)
|
||||
network, mapping = _create_network_info()[0]
|
||||
self.flags(use_ipv6=True)
|
||||
params = conn._get_nic_for_xml(network, mapping)['extra_params']
|
||||
@@ -279,10 +307,85 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
instance_data = dict(self.test_instance)
|
||||
self._check_xml_and_container(instance_data)
|
||||
|
||||
def test_snapshot(self):
|
||||
if not self.lazy_load_library_exists():
|
||||
return
|
||||
|
||||
FLAGS.image_service = 'nova.image.fake.FakeImageService'
|
||||
|
||||
# Start test
|
||||
image_service = utils.import_object(FLAGS.image_service)
|
||||
|
||||
# Assuming that base image already exists in image_service
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
properties = {'instance_id': instance_ref['id'],
|
||||
'user_id': str(self.context.user_id)}
|
||||
snapshot_name = 'test-snap'
|
||||
sent_meta = {'name': snapshot_name, 'is_public': False,
|
||||
'status': 'creating', 'properties': properties}
|
||||
# Create a new image; the snapshot method updates it, so snapshot has to
# use the same image_service instance to see that update
|
||||
recv_meta = image_service.create(context, sent_meta)
|
||||
|
||||
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
|
||||
connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
|
||||
self.mox.StubOutWithMock(connection.utils, 'execute')
|
||||
connection.utils.execute = self.fake_execute
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
conn = connection.LibvirtConnection(False)
|
||||
conn.snapshot(instance_ref, recv_meta['id'])
|
||||
|
||||
snapshot = image_service.show(context, recv_meta['id'])
|
||||
self.assertEquals(snapshot['properties']['image_state'], 'available')
|
||||
self.assertEquals(snapshot['status'], 'active')
|
||||
self.assertEquals(snapshot['name'], snapshot_name)
|
||||
|
||||
def test_snapshot_no_image_architecture(self):
|
||||
if not self.lazy_load_library_exists():
|
||||
return
|
||||
|
||||
FLAGS.image_service = 'nova.image.fake.FakeImageService'
|
||||
|
||||
# Start test
|
||||
image_service = utils.import_object(FLAGS.image_service)
|
||||
|
||||
# Assign image_ref = 2 from nova/images/fakes for testing different
|
||||
# base image
|
||||
test_instance = copy.deepcopy(self.test_instance)
|
||||
test_instance["image_ref"] = "2"
|
||||
|
||||
# Assuming that base image already exists in image_service
|
||||
instance_ref = db.instance_create(self.context, test_instance)
|
||||
properties = {'instance_id': instance_ref['id'],
|
||||
'user_id': str(self.context.user_id)}
|
||||
snapshot_name = 'test-snap'
|
||||
sent_meta = {'name': snapshot_name, 'is_public': False,
|
||||
'status': 'creating', 'properties': properties}
|
||||
# Create a new image; the snapshot method updates it, so snapshot has to
# use the same image_service instance to see that update
|
||||
recv_meta = image_service.create(context, sent_meta)
|
||||
|
||||
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
|
||||
connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
|
||||
self.mox.StubOutWithMock(connection.utils, 'execute')
|
||||
connection.utils.execute = self.fake_execute
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
conn = connection.LibvirtConnection(False)
|
||||
conn.snapshot(instance_ref, recv_meta['id'])
|
||||
|
||||
snapshot = image_service.show(context, recv_meta['id'])
|
||||
self.assertEquals(snapshot['properties']['image_state'], 'available')
|
||||
self.assertEquals(snapshot['status'], 'active')
|
||||
self.assertEquals(snapshot['name'], snapshot_name)
|
||||
|
||||
def test_multi_nic(self):
|
||||
instance_data = dict(self.test_instance)
|
||||
network_info = _create_network_info(2)
|
||||
conn = libvirt_conn.LibvirtConnection(True)
|
||||
conn = connection.LibvirtConnection(True)
|
||||
instance_ref = db.instance_create(self.context, instance_data)
|
||||
xml = conn.to_xml(instance_ref, False, network_info)
|
||||
tree = xml_to_tree(xml)
|
||||
@@ -313,7 +416,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
'instance_id': instance_ref['id']})
|
||||
|
||||
self.flags(libvirt_type='lxc')
|
||||
conn = libvirt_conn.LibvirtConnection(True)
|
||||
conn = connection.LibvirtConnection(True)
|
||||
|
||||
uri = conn.get_uri()
|
||||
self.assertEquals(uri, 'lxc:///')
|
||||
@@ -419,7 +522,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
|
||||
FLAGS.libvirt_type = libvirt_type
|
||||
conn = libvirt_conn.LibvirtConnection(True)
|
||||
conn = connection.LibvirtConnection(True)
|
||||
|
||||
uri = conn.get_uri()
|
||||
self.assertEquals(uri, expected_uri)
|
||||
@@ -446,7 +549,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
FLAGS.libvirt_uri = testuri
|
||||
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
|
||||
FLAGS.libvirt_type = libvirt_type
|
||||
conn = libvirt_conn.LibvirtConnection(True)
|
||||
conn = connection.LibvirtConnection(True)
|
||||
uri = conn.get_uri()
|
||||
self.assertEquals(uri, testuri)
|
||||
db.instance_destroy(user_context, instance_ref['id'])
|
||||
@@ -470,13 +573,13 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self.create_fake_libvirt_mock(getVersion=getVersion,
|
||||
getType=getType,
|
||||
listDomainsID=listDomainsID)
|
||||
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
|
||||
self.mox.StubOutWithMock(connection.LibvirtConnection,
|
||||
'get_cpu_info')
|
||||
libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
|
||||
connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
|
||||
|
||||
# Start test
|
||||
self.mox.ReplayAll()
|
||||
conn = libvirt_conn.LibvirtConnection(False)
|
||||
conn = connection.LibvirtConnection(False)
|
||||
conn.update_available_resource(self.context, 'dummy')
|
||||
service_ref = db.service_get(self.context, service_ref['id'])
|
||||
compute_node = service_ref['compute_node'][0]
|
||||
@@ -510,7 +613,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self.create_fake_libvirt_mock()
|
||||
|
||||
self.mox.ReplayAll()
|
||||
conn = libvirt_conn.LibvirtConnection(False)
|
||||
conn = connection.LibvirtConnection(False)
|
||||
self.assertRaises(exception.ComputeServiceUnavailable,
|
||||
conn.update_available_resource,
|
||||
self.context, 'dummy')
|
||||
@@ -545,7 +648,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
# Start test
|
||||
self.mox.ReplayAll()
|
||||
try:
|
||||
conn = libvirt_conn.LibvirtConnection(False)
|
||||
conn = connection.LibvirtConnection(False)
|
||||
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
|
||||
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
|
||||
conn.firewall_driver.setattr('instance_filter_exists', fake_none)
|
||||
@@ -594,7 +697,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
# Start test
|
||||
self.mox.ReplayAll()
|
||||
conn = libvirt_conn.LibvirtConnection(False)
|
||||
conn = connection.LibvirtConnection(False)
|
||||
self.assertRaises(libvirt.libvirtError,
|
||||
conn._live_migration,
|
||||
self.context, instance_ref, 'dest', '',
|
||||
@@ -623,7 +726,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
# Start test
|
||||
self.mox.ReplayAll()
|
||||
conn = libvirt_conn.LibvirtConnection(False)
|
||||
conn = connection.LibvirtConnection(False)
|
||||
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
|
||||
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
|
||||
|
||||
@@ -644,10 +747,12 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
except Exception, e:
|
||||
count = (0 <= str(e.message).find('Unexpected method call'))
|
||||
|
||||
shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))
|
||||
|
||||
self.assertTrue(count)
|
||||
|
||||
def test_get_host_ip_addr(self):
|
||||
conn = libvirt_conn.LibvirtConnection(False)
|
||||
conn = connection.LibvirtConnection(False)
|
||||
ip = conn.get_host_ip_addr()
|
||||
self.assertEquals(ip, FLAGS.my_ip)
|
||||
|
||||
@@ -657,6 +762,31 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
super(LibvirtConnTestCase, self).tearDown()
|
||||
|
||||
|
||||
class NWFilterFakes:
|
||||
def __init__(self):
|
||||
self.filters = {}
|
||||
|
||||
def nwfilterLookupByName(self, name):
|
||||
if name in self.filters:
|
||||
return self.filters[name]
|
||||
raise libvirt.libvirtError('Filter Not Found')
|
||||
|
||||
def filterDefineXMLMock(self, xml):
|
||||
class FakeNWFilterInternal:
|
||||
def __init__(self, parent, name):
|
||||
self.name = name
|
||||
self.parent = parent
|
||||
|
||||
def undefine(self):
|
||||
del self.parent.filters[self.name]
|
||||
pass
|
||||
tree = xml_to_tree(xml)
|
||||
name = tree.get('name')
|
||||
if name not in self.filters:
|
||||
self.filters[name] = FakeNWFilterInternal(self, name)
|
||||
return True
|
||||
|
||||
|
||||
class IptablesFirewallTestCase(test.TestCase):
|
||||
def setUp(self):
|
||||
super(IptablesFirewallTestCase, self).setUp()
|
||||
@@ -669,11 +799,27 @@ class IptablesFirewallTestCase(test.TestCase):
|
||||
self.network = utils.import_object(FLAGS.network_manager)
|
||||
|
||||
class FakeLibvirtConnection(object):
|
||||
pass
|
||||
def nwfilterDefineXML(*args, **kwargs):
|
||||
"""setup_basic_rules in nwfilter calls this."""
|
||||
pass
|
||||
self.fake_libvirt_connection = FakeLibvirtConnection()
|
||||
self.fw = libvirt_conn.IptablesFirewallDriver(
|
||||
self.fw = firewall.IptablesFirewallDriver(
|
||||
get_connection=lambda: self.fake_libvirt_connection)
|
||||
|
||||
def lazy_load_library_exists(self):
|
||||
"""check if libvirt is available."""
|
||||
# try to connect libvirt. if fail, skip test.
|
||||
try:
|
||||
import libvirt
|
||||
import libxml2
|
||||
except ImportError:
|
||||
return False
|
||||
global libvirt
|
||||
libvirt = __import__('libvirt')
|
||||
connection.libvirt = __import__('libvirt')
|
||||
connection.libxml2 = __import__('libxml2')
|
||||
return True
|
||||
|
||||
def tearDown(self):
|
||||
self.manager.delete_project(self.project)
|
||||
self.manager.delete_user(self.user)
|
||||
@@ -849,7 +995,7 @@ class IptablesFirewallTestCase(test.TestCase):
|
||||
self.assertEquals(len(rulesv4), 2)
|
||||
self.assertEquals(len(rulesv6), 0)
|
||||
|
||||
def multinic_iptables_test(self):
|
||||
def test_multinic_iptables(self):
|
||||
ipv4_rules_per_network = 2
|
||||
ipv6_rules_per_network = 3
|
||||
networks_count = 5
|
||||
@@ -869,6 +1015,113 @@ class IptablesFirewallTestCase(test.TestCase):
|
||||
self.assertEquals(ipv6_network_rules,
|
||||
ipv6_rules_per_network * networks_count)
|
||||
|
||||
def test_do_refresh_security_group_rules(self):
|
||||
instance_ref = self._create_instance_ref()
|
||||
self.mox.StubOutWithMock(self.fw,
|
||||
'add_filters_for_instance',
|
||||
use_mock_anything=True)
|
||||
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg())
|
||||
self.fw.instances[instance_ref['id']] = instance_ref
|
||||
self.mox.ReplayAll()
|
||||
self.fw.do_refresh_security_group_rules("fake")
|
||||
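test_do_refresh_security_group_rules above uses mox's record/replay flow (StubOutWithMock, ReplayAll). A minimal sketch of that flow with made-up names (Worker, do_task), assuming only the standard mox API:

import mox


class Worker(object):
    def do_task(self, item):
        raise AssertionError('the real method should not run in the test')

    def run(self, item):
        return self.do_task(item)


m = mox.Mox()
worker = Worker()
m.StubOutWithMock(worker, 'do_task')
worker.do_task('fake').AndReturn('done')   # record the expected call
m.ReplayAll()                              # switch from record to replay
assert worker.run('fake') == 'done'        # exercise the code under test
m.VerifyAll()                              # fail if expectations were unmet
m.UnsetStubs()
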
|
||||
def test_unfilter_instance_undefines_nwfilter(self):
|
||||
# Skip if non-libvirt environment
|
||||
if not self.lazy_load_library_exists():
|
||||
return
|
||||
|
||||
admin_ctxt = context.get_admin_context()
|
||||
|
||||
fakefilter = NWFilterFakes()
|
||||
self.fw.nwfilter._conn.nwfilterDefineXML =\
|
||||
fakefilter.filterDefineXMLMock
|
||||
self.fw.nwfilter._conn.nwfilterLookupByName =\
|
||||
fakefilter.nwfilterLookupByName
|
||||
instance_ref = self._create_instance_ref()
|
||||
inst_id = instance_ref['id']
|
||||
instance = db.instance_get(self.context, inst_id)
|
||||
|
||||
ip = '10.11.12.13'
|
||||
network_ref = db.project_get_network(self.context, 'fake')
|
||||
fixed_ip = {'address': ip, 'network_id': network_ref['id']}
|
||||
db.fixed_ip_create(admin_ctxt, fixed_ip)
|
||||
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
|
||||
'instance_id': inst_id})
|
||||
self.fw.setup_basic_filtering(instance)
|
||||
self.fw.prepare_instance_filter(instance)
|
||||
self.fw.apply_instance_filter(instance)
|
||||
original_filter_count = len(fakefilter.filters)
|
||||
self.fw.unfilter_instance(instance)
|
||||
|
||||
# should undefine just the instance filter
|
||||
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
|
||||
|
||||
db.instance_destroy(admin_ctxt, instance_ref['id'])
|
||||
|
||||
def test_provider_firewall_rules(self):
|
||||
# setup basic instance data
|
||||
instance_ref = self._create_instance_ref()
|
||||
nw_info = _create_network_info(1)
|
||||
ip = '10.11.12.13'
|
||||
network_ref = db.project_get_network(self.context, 'fake')
|
||||
admin_ctxt = context.get_admin_context()
|
||||
fixed_ip = {'address': ip, 'network_id': network_ref['id']}
|
||||
db.fixed_ip_create(admin_ctxt, fixed_ip)
|
||||
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
|
||||
'instance_id': instance_ref['id']})
|
||||
# FRAGILE: peeks at how the firewall names chains
|
||||
chain_name = 'inst-%s' % instance_ref['id']
|
||||
|
||||
# create a firewall via setup_basic_filtering like libvirt_conn.spawn
|
||||
# should have a chain with 0 rules
|
||||
self.fw.setup_basic_filtering(instance_ref, network_info=nw_info)
|
||||
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
|
||||
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
|
||||
if rule.chain == 'provider']
|
||||
self.assertEqual(0, len(rules))
|
||||
|
||||
# add a rule and send the update message, check for 1 rule
|
||||
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
|
||||
{'protocol': 'tcp',
|
||||
'cidr': '10.99.99.99/32',
|
||||
'from_port': 1,
|
||||
'to_port': 65535})
|
||||
self.fw.refresh_provider_fw_rules()
|
||||
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
|
||||
if rule.chain == 'provider']
|
||||
self.assertEqual(1, len(rules))
|
||||
|
||||
# Add another, refresh, and make sure number of rules goes to two
|
||||
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
|
||||
{'protocol': 'udp',
|
||||
'cidr': '10.99.99.99/32',
|
||||
'from_port': 1,
|
||||
'to_port': 65535})
|
||||
self.fw.refresh_provider_fw_rules()
|
||||
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
|
||||
if rule.chain == 'provider']
|
||||
self.assertEqual(2, len(rules))
|
||||
|
||||
# create the instance filter and make sure it has a jump rule
|
||||
self.fw.prepare_instance_filter(instance_ref, network_info=nw_info)
|
||||
self.fw.apply_instance_filter(instance_ref)
|
||||
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
|
||||
if rule.chain == chain_name]
|
||||
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
|
||||
provjump_rules = []
|
||||
# IptablesTable doesn't make rules unique internally
|
||||
for rule in jump_rules:
|
||||
if 'provider' in rule.rule and rule not in provjump_rules:
|
||||
provjump_rules.append(rule)
|
||||
self.assertEqual(1, len(provjump_rules))
|
||||
|
||||
# remove a rule from the db, cast to compute to refresh rule
|
||||
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
|
||||
self.fw.refresh_provider_fw_rules()
|
||||
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
|
||||
if rule.chain == 'provider']
|
||||
self.assertEqual(1, len(rules))
|
||||
|
||||
|
||||
class NWFilterTestCase(test.TestCase):
|
||||
def setUp(self):
|
||||
@@ -885,7 +1138,7 @@ class NWFilterTestCase(test.TestCase):
|
||||
|
||||
self.fake_libvirt_connection = Mock()
|
||||
|
||||
self.fw = libvirt_conn.NWFilterFirewall(
|
||||
self.fw = firewall.NWFilterFirewall(
|
||||
lambda: self.fake_libvirt_connection)
|
||||
|
||||
def tearDown(self):
|
||||
@@ -1045,3 +1298,37 @@ class NWFilterTestCase(test.TestCase):
|
||||
network_info,
|
||||
"fake")
|
||||
self.assertEquals(len(result), 3)
|
||||
|
||||
def test_unfilter_instance_undefines_nwfilters(self):
|
||||
admin_ctxt = context.get_admin_context()
|
||||
|
||||
fakefilter = NWFilterFakes()
|
||||
self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
|
||||
self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
|
||||
|
||||
instance_ref = self._create_instance()
|
||||
inst_id = instance_ref['id']
|
||||
|
||||
self.security_group = self.setup_and_return_security_group()
|
||||
|
||||
db.instance_add_security_group(self.context, inst_id,
|
||||
self.security_group.id)
|
||||
|
||||
instance = db.instance_get(self.context, inst_id)
|
||||
|
||||
ip = '10.11.12.13'
|
||||
network_ref = db.project_get_network(self.context, 'fake')
|
||||
fixed_ip = {'address': ip, 'network_id': network_ref['id']}
|
||||
db.fixed_ip_create(admin_ctxt, fixed_ip)
|
||||
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
|
||||
'instance_id': inst_id})
|
||||
self.fw.setup_basic_filtering(instance)
|
||||
self.fw.prepare_instance_filter(instance)
|
||||
self.fw.apply_instance_filter(instance)
|
||||
original_filter_count = len(fakefilter.filters)
|
||||
self.fw.unfilter_instance(instance)
|
||||
|
||||
# should undefine 2 filters: instance and instance-secgroup
|
||||
self.assertEqual(original_filter_count - len(fakefilter.filters), 2)
|
||||
|
||||
db.instance_destroy(admin_ctxt, instance_ref['id'])
|
||||
@@ -16,7 +16,6 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
import webob
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
@@ -21,11 +21,24 @@ import select
|
||||
from eventlet import greenpool
|
||||
from eventlet import greenthread
|
||||
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova import utils
|
||||
from nova.utils import parse_mailmap, str_dict_replace
|
||||
|
||||
|
||||
class ExceptionTestCase(test.TestCase):
|
||||
@staticmethod
|
||||
def _raise_exc(exc):
|
||||
raise exc()
|
||||
|
||||
def test_exceptions_raise(self):
|
||||
for name in dir(exception):
|
||||
exc = getattr(exception, name)
|
||||
if isinstance(exc, type):
|
||||
self.assertRaises(exc, self._raise_exc, exc)
|
||||
|
||||
|
||||
class ProjectTestCase(test.TestCase):
|
||||
def test_authors_up_to_date(self):
|
||||
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
"""
|
||||
Unit Tests for network code
|
||||
"""
|
||||
import IPy
|
||||
import netaddr
|
||||
import os
|
||||
|
||||
from nova import test
|
||||
@@ -164,3 +164,33 @@ class IptablesManagerTestCase(test.TestCase):
|
||||
self.assertTrue('-A %s -j run_tests.py-%s' \
|
||||
% (chain, chain) in new_lines,
|
||||
"Built-in chain %s not wrapped" % (chain,))
|
||||
|
||||
def test_will_empty_chain(self):
|
||||
self.manager.ipv4['filter'].add_chain('test-chain')
|
||||
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
|
||||
old_count = len(self.manager.ipv4['filter'].rules)
|
||||
self.manager.ipv4['filter'].empty_chain('test-chain')
|
||||
self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
|
||||
|
||||
def test_will_empty_unwrapped_chain(self):
|
||||
self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
|
||||
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
|
||||
wrap=False)
|
||||
old_count = len(self.manager.ipv4['filter'].rules)
|
||||
self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
|
||||
self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
|
||||
|
||||
def test_will_not_empty_wrapped_when_unwrapped(self):
|
||||
self.manager.ipv4['filter'].add_chain('test-chain')
|
||||
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
|
||||
old_count = len(self.manager.ipv4['filter'].rules)
|
||||
self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
|
||||
self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
|
||||
|
||||
def test_will_not_empty_unwrapped_when_wrapped(self):
|
||||
self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
|
||||
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
|
||||
wrap=False)
|
||||
old_count = len(self.manager.ipv4['filter'].rules)
|
||||
self.manager.ipv4['filter'].empty_chain('test-chain')
|
||||
self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
|
||||
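The four empty_chain tests above pivot on the wrap flag: rules are only dropped when the flag passed to empty_chain matches the one used when the rules were added. A tiny model of that observable behaviour (not nova's IptablesTable internals):

class TinyTable(object):
    def __init__(self):
        self.chains = set()
        self.rules = []                          # (wrap, chain, rule) tuples

    def add_chain(self, name, wrap=True):
        self.chains.add((wrap, name))

    def add_rule(self, chain, rule, wrap=True):
        self.rules.append((wrap, chain, rule))

    def empty_chain(self, chain, wrap=True):
        self.rules = [r for r in self.rules
                      if not (r[0] == wrap and r[1] == chain)]


table = TinyTable()
table.add_chain('test-chain')
table.add_rule('test-chain', '-j DROP')
table.empty_chain('test-chain', wrap=False)      # wrap flag mismatch: no-op
assert len(table.rules) == 1
table.empty_chain('test-chain')                  # matching flag: emptied
assert len(table.rules) == 0
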
|
||||
@@ -13,10 +13,12 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import nova
|
||||
import stubout
|
||||
|
||||
import nova
|
||||
from nova import context
|
||||
from nova import flags
|
||||
from nova import log
|
||||
from nova import rpc
|
||||
import nova.notifier.api
|
||||
from nova.notifier.api import notify
|
||||
@@ -24,8 +26,6 @@ from nova.notifier import no_op_notifier
|
||||
from nova.notifier import rabbit_notifier
|
||||
from nova import test
|
||||
|
||||
import stubout
|
||||
|
||||
|
||||
class NotifierTestCase(test.TestCase):
|
||||
"""Test case for notifications"""
|
||||
@@ -115,3 +115,22 @@ class NotifierTestCase(test.TestCase):
|
||||
notify('publisher_id',
|
||||
'event_type', 'DEBUG', dict(a=3))
|
||||
self.assertEqual(self.test_topic, 'testnotify.debug')
|
||||
|
||||
def test_error_notification(self):
|
||||
self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
|
||||
'nova.notifier.rabbit_notifier')
|
||||
self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True)
|
||||
LOG = log.getLogger('nova')
|
||||
LOG.setup_from_flags()
|
||||
msgs = []
|
||||
|
||||
def mock_cast(context, topic, data):
|
||||
msgs.append(data)
|
||||
|
||||
self.stubs.Set(nova.rpc, 'cast', mock_cast)
|
||||
LOG.error('foo')
|
||||
self.assertEqual(1, len(msgs))
|
||||
msg = msgs[0]
|
||||
self.assertEqual(msg['event_type'], 'error_notification')
|
||||
self.assertEqual(msg['priority'], 'ERROR')
|
||||
self.assertEqual(msg['payload']['error'], 'foo')
|
||||
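test_error_notification expects an ERROR log record to be republished as an 'error_notification' message when publish_errors is set. A self-contained sketch of a handler with that behaviour; the publisher id and the wiring are assumptions, not nova's logging code:

import logging


class PublishErrorsHandler(logging.Handler):
    """Republish ERROR records through a notify-style callable."""

    def __init__(self, notify_func):
        logging.Handler.__init__(self, logging.ERROR)
        self.notify_func = notify_func

    def emit(self, record):
        self.notify_func('error.publisher',        # publisher_id (made up)
                         'error_notification',
                         'ERROR',
                         dict(error=record.getMessage()))


msgs = []
handler = PublishErrorsHandler(
    lambda publisher_id, event_type, priority, payload:
        msgs.append(dict(event_type=event_type,
                         priority=priority,
                         payload=payload)))

log = logging.getLogger('sketch')
log.propagate = False
log.addHandler(handler)
log.error('foo')

assert len(msgs) == 1
assert msgs[0]['priority'] == 'ERROR'
assert msgs[0]['payload']['error'] == 'foo'
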
|
||||
@@ -31,7 +31,6 @@ LOG = logging.getLogger('nova.tests.rpc')
|
||||
|
||||
|
||||
class RpcTestCase(test.TestCase):
|
||||
"""Test cases for rpc"""
|
||||
def setUp(self):
|
||||
super(RpcTestCase, self).setUp()
|
||||
self.conn = rpc.Connection.instance(True)
|
||||
@@ -43,14 +42,55 @@ class RpcTestCase(test.TestCase):
|
||||
self.context = context.get_admin_context()
|
||||
|
||||
def test_call_succeed(self):
|
||||
"""Get a value through rpc call"""
|
||||
value = 42
|
||||
result = rpc.call(self.context, 'test', {"method": "echo",
|
||||
"args": {"value": value}})
|
||||
self.assertEqual(value, result)
|
||||
|
||||
def test_call_succeed_despite_multiple_returns(self):
|
||||
value = 42
|
||||
result = rpc.call(self.context, 'test', {"method": "echo_three_times",
|
||||
"args": {"value": value}})
|
||||
self.assertEqual(value + 2, result)
|
||||
|
||||
def test_call_succeed_despite_multiple_returns_yield(self):
|
||||
value = 42
|
||||
result = rpc.call(self.context, 'test',
|
||||
{"method": "echo_three_times_yield",
|
||||
"args": {"value": value}})
|
||||
self.assertEqual(value + 2, result)
|
||||
|
||||
def test_multicall_succeed_once(self):
|
||||
value = 42
|
||||
result = rpc.multicall(self.context,
|
||||
'test',
|
||||
{"method": "echo",
|
||||
"args": {"value": value}})
|
||||
for i, x in enumerate(result):
|
||||
if i > 0:
|
||||
self.fail('should only receive one response')
|
||||
self.assertEqual(value + i, x)
|
||||
|
||||
def test_multicall_succeed_three_times(self):
|
||||
value = 42
|
||||
result = rpc.multicall(self.context,
|
||||
'test',
|
||||
{"method": "echo_three_times",
|
||||
"args": {"value": value}})
|
||||
for i, x in enumerate(result):
|
||||
self.assertEqual(value + i, x)
|
||||
|
||||
def test_multicall_succeed_three_times_yield(self):
|
||||
value = 42
|
||||
result = rpc.multicall(self.context,
|
||||
'test',
|
||||
{"method": "echo_three_times_yield",
|
||||
"args": {"value": value}})
|
||||
for i, x in enumerate(result):
|
||||
self.assertEqual(value + i, x)
|
||||
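The multicall tests above consume a receiver that produces several values (via context.reply or yield) by iterating over the result, while plain rpc.call keeps only the final value. A bare-bones illustration of that consumption pattern, with the rpc plumbing left out:

def echo_three_times_yield(value):
    # Stand-in for the TestReceiver method of the same name.
    yield value
    yield value + 1
    yield value + 2


results = list(echo_three_times_yield(42))
assert results == [42, 43, 44]       # multicall-style iteration
assert results[-1] == 44             # rpc.call-style: only the last value
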
|
||||
def test_context_passed(self):
|
||||
"""Makes sure a context is passed through rpc call"""
|
||||
"""Makes sure a context is passed through rpc call."""
|
||||
value = 42
|
||||
result = rpc.call(self.context,
|
||||
'test', {"method": "context",
|
||||
@@ -58,11 +98,12 @@ class RpcTestCase(test.TestCase):
|
||||
self.assertEqual(self.context.to_dict(), result)
|
||||
|
||||
def test_call_exception(self):
|
||||
"""Test that exception gets passed back properly
|
||||
"""Test that exception gets passed back properly.
|
||||
|
||||
rpc.call returns a RemoteError object. The value of the
|
||||
exception is converted to a string, so we convert it back
|
||||
to an int in the test.
|
||||
|
||||
"""
|
||||
value = 42
|
||||
self.assertRaises(rpc.RemoteError,
|
||||
@@ -81,7 +122,7 @@ class RpcTestCase(test.TestCase):
|
||||
self.assertEqual(int(exc.value), value)
|
||||
|
||||
def test_nested_calls(self):
|
||||
"""Test that we can do an rpc.call inside another call"""
|
||||
"""Test that we can do an rpc.call inside another call."""
|
||||
class Nested(object):
|
||||
@staticmethod
|
||||
def echo(context, queue, value):
|
||||
@@ -108,25 +149,80 @@ class RpcTestCase(test.TestCase):
|
||||
"value": value}})
|
||||
self.assertEqual(value, result)
|
||||
|
||||
def test_connectionpool_single(self):
|
||||
"""Test that ConnectionPool recycles a single connection."""
|
||||
conn1 = rpc.ConnectionPool.get()
|
||||
rpc.ConnectionPool.put(conn1)
|
||||
conn2 = rpc.ConnectionPool.get()
|
||||
rpc.ConnectionPool.put(conn2)
|
||||
self.assertEqual(conn1, conn2)
|
||||
|
||||
def test_connectionpool_double(self):
|
||||
"""Test that ConnectionPool returns and reuses separate connections.
|
||||
|
||||
When called consecutively we should get separate connections and upon
|
||||
returning them those connections should be reused for future calls
|
||||
before generating a new connection.
|
||||
|
||||
"""
|
||||
conn1 = rpc.ConnectionPool.get()
|
||||
conn2 = rpc.ConnectionPool.get()
|
||||
|
||||
self.assertNotEqual(conn1, conn2)
|
||||
rpc.ConnectionPool.put(conn1)
|
||||
rpc.ConnectionPool.put(conn2)
|
||||
|
||||
conn3 = rpc.ConnectionPool.get()
|
||||
conn4 = rpc.ConnectionPool.get()
|
||||
self.assertEqual(conn1, conn3)
|
||||
self.assertEqual(conn2, conn4)
|
||||
|
||||
def test_connectionpool_limit(self):
|
||||
"""Test connection pool limit and connection uniqueness."""
|
||||
max_size = FLAGS.rpc_conn_pool_size
|
||||
conns = []
|
||||
|
||||
for i in xrange(max_size):
|
||||
conns.append(rpc.ConnectionPool.get())
|
||||
|
||||
self.assertFalse(rpc.ConnectionPool.free_items)
|
||||
self.assertEqual(rpc.ConnectionPool.current_size,
|
||||
rpc.ConnectionPool.max_size)
|
||||
self.assertEqual(len(set(conns)), max_size)
|
||||
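The three ConnectionPool tests above depend on get() reusing a previously returned connection before creating a new one, plus current_size/free_items bookkeeping. A minimal pool with those semantics; this is only a sketch of the behaviour the tests check, not nova's rpc.ConnectionPool:

import collections


class SimplePool(object):
    def __init__(self, create, max_size=30):
        self.create = create
        self.max_size = max_size
        self.current_size = 0
        self.free_items = collections.deque()

    def get(self):
        if self.free_items:
            return self.free_items.popleft()
        # A real pool would block once max_size live items exist; the
        # sketch just grows, which is enough to show the reuse semantics.
        self.current_size += 1
        return self.create()

    def put(self, item):
        self.free_items.append(item)


pool = SimplePool(create=object, max_size=2)
first = pool.get()
second = pool.get()
assert first is not second          # consecutive gets are distinct
pool.put(first)
pool.put(second)
assert pool.get() is first          # returned items are reused in order
assert pool.get() is second
assert pool.current_size == 2
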
|
||||
|
||||
class TestReceiver(object):
|
||||
"""Simple Proxy class so the consumer has methods to call
|
||||
"""Simple Proxy class so the consumer has methods to call.
|
||||
|
||||
Uses static methods because we aren't actually storing any state"""
|
||||
Uses static methods because we aren't actually storing any state.
|
||||
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def echo(context, value):
|
||||
"""Simply returns whatever value is sent in"""
|
||||
"""Simply returns whatever value is sent in."""
|
||||
LOG.debug(_("Received %s"), value)
|
||||
return value
|
||||
|
||||
@staticmethod
|
||||
def context(context, value):
|
||||
"""Returns dictionary version of context"""
|
||||
"""Returns dictionary version of context."""
|
||||
LOG.debug(_("Received %s"), context)
|
||||
return context.to_dict()
|
||||
|
||||
@staticmethod
|
||||
def echo_three_times(context, value):
|
||||
context.reply(value)
|
||||
context.reply(value + 1)
|
||||
context.reply(value + 2)
|
||||
|
||||
@staticmethod
|
||||
def echo_three_times_yield(context, value):
|
||||
yield value
|
||||
yield value + 1
|
||||
yield value + 2
|
||||
|
||||
@staticmethod
|
||||
def fail(context, value):
|
||||
"""Raises an exception with the value sent in"""
|
||||
"""Raises an exception with the value sent in."""
|
||||
raise Exception(value)
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
"""
|
||||
Unit Tests for vlan network code
|
||||
"""
|
||||
import IPy
|
||||
import netaddr
|
||||
import os
|
||||
|
||||
from nova import context
|
||||
@@ -44,8 +44,8 @@ class VlanNetworkTestCase(base.NetworkTestCase):
|
||||
# TODO(vish): better way of adding floating ips
|
||||
self.context._project = self.projects[0]
|
||||
self.context.project_id = self.projects[0].id
|
||||
pubnet = IPy.IP(flags.FLAGS.floating_range)
|
||||
address = str(pubnet[0])
|
||||
pubnet = netaddr.IPNetwork(flags.FLAGS.floating_range)
|
||||
address = str(list(pubnet)[0])
|
||||
try:
|
||||
db.floating_ip_get_by_address(context.get_admin_context(), address)
|
||||
except exception.NotFound:
|
||||
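Both network test hunks replace IPy with netaddr when picking the first floating address. A tiny sketch of the equivalence, using an example CIDR in place of FLAGS.floating_range:

import netaddr

floating_range = '10.6.0.0/27'             # example value, not the real flag

pubnet = netaddr.IPNetwork(floating_range)
address = str(list(pubnet)[0])             # what the updated test does
assert address == '10.6.0.0'

# IPNetwork also supports direct indexing, which avoids materialising the
# whole range as a list:
assert str(pubnet[0]) == address
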
|
||||
@@ -55,8 +55,7 @@ class VMWareAPIVMTestCase(test.TestCase):
|
||||
vmwareapi_fake.reset()
|
||||
db_fakes.stub_out_db_instance_api(self.stubs)
|
||||
stubs.set_stubs(self.stubs)
|
||||
glance_stubs.stubout_glance_client(self.stubs,
|
||||
glance_stubs.FakeGlance)
|
||||
glance_stubs.stubout_glance_client(self.stubs)
|
||||
self.conn = vmwareapi_conn.get_connection(False)
|
||||
|
||||
def _create_instance_in_the_db(self):
|
||||
@@ -64,13 +63,13 @@ class VMWareAPIVMTestCase(test.TestCase):
|
||||
'id': 1,
|
||||
'project_id': self.project.id,
|
||||
'user_id': self.user.id,
|
||||
'image_id': "1",
|
||||
'image_ref': "1",
|
||||
'kernel_id': "1",
|
||||
'ramdisk_id': "1",
|
||||
'instance_type': 'm1.large',
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
}
|
||||
self.instance = db.instance_create(values)
|
||||
self.instance = db.instance_create(None, values)
|
||||
|
||||
def _create_vm(self):
|
||||
"""Create and spawn the VM."""
|
||||
|
||||
@@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase):
|
||||
self.context = context.get_admin_context()
|
||||
|
||||
@staticmethod
|
||||
def _create_volume(size='0'):
|
||||
def _create_volume(size='0', snapshot_id=None):
|
||||
"""Create a volume object."""
|
||||
vol = {}
|
||||
vol['size'] = size
|
||||
vol['snapshot_id'] = snapshot_id
|
||||
vol['user_id'] = 'fake'
|
||||
vol['project_id'] = 'fake'
|
||||
vol['availability_zone'] = FLAGS.storage_availability_zone
|
||||
@@ -69,6 +70,25 @@ class VolumeTestCase(test.TestCase):
|
||||
self.context,
|
||||
volume_id)
|
||||
|
||||
def test_create_volume_from_snapshot(self):
|
||||
"""Test volume can be created from a snapshot."""
|
||||
volume_src_id = self._create_volume()
|
||||
self.volume.create_volume(self.context, volume_src_id)
|
||||
snapshot_id = self._create_snapshot(volume_src_id)
|
||||
self.volume.create_snapshot(self.context, volume_src_id, snapshot_id)
|
||||
volume_dst_id = self._create_volume(0, snapshot_id)
|
||||
self.volume.create_volume(self.context, volume_dst_id, snapshot_id)
|
||||
self.assertEqual(volume_dst_id, db.volume_get(
|
||||
context.get_admin_context(),
|
||||
volume_dst_id).id)
|
||||
self.assertEqual(snapshot_id, db.volume_get(
|
||||
context.get_admin_context(),
|
||||
volume_dst_id).snapshot_id)
|
||||
|
||||
self.volume.delete_volume(self.context, volume_dst_id)
|
||||
self.volume.delete_snapshot(self.context, snapshot_id)
|
||||
self.volume.delete_volume(self.context, volume_src_id)
|
||||
|
||||
def test_too_big_volume(self):
|
||||
"""Ensure failure if a too large of a volume is requested."""
|
||||
# FIXME(vish): validation needs to move into the data layer in
|
||||
@@ -176,6 +196,34 @@ class VolumeTestCase(test.TestCase):
|
||||
# This will allow us to test cross-node interactions
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def _create_snapshot(volume_id, size='0'):
|
||||
"""Create a snapshot object."""
|
||||
snap = {}
|
||||
snap['volume_size'] = size
|
||||
snap['user_id'] = 'fake'
|
||||
snap['project_id'] = 'fake'
|
||||
snap['volume_id'] = volume_id
|
||||
snap['status'] = "creating"
|
||||
return db.snapshot_create(context.get_admin_context(), snap)['id']
|
||||
|
||||
def test_create_delete_snapshot(self):
|
||||
"""Test snapshot can be created and deleted."""
|
||||
volume_id = self._create_volume()
|
||||
self.volume.create_volume(self.context, volume_id)
|
||||
snapshot_id = self._create_snapshot(volume_id)
|
||||
self.volume.create_snapshot(self.context, volume_id, snapshot_id)
|
||||
self.assertEqual(snapshot_id,
|
||||
db.snapshot_get(context.get_admin_context(),
|
||||
snapshot_id).id)
|
||||
|
||||
self.volume.delete_snapshot(self.context, snapshot_id)
|
||||
self.assertRaises(exception.NotFound,
|
||||
db.snapshot_get,
|
||||
self.context,
|
||||
snapshot_id)
|
||||
self.volume.delete_volume(self.context, volume_id)
|
||||
|
||||
|
||||
class DriverTestCase(test.TestCase):
|
||||
"""Base Test class for Drivers."""
|
||||
|
||||
@@ -33,12 +33,12 @@ from nova import utils
|
||||
from nova.auth import manager
|
||||
from nova.compute import instance_types
|
||||
from nova.compute import power_state
|
||||
from nova import exception
|
||||
from nova.virt import xenapi_conn
|
||||
from nova.virt.xenapi import fake as xenapi_fake
|
||||
from nova.virt.xenapi import volume_utils
|
||||
from nova.virt.xenapi import vmops
|
||||
from nova.virt.xenapi import vm_utils
|
||||
from nova.virt.xenapi.vmops import SimpleDH
|
||||
from nova.virt.xenapi.vmops import VMOps
|
||||
from nova.tests.db import fakes as db_fakes
|
||||
from nova.tests.xenapi import stubs
|
||||
from nova.tests.glance import stubs as glance_stubs
|
||||
@@ -79,12 +79,13 @@ class XenAPIVolumeTestCase(test.TestCase):
|
||||
self.values = {'id': 1,
|
||||
'project_id': 'fake',
|
||||
'user_id': 'fake',
|
||||
'image_id': 1,
|
||||
'image_ref': 1,
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': 'linux'}
|
||||
'os_type': 'linux',
|
||||
'architecture': 'x86-64'}
|
||||
|
||||
def _create_volume(self, size='0'):
|
||||
"""Create a volume object."""
|
||||
@@ -191,10 +192,9 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
stubs.stubout_get_this_vm_uuid(self.stubs)
|
||||
stubs.stubout_stream_disk(self.stubs)
|
||||
stubs.stubout_is_vdi_pv(self.stubs)
|
||||
self.stubs.Set(VMOps, 'reset_network', reset_network)
|
||||
self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
|
||||
stubs.stub_out_vm_methods(self.stubs)
|
||||
glance_stubs.stubout_glance_client(self.stubs,
|
||||
glance_stubs.FakeGlance)
|
||||
glance_stubs.stubout_glance_client(self.stubs)
|
||||
fake_utils.stub_out_utils_execute(self.stubs)
|
||||
self.context = context.RequestContext('fake', 'fake', False)
|
||||
self.conn = xenapi_conn.get_connection(False)
|
||||
@@ -207,12 +207,13 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
'id': id,
|
||||
'project_id': proj,
|
||||
'user_id': user,
|
||||
'image_id': 1,
|
||||
'image_ref': 1,
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': 'linux'}
|
||||
'os_type': 'linux',
|
||||
'architecture': 'x86-64'}
|
||||
instance = db.instance_create(self.context, values)
|
||||
self.conn.spawn(instance)
|
||||
|
||||
@@ -229,6 +230,23 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
instance = self._create_instance()
|
||||
self.conn.get_diagnostics(instance)
|
||||
|
||||
def test_instance_snapshot_fails_with_no_primary_vdi(self):
|
||||
def create_bad_vbd(vm_ref, vdi_ref):
|
||||
vbd_rec = {'VM': vm_ref,
|
||||
'VDI': vdi_ref,
|
||||
'userdevice': 'fake',
|
||||
'currently_attached': False}
|
||||
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
|
||||
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
|
||||
return vbd_ref
|
||||
|
||||
self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
|
||||
stubs.stubout_instance_snapshot(self.stubs)
|
||||
instance = self._create_instance()
|
||||
|
||||
name = "MySnapshot"
|
||||
self.assertRaises(exception.Error, self.conn.snapshot, instance, name)
|
||||
|
||||
def test_instance_snapshot(self):
|
||||
stubs.stubout_instance_snapshot(self.stubs)
|
||||
instance = self._create_instance()
|
||||
@@ -332,7 +350,7 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
|
||||
def check_vm_params_for_linux(self):
|
||||
self.assertEquals(self.vm['platform']['nx'], 'false')
|
||||
self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
|
||||
self.assertEquals(self.vm['PV_args'], '')
|
||||
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
|
||||
|
||||
# check that these are not set
|
||||
@@ -351,23 +369,27 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
self.assertEquals(self.vm['HVM_boot_params'], {})
|
||||
self.assertEquals(self.vm['HVM_boot_policy'], '')
|
||||
|
||||
def _test_spawn(self, image_id, kernel_id, ramdisk_id,
|
||||
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
|
||||
instance_type_id="3", os_type="linux",
|
||||
instance_id=1, check_injection=False):
|
||||
architecture="x86-64", instance_id=1,
|
||||
check_injection=False):
|
||||
stubs.stubout_loopingcall_start(self.stubs)
|
||||
values = {'id': instance_id,
|
||||
'project_id': self.project.id,
|
||||
'user_id': self.user.id,
|
||||
'image_id': image_id,
|
||||
'image_ref': image_ref,
|
||||
'kernel_id': kernel_id,
|
||||
'ramdisk_id': ramdisk_id,
|
||||
'instance_type_id': instance_type_id,
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': os_type}
|
||||
'os_type': os_type,
|
||||
'architecture': architecture}
|
||||
instance = db.instance_create(self.context, values)
|
||||
self.conn.spawn(instance)
|
||||
self.create_vm_record(self.conn, os_type, instance_id)
|
||||
self.check_vm_record(self.conn, check_injection)
|
||||
self.assertTrue(instance.os_type)
|
||||
self.assertTrue(instance.architecture)
|
||||
|
||||
def test_spawn_not_enough_memory(self):
|
||||
FLAGS.xenapi_image_service = 'glance'
|
||||
@@ -392,13 +414,36 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_linux(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux")
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()

def test_spawn_vhd_glance_swapdisk(self):
# Change the default host_call_plugin to one that'll return
# a swap disk
orig_func = stubs.FakeSessionForVMTests.host_call_plugin

stubs.FakeSessionForVMTests.host_call_plugin = \
stubs.FakeSessionForVMTests.host_call_plugin_swap

try:
# We'll steal the above glance linux test
self.test_spawn_vhd_glance_linux()
finally:
# Make sure to put this back
stubs.FakeSessionForVMTests.host_call_plugin = orig_func

# We should have 2 VBDs.
self.assertEqual(len(self.vm['VBDs']), 2)
# Now test that we have 1.
self.tearDown()
self.setUp()
self.test_spawn_vhd_glance_linux()
self.assertEqual(len(self.vm['VBDs']), 1)

def test_spawn_vhd_glance_windows(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows")
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()

def test_spawn_glance(self):
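Reviewer note, not part of the diff: the swap-disk test above temporarily swaps a class attribute and restores it in a finally block so later tests see the original stub. A minimal, self-contained sketch of that pattern (names here are hypothetical):

    class FakeSession(object):
        def host_call_plugin(self):
            return 'os-only'

        def host_call_plugin_swap(self):
            return 'os-plus-swap'

    def call_with_swap_stub():
        # Temporarily point the default plugin call at the swap variant,
        # restoring the original even if the body raises.
        orig_func = FakeSession.host_call_plugin
        FakeSession.host_call_plugin = FakeSession.host_call_plugin_swap
        try:
            return FakeSession().host_call_plugin()
        finally:
            FakeSession.host_call_plugin = orig_func

    assert call_with_swap_stub() == 'os-plus-swap'
    assert FakeSession().host_call_plugin() == 'os-only'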
@@ -544,12 +589,13 @@ class XenAPIVMTestCase(test.TestCase):
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': 1,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
return instance
@@ -559,8 +605,8 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = SimpleDH()
self.bob = SimpleDH()
self.alice = vmops.SimpleDH()
self.bob = vmops.SimpleDH()

def test_shared(self):
alice_pub = self.alice.get_public()
@@ -569,11 +615,29 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEquals(alice_shared, bob_shared)

def test_encryption(self):
msg = "This is a top-secret message"
enc = self.alice.encrypt(msg)
def _test_encryption(self, message):
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEquals(dec, msg)
self.assertEquals(dec, message)

def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')

def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')

def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')

def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')

def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')

def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in xrange(1024)]))

def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()
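Reviewer note, not part of the diff: the new _test_encryption helper pins down two properties — ciphertext never carries a trailing newline, and decryption round-trips messages that do contain newlines. SimpleDH's internals are not shown in this hunk, so here is only a rough stand-in for the property being tested, using plain base64 in place of the real DH-derived cipher:

    import base64

    def encrypt(text):
        # Stand-in for SimpleDH.encrypt: ciphertext with no trailing newline.
        return base64.b64encode(text.encode('utf-8')).decode('ascii').rstrip('\n')

    def decrypt(enc):
        # Stand-in for SimpleDH.decrypt: recover the exact original text.
        return base64.b64decode(enc.encode('ascii')).decode('utf-8')

    for msg in ['simple', 'trailing newline\n', '\n\nleading', 'a\nb\nc', 'abcd' * 1024]:
        enc = encrypt(msg)
        assert not enc.endswith('\n')
        assert decrypt(enc) == msg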
@@ -600,19 +664,19 @@ class XenAPIMigrateInstance(test.TestCase):
self.values = {'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': 1,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'local_gb': 5,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
'os_type': 'linux',
'architecture': 'x86-64'}

fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
glance_stubs.stubout_glance_client(self.stubs)

def tearDown(self):
super(XenAPIMigrateInstance, self).tearDown()
@@ -638,8 +702,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
"""Unit tests for code that detects the ImageType."""
def setUp(self):
super(XenAPIDetermineDiskImageTestCase, self).setUp()
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
glance_stubs.stubout_glance_client(self.stubs)

class FakeInstance(object):
pass
@@ -647,6 +710,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
self.fake_instance.architecture = 'x86-64'

def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type(
@@ -656,7 +720,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
def test_instance_disk(self):
"""If a kernel is specified, the image type is DISK (aka machine)."""
FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
self.assert_disk_type(vm_utils.ImageType.DISK)
@@ -666,7 +730,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
DISK_RAW is assumed.
"""
FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -676,7 +740,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
this case will be 'raw'.
"""
FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -686,11 +750,33 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
this case will be 'vhd'.
"""
FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
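Reviewer note, not part of the diff: taken together, these tests pin down the mapping that the updated determine_disk_image_type relies on. A compact, simplified sketch of that decision table (the real helper lives in nova.virt.xenapi.vm_utils and inspects the instance record):

    def guess_disk_image_type(image_service, image_type_property, kernel_id):
        # Simplified view of the rules exercised by the tests above:
        #  * an image with a separate kernel is a whole-machine DISK
        #  * glance images flagged as 'vhd' map to DISK_VHD
        #  * everything else falls back to DISK_RAW
        if kernel_id is not None:
            return 'DISK'
        if image_service == 'glance' and image_type_property == 'vhd':
            return 'DISK_VHD'
        return 'DISK_RAW'

    assert guess_disk_image_type('objectstore', 'machine', kernel_id='aki-1') == 'DISK'
    assert guess_disk_image_type('glance', 'vhd', kernel_id=None) == 'DISK_VHD'
    assert guess_disk_image_type('glance', 'raw', kernel_id=None) == 'DISK_RAW'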


class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
"""Test that cmp_version compares a as less than b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)

def test_greater_than(self):
"""Test that cmp_version compares a as greater than b"""
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)

def test_equal(self):
"""Test that cmp_version compares a as equal to b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)

def test_non_lexical(self):
"""Test that cmp_version compares non-lexically"""
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)

def test_length(self):
"""Test that cmp_version compares by length as last resort"""
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
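Reviewer note, not part of the diff: CompareVersionTestCase spells out the contract for vmops.cmp_version — numeric, segment-by-segment comparison with string length as the tie-breaker. A stand-alone sketch with the same behaviour (not the actual implementation):

    def cmp_version(a, b):
        # Compare dotted version strings numerically, segment by segment;
        # if all shared segments match, the shorter string sorts first.
        xs = [int(p) for p in a.split('.')]
        ys = [int(p) for p in b.split('.')]
        for x, y in zip(xs, ys):
            if x != y:
                return x - y
        return len(xs) - len(ys)

    assert cmp_version('1.2.3.4', '1.2.3.5') < 0
    assert cmp_version('1.2.3.10', '1.2.3.4') > 0   # numeric, not lexical
    assert cmp_version('1.2.3', '1.2.3.4') < 0      # length as last resort
    assert cmp_version('1.2.3.4', '1.2.3.4') == 0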
|
||||
|
||||
|
||||
class FakeXenApi(object):
|
||||
"""Fake XenApi for testing HostState."""
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ def stub_out_db_instance_api(stubs):
else:
raise NotImplementedError()

def fake_instance_create(values):
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""

type_data = INSTANCE_TYPES[values['instance_type']]
@@ -61,7 +61,7 @@ def stub_out_db_instance_api(stubs):
'name': values['name'],
'id': values['id'],
'reservation_id': utils.generate_uid('r'),
'image_id': values['image_id'],
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'state_description': 'scheduling',
@@ -17,6 +17,7 @@
"""Stubouts, mocks and fixtures for the test suite"""

import eventlet
import json
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
@@ -37,21 +38,7 @@ def stubout_instance_snapshot(stubs):
sr_ref=sr_ref, sharable=False)
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
vdi_uuid = vdi_rec['uuid']
return vdi_uuid

stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)

def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
sr_ref = "fakesr"
vdi_ref = create_vdi(name_label=name_label, read_only=False,
sr_ref=sr_ref, sharable=False)
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
vdi_uuid = vdi_rec['uuid']
return vdi_uuid
return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]

stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
@@ -132,11 +119,30 @@ class FakeSessionForVMTests(fake.SessionBase):
def __init__(self, uri):
super(FakeSessionForVMTests, self).__init__(uri)

def host_call_plugin(self, _1, _2, _3, _4, _5):
def host_call_plugin(self, _1, _2, plugin, method, _5):
sr_ref = fake.get_all('SR')[0]
vdi_ref = fake.create_vdi('', False, sr_ref, False)
vdi_rec = fake.get_record('VDI', vdi_ref)
return '<string>%s</string>' % vdi_rec['uuid']
if plugin == "glance" and method == "download_vhd":
ret_str = json.dumps([dict(vdi_type='os',
vdi_uuid=vdi_rec['uuid'])])
else:
ret_str = vdi_rec['uuid']
return '<string>%s</string>' % ret_str

def host_call_plugin_swap(self, _1, _2, plugin, method, _5):
sr_ref = fake.get_all('SR')[0]
vdi_ref = fake.create_vdi('', False, sr_ref, False)
vdi_rec = fake.get_record('VDI', vdi_ref)
if plugin == "glance" and method == "download_vhd":
swap_vdi_ref = fake.create_vdi('', False, sr_ref, False)
swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref)
ret_str = json.dumps(
[dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']),
dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])])
else:
ret_str = vdi_rec['uuid']
return '<string>%s</string>' % ret_str

def VM_start(self, _1, ref, _2, _3):
vm = fake.get_record('VM', ref)
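Reviewer note, not part of the diff: with this change the fake session mimics the glance plugin's download_vhd call, which now returns a JSON list of VDI descriptors (an 'os' entry plus an optional 'swap' entry) wrapped in a <string> element. A small sketch of producing and consuming that payload (the UUIDs below are dummy values):

    import json

    def fake_download_vhd(include_swap=False):
        # Build the same shape the stubbed plugin call returns.
        vdis = [dict(vdi_type='os', vdi_uuid='11111111-1111-1111-1111-111111111111')]
        if include_swap:
            vdis.append(dict(vdi_type='swap', vdi_uuid='22222222-2222-2222-2222-222222222222'))
        return '<string>%s</string>' % json.dumps(vdis)

    payload = fake_download_vhd(include_swap=True)
    inner = payload[len('<string>'):-len('</string>')]
    vdis = json.loads(inner)
    assert [v['vdi_type'] for v in vdis] == ['os', 'swap']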
@@ -231,10 +237,10 @@ class FakeSessionForMigrationTests(fake.SessionBase):
def __init__(self, uri):
super(FakeSessionForMigrationTests, self).__init__(uri)

def VDI_get_by_uuid(*args):
def VDI_get_by_uuid(self, *args):
return 'hurr'

def VDI_resize_online(*args):
def VDI_resize_online(self, *args):
pass

def VM_start(self, _1, ref, _2, _3):
@@ -78,7 +78,7 @@ def WrapTwistedOptions(wrapped):
self._absorbParameters()
self._absorbHandlers()

super(TwistedOptionsToFlags, self).__init__()
wrapped.__init__(self)

def _absorbFlags(self):
twistd_flags = []
@@ -163,12 +163,12 @@ def WrapTwistedOptions(wrapped):
def parseArgs(self, *args):
# TODO(termie): figure out a decent way of dealing with args
#return
super(TwistedOptionsToFlags, self).parseArgs(*args)
wrapped.parseArgs(self, *args)

def postOptions(self):
self._doHandlers()

super(TwistedOptionsToFlags, self).postOptions()
wrapped.postOptions(self)

def __getitem__(self, key):
key = key.replace('-', '_')
@@ -35,6 +35,7 @@ import struct
import sys
import time
import types
import uuid
from xml.sax import saxutils

from eventlet import event
@@ -142,24 +143,26 @@ def execute(*cmd, **kwargs):
env = os.environ.copy()
if addl_env:
env.update(addl_env)
_PIPE = subprocess.PIPE # pylint: disable=E1101
obj = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
env=env)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
LOG.debug(_('Result was %s') % obj.returncode)
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if type(check_exit_code) == types.IntType \
and obj.returncode != check_exit_code:
and _returncode != check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
exit_code=obj.returncode,
stdout=stdout,
stderr=stderr,
exit_code=_returncode,
cmd=' '.join(cmd))
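Reviewer note, not part of the diff: the hunk above only renames locals to quiet pylint; the behaviour — run the command, compare the return code against check_exit_code, and raise with the captured output otherwise — is unchanged. A stripped-down sketch of that contract (ProcessExecutionError here is a local stand-in, and the example assumes a POSIX echo binary):

    import subprocess

    class ProcessExecutionError(Exception):
        pass

    def execute(*cmd, **kwargs):
        # Run cmd, capture output, and raise unless the exit code matches.
        check_exit_code = kwargs.pop('check_exit_code', 0)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        if isinstance(check_exit_code, int) and proc.returncode != check_exit_code:
            raise ProcessExecutionError('%r exited %d: %r' % (cmd, proc.returncode, stderr))
        return stdout, stderr

    out, _ = execute('echo', 'hello')
    assert out.strip() == b'hello'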
@@ -307,7 +310,7 @@ def get_my_linklocal(interface):


def utcnow():
"""Overridable version of datetime.datetime.utcnow."""
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
return utcnow.override_time
return datetime.datetime.utcnow()
@@ -523,6 +526,16 @@ def loads(s):
return json.loads(s)


try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append(("nova.utils", "dumps", TypeError,
"loads", ValueError))
anyjson.force_implementation("nova.utils")


_semaphores = {}
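Reviewer note, not part of the diff: the new block registers nova.utils as an anyjson backend, so libraries that serialize through anyjson (presumably carrot/kombu on the message bus) pick up the same dumps/loads wrappers nova uses. A self-contained sketch of the same pattern, guarded so it still runs when anyjson is absent:

    import json

    def dumps(value):
        # Same role as nova.utils.dumps: one choke-point for JSON encoding.
        return json.dumps(value)

    def loads(s):
        # Same role as nova.utils.loads.
        return json.loads(s)

    try:
        import anyjson
    except ImportError:
        pass
    else:
        # Mirror the registration above: advertise this module to anyjson so
        # anyjson-based callers route through the wrappers defined here.
        anyjson._modules.append((__name__, 'dumps', TypeError, 'loads', ValueError))
        anyjson.force_implementation(__name__)

    assert loads(dumps({'status': 'ok'})) == {'status': 'ok'}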
@@ -724,3 +737,17 @@ def parse_server_string(server_str):
except:
LOG.debug(_('Invalid server_string: %s' % server_str))
return ('', '')


def gen_uuid():
return uuid.uuid4()


def is_uuid_like(val):
"""For our purposes, a UUID is a string in canonical form:

aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
if not isinstance(val, basestring):
return False
return (len(val) == 36) and (val.count('-') == 4)
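Reviewer note, not part of the diff: is_uuid_like only checks the canonical 8-4-4-4-12 shape (length 36, exactly four dashes), which is cheap but deliberately loose — it does not validate hex digits. A quick illustration of that shape test:

    import uuid

    def is_uuid_like(val):
        # Same shape test as the new helper: canonical form only.
        if not isinstance(val, str):
            return False
        return len(val) == 36 and val.count('-') == 4

    assert is_uuid_like(str(uuid.uuid4()))
    assert not is_uuid_like('i-00000001')                              # EC2-style id
    assert is_uuid_like('zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz')        # loose: not hex, still matches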
257
nova/wsgi.py
@@ -59,13 +59,16 @@ class Server(object):

def __init__(self, threads=1000):
self.pool = eventlet.GreenPool(threads)
self.socket_info = {}

def start(self, application, port, host='0.0.0.0', backlog=128):
def start(self, application, port, host='0.0.0.0', key=None, backlog=128):
"""Run a WSGI server with the given application."""
arg0 = sys.argv[0]
logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals())
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
if key:
self.socket_info[key] = socket.getsockname()

def wait(self):
"""Wait until all servers have completed running."""
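Reviewer note, not part of the diff: the new key argument lets a caller record the address a listener actually bound to (via getsockname), which matters most when port 0 is passed and the kernel picks a free port. A minimal, eventlet-free sketch of the same bookkeeping (the 'osapi' key below is hypothetical):

    import socket

    class Server(object):
        def __init__(self):
            self.socket_info = {}

        def start(self, port, host='127.0.0.1', key=None, backlog=128):
            # Bind a listener; remember its real (host, port) under `key`.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind((host, port))
            sock.listen(backlog)
            if key:
                self.socket_info[key] = sock.getsockname()
            return sock

    server = Server()
    listener = server.start(0, key='osapi')      # port 0: kernel chooses
    print(server.socket_info['osapi'])           # e.g. ('127.0.0.1', 54321)
    listener.close()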
@@ -82,36 +85,7 @@ class Server(object):


class Request(webob.Request):

def best_match_content_type(self):
"""Determine the most acceptable content-type.

Based on the query extension then the Accept header.

"""
parts = self.path.rsplit('.', 1)

if len(parts) > 1:
format = parts[1]
if format in ['json', 'xml']:
return 'application/{0}'.format(parts[1])

ctypes = ['application/json', 'application/xml']
bm = self.accept.best_match(ctypes)

return bm or 'application/json'

def get_content_type(self):
allowed_types = ("application/xml", "application/json")
if not "Content-Type" in self.headers:
msg = _("Missing Content-Type")
LOG.debug(msg)
raise webob.exc.HTTPBadRequest(msg)
type = self.content_type
if type in allowed_types:
return type
LOG.debug(_("Wrong Content-Type: %s") % type)
raise webob.exc.HTTPBadRequest("Invalid content type")
pass


class Application(object):
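Reviewer note, not part of the diff: Request.best_match_content_type (shown above as context being removed from this class) prefers an explicit .json/.xml path extension, then falls back to the Accept header, and defaults to JSON. A self-contained sketch of that order of precedence, approximating webob's Accept matching with a simple substring check:

    def best_match_content_type(path, accept_header=''):
        # 1) trust a .json/.xml suffix on the URL, 2) fall back to Accept,
        # 3) default to JSON.
        parts = path.rsplit('.', 1)
        if len(parts) > 1 and parts[1] in ('json', 'xml'):
            return 'application/' + parts[1]
        for ctype in ('application/json', 'application/xml'):
            if ctype in accept_header:
                return ctype
        return 'application/json'

    assert best_match_content_type('/servers/1.xml') == 'application/xml'
    assert best_match_content_type('/servers/1', 'application/xml') == 'application/xml'
    assert best_match_content_type('/servers/1') == 'application/json'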
@@ -286,8 +260,8 @@ class Router(object):

Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be a wsgi.Controller, who will route
the request to the action method.
well and have your controller be an object that can route
the request to the action-specific method.

Examples:
mapper = routes.Mapper()
@@ -335,223 +309,6 @@ class Router(object):
return app


class Controller(object):
"""WSGI app that dispatches to methods.

WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon itself. All action methods
must, in addition to their normal parameters, accept a 'req' argument
which is the incoming wsgi.Request. They raise a webob.exc exception,
or return a dict which will be serialized by requested content type.

"""

@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Call the method specified in req.environ by RoutesMiddleware."""
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
method = getattr(self, action)
LOG.debug("%s %s" % (req.method, req.url))
del arg_dict['controller']
del arg_dict['action']
if 'format' in arg_dict:
del arg_dict['format']
arg_dict['req'] = req
result = method(**arg_dict)

if type(result) is dict:
content_type = req.best_match_content_type()
default_xmlns = self.get_default_xmlns(req)
body = self._serialize(result, content_type, default_xmlns)

response = webob.Response()
response.headers['Content-Type'] = content_type
response.body = body
msg_dict = dict(url=req.url, status=response.status_int)
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
LOG.debug(msg)
return response
else:
return result

def _serialize(self, data, content_type, default_xmlns):
"""Serialize the given dict to the provided content_type.

Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.

"""
_metadata = getattr(type(self), '_serialization_metadata', {})

serializer = Serializer(_metadata, default_xmlns)
try:
return serializer.serialize(data, content_type)
except exception.InvalidContentType:
raise webob.exc.HTTPNotAcceptable()

def _deserialize(self, data, content_type):
"""Deserialize the request body to the specified content type.

Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.

"""
_metadata = getattr(type(self), '_serialization_metadata', {})
serializer = Serializer(_metadata)
return serializer.deserialize(data, content_type)

def get_default_xmlns(self, req):
"""Provide the XML namespace to use if none is otherwise specified."""
return None


class Serializer(object):
"""Serializes and deserializes dictionaries to certain MIME types."""

def __init__(self, metadata=None, default_xmlns=None):
"""Create a serializer based on the given WSGI environment.

'metadata' is an optional dict mapping MIME types to information
needed to serialize a dictionary to that type.

"""
self.metadata = metadata or {}
self.default_xmlns = default_xmlns

def _get_serialize_handler(self, content_type):
handlers = {
'application/json': self._to_json,
'application/xml': self._to_xml,
}

try:
return handlers[content_type]
except Exception:
raise exception.InvalidContentType(content_type=content_type)

def serialize(self, data, content_type):
"""Serialize a dictionary into the specified content type."""
return self._get_serialize_handler(content_type)(data)

def deserialize(self, datastring, content_type):
"""Deserialize a string to a dictionary.

The string must be in the format of a supported MIME type.

"""
return self.get_deserialize_handler(content_type)(datastring)

def get_deserialize_handler(self, content_type):
handlers = {
'application/json': self._from_json,
'application/xml': self._from_xml,
}

try:
return handlers[content_type]
except Exception:
raise exception.InvalidContentType(content_type=content_type)

def _from_json(self, datastring):
return utils.loads(datastring)

def _from_xml(self, datastring):
xmldata = self.metadata.get('application/xml', {})
plurals = set(xmldata.get('plurals', {}))
node = minidom.parseString(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}

def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.

listnames is a collection of names of XML nodes whose subnodes should
be considered list items.

"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result

def _to_json(self, data):
return utils.dumps(data)

def _to_xml(self, data):
metadata = self.metadata.get('application/xml', {})
# We expect data to contain a single key which is the XML root.
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, metadata, root_key, data[root_key])

xmlns = node.getAttribute('xmlns')
if not xmlns and self.default_xmlns:
node.setAttribute('xmlns', self.default_xmlns)

return node.toprettyxml(indent='    ')

def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)

# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)

if type(data) is list:
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
elif type(data) is dict:
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result


def paste_config_file(basename):
"""Find the best location in the system for a paste config file.
99
run_tests.py
@@ -56,9 +56,11 @@ To run a single test module:
"""

import gettext
import heapq
import os
import unittest
import sys
import time

gettext.install('nova', unicode=1)
@@ -67,7 +69,6 @@ from nose import core
from nose import result

from nova import log as logging
from nova.tests import fake_flags


class _AnsiColorizer(object):
@@ -183,9 +184,21 @@ class _NullColorizer(object):
self.stream.write(text)


def get_elapsed_time_color(elapsed_time):
if elapsed_time > 1.0:
return 'red'
elif elapsed_time > 0.25:
return 'yellow'
else:
return 'green'


class NovaTestResult(result.TextTestResult):
def __init__(self, *args, **kw):
self.show_elapsed = kw.pop('show_elapsed')
result.TextTestResult.__init__(self, *args, **kw)
self.num_slow_tests = 5
self.slow_tests = [] # this is a fixed-sized heap
self._last_case = None
self.colorizer = None
# NOTE(vish): reset stdout for the terminal check
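Reviewer note, not part of the diff: get_elapsed_time_color buckets a test's wall-clock time into red (over 1s), yellow (over 0.25s) or green, and the same thresholds later decide which tests count as "slow". For instance:

    def get_elapsed_time_color(elapsed_time):
        if elapsed_time > 1.0:
            return 'red'
        elif elapsed_time > 0.25:
            return 'yellow'
        else:
            return 'green'

    assert [get_elapsed_time_color(t) for t in (0.01, 0.5, 2.0)] == ['green', 'yellow', 'red']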
@@ -197,28 +210,49 @@ class NovaTestResult(result.TextTestResult):
break
sys.stdout = stdout

# NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
# error results in it failing to be initialized later. Otherwise,
# _handleElapsedTime will fail, causing the wrong error message to
# be outputted.
self.start_time = time.time()

def getDescription(self, test):
return str(test)

def _handleElapsedTime(self, test):
self.elapsed_time = time.time() - self.start_time
item = (self.elapsed_time, test)
# Record only the n-slowest tests using heap
if len(self.slow_tests) >= self.num_slow_tests:
heapq.heappushpop(self.slow_tests, item)
else:
heapq.heappush(self.slow_tests, item)

def _writeElapsedTime(self, test):
color = get_elapsed_time_color(self.elapsed_time)
self.colorizer.write(" %.2f" % self.elapsed_time, color)

def _writeResult(self, test, long_result, color, short_result, success):
if self.showAll:
self.colorizer.write(long_result, color)
if self.show_elapsed and success:
self._writeElapsedTime(test)
self.stream.writeln()
elif self.dots:
self.stream.write(short_result)
self.stream.flush()

# NOTE(vish): copied from unittest with edit to add color
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
if self.showAll:
self.colorizer.write("OK", 'green')
self.stream.writeln()
elif self.dots:
self.stream.write('.')
self.stream.flush()
self._handleElapsedTime(test)
self._writeResult(test, 'OK', 'green', '.', True)

# NOTE(vish): copied from unittest with edit to add color
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
if self.showAll:
self.colorizer.write("FAIL", 'red')
self.stream.writeln()
elif self.dots:
self.stream.write('F')
self.stream.flush()
self._handleElapsedTime(test)
self._writeResult(test, 'FAIL', 'red', 'F', False)

# NOTE(vish): copied from nose with edit to add color
def addError(self, test, err):
@@ -226,6 +260,7 @@ class NovaTestResult(result.TextTestResult):
errorClasses. If the exception is a registered class, the
error will be added to the list for that class, not errors.
"""
self._handleElapsedTime(test)
stream = getattr(self, 'stream', None)
ec, ev, tb = err
try:
@@ -252,14 +287,11 @@ class NovaTestResult(result.TextTestResult):
self.errors.append((test, exc_info))
test.passed = False
if stream is not None:
if self.showAll:
self.colorizer.write("ERROR", 'red')
self.stream.writeln()
elif self.dots:
stream.write('E')
self._writeResult(test, 'ERROR', 'red', 'E', False)

def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.start_time = time.time()
current_case = test.test.__class__.__name__

if self.showAll:
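Reviewer note, not part of the diff: _handleElapsedTime keeps only the N slowest tests by pushing (elapsed, test) pairs onto a bounded min-heap — once the heap is full, heappushpop evicts the fastest entry kept so far. The trick in isolation:

    import heapq

    def record_slowest(timings, num_slow_tests=5):
        # Keep the num_slow_tests largest elapsed times seen, in O(n log k).
        slow_tests = []
        for item in timings:
            if len(slow_tests) >= num_slow_tests:
                heapq.heappushpop(slow_tests, item)   # drop the fastest of the kept set
            else:
                heapq.heappush(slow_tests, item)
        return sorted(slow_tests, reverse=True)

    timings = [(0.02, 'test_a'), (1.7, 'test_b'), (0.4, 'test_c'),
               (0.9, 'test_d'), (2.3, 'test_e'), (0.05, 'test_f'), (1.1, 'test_g')]
    assert [t[1] for t in record_slowest(timings, 3)] == ['test_e', 'test_b', 'test_g']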
@@ -273,21 +305,47 @@ class NovaTestResult(result.TextTestResult):


class NovaTestRunner(core.TextTestRunner):
def __init__(self, *args, **kwargs):
self.show_elapsed = kwargs.pop('show_elapsed')
core.TextTestRunner.__init__(self, *args, **kwargs)

def _makeResult(self):
return NovaTestResult(self.stream,
self.descriptions,
self.verbosity,
self.config)
self.config,
show_elapsed=self.show_elapsed)

def _writeSlowTests(self, result_):
# Pare out 'fast' tests
slow_tests = [item for item in result_.slow_tests
if get_elapsed_time_color(item[0]) != 'green']
if slow_tests:
slow_total_time = sum(item[0] for item in slow_tests)
self.stream.writeln("Slowest %i tests took %.2f secs:"
% (len(slow_tests), slow_total_time))
for elapsed_time, test in sorted(slow_tests, reverse=True):
time_str = "%.2f" % elapsed_time
self.stream.writeln(" %s %s" % (time_str.ljust(10), test))

def run(self, test):
result_ = core.TextTestRunner.run(self, test)
if self.show_elapsed:
self._writeSlowTests(result_)
return result_


if __name__ == '__main__':
logging.setup()
# If any argument looks like a test name but doesn't have "nova.tests" in
# front of it, automatically add that so we don't have to type as much
show_elapsed = True
argv = []
for x in sys.argv:
if x.startswith('test_'):
argv.append('nova.tests.%s' % x)
elif x.startswith('--hide-elapsed'):
show_elapsed = False
else:
argv.append(x)
@@ -300,5 +358,6 @@ if __name__ == '__main__':

runner = NovaTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c)
config=c,
show_elapsed=show_elapsed)
sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
37
run_tests.sh
@@ -6,10 +6,12 @@ function usage {
echo ""
echo "  -V, --virtual-env  Always use virtualenv.  Install automatically if not present"
echo "  -N, --no-virtual-env  Don't use virtualenv.  Run tests in local environment"
echo "  -r, --recreate-db  Recreate the test database."
echo "  -x, --stop  Stop running tests after the first error or failure."
echo "  -f, --force  Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo "  -p, --pep8  Just run pep8"
echo "  -h, --help  Print this usage message"
echo "  --hide-elapsed  Don't print the elapsed time for each test along with slow test list"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo "      If no virtualenv is found, the script will ask if you would like to create one.  If you "
@@ -22,8 +24,10 @@ function process_option {
-h|--help) usage;;
-V|--virtual-env) let always_venv=1; let never_venv=0;;
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
-r|--recreate-db) let recreate_db=1;;
-f|--force) let force=1;;
-p|--pep8) let just_pep8=1;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
}
@@ -34,8 +38,10 @@ always_venv=0
never_venv=0
force=0
noseargs=
noseopts=
wrapper=""
just_pep8=0
recreate_db=0

for arg in "$@"; do
process_option $arg
@@ -59,17 +65,20 @@ function run_tests {

function run_pep8 {
echo "Running pep8 ..."
# Opt-out files from pep8
ignore_scripts="*.sh:*nova-debug:*clean-vlans"
ignore_files="*eventlet-patch:*pip-requires"
ignore_dirs="*ajaxterm*"
GLOBIGNORE="$ignore_scripts:$ignore_files:$ignore_dirs"
srcfiles=`find bin -type f ! -name "nova.conf*"`
srcfiles+=" `find tools/*`"
srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance"
pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles}
# Just run PEP8 in current environment
${wrapper} pep8 --repeat --show-pep8 --show-source \
--exclude=vcsversion.py ${srcfiles}
}

if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi

NOSETESTS="python run_tests.py $noseargs"
NOSETESTS="python run_tests.py $noseopts $noseargs"

if [ $never_venv -eq 0 ]
then
@@ -97,9 +106,21 @@ then
fi
fi

if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi

if [ $recreate_db -eq 1 ]; then
rm tests.sqlite
fi

run_tests || exit

# Also run pep8 if no options were provided.
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and
# arguments (noseargs).
if [ -z "$noseargs" ]; then
run_pep8
fi