merge with trunk

Isaku Yamahata
2011-07-08 12:07:58 +09:00
43 changed files with 1847 additions and 1124 deletions

View File

@@ -47,3 +47,7 @@
 <vishvananda@gmail.com> <root@mirror.nasanebula.net>
 <vishvananda@gmail.com> <root@ubuntu>
 <vishvananda@gmail.com> <vishvananda@yahoo.com>
+<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
+<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
+<reldan@oscloud.ru> <enugaev@griddynamics.com>
+<kshileev@gmail.com> <kshileev@griddynamics.com>

View File

@@ -22,14 +22,14 @@ David Pravec <David.Pravec@danix.org>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
 Ed Leafe <ed@leafe.com>
-Eldar Nugaev <enugaev@griddynamics.com>
+Eldar Nugaev <reldan@oscloud.ru>
 Eric Day <eday@oddments.org>
 Eric Windisch <eric@cloudscaling.com>
 Ewan Mellor <ewan.mellor@citrix.com>
 Gabe Westmaas <gabe.westmaas@rackspace.com>
 Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
 Hisaki Ohara <hisaki.ohara@intel.com>
-Ilya Alekseyev <ialekseev@griddynamics.com>
+Ilya Alekseyev <ilyaalekseyev@acm.org>
 Isaku Yamahata <yamahata@valinux.co.jp>
 Jason Cannavale <jason.cannavale@rackspace.com>
 Jason Koelker <jason@koelker.net>
@@ -53,6 +53,7 @@ Kei Masumoto <masumotok@nttdata.co.jp>
 Ken Pepple <ken.pepple@gmail.com>
 Kevin Bringard <kbringard@attinteractive.com>
 Kevin L. Mitchell <kevin.mitchell@rackspace.com>
+Kirill Shileev <kshileev@gmail.com>
 Koji Iida <iida.koji@lab.ntt.co.jp>
 Lorin Hochstein <lorin@isi.edu>
 Lvov Maxim <usrleon@gmail.com>

View File

@@ -23,6 +23,7 @@ include nova/compute/interfaces.template
 include nova/console/xvp.conf.template
 include nova/db/sqlalchemy/migrate_repo/migrate.cfg
 include nova/db/sqlalchemy/migrate_repo/README
+include nova/db/sqlalchemy/migrate_repo/versions/*.sql
 include nova/virt/interfaces.template
 include nova/virt/libvirt*.xml.template
 include nova/virt/cpuinfo.xml.template

bin/instance-usage-audit Executable file
View File

@@ -0,0 +1,116 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Cron script to generate usage notifications for instances neither created
nor destroyed in a given time period.

Together with the notifications generated by compute on instance
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate instance usage
for each tenant.

Time periods are specified like so:
    <number>[mdy]

1m = previous month. If the script is run April 1, it will generate usages
     for March 1 thru March 31.
3m = 3 previous months.
90d = previous 90 days.
1y = previous year. If run on Jan 1, it generates usages for
     Jan 1 thru Dec 31 of the previous year.
"""

import datetime
import gettext
import os
import sys
import time

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                                os.pardir,
                                                os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
    sys.path.insert(0, POSSIBLE_TOPDIR)

gettext.install('nova', unicode=1)

from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils

from nova.notifier import api as notifier_api

FLAGS = flags.FLAGS
flags.DEFINE_string('instance_usage_audit_period', '1m',
                    'time period to generate instance usages for.')


def time_period(period):
    today = datetime.date.today()
    unit = period[-1]
    if unit not in 'mdy':
        raise ValueError('Time period must be m, d, or y')
    n = int(period[:-1])
    if unit == 'm':
        year = today.year - (n // 12)
        n = n % 12
        if n >= today.month:
            year -= 1
            month = 12 + (today.month - n)
        else:
            month = today.month - n
        begin = datetime.datetime(day=1, month=month, year=year)
        end = datetime.datetime(day=1, month=today.month, year=today.year)

    elif unit == 'y':
        begin = datetime.datetime(day=1, month=1, year=today.year - n)
        end = datetime.datetime(day=1, month=1, year=today.year)

    elif unit == 'd':
        b = today - datetime.timedelta(days=n)
        begin = datetime.datetime(day=b.day, month=b.month, year=b.year)
        end = datetime.datetime(day=today.day,
                                month=today.month,
                                year=today.year)

    return (begin, end)


if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    begin, end = time_period(FLAGS.instance_usage_audit_period)
    print "Creating usages for %s until %s" % (str(begin), str(end))
    instances = db.instance_get_active_by_window(context.get_admin_context(),
                                                 begin,
                                                 end)
    print "%s instances" % len(instances)
    for instance_ref in instances:
        usage_info = utils.usage_from_instance(instance_ref,
                         audit_period_begining=str(begin),
                         audit_period_ending=str(end))
        notifier_api.notify('compute.%s' % FLAGS.host,
                            'compute.instance.exists',
                            notifier_api.INFO,
                            usage_info)
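
A worked example of the month arithmetic in time_period() above, as a
standalone sketch with a fixed "today" so the output is predictable
(the helper name is illustrative, not part of the commit):

    import datetime

    def month_period(today, n):
        # Mirrors the unit == 'm' branch: step back n months, clamp to day 1.
        year = today.year - (n // 12)
        n = n % 12
        if n >= today.month:
            year -= 1
            month = 12 + (today.month - n)
        else:
            month = today.month - n
        begin = datetime.datetime(day=1, month=month, year=year)
        end = datetime.datetime(day=1, month=today.month, year=today.year)
        return (begin, end)

    # Run on April 1, '1m' covers March 1 up to (but not including) April 1:
    print month_period(datetime.date(2011, 4, 1), 1)
    # (datetime.datetime(2011, 3, 1, 0, 0), datetime.datetime(2011, 4, 1, 0, 0))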

View File

@@ -137,8 +137,9 @@ if __name__ == '__main__':
     utils.default_flagfile()
     FLAGS(sys.argv)
     logging.setup()
-    server = wsgi.Server()
+    acp_port = FLAGS.ajax_console_proxy_port
     acp = AjaxConsoleProxy()
     acp.register_listeners()
-    server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
+    server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
+    server.start()
     server.wait()

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# pylint: disable=C0103
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

 # Copyright 2010 United States Government as represented by the
@@ -18,44 +17,40 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Starter script for Nova API."""
+"""Starter script for Nova API.
+
+Starts both the EC2 and OpenStack APIs in separate processes.
+
+"""

-import gettext
 import os
 import sys

-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
-                                   os.pardir,
-                                   os.pardir))
-if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
+        sys.argv[0]), os.pardir, os.pardir))
+if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
     sys.path.insert(0, possible_topdir)

-gettext.install('nova', unicode=1)
-
-from nova import flags
-from nova import log as logging
-from nova import service
-from nova import utils
-from nova import version
-from nova import wsgi
+import nova.service
+import nova.utils

-LOG = logging.getLogger('nova.api')
-FLAGS = flags.FLAGS
+
+def main():
+    """Launch EC2 and OSAPI services."""
+    nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
+
+    ec2 = nova.service.WSGIService("ec2")
+    osapi = nova.service.WSGIService("osapi")
+
+    launcher = nova.service.Launcher()
+    launcher.launch_service(ec2)
+    launcher.launch_service(osapi)
+
+    try:
+        launcher.wait()
+    except KeyboardInterrupt:
+        launcher.stop()

 if __name__ == '__main__':
-    utils.default_flagfile()
-    FLAGS(sys.argv)
-    logging.setup()
-    LOG.audit(_("Starting nova-api node (version %s)"),
-              version.version_string_with_vcs())
-    LOG.debug(_("Full set of FLAGS:"))
-    for flag in FLAGS:
-        flag_get = FLAGS.get(flag, None)
-        LOG.debug("%(flag)s : %(flag_get)s" % locals())
-    service = service.serve_wsgi(service.ApiService)
-    service.wait()
+    sys.exit(main())

View File

@@ -59,14 +59,12 @@ def add_lease(mac, ip_address, _hostname, _interface):
LOG.debug(_("leasing ip")) LOG.debug(_("leasing ip"))
network_manager = utils.import_object(FLAGS.network_manager) network_manager = utils.import_object(FLAGS.network_manager)
network_manager.lease_fixed_ip(context.get_admin_context(), network_manager.lease_fixed_ip(context.get_admin_context(),
mac,
ip_address) ip_address)
else: else:
rpc.cast(context.get_admin_context(), rpc.cast(context.get_admin_context(),
"%s.%s" % (FLAGS.network_topic, FLAGS.host), "%s.%s" % (FLAGS.network_topic, FLAGS.host),
{"method": "lease_fixed_ip", {"method": "lease_fixed_ip",
"args": {"mac": mac, "args": {"address": ip_address}})
"address": ip_address}})
def old_lease(mac, ip_address, hostname, interface): def old_lease(mac, ip_address, hostname, interface):
@@ -81,14 +79,12 @@ def del_lease(mac, ip_address, _hostname, _interface):
LOG.debug(_("releasing ip")) LOG.debug(_("releasing ip"))
network_manager = utils.import_object(FLAGS.network_manager) network_manager = utils.import_object(FLAGS.network_manager)
network_manager.release_fixed_ip(context.get_admin_context(), network_manager.release_fixed_ip(context.get_admin_context(),
mac,
ip_address) ip_address)
else: else:
rpc.cast(context.get_admin_context(), rpc.cast(context.get_admin_context(),
"%s.%s" % (FLAGS.network_topic, FLAGS.host), "%s.%s" % (FLAGS.network_topic, FLAGS.host),
{"method": "release_fixed_ip", {"method": "release_fixed_ip",
"args": {"mac": mac, "args": {"address": ip_address}})
"address": ip_address}})
def init_leases(interface): def init_leases(interface):

View File

@@ -93,6 +93,9 @@ if __name__ == '__main__':
     with_req = direct.PostParamsMiddleware(with_json)
     with_auth = direct.DelegatedAuthMiddleware(with_req)

-    server = wsgi.Server()
-    server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host)
+    server = wsgi.Server("Direct API",
+                         with_auth,
+                         host=FLAGS.direct_host,
+                         port=FLAGS.direct_port)
+    server.start()
     server.wait()

View File

@@ -172,17 +172,23 @@ class VpnCommands(object):
     def change(self, project_id, ip, port):
         """Change the ip and port for a vpn.

+        this will update all networks associated with a project
+        not sure if that's the desired behavior or not, patches accepted
+
         args: project, ip, port"""
+        # TODO(tr3buchet): perhaps this shouldn't update all networks
+        # associated with a project in the future
         project = self.manager.get_project(project_id)
         if not project:
             print 'No project %s' % (project_id)
             return
-        admin = context.get_admin_context()
-        network_ref = db.project_get_network(admin, project_id)
-        db.network_update(admin,
-                          network_ref['id'],
-                          {'vpn_public_address': ip,
-                           'vpn_public_port': int(port)})
+        admin_context = context.get_admin_context()
+        networks = db.project_get_networks(admin_context, project_id)
+        for network in networks:
+            db.network_update(admin_context,
+                              network['id'],
+                              {'vpn_public_address': ip,
+                               'vpn_public_port': int(port)})


 class ShellCommands(object):
@@ -446,12 +452,13 @@
     def scrub(self, project_id):
         """Deletes data associated with project
         arguments: project_id"""
-        ctxt = context.get_admin_context()
-        network_ref = db.project_get_network(ctxt, project_id)
-        db.network_disassociate(ctxt, network_ref['id'])
-        groups = db.security_group_get_by_project(ctxt, project_id)
+        admin_context = context.get_admin_context()
+        networks = db.project_get_networks(admin_context, project_id)
+        for network in networks:
+            db.network_disassociate(admin_context, network['id'])
+        groups = db.security_group_get_by_project(admin_context, project_id)
         for group in groups:
-            db.security_group_destroy(ctxt, group['id'])
+            db.security_group_destroy(admin_context, group['id'])

     def zipfile(self, project_id, user_id, filename='nova.zip'):
         """Exports credentials for project to a zip file
@@ -505,7 +512,7 @@
             instance = fixed_ip['instance']
             hostname = instance['hostname']
             host = instance['host']
-            mac_address = instance['mac_address']
+            mac_address = fixed_ip['mac_address']['address']
             print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (
                 fixed_ip['network']['cidr'],
                 fixed_ip['address'],
@@ -515,13 +522,12 @@
 class FloatingIpCommands(object):
     """Class for managing floating ip."""

-    def create(self, host, range):
-        """Creates floating ips for host by range
-        arguments: host ip_range"""
+    def create(self, range):
+        """Creates floating ips for zone by range
+        arguments: ip_range"""
         for address in netaddr.IPNetwork(range):
             db.floating_ip_create(context.get_admin_context(),
-                                  {'address': str(address),
-                                   'host': host})
+                                  {'address': str(address)})

     def delete(self, ip_range):
         """Deletes floating ips by range
@@ -532,7 +538,8 @@
     def list(self, host=None):
         """Lists all floating ips (optionally by host)
-        arguments: [host]"""
+        arguments: [host]
+        Note: if host is given, only active floating IPs are returned"""
         ctxt = context.get_admin_context()
         if host is None:
             floating_ips = db.floating_ip_get_all(ctxt)
@@ -550,10 +557,23 @@
 class NetworkCommands(object):
     """Class for managing networks."""

-    def create(self, fixed_range=None, num_networks=None, network_size=None,
-               vlan_start=None, vpn_start=None, fixed_range_v6=None,
-               gateway_v6=None, label='public'):
-        """Creates fixed ips for host by range"""
+    def create(self, label=None, fixed_range=None, num_networks=None,
+               network_size=None, vlan_start=None,
+               vpn_start=None, fixed_range_v6=None, gateway_v6=None,
+               flat_network_bridge=None, bridge_interface=None):
+        """Creates fixed ips for host by range
+        arguments: label, fixed_range, [num_networks=FLAG],
+                   [network_size=FLAG], [vlan_start=FLAG],
+                   [vpn_start=FLAG], [fixed_range_v6=FLAG], [gateway_v6=FLAG],
+                   [flat_network_bridge=FLAG], [bridge_interface=FLAG]
+        If you wish to use a later argument fill in the gaps with 0s
+        Ex: network create private 10.0.0.0/8 1 15 0 0 0 0 xenbr1 eth1
+            network create private 10.0.0.0/8 1 15
+        """
+        if not label:
+            msg = _('a label (ex: public) is required to create networks.')
+            print msg
+            raise TypeError(msg)
         if not fixed_range:
             msg = _('Fixed range in the form of 10.0.0.0/8 is '
                     'required to create networks.')
@@ -569,11 +589,17 @@
             vpn_start = FLAGS.vpn_start
         if not fixed_range_v6:
             fixed_range_v6 = FLAGS.fixed_range_v6
+        if not flat_network_bridge:
+            flat_network_bridge = FLAGS.flat_network_bridge
+        if not bridge_interface:
+            bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
         if not gateway_v6:
             gateway_v6 = FLAGS.gateway_v6
         net_manager = utils.import_object(FLAGS.network_manager)
         try:
             net_manager.create_networks(context.get_admin_context(),
+                                        label=label,
                                         cidr=fixed_range,
                                         num_networks=int(num_networks),
                                         network_size=int(network_size),
@@ -581,7 +607,8 @@
                                         vpn_start=int(vpn_start),
                                         cidr_v6=fixed_range_v6,
                                         gateway_v6=gateway_v6,
-                                        label=label)
+                                        bridge=flat_network_bridge,
+                                        bridge_interface=bridge_interface)
         except ValueError, e:
             print e
             raise e
@@ -617,7 +644,7 @@ class VmCommands(object):
         :param host: show all instance on specified host.
         :param instance: show specificed instance.
         """
-        print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+        print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
             " %-10s %-10s %-10s %-5s" % (
                 _('instance'),
                 _('node'),
@@ -639,14 +666,14 @@ class VmCommands(object):
                 context.get_admin_context(), host)

         for instance in instances:
-            print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+            print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
                 " %-10s %-10s %-10s %-5d" % (
                     instance['hostname'],
                     instance['host'],
-                    instance['instance_type'],
+                    instance['instance_type'].name,
                     instance['state_description'],
                     instance['launched_at'],
-                    instance['image_id'],
+                    instance['image_ref'],
                     instance['kernel_id'],
                     instance['ramdisk_id'],
                     instance['project_id'],
@@ -878,7 +905,7 @@ class InstanceTypeCommands(object):
         try:
             instance_types.create(name, memory, vcpus, local_gb,
                                   flavorid, swap, rxtx_quota, rxtx_cap)
-        except exception.InvalidInput:
+        except exception.InvalidInput, e:
             print "Must supply valid parameters to create instance_type"
             print e
             sys.exit(1)

View File

@@ -50,6 +50,9 @@ if __name__ == '__main__':
     FLAGS(sys.argv)
     logging.setup()
     router = s3server.S3Application(FLAGS.buckets_path)
-    server = wsgi.Server()
-    server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+    server = wsgi.Server("S3 Objectstore",
+                         router,
+                         port=FLAGS.s3_port,
+                         host=FLAGS.s3_host)
+    server.start()
     server.wait()

View File

@@ -63,6 +63,19 @@ flags.DEFINE_flag(flags.HelpshortFlag())
 flags.DEFINE_flag(flags.HelpXMLFlag())


+def handle_flash_socket_policy(socket):
+    LOG.info(_("Received connection on flash socket policy port"))
+    fd = socket.makefile('rw')
+    expected_command = "<policy-file-request/>"
+    if expected_command in fd.read(len(expected_command) + 1):
+        LOG.info(_("Received valid flash socket policy request"))
+        fd.write('<?xml version="1.0"?><cross-domain-policy><allow-'
+                 'access-from domain="*" to-ports="%d" /></cross-'
+                 'domain-policy>' % (FLAGS.vncproxy_port))
+        fd.flush()
+    socket.close()
+
+
 if __name__ == "__main__":
     utils.default_flagfile()
     FLAGS(sys.argv)
@@ -96,6 +109,11 @@ if __name__ == "__main__":
     service.serve()

-    server = wsgi.Server()
-    server.start(with_auth, FLAGS.vncproxy_port, host=FLAGS.vncproxy_host)
+    server = wsgi.Server("VNC Proxy",
+                         with_auth,
+                         host=FLAGS.vncproxy_host,
+                         port=FLAGS.vncproxy_port)
+    server.start()
+    server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host)
     server.wait()

View File

@@ -100,6 +100,11 @@ class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
     pass


+class SERVER_DOWN(Exception):  # pylint: disable=C0103
+    """Duplicate exception class from real LDAP module."""
+    pass
+
+
 def initialize(_uri):
     """Opens a fake connection with an LDAP server."""
     return FakeLDAP()
@@ -202,25 +207,38 @@ def _to_json(unencoded):
     return json.dumps(list(unencoded))


+server_fail = False
+
+
 class FakeLDAP(object):
     """Fake LDAP connection."""

     def simple_bind_s(self, dn, password):
         """This method is ignored, but provided for compatibility."""
+        if server_fail:
+            raise SERVER_DOWN
         pass

     def unbind_s(self):
         """This method is ignored, but provided for compatibility."""
+        if server_fail:
+            raise SERVER_DOWN
         pass

     def add_s(self, dn, attr):
         """Add an object with the specified attributes at dn."""
+        if server_fail:
+            raise SERVER_DOWN
+
         key = "%s%s" % (self.__prefix, dn)
         value_dict = dict([(k, _to_json(v)) for k, v in attr])
         Store.instance().hmset(key, value_dict)

     def delete_s(self, dn):
         """Remove the ldap object at specified dn."""
+        if server_fail:
+            raise SERVER_DOWN
+
         Store.instance().delete("%s%s" % (self.__prefix, dn))

     def modify_s(self, dn, attrs):
@@ -232,6 +250,9 @@ class FakeLDAP(object):
             ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)

         """
+        if server_fail:
+            raise SERVER_DOWN
+
         store = Store.instance()
         key = "%s%s" % (self.__prefix, dn)
@@ -255,6 +276,9 @@ class FakeLDAP(object):
             fields -- fields to return. Returns all fields if not specified

         """
+        if server_fail:
+            raise SERVER_DOWN
+
         if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
             raise NotImplementedError(str(scope))
         store = Store.instance()
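
A sketch of how the new server_fail toggle is meant to be used from tests
(hypothetical test code, not part of this commit):

    from nova.auth import fakeldap

    def test_server_down_simulation():
        conn = fakeldap.initialize('fake://localhost')
        fakeldap.server_fail = True
        try:
            try:
                conn.simple_bind_s('cn=admin', 'secret')
                raise AssertionError('expected SERVER_DOWN')
            except fakeldap.SERVER_DOWN:
                pass  # expected: every call fails while the flag is set
        finally:
            fakeldap.server_fail = False  # restore for other tests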

View File

@@ -101,6 +101,41 @@ def sanitize(fn):
     return _wrapped


+class LDAPWrapper(object):
+    def __init__(self, ldap, url, user, password):
+        self.ldap = ldap
+        self.url = url
+        self.user = user
+        self.password = password
+        self.conn = None
+
+    def __wrap_reconnect(f):
+        def inner(self, *args, **kwargs):
+            if self.conn is None:
+                self.connect()
+                return f(self.conn)(*args, **kwargs)
+            else:
+                try:
+                    return f(self.conn)(*args, **kwargs)
+                except self.ldap.SERVER_DOWN:
+                    self.connect()
+                    return f(self.conn)(*args, **kwargs)
+        return inner
+
+    def connect(self):
+        try:
+            self.conn = self.ldap.initialize(self.url)
+            self.conn.simple_bind_s(self.user, self.password)
+        except self.ldap.SERVER_DOWN:
+            self.conn = None
+            raise
+
+    search_s = __wrap_reconnect(lambda conn: conn.search_s)
+    add_s = __wrap_reconnect(lambda conn: conn.add_s)
+    delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
+    modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
+
+
 class LdapDriver(object):
     """Ldap Auth driver

@@ -124,8 +159,8 @@ class LdapDriver(object):
             LdapDriver.project_objectclass = 'novaProject'
         self.__cache = None
         if LdapDriver.conn is None:
-            LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
-            LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
-                                          FLAGS.ldap_password)
+            LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
+                                          FLAGS.ldap_user_dn,
+                                          FLAGS.ldap_password)
         if LdapDriver.mc is None:
             LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
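
The __wrap_reconnect pattern above is subtle: each wrapped attribute takes a
lambda that maps a live connection to the bound method, so every attempt
re-resolves against the current connection object rather than a stale one.
A minimal self-contained sketch of the same retry-once pattern (illustrative
names only, not part of the commit):

    class SERVER_DOWN(Exception):
        pass

    class Wrapper(object):
        def __init__(self):
            self.conn = None

        def connect(self):
            # stand-in for ldap.initialize() + simple_bind_s()
            self.conn = {'ping': lambda: 'pong'}

        def __wrap_reconnect(f):
            def inner(self, *args, **kwargs):
                if self.conn is None:
                    self.connect()
                    return f(self.conn)(*args, **kwargs)
                try:
                    return f(self.conn)(*args, **kwargs)
                except SERVER_DOWN:
                    self.connect()  # reconnect once, then retry the call
                    return f(self.conn)(*args, **kwargs)
            return inner

        ping = __wrap_reconnect(lambda conn: conn['ping'])

    print Wrapper().ping()  # 'pong'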

View File

@@ -630,13 +630,17 @@ class AuthManager(object):
         not been allocated for user.

         """
-        network_ref = db.project_get_network(context.get_admin_context(),
-                                             Project.safe_id(project), False)
-
-        if not network_ref:
+        networks = db.project_get_networks(context.get_admin_context(),
+                                           Project.safe_id(project), False)
+        if not networks:
             return (None, None)
-        return (network_ref['vpn_public_address'],
-                network_ref['vpn_public_port'])
+
+        # TODO(tr3buchet): not sure what you guys plan on doing with this
+        # but it's possible for a project to have multiple sets of vpn data
+        # for now I'm just returning the first one
+        network = networks[0]
+        return (network['vpn_public_address'],
+                network['vpn_public_port'])

     def delete_project(self, project):
         """Deletes a project"""
View File

@@ -314,3 +314,14 @@ logging.setLoggerClass(NovaLogger)
 def audit(msg, *args, **kwargs):
     """Shortcut for logging to root log with sevrity 'AUDIT'."""
     logging.root.log(AUDIT, msg, *args, **kwargs)
+
+
+class WritableLogger(object):
+    """A thin wrapper that responds to `write` and logs."""
+
+    def __init__(self, logger, level=logging.INFO):
+        self.logger = logger
+        self.level = level
+
+    def write(self, msg):
+        self.logger.log(self.level, msg)
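
Anything that expects a file-like object with a write() method can now log
through the standard logging tree. A hypothetical use, not shown in this
commit, would be handing a WritableLogger to a WSGI server as its log file:

    import logging
    from nova import log as nova_logging

    logger = logging.getLogger('nova.wsgi')
    log_file = nova_logging.WritableLogger(logger, logging.DEBUG)
    log_file.write('192.168.1.1 - - "GET /v1.0/servers HTTP/1.1" 200')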

View File

@@ -0,0 +1,28 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json

from nova import flags
from nova import log as logging

FLAGS = flags.FLAGS

NOTIFICATIONS = []


def notify(message):
    """Test notifier, stores notifications in memory for unittests."""
    NOTIFICATIONS.append(message)
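
Hypothetical test usage (the notification_driver flag name is assumed, not
shown in this diff): point the notification driver at this module, exercise
code that notifies, then assert on the captured messages instead of a real
message bus.

    from nova import flags
    from nova.notifier import api as notifier_api
    from nova.notifier import test_notifier

    FLAGS = flags.FLAGS
    FLAGS.notification_driver = 'nova.notifier.test_notifier'

    notifier_api.notify('compute.host1', 'compute.instance.exists',
                        notifier_api.INFO, dict(tenant_id='demo'))
    assert len(test_notifier.NOTIFICATIONS) == 1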

View File

@@ -275,6 +275,11 @@ class FanoutAdapterConsumer(AdapterConsumer):
         unique = uuid.uuid4().hex
         self.queue = '%s_fanout_%s' % (topic, unique)
         self.durable = False
+        # Fanout creates unique queue names, so we should auto-remove
+        # them when done, so they're not left around on restart.
+        # Also, we're the only one that should be consuming.  exclusive
+        # implies auto_delete, so we'll just set that..
+        self.exclusive = True
         LOG.info(_('Created "%(exchange)s" fanout exchange '
                    'with "%(key)s" routing key'),
                  dict(exchange=self.exchange, key=self.routing_key))
@@ -355,6 +360,7 @@
         self.exchange = '%s_fanout' % topic
         self.queue = '%s_fanout' % topic
         self.durable = False
+        self.auto_delete = True
         LOG.info(_('Creating "%(exchange)s" fanout exchange'),
                  dict(exchange=self.exchange))
         super(FanoutPublisher, self).__init__(connection=connection)

View File

@@ -51,6 +51,11 @@ def _call_scheduler(method, context, params=None):
     return rpc.call(context, queue, kwargs)


+def get_host_list(context):
+    """Return a list of hosts associated with this zone."""
+    return _call_scheduler('get_host_list', context)
+
+
 def get_zone_list(context):
     """Return a list of zones assoicated with this zone."""
     items = _call_scheduler('get_zone_list', context)
@@ -114,7 +119,8 @@
 def call_zone_method(context, method_name, errors_to_ignore=None,
-                     novaclient_collection_name='zones', *args, **kwargs):
+                     novaclient_collection_name='zones', zones=None,
+                     *args, **kwargs):
     """Returns a list of (zone, call_result) objects."""
     if not isinstance(errors_to_ignore, (list, tuple)):
         # This will also handle the default None
@@ -122,7 +128,9 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
     pool = greenpool.GreenPool()
     results = []
-    for zone in db.zone_get_all(context):
+    if zones is None:
+        zones = db.zone_get_all(context)
+    for zone in zones:
         try:
             nova = novaclient.OpenStack(zone.username, zone.password, None,
                                         zone.api_url)
@@ -162,32 +170,53 @@ def child_zone_helper(zone_list, func):
         _wrap_method(_process, func), zone_list)]


-def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
+def _issue_novaclient_command(nova, zone, collection,
+                              method_name, *args, **kwargs):
     """Use novaclient to issue command to a single child zone.
-    One of these will be run in parallel for each child zone."""
+
+    One of these will be run in parallel for each child zone.
+    """
     manager = getattr(nova, collection)
-    result = None
-    try:
+
+    # NOTE(comstud): This is not ideal, but we have to do this based on
+    # how novaclient is implemented right now.
+    # 'find' is special cased as novaclient requires kwargs for it to
+    # filter on a 'get_all'.
+    # Every other method first needs to do a 'get' on the first argument
+    # passed, which should be a UUID.  If it's 'get' itself that we want,
+    # we just return the result.  Otherwise, we next call the real method
+    # that's wanted... passing other arguments that may or may not exist.
+    if method_name in ['find', 'findall']:
         try:
-            result = manager.get(int(item_id))
-        except ValueError, e:
-            result = manager.find(name=item_id)
+            return getattr(manager, method_name)(**kwargs)
+        except novaclient.NotFound:
+            url = zone.api_url
+            LOG.debug(_("%(collection)s.%(method_name)s didn't find "
+                        "anything matching '%(kwargs)s' on '%(url)s'" %
+                        locals()))
+            return None
+
+    args = list(args)
+    # pop off the UUID to look up
+    item = args.pop(0)
+    try:
+        result = manager.get(item)
     except novaclient.NotFound:
         url = zone.api_url
-        LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
+        LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" %
                     locals()))
         return None

-    if method_name.lower() not in ['get', 'find']:
-        result = getattr(result, method_name)()
+    if method_name.lower() != 'get':
+        # if we're doing something other than 'get', call it passing args.
+        result = getattr(result, method_name)(*args, **kwargs)
     return result


-def wrap_novaclient_function(f, collection, method_name, item_id):
-    """Appends collection, method_name and item_id to the incoming
+def wrap_novaclient_function(f, collection, method_name, *args, **kwargs):
+    """Appends collection, method_name and arguments to the incoming
     (nova, zone) call from child_zone_helper."""
     def inner(nova, zone):
-        return f(nova, zone, collection, method_name, item_id)
+        return f(nova, zone, collection, method_name, *args, **kwargs)

     return inner
@@ -220,7 +249,7 @@
        the wrapped method.  (This ensures that zone-local code can
        continue to use integer IDs).

-       4. If the item was not found, we delgate the call to a child zone
+       4. If the item was not found, we delegate the call to a child zone
        using the UUID.
     """
     def __init__(self, method_name):
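
With the *args/**kwargs plumbing above, callers can forward arbitrary
novaclient arguments to every child zone. An illustrative, hypothetical
invocation (zone_list and the UUID are placeholders), pausing a server by
UUID across child zones:

    from nova.scheduler import api

    uuid = '11111111-2222-3333-4444-555555555555'
    results = api.child_zone_helper(
        zone_list,  # e.g. from db.zone_get_all(context)
        api.wrap_novaclient_function(
            api._issue_novaclient_command, 'servers', 'pause', uuid))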

View File

@@ -93,6 +93,26 @@ class InstanceTypeFilter(HostFilter):
"""Use instance_type to filter hosts.""" """Use instance_type to filter hosts."""
return (self._full_name(), instance_type) return (self._full_name(), instance_type)
def _satisfies_extra_specs(self, capabilities, instance_type):
"""Check that the capabilities provided by the compute service
satisfy the extra specs associated with the instance type"""
if 'extra_specs' not in instance_type:
return True
# Note(lorinh): For now, we are just checking exact matching on the
# values. Later on, we want to handle numerical
# values so we can represent things like number of GPU cards
try:
for key, value in instance_type['extra_specs'].iteritems():
if capabilities[key] != value:
return False
except KeyError:
return False
return True
def filter_hosts(self, zone_manager, query): def filter_hosts(self, zone_manager, query):
"""Return a list of hosts that can create instance_type.""" """Return a list of hosts that can create instance_type."""
instance_type = query instance_type = query
@@ -103,7 +123,11 @@ class InstanceTypeFilter(HostFilter):
disk_bytes = capabilities['disk_available'] disk_bytes = capabilities['disk_available']
spec_ram = instance_type['memory_mb'] spec_ram = instance_type['memory_mb']
spec_disk = instance_type['local_gb'] spec_disk = instance_type['local_gb']
if host_ram_mb >= spec_ram and disk_bytes >= spec_disk: extra_specs = instance_type['extra_specs']
if host_ram_mb >= spec_ram and \
disk_bytes >= spec_disk and \
self._satisfies_extra_specs(capabilities, instance_type):
selected_hosts.append((host, capabilities)) selected_hosts.append((host, capabilities))
return selected_hosts return selected_hosts
@@ -227,8 +251,7 @@ class JsonFilter(HostFilter):
required_disk = instance_type['local_gb'] required_disk = instance_type['local_gb']
query = ['and', query = ['and',
['>=', '$compute.host_memory_free', required_ram], ['>=', '$compute.host_memory_free', required_ram],
['>=', '$compute.disk_available', required_disk], ['>=', '$compute.disk_available', required_disk]]
]
return (self._full_name(), json.dumps(query)) return (self._full_name(), json.dumps(query))
def _parse_string(self, string, host, services): def _parse_string(self, string, host, services):
@@ -305,8 +328,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
'instance_type': <InstanceType dict>} 'instance_type': <InstanceType dict>}
""" """
def filter_hosts(self, num, request_spec): def filter_hosts(self, topic, request_spec, hosts=None):
"""Filter the full host list (from the ZoneManager)""" """Filter the full host list (from the ZoneManager)"""
filter_name = request_spec.get('filter', None) filter_name = request_spec.get('filter', None)
host_filter = choose_host_filter(filter_name) host_filter = choose_host_filter(filter_name)
@@ -317,8 +341,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
name, query = host_filter.instance_type_to_filter(instance_type) name, query = host_filter.instance_type_to_filter(instance_type)
return host_filter.filter_hosts(self.zone_manager, query) return host_filter.filter_hosts(self.zone_manager, query)
def weigh_hosts(self, num, request_spec, hosts): def weigh_hosts(self, topic, request_spec, hosts):
"""Derived classes must override this method and return """Derived classes must override this method and return
a lists of hosts in [{weight, hostname}] format. a lists of hosts in [{weight, hostname}] format.
""" """
return [dict(weight=1, hostname=host) for host, caps in hosts] return [dict(weight=1, hostname=hostname, capabilities=caps)
for hostname, caps in hosts]
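
A worked example of the new extra_specs check with illustrative data
(assuming InstanceTypeFilter is imported from this module): a host that
advertises a Fermi GPU capability satisfies the GPU flavor, while a plain
host takes the KeyError path and is rejected.

    gpu_type = {'memory_mb': 50, 'local_gb': 500,
                'extra_specs': {'xpu_arch': 'fermi'}}
    gpu_host = {'host_memory_free': 1024, 'disk_available': 1000,
                'xpu_arch': 'fermi'}
    plain_host = {'host_memory_free': 1024, 'disk_available': 1000}

    hf = InstanceTypeFilter()
    print hf._satisfies_extra_specs(gpu_host, gpu_type)    # True
    print hf._satisfies_extra_specs(plain_host, gpu_type)  # False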

View File

@@ -48,25 +48,43 @@ def noop_cost_fn(host):
     return 1


-flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
+flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
              'How much weight to give the fill-first cost function')


-def fill_first_cost_fn(host):
+def compute_fill_first_cost_fn(host):
     """Prefer hosts that have less ram available, filter_hosts will exclude
     hosts that don't have enough ram"""
     hostname, caps = host
-    free_mem = caps['compute']['host_memory_free']
+    free_mem = caps['host_memory_free']
     return free_mem


 class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
-    def get_cost_fns(self):
+    def __init__(self, *args, **kwargs):
+        self.cost_fns_cache = {}
+        super(LeastCostScheduler, self).__init__(*args, **kwargs)
+
+    def get_cost_fns(self, topic):
         """Returns a list of tuples containing weights and cost functions to
         use for weighing hosts
         """
+        if topic in self.cost_fns_cache:
+            return self.cost_fns_cache[topic]
+
         cost_fns = []
         for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+            if '.' in cost_fn_str:
+                short_name = cost_fn_str.split('.')[-1]
+            else:
+                short_name = cost_fn_str
+                cost_fn_str = "%s.%s.%s" % (
+                        __name__, self.__class__.__name__, short_name)
+
+            if not (short_name.startswith('%s_' % topic) or
+                    short_name.startswith('noop')):
+                continue
+
             try:
                 # NOTE(sirp): import_class is somewhat misnamed since it can
@@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
             cost_fns.append((weight, cost_fn))

+        self.cost_fns_cache[topic] = cost_fns
         return cost_fns

-    def weigh_hosts(self, num, request_spec, hosts):
+    def weigh_hosts(self, topic, request_spec, hosts):
         """Returns a list of dictionaries of form:
-           [ {weight: weight, hostname: hostname} ]"""
+           [ {weight: weight, hostname: hostname, capabilities: capabs} ]
+        """

-        # FIXME(sirp): weigh_hosts should handle more than just instances
-        hostnames = [hostname for hostname, caps in hosts]
-
-        cost_fns = self.get_cost_fns()
+        cost_fns = self.get_cost_fns(topic)
         costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)

         weighted = []
         weight_log = []
-        for cost, hostname in zip(costs, hostnames):
+        for cost, (hostname, caps) in zip(costs, hosts):
             weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
-            weight_dict = dict(weight=cost, hostname=hostname)
+            weight_dict = dict(weight=cost, hostname=hostname,
+                               capabilities=caps)
             weighted.append(weight_dict)

         LOG.debug(_("Weighted Costs => %s") % weight_log)
@@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True):
         weighted_fns - list of weights and functions like:
             [(weight, objective-functions)]

-    Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts)
+    Returns an unsorted list of scores. To pair with hosts do:
+        zip(scores, hosts)
     """
     # Table of form:
     # { domain1: [score1, score2, ..., scoreM]
@@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True):
     domain_scores = []
     for idx in sorted(score_table):
         elem_score = sum(score_table[idx])
-        elem = domain[idx]
         domain_scores.append(elem_score)
     return domain_scores
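
Illustrative numbers for the fill-first behavior, ignoring weighted_sum's
normalization (assumes compute_fill_first_cost_fn from this module): an
emptier host scores a higher cost, so the fuller host sorts first when
weights are sorted ascending.

    hosts = [('host1', {'host_memory_free': 1 * 1024 ** 3}),
             ('host2', {'host_memory_free': 2 * 1024 ** 3})]
    cost_fns = [(1, compute_fill_first_cost_fn)]

    costs = [sum(w * fn(h) for w, fn in cost_fns) for h in hosts]
    print zip(costs, [h for h, caps in hosts])
    # host1 (less free ram) has the lower cost, so it is filled first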

View File

@@ -33,6 +33,7 @@ from nova import flags
 from nova import log as logging
 from nova import rpc

+from nova.compute import api as compute_api
 from nova.scheduler import api
 from nova.scheduler import driver
@@ -48,14 +49,25 @@
 class ZoneAwareScheduler(driver.Scheduler):
     """Base class for creating Zone Aware Schedulers."""

-    def _call_zone_method(self, context, method, specs):
+    def _call_zone_method(self, context, method, specs, zones):
         """Call novaclient zone method. Broken out for testing."""
-        return api.call_zone_method(context, method, specs=specs)
+        return api.call_zone_method(context, method, specs=specs, zones=zones)

-    def _provision_resource_locally(self, context, item, instance_id, kwargs):
+    def _provision_resource_locally(self, context, build_plan_item,
+                                    request_spec, kwargs):
         """Create the requested resource in this Zone."""
-        host = item['hostname']
+        host = build_plan_item['hostname']
+        base_options = request_spec['instance_properties']
+
+        # TODO(sandy): I guess someone needs to add block_device_mapping
+        # support at some point? Also, OS API has no concept of security
+        # groups.
+        instance = compute_api.API().create_db_entry_for_new_instance(context,
+            base_options, None, [])
+
+        instance_id = instance['id']
         kwargs['instance_id'] = instance_id
+
         rpc.cast(context,
                  db.queue_get_for(context, "compute", host),
                  {"method": "run_instance",
@@ -115,8 +127,8 @@
         nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
                             child_blob, reservation_id=reservation_id)

-    def _provision_resource_from_blob(self, context, item, instance_id,
-                                      request_spec, kwargs):
+    def _provision_resource_from_blob(self, context, build_plan_item,
+                                      instance_id, request_spec, kwargs):
         """Create the requested resource locally or in a child zone
            based on what is stored in the zone blob info.
@@ -132,12 +144,12 @@
            request."""

         host_info = None
-        if "blob" in item:
+        if "blob" in build_plan_item:
             # Request was passed in from above. Is it for us?
-            host_info = self._decrypt_blob(item['blob'])
-        elif "child_blob" in item:
+            host_info = self._decrypt_blob(build_plan_item['blob'])
+        elif "child_blob" in build_plan_item:
             # Our immediate child zone provided this info ...
-            host_info = item
+            host_info = build_plan_item

         if not host_info:
             raise InvalidBlob()
@@ -147,19 +159,44 @@
             self._ask_child_zone_to_create_instance(context, host_info,
                                                     request_spec, kwargs)
         else:
-            self._provision_resource_locally(context, host_info,
-                                             instance_id, kwargs)
+            self._provision_resource_locally(context, host_info, request_spec,
+                                             kwargs)

-    def _provision_resource(self, context, item, instance_id, request_spec,
-                            kwargs):
+    def _provision_resource(self, context, build_plan_item, instance_id,
+                            request_spec, kwargs):
         """Create the requested resource in this Zone or a child zone."""
-        if "hostname" in item:
-            self._provision_resource_locally(context, item, instance_id,
-                                             kwargs)
+        if "hostname" in build_plan_item:
+            self._provision_resource_locally(context, build_plan_item,
+                                             request_spec, kwargs)
             return
-        self._provision_resource_from_blob(context, item, instance_id,
-                                           request_spec, kwargs)
+        self._provision_resource_from_blob(context, build_plan_item,
+                                           instance_id, request_spec, kwargs)
+
+    def _adjust_child_weights(self, child_results, zones):
+        """Apply the Scale and Offset values from the Zone definition
+        to adjust the weights returned from the child zones. Alters
+        child_results in place.
+        """
+        for zone, result in child_results:
+            if not result:
+                continue
+
+            for zone_rec in zones:
+                if zone_rec['api_url'] != zone:
+                    continue
+
+                for item in result:
+                    try:
+                        offset = zone_rec['weight_offset']
+                        scale = zone_rec['weight_scale']
+                        raw_weight = item['weight']
+                        cooked_weight = offset + scale * raw_weight
+                        item['weight'] = cooked_weight
+                        item['raw_weight'] = raw_weight
+                    except KeyError:
+                        LOG.exception(_("Bad child zone scaling values "
+                                        "for Zone: %(zone)s") % locals())

     def schedule_run_instance(self, context, instance_id, request_spec,
                               *args, **kwargs):
@@ -180,18 +217,22 @@
                                                    request_spec, kwargs)
             return None

+        num_instances = request_spec.get('num_instances', 1)
+        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
+                locals())
+
         # Create build plan and provision ...
         build_plan = self.select(context, request_spec)
         if not build_plan:
             raise driver.NoValidHost(_('No hosts were available'))

-        for num in xrange(request_spec['num_instances']):
+        for num in xrange(num_instances):
             if not build_plan:
                 break

-            item = build_plan.pop(0)
-            self._provision_resource(context, item, instance_id, request_spec,
-                                     kwargs)
+            build_plan_item = build_plan.pop(0)
+            self._provision_resource(context, build_plan_item, instance_id,
                                      request_spec, kwargs)

         # Returning None short-circuits the routing to Compute (since
         # we've already done it here)
@@ -224,23 +265,43 @@
             raise NotImplemented(_("Zone Aware Scheduler only understands "
                                    "Compute nodes (for now)"))

-        #TODO(sandy): how to infer this from OS API params?
-        num_instances = 1
-
-        # Filter local hosts based on requirements ...
-        host_list = self.filter_hosts(num_instances, request_spec)
-
-        # TODO(sirp): weigh_hosts should also be a function of 'topic' or
-        # resources, so that we can apply different objective functions to it
+        num_instances = request_spec.get('num_instances', 1)
+        instance_type = request_spec['instance_type']

-        # then weigh the selected hosts.
-        # weighted = [{weight=weight, name=hostname}, ...]
-        weighted = self.weigh_hosts(num_instances, request_spec, host_list)
+        weighted = []
+        host_list = None
+
+        for i in xrange(num_instances):
+            # Filter local hosts based on requirements ...
+            #
+            # The first pass through here will pass 'None' as the
+            # host_list.. which tells the filter to build the full
+            # list of hosts.
+            # On a 2nd pass, the filter can modify the host_list with
+            # any updates it needs to make based on resources that
+            # may have been consumed from a previous build..
+            host_list = self.filter_hosts(topic, request_spec, host_list)
+            if not host_list:
+                LOG.warn(_("Filter returned no hosts after processing "
+                        "%(i)d of %(num_instances)d instances") % locals())
+                break
+
+            # then weigh the selected hosts.
+            # weighted = [{weight=weight, hostname=hostname,
+            #              capabilities=capabs}, ...]
+            weights = self.weigh_hosts(topic, request_spec, host_list)
+            weights.sort(key=operator.itemgetter('weight'))
+            best_weight = weights[0]
+            weighted.append(best_weight)
+            self.consume_resources(topic, best_weight['capabilities'],
+                                   instance_type)

         # Next, tack on the best weights from the child zones ...
         json_spec = json.dumps(request_spec)
+        all_zones = db.zone_get_all(context)
         child_results = self._call_zone_method(context, "select",
-                                               specs=json_spec)
+                                               specs=json_spec, zones=all_zones)
+        self._adjust_child_weights(child_results, all_zones)
         for child_zone, result in child_results:
             for weighting in result:
                 # Remember the child_zone so we can get back to
@@ -254,18 +315,65 @@
         weighted.sort(key=operator.itemgetter('weight'))
         return weighted

-    def filter_hosts(self, num, request_spec):
-        """Derived classes must override this method and return
-        a list of hosts in [(hostname, capability_dict)] format.
+    def compute_filter(self, hostname, capabilities, request_spec):
+        """Return whether or not we can schedule to this compute node.
+        Derived classes should override this and return True if the host
+        is acceptable for scheduling.
         """
-        # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
-        service_states = self.zone_manager.service_states
-        return [(host, services)
-                for host, services in service_states.iteritems()]
+        instance_type = request_spec['instance_type']
+        requested_mem = instance_type['memory_mb'] * 1024 * 1024
+        return capabilities['host_memory_free'] >= requested_mem

-    def weigh_hosts(self, num, request_spec, hosts):
+    def filter_hosts(self, topic, request_spec, host_list=None):
+        """Return a list of hosts which are acceptable for scheduling.
+        Return value should be a list of (hostname, capability_dict)s.
+        Derived classes may override this, but may find the
+        '<topic>_filter' function more appropriate.
+        """
+
+        def _default_filter(self, hostname, capabilities, request_spec):
+            """Default filter function if there's no <topic>_filter"""
+            # NOTE(sirp): The default logic is the equivalent to
+            # AllHostsFilter
+            return True
+
+        filter_func = getattr(self, '%s_filter' % topic, _default_filter)
+
+        if host_list is None:
+            first_run = True
+            host_list = self.zone_manager.service_states.iteritems()
+        else:
+            first_run = False
+
+        filtered_hosts = []
+        for host, services in host_list:
+            if first_run:
+                if topic not in services:
+                    continue
+                services = services[topic]
+            if filter_func(host, services, request_spec):
+                filtered_hosts.append((host, services))
+
+        return filtered_hosts
+
+    def weigh_hosts(self, topic, request_spec, hosts):
         """Derived classes may override this to provide more sophisticated
         scheduling objectives
         """
         # NOTE(sirp): The default logic is the same as the NoopCostFunction
-        return [dict(weight=1, hostname=host) for host, caps in hosts]
+        return [dict(weight=1, hostname=hostname, capabilities=capabilities)
+                for hostname, capabilities in hosts]
+
+    def compute_consume(self, capabilities, instance_type):
+        """Consume compute resources for selected host"""
+        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
+        capabilities['host_memory_free'] -= requested_mem
+
+    def consume_resources(self, topic, capabilities, instance_type):
+        """Consume resources for a specific host.  'host' is a tuple
+        of the hostname and the services"""
+        consume_func = getattr(self, '%s_consume' % topic, None)
+        if not consume_func:
+            return
+        consume_func(capabilities, instance_type)
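
A worked example of the child-zone weight adjustment above, with an
illustrative zone record: weight_offset and weight_scale let an admin bias
every result coming back from a child zone.

    zones = [dict(api_url='http://child.example.com',
                  weight_offset=1000.0, weight_scale=2.0)]
    child_results = [('http://child.example.com', [{'weight': 50}])]

    # After self._adjust_child_weights(child_results, zones):
    #   cooked_weight = offset + scale * raw_weight = 1000.0 + 2.0 * 50 = 1100.0
    #   child_results[0][1][0] == {'weight': 1100.0, 'raw_weight': 50}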

View File

@@ -115,6 +115,18 @@ class ZoneManager(object):
"""Return the list of zones we know about.""" """Return the list of zones we know about."""
return [zone.to_dict() for zone in self.zone_states.values()] return [zone.to_dict() for zone in self.zone_states.values()]
def get_host_list(self):
"""Returns a list of dicts for each host that the Zone Manager
knows about. Each dict contains the host_name and the service
for that host.
"""
all_hosts = self.service_states.keys()
ret = []
for host in self.service_states:
for svc in self.service_states[host]:
ret.append({"service": svc, "host_name": host})
return ret
def get_zone_capabilities(self, context): def get_zone_capabilities(self, context):
"""Roll up all the individual host info to generic 'service' """Roll up all the individual host info to generic 'service'
capabilities. Each capability is aggregated into capabilities. Each capability is aggregated into
@@ -127,13 +139,15 @@ class ZoneManager(object):
combined = {} # { <service>_<cap> : (min, max), ... } combined = {} # { <service>_<cap> : (min, max), ... }
for host, host_dict in hosts_dict.iteritems(): for host, host_dict in hosts_dict.iteritems():
for service_name, service_dict in host_dict.iteritems(): for service_name, service_dict in host_dict.iteritems():
if not service_dict.get("enabled", True):
# Service is disabled; do no include it
continue
for cap, value in service_dict.iteritems(): for cap, value in service_dict.iteritems():
key = "%s_%s" % (service_name, cap) key = "%s_%s" % (service_name, cap)
min_value, max_value = combined.get(key, (value, value)) min_value, max_value = combined.get(key, (value, value))
min_value = min(min_value, value) min_value = min(min_value, value)
max_value = max(max_value, value) max_value = max(max_value, value)
combined[key] = (min_value, max_value) combined[key] = (min_value, max_value)
return combined return combined
def _refresh_from_db(self, context): def _refresh_from_db(self, context):
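
An illustrative rollup for get_zone_capabilities() with assumed host data:
each capability collapses to a (min, max) pair across hosts, and a disabled
service is skipped entirely.

    hosts_dict = {
        'host1': {'compute': {'host_memory_free': 1024}},
        'host2': {'compute': {'host_memory_free': 2048}},
        'host3': {'compute': {'enabled': False, 'host_memory_free': 9999}},
    }
    # Rolling these up yields:
    #   {'compute_host_memory_free': (1024, 2048)}
    # host3 is ignored because its compute service reports enabled=False.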

View File

@@ -0,0 +1,19 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Openstack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
from nova.tests import *

View File

@@ -67,7 +67,18 @@ class HostFilterTestCase(test.TestCase):
                                        flavorid=1,
                                        swap=500,
                                        rxtx_quota=30000,
-                                       rxtx_cap=200)
+                                       rxtx_cap=200,
+                                       extra_specs={})
+        self.gpu_instance_type = dict(name='tiny.gpu',
+                                      memory_mb=50,
+                                      vcpus=10,
+                                      local_gb=500,
+                                      flavorid=2,
+                                      swap=500,
+                                      rxtx_quota=30000,
+                                      rxtx_cap=200,
+                                      extra_specs={'xpu_arch': 'fermi',
+                                                   'xpu_info': 'Tesla 2050'})

         self.zone_manager = FakeZoneManager()
         states = {}
@@ -75,6 +86,18 @@
             states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
         self.zone_manager.service_states = states

+        # Add some extra capabilities to some hosts
+        host07 = self.zone_manager.service_states['host07']['compute']
+        host07['xpu_arch'] = 'fermi'
+        host07['xpu_info'] = 'Tesla 2050'
+
+        host08 = self.zone_manager.service_states['host08']['compute']
+        host08['xpu_arch'] = 'radeon'
+
+        host09 = self.zone_manager.service_states['host09']['compute']
+        host09['xpu_arch'] = 'fermi'
+        host09['xpu_info'] = 'Tesla 2150'
+
     def tearDown(self):
         FLAGS.default_host_filter = self.old_flag
@@ -116,6 +139,17 @@
         self.assertEquals('host05', just_hosts[0])
         self.assertEquals('host10', just_hosts[5])

+    def test_instance_type_filter_extra_specs(self):
+        hf = host_filter.InstanceTypeFilter()
+        # filter all hosts that can support 50 ram and 500 disk
+        name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
+        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
+                          name)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
+        self.assertEquals(1, len(hosts))
+        just_hosts = [host for host, caps in hosts]
+        self.assertEquals('host07', just_hosts[0])
+
     def test_json_filter(self):
         hf = host_filter.JsonFilter()
         # filter all hosts that can support 50 ram and 500 disk

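For readers following the new test: InstanceTypeFilter now has to honor a flavor's extra_specs, and only host07 advertises both the exact xpu_arch and xpu_info values of the GPU flavor. A toy version of that capability match (names assumed, not Nova's actual code):

    def satisfies_extra_specs(host_caps, extra_specs):
        # every extra_specs entry must equal the host's advertised value
        return all(host_caps.get(k) == v for k, v in extra_specs.items())

    gpu_specs = {'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2050'}
    hosts = {'host07': {'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2050'},
             'host08': {'xpu_arch': 'radeon'},
             'host09': {'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2150'}}

    matches = [name for name, caps in sorted(hosts.items())
               if satisfies_extra_specs(caps, gpu_specs)]
    assert matches == ['host07']  # mirrors the test's single-host result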

@@ -122,15 +122,16 @@ class LeastCostSchedulerTestCase(test.TestCase):
                     for hostname, caps in hosts]
         self.assertWeights(expected, num, request_spec, hosts)

-    def test_fill_first_cost_fn(self):
+    def test_compute_fill_first_cost_fn(self):
         FLAGS.least_cost_scheduler_cost_functions = [
-            'nova.scheduler.least_cost.fill_first_cost_fn',
+            'nova.scheduler.least_cost.compute_fill_first_cost_fn',
         ]
-        FLAGS.fill_first_cost_fn_weight = 1
+        FLAGS.compute_fill_first_cost_fn_weight = 1
         num = 1
-        request_spec = {}
-        hosts = self.sched.filter_hosts(num, request_spec)
+        instance_type = {'memory_mb': 1024}
+        request_spec = {'instance_type': instance_type}
+        hosts = self.sched.filter_hosts('compute', request_spec, None)

         expected = []
         for idx, (hostname, caps) in enumerate(hosts):

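The rename reflects what the cost function actually does: with compute_fill_first_cost_fn_weight set to 1 and lower cost winning, hosts are packed tightly by picking the candidate with the least free memory that still fits the request. A rough sketch under those assumptions (not the scheduler's exact code):

    def compute_fill_first_cost_fn(host_caps):
        # cost = free memory; the emptiest host costs the most
        return host_caps['host_memory_free']

    hosts = {'host1': {'host_memory_free': 1073741824},   # 1 GB free
             'host2': {'host_memory_free': 2147483648}}   # 2 GB free
    requested = 1024 * 1024 * 1024                        # 1 GB instance

    candidates = [h for h, caps in hosts.items()
                  if caps['host_memory_free'] >= requested]
    best = min(candidates, key=lambda h: hosts[h]['host_memory_free'])
    assert best == 'host1'  # fill the fuller host first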

@@ -16,6 +16,8 @@
 Tests For Zone Aware Scheduler.
 """

+import nova.db
+
 from nova import exception
 from nova import test
 from nova.scheduler import driver
@@ -55,29 +57,21 @@ def fake_zone_manager_service_states(num_hosts):

 class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
-    def filter_hosts(self, num, specs):
-        # NOTE(sirp): this is returning [(hostname, services)]
-        return self.zone_manager.service_states.items()
-
-    def weigh_hosts(self, num, specs, hosts):
-        fake_weight = 99
-        weighted = []
-        for hostname, caps in hosts:
-            weighted.append(dict(weight=fake_weight, name=hostname))
-        return weighted
+    # No need to stub anything at the moment
+    pass


 class FakeZoneManager(zone_manager.ZoneManager):
     def __init__(self):
         self.service_states = {
             'host1': {
-                'compute': {'ram': 1000},
+                'compute': {'host_memory_free': 1073741824},
             },
             'host2': {
-                'compute': {'ram': 2000},
+                'compute': {'host_memory_free': 2147483648},
             },
             'host3': {
-                'compute': {'ram': 3000},
+                'compute': {'host_memory_free': 3221225472},
             },
         }
@@ -87,7 +81,7 @@ class FakeEmptyZoneManager(zone_manager.ZoneManager):
         self.service_states = {}


-def fake_empty_call_zone_method(context, method, specs):
+def fake_empty_call_zone_method(context, method, specs, zones):
     return []
@@ -106,7 +100,7 @@ def fake_ask_child_zone_to_create_instance(context, zone_info,
     was_called = True


-def fake_provision_resource_locally(context, item, instance_id, kwargs):
+def fake_provision_resource_locally(context, build_plan, request_spec, kwargs):
     global was_called
     was_called = True
@@ -126,7 +120,7 @@ def fake_decrypt_blob_returns_child_info(blob):
             'child_blob': True}  # values aren't important. Keys are.


-def fake_call_zone_method(context, method, specs):
+def fake_call_zone_method(context, method, specs, zones):
     return [
         ('zone1', [
             dict(weight=1, blob='AAAAAAA'),
@@ -149,28 +143,67 @@ def fake_call_zone_method(context, method, specs):
     ]


+def fake_zone_get_all(context):
+    return [
+        dict(id=1, api_url='zone1',
+             username='admin', password='password',
+             weight_offset=0.0, weight_scale=1.0),
+        dict(id=2, api_url='zone2',
+             username='admin', password='password',
+             weight_offset=1000.0, weight_scale=1.0),
+        dict(id=3, api_url='zone3',
+             username='admin', password='password',
+             weight_offset=0.0, weight_scale=1000.0),
+    ]
+
+
 class ZoneAwareSchedulerTestCase(test.TestCase):
     """Test case for Zone Aware Scheduler."""

     def test_zone_aware_scheduler(self):
         """
-        Create a nested set of FakeZones, ensure that a select call returns the
-        appropriate build plan.
+        Create a nested set of FakeZones, try to build multiple instances
+        and ensure that a select call returns the appropriate build plan.
         """
         sched = FakeZoneAwareScheduler()
         self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
+        self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)

         zm = FakeZoneManager()
         sched.set_zone_manager(zm)

         fake_context = {}
-        build_plan = sched.select(fake_context, {})
+        build_plan = sched.select(fake_context,
+                                  {'instance_type': {'memory_mb': 512},
+                                   'num_instances': 4})

-        self.assertEqual(15, len(build_plan))
+        # 4 from local zones, 12 from remotes
+        self.assertEqual(16, len(build_plan))

-        hostnames = [plan_item['name']
-                     for plan_item in build_plan if 'name' in plan_item]
-        self.assertEqual(3, len(hostnames))
+        hostnames = [plan_item['hostname']
+                     for plan_item in build_plan if 'hostname' in plan_item]
+        # 4 local hosts
+        self.assertEqual(4, len(hostnames))
+
+    def test_adjust_child_weights(self):
+        """Make sure the weights returned by child zones are
+        properly adjusted based on the scale/offset in the zone
+        db entries.
+        """
+        sched = FakeZoneAwareScheduler()
+        child_results = fake_call_zone_method(None, None, None, None)
+        zones = fake_zone_get_all(None)
+        sched._adjust_child_weights(child_results, zones)
+        scaled = [130000, 131000, 132000, 3000]
+        for zone, results in child_results:
+            for item in results:
+                w = item['weight']
+                if zone == 'zone1':  # No change
+                    self.assertTrue(w < 1000.0)
+                if zone == 'zone2':  # Offset +1000
+                    self.assertTrue(w >= 1000.0 and w < 2000)
+                if zone == 'zone3':  # Scale x1000
+                    self.assertEqual(scaled.pop(0), w)

     def test_empty_zone_aware_scheduler(self):
         """
@@ -178,6 +211,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         """
         sched = FakeZoneAwareScheduler()
         self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
+        self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)

         zm = FakeEmptyZoneManager()
         sched.set_zone_manager(zm)
@@ -185,8 +219,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         fake_context = {}
         self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
                           fake_context, 1,
-                          dict(host_filter=None,
-                               request_spec={'instance_type': {}}))
+                          dict(host_filter=None, instance_type={}))

     def test_schedule_do_not_schedule_with_hint(self):
         """

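test_adjust_child_weights pins down how a parent zone normalizes weights coming back from children: each raw weight is passed through the per-zone weight_offset and weight_scale stored in the zones table. Assuming the linear form the fixtures imply:

    def adjust(raw_weight, weight_offset, weight_scale):
        return weight_offset + weight_scale * raw_weight

    assert adjust(1, 0.0, 1.0) == 1.0            # zone1: unchanged
    assert adjust(1, 1000.0, 1.0) == 1001.0      # zone2: offset +1000
    assert adjust(130, 0.0, 1000.0) == 130000.0  # zone3: scale x1000

This lets an operator bias placement away from a child zone (offset) or proportionally penalize it (scale) without touching the child's own cost functions.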

@@ -56,7 +56,6 @@ class AdminApiTestCase(test.TestCase):
         self.project = self.manager.create_project('proj', 'admin', 'proj')
         self.context = context.RequestContext(user=self.user,
                                               project=self.project)
-        host = self.network.get_network_host(self.context.elevated())

         def fake_show(meh, context, id):
             return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
@@ -75,9 +74,6 @@ class AdminApiTestCase(test.TestCase):
         self.stubs.Set(rpc, 'cast', finish_cast)

     def tearDown(self):
-        network_ref = db.project_get_network(self.context,
-                                             self.project.id)
-        db.network_disassociate(self.context, network_ref['id'])
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
         super(AdminApiTestCase, self).tearDown()


@@ -25,6 +25,7 @@ from nova import log as logging
 from nova import test
 from nova.auth import manager
 from nova.api.ec2 import cloud
+from nova.auth import fakeldap

 FLAGS = flags.FLAGS
 LOG = logging.getLogger('nova.tests.auth_unittest')
@@ -369,6 +370,15 @@ class _AuthManagerBaseTestCase(test.TestCase):
 class AuthManagerLdapTestCase(_AuthManagerBaseTestCase):
     auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'

+    def test_reconnect_on_server_failure(self):
+        self.manager.get_users()
+        fakeldap.server_fail = True
+        try:
+            self.assertRaises(fakeldap.SERVER_DOWN, self.manager.get_users)
+        finally:
+            fakeldap.server_fail = False
+        self.manager.get_users()
+

 class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
     auth_driver = 'nova.auth.dbdriver.DbDriver'

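The new LDAP test is a small fault-injection pattern: a module-level flag in the fake driver makes every call raise, and the try/finally guarantees the flag is cleared so later tests see a healthy server again. The shape of the pattern, reduced to a self-contained sketch:

    server_fail = False

    class ServerDown(Exception):
        pass  # stands in for fakeldap.SERVER_DOWN

    def get_users():
        if server_fail:
            raise ServerDown()
        return ['fake-user']

    get_users()                  # healthy before the failure
    server_fail = True
    try:
        try:
            get_users()
        except ServerDown:
            pass                 # expected while the flag is set
    finally:
        server_fail = False      # always restore for later tests
    get_users()                  # usable again once the failure clears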

@@ -64,7 +64,7 @@ class CloudTestCase(test.TestCase):
         self.project = self.manager.create_project('proj', 'admin', 'proj')
         self.context = context.RequestContext(user=self.user,
                                               project=self.project)
-        host = self.network.get_network_host(self.context.elevated())
+        host = self.network.host

         def fake_show(meh, context, id):
             return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
@@ -83,9 +83,10 @@ class CloudTestCase(test.TestCase):
         self.stubs.Set(rpc, 'cast', finish_cast)

     def tearDown(self):
-        network_ref = db.project_get_network(self.context,
-                                             self.project.id)
-        db.network_disassociate(self.context, network_ref['id'])
+        networks = db.project_get_networks(self.context, self.project.id,
+                                           associate=False)
+        for network in networks:
+            db.network_disassociate(self.context, network['id'])
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
         super(CloudTestCase, self).tearDown()
@@ -116,6 +117,7 @@ class CloudTestCase(test.TestCase):
                                      public_ip=address)
         db.floating_ip_destroy(self.context, address)

+    @test.skip_test("Skipping this pending future merge")
     def test_allocate_address(self):
         address = "10.10.10.10"
         allocate = self.cloud.allocate_address
@@ -128,6 +130,7 @@ class CloudTestCase(test.TestCase):
                           allocate,
                           self.context)

+    @test.skip_test("Skipping this pending future merge")
     def test_associate_disassociate_address(self):
         """Verifies associate runs cleanly without raising an exception"""
         address = "10.10.10.10"
@@ -135,8 +138,27 @@ class CloudTestCase(test.TestCase):
                        {'address': address,
                         'host': self.network.host})
         self.cloud.allocate_address(self.context)
-        inst = db.instance_create(self.context, {'host': self.compute.host})
-        fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
+        # TODO(jkoelker) Probably need to query for instance_type_id and
+        #                make sure we get a valid one
+        inst = db.instance_create(self.context, {'host': self.compute.host,
+                                                 'instance_type_id': 1})
+        networks = db.network_get_all(self.context)
+        for network in networks:
+            self.network.set_network_host(self.context, network['id'])
+        project_id = self.context.project_id
+        type_id = inst['instance_type_id']
+        ips = self.network.allocate_for_instance(self.context,
+                                                 instance_id=inst['id'],
+                                                 instance_type_id=type_id,
+                                                 project_id=project_id)
+        # TODO(jkoelker) Make this mas bueno
+        self.assertTrue(ips)
+        self.assertTrue('ips' in ips[0][1])
+        self.assertTrue(ips[0][1]['ips'])
+        self.assertTrue('ip' in ips[0][1]['ips'][0])
+        fixed = ips[0][1]['ips'][0]['ip']
+
         ec2_id = ec2utils.id_to_ec2_id(inst['id'])
         self.cloud.associate_address(self.context,
                                      instance_id=ec2_id,
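The chain of assertTrue calls above is unpacking the nested value that the new allocate_for_instance path hands back: a list of (network, info) pairs, where the info dict carries the per-VIF fixed IPs. The assumed shape, reduced to a literal:

    ips = [({'bridge': 'br100'},                 # network entry
            {'ips': [{'ip': '10.0.0.2'}]})]      # per-VIF info entry
    assert 'ips' in ips[0][1]
    fixed = ips[0][1]['ips'][0]['ip']            # first fixed address
    assert fixed == '10.0.0.2'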
@@ -165,6 +187,102 @@ class CloudTestCase(test.TestCase):
                          sec['name'])
         db.security_group_destroy(self.context, sec['id'])

+    def test_describe_security_groups_by_id(self):
+        sec = db.security_group_create(self.context,
+                                       {'project_id': self.context.project_id,
+                                        'name': 'test'})
+        result = self.cloud.describe_security_groups(self.context,
+                                                     group_id=[sec['id']])
+        self.assertEqual(len(result['securityGroupInfo']), 1)
+        self.assertEqual(
+                result['securityGroupInfo'][0]['groupName'],
+                sec['name'])
+        default = db.security_group_get_by_name(self.context,
+                                                self.context.project_id,
+                                                'default')
+        result = self.cloud.describe_security_groups(self.context,
+                                                     group_id=[default['id']])
+        self.assertEqual(len(result['securityGroupInfo']), 1)
+        self.assertEqual(
+                result['securityGroupInfo'][0]['groupName'],
+                'default')
+        db.security_group_destroy(self.context, sec['id'])
+
+    def test_create_delete_security_group(self):
+        descript = 'test description'
+        create = self.cloud.create_security_group
+        result = create(self.context, 'testgrp', descript)
+        group_descript = result['securityGroupSet'][0]['groupDescription']
+        self.assertEqual(descript, group_descript)
+        delete = self.cloud.delete_security_group
+        self.assertTrue(delete(self.context, 'testgrp'))
+
+    def test_delete_security_group_by_id(self):
+        sec = db.security_group_create(self.context,
+                                       {'project_id': self.context.project_id,
+                                        'name': 'test'})
+        delete = self.cloud.delete_security_group
+        self.assertTrue(delete(self.context, group_id=sec['id']))
+
+    def test_delete_security_group_with_bad_name(self):
+        delete = self.cloud.delete_security_group
+        notfound = exception.SecurityGroupNotFound
+        self.assertRaises(notfound, delete, self.context, 'badname')
+
+    def test_delete_security_group_with_bad_group_id(self):
+        delete = self.cloud.delete_security_group
+        notfound = exception.SecurityGroupNotFound
+        self.assertRaises(notfound, delete, self.context, group_id=999)
+
+    def test_delete_security_group_no_params(self):
+        delete = self.cloud.delete_security_group
+        self.assertRaises(exception.ApiError, delete, self.context)
+
+    def test_authorize_revoke_security_group_ingress(self):
+        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+        sec = db.security_group_create(self.context, kwargs)
+        authz = self.cloud.authorize_security_group_ingress
+        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+        authz(self.context, group_name=sec['name'], **kwargs)
+        revoke = self.cloud.revoke_security_group_ingress
+        self.assertTrue(revoke(self.context, group_name=sec['name'],
+                               **kwargs))
+
+    def test_authorize_revoke_security_group_ingress_by_id(self):
+        sec = db.security_group_create(self.context,
+                                       {'project_id': self.context.project_id,
+                                        'name': 'test'})
+        authz = self.cloud.authorize_security_group_ingress
+        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+        authz(self.context, group_id=sec['id'], **kwargs)
+        revoke = self.cloud.revoke_security_group_ingress
+        self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
+
+    def test_authorize_security_group_ingress_missing_protocol_params(self):
+        sec = db.security_group_create(self.context,
+                                       {'project_id': self.context.project_id,
+                                        'name': 'test'})
+        authz = self.cloud.authorize_security_group_ingress
+        self.assertRaises(exception.ApiError, authz, self.context, 'test')
+
+    def test_authorize_security_group_ingress_missing_group_name_or_id(self):
+        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+        authz = self.cloud.authorize_security_group_ingress
+        self.assertRaises(exception.ApiError, authz, self.context, **kwargs)
+
+    def test_authorize_security_group_ingress_already_exists(self):
+        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+        sec = db.security_group_create(self.context, kwargs)
+        authz = self.cloud.authorize_security_group_ingress
+        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+        authz(self.context, group_name=sec['name'], **kwargs)
+        self.assertRaises(exception.ApiError, authz, self.context,
+                          group_name=sec['name'], **kwargs)
+
+    def test_revoke_security_group_ingress_missing_group_name_or_id(self):
+        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+        revoke = self.cloud.revoke_security_group_ingress
+        self.assertRaises(exception.ApiError, revoke, self.context, **kwargs)
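A detail these ingress tests rely on: a rule is identified by the same keyword arguments that created it, so revoke simply replays the authorize kwargs. A toy model of that symmetry (not the EC2 API itself):

    rules = set()

    def authorize(group, ip_protocol, from_port, to_port):
        rules.add((group, ip_protocol, from_port, to_port))

    def revoke(group, ip_protocol, from_port, to_port):
        rules.discard((group, ip_protocol, from_port, to_port))
        return True

    kwargs = {'ip_protocol': 'tcp', 'from_port': '999', 'to_port': '999'}
    authorize('test', **kwargs)            # create the rule
    assert revoke('test', **kwargs)        # identical kwargs remove it
    assert not rules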
     def test_describe_volumes(self):
         """Makes sure describe_volumes works and filters results."""
         vol1 = db.volume_create(self.context, {})
@@ -217,6 +335,8 @@ class CloudTestCase(test.TestCase):
         db.service_destroy(self.context, service1['id'])
         db.service_destroy(self.context, service2['id'])

+    # NOTE(jkoelker): this test relies on fixed_ip being in instances
+    @test.skip_test("EC2 stuff needs fixed_ip in instance_ref")
     def test_describe_snapshots(self):
         """Makes sure describe_snapshots works and filters results."""
         vol = db.volume_create(self.context, {})
@@ -908,6 +1028,8 @@ class CloudTestCase(test.TestCase):
         self.assertEqual('c00l 1m4g3', inst['display_name'])
         db.instance_destroy(self.context, inst['id'])

+    # NOTE(jkoelker): This test relies on mac_address in instance
+    @test.skip_test("EC2 stuff needs mac_address in instance_ref")
     def test_update_of_instance_wont_update_private_fields(self):
         inst = db.instance_create(self.context, {})
         ec2_id = ec2utils.id_to_ec2_id(inst['id'])
@@ -971,6 +1093,7 @@ class CloudTestCase(test.TestCase):
         elevated = self.context.elevated(read_deleted=True)
         self._wait_for_state(elevated, instance_id, is_deleted)

+    @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
     def test_stop_start_instance(self):
         """Makes sure stop/start instance works"""
         # enforce periodic tasks run in short time to avoid wait for 60s.
@@ -1028,6 +1151,7 @@ class CloudTestCase(test.TestCase):
         self.assertEqual(vol['status'], "available")
         self.assertEqual(vol['attach_status'], "detached")

+    @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
     def test_stop_start_with_volume(self):
         """Make sure run instance with block device mapping works"""

@@ -1096,6 +1220,7 @@ class CloudTestCase(test.TestCase):

         self._restart_compute_service()

+    @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
     def test_stop_with_attached_volume(self):
         """Make sure attach info is reflected to block device mapping"""
         # enforce periodic tasks run in short time to avoid wait for 60s.
@@ -1171,6 +1296,7 @@ class CloudTestCase(test.TestCase):
             greenthread.sleep(0.3)
         return result['snapshotId']

+    @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
     def test_run_with_snapshot(self):
         """Makes sure run/stop/start instance with snapshot works."""
         vol = self._volume_create()


@@ -37,6 +37,7 @@ from nova import log as logging
 from nova import rpc
 from nova import test
 from nova import utils
+from nova.notifier import test_notifier

 LOG = logging.getLogger('nova.tests.compute')
 FLAGS = flags.FLAGS
@@ -62,6 +63,7 @@ class ComputeTestCase(test.TestCase):
         super(ComputeTestCase, self).setUp()
         self.flags(connection_type='fake',
                    stub_network=True,
+                   notification_driver='nova.notifier.test_notifier',
                    network_manager='nova.network.manager.FlatManager')
         self.compute = utils.import_object(FLAGS.compute_manager)
         self.compute_api = compute.API()
@@ -69,6 +71,7 @@ class ComputeTestCase(test.TestCase):
         self.user = self.manager.create_user('fake', 'fake', 'fake')
         self.project = self.manager.create_project('fake', 'fake', 'fake')
         self.context = context.RequestContext('fake', 'fake', False)
+        test_notifier.NOTIFICATIONS = []

         def fake_show(meh, context, id):
             return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
@@ -90,7 +93,6 @@ class ComputeTestCase(test.TestCase):
         inst['project_id'] = self.project.id
         type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
         inst['instance_type_id'] = type_id
-        inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = 0
         inst.update(params)
         return db.instance_create(self.context, inst)['id']
@@ -128,7 +130,7 @@ class ComputeTestCase(test.TestCase):
         instance_ref = models.Instance()
         instance_ref['id'] = 1
         instance_ref['volumes'] = [vol1, vol2]
-        instance_ref['hostname'] = 'i-00000001'
+        instance_ref['hostname'] = 'hostname-1'
         instance_ref['host'] = 'dummy'
         return instance_ref

@@ -160,6 +162,18 @@ class ComputeTestCase(test.TestCase):
         db.security_group_destroy(self.context, group['id'])
         db.instance_destroy(self.context, ref[0]['id'])

+    def test_default_hostname_generator(self):
+        cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'),
+                 ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')]
+        for display_name, hostname in cases:
+            ref = self.compute_api.create(self.context,
+                instance_types.get_default_instance_type(), None,
+                display_name=display_name)
+            try:
+                self.assertEqual(ref[0]['hostname'], hostname)
+            finally:
+                db.instance_destroy(self.context, ref[0]['id'])
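The three cases encode the hostname policy: a missing display_name falls back to server_<instance id>, and anything else is sanitized into a DNS-ish token. A hedged approximation of that sanitization (not Nova's exact code) that satisfies both non-None cases:

    import re

    def sanitize_hostname(display_name):
        name = display_name.lower()
        name = re.sub(r'\s+', '_', name)        # whitespace -> underscore
        name = re.sub(r'[^\w.-]', '', name)     # drop other characters
        return name.strip('_')

    assert sanitize_hostname('Hello, Server!') == 'hello_server'
    assert sanitize_hostname('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>') == 'hello'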
     def test_destroy_instance_disassociates_security_groups(self):
         """Make sure destroying disassociates security groups"""
         group = self._create_group()
@@ -327,6 +341,50 @@ class ComputeTestCase(test.TestCase):
         self.assert_(console)
         self.compute.terminate_instance(self.context, instance_id)

+    def test_run_instance_usage_notification(self):
+        """Ensure run instance generates appropriate usage notification"""
+        instance_id = self._create_instance()
+        self.compute.run_instance(self.context, instance_id)
+        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+        msg = test_notifier.NOTIFICATIONS[0]
+        self.assertEquals(msg['priority'], 'INFO')
+        self.assertEquals(msg['event_type'], 'compute.instance.create')
+        payload = msg['payload']
+        self.assertEquals(payload['tenant_id'], self.project.id)
+        self.assertEquals(payload['user_id'], self.user.id)
+        self.assertEquals(payload['instance_id'], instance_id)
+        self.assertEquals(payload['instance_type'], 'm1.tiny')
+        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+        self.assertEquals(str(payload['instance_type_id']), str(type_id))
+        self.assertTrue('display_name' in payload)
+        self.assertTrue('created_at' in payload)
+        self.assertTrue('launched_at' in payload)
+        self.assertEquals(payload['image_ref'], '1')
+        self.compute.terminate_instance(self.context, instance_id)
+
+    def test_terminate_usage_notification(self):
+        """Ensure terminate_instance generates appropriate usage
+        notification"""
+        instance_id = self._create_instance()
+        self.compute.run_instance(self.context, instance_id)
+        test_notifier.NOTIFICATIONS = []
+        self.compute.terminate_instance(self.context, instance_id)
+        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+        msg = test_notifier.NOTIFICATIONS[0]
+        self.assertEquals(msg['priority'], 'INFO')
+        self.assertEquals(msg['event_type'], 'compute.instance.delete')
+        payload = msg['payload']
+        self.assertEquals(payload['tenant_id'], self.project.id)
+        self.assertEquals(payload['user_id'], self.user.id)
+        self.assertEquals(payload['instance_id'], instance_id)
+        self.assertEquals(payload['instance_type'], 'm1.tiny')
+        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+        self.assertEquals(str(payload['instance_type_id']), str(type_id))
+        self.assertTrue('display_name' in payload)
+        self.assertTrue('created_at' in payload)
+        self.assertTrue('launched_at' in payload)
+        self.assertEquals(payload['image_ref'], '1')
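Both tests depend on the nova.notifier.test_notifier driver configured in setUp: rather than publishing anywhere, it appends each message to a module-level list that the test resets and inspects. A sketch of that driver's contract, with the message shape the assertions expect:

    NOTIFICATIONS = []

    def notify(message):
        # the test driver only records; nothing leaves the process
        NOTIFICATIONS.append(message)

    notify({'priority': 'INFO',
            'event_type': 'compute.instance.create',
            'payload': {'instance_id': 42, 'instance_type': 'm1.tiny'}})
    assert len(NOTIFICATIONS) == 1
    assert NOTIFICATIONS[0]['event_type'] == 'compute.instance.create'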
     def test_run_instance_existing(self):
         """Ensure failure when running an instance that already exists"""
         instance_id = self._create_instance()
@@ -363,6 +421,7 @@ class ComputeTestCase(test.TestCase):
             pass

         self.stubs.Set(self.compute.driver, 'finish_resize', fake)
+        self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)
         context = self.context.elevated()
         instance_id = self._create_instance()
         self.compute.prep_resize(context, instance_id, 1)
@@ -378,6 +437,36 @@ class ComputeTestCase(test.TestCase):
         self.compute.terminate_instance(self.context, instance_id)

+    def test_resize_instance_notification(self):
+        """Ensure notifications on instance migrate/resize"""
+        instance_id = self._create_instance()
+        context = self.context.elevated()
+
+        self.compute.run_instance(self.context, instance_id)
+        test_notifier.NOTIFICATIONS = []
+
+        db.instance_update(self.context, instance_id, {'host': 'foo'})
+        self.compute.prep_resize(context, instance_id, 1)
+        migration_ref = db.migration_get_by_instance_and_status(context,
+                instance_id, 'pre-migrating')
+
+        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+        msg = test_notifier.NOTIFICATIONS[0]
+        self.assertEquals(msg['priority'], 'INFO')
+        self.assertEquals(msg['event_type'], 'compute.instance.resize.prep')
+        payload = msg['payload']
+        self.assertEquals(payload['tenant_id'], self.project.id)
+        self.assertEquals(payload['user_id'], self.user.id)
+        self.assertEquals(payload['instance_id'], instance_id)
+        self.assertEquals(payload['instance_type'], 'm1.tiny')
+        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+        self.assertEquals(str(payload['instance_type_id']), str(type_id))
+        self.assertTrue('display_name' in payload)
+        self.assertTrue('created_at' in payload)
+        self.assertTrue('launched_at' in payload)
+        self.assertEquals(payload['image_ref'], '1')
+        self.compute.terminate_instance(context, instance_id)
+
     def test_resize_instance(self):
         """Ensure instance can be migrated/resized"""
         instance_id = self._create_instance()
@@ -456,7 +545,7 @@ class ComputeTestCase(test.TestCase):
         dbmock = self.mox.CreateMock(db)
         dbmock.instance_get(c, i_id).AndReturn(instance_ref)
-        dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
+        dbmock.instance_get_fixed_addresses(c, i_id).AndReturn(None)

         self.compute.db = dbmock
         self.mox.ReplayAll()
@@ -476,7 +565,7 @@ class ComputeTestCase(test.TestCase):
         drivermock = self.mox.CreateMock(self.compute_driver)

         dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
-        dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+        dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
         for i in range(len(i_ref['volumes'])):
             vid = i_ref['volumes'][i]['id']
             volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
@@ -504,7 +593,7 @@ class ComputeTestCase(test.TestCase):
         drivermock = self.mox.CreateMock(self.compute_driver)

         dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
-        dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+        dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
         self.mox.StubOutWithMock(compute_manager.LOG, 'info')
         compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
         netmock.setup_compute_network(c, i_ref['id'])
@@ -534,7 +623,7 @@ class ComputeTestCase(test.TestCase):
         volmock = self.mox.CreateMock(self.volume_manager)

         dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
-        dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+        dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
         for i in range(len(i_ref['volumes'])):
             volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
         for i in range(FLAGS.live_migration_retry_count):


@@ -61,7 +61,6 @@ class ConsoleTestCase(test.TestCase):
         inst['user_id'] = self.user.id
         inst['project_id'] = self.project.id
         inst['instance_type_id'] = 1
-        inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = 0
         return db.instance_create(self.context, inst)['id']


@@ -105,24 +105,25 @@ class DirectTestCase(test.TestCase):
         self.assertEqual(rv['data'], 'baz')


-class DirectCloudTestCase(test_cloud.CloudTestCase):
-    def setUp(self):
-        super(DirectCloudTestCase, self).setUp()
-        compute_handle = compute.API(image_service=self.cloud.image_service)
-        volume_handle = volume.API()
-        network_handle = network.API()
-        direct.register_service('compute', compute_handle)
-        direct.register_service('volume', volume_handle)
-        direct.register_service('network', network_handle)
-
-        self.router = direct.JsonParamsMiddleware(direct.Router())
-        proxy = direct.Proxy(self.router)
-        self.cloud.compute_api = proxy.compute
-        self.cloud.volume_api = proxy.volume
-        self.cloud.network_api = proxy.network
-        compute_handle.volume_api = proxy.volume
-        compute_handle.network_api = proxy.network
-
-    def tearDown(self):
-        super(DirectCloudTestCase, self).tearDown()
-        direct.ROUTES = {}
+# NOTE(jkoelker): This fails using the EC2 api
+#class DirectCloudTestCase(test_cloud.CloudTestCase):
+#    def setUp(self):
+#        super(DirectCloudTestCase, self).setUp()
+#        compute_handle = compute.API(image_service=self.cloud.image_service)
+#        volume_handle = volume.API()
+#        network_handle = network.API()
+#        direct.register_service('compute', compute_handle)
+#        direct.register_service('volume', volume_handle)
+#        direct.register_service('network', network_handle)
+#
+#        self.router = direct.JsonParamsMiddleware(direct.Router())
+#        proxy = direct.Proxy(self.router)
+#        self.cloud.compute_api = proxy.compute
+#        self.cloud.volume_api = proxy.volume
+#        self.cloud.network_api = proxy.network
+#        compute_handle.volume_api = proxy.volume
+#        compute_handle.network_api = proxy.network
+#
+#    def tearDown(self):
+#        super(DirectCloudTestCase, self).tearDown()
+#        direct.ROUTES = {}


@@ -1,161 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for flat network code
"""
import netaddr
import os
import unittest

from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
from nova.tests.network import base

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')


class FlatNetworkTestCase(base.NetworkTestCase):
    """Test cases for network code"""
    def test_public_network_association(self):
        """Makes sure that we can allocate a public ip"""
        # TODO(vish): better way of adding floating ips
        self.context._project = self.projects[0]
        self.context.project_id = self.projects[0].id
        pubnet = netaddr.IPRange(flags.FLAGS.floating_range)
        address = str(list(pubnet)[0])
        try:
            db.floating_ip_get_by_address(context.get_admin_context(), address)
        except exception.NotFound:
            db.floating_ip_create(context.get_admin_context(),
                                  {'address': address,
                                   'host': FLAGS.host})

        self.assertRaises(NotImplementedError,
                          self.network.allocate_floating_ip,
                          self.context, self.projects[0].id)

        fix_addr = self._create_address(0)
        float_addr = address
        self.assertRaises(NotImplementedError,
                          self.network.associate_floating_ip,
                          self.context, float_addr, fix_addr)

        address = db.instance_get_floating_address(context.get_admin_context(),
                                                   self.instance_id)
        self.assertEqual(address, None)

        self.assertRaises(NotImplementedError,
                          self.network.disassociate_floating_ip,
                          self.context, float_addr)

        address = db.instance_get_floating_address(context.get_admin_context(),
                                                   self.instance_id)
        self.assertEqual(address, None)

        self.assertRaises(NotImplementedError,
                          self.network.deallocate_floating_ip,
                          self.context, float_addr)

        self.network.deallocate_fixed_ip(self.context, fix_addr)
        db.floating_ip_destroy(context.get_admin_context(), float_addr)

    def test_allocate_deallocate_fixed_ip(self):
        """Makes sure that we can allocate and deallocate a fixed ip"""
        address = self._create_address(0)
        self.assertTrue(self._is_allocated_in_project(address,
                                                      self.projects[0].id))
        self._deallocate_address(0, address)

        # check if the fixed ip address is really deallocated
        self.assertFalse(self._is_allocated_in_project(address,
                                                       self.projects[0].id))

    def test_side_effects(self):
        """Ensures allocating and releasing has no side effects"""
        address = self._create_address(0)
        address2 = self._create_address(1, self.instance2_id)

        self.assertTrue(self._is_allocated_in_project(address,
                                                      self.projects[0].id))
        self.assertTrue(self._is_allocated_in_project(address2,
                                                      self.projects[1].id))

        self._deallocate_address(0, address)
        self.assertFalse(self._is_allocated_in_project(address,
                                                       self.projects[0].id))

        # First address release shouldn't affect the second
        self.assertTrue(self._is_allocated_in_project(address2,
                                                      self.projects[0].id))

        self._deallocate_address(1, address2)
        self.assertFalse(self._is_allocated_in_project(address2,
                                                       self.projects[1].id))

    def test_ips_are_reused(self):
        """Makes sure that ip addresses that are deallocated get reused"""
        address = self._create_address(0)
        self.network.deallocate_fixed_ip(self.context, address)

        address2 = self._create_address(0)
        self.assertEqual(address, address2)
        self.network.deallocate_fixed_ip(self.context, address2)

    def test_too_many_addresses(self):
        """Test for a NoMoreAddresses exception when all fixed ips are used.
        """
        admin_context = context.get_admin_context()
        network = db.project_get_network(admin_context, self.projects[0].id)
        num_available_ips = db.network_count_available_ips(admin_context,
                                                           network['id'])
        addresses = []
        instance_ids = []
        for i in range(num_available_ips):
            instance_ref = self._create_instance(0)
            instance_ids.append(instance_ref['id'])
            address = self._create_address(0, instance_ref['id'])
            addresses.append(address)

        ip_count = db.network_count_available_ips(context.get_admin_context(),
                                                  network['id'])
        self.assertEqual(ip_count, 0)
        self.assertRaises(db.NoMoreAddresses,
                          self.network.allocate_fixed_ip,
                          self.context,
                          'foo')

        for i in range(num_available_ips):
            self.network.deallocate_fixed_ip(self.context, addresses[i])
            db.instance_destroy(context.get_admin_context(), instance_ids[i])
        ip_count = db.network_count_available_ips(context.get_admin_context(),
                                                  network['id'])
        self.assertEqual(ip_count, num_available_ips)

    def run(self, result=None):
        if(FLAGS.network_manager == 'nova.network.manager.FlatManager'):
            super(FlatNetworkTestCase, self).run(result)


@@ -67,7 +67,8 @@ class HostFilterTestCase(test.TestCase):
                                        flavorid=1,
                                        swap=500,
                                        rxtx_quota=30000,
-                                       rxtx_cap=200)
+                                       rxtx_cap=200,
+                                       extra_specs={})
         self.zone_manager = FakeZoneManager()
         states = {}

nova/tests/test_hosts.py (new file)

@@ -0,0 +1,102 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import stubout
import webob.exc

from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova.api.openstack.contrib import hosts as os_hosts
from nova.scheduler import api as scheduler_api

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.hosts')

# Simulate the hosts returned by the zone manager.
HOST_LIST = [
    {"host_name": "host_c1", "service": "compute"},
    {"host_name": "host_c2", "service": "compute"},
    {"host_name": "host_v1", "service": "volume"},
    {"host_name": "host_v2", "service": "volume"}]


def stub_get_host_list(req):
    return HOST_LIST


def stub_set_host_enabled(context, host, enabled):
    # We'll simulate success and failure by assuming
    # that 'host_c1' always succeeds, and 'host_c2'
    # always fails
    fail = (host == "host_c2")
    status = "enabled" if (enabled ^ fail) else "disabled"
    return status


class FakeRequest(object):
    environ = {"nova.context": context.get_admin_context()}


class HostTestCase(test.TestCase):
    """Test Case for hosts."""

    def setUp(self):
        super(HostTestCase, self).setUp()
        self.controller = os_hosts.HostController()
        self.req = FakeRequest()
        self.stubs.Set(scheduler_api, 'get_host_list', stub_get_host_list)
        self.stubs.Set(self.controller.compute_api, 'set_host_enabled',
                       stub_set_host_enabled)

    def test_list_hosts(self):
        """Verify that the compute hosts are returned."""
        hosts = os_hosts._list_hosts(self.req)
        self.assertEqual(hosts, HOST_LIST)

        compute_hosts = os_hosts._list_hosts(self.req, "compute")
        expected = [host for host in HOST_LIST
                    if host["service"] == "compute"]
        self.assertEqual(compute_hosts, expected)

    def test_disable_host(self):
        dis_body = {"status": "disable"}
        result_c1 = self.controller.update(self.req, "host_c1", body=dis_body)
        self.assertEqual(result_c1["status"], "disabled")
        result_c2 = self.controller.update(self.req, "host_c2", body=dis_body)
        self.assertEqual(result_c2["status"], "enabled")

    def test_enable_host(self):
        en_body = {"status": "enable"}
        result_c1 = self.controller.update(self.req, "host_c1", body=en_body)
        self.assertEqual(result_c1["status"], "enabled")
        result_c2 = self.controller.update(self.req, "host_c2", body=en_body)
        self.assertEqual(result_c2["status"], "disabled")

    def test_bad_status_value(self):
        bad_body = {"status": "bad"}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, "host_c1", body=bad_body)

    def test_bad_update_key(self):
        bad_body = {"crazy": "bad"}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, "host_c1", body=bad_body)

    def test_bad_host(self):
        self.assertRaises(exception.HostNotFound, self.controller.update,
                          self.req, "bogus_host_name",
                          body={"status": "disable"})

@@ -54,12 +54,12 @@ def _create_network_info(count=1, ipv6=None):
     fake_ip = '0.0.0.0/0'
     fake_ip_2 = '0.0.0.1/0'
     fake_ip_3 = '0.0.0.1/0'
-    network = {'gateway': fake,
-               'gateway_v6': fake,
-               'bridge': fake,
+    network = {'bridge': fake,
                'cidr': fake_ip,
                'cidr_v6': fake_ip}
     mapping = {'mac': fake,
+               'gateway': fake,
+               'gateway6': fake,
                'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
     if ipv6:
         mapping['ip6s'] = [{'ip': fake_ip},
@@ -68,6 +68,24 @@ def _create_network_info(count=1, ipv6=None):
     return [(network, mapping) for x in xrange(0, count)]


+def _setup_networking(instance_id, ip='1.2.3.4'):
+    ctxt = context.get_admin_context()
+    network_ref = db.project_get_networks(ctxt,
+                                          'fake',
+                                          associate=True)[0]
+    vif = {'address': '56:12:12:12:12:12',
+           'network_id': network_ref['id'],
+           'instance_id': instance_id}
+    vif_ref = db.virtual_interface_create(ctxt, vif)
+
+    fixed_ip = {'address': ip,
+                'network_id': network_ref['id'],
+                'virtual_interface_id': vif_ref['id']}
+    db.fixed_ip_create(ctxt, fixed_ip)
+    db.fixed_ip_update(ctxt, ip, {'allocated': True,
+                                  'instance_id': instance_id})
+
+
 class CacheConcurrencyTestCase(test.TestCase):
     def setUp(self):
         super(CacheConcurrencyTestCase, self).setUp()
@@ -155,11 +173,15 @@ class LibvirtConnTestCase(test.TestCase):
         FLAGS.instances_path = ''
         self.call_libvirt_dependant_setup = False

+    def tearDown(self):
+        self.manager.delete_project(self.project)
+        self.manager.delete_user(self.user)
+        super(LibvirtConnTestCase, self).tearDown()
+
     test_ip = '10.11.12.13'
     test_instance = {'memory_kb': '1024000',
                      'basepath': '/some/path',
                      'bridge_name': 'br100',
-                     'mac_address': '02:12:34:46:56:67',
                      'vcpus': 2,
                      'project_id': 'fake',
                      'bridge': 'br101',
@@ -241,6 +263,7 @@ class LibvirtConnTestCase(test.TestCase):

         return db.service_create(context.get_admin_context(), service_ref)

+    @test.skip_test("Please review this test to ensure intent")
     def test_preparing_xml_info(self):
         conn = connection.LibvirtConnection(True)
         instance_ref = db.instance_create(self.context, self.test_instance)
@@ -272,23 +295,27 @@ class LibvirtConnTestCase(test.TestCase):
         self.assertTrue(params.find('PROJNETV6') > -1)
         self.assertTrue(params.find('PROJMASKV6') > -1)

+    @test.skip_test("skipping libvirt tests depends on get_network_info shim")
     def test_xml_and_uri_no_ramdisk_no_kernel(self):
         instance_data = dict(self.test_instance)
         self._check_xml_and_uri(instance_data,
                                 expect_kernel=False, expect_ramdisk=False)

+    @test.skip_test("skipping libvirt tests depends on get_network_info shim")
     def test_xml_and_uri_no_ramdisk(self):
         instance_data = dict(self.test_instance)
         instance_data['kernel_id'] = 'aki-deadbeef'
         self._check_xml_and_uri(instance_data,
                                 expect_kernel=True, expect_ramdisk=False)

+    @test.skip_test("skipping libvirt tests depends on get_network_info shim")
     def test_xml_and_uri_no_kernel(self):
         instance_data = dict(self.test_instance)
         instance_data['ramdisk_id'] = 'ari-deadbeef'
         self._check_xml_and_uri(instance_data,
                                 expect_kernel=False, expect_ramdisk=False)

+    @test.skip_test("skipping libvirt tests depends on get_network_info shim")
     def test_xml_and_uri(self):
         instance_data = dict(self.test_instance)
         instance_data['ramdisk_id'] = 'ari-deadbeef'
@@ -296,6 +323,7 @@ class LibvirtConnTestCase(test.TestCase):
         self._check_xml_and_uri(instance_data,
                                 expect_kernel=True, expect_ramdisk=True)

+    @test.skip_test("skipping libvirt tests depends on get_network_info shim")
     def test_xml_and_uri_rescue(self):
         instance_data = dict(self.test_instance)
         instance_data['ramdisk_id'] = 'ari-deadbeef'
@@ -303,6 +331,7 @@ class LibvirtConnTestCase(test.TestCase):
         self._check_xml_and_uri(instance_data, expect_kernel=True,
                                 expect_ramdisk=True, rescue=True)

+    @test.skip_test("skipping libvirt tests depends on get_network_info shim")
     def test_lxc_container_and_uri(self):
         instance_data = dict(self.test_instance)
         self._check_xml_and_container(instance_data)
@@ -402,12 +431,18 @@ class LibvirtConnTestCase(test.TestCase):
         user_context = context.RequestContext(project=self.project,
                                               user=self.user)
         instance_ref = db.instance_create(user_context, instance)
-        host = self.network.get_network_host(user_context.elevated())
-        network_ref = db.project_get_network(context.get_admin_context(),
-                                             self.project.id)
+        # Re-get the instance so it's bound to an actual session
+        instance_ref = db.instance_get(user_context, instance_ref['id'])
+        network_ref = db.project_get_networks(context.get_admin_context(),
+                                              self.project.id)[0]

+        vif = {'address': '56:12:12:12:12:12',
+               'network_id': network_ref['id'],
+               'instance_id': instance_ref['id']}
+        vif_ref = db.virtual_interface_create(self.context, vif)
         fixed_ip = {'address': self.test_ip,
-                    'network_id': network_ref['id']}
+                    'network_id': network_ref['id'],
+                    'virtual_interface_id': vif_ref['id']}

         ctxt = context.get_admin_context()
         fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
@@ -442,18 +477,10 @@ class LibvirtConnTestCase(test.TestCase):
         user_context = context.RequestContext(project=self.project,
                                               user=self.user)
         instance_ref = db.instance_create(user_context, instance)
-        host = self.network.get_network_host(user_context.elevated())
-        network_ref = db.project_get_network(context.get_admin_context(),
-                                             self.project.id)
-
-        fixed_ip = {'address': self.test_ip,
-                    'network_id': network_ref['id']}
-
-        ctxt = context.get_admin_context()
-        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
-        db.fixed_ip_update(ctxt, self.test_ip,
-                           {'allocated': True,
-                            'instance_id': instance_ref['id']})
+        network_ref = db.project_get_networks(context.get_admin_context(),
+                                              self.project.id)[0]
+
+        _setup_networking(instance_ref['id'], ip=self.test_ip)

         type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
@@ -712,6 +739,7 @@ class LibvirtConnTestCase(test.TestCase):
             db.volume_destroy(self.context, volume_ref['id'])
             db.instance_destroy(self.context, instance_ref['id'])

+    @test.skip_test("test needs rewrite: instance no longer has mac_address")
     def test_spawn_with_network_info(self):
         # Skip if non-libvirt environment
         if not self.lazy_load_library_exists():
@@ -730,8 +758,8 @@ class LibvirtConnTestCase(test.TestCase):
         conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
         conn.firewall_driver.setattr('prepare_instance_filter', fake_none)

-        network = db.project_get_network(context.get_admin_context(),
-                                         self.project.id)
+        network = db.project_get_networks(context.get_admin_context(),
+                                          self.project.id)[0]
         ip_dict = {'ip': self.test_ip,
                    'netmask': network['netmask'],
                    'enabled': '1'}
@@ -756,11 +784,6 @@ class LibvirtConnTestCase(test.TestCase):
         ip = conn.get_host_ip_addr()
         self.assertEquals(ip, FLAGS.my_ip)

-    def tearDown(self):
-        self.manager.delete_project(self.project)
-        self.manager.delete_user(self.user)
-        super(LibvirtConnTestCase, self).tearDown()
class NWFilterFakes: class NWFilterFakes:
def __init__(self): def __init__(self):
@@ -866,19 +889,24 @@ class IptablesFirewallTestCase(test.TestCase):
return db.instance_create(self.context, return db.instance_create(self.context,
{'user_id': 'fake', {'user_id': 'fake',
'project_id': 'fake', 'project_id': 'fake',
'mac_address': '56:12:12:12:12:12',
'instance_type_id': 1}) 'instance_type_id': 1})
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_static_filters(self): def test_static_filters(self):
instance_ref = self._create_instance_ref() instance_ref = self._create_instance_ref()
ip = '10.11.12.13' ip = '10.11.12.13'
network_ref = db.project_get_network(self.context, network_ref = db.project_get_networks(self.context,
'fake') 'fake',
associate=True)[0]
vif = {'address': '56:12:12:12:12:12',
'network_id': network_ref['id'],
'instance_id': instance_ref['id']}
vif_ref = db.virtual_interface_create(self.context, vif)
fixed_ip = {'address': ip, fixed_ip = {'address': ip,
'network_id': network_ref['id']} 'network_id': network_ref['id'],
'virtual_interface_id': vif_ref['id']}
admin_ctxt = context.get_admin_context() admin_ctxt = context.get_admin_context()
db.fixed_ip_create(admin_ctxt, fixed_ip) db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
@@ -1015,6 +1043,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertEquals(ipv6_network_rules, self.assertEquals(ipv6_network_rules,
ipv6_rules_per_network * networks_count) ipv6_rules_per_network * networks_count)
@test.skip_test("skipping libvirt tests")
def test_do_refresh_security_group_rules(self): def test_do_refresh_security_group_rules(self):
instance_ref = self._create_instance_ref() instance_ref = self._create_instance_ref()
self.mox.StubOutWithMock(self.fw, self.mox.StubOutWithMock(self.fw,
@@ -1025,6 +1054,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.mox.ReplayAll() self.mox.ReplayAll()
self.fw.do_refresh_security_group_rules("fake") self.fw.do_refresh_security_group_rules("fake")
@test.skip_test("skip libvirt test project_get_network no longer exists")
def test_unfilter_instance_undefines_nwfilter(self): def test_unfilter_instance_undefines_nwfilter(self):
# Skip if non-libvirt environment # Skip if non-libvirt environment
if not self.lazy_load_library_exists(): if not self.lazy_load_library_exists():
@@ -1058,6 +1088,7 @@ class IptablesFirewallTestCase(test.TestCase):
db.instance_destroy(admin_ctxt, instance_ref['id']) db.instance_destroy(admin_ctxt, instance_ref['id'])
@test.skip_test("skip libvirt test project_get_network no longer exists")
def test_provider_firewall_rules(self): def test_provider_firewall_rules(self):
# setup basic instance data # setup basic instance data
instance_ref = self._create_instance_ref() instance_ref = self._create_instance_ref()
@@ -1207,7 +1238,6 @@ class NWFilterTestCase(test.TestCase):
return db.instance_create(self.context, return db.instance_create(self.context,
{'user_id': 'fake', {'user_id': 'fake',
'project_id': 'fake', 'project_id': 'fake',
'mac_address': '00:A0:C9:14:C8:29',
'instance_type_id': 1}) 'instance_type_id': 1})
def _create_instance_type(self, params={}): def _create_instance_type(self, params={}):
@@ -1225,6 +1255,7 @@ class NWFilterTestCase(test.TestCase):
inst.update(params) inst.update(params)
return db.instance_type_create(context, inst)['id'] return db.instance_type_create(context, inst)['id']
@test.skip_test('Skipping this test')
def test_creates_base_rule_first(self): def test_creates_base_rule_first(self):
# These come pre-defined by libvirt # These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing', self.defined_filters = ['no-mac-spoofing',
@@ -1258,13 +1289,15 @@ class NWFilterTestCase(test.TestCase):
ip = '10.11.12.13' ip = '10.11.12.13'
network_ref = db.project_get_network(self.context, 'fake') #network_ref = db.project_get_networks(self.context, 'fake')[0]
fixed_ip = {'address': ip, 'network_id': network_ref['id']} #fixed_ip = {'address': ip, 'network_id': network_ref['id']}
admin_ctxt = context.get_admin_context() #admin_ctxt = context.get_admin_context()
db.fixed_ip_create(admin_ctxt, fixed_ip) #db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, #db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': inst_id}) # 'instance_id': inst_id})
self._setup_networking(instance_ref['id'], ip=ip)
def _ensure_all_called(): def _ensure_all_called():
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
@@ -1299,6 +1332,7 @@ class NWFilterTestCase(test.TestCase):
"fake") "fake")
self.assertEquals(len(result), 3) self.assertEquals(len(result), 3)
@test.skip_test("skip libvirt test project_get_network no longer exists")
def test_unfilter_instance_undefines_nwfilters(self): def test_unfilter_instance_undefines_nwfilters(self):
admin_ctxt = context.get_admin_context() admin_ctxt = context.get_admin_context()
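
A note on the data-model shift the hunks above follow: the MAC address moves off the instance record and onto a virtual_interfaces row, and each fixed IP points back at a vif rather than the instance carrying a mac_address column. A minimal stand-alone sketch of that linkage, with plain dicts standing in for the nova db tables (all names here are illustrative, not nova's API):

# Illustrative only: dicts model the instances/vifs/fixed_ips tables.
vifs = {}
fixed_ips = {}

def virtual_interface_create(vif_id, address, network_id, instance_id):
    # The MAC ('address') now lives on the vif row.
    vifs[vif_id] = {'id': vif_id, 'address': address,
                    'network_id': network_id, 'instance_id': instance_id}
    return vifs[vif_id]

def fixed_ip_create(address, network_id, vif_id):
    # A fixed IP references the vif, not a mac on the instance.
    fixed_ips[address] = {'address': address, 'network_id': network_id,
                          'virtual_interface_id': vif_id}
    return fixed_ips[address]

vif = virtual_interface_create(0, '56:12:12:12:12:12', 0, 1)
fixed_ip_create('10.11.12.13', 0, vif['id'])
# An instance's MAC is reached through the fixed_ip -> vif chain:
assert vifs[fixed_ips['10.11.12.13']['virtual_interface_id']]['address'] \
    == '56:12:12:12:12:12'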
@@ -1,196 +1,240 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Rackspace
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-"""
-Unit Tests for network code
-"""
-import netaddr
-import os
-
+from nova import db
+from nova import flags
+from nova import log as logging
 from nova import test
-from nova.network import linux_net
-
-
-class IptablesManagerTestCase(test.TestCase):
-    sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
-                     '*filter',
-                     ':INPUT ACCEPT [2223527:305688874]',
-                     ':FORWARD ACCEPT [0:0]',
-                     ':OUTPUT ACCEPT [2172501:140856656]',
-                     ':nova-compute-FORWARD - [0:0]',
-                     ':nova-compute-INPUT - [0:0]',
-                     ':nova-compute-local - [0:0]',
-                     ':nova-compute-OUTPUT - [0:0]',
-                     ':nova-filter-top - [0:0]',
-                     '-A FORWARD -j nova-filter-top ',
-                     '-A OUTPUT -j nova-filter-top ',
-                     '-A nova-filter-top -j nova-compute-local ',
-                     '-A INPUT -j nova-compute-INPUT ',
-                     '-A OUTPUT -j nova-compute-OUTPUT ',
-                     '-A FORWARD -j nova-compute-FORWARD ',
-                     '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
-                     '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
-                     '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
-                     '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
-                     '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
-                     '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
-                     '-A FORWARD -o virbr0 -j REJECT --reject-with '
-                     'icmp-port-unreachable ',
-                     '-A FORWARD -i virbr0 -j REJECT --reject-with '
-                     'icmp-port-unreachable ',
-                     'COMMIT',
-                     '# Completed on Fri Feb 18 15:17:05 2011']
-
-    sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
-                  '*nat',
-                  ':PREROUTING ACCEPT [3936:762355]',
-                  ':INPUT ACCEPT [2447:225266]',
-                  ':OUTPUT ACCEPT [63491:4191863]',
-                  ':POSTROUTING ACCEPT [63112:4108641]',
-                  ':nova-compute-OUTPUT - [0:0]',
-                  ':nova-compute-floating-ip-snat - [0:0]',
-                  ':nova-compute-SNATTING - [0:0]',
-                  ':nova-compute-PREROUTING - [0:0]',
-                  ':nova-compute-POSTROUTING - [0:0]',
-                  ':nova-postrouting-bottom - [0:0]',
-                  '-A PREROUTING -j nova-compute-PREROUTING ',
-                  '-A OUTPUT -j nova-compute-OUTPUT ',
-                  '-A POSTROUTING -j nova-compute-POSTROUTING ',
-                  '-A POSTROUTING -j nova-postrouting-bottom ',
-                  '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
-                  '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
-                  'COMMIT',
-                  '# Completed on Fri Feb 18 15:17:05 2011']
-
-    def setUp(self):
-        super(IptablesManagerTestCase, self).setUp()
-        self.manager = linux_net.IptablesManager()
-
-    def test_filter_rules_are_wrapped(self):
-        current_lines = self.sample_filter
-
-        table = self.manager.ipv4['filter']
-        table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
-        new_lines = self.manager._modify_rules(current_lines, table)
-        self.assertTrue('-A run_tests.py-FORWARD '
-                        '-s 1.2.3.4/5 -j DROP' in new_lines)
-
-        table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
-        new_lines = self.manager._modify_rules(current_lines, table)
-        self.assertTrue('-A run_tests.py-FORWARD '
-                        '-s 1.2.3.4/5 -j DROP' not in new_lines)
-
-    def test_nat_rules(self):
-        current_lines = self.sample_nat
-        new_lines = self.manager._modify_rules(current_lines,
-                                               self.manager.ipv4['nat'])
-
-        for line in [':nova-compute-OUTPUT - [0:0]',
-                     ':nova-compute-floating-ip-snat - [0:0]',
-                     ':nova-compute-SNATTING - [0:0]',
-                     ':nova-compute-PREROUTING - [0:0]',
-                     ':nova-compute-POSTROUTING - [0:0]']:
-            self.assertTrue(line in new_lines, "One of nova-compute's chains "
-                                               "went missing.")
-
-        seen_lines = set()
-        for line in new_lines:
-            line = line.strip()
-            self.assertTrue(line not in seen_lines,
-                            "Duplicate line: %s" % line)
-            seen_lines.add(line)
-
-        last_postrouting_line = ''
-
-        for line in new_lines:
-            if line.startswith('-A POSTROUTING'):
-                last_postrouting_line = line
-
-        self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
-                        "Last POSTROUTING rule does not jump to "
-                        "nova-postouting-bottom: %s" % last_postrouting_line)
-
-        for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
-            self.assertTrue('-A %s -j run_tests.py-%s' \
-                            % (chain, chain) in new_lines,
-                            "Built-in chain %s not wrapped" % (chain,))
-
-    def test_filter_rules(self):
-        current_lines = self.sample_filter
-        new_lines = self.manager._modify_rules(current_lines,
-                                               self.manager.ipv4['filter'])
-
-        for line in [':nova-compute-FORWARD - [0:0]',
-                     ':nova-compute-INPUT - [0:0]',
-                     ':nova-compute-local - [0:0]',
-                     ':nova-compute-OUTPUT - [0:0]']:
-            self.assertTrue(line in new_lines, "One of nova-compute's chains"
-                                               " went missing.")
-
-        seen_lines = set()
-        for line in new_lines:
-            line = line.strip()
-            self.assertTrue(line not in seen_lines,
-                            "Duplicate line: %s" % line)
-            seen_lines.add(line)
-
-        for chain in ['FORWARD', 'OUTPUT']:
-            for line in new_lines:
-                if line.startswith('-A %s' % chain):
-                    self.assertTrue('-j nova-filter-top' in line,
-                                    "First %s rule does not "
-                                    "jump to nova-filter-top" % chain)
-                    break
-
-        self.assertTrue('-A nova-filter-top '
-                        '-j run_tests.py-local' in new_lines,
-                        "nova-filter-top does not jump to wrapped local chain")
-
-        for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
-            self.assertTrue('-A %s -j run_tests.py-%s' \
-                            % (chain, chain) in new_lines,
-                            "Built-in chain %s not wrapped" % (chain,))
-
-    def test_will_empty_chain(self):
-        self.manager.ipv4['filter'].add_chain('test-chain')
-        self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
-        old_count = len(self.manager.ipv4['filter'].rules)
-        self.manager.ipv4['filter'].empty_chain('test-chain')
-        self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
-
-    def test_will_empty_unwrapped_chain(self):
-        self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
-        self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
-                                             wrap=False)
-        old_count = len(self.manager.ipv4['filter'].rules)
-        self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
-        self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
-
-    def test_will_not_empty_wrapped_when_unwrapped(self):
-        self.manager.ipv4['filter'].add_chain('test-chain')
-        self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
-        old_count = len(self.manager.ipv4['filter'].rules)
-        self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
-        self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
-
-    def test_will_not_empty_unwrapped_when_wrapped(self):
-        self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
-        self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
-                                             wrap=False)
-        old_count = len(self.manager.ipv4['filter'].rules)
-        self.manager.ipv4['filter'].empty_chain('test-chain')
-        self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
+from nova.network import manager as network_manager
+
+import mox
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+HOST = "testhost"
+
+
+class FakeModel(dict):
+    """Represent a model from the db"""
+    def __init__(self, *args, **kwargs):
+        self.update(kwargs)
+
+    def __getattr__(self, name):
+        return self[name]
+
+
+networks = [{'id': 0,
+             'label': 'test0',
+             'injected': False,
+             'cidr': '192.168.0.0/24',
+             'cidr_v6': '2001:db8::/64',
+             'gateway_v6': '2001:db8::1',
+             'netmask_v6': '64',
+             'netmask': '255.255.255.0',
+             'bridge': 'fa0',
+             'bridge_interface': 'fake_fa0',
+             'gateway': '192.168.0.1',
+             'broadcast': '192.168.0.255',
+             'dns': '192.168.0.1',
+             'vlan': None,
+             'host': None,
+             'project_id': 'fake_project',
+             'vpn_public_address': '192.168.0.2'},
+            {'id': 1,
+             'label': 'test1',
+             'injected': False,
+             'cidr': '192.168.1.0/24',
+             'cidr_v6': '2001:db9::/64',
+             'gateway_v6': '2001:db9::1',
+             'netmask_v6': '64',
+             'netmask': '255.255.255.0',
+             'bridge': 'fa1',
+             'bridge_interface': 'fake_fa1',
+             'gateway': '192.168.1.1',
+             'broadcast': '192.168.1.255',
+             'dns': '192.168.0.1',
+             'vlan': None,
+             'host': None,
+             'project_id': 'fake_project',
+             'vpn_public_address': '192.168.1.2'}]
+
+
+fixed_ips = [{'id': 0,
+              'network_id': 0,
+              'address': '192.168.0.100',
+              'instance_id': 0,
+              'allocated': False,
+              'virtual_interface_id': 0,
+              'floating_ips': []},
+             {'id': 0,
+              'network_id': 1,
+              'address': '192.168.1.100',
+              'instance_id': 0,
+              'allocated': False,
+              'virtual_interface_id': 0,
+              'floating_ips': []}]
+
+
+flavor = {'id': 0,
+          'rxtx_cap': 3}
+
+
+floating_ip_fields = {'id': 0,
+                      'address': '192.168.10.100',
+                      'fixed_ip_id': 0,
+                      'project_id': None,
+                      'auto_assigned': False}
+
+
+vifs = [{'id': 0,
+         'address': 'DE:AD:BE:EF:00:00',
+         'network_id': 0,
+         'network': FakeModel(**networks[0]),
+         'instance_id': 0},
+        {'id': 1,
+         'address': 'DE:AD:BE:EF:00:01',
+         'network_id': 1,
+         'network': FakeModel(**networks[1]),
+         'instance_id': 0}]
+
+
+class FlatNetworkTestCase(test.TestCase):
+    def setUp(self):
+        super(FlatNetworkTestCase, self).setUp()
+        self.network = network_manager.FlatManager(host=HOST)
+        self.network.db = db
+
+    def test_set_network_hosts(self):
+        self.mox.StubOutWithMock(db, 'network_get_all')
+        self.mox.StubOutWithMock(db, 'network_set_host')
+        self.mox.StubOutWithMock(db, 'network_update')
+
+        db.network_get_all(mox.IgnoreArg()).AndReturn([networks[0]])
+        db.network_set_host(mox.IgnoreArg(),
+                            networks[0]['id'],
+                            mox.IgnoreArg()).AndReturn(HOST)
+        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
+        self.mox.ReplayAll()
+
+        self.network.set_network_hosts(None)
+
+    def test_get_instance_nw_info(self):
+        self.mox.StubOutWithMock(db, 'fixed_ip_get_by_instance')
+        self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
+        self.mox.StubOutWithMock(db, 'instance_type_get_by_id')
+
+        db.fixed_ip_get_by_instance(mox.IgnoreArg(),
+                                    mox.IgnoreArg()).AndReturn(fixed_ips)
+        db.virtual_interface_get_by_instance(mox.IgnoreArg(),
+                                             mox.IgnoreArg()).AndReturn(vifs)
+        db.instance_type_get_by_id(mox.IgnoreArg(),
+                                   mox.IgnoreArg()).AndReturn(flavor)
+        self.mox.ReplayAll()
+
+        nw_info = self.network.get_instance_nw_info(None, 0, 0)
+
+        self.assertTrue(nw_info)
+
+        for i, nw in enumerate(nw_info):
+            i8 = i + 8
+            check = {'bridge': 'fa%s' % i,
+                     'cidr': '192.168.%s.0/24' % i,
+                     'cidr_v6': '2001:db%s::/64' % i8,
+                     'id': i,
+                     'injected': 'DONTCARE'}
+
+            self.assertDictMatch(nw[0], check)
+
+            check = {'broadcast': '192.168.%s.255' % i,
+                     'dns': 'DONTCARE',
+                     'gateway': '192.168.%s.1' % i,
+                     'gateway6': '2001:db%s::1' % i8,
+                     'ip6s': 'DONTCARE',
+                     'ips': 'DONTCARE',
+                     'label': 'test%s' % i,
+                     'mac': 'DE:AD:BE:EF:00:0%s' % i,
+                     'rxtx_cap': 'DONTCARE'}
+            self.assertDictMatch(nw[1], check)
+
+            check = [{'enabled': 'DONTCARE',
+                      'ip': '2001:db%s::dcad:beff:feef:%s' % (i8, i),
+                      'netmask': '64'}]
+            self.assertDictListMatch(nw[1]['ip6s'], check)
+
+            check = [{'enabled': '1',
+                      'ip': '192.168.%s.100' % i,
+                      'netmask': '255.255.255.0'}]
+            self.assertDictListMatch(nw[1]['ips'], check)
+
+
+class VlanNetworkTestCase(test.TestCase):
+    def setUp(self):
+        super(VlanNetworkTestCase, self).setUp()
+        self.network = network_manager.VlanManager(host=HOST)
+        self.network.db = db
+
+    def test_vpn_allocate_fixed_ip(self):
+        self.mox.StubOutWithMock(db, 'fixed_ip_associate')
+        self.mox.StubOutWithMock(db, 'fixed_ip_update')
+        self.mox.StubOutWithMock(db,
+                'virtual_interface_get_by_instance_and_network')
+
+        db.fixed_ip_associate(mox.IgnoreArg(),
+                              mox.IgnoreArg(),
+                              mox.IgnoreArg()).AndReturn('192.168.0.1')
+        db.fixed_ip_update(mox.IgnoreArg(),
+                           mox.IgnoreArg(),
+                           mox.IgnoreArg())
+        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
+        self.mox.ReplayAll()
+
+        network = dict(networks[0])
+        network['vpn_private_address'] = '192.168.0.2'
+        self.network.allocate_fixed_ip(None, 0, network, vpn=True)
+
+    def test_allocate_fixed_ip(self):
+        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
+        self.mox.StubOutWithMock(db, 'fixed_ip_update')
+        self.mox.StubOutWithMock(db,
+                'virtual_interface_get_by_instance_and_network')
+
+        db.fixed_ip_associate_pool(mox.IgnoreArg(),
+                                   mox.IgnoreArg(),
+                                   mox.IgnoreArg()).AndReturn('192.168.0.1')
+        db.fixed_ip_update(mox.IgnoreArg(),
+                           mox.IgnoreArg(),
+                           mox.IgnoreArg())
+        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
+        self.mox.ReplayAll()
+
+        network = dict(networks[0])
+        network['vpn_private_address'] = '192.168.0.2'
+        self.network.allocate_fixed_ip(None, 0, network)
+
+    def test_create_networks_too_big(self):
+        self.assertRaises(ValueError, self.network.create_networks, None,
+                          num_networks=4094, vlan_start=1)
+
+    def test_create_networks_too_many(self):
+        self.assertRaises(ValueError, self.network.create_networks, None,
+                          num_networks=100, vlan_start=1,
+                          cidr='192.168.0.1/24', network_size=100)
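
For readers unfamiliar with mox, the rewritten tests above follow its record/replay/verify cycle: stub out a function, record the calls you expect (with AndReturn supplying canned results), switch to replay mode, exercise the code under test, then verify. A minimal self-contained sketch of that cycle; the FakeDb class is invented purely for illustration:

import mox

class FakeDb(object):
    """Stand-in for the nova db API (illustrative only)."""
    def network_get_all(self, context):
        raise AssertionError('the real call should never run in a unit test')

fake_db = FakeDb()
m = mox.Mox()

# Record phase: declare the expected call and its canned return value.
m.StubOutWithMock(fake_db, 'network_get_all')
fake_db.network_get_all(mox.IgnoreArg()).AndReturn([{'id': 0}])

# Replay phase: the stub now serves the recorded expectations.
m.ReplayAll()
assert fake_db.network_get_all(None) == [{'id': 0}]

# Verify phase: fails if any recorded expectation was never met.
m.VerifyAll()
m.UnsetStubs()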
@@ -1,242 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for vlan network code
"""
import netaddr
import os
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
from nova.tests.network import base
from nova.tests.network import binpath,\
lease_ip, release_ip
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')
class VlanNetworkTestCase(base.NetworkTestCase):
"""Test cases for network code"""
def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
# TODO(vish): better way of adding floating ips
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
pubnet = netaddr.IPNetwork(flags.FLAGS.floating_range)
address = str(list(pubnet)[0])
try:
db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.projects[0].id)
fix_addr = self._create_address(0)
lease_ip(fix_addr)
self.assertEqual(float_addr, str(pubnet[0]))
self.network.associate_floating_ip(self.context, float_addr, fix_addr)
address = db.instance_get_floating_address(context.get_admin_context(),
self.instance_id)
self.assertEqual(address, float_addr)
self.network.disassociate_floating_ip(self.context, float_addr)
address = db.instance_get_floating_address(context.get_admin_context(),
self.instance_id)
self.assertEqual(address, None)
self.network.deallocate_floating_ip(self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
release_ip(fix_addr)
db.floating_ip_destroy(context.get_admin_context(), float_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
address = self._create_address(0)
self.assertTrue(self._is_allocated_in_project(address,
self.projects[0].id))
lease_ip(address)
self._deallocate_address(0, address)
# Doesn't go away until it's dhcp released
self.assertTrue(self._is_allocated_in_project(address,
self.projects[0].id))
release_ip(address)
self.assertFalse(self._is_allocated_in_project(address,
self.projects[0].id))
def test_side_effects(self):
"""Ensures allocating and releasing has no side effects"""
address = self._create_address(0)
address2 = self._create_address(1, self.instance2_id)
self.assertTrue(self._is_allocated_in_project(address,
self.projects[0].id))
self.assertTrue(self._is_allocated_in_project(address2,
self.projects[1].id))
self.assertFalse(self._is_allocated_in_project(address,
self.projects[1].id))
# Addresses are allocated before they're issued
lease_ip(address)
lease_ip(address2)
self._deallocate_address(0, address)
release_ip(address)
self.assertFalse(self._is_allocated_in_project(address,
self.projects[0].id))
# First address release shouldn't affect the second
self.assertTrue(self._is_allocated_in_project(address2,
self.projects[1].id))
self._deallocate_address(1, address2)
release_ip(address2)
self.assertFalse(self._is_allocated_in_project(address2,
self.projects[1].id))
def test_subnet_edge(self):
"""Makes sure that private ips don't overlap"""
first = self._create_address(0)
lease_ip(first)
instance_ids = []
for i in range(1, FLAGS.num_networks):
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address = self._create_address(i, instance_ref['id'])
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address2 = self._create_address(i, instance_ref['id'])
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address3 = self._create_address(i, instance_ref['id'])
lease_ip(address)
lease_ip(address2)
lease_ip(address3)
self.context._project = self.projects[i]
self.context.project_id = self.projects[i].id
self.assertFalse(self._is_allocated_in_project(address,
self.projects[0].id))
self.assertFalse(self._is_allocated_in_project(address2,
self.projects[0].id))
self.assertFalse(self._is_allocated_in_project(address3,
self.projects[0].id))
self.network.deallocate_fixed_ip(self.context, address)
self.network.deallocate_fixed_ip(self.context, address2)
self.network.deallocate_fixed_ip(self.context, address3)
release_ip(address)
release_ip(address2)
release_ip(address3)
for instance_id in instance_ids:
db.instance_destroy(context.get_admin_context(), instance_id)
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
self.network.deallocate_fixed_ip(self.context, first)
self._deallocate_address(0, first)
release_ip(first)
def test_vpn_ip_and_port_looks_valid(self):
"""Ensure the vpn ip and port are reasonable"""
self.assert_(self.projects[0].vpn_ip)
self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
FLAGS.num_networks)
def test_too_many_networks(self):
"""Ensure error is raised if we run out of networks"""
projects = []
networks_left = (FLAGS.num_networks -
db.network_count(context.get_admin_context()))
for i in range(networks_left):
project = self.manager.create_project('many%s' % i, self.user)
projects.append(project)
db.project_get_network(context.get_admin_context(), project.id)
project = self.manager.create_project('last', self.user)
projects.append(project)
self.assertRaises(db.NoMoreNetworks,
db.project_get_network,
context.get_admin_context(),
project.id)
for project in projects:
self.manager.delete_project(project)
def test_ips_are_reused(self):
"""Makes sure that ip addresses that are deallocated get reused"""
address = self._create_address(0)
lease_ip(address)
self.network.deallocate_fixed_ip(self.context, address)
release_ip(address)
address2 = self._create_address(0)
self.assertEqual(address, address2)
lease_ip(address)
self.network.deallocate_fixed_ip(self.context, address2)
release_ip(address)
def test_too_many_addresses(self):
"""Test for a NoMoreAddresses exception when all fixed ips are used.
"""
admin_context = context.get_admin_context()
network = db.project_get_network(admin_context, self.projects[0].id)
num_available_ips = db.network_count_available_ips(admin_context,
network['id'])
addresses = []
instance_ids = []
for i in range(num_available_ips):
instance_ref = self._create_instance(0)
instance_ids.append(instance_ref['id'])
address = self._create_address(0, instance_ref['id'])
addresses.append(address)
lease_ip(address)
ip_count = db.network_count_available_ips(context.get_admin_context(),
network['id'])
self.assertEqual(ip_count, 0)
self.assertRaises(db.NoMoreAddresses,
self.network.allocate_fixed_ip,
self.context,
'foo')
for i in range(num_available_ips):
self.network.deallocate_fixed_ip(self.context, addresses[i])
release_ip(addresses[i])
db.instance_destroy(context.get_admin_context(), instance_ids[i])
ip_count = db.network_count_available_ips(context.get_admin_context(),
network['id'])
self.assertEqual(ip_count, num_available_ips)
def _is_allocated_in_project(self, address, project_id):
"""Returns true if address is in specified project"""
project_net = db.project_get_network(context.get_admin_context(),
project_id)
network = db.fixed_ip_get_network(context.get_admin_context(),
address)
instance = db.fixed_ip_get_instance(context.get_admin_context(),
address)
# instance exists until release
return instance is not None and network['id'] == project_net['id']
def run(self, result=None):
if(FLAGS.network_manager == 'nova.network.manager.VlanManager'):
super(VlanNetworkTestCase, self).run(result)
@@ -1,251 +1,276 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Citrix Systems, Inc.
 # Copyright 2011 OpenStack LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
 
 """
 Test suite for VMWareAPI.
 """
 
 import stubout
 
 from nova import context
 from nova import db
 from nova import flags
 from nova import test
 from nova import utils
 from nova.auth import manager
 from nova.compute import power_state
 from nova.tests.glance import stubs as glance_stubs
 from nova.tests.vmwareapi import db_fakes
 from nova.tests.vmwareapi import stubs
 from nova.virt import vmwareapi_conn
 from nova.virt.vmwareapi import fake as vmwareapi_fake
 
 
 FLAGS = flags.FLAGS
 
 
 class VMWareAPIVMTestCase(test.TestCase):
     """Unit tests for Vmware API connection calls."""
 
-    def setUp(self):
-        super(VMWareAPIVMTestCase, self).setUp()
-        self.flags(vmwareapi_host_ip='test_url',
-                   vmwareapi_host_username='test_username',
-                   vmwareapi_host_password='test_pass')
-        self.manager = manager.AuthManager()
-        self.user = self.manager.create_user('fake', 'fake', 'fake',
-                                             admin=True)
-        self.project = self.manager.create_project('fake', 'fake', 'fake')
-        self.network = utils.import_object(FLAGS.network_manager)
-        self.stubs = stubout.StubOutForTesting()
-        vmwareapi_fake.reset()
-        db_fakes.stub_out_db_instance_api(self.stubs)
-        stubs.set_stubs(self.stubs)
-        glance_stubs.stubout_glance_client(self.stubs)
-        self.conn = vmwareapi_conn.get_connection(False)
+    # NOTE(jkoelker): This is leaking stubs into the db module.
+    #                 Commenting out until updated for multi-nic.
+    #def setUp(self):
+    #    super(VMWareAPIVMTestCase, self).setUp()
+    #    self.flags(vmwareapi_host_ip='test_url',
+    #               vmwareapi_host_username='test_username',
+    #               vmwareapi_host_password='test_pass')
+    #    self.manager = manager.AuthManager()
+    #    self.user = self.manager.create_user('fake', 'fake', 'fake',
+    #                                         admin=True)
+    #    self.project = self.manager.create_project('fake', 'fake', 'fake')
+    #    self.network = utils.import_object(FLAGS.network_manager)
+    #    self.stubs = stubout.StubOutForTesting()
+    #    vmwareapi_fake.reset()
+    #    db_fakes.stub_out_db_instance_api(self.stubs)
+    #    stubs.set_stubs(self.stubs)
+    #    glance_stubs.stubout_glance_client(self.stubs,
+    #                                       glance_stubs.FakeGlance)
+    #    self.conn = vmwareapi_conn.get_connection(False)
+
+    #def tearDown(self):
+    #    super(VMWareAPIVMTestCase, self).tearDown()
+    #    vmwareapi_fake.cleanup()
+    #    self.manager.delete_project(self.project)
+    #    self.manager.delete_user(self.user)
+    #    self.stubs.UnsetAll()
 
     def _create_instance_in_the_db(self):
         values = {'name': 1,
                   'id': 1,
                   'project_id': self.project.id,
                   'user_id': self.user.id,
-                  'image_ref': "1",
+                  'image_id': "1",
                   'kernel_id': "1",
                   'ramdisk_id': "1",
                   'instance_type': 'm1.large',
                   'mac_address': 'aa:bb:cc:dd:ee:ff',
                   }
-        self.instance = db.instance_create(None, values)
+        self.instance = db.instance_create(values)
 
     def _create_vm(self):
         """Create and spawn the VM."""
         self._create_instance_in_the_db()
         self.type_data = db.instance_type_get_by_name(None, 'm1.large')
         self.conn.spawn(self.instance)
         self._check_vm_record()
 
     def _check_vm_record(self):
         """
         Check if the spawned VM's properties correspond to the instance in
         the db.
         """
         instances = self.conn.list_instances()
         self.assertEquals(len(instances), 1)
 
         # Get Nova record for VM
         vm_info = self.conn.get_info(1)
 
         # Get record for VM
         vms = vmwareapi_fake._get_objects("VirtualMachine")
         vm = vms[0]
 
         # Check that m1.large above turned into the right thing.
         mem_kib = long(self.type_data['memory_mb']) << 10
         vcpus = self.type_data['vcpus']
         self.assertEquals(vm_info['max_mem'], mem_kib)
         self.assertEquals(vm_info['mem'], mem_kib)
         self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
         self.assertEquals(vm.get("summary.config.memorySizeMB"),
                           self.type_data['memory_mb'])
 
         # Check that the VM is running according to Nova
         self.assertEquals(vm_info['state'], power_state.RUNNING)
 
         # Check that the VM is running according to vSphere API.
         self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
 
     def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
         """
         Check if the get_info returned values correspond to the instance
         object in the db.
         """
         mem_kib = long(self.type_data['memory_mb']) << 10
         self.assertEquals(info["state"], pwr_state)
         self.assertEquals(info["max_mem"], mem_kib)
         self.assertEquals(info["mem"], mem_kib)
         self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_list_instances(self):
         instances = self.conn.list_instances()
         self.assertEquals(len(instances), 0)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_list_instances_1(self):
         self._create_vm()
         instances = self.conn.list_instances()
         self.assertEquals(len(instances), 1)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_spawn(self):
         self._create_vm()
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_snapshot(self):
         self._create_vm()
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
         self.conn.snapshot(self.instance, "Test-Snapshot")
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_snapshot_non_existent(self):
         self._create_instance_in_the_db()
         self.assertRaises(Exception, self.conn.snapshot, self.instance,
                           "Test-Snapshot")
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_reboot(self):
         self._create_vm()
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
         self.conn.reboot(self.instance)
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_reboot_non_existent(self):
         self._create_instance_in_the_db()
         self.assertRaises(Exception, self.conn.reboot, self.instance)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_reboot_not_poweredon(self):
         self._create_vm()
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
         self.conn.suspend(self.instance, self.dummy_callback_handler)
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.PAUSED)
         self.assertRaises(Exception, self.conn.reboot, self.instance)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_suspend(self):
         self._create_vm()
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
         self.conn.suspend(self.instance, self.dummy_callback_handler)
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.PAUSED)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_suspend_non_existent(self):
         self._create_instance_in_the_db()
         self.assertRaises(Exception, self.conn.suspend, self.instance,
                           self.dummy_callback_handler)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_resume(self):
         self._create_vm()
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
         self.conn.suspend(self.instance, self.dummy_callback_handler)
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.PAUSED)
         self.conn.resume(self.instance, self.dummy_callback_handler)
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_resume_non_existent(self):
         self._create_instance_in_the_db()
         self.assertRaises(Exception, self.conn.resume, self.instance,
                           self.dummy_callback_handler)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_resume_not_suspended(self):
         self._create_vm()
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
         self.assertRaises(Exception, self.conn.resume, self.instance,
                           self.dummy_callback_handler)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_get_info(self):
         self._create_vm()
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_destroy(self):
         self._create_vm()
         info = self.conn.get_info(1)
         self._check_vm_info(info, power_state.RUNNING)
         instances = self.conn.list_instances()
         self.assertEquals(len(instances), 1)
         self.conn.destroy(self.instance)
         instances = self.conn.list_instances()
         self.assertEquals(len(instances), 0)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_destroy_non_existent(self):
         self._create_instance_in_the_db()
         self.assertEquals(self.conn.destroy(self.instance), None)
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_pause(self):
         pass
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_unpause(self):
         pass
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_diagnostics(self):
         pass
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_get_console_output(self):
         pass
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def test_get_ajax_console(self):
         pass
 
+    @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
     def dummy_callback_handler(self, ret):
         """
         Dummy callback function to be passed to suspend, resume, etc., calls.
         """
         pass
-
-    def tearDown(self):
-        super(VMWareAPIVMTestCase, self).tearDown()
-        vmwareapi_fake.cleanup()
-        self.manager.delete_project(self.project)
-        self.manager.delete_user(self.user)
-        self.stubs.UnsetAll()
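
The @test.skip_test decorator used throughout this file takes a reason string and short-circuits the decorated test. Nova's actual helper may differ in detail, but a minimal equivalent, under the assumption that the runner honors unittest-style skips, could look like this:

import functools
import unittest

def skip_test(msg):
    """Decorator that skips a test with the given reason (sketch only)."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Raising SkipTest marks the test skipped instead of failed.
            raise unittest.SkipTest(msg)
        return wrapper
    return decorator

class ExampleTestCase(unittest.TestCase):
    @skip_test("demonstration")
    def test_skipped(self):
        self.fail("never reached")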
@@ -134,7 +134,6 @@ class VolumeTestCase(test.TestCase):
         inst['user_id'] = 'fake'
         inst['project_id'] = 'fake'
         inst['instance_type_id'] = '2'  # m1.tiny
-        inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = 0
         instance_id = db.instance_create(self.context, inst)['id']
         mountpoint = "/dev/sdf"
@@ -83,7 +83,6 @@ class XenAPIVolumeTestCase(test.TestCase):
                   'kernel_id': 2,
                   'ramdisk_id': 3,
                   'instance_type_id': '3',  # m1.large
-                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                   'os_type': 'linux',
                   'architecture': 'x86-64'}
@@ -211,11 +210,24 @@ class XenAPIVMTestCase(test.TestCase):
                   'kernel_id': 2,
                   'ramdisk_id': 3,
                   'instance_type_id': '3',  # m1.large
-                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                   'os_type': 'linux',
                   'architecture': 'x86-64'}
+        network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+                          {'broadcast': '192.168.0.255',
+                           'dns': ['192.168.0.1'],
+                           'gateway': '192.168.0.1',
+                           'gateway6': 'dead:beef::1',
+                           'ip6s': [{'enabled': '1',
+                                     'ip': 'dead:beef::dcad:beff:feef:0',
+                                     'netmask': '64'}],
+                           'ips': [{'enabled': '1',
+                                    'ip': '192.168.0.100',
+                                    'netmask': '255.255.255.0'}],
+                           'label': 'fake',
+                           'mac': 'DE:AD:BE:EF:00:00',
+                           'rxtx_cap': 3})]
         instance = db.instance_create(self.context, values)
-        self.conn.spawn(instance)
+        self.conn.spawn(instance, network_info)
 
         gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id)
         gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id)
@@ -320,22 +332,22 @@ class XenAPIVMTestCase(test.TestCase):
         if check_injection:
             xenstore_data = self.vm['xenstore_data']
-            key = 'vm-data/networking/aabbccddeeff'
+            key = 'vm-data/networking/DEADBEEF0000'
             xenstore_value = xenstore_data[key]
             tcpip_data = ast.literal_eval(xenstore_value)
             self.assertEquals(tcpip_data,
-                              {'label': 'fake_flat_network',
-                               'broadcast': '10.0.0.255',
-                               'ips': [{'ip': '10.0.0.3',
-                                        'netmask':'255.255.255.0',
-                                        'enabled':'1'}],
-                               'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
-                                         'netmask': '120',
-                                         'enabled': '1'}],
-                               'mac': 'aa:bb:cc:dd:ee:ff',
-                               'dns': ['10.0.0.2'],
-                               'gateway': '10.0.0.1',
-                               'gateway6': 'fe80::a00:1'})
+                              {'broadcast': '192.168.0.255',
+                               'dns': ['192.168.0.1'],
+                               'gateway': '192.168.0.1',
+                               'gateway6': 'dead:beef::1',
+                               'ip6s': [{'enabled': '1',
+                                         'ip': 'dead:beef::dcad:beff:feef:0',
+                                         'netmask': '64'}],
+                               'ips': [{'enabled': '1',
+                                        'ip': '192.168.0.100',
+                                        'netmask': '255.255.255.0'}],
+                               'label': 'fake',
+                               'mac': 'DE:AD:BE:EF:00:00'})
 
     def check_vm_params_for_windows(self):
         self.assertEquals(self.vm['platform']['nx'], 'true')
@@ -369,6 +381,18 @@ class XenAPIVMTestCase(test.TestCase):
         self.assertEquals(self.vm['HVM_boot_params'], {})
         self.assertEquals(self.vm['HVM_boot_policy'], '')
 
+    def _list_vdis(self):
+        url = FLAGS.xenapi_connection_url
+        username = FLAGS.xenapi_connection_username
+        password = FLAGS.xenapi_connection_password
+        session = xenapi_conn.XenAPISession(url, username, password)
+        return session.call_xenapi('VDI.get_all')
+
+    def _check_vdis(self, start_list, end_list):
+        for vdi_ref in end_list:
+            if not vdi_ref in start_list:
+                self.fail('Found unexpected VDI:%s' % vdi_ref)
+
     def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                     instance_type_id="3", os_type="linux",
                     architecture="x86-64", instance_id=1,
@@ -381,11 +405,24 @@ class XenAPIVMTestCase(test.TestCase):
                   'kernel_id': kernel_id,
                   'ramdisk_id': ramdisk_id,
                   'instance_type_id': instance_type_id,
-                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                   'os_type': os_type,
                   'architecture': architecture}
         instance = db.instance_create(self.context, values)
-        self.conn.spawn(instance)
+        network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
+                          {'broadcast': '192.168.0.255',
+                           'dns': ['192.168.0.1'],
+                           'gateway': '192.168.0.1',
+                           'gateway6': 'dead:beef::1',
+                           'ip6s': [{'enabled': '1',
+                                     'ip': 'dead:beef::dcad:beff:feef:0',
+                                     'netmask': '64'}],
+                           'ips': [{'enabled': '1',
+                                    'ip': '192.168.0.100',
+                                    'netmask': '255.255.255.0'}],
+                           'label': 'fake',
+                           'mac': 'DE:AD:BE:EF:00:00',
+                           'rxtx_cap': 3})]
+        self.conn.spawn(instance, network_info)
         self.create_vm_record(self.conn, os_type, instance_id)
         self.check_vm_record(self.conn, check_injection)
         self.assertTrue(instance.os_type)
@@ -397,6 +434,36 @@ class XenAPIVMTestCase(test.TestCase):
                           self._test_spawn,
                           1, 2, 3, "4")  # m1.xlarge
 
+    def test_spawn_fail_cleanup_1(self):
+        """Simulates an error while downloading an image.
+
+        Verifies that VDIs created are properly cleaned up.
+        """
+        vdi_recs_start = self._list_vdis()
+        FLAGS.xenapi_image_service = 'glance'
+        stubs.stubout_fetch_image_glance_disk(self.stubs)
+        self.assertRaises(xenapi_fake.Failure,
+                          self._test_spawn, 1, 2, 3)
+        # No additional VDI should be found.
+        vdi_recs_end = self._list_vdis()
+        self._check_vdis(vdi_recs_start, vdi_recs_end)
+
+    def test_spawn_fail_cleanup_2(self):
+        """Simulates an error while creating VM record.
+
+        It verifies that VDIs created are properly cleaned up.
+        """
+        vdi_recs_start = self._list_vdis()
+        FLAGS.xenapi_image_service = 'glance'
+        stubs.stubout_create_vm(self.stubs)
+        self.assertRaises(xenapi_fake.Failure,
+                          self._test_spawn, 1, 2, 3)
+        # No additional VDI should be found.
+        vdi_recs_end = self._list_vdis()
+        self._check_vdis(vdi_recs_start, vdi_recs_end)
+
     def test_spawn_raw_objectstore(self):
         FLAGS.xenapi_image_service = 'objectstore'
         self._test_spawn(1, None, None)
@@ -467,11 +534,11 @@ class XenAPIVMTestCase(test.TestCase):
             index = config.index('auto eth0')
             self.assertEquals(config[index + 1:index + 8], [
                 'iface eth0 inet static',
-                'address 10.0.0.3',
+                'address 192.168.0.100',
                 'netmask 255.255.255.0',
-                'broadcast 10.0.0.255',
-                'gateway 10.0.0.1',
-                'dns-nameservers 10.0.0.2',
+                'broadcast 192.168.0.255',
+                'gateway 192.168.0.1',
+                'dns-nameservers 192.168.0.1',
                 ''])
             self._tee_executed = True
             return '', ''
@@ -532,23 +599,37 @@ class XenAPIVMTestCase(test.TestCase):
         # guest agent is detected
         self.assertFalse(self._tee_executed)
 
+    @test.skip_test("Never gets an address, not sure why")
     def test_spawn_vlanmanager(self):
         self.flags(xenapi_image_service='glance',
                    network_manager='nova.network.manager.VlanManager',
                    network_driver='nova.network.xenapi_net',
                    vlan_interface='fake0')
+
+        def dummy(*args, **kwargs):
+            pass
+
+        self.stubs.Set(VMOps, 'create_vifs', dummy)
         # Reset network table
         xenapi_fake.reset_table('network')
         # Instance id = 2 will use vlan network (see db/fakes.py)
-        fake_instance_id = 2
+        ctxt = self.context.elevated()
+        instance_ref = self._create_instance(2)
         network_bk = self.network
         # Ensure we use xenapi_net driver
         self.network = utils.import_object(FLAGS.network_manager)
-        self.network.setup_compute_network(None, fake_instance_id)
+        networks = self.network.db.network_get_all(ctxt)
+        for network in networks:
+            self.network.set_network_host(ctxt, network['id'])
+
+        self.network.allocate_for_instance(ctxt, instance_id=instance_ref.id,
+                instance_type_id=1, project_id=self.project.id)
+        self.network.setup_compute_network(ctxt, instance_ref.id)
         self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
                          glance_stubs.FakeGlance.IMAGE_KERNEL,
                          glance_stubs.FakeGlance.IMAGE_RAMDISK,
-                         instance_id=fake_instance_id)
+                         instance_id=instance_ref.id,
+                         create_record=False)
         # TODO(salvatore-orlando): a complete test here would require
         # a check for making sure the bridge for the VM's VIF is
         # consistent with bridge specified in nova db
@@ -560,7 +641,7 @@ class XenAPIVMTestCase(test.TestCase):
         vif_rec = xenapi_fake.get_record('VIF', vif_ref)
         self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
         self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
-                          str(4 * 1024))
+                          str(3 * 1024))
 
     def test_rescue(self):
         self.flags(xenapi_inject_image=False)
@@ -582,22 +663,35 @@ class XenAPIVMTestCase(test.TestCase):
         self.vm = None
         self.stubs.UnsetAll()
 
-    def _create_instance(self):
+    def _create_instance(self, instance_id=1):
         """Creates and spawns a test instance."""
         stubs.stubout_loopingcall_start(self.stubs)
         values = {
-            'id': 1,
+            'id': instance_id,
             'project_id': self.project.id,
             'user_id': self.user.id,
             'image_ref': 1,
             'kernel_id': 2,
             'ramdisk_id': 3,
             'instance_type_id': '3',  # m1.large
-            'mac_address': 'aa:bb:cc:dd:ee:ff',
             'os_type': 'linux',
             'architecture': 'x86-64'}
         instance = db.instance_create(self.context, values)
-        self.conn.spawn(instance)
+        network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+                          {'broadcast': '192.168.0.255',
+                           'dns': ['192.168.0.1'],
+                           'gateway': '192.168.0.1',
+                           'gateway6': 'dead:beef::1',
+                           'ip6s': [{'enabled': '1',
+                                     'ip': 'dead:beef::dcad:beff:feef:0',
+                                     'netmask': '64'}],
+                           'ips': [{'enabled': '1',
+                                    'ip': '192.168.0.100',
+                                    'netmask': '255.255.255.0'}],
+                           'label': 'fake',
+                           'mac': 'DE:AD:BE:EF:00:00',
+                           'rxtx_cap': 3})]
+        self.conn.spawn(instance, network_info)
         return instance
 
 
@@ -669,7 +763,6 @@ class XenAPIMigrateInstance(test.TestCase):
             'ramdisk_id': None,
             'local_gb': 5,
             'instance_type_id': '3',  # m1.large
-            'mac_address': 'aa:bb:cc:dd:ee:ff',
             'os_type': 'linux',
             'architecture': 'x86-64'}
@@ -695,7 +788,22 @@ class XenAPIMigrateInstance(test.TestCase):
         stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
         stubs.stubout_loopingcall_start(self.stubs)
         conn = xenapi_conn.get_connection(False)
-        conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'))
+        network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+                          {'broadcast': '192.168.0.255',
+                           'dns': ['192.168.0.1'],
+                           'gateway': '192.168.0.1',
+                           'gateway6': 'dead:beef::1',
+                           'ip6s': [{'enabled': '1',
+                                     'ip': 'dead:beef::dcad:beff:feef:0',
+                                     'netmask': '64'}],
+                           'ips': [{'enabled': '1',
+                                    'ip': '192.168.0.100',
+                                    'netmask': '255.255.255.0'}],
+                           'label': 'fake',
+                           'mac': 'DE:AD:BE:EF:00:00',
+                           'rxtx_cap': 3})]
+        conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'),
+                           network_info)
 
 
 class XenAPIDetermineDiskImageTestCase(test.TestCase):
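
Each network_info entry constructed above is a two-tuple: a network dict (bridge, id, injected) paired with an info dict (ips, ip6s, mac, gateways, rxtx_cap). A small helper shows how a consumer might walk that structure; the function name is illustrative, not a nova API:

def list_fixed_ips(network_info):
    """Collect every IPv4 address in a multi-nic network_info list.

    network_info is a list of (network, info) tuples in the shape the
    tests above build by hand.
    """
    addresses = []
    for network, info in network_info:
        addresses.extend(ip['ip'] for ip in info['ips'])
    return addresses

network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
                 {'broadcast': '192.168.0.255',
                  'dns': ['192.168.0.1'],
                  'gateway': '192.168.0.1',
                  'gateway6': 'dead:beef::1',
                  'ip6s': [{'enabled': '1',
                            'ip': 'dead:beef::dcad:beff:feef:0',
                            'netmask': '64'}],
                  'ips': [{'enabled': '1',
                           'ip': '192.168.0.100',
                           'netmask': '255.255.255.0'}],
                  'label': 'fake',
                  'mac': 'DE:AD:BE:EF:00:00',
                  'rxtx_cap': 3})]

assert list_fixed_ips(network_info) == ['192.168.0.100']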
@@ -98,6 +98,42 @@ def stubout_is_vdi_pv(stubs):
     stubs.Set(vm_utils, '_is_vdi_pv', f)
 
+def stubout_determine_is_pv_objectstore(stubs):
+    """Assumes VMs never have PV kernels"""
+
+    @classmethod
+    def f(cls, *args):
+        return False
+    stubs.Set(vm_utils.VMHelper, '_determine_is_pv_objectstore', f)
+
+
+def stubout_lookup_image(stubs):
+    """Simulates a failure in lookup image."""
+
+    def f(_1, _2, _3, _4):
+        raise Exception("Test Exception raised by fake lookup_image")
+    stubs.Set(vm_utils, 'lookup_image', f)
+
+
+def stubout_fetch_image_glance_disk(stubs):
+    """Simulates a failure in fetch image_glance_disk."""
+
+    @classmethod
+    def f(cls, *args):
+        raise fake.Failure("Test Exception raised by " +
+                           "fake fetch_image_glance_disk")
+    stubs.Set(vm_utils.VMHelper, '_fetch_image_glance_disk', f)
+
+
+def stubout_create_vm(stubs):
+    """Simulates a failure in create_vm."""
+
+    @classmethod
+    def f(cls, *args):
+        raise fake.Failure("Test Exception raised by " +
+                           "fake create_vm")
+    stubs.Set(vm_utils.VMHelper, 'create_vm', f)
+
+
 def stubout_loopingcall_start(stubs):
     def fake_start(self, interval, now=True):
         self.f(*self.args, **self.kw)
@@ -120,6 +156,9 @@ class FakeSessionForVMTests(fake.SessionBase):
         super(FakeSessionForVMTests, self).__init__(uri)
 
     def host_call_plugin(self, _1, _2, plugin, method, _5):
+        # If the call is for 'copy_kernel_vdi' return None.
+        if method == 'copy_kernel_vdi':
+            return
         sr_ref = fake.get_all('SR')[0]
         vdi_ref = fake.create_vdi('', False, sr_ref, False)
         vdi_rec = fake.get_record('VDI', vdi_ref)
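
The stubout helpers above all follow one pattern: swap a real classmethod for one that raises, so a test can drive the error path deterministically (the cleanup tests earlier rely on exactly this). Stripped of the nova specifics, the pattern is plain attribute replacement; the class and names below are invented for illustration:

class VMHelper(object):
    """Stand-in for the class whose methods get stubbed (illustrative)."""
    @classmethod
    def create_vm(cls, *args):
        return 'real vm'

def stubout_create_vm(target):
    """Replace target.create_vm with a version that always fails."""
    @classmethod
    def fail(cls, *args):
        raise RuntimeError('Test Exception raised by fake create_vm')
    target.create_vm = fail

stubout_create_vm(VMHelper)
try:
    VMHelper.create_vm()
except RuntimeError:
    pass  # the injected failure is what a cleanup test asserts on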
@@ -69,7 +69,6 @@ from nose import core
 from nose import result
 
 from nova import log as logging
-from nova.tests import fake_flags
 
 
 class _AnsiColorizer(object):
@@ -211,11 +210,11 @@ class NovaTestResult(result.TextTestResult):
                     break
             sys.stdout = stdout
 
-            # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
-            # error results in it failing to be initialized later. Otherwise,
-            # _handleElapsedTime will fail, causing the wrong error message to
-            # be outputted.
-            self.start_time = time.time()
+        # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
+        # error results in it failing to be initialized later. Otherwise,
+        # _handleElapsedTime will fail, causing the wrong error message to
+        # be outputted.
+        self.start_time = time.time()
 
     def getDescription(self, test):
         return str(test)
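
The NOTE(lorinh) comment describes a defensive initialization: set start_time in the constructor so that later timing code cannot hit an uninitialized attribute if an early error prevents startTest from ever running. Reduced to its essence, with an invented class name:

import time

class Result(object):
    def __init__(self):
        # Set eagerly: if an error aborts the run before startTest() fires,
        # elapsed-time reporting still finds a valid timestamp instead of
        # masking the original error with an AttributeError.
        self.start_time = time.time()

    def elapsed(self):
        return time.time() - self.start_time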