merge lp:nova

Mark Washenberger
2011-03-15 14:19:47 -04:00
12 changed files with 1532 additions and 31 deletions


@@ -28,6 +28,7 @@
<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
<matt.dietz@rackspace.com> <mdietz@openstack>
<mordred@inaugust.com> <mordred@hudson>
<nirmal.ranganathan@rackspace.com> <nirmal.ranganathan@rackspace.coom>
<paul@openstack.org> <paul.voccio@rackspace.com>
<paul@openstack.org> <pvoccio@castor.local>
<rconradharris@gmail.com> <rick.harris@rackspace.com>
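Each .mailmap line above maps a canonical address to an alias; the Authors check later in this commit canonicalizes contributor emails through a parse_mailmap helper imported from nova.utils. A minimal sketch of such a helper, assuming only the two-column format shown above:

import os


def parse_mailmap(mailmap='.mailmap'):
    """Build {alias: canonical} from two-column .mailmap lines."""
    mapping = {}
    if os.path.exists(mailmap):
        for line in open(mailmap, 'r'):
            line = line.strip()
            if line and not line.startswith('#'):
                canonical, alias = line.split(' ')
                mapping[alias] = canonical
    return mapping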


@@ -19,6 +19,7 @@ Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eric Day <eday@oddments.org>
Eric Windisch <eric@cloudscaling.com>
Ewan Mellor <ewan.mellor@citrix.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>


@@ -25,6 +25,7 @@ include nova/db/sqlalchemy/migrate_repo/migrate.cfg
include nova/db/sqlalchemy/migrate_repo/README
include nova/virt/interfaces.template
include nova/virt/libvirt*.xml.template
include nova/virt/cpuinfo.xml.template
include nova/tests/CA/
include nova/tests/CA/cacert.pem
include nova/tests/CA/private/


@@ -94,7 +94,7 @@ def init_leases(interface):
"""Get the list of hosts for an interface."""
ctxt = context.get_admin_context()
network_ref = db.network_get_by_bridge(ctxt, interface)
return linux_net.get_dhcp_hosts(ctxt, network_ref['id'])
return linux_net.get_dhcp_leases(ctxt, network_ref['id'])
def main():
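The fix above makes init_leases return lease lines rather than host entries. For context, the test_dhcp_lease_output test added further down expects each lease line to carry five space-separated fields; a hedged illustration with made-up values:

# "<expiry-epoch-seconds> <mac> <ip> <hostname> <client_id>"
sample_lease = '1300217947 02:16:3e:4f:62:a1 10.0.0.3 i-00000001 *'
seconds, mac, ip, hostname, client_id = sample_lease.split(' ')
assert len(mac.split(':')) == 6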


@@ -276,7 +276,7 @@ def _db_error(caught_exception):
print caught_exception
print _("The above error may show that the database has not "
"been created.\nPlease create a database using "
"nova-manage sync db before running this command.")
"'nova-manage db sync' before running this command.")
exit(1)
@@ -437,6 +437,8 @@ class ProjectCommands(object):
"been created.\nPlease create a database by running a "
"nova-api server on this host.")
AccountCommands = ProjectCommands
class FixedIpCommands(object):
"""Class for managing fixed ip."""
@@ -558,6 +560,40 @@ class NetworkCommands(object):
db.network_delete_safe(context.get_admin_context(), network.id)
class VmCommands(object):
"""Class for mangaging VM instances."""
def live_migration(self, ec2_id, dest):
"""Migrates a running instance to a new machine.
:param ec2_id: instance id as shown by euca-describe-instances.
:param dest: destination host name.
"""
ctxt = context.get_admin_context()
instance_id = ec2utils.ec2_id_to_id(ec2_id)
if FLAGS.connection_type != 'libvirt':
msg = _('Only KVM is supported for now. Sorry!')
raise exception.Error(msg)
if (FLAGS.volume_driver != 'nova.volume.driver.AOEDriver' and \
FLAGS.volume_driver != 'nova.volume.driver.ISCSIDriver'):
msg = _("Support only AOEDriver and ISCSIDriver. Sorry!")
raise exception.Error(msg)
rpc.call(ctxt,
FLAGS.scheduler_topic,
{"method": "live_migration",
"args": {"instance_id": instance_id,
"dest": dest,
"topic": FLAGS.compute_topic}})
print _('Migration of %s initiated. '
'Check its progress using euca-describe-instances.') % ec2_id
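The ec2utils.ec2_id_to_id() call above turns an EC2-style id into the internal integer id. A sketch of what that conversion amounts to, assuming the zero-padded hex suffix euca tools print (the real helper lives in nova's ec2 API module):

def ec2_id_to_id(ec2_id):
    """'i-00000001' -> 1, treating the suffix as zero-padded hex."""
    return int(ec2_id.split('-')[-1], 16)


assert ec2_id_to_id('i-0000000a') == 10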
class ServiceCommands(object):
"""Enable and disable running services"""
@@ -602,6 +638,59 @@ class ServiceCommands(object):
return
db.service_update(ctxt, svc['id'], {'disabled': True})
def describe_resource(self, host):
"""Describes cpu/memory/hdd info for host.
:param host: hostname.
"""
result = rpc.call(context.get_admin_context(),
FLAGS.scheduler_topic,
{"method": "show_host_resources",
"args": {"host": host}})
if not isinstance(result, dict):
print _('An unexpected error has occurred.')
print _('[Result]'), result
else:
cpu = result['resource']['vcpus']
mem = result['resource']['memory_mb']
hdd = result['resource']['local_gb']
cpu_u = result['resource']['vcpus_used']
mem_u = result['resource']['memory_mb_used']
hdd_u = result['resource']['local_gb_used']
print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
print '%s(used)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
for p_id, val in result['usage'].items():
print '%s\t\t%s\t\t%s\t%s\t%s' % (host,
p_id,
val['vcpus'],
val['memory_mb'],
val['local_gb'])
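For reference, the payload shape describe_resource() expects back from the scheduler's show_host_resources (keys taken from the code above; the numbers are purely illustrative):

result = {
    'resource': {'vcpus': 16, 'memory_mb': 32768, 'local_gb': 100,
                 'vcpus_used': 2, 'memory_mb_used': 1024,
                 'local_gb_used': 10},
    'usage': {'project-01': {'vcpus': 1,
                             'memory_mb': 512,
                             'local_gb': 5}},
}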
def update_resource(self, host):
"""Updates available vcpu/memory/disk info for host.
:param host: hostname.
"""
ctxt = context.get_admin_context()
service_refs = db.service_get_all_by_host(ctxt, host)
if len(service_refs) <= 0:
raise exception.Invalid(_('%s does not exist.') % host)
service_refs = [s for s in service_refs if s['topic'] == 'compute']
if len(service_refs) <= 0:
raise exception.Invalid(_('%s is not a compute node.') % host)
rpc.call(ctxt,
db.queue_get_for(ctxt, FLAGS.compute_topic, host),
{"method": "update_available_resource"})
class LogCommands(object):
def request(self, request_id, logfile='/var/log/nova.log'):
@@ -898,6 +987,7 @@ class ImageCommands(object):
CATEGORIES = [
('user', UserCommands),
('account', AccountCommands),
('project', ProjectCommands),
('role', RoleCommands),
('shell', ShellCommands),
@@ -905,6 +995,7 @@ CATEGORIES = [
('fixed', FixedIpCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands),
('vm', VmCommands),
('service', ServiceCommands),
('log', LogCommands),
('db', DbCommands),


@@ -20,6 +20,7 @@ Tests For Compute
"""
import datetime
import mox
from nova import compute
from nova import context
@@ -27,15 +28,20 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.db.sqlalchemy import models
from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
class ComputeTestCase(test.TestCase):
@@ -83,6 +89,21 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project.id}
return db.security_group_create(self.context, values)
def _get_dummy_instance(self):
"""Get mock-return-value instance object
Use this when any testcase executed later than test_run_terminate
"""
vol1 = models.Volume()
vol1['id'] = 1
vol2 = models.Volume()
vol2['id'] = 2
instance_ref = models.Instance()
instance_ref['id'] = 1
instance_ref['volumes'] = [vol1, vol2]
instance_ref['hostname'] = 'i-00000001'
instance_ref['host'] = 'dummy'
return instance_ref
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -301,3 +322,256 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
type = instance_types.get_by_flavor_id("1")
self.assertEqual(type, 'm1.tiny')
def _setup_other_managers(self):
self.volume_manager = utils.import_object(FLAGS.volume_manager)
self.network_manager = utils.import_object(FLAGS.network_manager)
self.compute_driver = utils.import_object(FLAGS.compute_driver)
def test_pre_live_migration_instance_has_no_fixed_ip(self):
"""Confirm raising exception if instance doesn't have fixed_ip."""
instance_ref = self._get_dummy_instance()
c = context.get_admin_context()
i_id = instance_ref['id']
dbmock = self.mox.CreateMock(db)
dbmock.instance_get(c, i_id).AndReturn(instance_ref)
dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
self.compute.db = dbmock
self.mox.ReplayAll()
self.assertRaises(exception.NotFound,
self.compute.pre_live_migration,
c, instance_ref['id'])
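This test, like the rest of the migration tests in this file, follows mox's record/replay/verify cycle. A self-contained sketch of that pattern:

import mox


class Db(object):
    def instance_get(self, ctxt, instance_id):
        pass


m = mox.Mox()
dbmock = m.CreateMock(Db)
dbmock.instance_get('ctxt', 1).AndReturn({'id': 1})  # record an expectation
m.ReplayAll()                                        # switch to replay mode
assert dbmock.instance_get('ctxt', 1) == {'id': 1}   # satisfies it
m.VerifyAll()                                        # fails on unmet calls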
def test_pre_live_migration_instance_has_volume(self):
"""Confirm setup_compute_volume is called when volume is mounted."""
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
volmock = self.mox.CreateMock(self.volume_manager)
netmock = self.mox.CreateMock(self.network_manager)
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
for i in range(len(i_ref['volumes'])):
vid = i_ref['volumes'][i]['id']
volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
netmock.setup_compute_network(c, i_ref['id'])
drivermock.ensure_filtering_rules_for_instance(i_ref)
self.compute.db = dbmock
self.compute.volume_manager = volmock
self.compute.network_manager = netmock
self.compute.driver = drivermock
self.mox.ReplayAll()
ret = self.compute.pre_live_migration(c, i_ref['id'])
self.assertEqual(ret, None)
def test_pre_live_migration_instance_has_no_volume(self):
"""Confirm log meg when instance doesn't mount any volumes."""
i_ref = self._get_dummy_instance()
i_ref['volumes'] = []
c = context.get_admin_context()
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
netmock = self.mox.CreateMock(self.network_manager)
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
self.mox.StubOutWithMock(compute_manager.LOG, 'info')
compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
netmock.setup_compute_network(c, i_ref['id'])
drivermock.ensure_filtering_rules_for_instance(i_ref)
self.compute.db = dbmock
self.compute.network_manager = netmock
self.compute.driver = drivermock
self.mox.ReplayAll()
ret = self.compute.pre_live_migration(c, i_ref['id'])
self.assertEqual(ret, None)
def test_pre_live_migration_setup_compute_node_fail(self):
"""Confirm operation setup_compute_network() fails.
It retries and raise exception when timeout exceeded.
"""
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
netmock = self.mox.CreateMock(self.network_manager)
volmock = self.mox.CreateMock(self.volume_manager)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
for i in range(len(i_ref['volumes'])):
volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
for i in range(FLAGS.live_migration_retry_count):
netmock.setup_compute_network(c, i_ref['id']).\
AndRaise(exception.ProcessExecutionError())
self.compute.db = dbmock
self.compute.network_manager = netmock
self.compute.volume_manager = volmock
self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,
self.compute.pre_live_migration,
c, i_ref['id'])
def test_live_migration_works_correctly_with_volume(self):
"""Confirm check_for_export to confirm volume health check."""
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
dbmock = self.mox.CreateMock(db)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
"args": {'instance_id': i_ref['id']}})
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}})
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
self.compute.recover_live_migration)
self.compute.db = dbmock
self.mox.ReplayAll()
ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
self.assertEqual(ret, None)
def test_live_migration_dest_raises_exception(self):
"""Confirm exception when pre_live_migration fails."""
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
dbmock = self.mox.CreateMock(db)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
"args": {'instance_id': i_ref['id']}})
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}}).\
AndRaise(rpc.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
'state': power_state.RUNNING,
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})
self.compute.db = dbmock
self.mox.ReplayAll()
self.assertRaises(rpc.RemoteError,
self.compute.live_migration,
c, i_ref['id'], i_ref['host'])
def test_live_migration_dest_raises_exception_no_volume(self):
"""Same as above test(input pattern is different) """
i_ref = self._get_dummy_instance()
i_ref['volumes'] = []
c = context.get_admin_context()
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
dbmock = self.mox.CreateMock(db)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}}).\
AndRaise(rpc.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
'state': power_state.RUNNING,
'host': i_ref['host']})
self.compute.db = dbmock
self.mox.ReplayAll()
self.assertRaises(rpc.RemoteError,
self.compute.live_migration,
c, i_ref['id'], i_ref['host'])
def test_live_migration_works_correctly_no_volume(self):
"""Confirm live_migration() works as expected correctly."""
i_ref = self._get_dummy_instance()
i_ref['volumes'] = []
c = context.get_admin_context()
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
dbmock = self.mox.CreateMock(db)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call')
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}})
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
self.compute.recover_live_migration)
self.compute.db = dbmock
self.mox.ReplayAll()
ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
self.assertEqual(ret, None)
def test_post_live_migration_working_correctly(self):
"""Confirm post_live_migration() works as expected correctly."""
dest = 'desthost'
flo_addr = '1.2.1.2'
# Preparing data
c = context.get_admin_context()
instance_id = self._create_instance()
i_ref = db.instance_get(c, instance_id)
db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
'state': power_state.PAUSED})
v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
'instance_id': instance_id})
fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
flo_ref = db.floating_ip_create(c, {'address': flo_addr,
'fixed_ip_id': fix_ref['id']})
# reload is necessary before setting mocks
i_ref = db.instance_get(c, instance_id)
# Preparing mocks
self.mox.StubOutWithMock(self.compute.volume_manager,
'remove_compute_volume')
for v in i_ref['volumes']:
self.compute.volume_manager.remove_compute_volume(c, v['id'])
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref)
# executing
self.mox.ReplayAll()
ret = self.compute.post_live_migration(c, i_ref, dest)
# make sure all data is rewritten to dest
i_ref = db.instance_get(c, i_ref['id'])
c1 = (i_ref['host'] == dest)
flo_refs = db.floating_ip_get_all_by_host(c, dest)
c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)
# post operation
self.assertTrue(c1 and c2)
db.instance_destroy(c, instance_id)
db.volume_destroy(c, v_ref['id'])
db.floating_ip_destroy(c, flo_addr)


@@ -24,18 +24,19 @@ from nova.utils import parse_mailmap, str_dict_replace, synchronized
class ProjectTestCase(test.TestCase):
def test_authors_up_to_date(self):
if os.path.exists('.bzr'):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
if os.path.exists(os.path.join(topdir, '.bzr')):
contributors = set()
mailmap = parse_mailmap('.mailmap')
mailmap = parse_mailmap(os.path.join(topdir, '.mailmap'))
import bzrlib.workingtree
tree = bzrlib.workingtree.WorkingTree.open('.')
tree = bzrlib.workingtree.WorkingTree.open(topdir)
tree.lock_read()
try:
parents = tree.get_parent_ids()
g = tree.branch.repository.get_graph()
for p in parents[1:]:
for p in parents:
rev_ids = [r for r, _ in g.iter_ancestry(parents)
if r != "null:"]
revs = tree.branch.repository.get_revisions(rev_ids)
@@ -44,7 +45,8 @@ class ProjectTestCase(test.TestCase):
email = author.split(' ')[-1]
contributors.add(str_dict_replace(email, mailmap))
authors_file = open('Authors', 'r').read()
authors_file = open(os.path.join(topdir, 'Authors'),
'r').read()
missing = set()
for contributor in contributors:
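The str_dict_replace() helper imported at the top of this file canonicalizes the author emails through the mailmap; a minimal sketch of what it plausibly does, inferred from its usage here:

def str_dict_replace(s, mapping):
    """Replace each occurrence of a mapping key in s with its value."""
    for key, value in mapping.items():
        s = s.replace(key, value)
    return s


assert str_dict_replace('<mordred@hudson>',
                        {'<mordred@hudson>': '<mordred@inaugust.com>'}) == \
    '<mordred@inaugust.com>'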


@@ -20,6 +20,7 @@ Unit Tests for network code
"""
import IPy
import os
import time
from nova import context
from nova import db
@@ -463,6 +464,31 @@ class NetworkTestCase(test.TestCase):
network['id'])
self.assertEqual(ip_count, num_available_ips)
def test_dhcp_lease_output(self):
admin_ctxt = context.get_admin_context()
address = self._create_address(0, self.instance_id)
lease_ip(address)
network_ref = db.network_get_by_instance(admin_ctxt, self.instance_id)
leases = linux_net.get_dhcp_leases(context.get_admin_context(),
network_ref['id'])
for line in leases.split('\n'):
seconds, mac, ip, hostname, client_id = line.split(' ')
self.assertTrue(int(seconds) > time.time(), 'Lease expires in '
'the past')
octets = mac.split(':')
self.assertEqual(len(octets), 6, "Wrong number of octets "
"in %s" % (mac,))
for octet in octets:
self.assertEqual(len(octet), 2, "Oddly sized octet: %s"
% (octet,))
# This will throw an exception if the octet is invalid
int(octet, 16)
# And this will raise an exception in case of an invalid IP
IPy.IP(ip)
release_ip(address)
def is_allocated_in_project(address, project_id):
"""Returns true if address is in specified project"""


@@ -20,10 +20,12 @@ Tests For Scheduler
"""
import datetime
import mox
from mox import IgnoreArg
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import service
from nova import test
@@ -32,11 +34,14 @@ from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import manager
from nova.scheduler import driver
from nova.compute import power_state
from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
flags.DECLARE('stub_network', 'nova.compute.manager')
flags.DECLARE('instances_path', 'nova.compute.manager')
class TestDriver(driver.Scheduler):
@@ -54,6 +59,34 @@ class SchedulerTestCase(test.TestCase):
super(SchedulerTestCase, self).setUp()
self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
ctxt = context.get_admin_context()
dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
'report_count': 0, 'availability_zone': 'dummyzone'}
s_ref = db.service_create(ctxt, dic)
dic = {'service_id': s_ref['id'],
'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
'cpu_info': ''}
db.compute_node_create(ctxt, dic)
return db.service_get(ctxt, s_ref['id'])
def _create_instance(self, **kwargs):
"""Create a test instance"""
ctxt = context.get_admin_context()
inst = {}
inst['user_id'] = 'admin'
inst['project_id'] = kwargs.get('project_id', 'fake')
inst['host'] = kwargs.get('host', 'dummy')
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['memory_mb'] = kwargs.get('memory_mb', 10)
inst['local_gb'] = kwargs.get('local_gb', 20)
return db.instance_create(ctxt, inst)
def test_fallback(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
@@ -76,6 +109,73 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
scheduler.named_method(ctxt, 'topic', num=7)
def test_show_host_resources_host_not_exit(self):
"""A host given as an argument does not exists."""
scheduler = manager.SchedulerManager()
dest = 'dummydest'
ctxt = context.get_admin_context()
try:
scheduler.show_host_resources(ctxt, dest)
except exception.NotFound, e:
c1 = (e.message.find(_("does not exist or is not a "
"compute node.")) >= 0)
self.assertTrue(c1)
def _dic_is_equal(self, dic1, dic2, keys=None):
"""Compares 2 dictionary contents(Helper method)"""
if not keys:
keys = ['vcpus', 'memory_mb', 'local_gb',
'vcpus_used', 'memory_mb_used', 'local_gb_used']
for key in keys:
if not (dic1[key] == dic2[key]):
return False
return True
def test_show_host_resources_no_project(self):
"""No instance are running on the given host."""
scheduler = manager.SchedulerManager()
ctxt = context.get_admin_context()
s_ref = self._create_compute_service()
result = scheduler.show_host_resources(ctxt, s_ref['host'])
# result checking
c1 = ('resource' in result and 'usage' in result)
compute_node = s_ref['compute_node'][0]
c2 = self._dic_is_equal(result['resource'], compute_node)
c3 = result['usage'] == {}
self.assertTrue(c1 and c2 and c3)
db.service_destroy(ctxt, s_ref['id'])
def test_show_host_resources_works_correctly(self):
"""Show_host_resources() works correctly as expected."""
scheduler = manager.SchedulerManager()
ctxt = context.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host'])
i_ref2 = self._create_instance(project_id='p-02', vcpus=3,
host=s_ref['host'])
result = scheduler.show_host_resources(ctxt, s_ref['host'])
c1 = ('resource' in result and 'usage' in result)
compute_node = s_ref['compute_node'][0]
c2 = self._dic_is_equal(result['resource'], compute_node)
c3 = result['usage'].keys() == ['p-01', 'p-02']
keys = ['vcpus', 'memory_mb', 'local_gb']
c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys)
c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys)
self.assertTrue(c1 and c2 and c3 and c4 and c5)
db.service_destroy(ctxt, s_ref['id'])
db.instance_destroy(ctxt, i_ref1['id'])
db.instance_destroy(ctxt, i_ref2['id'])
class ZoneSchedulerTestCase(test.TestCase):
"""Test case for zone scheduler"""
@@ -161,9 +261,15 @@ class SimpleDriverTestCase(test.TestCase):
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['ami_launch_index'] = 0
inst['vcpus'] = 1
inst['availability_zone'] = kwargs.get('availability_zone', None)
inst['host'] = kwargs.get('host', 'dummy')
inst['memory_mb'] = kwargs.get('memory_mb', 20)
inst['local_gb'] = kwargs.get('local_gb', 30)
inst['launched_on'] = kwargs.get('launched_on', 'dummy')
inst['state_description'] = kwargs.get('state_description', 'running')
inst['state'] = kwargs.get('state', power_state.RUNNING)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -173,6 +279,211 @@ class SimpleDriverTestCase(test.TestCase):
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']
def _create_compute_service(self, **kwargs):
"""Create a compute service."""
dic = {'binary': 'nova-compute', 'topic': 'compute',
'report_count': 0, 'availability_zone': 'dummyzone'}
dic['host'] = kwargs.get('host', 'dummy')
s_ref = db.service_create(self.context, dic)
if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
t = datetime.datetime.utcnow() - datetime.timedelta(0)
dic['created_at'] = kwargs.get('created_at', t)
dic['updated_at'] = kwargs.get('updated_at', t)
db.service_update(self.context, s_ref['id'], dic)
dic = {'service_id': s_ref['id'],
'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'local_gb_used': 10,
'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
'cpu_info': ''}
dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)
dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')
dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)
db.compute_node_create(self.context, dic)
return db.service_get(self.context, s_ref['id'])
def test_doesnt_report_disabled_hosts_as_up(self):
"""Ensures driver doesn't find hosts before they are enabled"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
db.service_update(self.context, s2['id'], {'disabled': True})
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(0, len(hosts))
compute1.kill()
compute2.kill()
def test_reports_enabled_hosts_as_up(self):
"""Ensures driver can find the hosts that are up"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(2, len(hosts))
compute1.kill()
compute2.kill()
def test_least_busy_host_gets_instance(self):
"""Ensures the host with less cores gets the next one"""
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
instance_id2 = self._create_instance()
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual(host, 'host2')
compute1.terminate_instance(self.context, instance_id1)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
compute2.kill()
def test_specific_host_gets_instance(self):
"""Ensures if you set availability_zone it launches on that zone"""
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
instance_id2 = self._create_instance(availability_zone='nova:host1')
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual('host1', host)
compute1.terminate_instance(self.context, instance_id1)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
compute2.kill()
def test_wont_schedule_if_specified_host_is_down(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
now = datetime.datetime.utcnow()
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
instance_id2 = self._create_instance(availability_zone='nova:host1')
self.assertRaises(driver.WillNotSchedule,
self.scheduler.driver.schedule_run_instance,
self.context,
instance_id2)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
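The test above ages updated_at by twice service_down_time so the host reads as down. A sketch of the liveness rule this implies (the real check lives elsewhere in nova; the flag value here is an assumed stand-in):

import datetime

SERVICE_DOWN_TIME = 60  # assumed seconds; stands in for FLAGS.service_down_time


def service_is_up(updated_at):
    """A service counts as up if it reported within the window."""
    elapsed = datetime.datetime.utcnow() - updated_at
    return elapsed < datetime.timedelta(seconds=SERVICE_DOWN_TIME)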
def test_will_schedule_on_disabled_host_if_specified(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
instance_id2 = self._create_instance(availability_zone='nova:host1')
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual('host1', host)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
def test_too_many_cores(self):
"""Ensures we don't go over max cores"""
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
instance_ids1 = []
instance_ids2 = []
for index in xrange(FLAGS.max_cores):
instance_id = self._create_instance()
compute1.run_instance(self.context, instance_id)
instance_ids1.append(instance_id)
instance_id = self._create_instance()
compute2.run_instance(self.context, instance_id)
instance_ids2.append(instance_id)
instance_id = self._create_instance()
self.assertRaises(driver.NoValidHost,
self.scheduler.driver.schedule_run_instance,
self.context,
instance_id)
for instance_id in instance_ids1:
compute1.terminate_instance(self.context, instance_id)
for instance_id in instance_ids2:
compute2.terminate_instance(self.context, instance_id)
compute1.kill()
compute2.kill()
def test_least_busy_host_gets_volume(self):
"""Ensures the host with less gigabytes gets the next one"""
volume1 = service.Service('host1',
'nova-volume',
'volume',
FLAGS.volume_manager)
volume1.start()
volume2 = service.Service('host2',
'nova-volume',
'volume',
FLAGS.volume_manager)
volume2.start()
volume_id1 = self._create_volume()
volume1.create_volume(self.context, volume_id1)
volume_id2 = self._create_volume()
host = self.scheduler.driver.schedule_create_volume(self.context,
volume_id2)
self.assertEqual(host, 'host2')
volume1.delete_volume(self.context, volume_id1)
db.volume_destroy(self.context, volume_id2)
def test_doesnt_report_disabled_hosts_as_up(self):
"""Ensures driver doesn't find hosts before they are enabled"""
compute1 = self.start_service('compute', host='host1')
@@ -316,3 +627,313 @@ class SimpleDriverTestCase(test.TestCase):
volume2.delete_volume(self.context, volume_id)
volume1.kill()
volume2.kill()
def test_scheduler_live_migration_with_volume(self):
"""scheduler_live_migration() works correctly as expected.
Also, checks instance state is changed from 'running' -> 'migrating'.
"""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
dic = {'instance_id': instance_id, 'size': 1}
v_ref = db.volume_create(self.context, dic)
# cannot check the 2nd argument because the address of the
# instance object differs between the mock and the actual call.
driver_i = self.scheduler.driver
nocare = mox.IgnoreArg()
self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
driver_i._live_migration_src_check(nocare, nocare)
driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
rpc.cast(self.context,
db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
{"method": 'live_migration', "args": kwargs})
self.mox.ReplayAll()
self.scheduler.live_migration(self.context, FLAGS.compute_topic,
instance_id=instance_id,
dest=i_ref['host'])
i_ref = db.instance_get(self.context, instance_id)
self.assertTrue(i_ref['state_description'] == 'migrating')
db.instance_destroy(self.context, instance_id)
db.volume_destroy(self.context, v_ref['id'])
def test_live_migration_src_check_instance_not_running(self):
"""The instance given by instance_id is not running."""
instance_id = self._create_instance(state_description='migrating')
i_ref = db.instance_get(self.context, instance_id)
try:
self.scheduler.driver._live_migration_src_check(self.context,
i_ref)
except exception.Invalid, e:
c = (e.message.find('is not running') > 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
def test_live_migration_src_check_volume_node_not_alive(self):
"""Raise exception when volume node is not alive."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
v_ref = db.volume_create(self.context, {'instance_id': instance_id,
'size': 1})
t1 = datetime.datetime.utcnow() - datetime.timedelta(1)
dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
'topic': 'volume', 'report_count': 0}
s_ref = db.service_create(self.context, dic)
try:
self.scheduler.driver.schedule_live_migration(self.context,
instance_id,
i_ref['host'])
except exception.Invalid, e:
c = (e.message.find('volume node is not alive') >= 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.volume_destroy(self.context, v_ref['id'])
def test_live_migration_src_check_compute_node_not_alive(self):
"""Confirms src-compute node is alive."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
t = datetime.datetime.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t, updated_at=t,
host=i_ref['host'])
try:
self.scheduler.driver._live_migration_src_check(self.context,
i_ref)
except exception.Invalid, e:
c = (e.message.find('is not alive') >= 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
def test_live_migration_src_check_works_correctly(self):
"""Confirms this method finishes with no error."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host=i_ref['host'])
ret = self.scheduler.driver._live_migration_src_check(self.context,
i_ref)
self.assertTrue(ret == None)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
def test_live_migration_dest_check_not_alive(self):
"""Confirms exception raises in case dest host does not exist."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
t = datetime.datetime.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t, updated_at=t,
host=i_ref['host'])
try:
self.scheduler.driver._live_migration_dest_check(self.context,
i_ref,
i_ref['host'])
except exception.Invalid, e:
c = (e.message.find('is not alive') >= 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
def test_live_migration_dest_check_service_same_host(self):
"""Confirms exceptioin raises in case dest and src is same host."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host=i_ref['host'])
try:
self.scheduler.driver._live_migration_dest_check(self.context,
i_ref,
i_ref['host'])
except exception.Invalid, e:
c = (e.message.find('choose other host') >= 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
def test_live_migration_dest_check_service_lack_memory(self):
"""Confirms exception raises when dest doesn't have enough memory."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host='somewhere',
memory_mb_used=12)
try:
self.scheduler.driver._live_migration_dest_check(self.context,
i_ref,
'somewhere')
except exception.NotEmpty, e:
c = (e.message.find('Unable to migrate') >= 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
def test_live_migration_dest_check_service_works_correctly(self):
"""Confirms method finishes with no error."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host='somewhere',
memory_mb_used=5)
ret = self.scheduler.driver._live_migration_dest_check(self.context,
i_ref,
'somewhere')
self.assertTrue(ret == None)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
def test_live_migration_common_check_service_orig_not_exists(self):
"""Destination host does not exist."""
dest = 'dummydest'
# mocks for live_migration_common_check()
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
host=dest)
# mocks for mounted_on_same_shared_storage()
fpath = '/test/20110127120000'
self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
topic = FLAGS.compute_topic
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(self.context, topic, dest),
{"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
{"method": 'check_shared_storage_test_file',
"args": {'filename': fpath}})
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(mox.IgnoreArg(), topic, dest),
{"method": 'cleanup_shared_storage_test_file',
"args": {'filename': fpath}})
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_common_check(self.context,
i_ref,
dest)
except exception.Invalid, e:
c = (e.message.find('does not exist') >= 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
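The three rpc calls mocked above spell out the shared-storage probe: the destination writes a marker file under its instances_path, the source host checks it can see the same file, and the destination cleans it up. A hedged sketch of that sequence (topics and method names are taken from the mocks; the wrapper itself is an assumption):

from nova import db
from nova import flags
from nova import rpc

FLAGS = flags.FLAGS


def mounted_on_same_shared_storage(context, instance_ref, dest):
    """Probe whether src and dest share the same instances_path."""
    topic = FLAGS.compute_topic
    filename = rpc.call(context,
                        db.queue_get_for(context, topic, dest),
                        {"method": 'create_shared_storage_test_file'})
    try:
        # Raises on the source host if the marker file is not visible.
        rpc.call(context,
                 db.queue_get_for(context, topic, instance_ref['host']),
                 {"method": 'check_shared_storage_test_file',
                  "args": {'filename': filename}})
    finally:
        rpc.call(context,
                 db.queue_get_for(context, topic, dest),
                 {"method": 'cleanup_shared_storage_test_file',
                  "args": {'filename': filename}})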
def test_live_migration_common_check_service_different_hypervisor(self):
"""Original host and dest host has different hypervisor type."""
dest = 'dummydest'
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
# compute service for the original host
s_ref = self._create_compute_service(host=i_ref['host'])
# compute service for the destination
s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen')
# mocks
driver = self.scheduler.driver
self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_common_check(self.context,
i_ref,
dest)
except exception.Invalid, e:
c = (e.message.find(_('Different hypervisor type')) >= 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
def test_live_migration_common_check_service_different_version(self):
"""Original host and dest host has different hypervisor version."""
dest = 'dummydest'
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
# compute service for the original host
s_ref = self._create_compute_service(host=i_ref['host'])
# compute service for the destination
s_ref2 = self._create_compute_service(host=dest,
hypervisor_version=12002)
# mocks
driver = self.scheduler.driver
self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_common_check(self.context,
i_ref,
dest)
except exception.Invalid, e:
c = (e.message.find(_('Older hypervisor version')) >= 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
def test_live_migration_common_check_checking_cpuinfo_fail(self):
"""Raise excetion when original host doen't have compatible cpu."""
dest = 'dummydest'
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
# compute service for the original host
s_ref = self._create_compute_service(host=i_ref['host'])
# compute service for the destination
s_ref2 = self._create_compute_service(host=dest)
# mocks
driver = self.scheduler.driver
self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
{"method": 'compare_cpu',
"args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_common_check(self.context,
i_ref,
dest)
except rpc.RemoteError, e:
c = (e.message.find(_("doesn't have compatibility to")) >= 0)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])


@@ -14,22 +14,29 @@
# License for the specific language governing permissions and limitations
# under the License.
import re
import os
import eventlet
import mox
import os
import re
import sys
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.db.sqlalchemy import models
from nova.virt import libvirt_conn
libvirt = None
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
@@ -104,11 +111,28 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
try:
pjs = self.manager.get_projects()
pjs = [p for p in pjs if p.name == 'fake']
if 0 != len(pjs):
self.manager.delete_project(pjs[0])
users = self.manager.get_users()
users = [u for u in users if u.name == 'fake']
if 0 != len(users):
self.manager.delete_user(users[0])
except Exception, e:
pass
users = self.manager.get_users()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
self.context = context.get_admin_context()
FLAGS.instances_path = ''
self.call_libvirt_dependant_setup = False
test_ip = '10.11.12.13'
test_instance = {'memory_kb': '1024000',
@@ -120,6 +144,58 @@ class LibvirtConnTestCase(test.TestCase):
'bridge': 'br101',
'instance_type': 'm1.small'}
def lazy_load_library_exists(self):
"""check if libvirt is available."""
# try to connect libvirt. if fail, skip test.
try:
import libvirt
import libxml2
except ImportError:
return False
global libvirt
libvirt = __import__('libvirt')
libvirt_conn.libvirt = __import__('libvirt')
libvirt_conn.libxml2 = __import__('libxml2')
return True
def create_fake_libvirt_mock(self, **kwargs):
"""Defining mocks for LibvirtConnection(libvirt is not used)."""
# A fake libvirt.virConnect
class FakeLibvirtConnection(object):
pass
# A fake libvirt_conn.IptablesFirewallDriver
class FakeIptablesFirewallDriver(object):
def __init__(self, **kwargs):
pass
def setattr(self, key, val):
self.__setattr__(key, val)
# Creating mocks
fake = FakeLibvirtConnection()
fakeip = FakeIptablesFirewallDriver
# Customizing above fake if necessary
for key, val in kwargs.items():
fake.__setattr__(key, val)
# Inevitable mocks for libvirt_conn.LibvirtConnection
self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
libvirt_conn.LibvirtConnection._conn = fake
def create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0,
'availability_zone': 'zone'}
return db.service_create(context.get_admin_context(), service_ref)
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -259,8 +335,8 @@ class LibvirtConnTestCase(test.TestCase):
expected_result,
'%s failed common check %d' % (xml, i))
# This test is supposed to make sure we don't override a specifically
# set uri
# This test is supposed to make sure we don't
# override a specifically set uri
#
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
@@ -274,6 +350,150 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(uri, testuri)
db.instance_destroy(user_context, instance_ref['id'])
def test_update_available_resource_works_correctly(self):
"""Confirm compute_node table is updated successfully."""
org_path = FLAGS.instances_path
FLAGS.instances_path = '.'
# Prepare mocks
def getVersion():
return 12003
def getType():
return 'qemu'
def listDomainsID():
return []
service_ref = self.create_service(host='dummy')
self.create_fake_libvirt_mock(getVersion=getVersion,
getType=getType,
listDomainsID=listDomainsID)
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
'get_cpu_info')
libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
# Start test
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
conn.update_available_resource(self.context, 'dummy')
service_ref = db.service_get(self.context, service_ref['id'])
compute_node = service_ref['compute_node'][0]
if sys.platform.upper() == 'LINUX2':
self.assertTrue(compute_node['vcpus'] >= 0)
self.assertTrue(compute_node['memory_mb'] > 0)
self.assertTrue(compute_node['local_gb'] > 0)
self.assertTrue(compute_node['vcpus_used'] == 0)
self.assertTrue(compute_node['memory_mb_used'] > 0)
self.assertTrue(compute_node['local_gb_used'] > 0)
self.assertTrue(len(compute_node['hypervisor_type']) > 0)
self.assertTrue(compute_node['hypervisor_version'] > 0)
else:
self.assertTrue(compute_node['vcpus'] >= 0)
self.assertTrue(compute_node['memory_mb'] == 0)
self.assertTrue(compute_node['local_gb'] > 0)
self.assertTrue(compute_node['vcpus_used'] == 0)
self.assertTrue(compute_node['memory_mb_used'] == 0)
self.assertTrue(compute_node['local_gb_used'] > 0)
self.assertTrue(len(compute_node['hypervisor_type']) > 0)
self.assertTrue(compute_node['hypervisor_version'] > 0)
db.service_destroy(self.context, service_ref['id'])
FLAGS.instances_path = org_path
def test_update_resource_info_no_compute_record_found(self):
"""Raise exception if no recorde found on services table."""
org_path = FLAGS.instances_path
FLAGS.instances_path = '.'
self.create_fake_libvirt_mock()
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
self.assertRaises(exception.Invalid,
conn.update_available_resource,
self.context, 'dummy')
FLAGS.instances_path = org_path
def test_ensure_filtering_rules_for_instance_timeout(self):
"""ensure_filtering_fules_for_instance() finishes with timeout."""
# Skip if non-libvirt environment
if not self.lazy_load_library_exists():
return
# Preparing mocks
def fake_none(self):
return
def fake_raise(self):
raise libvirt.libvirtError('ERR')
self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
instance_ref = db.instance_create(self.context, self.test_instance)
# Start test
self.mox.ReplayAll()
try:
conn = libvirt_conn.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref)
except exception.Error, e:
c1 = (0 <= e.message.find('Timeout migrating for'))
self.assertTrue(c1)
db.instance_destroy(self.context, instance_ref['id'])
def test_live_migration_raises_exception(self):
"""Confirms recover method is called when exceptions are raised."""
# Skip if non-libvirt environment
if not self.lazy_load_library_exists():
return
# Preparing data
self.compute = utils.import_object(FLAGS.compute_manager)
instance_dict = {'host': 'fake', 'state': power_state.RUNNING,
'state_description': 'running'}
instance_ref = db.instance_create(self.context, self.test_instance)
instance_ref = db.instance_update(self.context, instance_ref['id'],
instance_dict)
vol_dict = {'status': 'migrating', 'size': 1}
volume_ref = db.volume_create(self.context, vol_dict)
db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
'/dev/fake')
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI")
vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
mox.IgnoreArg(),
None, FLAGS.live_migration_bandwidth).\
AndRaise(libvirt.libvirtError('ERR'))
def fake_lookup(instance_name):
if instance_name == instance_ref.name:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
# Start test
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', '',
self.compute.recover_live_migration)
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertTrue(instance_ref['state_description'] == 'running')
self.assertTrue(instance_ref['state'] == power_state.RUNNING)
volume_ref = db.volume_get(self.context, volume_ref['id'])
self.assertTrue(volume_ref['status'] == 'in-use')
db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['id'])
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
@@ -308,7 +528,7 @@ class IptablesFirewallTestCase(test.TestCase):
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]'
':POSTROUTING ACCEPT [5063:386098]',
]
in_filter_rules = [


@@ -20,6 +20,8 @@ Tests for Volume Code.
"""
import cStringIO
from nova import context
from nova import exception
from nova import db
@@ -173,3 +175,196 @@ class VolumeTestCase(test.TestCase):
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
driver_name = "nova.volume.driver.FakeAOEDriver"
def setUp(self):
super(DriverTestCase, self).setUp()
self.flags(volume_driver=self.driver_name,
logging_default_format_string="%(message)s")
self.volume = utils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
self.output = ""
def _fake_execute(_command, *_args, **_kwargs):
"""Fake _execute."""
return self.output, None
self.volume.driver._execute = _fake_execute
self.volume.driver._sync_execute = _fake_execute
log = logging.getLogger()
self.stream = cStringIO.StringIO()
log.addHandler(logging.StreamHandler(self.stream))
inst = {}
self.instance_id = db.instance_create(self.context, inst)['id']
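The check_for_export tests below assert on log output captured through the StreamHandler attached above; the same pattern in isolation:

import cStringIO
import logging

stream = cStringIO.StringIO()
logging.getLogger().addHandler(logging.StreamHandler(stream))
logging.getLogger().error('vblade process for e1.1 is not running')
assert 'vblade' in stream.getvalue()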
def tearDown(self):
super(DriverTestCase, self).tearDown()
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
return []
def _detach_volume(self, volume_id_list):
"""Detach volumes from an instance."""
for volume_id in volume_id_list:
db.volume_detached(self.context, volume_id)
self.volume.delete_volume(self.context, volume_id)
class AOETestCase(DriverTestCase):
"""Test Case for AOEDriver"""
driver_name = "nova.volume.driver.AOEDriver"
def setUp(self):
super(AOETestCase, self).setUp()
def tearDown(self):
super(AOETestCase, self).tearDown()
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
volume_id_list = []
for index in xrange(3):
vol = {}
vol['size'] = 0
volume_id = db.volume_create(self.context,
vol)['id']
self.volume.create_volume(self.context, volume_id)
# each volume has a different mountpoint
mountpoint = "/dev/sd" + chr((ord('b') + index))
db.volume_attached(self.context, volume_id, self.instance_id,
mountpoint)
(shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
volume_id)
self.output += "%s %s eth0 /dev/nova-volumes/vol-foo auto run\n" \
% (shelf_id, blade_id)
volume_id_list.append(volume_id)
return volume_id_list
def test_check_for_export_with_no_volume(self):
"""No log message when no volume is attached to an instance."""
self.stream.truncate(0)
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_vblade_processes(self):
"""No log message when all the vblade processes are running."""
volume_id_list = self._attach_volume()
self.stream.truncate(0)
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
self._detach_volume(volume_id_list)
def test_check_for_export_with_vblade_process_missing(self):
"""Output a warning message when some vblade processes aren't
running."""
volume_id_list = self._attach_volume()
# the first vblade process isn't running
self.output = self.output.replace("run", "down", 1)
(shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
volume_id_list[0])
msg_is_match = False
self.stream.truncate(0)
try:
self.volume.check_for_export(self.context, self.instance_id)
except exception.ProcessExecutionError, e:
volume_id = volume_id_list[0]
msg = _("Cannot confirm exported volume id:%(volume_id)s. "
"vblade process for e%(shelf_id)s.%(blade_id)s "
"isn't running.") % locals()
msg_is_match = (0 <= e.message.find(msg))
self.assertTrue(msg_is_match)
self._detach_volume(volume_id_list)
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""
driver_name = "nova.volume.driver.ISCSIDriver"
def setUp(self):
super(ISCSITestCase, self).setUp()
def tearDown(self):
super(ISCSITestCase, self).tearDown()
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
volume_id_list = []
for index in xrange(3):
vol = {}
vol['size'] = 0
vol_ref = db.volume_create(self.context, vol)
self.volume.create_volume(self.context, vol_ref['id'])
vol_ref = db.volume_get(self.context, vol_ref['id'])
# each volume has a different mountpoint
mountpoint = "/dev/sd" + chr((ord('b') + index))
db.volume_attached(self.context, vol_ref['id'], self.instance_id,
mountpoint)
volume_id_list.append(vol_ref['id'])
return volume_id_list
def test_check_for_export_with_no_volume(self):
"""No log message when no volume is attached to an instance."""
self.stream.truncate(0)
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_volume_exported(self):
"""No log message when all the vblade processes are running."""
volume_id_list = self._attach_volume()
self.mox.StubOutWithMock(self.volume.driver, '_execute')
for i in volume_id_list:
tid = db.volume_get_iscsi_target_num(self.context, i)
self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
% locals())
self.stream.truncate(0)
self.mox.ReplayAll()
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
def test_check_for_export_with_some_volume_missing(self):
"""Output a warning message when some volumes are not recognied
by ietd."""
volume_id_list = self._attach_volume()
# the first volume's iscsi target is missing
tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
self.mox.StubOutWithMock(self.volume.driver, '_execute')
self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
% locals()).AndRaise(exception.ProcessExecutionError())
self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,
self.volume.check_for_export,
self.context,
self.instance_id)
msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
self.assertTrue(0 <= self.stream.getvalue().find(msg))
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
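Read together, the two ISCSI export tests pin down roughly this behavior for check_for_export: ask ietd about each attached volume's target and re-raise (after logging) when one is missing. A hedged sketch; the db helper names are borrowed from elsewhere in nova, and the exact manager/driver split is an assumption:

from nova import db
from nova import exception
from nova import log as logging

LOG = logging.getLogger('nova.volume.manager')


def check_for_export(context, instance_id, execute):
    """Verify every volume attached to instance_id is still exported."""
    for volume in db.volume_get_all_by_instance(context, instance_id):
        tid = db.volume_get_iscsi_target_num(context, volume['id'])
        try:
            execute("sudo ietadm --op show --tid=%(tid)d" % locals())
        except exception.ProcessExecutionError:
            LOG.error(_("Cannot confirm exported volume id:%s.")
                      % volume['id'])
            raise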


@@ -18,6 +18,7 @@
Test suite for XenAPI
"""
import functools
import stubout
from nova import db
@@ -41,6 +42,21 @@ from nova.tests.glance import stubs as glance_stubs
FLAGS = flags.FLAGS
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
vm_utils.with_vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here
vm_utils.with_vdi_attached_here = lambda *x: should_return
function(self, *args, **kwargs)
vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here
return decorated_function
class XenAPIVolumeTestCase(test.TestCase):
"""
Unit tests for Volume operations
@@ -62,7 +78,7 @@ class XenAPIVolumeTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
}
'os_type': 'linux'}
def _create_volume(self, size='0'):
"""Create a volume object."""
@@ -219,7 +235,7 @@ class XenAPIVMTestCase(test.TestCase):
check()
def check_vm_record(self, conn):
def create_vm_record(self, conn, os_type):
instances = conn.list_instances()
self.assertEquals(instances, [1])
@@ -231,28 +247,63 @@ class XenAPIVMTestCase(test.TestCase):
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn):
# Check that m1.large above turned into the right thing.
instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
self.assertEquals(vm_info['max_mem'], mem_kib)
self.assertEquals(vm_info['mem'], mem_kib)
self.assertEquals(vm['memory_static_max'], mem_bytes)
self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
self.assertEquals(vm['VCPUs_max'], str(vcpus))
self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
self.assertEquals(self.vm_info['max_mem'], mem_kib)
self.assertEquals(self.vm_info['mem'], mem_kib)
self.assertEquals(self.vm['memory_static_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
self.assertEquals(vm_info['state'], power_state.RUNNING)
self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEquals(vm['power_state'], 'Running')
self.assertEquals(self.vm['power_state'], 'Running')
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], '')
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEquals(self.vm['PV_kernel'], '')
self.assertNotEquals(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
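Taken together, the three checkers encode how os_type is expected to shape the XenAPI VM record; summarized as data (values copied straight from the assertions above, nothing else implied):

EXPECTED_VM_PARAMS = {
    'windows': {'platform/nx': 'true',
                'HVM_boot_params': {'order': 'dc'},
                'HVM_boot_policy': 'BIOS order',
                'PV_args': '', 'PV_bootloader': ''},
    'linux (pygrub)': {'platform/nx': 'false',
                       'PV_args': 'clocksource=jiffies',
                       'PV_bootloader': 'pygrub',
                       'HVM_boot_params': {}, 'HVM_boot_policy': ''},
    'linux (external kernel)': {'platform/nx': 'false',
                                'PV_args': 'root=/dev/xvda1',
                                'HVM_boot_params': {},
                                'HVM_boot_policy': ''},
}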
def _test_spawn(self, image_id, kernel_id, ramdisk_id,
instance_type="m1.large"):
instance_type="m1.large", os_type="linux"):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
values = {'name': 1,
'id': 1,
@@ -263,10 +314,12 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': ramdisk_id,
'instance_type': instance_type,
'mac_address': 'aa:bb:cc:dd:ee:ff',
}
'os_type': os_type}
conn = xenapi_conn.get_connection(False)
instance = db.instance_create(values)
conn.spawn(instance)
self.create_vm_record(conn, os_type)
self.check_vm_record(conn)
def test_spawn_not_enough_memory(self):
@@ -283,24 +336,37 @@ class XenAPIVMTestCase(test.TestCase):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, 2, 3)
@stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
self.check_vm_params_for_linux()
def test_spawn_vhd_glance(self):
def test_spawn_vhd_glance_linux(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_windows(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.vm_info = None
self.vm = None
self.stubs.UnsetAll()
def _create_instance(self):
@@ -314,7 +380,8 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff'}
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
instance = db.instance_create(values)
self.conn.spawn(instance)
return instance
@@ -372,7 +439,8 @@ class XenAPIMigrateInstance(test.TestCase):
'ramdisk_id': None,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
}
'os_type': 'linux'}
stubs.stub_out_migration_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
@@ -410,6 +478,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type(