Merge request candidate version.
1. An iSCSI checker is added. 2. pep8 fixes, etc.
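The iSCSI checker itself is a small guard in nova-manage's live-migration path. A minimal, self-contained sketch of that guard follows; the constant and function names are illustrative stand-ins only (the real code, visible in the diff further down, uses FLAGS.volume_driver and exception.Error):

SUPPORTED_VOLUME_DRIVERS = ('nova.volume.driver.AOEDriver',
                            'nova.volume.driver.ISCSIDriver')


def check_volume_driver(volume_driver):
    # Reject live migration when the configured volume driver is unsupported.
    if volume_driver not in SUPPORTED_VOLUME_DRIVERS:
        raise RuntimeError("Support only AOEDriver and ISCSIDriver. Sorry!")


check_volume_driver('nova.volume.driver.ISCSIDriver')  # accepted; AOEDriver also passes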
.mailmap
@@ -33,3 +33,4 @@
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>
<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
<justin@fathomdb.com> <superstack@superstack.org>
@@ -513,12 +513,10 @@ class InstanceCommands(object):
msg = _('Only KVM is supported for now. Sorry!')
raise exception.Error(msg)

if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver':
instance_ref = db.instance_get(ctxt, instance_id)
if len(instance_ref['volumes']) != 0:
msg = _(("""Volumes attached by ISCSIDriver"""
""" are not supported. Sorry!"""))
raise exception.Error(msg)
if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver' and \
FLAGS.volume_driver != 'nova.volume.driver.ISCSIDriver':
msg = _("Support only AOEDriver and ISCSIDriver. Sorry!")
raise exception.Error(msg)

rpc.call(ctxt,
FLAGS.scheduler_topic,
@@ -620,8 +618,9 @@ class ServiceCommands(object):
if len(service_refs) <= 0:
raise exception.Invalid(_('%s is not compute node.') % host)

result = rpc.call(ctxt, db.queue_get_for(ctxt, FLAGS.compute_topic, host),
{"method": "update_available_resource"})
result = rpc.call(ctxt,
db.queue_get_for(ctxt, FLAGS.compute_topic, host),
{"method": "update_available_resource"})


class LogCommands(object):

@@ -251,9 +251,6 @@ def msg_reply(msg_id, reply=None, failure=None):
try:
publisher.send({'result': reply, 'failure': failure})
except TypeError:
print '>>>>>>>>>>>>>>>>>>'
print reply
print '>>>>>>>>>>>>>>>>>>'
publisher.send(
{'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),

@@ -82,6 +82,41 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project.id}
return db.security_group_create(self.context, values)

def _get_dummy_instance(self):
"""Get mock-return-value instance object
Use this when any testcase executed later than test_run_terminate
"""
vol1 = models.Volume()
vol1.__setitem__('id', 1)
vol2 = models.Volume()
vol2.__setitem__('id', 2)
instance_ref = models.Instance()
instance_ref.__setitem__('id', 1)
instance_ref.__setitem__('volumes', [vol1, vol2])
instance_ref.__setitem__('hostname', 'i-00000001')
instance_ref.__setitem__('host', 'dummy')
return instance_ref

def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
for instance in cases:
ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, **instance)
try:
self.assertNotEqual(ref[0]['display_name'], None)
finally:
db.instance_destroy(self.context, ref[0]['id'])

def test_create_instance_associates_security_groups(self):
"""Make sure create associates security groups"""
group = self._create_group()
instance_ref = models.Instance()
instance_ref.__setitem__('id', 1)
instance_ref.__setitem__('volumes', [{'id': 1}, {'id': 2}])
instance_ref.__setitem__('hostname', 'i-00000001')
return instance_ref

def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -275,8 +310,7 @@ class ComputeTestCase(test.TestCase):
if instances that are intended to be migrated don't have fixed_ip
(not happens usually), pre_live_migration has to raise Exception.
"""
instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
'hostname':'i-000000001'}
instance_ref = self._get_dummy_instance()
c = context.get_admin_context()
i_id = instance_ref['id']

@@ -297,10 +331,8 @@ class ComputeTestCase(test.TestCase):
called because aoe module should be inserted at destination
host. This testcase checks on it.
"""
instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
'hostname':'i-000000001'}
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
i_id=instance_ref['id']

self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
@@ -308,13 +340,13 @@ class ComputeTestCase(test.TestCase):
netmock = self.mox.CreateMock(self.network_manager)
drivermock = self.mox.CreateMock(self.compute_driver)

dbmock.instance_get(c, i_id).AndReturn(instance_ref)
dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
for i in range(len(instance_ref['volumes'])):
vid = instance_ref['volumes'][i]['id']
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
for i in range(len(i_ref['volumes'])):
vid = i_ref['volumes'][i]['id']
volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
netmock.setup_compute_network(c, instance_ref['id'])
drivermock.ensure_filtering_rules_for_instance(instance_ref)
netmock.setup_compute_network(c, i_ref['id'])
drivermock.ensure_filtering_rules_for_instance(i_ref)

self.compute.db = dbmock
self.compute.volume_manager = volmock
@@ -322,7 +354,7 @@ class ComputeTestCase(test.TestCase):
self.compute.driver = drivermock

self.mox.ReplayAll()
ret = self.compute.pre_live_migration(c, i_id)
ret = self.compute.pre_live_migration(c, i_ref['id'])
self.assertEqual(ret, None)
self.mox.ResetAll()

@@ -332,28 +364,28 @@ class ComputeTestCase(test.TestCase):
because administrator can prove instance conditions before
live_migration if any trouble occurs.
"""
instance_ref={'id':1, 'volumes':[], 'hostname':'i-20000001'}
i_ref = self._get_dummy_instance()
i_ref.__setitem__('volumes', [])
c = context.get_admin_context()
i_id = instance_ref['id']

self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
netmock = self.mox.CreateMock(self.network_manager)
drivermock = self.mox.CreateMock(self.compute_driver)

dbmock.instance_get(c, i_id).AndReturn(instance_ref)
dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
self.mox.StubOutWithMock(compute_manager.LOG, 'info')
compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname'])
netmock.setup_compute_network(c, i_id)
drivermock.ensure_filtering_rules_for_instance(instance_ref)
compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
netmock.setup_compute_network(c, i_ref['id'])
drivermock.ensure_filtering_rules_for_instance(i_ref)

self.compute.db = dbmock
self.compute.network_manager = netmock
self.compute.driver = drivermock

self.mox.ReplayAll()
ret = self.compute.pre_live_migration(c, i_id)
ret = self.compute.pre_live_migration(c, i_ref['id'])
self.assertEqual(ret, None)
self.mox.ResetAll()

@@ -363,37 +395,30 @@ class ComputeTestCase(test.TestCase):
tries to retry, but raise exception in case of over
max_retry_count. this method confirms raising exception.
"""

instance_ref = models.Instance()
instance_ref.__setitem__('id', 1)
instance_ref.__setitem__('volumes', [])
instance_ref.__setitem__('hostname', 'i-ec2id')

i_ref = self._get_dummy_instance()
c = context.get_admin_context()
i_id = instance_ref['id']

self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
netmock = self.mox.CreateMock(self.network_manager)
drivermock = self.mox.CreateMock(self.compute_driver)

dbmock.instance_get(c, i_id).AndReturn(instance_ref)
dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
self.mox.StubOutWithMock(compute_manager.LOG, 'info')
compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname'])
volmock = self.mox.CreateMock(self.volume_manager)

dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
for i in range(len(i_ref['volumes'])):
volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
for i in range(FLAGS.live_migration_retry_count):
netmock.setup_compute_network(c, i_id).\
netmock.setup_compute_network(c, i_ref['id']).\
AndRaise(exception.ProcessExecutionError())

self.compute.db = dbmock
self.compute.network_manager = netmock
self.compute.driver = drivermock
self.compute.volume_manager = volmock

self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,
self.compute.pre_live_migration,
c, i_id)
c, i_ref['id'])
self.mox.ResetAll()

def test_live_migration_instance_has_volume(self):
@@ -402,135 +427,231 @@ class ComputeTestCase(test.TestCase):
And that is checked by check_for_export().
This testcase confirms check_for_export() is called.
"""
instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], 'hostname':'i-00000001'}
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
dest='dummydest'
i_id = instance_ref['id']
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
drivermock = self.mox.CreateMock(self.compute_driver)

dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, FLAGS.volume_topic,
{"method": "check_for_export",
"args": {'instance_id': i_id}}).InAnyOrder('g1')
rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
{"method": "pre_live_migration",
"args": {'instance_id': i_id}}).InAnyOrder('g1')
rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
"args": {'instance_id': i_ref['id']}})
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}})
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
self.compute.recover_live_migration)

self.compute.db = dbmock
self.compute.driver = drivermock
self.mox.ReplayAll()
ret = self.compute.live_migration(c, i_id, dest)
ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
self.assertEqual(ret, None)
self.mox.ResetAll()

def test_live_migration_instance_has_volume_and_exception(self):
"""In addition to test_live_migration_instance_has_volume testcase,
this testcase confirms if any exception raises from check_for_export().
Then, valid sequence of this method should recover instance/volumes
status(ex. instance['state_description'] is changed from 'migrating'
-> 'running', was changed by scheduler)
this testcase confirms if any exception raises from
check_for_export(). Then, valid sequence of this method should
recover instance/volumes status(ex. instance['state_description']
is changed from 'migrating' -> 'running', was changed by scheduler)
"""
instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
'hostname':'i-000000001'}
dest='dummydest'
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
i_id = instance_ref['id']
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
drivermock = self.mox.CreateMock(self.compute_driver)

dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, FLAGS.volume_topic,
{"method": "check_for_export",
"args": {'instance_id': i_id}}).InAnyOrder('g1')
compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
{"method": "pre_live_migration",
"args": {'instance_id': i_id}}).\
InAnyOrder('g1').AndRaise(rpc.RemoteError('', '', ''))
#self.mox.StubOutWithMock(compute_manager.LOG, 'error')
#compute_manager.LOG.error('Pre live migration for %s failed at %s',
# instance_ref['hostname'], dest)
dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
for i in range(len(instance_ref['volumes'])):
vid = instance_ref['volumes'][i]['id']
dbmock.volume_update(c, vid, {'status': 'in-use'})
rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
"args": {'instance_id': i_ref['id']}})
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}}).\
AndRaise(rpc.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
'state': power_state.RUNNING,
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})

self.compute.db = dbmock
self.compute.driver = drivermock
self.mox.ReplayAll()
self.assertRaises(rpc.RemoteError,
self.compute.live_migration,
c, i_id, dest)
c, i_ref['id'], i_ref['host'])
self.mox.ResetAll()

def test_live_migration_instance_has_no_volume_and_exception(self):
"""Simpler than test_live_migration_instance_has_volume_and_exception"""

instance_ref={'id':1, 'volumes':[], 'hostname':'i-000000001'}
dest='dummydest'
"""Simpler than
test_live_migration_instance_has_volume_and_exception
"""
i_ref = self._get_dummy_instance()
i_ref.__setitem__('volumes', [])
c = context.get_admin_context()
i_id = instance_ref['id']
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
drivermock = self.mox.CreateMock(self.compute_driver)

dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
self.mox.StubOutWithMock(rpc, 'call')
compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
rpc.call(c, compute_topic,
{"method": "pre_live_migration",
"args": {'instance_id': i_id}}).\
AndRaise(rpc.RemoteError('', '', ''))
#self.mox.StubOutWithMock(compute_manager.LOG, 'error')
#compute_manager.LOG.error('Pre live migration for %s failed at %s',
# instance_ref['hostname'], dest)
dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}}).\
AndRaise(rpc.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
'state': power_state.RUNNING,
'host': i_ref['host']})

self.compute.db = dbmock
self.compute.driver = drivermock
self.mox.ReplayAll()
self.assertRaises(rpc.RemoteError,
self.compute.live_migration,
c, i_id, dest)
c, i_ref['id'], i_ref['host'])
self.mox.ResetAll()

def test_live_migration_instance_has_volume(self):
"""Simpler version than test_live_migration_instance_has_volume."""
instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
'hostname':'i-000000001'}
def test_live_migration_instance_has_no_volume(self):
"""Simpler than test_live_migration_instance_has_volume."""
i_ref = self._get_dummy_instance()
i_ref.__setitem__('volumes', [])
c = context.get_admin_context()
dest='dummydest'
i_id = instance_ref['id']
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
drivermock = self.mox.CreateMock(self.compute_driver)

dbmock.instance_get(c, i_id).AndReturn(instance_ref)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, FLAGS.volume_topic,
{"method": "check_for_export",
"args": {'instance_id': i_id}}).InAnyOrder('g1')
compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
rpc.call(c, compute_topic,
{"method": "pre_live_migration",
"args": {'instance_id': i_id}}).InAnyOrder('g1')
drivermock.live_migration(c, instance_ref, dest)
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}})
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
self.compute.recover_live_migration)

self.compute.db = dbmock
self.compute.driver = drivermock
self.mox.ReplayAll()
ret = self.compute.live_migration(c, i_id, dest)
ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
self.assertEqual(ret, None)
self.mox.ResetAll()

def test_post_live_migration_working_correctly(self):
"""post_live_migration works as expected correctly """

i_ref = self._get_dummy_instance()
fixed_ip_ref = {'id': 1, 'address': '1.1.1.1'}
floating_ip_ref = {'id': 1, 'address': '2.2.2.2'}
c = context.get_admin_context()

dbmock = self.mox.CreateMock(db)
dbmock.volume_get_all_by_instance(c, i_ref['id']).\
AndReturn(i_ref['volumes'])
self.mox.StubOutWithMock(self.compute.volume_manager,
'remove_compute_volume')
for v in i_ref['volumes']:
self.compute.volume_manager.remove_compute_volume(c, v['id'])
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref)

fixed_ip = fixed_ip_ref['address']
dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn(fixed_ip)
dbmock.fixed_ip_update(c, fixed_ip, {'host': i_ref['host']})

fl_ip = floating_ip_ref['address']
dbmock.instance_get_floating_address(c, i_ref['id']).AndReturn(fl_ip)
dbmock.floating_ip_get_by_address(c, fl_ip).AndReturn(floating_ip_ref)
dbmock.floating_ip_update(c, floating_ip_ref['address'],
{'host': i_ref['host']})
dbmock.instance_update(c, i_ref['id'],
{'state_description': 'running',
'state': power_state.RUNNING,
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})

self.compute.db = dbmock
self.mox.ReplayAll()
ret = self.compute.post_live_migration(c, i_ref, i_ref['host'])
self.assertEqual(ret, None)
self.mox.ResetAll()

def test_post_live_migration_no_floating_ip(self):
"""
post_live_migration works as expected correctly
(in case instance doesn't have floating ip)
"""
i_ref = self._get_dummy_instance()
i_ref.__setitem__('volumes', [])
fixed_ip_ref = {'id': 1, 'address': '1.1.1.1'}
floating_ip_ref = {'id': 1, 'address': '1.1.1.1'}
c = context.get_admin_context()

dbmock = self.mox.CreateMock(db)
dbmock.volume_get_all_by_instance(c, i_ref['id']).AndReturn([])
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref)

fixed_ip = fixed_ip_ref['address']
dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn(fixed_ip)
dbmock.fixed_ip_update(c, fixed_ip, {'host': i_ref['host']})

dbmock.instance_get_floating_address(c, i_ref['id']).AndReturn(None)
dbmock.instance_update(c, i_ref['id'],
{'state_description': 'running',
'state': power_state.RUNNING,
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})

self.compute.db = dbmock
self.mox.ReplayAll()
ret = self.compute.post_live_migration(c, i_ref, i_ref['host'])
self.assertEqual(ret, None)
self.mox.ResetAll()

def test_post_live_migration_no_floating_ip_with_exception(self):
"""
post_live_migration works as expected correctly
(in case instance doesn't have floating ip, and raise exception)
"""
i_ref = self._get_dummy_instance()
i_ref.__setitem__('volumes', [])
fixed_ip_ref = {'id': 1, 'address': '1.1.1.1'}
floating_ip_ref = {'id': 1, 'address': '1.1.1.1'}
c = context.get_admin_context()

dbmock = self.mox.CreateMock(db)
dbmock.volume_get_all_by_instance(c, i_ref['id']).AndReturn([])
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref)

fixed_ip = fixed_ip_ref['address']
dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn(fixed_ip)
dbmock.fixed_ip_update(c, fixed_ip, {'host': i_ref['host']})
dbmock.instance_get_floating_address(c, i_ref['id']).\
AndRaise(exception.NotFound())

self.mox.StubOutWithMock(compute_manager.LOG, 'info')
compute_manager.LOG.info(_('post_live_migration() is started..'))
compute_manager.LOG.info(_('floating_ip is not found for %s'),
i_ref.name)
# first 2 messages are checked.
compute_manager.LOG.info(mox.IgnoreArg())
compute_manager.LOG.info(mox.IgnoreArg())

self.mox.StubOutWithMock(db, 'instance_update')
dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
'state': power_state.RUNNING,
'host': i_ref['host']})
self.mox.StubOutWithMock(db, 'volume_update')
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})

self.compute.db = dbmock
self.mox.ReplayAll()
ret = self.compute.post_live_migration(c, i_ref, i_ref['host'])
self.assertEqual(ret, None)
self.mox.ResetAll()

@@ -95,13 +95,13 @@ class SchedulerTestCase(test.TestCase):
AndReturn([])

self.mox.ReplayAll()
result = scheduler.show_host_resource(ctxt, dest)
result = scheduler.show_host_resource(ctxt, dest)
# ret should be dict
keys = ['ret', 'msg']
c1 = list(set(result.keys())) == list(set(keys))
c2 = not result['ret']
c3 = result['msg'].find('No such Host or not compute node') <= 0
self.assertTrue( c1 and c2 and c3)
self.assertTrue(c1 and c2 and c3)
self.mox.UnsetStubs()

def test_show_host_resource_no_project(self):
@@ -112,9 +112,9 @@ class SchedulerTestCase(test.TestCase):
scheduler = manager.SchedulerManager()
dest = 'dummydest'
ctxt = context.get_admin_context()
r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100,
'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10}
service_ref = {'id':1, 'host':dest}
r0 = {'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10}
service_ref = {'id': 1, 'host': dest}
service_ref.update(r0)

self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
@@ -124,14 +124,14 @@ class SchedulerTestCase(test.TestCase):
AndReturn([])

self.mox.ReplayAll()
result = scheduler.show_host_resource(ctxt, dest)
result = scheduler.show_host_resource(ctxt, dest)
# ret should be dict
keys = ['ret', 'phy_resource', 'usage']
c1 = list(set(result.keys())) == list(set(keys))
c2 = result['ret']
c3 = result['phy_resource'] == r0
c4 = result['usage'] == {}
self.assertTrue( c1 and c2 and c3 and c4)
self.assertTrue(c1 and c2 and c3 and c4)
self.mox.UnsetStubs()

def test_show_host_resource_works_correctly(self):
@@ -142,15 +142,15 @@ class SchedulerTestCase(test.TestCase):
scheduler = manager.SchedulerManager()
dest = 'dummydest'
ctxt = context.get_admin_context()
r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100,
'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10}
r1 = {'vcpus':10, 'memory_mb':4, 'local_gb':20}
r2 = {'vcpus':10, 'memory_mb':20, 'local_gb':30}
service_ref = {'id':1, 'host':dest}
r0 = {'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10}
r1 = {'vcpus': 10, 'memory_mb': 4, 'local_gb': 20}
r2 = {'vcpus': 10, 'memory_mb': 20, 'local_gb': 30}
service_ref = {'id': 1, 'host': dest}
service_ref.update(r0)
instance_ref2 = {'id':2, 'project_id':'p-01', 'host':'dummy'}
instance_ref2 = {'id': 2, 'project_id': 'p-01', 'host': 'dummy'}
instance_ref2.update(r1)
instance_ref3 = {'id':3, 'project_id':'p-02', 'host':'dummy'}
instance_ref3 = {'id': 3, 'project_id': 'p-02', 'host': 'dummy'}
instance_ref3.update(r2)

self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
@@ -167,7 +167,7 @@ class SchedulerTestCase(test.TestCase):
ctxt, dest, p).AndReturn(r2['local_gb'])

self.mox.ReplayAll()
result = scheduler.show_host_resource(ctxt, dest)
result = scheduler.show_host_resource(ctxt, dest)
# ret should be dict
keys = ['ret', 'phy_resource', 'usage']
c1 = list(set(result.keys())) == list(set(keys))
@@ -176,7 +176,7 @@ class SchedulerTestCase(test.TestCase):
c4 = result['usage'].keys() == ['p-01', 'p-02']
c5 = result['usage']['p-01'] == r2
c6 = result['usage']['p-02'] == r2
self.assertTrue( c1 and c2 and c3 and c4 and c5 and c6)
self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6)
self.mox.UnsetStubs()


@@ -498,8 +498,8 @@ class SimpleDriverTestCase(test.TestCase):
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-00000001', 'host':'dummy',
'volumes':[{'id':1}, {'id':2}]}
i_ref = {'id': 1, 'hostname': 'i-00000001', 'host': 'dummy',
'volumes': [{'id': 1}, {'id': 2}]}
dest = 'dummydest'

self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
@@ -518,10 +518,9 @@ class SimpleDriverTestCase(test.TestCase):
driver.db.volume_update(mox.IgnoreArg(), v['id'],
{'status': 'migrating'})
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
kwargs={'instance_id':i_ref['id'], 'dest':dest}
kwargs = {'instance_id': i_ref['id'], 'dest': dest}
rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']),
{"method": 'live_migration',
"args": kwargs})
{"method": 'live_migration', "args": kwargs})

self.mox.ReplayAll()
self.scheduler.live_migration(ctxt, topic,
@@ -538,7 +537,7 @@ class SimpleDriverTestCase(test.TestCase):
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[]}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy', 'volumes': []}
dest = 'dummydest'

self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
@@ -553,10 +552,9 @@ class SimpleDriverTestCase(test.TestCase):
driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'],
power_state.PAUSED, 'migrating')
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
kwargs={'instance_id':i_ref['id'], 'dest':dest}
kwargs = {'instance_id': i_ref['id'], 'dest': dest}
rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']),
{"method": 'live_migration',
"args": kwargs})
{"method": 'live_migration', "args": kwargs})

self.mox.ReplayAll()
self.scheduler.live_migration(ctxt, topic,
@@ -571,9 +569,9 @@ class SimpleDriverTestCase(test.TestCase):
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
dest = 'dummydest'
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
'volumes':[], 'state_description':'migrating',
'state':power_state.RUNNING}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
'volumes': [], 'state_description': 'migrating',
'state': power_state.RUNNING}

self.mox.ReplayAll()
try:
@@ -591,9 +589,9 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
'volumes':[{'id':1}, {'id':2}],
'state_description':'running', 'state':power_state.RUNNING}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
'volumes': [{'id': 1}, {'id': 2}],
'state_description': 'running', 'state': power_state.RUNNING}

self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'volume').\
@@ -614,8 +612,8 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
'state_description':'running', 'state':power_state.RUNNING}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy', 'volumes': [],
'state_description': 'running', 'state': power_state.RUNNING}

self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\
@@ -637,8 +635,8 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
'state_description':'running', 'state':power_state.RUNNING}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy', 'volumes': [],
'state_description': 'running', 'state': power_state.RUNNING}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -663,7 +661,7 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -688,7 +686,7 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -713,7 +711,7 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -740,7 +738,7 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummydest'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -756,7 +754,8 @@ class SimpleDriverTestCase(test.TestCase):
try:
self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
except exception.Invalid, e:
self.assertTrue(e.message.find('is running now. choose other host') >= 0)
msg = 'is running now. choose other host'
self.assertTrue(e.message.find(msg) >= 0)
self.mox.UnsetStubs()

def test_live_migraiton_dest_check_service_works_correctly(self):
@@ -767,7 +766,7 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummydest'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -785,7 +784,8 @@ class SimpleDriverTestCase(test.TestCase):
try:
self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
except exception.Invalid, e:
self.assertTrue(e.message.find('is running now. choose other host') >= 0)
msg = 'is running now. choose other host'
self.assertTrue(e.message.find(msg) >= 0)
self.mox.UnsetStubs()

def test_live_migraiton_common_check_service_dest_not_exists(self):
@@ -796,7 +796,7 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
driver_i = self.scheduler.driver

self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage')
@@ -823,7 +823,8 @@ class SimpleDriverTestCase(test.TestCase):
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'launched_on':'h1'}
i_ref = {'id': 1, 'hostname': 'i-01',
'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -857,8 +858,8 @@ class SimpleDriverTestCase(test.TestCase):
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01',
'host':'dummy', 'launched_on':'h1'}
i_ref = {'id': 1, 'hostname': 'i-01',
'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -895,8 +896,8 @@ class SimpleDriverTestCase(test.TestCase):
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01',
'host':'dummy', 'launched_on':'h1'}
i_ref = {'id': 1, 'hostname': 'i-01',
'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -933,8 +934,8 @@ class SimpleDriverTestCase(test.TestCase):
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01',
'host':'dummy', 'launched_on':'h1'}
i_ref = {'id': 1, 'hostname': 'i-01',
'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -978,8 +979,8 @@ class SimpleDriverTestCase(test.TestCase):
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
i_ref = {'id':1, 'hostname':'i-01',
'host':'dummy', 'launched_on':'h1'}
i_ref = {'id': 1, 'hostname': 'i-01',
'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -1018,9 +1019,10 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
service_ref = {'id':1, 'memory_mb':32, 'memory_mb_used':12, 'local_gb':100}
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
'vcpus':5, 'memory_mb':20, 'local_gb':10}
service_ref = {'id': 1, 'memory_mb': 32,
'memory_mb_used': 12, 'local_gb': 100}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
'vcpus': 5, 'memory_mb': 20, 'local_gb': 10}

self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
@@ -1043,9 +1045,9 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
service_ref = {'id':1, 'memory_mb':120, 'memory_mb_used':32}
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
'vcpus':5, 'memory_mb':8, 'local_gb':10}
service_ref = {'id': 1, 'memory_mb': 120, 'memory_mb_used': 32}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
'vcpus': 5, 'memory_mb': 8, 'local_gb': 10}

self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
@@ -1066,7 +1068,7 @@ class SimpleDriverTestCase(test.TestCase):
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
fpath = '/test/20110127120000'
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}

self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
driver.rpc.call(mox.IgnoreArg(),
@@ -1092,7 +1094,7 @@ class SimpleDriverTestCase(test.TestCase):
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
fpath = '/test/20110127120000'
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}

self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
driver.rpc.call(mox.IgnoreArg(),
@@ -1100,8 +1102,8 @@ class SimpleDriverTestCase(test.TestCase):
{"method": 'mktmpfile'}).AndReturn(fpath)
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(ctxt, FLAGS.compute_topic, i_ref['host']),
{"method": 'confirm_tmpfile', "args":{'path':fpath}}).\
AndRaise(rpc.RemoteError('','',''))
{"method": 'confirm_tmpfile', "args": {'path': fpath}}).\
AndRaise(rpc.RemoteError('', '', ''))
self.mox.StubOutWithMock(driver.logging, 'error')
msg = _("Cannot create tmpfile at %s to confirm shared storage.")
driver.logging.error(msg % FLAGS.instances_path)
@@ -1112,7 +1114,6 @@ class SimpleDriverTestCase(test.TestCase):
ctxt, i_ref, dest)
self.mox.UnsetStubs()


def test_mounted_on_same_shared_storage_works_correctly(self):
"""
A testcase of driver.mounted_on_same_shared_storage
@@ -1122,15 +1123,17 @@ class SimpleDriverTestCase(test.TestCase):
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
fpath = '/test/20110127120000'
i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}

self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest),
{"method": 'mktmpfile'}).AndReturn(fpath)
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, i_ref['host']),
{"method": 'confirm_tmpfile', "args":{'path':fpath}})
db.queue_get_for(mox.IgnoreArg(),
FLAGS.compute_topic,
i_ref['host']),
{"method": 'confirm_tmpfile', "args": {'path': fpath}})

self.mox.ReplayAll()
ret = self.scheduler.driver.mounted_on_same_shared_storage(ctxt,

||||
|
||||
@@ -38,6 +38,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager')
|
||||
libvirt = None
|
||||
libxml2 = None
|
||||
|
||||
|
||||
class LibvirtConnTestCase(test.TestCase):
|
||||
def setUp(self):
|
||||
super(LibvirtConnTestCase, self).setUp()
|
||||
@@ -81,7 +82,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
Call this method at the top of each testcase method,
|
||||
if the testcase is necessary libvirt and cheetah.
|
||||
"""
|
||||
try :
|
||||
try:
|
||||
global libvirt
|
||||
global libxml2
|
||||
libvirt_conn.libvirt = __import__('libvirt')
|
||||
@@ -99,7 +100,8 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
fwmock = self.mox.CreateMock(obj)
|
||||
self.mox.StubOutWithMock(libvirt_conn, 'utils',
|
||||
use_mock_anything=True)
|
||||
libvirt_conn.utils.import_object(FLAGS.firewall_driver).AndReturn(fwmock)
|
||||
libvirt_conn.utils.import_object(FLAGS.firewall_driver).\
|
||||
AndReturn(fwmock)
|
||||
return fwmock
|
||||
|
||||
def test_xml_and_uri_no_ramdisk_no_kernel(self):
|
||||
@@ -241,7 +243,8 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
expected_result,
|
||||
'%s failed common check %d' % (xml, i))
|
||||
|
||||
# This test is supposed to make sure we don't override a specifically set uri
|
||||
# This test is supposed to make sure we don't
|
||||
# override a specifically set uri
|
||||
#
|
||||
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
|
||||
# checking against that later on. This way we make sure the
|
||||
@@ -270,7 +273,6 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self.assertTrue(0 < conn.get_vcpu_total())
|
||||
self.mox.UnsetStubs()
|
||||
|
||||
|
||||
def test_get_memory_mb_total(self):
|
||||
"""Check if get_memory_mb returns appropriate memory value"""
|
||||
try:
|
||||
@@ -285,8 +287,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
def test_get_local_gb_total(self):
|
||||
"""Check if get_local_gb_total returns appropriate disk value"""
|
||||
# Note(masumotok): cannot test b/c FLAGS.instances_path is
|
||||
# inevitable for this test..
|
||||
# Note(masumotok): leave this b/c FLAGS.instances_path is inevitable..
|
||||
#try:
|
||||
# self._driver_dependent_test_setup()
|
||||
#except:
|
||||
@@ -305,8 +306,9 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
except:
|
||||
return
|
||||
|
||||
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
|
||||
libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1,2])
|
||||
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
|
||||
'_conn', use_mock_anything=True)
|
||||
libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1, 2])
|
||||
vdmock = self.mox.CreateMock(libvirt.virDomain)
|
||||
self.mox.StubOutWithMock(vdmock, "vcpus", use_mock_anything=True)
|
||||
vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]])
|
||||
@@ -318,7 +320,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
self.mox.ReplayAll()
|
||||
conn = libvirt_conn.LibvirtConnection(False)
|
||||
self.assertTrue( conn.get_vcpu_used() == 4)
|
||||
self.assertTrue(conn.get_vcpu_used() == 4)
|
||||
self.mox.UnsetStubs()
|
||||
|
||||
def test_get_memory_mb_used(self):
|
||||
@@ -335,8 +337,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
def test_get_local_gb_used(self):
|
||||
"""Check if get_local_gb_total returns appropriate disk value"""
|
||||
# Note(masumotok): cannot test b/c FLAGS.instances_path is
|
||||
# inevitable for this test..
|
||||
# Note(masumotok): leave this b/c FLAGS.instances_path is inevitable
|
||||
#try:
|
||||
# self._driver_dependent_test_setup()
|
||||
#except:
|
||||
@@ -353,22 +354,23 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
Check if get_cpu_info works correctly.
|
||||
(in case libvirt.getCapabilities() works correctly)
|
||||
"""
|
||||
xml=("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
|
||||
"""<vendor>Intel</vendor><topology sockets='2' """
|
||||
"""cores='4' threads='2'/><feature name='rdtscp'/>"""
|
||||
"""<feature name='dca'/><feature name='xtpr'/>"""
|
||||
"""<feature name='tm2'/><feature name='est'/>"""
|
||||
"""<feature name='vmx'/><feature name='ds_cpl'/>"""
|
||||
"""<feature name='monitor'/><feature name='pbe'/>"""
|
||||
"""<feature name='tm'/><feature name='ht'/>"""
|
||||
"""<feature name='ss'/><feature name='acpi'/>"""
|
||||
"""<feature name='ds'/><feature name='vme'/></cpu>""")
|
||||
xml = ("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
|
||||
"""<vendor>Intel</vendor><topology sockets='2' """
|
||||
"""cores='4' threads='2'/><feature name='rdtscp'/>"""
|
||||
"""<feature name='dca'/><feature name='xtpr'/>"""
|
||||
"""<feature name='tm2'/><feature name='est'/>"""
|
||||
"""<feature name='vmx'/><feature name='ds_cpl'/>"""
|
||||
"""<feature name='monitor'/><feature name='pbe'/>"""
|
||||
"""<feature name='tm'/><feature name='ht'/>"""
|
||||
"""<feature name='ss'/><feature name='acpi'/>"""
|
||||
"""<feature name='ds'/><feature name='vme'/></cpu>""")
|
||||
|
||||
try:
|
||||
self._driver_dependent_test_setup()
|
||||
except:
|
||||
return
|
||||
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
|
||||
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
|
||||
'_conn', use_mock_anything=True)
|
||||
libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
|
||||
|
||||
self.mox.ReplayAll()
|
||||
@@ -382,22 +384,23 @@ class LibvirtConnTestCase(test.TestCase):
in case libvirt.getCapabilities() returns wrong xml
(in case the xml doesn't have a <cpu> tag)
"""
xml=("""<cccccpu><arch>x86_64</arch><model>Nehalem</model>"""
"""<vendor>Intel</vendor><topology sockets='2' """
"""cores='4' threads='2'/><feature name='rdtscp'/>"""
"""<feature name='dca'/><feature name='xtpr'/>"""
"""<feature name='tm2'/><feature name='est'/>"""
"""<feature name='vmx'/><feature name='ds_cpl'/>"""
"""<feature name='monitor'/><feature name='pbe'/>"""
"""<feature name='tm'/><feature name='ht'/>"""
"""<feature name='ss'/><feature name='acpi'/>"""
"""<feature name='ds'/><feature name='vme'/></cccccpu>""")
xml = ("""<cccccpu><arch>x86_64</arch><model>Nehalem</model>"""
"""<vendor>Intel</vendor><topology sockets='2' """
"""cores='4' threads='2'/><feature name='rdtscp'/>"""
"""<feature name='dca'/><feature name='xtpr'/>"""
"""<feature name='tm2'/><feature name='est'/>"""
"""<feature name='vmx'/><feature name='ds_cpl'/>"""
"""<feature name='monitor'/><feature name='pbe'/>"""
"""<feature name='tm'/><feature name='ht'/>"""
"""<feature name='ss'/><feature name='acpi'/>"""
"""<feature name='ds'/><feature name='vme'/></cccccpu>""")

try:
self._driver_dependent_test_setup()
except:
return
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
'_conn', use_mock_anything=True)
libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)

self.mox.ReplayAll()
@@ -405,7 +408,7 @@ class LibvirtConnTestCase(test.TestCase):
try:
conn.get_cpu_info()
except exception.Invalid, e:
c1 = ( 0 <= e.message.find('Invalid xml') )
c1 = (0 <= e.message.find('Invalid xml'))
self.assertTrue(c1)
self.mox.UnsetStubs()

@@ -416,22 +419,23 @@ class LibvirtConnTestCase(test.TestCase):
(in case the xml has an improper <topology> tag,
i.e. the "socket" attribute is missing)
"""
xml=("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
"""<vendor>Intel</vendor><topology """
"""cores='4' threads='2'/><feature name='rdtscp'/>"""
"""<feature name='dca'/><feature name='xtpr'/>"""
"""<feature name='tm2'/><feature name='est'/>"""
"""<feature name='vmx'/><feature name='ds_cpl'/>"""
"""<feature name='monitor'/><feature name='pbe'/>"""
"""<feature name='tm'/><feature name='ht'/>"""
"""<feature name='ss'/><feature name='acpi'/>"""
"""<feature name='ds'/><feature name='vme'/></cpu>""")
xml = ("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
"""<vendor>Intel</vendor><topology """
"""cores='4' threads='2'/><feature name='rdtscp'/>"""
"""<feature name='dca'/><feature name='xtpr'/>"""
"""<feature name='tm2'/><feature name='est'/>"""
"""<feature name='vmx'/><feature name='ds_cpl'/>"""
"""<feature name='monitor'/><feature name='pbe'/>"""
"""<feature name='tm'/><feature name='ht'/>"""
"""<feature name='ss'/><feature name='acpi'/>"""
"""<feature name='ds'/><feature name='vme'/></cpu>""")

try:
self._driver_dependent_test_setup()
except:
return
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
'_conn', use_mock_anything=True)
libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)

self.mox.ReplayAll()
@@ -439,7 +443,7 @@ class LibvirtConnTestCase(test.TestCase):
try:
conn.get_cpu_info()
except exception.Invalid, e:
c1 = ( 0 <= e.message.find('Invalid xml: topology') )
c1 = (0 <= e.message.find('Invalid xml: topology'))
self.assertTrue(c1)
self.mox.UnsetStubs()

@@ -474,7 +478,8 @@ class LibvirtConnTestCase(test.TestCase):

host = 'foo'
binary = 'nova-compute'
service_ref = {'id':1, 'host':host, 'binary':binary, 'topic':'compute'}
service_ref = {'id': 1, 'host': host, 'binary': binary,
'topic': 'compute'}

self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
db.service_get_all_by_topic(mox.IgnoreMox(), 'compute').\
@@ -511,7 +516,7 @@ class LibvirtConnTestCase(test.TestCase):
conn.update_available_resource(host)
except exception.Invalid, e:
msg = 'Cannot insert compute manager specific info'
c1 = ( 0 <= e.message.find(msg))
c1 = (0 <= e.message.find(msg))
self.assertTrue(c1)
self.mox.ResetAll()

@@ -528,12 +533,15 @@ class LibvirtConnTestCase(test.TestCase):
except:
return

self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).AndReturn(1)
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
'_conn',
use_mock_anything=True)
libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),
0).AndReturn(1)

self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
self.assertTrue( None== conn.compare_cpu(cpu_info))
self.assertTrue(None == conn.compare_cpu(cpu_info))
self.mox.UnsetStubs()

def test_compare_cpu_raises_exception(self):
@@ -553,7 +561,7 @@ class LibvirtConnTestCase(test.TestCase):

self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
use_mock_anything=True)
libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\
AndRaise(libvirt.libvirtError('ERR'))

self.mox.ReplayAll()
@@ -576,7 +584,7 @@ class LibvirtConnTestCase(test.TestCase):

self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
use_mock_anything=True)
libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\
AndRaise(exception.Invalid('ERR'))

self.mox.ReplayAll()
@@ -632,7 +640,7 @@ class LibvirtConnTestCase(test.TestCase):
try:
conn.ensure_filtering_rules_for_instance(instance_ref)
except exception.Error, e:
c1 = ( 0<=e.message.find('Timeout migrating for'))
c1 = (0 <= e.message.find('Timeout migrating for'))
self.assertTrue(c1)
self.mox.UnsetStubs()

@@ -641,12 +649,13 @@ class LibvirtConnTestCase(test.TestCase):

class dummyCall(object):
f = None

def start(self, interval=0, now=False):
pass

instance_ref = models.Instance()
instance_ref.__setitem__('id', 1)
dest = 'desthost'
i_ref = models.Instance()
i_ref.__setitem__('id', 1)
i_ref.__setitem__('host', 'dummy')
ctxt = context.get_admin_context()

try:
@@ -659,39 +668,33 @@ class LibvirtConnTestCase(test.TestCase):
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI",
use_mock_anything=True)
vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(),
vdmock.migrateToURI(FLAGS.live_migration_uri % i_ref['host'],
mox.IgnoreArg(),
None, FLAGS.live_migration_bandwidth).\
AndReturn(None)
libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
libvirt_conn.LibvirtConnection._conn.lookupByName(i_ref.name).\
AndReturn(vdmock)
# below description is also ok.
#self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection._conn,
# "lookupByName", use_mock_anything=True)

libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall())


self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
ret = conn._live_migration(ctxt, instance_ref, dest)
# Not setting post_method/recover_method in this testcase.
ret = conn._live_migration(ctxt, i_ref, i_ref['host'], '', '')
self.assertTrue(ret == None)
self.mox.UnsetStubs()

def test_live_migration_raises_exception(self):
"""
_live_migration raises exception, then this testcase confirms
state_description/state for the instances/volumes are recovered.
recovered method is called.
"""
class Instance(models.NovaBase):
id = 0
volumes = None
name = 'name'

i_ref = models.Instance()
i_ref.__setitem__('id', 1)
i_ref.__setitem__('host', 'dummy')
ctxt = context.get_admin_context()
dest = 'desthost'
instance_ref = Instance()
instance_ref.__setitem__('id', 1)
instance_ref.__setitem__('volumes', [{'id':1}, {'id':2}])

def dummy_recover_method(self, c, instance):
pass

try:
nwmock, fwmock = self._driver_dependent_test_setup()
@@ -713,163 +716,14 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
power_state.RUNNING, 'running')
|
||||
self.mox.StubOutWithMock(db, 'volume_update')
|
||||
for v in instance_ref.volumes:
|
||||
db.volume_update(ctxt, v['id'], {'status': 'in-use'}).\
|
||||
InAnyOrder('g1')
|
||||
db.volume_update(ctxt, v['id'], {'status': 'in-use'})
|
||||
|
||||
self.mox.ReplayAll()
|
||||
conn = libvirt_conn.LibvirtConnection(False)
|
||||
self.assertRaises(libvirt.libvirtError,
|
||||
conn._live_migration,
|
||||
ctxt, instance_ref, dest)
|
||||
self.mox.UnsetStubs()
|
||||
|
||||
    def test_post_live_migration_working_correctly(self):
        """_post_live_migration works as expected."""

        dest = 'dummydest'
        ctxt = context.get_admin_context()
        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
                        'volumes':[{'id':1}, {'id':2} ]}
        network_ref = {'id':1, 'host':dest}
        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}

        try:
            nwmock, fwmock = self._driver_dependent_test_setup()
        except:
            return
        fwmock.unfilter_instance(instance_ref)

        fixed_ip = instance_ref['fixed_ip']
        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
        db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)

        fl_ip = instance_ref['floating_ip']
        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
        db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(fl_ip)
        self.mox.StubOutWithMock(db, 'floating_ip_get_by_address')
        db.floating_ip_get_by_address(ctxt, instance_ref['floating_ip']).\
                                      AndReturn(floating_ip_ref)
        self.mox.StubOutWithMock(db, 'floating_ip_update')
        db.floating_ip_update(ctxt, floating_ip_ref['address'], {'host': dest})

        self.mox.StubOutWithMock(db, 'instance_update')
        db.instance_update(ctxt, instance_ref['id'],
                           {'state_description': 'running',
                            'state': power_state.RUNNING, 'host': dest})
        self.mox.StubOutWithMock(db, 'volume_update')
        for v in instance_ref['volumes']:
            db.volume_update(ctxt, v['id'], {'status': 'in-use'})

        self.mox.ReplayAll()
        conn = libvirt_conn.LibvirtConnection(False)
        conn._post_live_migration(ctxt, instance_ref, dest)
        self.mox.UnsetStubs()

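# Side note (not part of the diff): the DB bookkeeping the test above expects
# from _post_live_migration, written out as a minimal sketch. The call names
# come straight from the mocked expectations; ordering, the log text, and the
# omission of the firewall unfiltering step are assumptions rather than the
# actual nova.virt.libvirt_conn implementation.
def _post_live_migration(self, ctxt, instance_ref, dest):
    """Move networking and DB records over to the destination host."""
    LOG.info(_('post livemigration operation is started..'))

    # Point the instance's fixed IP at the destination host.
    fixed_ip = db.instance_get_fixed_address(ctxt, instance_ref['id'])
    db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
    network_ref = db.fixed_ip_get_network(ctxt, fixed_ip)

    # A floating IP is optional; the tests cover both "not found" paths.
    try:
        floating_ip = db.instance_get_floating_address(ctxt,
                                                       instance_ref['id'])
        if floating_ip:
            floating_ip_ref = db.floating_ip_get_by_address(ctxt, floating_ip)
            db.floating_ip_update(ctxt, floating_ip_ref['address'],
                                  {'host': dest})
        else:
            LOG.info(_('floating_ip is not found for %s'),
                     instance_ref['hostname'])
    except exception.NotFound:
        LOG.info(_('floating_ip is not found for %s'),
                 instance_ref['hostname'])

    # Finally mark the instance as running on dest and its volumes as in-use.
    db.instance_update(ctxt, instance_ref['id'],
                       {'state_description': 'running',
                        'state': power_state.RUNNING, 'host': dest})
    for v in instance_ref['volumes']:
        db.volume_update(ctxt, v['id'], {'status': 'in-use'})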
    def test_post_live_migration_no_floating_ip(self):
        """
        _post_live_migration works as expected
        (in case the instance doesn't have a floating ip).
        """
        dest = 'dummydest'
        ctxt = context.get_admin_context()
        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
                        'volumes':[{'id':1}, {'id':2} ]}
        network_ref = {'id':1, 'host':dest}
        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}

        try:
            nwmock, fwmock = self._driver_dependent_test_setup()
        except:
            return
        fwmock.unfilter_instance(instance_ref)

        fixed_ip = instance_ref['fixed_ip']
        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
        db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)

        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
        db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(None)
        self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
        libvirt_conn.LOG.info(_('post livemigration operation is started..'))
        libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
                              instance_ref['hostname'])
        # The remaining log messages are ignored; maybe no need to check so strictly.
        libvirt_conn.LOG.info(mox.IgnoreArg())
        libvirt_conn.LOG.info(mox.IgnoreArg())

        self.mox.StubOutWithMock(db, 'instance_update')
        db.instance_update(ctxt, instance_ref['id'],
                           {'state_description': 'running',
                            'state': power_state.RUNNING,
                            'host': dest})
        self.mox.StubOutWithMock(db, 'volume_update')
        for v in instance_ref['volumes']:
            db.volume_update(ctxt, v['id'], {'status': 'in-use'})

        self.mox.ReplayAll()
        conn = libvirt_conn.LibvirtConnection(False)
        conn._post_live_migration(ctxt, instance_ref, dest)
        self.mox.UnsetStubs()

    def test_post_live_migration_no_floating_ip_with_exception(self):
        """
        _post_live_migration works as expected
        (in case the instance doesn't have a floating ip and an exception
        is raised).
        """
        dest = 'dummydest'
        ctxt = context.get_admin_context()
        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
                        'volumes':[{'id':1}, {'id':2} ]}
        network_ref = {'id':1, 'host':dest}
        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}

        try:
            nwmock, fwmock = self._driver_dependent_test_setup()
        except:
            return
        fwmock.unfilter_instance(instance_ref)

        fixed_ip = instance_ref['fixed_ip']
        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
        db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)

        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
        db.instance_get_floating_address(ctxt, instance_ref['id']).\
                                         AndRaise(exception.NotFound())
        self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
        libvirt_conn.LOG.info(_('post livemigration operation is started..'))
        libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
                              instance_ref['hostname'])
        # The last message is ignored; maybe no need to check so strictly.
        libvirt_conn.LOG.info(mox.IgnoreArg())
        libvirt_conn.LOG.info(mox.IgnoreArg())

        self.mox.StubOutWithMock(db, 'instance_update')
        db.instance_update(ctxt, instance_ref['id'],
                           {'state_description': 'running',
                            'state': power_state.RUNNING, 'host': dest})
        self.mox.StubOutWithMock(db, 'volume_update')
        for v in instance_ref['volumes']:
            db.volume_update(ctxt, v['id'], {'status': 'in-use'})

        self.mox.ReplayAll()
        conn = libvirt_conn.LibvirtConnection(False)
        conn._post_live_migration(ctxt, instance_ref, dest)
                          conn._live_migration,
                          ctxt, instance_ref, dest,
                          '', dummy_recover_method)
        self.mox.UnsetStubs()

    def tearDown(self):
@@ -1181,4 +1035,3 @@ class NWFilterTestCase(test.TestCase):
        self.fw.apply_instance_filter(instance)
        _ensure_all_called()
        self.teardown_security_group()


@@ -177,12 +177,13 @@ class VolumeTestCase(test.TestCase):
        pass


class AOETestCase(test.TestCase):
    """Test Case for AOEDriver"""
class DriverTestCase(test.TestCase):
    """Base Test class for Drivers."""
    driver_name = "nova.volume.driver.FakeAOEDriver"

    def setUp(self):
        super(AOETestCase, self).setUp()
        self.flags(volume_driver='nova.volume.driver.AOEDriver',
        super(DriverTestCase, self).setUp()
        self.flags(volume_driver=self.driver_name,
                   logging_default_format_string="%(message)s")
        self.volume = utils.import_object(FLAGS.volume_manager)
        self.context = context.get_admin_context()
@@ -201,9 +202,30 @@ class AOETestCase(test.TestCase):
        inst = {}
        self.instance_id = db.instance_create(self.context, inst)['id']

    def tearDown(self):
        super(DriverTestCase, self).tearDown()

    def _attach_volume(self):
        """Attach volumes to an instance. This function also sets
        a fake log message."""
        return []

    def _detach_volume(self, volume_id_list):
        """Detach volumes from an instance."""
        for volume_id in volume_id_list:
            db.volume_detached(self.context, volume_id)
            self.volume.delete_volume(self.context, volume_id)

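# Side note (not part of the diff): with the refactored DriverTestCase above,
# covering another volume driver only requires overriding driver_name and the
# driver-specific hooks, as AOETestCase and ISCSITestCase below do. The
# FakeISCSIDriver name here is purely illustrative and is not part of this
# change.
class ExampleDriverTestCase(DriverTestCase):
    """Hypothetical example of parameterising the base class."""
    driver_name = "nova.volume.driver.FakeISCSIDriver"

    def _attach_volume(self):
        # Driver-specific attach logic would go here; the base class
        # implementation simply returns an empty volume list.
        return super(ExampleDriverTestCase, self)._attach_volume()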
class AOETestCase(DriverTestCase):
    """Test Case for AOEDriver"""
    driver_name = "nova.volume.driver.AOEDriver"

    def setUp(self):
        super(AOETestCase, self).setUp()

    def tearDown(self):
        super(AOETestCase, self).tearDown()
        db.instance_destroy(self.context, self.instance_id)

    def _attach_volume(self):
        """Attach volumes to an instance. This function also sets
@@ -212,7 +234,7 @@ class AOETestCase(test.TestCase):
        for index in xrange(3):
            vol = {}
            vol['size'] = 0
            volume_id = db.volume_create(context.get_admin_context(),
            volume_id = db.volume_create(self.context,
                                         vol)['id']
            self.volume.create_volume(self.context, volume_id)

@@ -230,12 +252,6 @@ class AOETestCase(test.TestCase):

        return volume_id_list

    def _detach_volume(self, volume_id_list):
        """Detach volumes from an instance."""
        for volume_id in volume_id_list:
            db.volume_detached(self.context, volume_id)
            self.volume.delete_volume(self.context, volume_id)

    def test_check_for_export_with_no_volume(self):
        """No log message when no volume is attached to an instance."""
        self.stream.truncate(0)
@@ -262,10 +278,95 @@ class AOETestCase(test.TestCase):
        (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
                                                             volume_id_list[0])

        msg_is_match = False
        self.stream.truncate(0)
        try:
            self.volume.check_for_export(self.context, self.instance_id)
        except exception.ProcessExecutionError, e:
            volume_id = volume_id_list[0]
            msg = _("""Cannot confirm exported volume id:%(volume_id)s."""
                    """vblade process for e%(shelf_id)s.%(blade_id)s """
                    """isn't running.""") % locals()
            msg_is_match = (0 <= e.message.find(msg))

        self.assertTrue(msg_is_match)
        self._detach_volume(volume_id_list)

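# Side note (not part of the diff): the contract the AOE check_for_export
# tests above exercise, as a minimal sketch. The message format and the
# shelf/blade lookup come from the tests; _vblade_is_running is a
# hypothetical helper standing in for however the real driver inspects the
# vblade processes.
def check_for_export(self, context, instance_id):
    """Raise if any volume attached to the instance is not exported."""
    instance_ref = db.instance_get(context, instance_id)
    for volume in instance_ref['volumes']:
        volume_id = volume['id']
        (shelf_id, blade_id) = db.volume_get_shelf_and_blade(context,
                                                             volume_id)
        if not self._vblade_is_running(shelf_id, blade_id):
            msg = _("""Cannot confirm exported volume id:%(volume_id)s."""
                    """vblade process for e%(shelf_id)s.%(blade_id)s """
                    """isn't running.""") % locals()
            raise exception.ProcessExecutionError(msg)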
class ISCSITestCase(DriverTestCase):
    """Test Case for ISCSIDriver"""
    driver_name = "nova.volume.driver.ISCSIDriver"

    def setUp(self):
        super(ISCSITestCase, self).setUp()

    def tearDown(self):
        super(ISCSITestCase, self).tearDown()

    def _attach_volume(self):
        """Attach volumes to an instance. This function also sets
        a fake log message."""
        volume_id_list = []
        for index in xrange(3):
            vol = {}
            vol['size'] = 0
            vol_ref = db.volume_create(self.context, vol)
            self.volume.create_volume(self.context, vol_ref['id'])
            vol_ref = db.volume_get(self.context, vol_ref['id'])

            # each volume has a different mountpoint
            mountpoint = "/dev/sd" + chr((ord('b') + index))
            db.volume_attached(self.context, vol_ref['id'], self.instance_id,
                               mountpoint)
            #iscsi_target = db.volume_allocate_iscsi_target(self.context,
            #                                               vol_ref['id'],
            #                                               vol_ref['host'])
            volume_id_list.append(vol_ref['id'])

        return volume_id_list

    def test_check_for_export_with_no_volume(self):
        """No log message when no volume is attached to an instance."""
        self.stream.truncate(0)
        self.volume.check_for_export(self.context, self.instance_id)
        self.assertEqual(self.stream.getvalue(),
                         _("vblade process for e%s.%s isn't running.\n")
                         % (shelf_id, blade_id))
        self.assertEqual(self.stream.getvalue(), '')

    def test_check_for_export_with_all_volume_exported(self):
        """No log message when all the vblade processes are running."""
        volume_id_list = self._attach_volume()

        self.mox.StubOutWithMock(self.volume.driver, '_execute')
        for i in volume_id_list:
            tid = db.volume_get_iscsi_target_num(self.context, i)
            self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
                                        % locals())

        self.stream.truncate(0)
        self.mox.ReplayAll()
        self.volume.check_for_export(self.context, self.instance_id)
        self.assertEqual(self.stream.getvalue(), '')
        self.mox.UnsetStubs()

        self._detach_volume(volume_id_list)

    def test_check_for_export_with_some_volume_missing(self):
        """Output a warning message when some volumes are not recognized
        by ietd."""
        volume_id_list = self._attach_volume()

        # the first vblade process isn't running
        tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
        self.mox.StubOutWithMock(self.volume.driver, '_execute')
        self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
                                    % locals()).AndRaise(exception.ProcessExecutionError())

        self.mox.ReplayAll()
        self.assertRaises(exception.ProcessExecutionError,
                          self.volume.check_for_export,
                          self.context,
                          self.instance_id)
        msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
        self.assertTrue(0 <= self.stream.getvalue().find(msg))
        self.mox.UnsetStubs()

        self._detach_volume(volume_id_list)

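# Side note (not part of the diff): a minimal sketch of what the new
# ISCSIDriver.check_for_export is expected to do, reconstructed from the
# mocked expectations above. Only calls that appear in the tests
# (db.instance_get, db.volume_get_iscsi_target_num, self._execute running
# ietadm) are used; the exact log level and wording are assumptions.
def check_for_export(self, context, instance_id):
    """Make sure ietd exports a target for every attached volume."""
    instance_ref = db.instance_get(context, instance_id)
    for volume in instance_ref['volumes']:
        volume_id = volume['id']
        tid = db.volume_get_iscsi_target_num(context, volume_id)
        try:
            # ietadm exits non-zero when the target id is unknown, which
            # _execute surfaces as a ProcessExecutionError.
            self._execute("sudo ietadm --op show --tid=%(tid)d" % locals())
        except exception.ProcessExecutionError:
            LOG.warn(_("Cannot confirm exported volume id:%s.") % volume_id)
            raise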