diff --git a/.mailmap b/.mailmap
index d13219ab..c6f6c9a8 100644
--- a/.mailmap
+++ b/.mailmap
@@ -33,3 +33,4 @@
+
diff --git a/bin/nova-manage b/bin/nova-manage
index 740fe4ab..7336a582 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -513,12 +513,10 @@ class InstanceCommands(object):
             msg = _('Only KVM is supported for now. Sorry!')
             raise exception.Error(msg)

-        if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver':
-            instance_ref = db.instance_get(ctxt, instance_id)
-            if len(instance_ref['volumes']) != 0:
-                msg = _(("""Volumes attached by ISCSIDriver"""
-                         """ are not supported. Sorry!"""))
-                raise exception.Error(msg)
+        if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver' and \
+           FLAGS.volume_driver != 'nova.volume.driver.ISCSIDriver':
+            msg = _("Support only AOEDriver and ISCSIDriver. Sorry!")
+            raise exception.Error(msg)

         rpc.call(ctxt,
                  FLAGS.scheduler_topic,
@@ -616,12 +614,13 @@ class ServiceCommands(object):
         if len(service_refs) <= 0:
             raise exception.Invalid(_('%s does not exists.') % host)

-        service_refs = [s for s in service_refs if s['topic'] == 'compute'] 
+        service_refs = [s for s in service_refs if s['topic'] == 'compute']
         if len(service_refs) <= 0:
             raise exception.Invalid(_('%s is not compute node.') % host)
-
-        result = rpc.call(ctxt, db.queue_get_for(ctxt, FLAGS.compute_topic, host),
-                          {"method": "update_available_resource"})
+
+        result = rpc.call(ctxt,
+                          db.queue_get_for(ctxt, FLAGS.compute_topic, host),
+                          {"method": "update_available_resource"})


 class LogCommands(object):
diff --git a/nova/rpc.py b/nova/rpc.py
index 1ecb5d97..01fc6d44 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -251,9 +251,6 @@ def msg_reply(msg_id, reply=None, failure=None):
     try:
         publisher.send({'result': reply, 'failure': failure})
     except TypeError:
-        print '>>>>>>>>>>>>>>>>>>'
-        print reply
-        print '>>>>>>>>>>>>>>>>>>'
         publisher.send(
                 {'result': dict((k, repr(v))
                                 for k, v in reply.__dict__.iteritems()),
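Reviewer note (illustrative, not part of the patch): the rewritten nova-manage check above whitelists exactly two driver paths with a backslash continuation. A membership test against a tuple expresses the same rule a little more tightly; a minimal sketch reusing the names from the hunk, where SUPPORTED_VOLUME_DRIVERS is a hypothetical constant:

    SUPPORTED_VOLUME_DRIVERS = ('nova.volume.driver.AOEDriver',
                                'nova.volume.driver.ISCSIDriver')

    # reject any volume driver the live-migration path cannot handle
    if FLAGS.volume_driver not in SUPPORTED_VOLUME_DRIVERS:
        msg = _("Support only AOEDriver and ISCSIDriver. Sorry!")
        raise exception.Error(msg)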
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 3154fc5c..2862d965 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -82,6 +82,41 @@ class ComputeTestCase(test.TestCase):
                   'project_id': self.project.id}
         return db.security_group_create(self.context, values)

+    def _get_dummy_instance(self):
+        """Get mock-return-value instance object
+           Use this when any testcase executed later than test_run_terminate
+        """
+        vol1 = models.Volume()
+        vol1.__setitem__('id', 1)
+        vol2 = models.Volume()
+        vol2.__setitem__('id', 2)
+        instance_ref = models.Instance()
+        instance_ref.__setitem__('id', 1)
+        instance_ref.__setitem__('volumes', [vol1, vol2])
+        instance_ref.__setitem__('hostname', 'i-00000001')
+        instance_ref.__setitem__('host', 'dummy')
+        return instance_ref
+
+    def test_create_instance_defaults_display_name(self):
+        """Verify that an instance cannot be created without a display_name."""
+        cases = [dict(), dict(display_name=None)]
+        for instance in cases:
+            ref = self.compute_api.create(self.context,
+                FLAGS.default_instance_type, None, **instance)
+            try:
+                self.assertNotEqual(ref[0]['display_name'], None)
+            finally:
+                db.instance_destroy(self.context, ref[0]['id'])
+
+    def test_create_instance_associates_security_groups(self):
+        """Make sure create associates security groups"""
+        group = self._create_group()
+        instance_ref = models.Instance()
+        instance_ref.__setitem__('id', 1)
+        instance_ref.__setitem__('volumes', [{'id': 1}, {'id': 2}])
+        instance_ref.__setitem__('hostname', 'i-00000001')
+        return instance_ref
+
     def test_create_instance_defaults_display_name(self):
         """Verify that an instance cannot be created without a display_name."""
         cases = [dict(), dict(display_name=None)]
@@ -270,13 +305,12 @@ class ComputeTestCase(test.TestCase):
         self.network_manager = utils.import_object(FLAGS.network_manager)
         self.compute_driver = utils.import_object(FLAGS.compute_driver)

-    def test_pre_live_migration_instance_has_no_fixed_ip(self):
+    def test_pre_live_migration_instance_has_no_fixed_ip(self):
         """
         if instances that are intended to be migrated doesnt have fixed_ip
         (not happens usually), pre_live_migration has to raise Exception.
         """
-        instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
-                      'hostname':'i-000000001'}
+        instance_ref = self._get_dummy_instance()
         c = context.get_admin_context()
         i_id = instance_ref['id']

@@ -291,16 +325,14 @@ class ComputeTestCase(test.TestCase):
                           c, instance_ref['id'])
         self.mox.ResetAll()

-    def test_pre_live_migration_instance_has_volume(self):
-        """if any volumes are attached to the instances that are
+    def test_pre_live_migration_instance_has_volume(self):
+        """if any volumes are attached to the instances that are
            intended to be migrated, setup_compute_volume must be
            called because aoe module should be inserted at destination
            host. This testcase checks on it.
         """
-        instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
-                      'hostname':'i-000000001'}
+        i_ref = self._get_dummy_instance()
         c = context.get_admin_context()
-        i_id=instance_ref['id']

         self._setup_other_managers()
         dbmock = self.mox.CreateMock(db)
@@ -308,229 +340,318 @@ class ComputeTestCase(test.TestCase):
         volmock = self.mox.CreateMock(self.volume_manager)
         netmock = self.mox.CreateMock(self.network_manager)
         drivermock = self.mox.CreateMock(self.compute_driver)

-        dbmock.instance_get(c, i_id).AndReturn(instance_ref)
-        dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
-        for i in range(len(instance_ref['volumes'])):
-            vid = instance_ref['volumes'][i]['id']
+        dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+        dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+        for i in range(len(i_ref['volumes'])):
+            vid = i_ref['volumes'][i]['id']
             volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
-        netmock.setup_compute_network(c, instance_ref['id'])
-        drivermock.ensure_filtering_rules_for_instance(instance_ref)
-
+        netmock.setup_compute_network(c, i_ref['id'])
+        drivermock.ensure_filtering_rules_for_instance(i_ref)
+
         self.compute.db = dbmock
         self.compute.volume_manager = volmock
         self.compute.network_manager = netmock
         self.compute.driver = drivermock
         self.mox.ReplayAll()
-        ret = self.compute.pre_live_migration(c, i_id)
+        ret = self.compute.pre_live_migration(c, i_ref['id'])
         self.assertEqual(ret, None)
         self.mox.ResetAll()
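Reviewer note (illustrative, not part of the patch): every test in this file follows the same mox record/replay/verify cycle, which is easy to misread if you have not used mox before. A minimal standalone sketch of the pattern, with dummy names not taken from the patch:

    import mox

    class Db(object):
        def instance_get(self, ctxt, instance_id):
            raise NotImplementedError()

    m = mox.Mox()
    dbmock = m.CreateMock(Db)
    dbmock.instance_get('ctxt', 1).AndReturn({'id': 1})  # record expectation
    m.ReplayAll()                                        # switch to replay mode
    assert dbmock.instance_get('ctxt', 1) == {'id': 1}   # exercise code under test
    m.VerifyAll()                                        # fails if any expectation unmet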
""" - instance_ref={'id':1, 'volumes':[], 'hostname':'i-20000001'} + i_ref = self._get_dummy_instance() + i_ref.__setitem__('volumes', []) c = context.get_admin_context() - i_id = instance_ref['id'] self._setup_other_managers() dbmock = self.mox.CreateMock(db) netmock = self.mox.CreateMock(self.network_manager) drivermock = self.mox.CreateMock(self.compute_driver) - - dbmock.instance_get(c, i_id).AndReturn(instance_ref) - dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy') + + dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) + dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy') self.mox.StubOutWithMock(compute_manager.LOG, 'info') - compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname']) - netmock.setup_compute_network(c, i_id) - drivermock.ensure_filtering_rules_for_instance(instance_ref) - + compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname']) + netmock.setup_compute_network(c, i_ref['id']) + drivermock.ensure_filtering_rules_for_instance(i_ref) + self.compute.db = dbmock self.compute.network_manager = netmock self.compute.driver = drivermock self.mox.ReplayAll() - ret = self.compute.pre_live_migration(c, i_id) + ret = self.compute.pre_live_migration(c, i_ref['id']) self.assertEqual(ret, None) self.mox.ResetAll() - def test_pre_live_migration_setup_compute_node_fail(self): + def test_pre_live_migration_setup_compute_node_fail(self): """setup_compute_node sometimes fail since concurrent request comes to iptables and iptables complains. Then this method tries to retry, but raise exception in case of over - max_retry_count. this method confirms raising exception. + max_retry_count. this method confirms raising exception. """ - - instance_ref = models.Instance() - instance_ref.__setitem__('id', 1) - instance_ref.__setitem__('volumes', []) - instance_ref.__setitem__('hostname', 'i-ec2id') - + i_ref = self._get_dummy_instance() c = context.get_admin_context() - i_id = instance_ref['id'] self._setup_other_managers() dbmock = self.mox.CreateMock(db) netmock = self.mox.CreateMock(self.network_manager) - drivermock = self.mox.CreateMock(self.compute_driver) + volmock = self.mox.CreateMock(self.volume_manager) - dbmock.instance_get(c, i_id).AndReturn(instance_ref) - dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy') - self.mox.StubOutWithMock(compute_manager.LOG, 'info') - compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname']) - - for i in range(FLAGS.live_migration_retry_count): - netmock.setup_compute_network(c, i_id).\ + dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) + dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy') + for i in range(len(i_ref['volumes'])): + volmock.setup_compute_volume(c, i_ref['volumes'][i]['id']) + for i in range(FLAGS.live_migration_retry_count): + netmock.setup_compute_network(c, i_ref['id']).\ AndRaise(exception.ProcessExecutionError()) self.compute.db = dbmock self.compute.network_manager = netmock - self.compute.driver = drivermock + self.compute.volume_manager = volmock self.mox.ReplayAll() - self.assertRaises(exception.ProcessExecutionError, + self.assertRaises(exception.ProcessExecutionError, self.compute.pre_live_migration, - c, i_id) + c, i_ref['id']) self.mox.ResetAll() - def test_live_migration_instance_has_volume(self): + def test_live_migration_instance_has_volume(self): """Any volumes are mounted by instances to be migrated are found, vblade health must be checked before starting live-migration. And that is checked by check_for_export(). 
-    def test_live_migration_instance_has_volume(self):
+    def test_live_migration_instance_has_volume(self):
         """Any volumes are mounted by instances to be migrated are found,
            vblade health must be checked before starting live-migration.
            And that is checked by check_for_export().
-           This testcase confirms check_for_export() is called.
+           This testcase confirms check_for_export() is called.
         """
-        instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], 'hostname':'i-00000001'}
+        i_ref = self._get_dummy_instance()
         c = context.get_admin_context()
-        dest='dummydest'
-        i_id = instance_ref['id']
+        topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

-        self._setup_other_managers()
         dbmock = self.mox.CreateMock(db)
-        drivermock = self.mox.CreateMock(self.compute_driver)
-
-        dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+        dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
         self.mox.StubOutWithMock(rpc, 'call')
-        rpc.call(c, FLAGS.volume_topic,
-                 {"method": "check_for_export",
-                  "args": {'instance_id': i_id}}).InAnyOrder('g1')
-        rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
-                 {"method": "pre_live_migration",
-                  "args": {'instance_id': i_id}}).InAnyOrder('g1')
+        rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+                                         "args": {'instance_id': i_ref['id']}})
+        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+            AndReturn(topic)
+        rpc.call(c, topic, {"method": "pre_live_migration",
+                            "args": {'instance_id': i_ref['id']}})
+        self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+        self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+                                  self.compute.post_live_migration,
+                                  self.compute.recover_live_migration)

         self.compute.db = dbmock
-        self.compute.driver = drivermock
         self.mox.ReplayAll()
-        ret = self.compute.live_migration(c, i_id, dest)
+        ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
         self.assertEqual(ret, None)
         self.mox.ResetAll()

-    def test_live_migration_instance_has_volume_and_exception(self):
-        """In addition to test_live_migration_instance_has_volume testcase,
-           this testcase confirms if any exception raises from check_for_export().
-           Then, valid seaquence of this method should recovering instance/volumes
-           status(ex. instance['state_description'] is changed from 'migrating'
-           -> 'running', was changed by scheduler)
+    def test_live_migration_instance_has_volume_and_exception(self):
+        """In addition to the test_live_migration_instance_has_volume
+           testcase, this testcase confirms that, if any exception is
+           raised from check_for_export(), this method recovers the
+           instance/volumes status (e.g. instance['state_description']
+           is changed from 'migrating' back to 'running'; it had been
+           changed by the scheduler).
        """
-        instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
-                      'hostname':'i-000000001'}
-        dest='dummydest'
+        i_ref = self._get_dummy_instance()
         c = context.get_admin_context()
-        i_id = instance_ref['id']
+        topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

-        self._setup_other_managers()
         dbmock = self.mox.CreateMock(db)
-        drivermock = self.mox.CreateMock(self.compute_driver)
-
-        dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+        dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
         self.mox.StubOutWithMock(rpc, 'call')
-        rpc.call(c, FLAGS.volume_topic,
-                 {"method": "check_for_export",
-                  "args": {'instance_id': i_id}}).InAnyOrder('g1')
-        compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
-        dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
-        rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
-                 {"method": "pre_live_migration",
-                  "args": {'instance_id': i_id}}).\
-            InAnyOrder('g1').AndRaise(rpc.RemoteError('', '', ''))
-        #self.mox.StubOutWithMock(compute_manager.LOG, 'error')
-        #compute_manager.LOG.error('Pre live migration for %s failed at %s',
-        #                          instance_ref['hostname'], dest)
-        dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
-        for i in range(len(instance_ref['volumes'])):
-            vid = instance_ref['volumes'][i]['id']
-            dbmock.volume_update(c, vid, {'status': 'in-use'})
+        rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+                                         "args": {'instance_id': i_ref['id']}})
+        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+            AndReturn(topic)
+        rpc.call(c, topic, {"method": "pre_live_migration",
+                            "args": {'instance_id': i_ref['id']}}).\
+            AndRaise(rpc.RemoteError('', '', ''))
+        dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+                                                'state': power_state.RUNNING,
+                                                'host': i_ref['host']})
+        for v in i_ref['volumes']:
+            dbmock.volume_update(c, v['id'], {'status': 'in-use'})

         self.compute.db = dbmock
-        self.compute.driver = drivermock
         self.mox.ReplayAll()
-        self.assertRaises(rpc.RemoteError,
+        self.assertRaises(rpc.RemoteError,
                           self.compute.live_migration,
-                          c, i_id, dest)
+                          c, i_ref['id'], i_ref['host'])
         self.mox.ResetAll()
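Reviewer note (illustrative, not part of the patch): the recovery path asserted here, instance_update back to 'running' plus volume_update back to 'in-use' when the remote pre_live_migration fails, is a compensate-on-error block. A compact sketch of the shape the mocks describe, with names taken from the hunk and control flow simplified:

    def _call_with_rollback(ctxt, i_ref, topic):
        try:
            rpc.call(ctxt, topic,
                     {"method": "pre_live_migration",
                      "args": {'instance_id': i_ref['id']}})
        except rpc.RemoteError:
            # roll instance and volumes back to their pre-migration state
            db.instance_update(ctxt, i_ref['id'],
                               {'state_description': 'running',
                                'state': power_state.RUNNING,
                                'host': i_ref['host']})
            for v in i_ref['volumes']:
                db.volume_update(ctxt, v['id'], {'status': 'in-use'})
            raise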
-    def test_live_migration_instance_has_no_volume_and_exception(self):
-        """Simpler than test_live_migration_instance_has_volume_and_exception"""
-
-        instance_ref={'id':1, 'volumes':[], 'hostname':'i-000000001'}
-        dest='dummydest'
+    def test_live_migration_instance_has_no_volume_and_exception(self):
+        """Simpler than
+           test_live_migration_instance_has_volume_and_exception
+        """
+        i_ref = self._get_dummy_instance()
+        i_ref.__setitem__('volumes', [])
         c = context.get_admin_context()
-        i_id = instance_ref['id']
+        topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

-        self._setup_other_managers()
         dbmock = self.mox.CreateMock(db)
-        drivermock = self.mox.CreateMock(self.compute_driver)
-
-        dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+        dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+            AndReturn(topic)
         self.mox.StubOutWithMock(rpc, 'call')
-        compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
-        dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
-        rpc.call(c, compute_topic,
-                 {"method": "pre_live_migration",
-                  "args": {'instance_id': i_id}}).\
-            AndRaise(rpc.RemoteError('', '', ''))
-        #self.mox.StubOutWithMock(compute_manager.LOG, 'error')
-        #compute_manager.LOG.error('Pre live migration for %s failed at %s',
-        #                          instance_ref['hostname'], dest)
-        dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
+        rpc.call(c, topic, {"method": "pre_live_migration",
+                            "args": {'instance_id': i_ref['id']}}).\
+            AndRaise(rpc.RemoteError('', '', ''))
+        dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+                                                'state': power_state.RUNNING,
+                                                'host': i_ref['host']})

         self.compute.db = dbmock
-        self.compute.driver = drivermock
         self.mox.ReplayAll()
-        self.assertRaises(rpc.RemoteError,
+        self.assertRaises(rpc.RemoteError,
                           self.compute.live_migration,
-                          c, i_id, dest)
+                          c, i_ref['id'], i_ref['host'])
         self.mox.ResetAll()

-    def test_live_migration_instance_has_volume(self):
-        """Simpler version than test_live_migration_instance_has_volume."""
-        instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
-                      'hostname':'i-000000001'}
+    def test_live_migration_instance_has_no_volume(self):
+        """Simpler than test_live_migration_instance_has_volume."""
+        i_ref = self._get_dummy_instance()
+        i_ref.__setitem__('volumes', [])
         c = context.get_admin_context()
-        dest='dummydest'
-        i_id = instance_ref['id']
+        topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

-        self._setup_other_managers()
         dbmock = self.mox.CreateMock(db)
-        drivermock = self.mox.CreateMock(self.compute_driver)
-
-        dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+        dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
         self.mox.StubOutWithMock(rpc, 'call')
-        rpc.call(c, FLAGS.volume_topic,
-                 {"method": "check_for_export",
-                  "args": {'instance_id': i_id}}).InAnyOrder('g1')
-        compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
-        dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
-        rpc.call(c, compute_topic,
-                 {"method": "pre_live_migration",
-                  "args": {'instance_id': i_id}}).InAnyOrder('g1')
-        drivermock.live_migration(c, instance_ref, dest)
+        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+            AndReturn(topic)
+        rpc.call(c, topic, {"method": "pre_live_migration",
+                            "args": {'instance_id': i_ref['id']}})
+        self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+        self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+                                  self.compute.post_live_migration,
+                                  self.compute.recover_live_migration)

         self.compute.db = dbmock
-        self.compute.driver = drivermock
         self.mox.ReplayAll()
-        ret = self.compute.live_migration(c, i_id, dest)
+        ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+        self.assertEqual(ret, None)
+        self.mox.ResetAll()
+
+    def test_post_live_migration_working_correctly(self):
+        """post_live_migration works as expected."""
+        i_ref = self._get_dummy_instance()
+        fixed_ip_ref = {'id': 1, 'address': '1.1.1.1'}
+        floating_ip_ref = {'id': 1, 'address': '2.2.2.2'}
+        c = context.get_admin_context()
+
+        dbmock = self.mox.CreateMock(db)
+        dbmock.volume_get_all_by_instance(c, i_ref['id']).\
+            AndReturn(i_ref['volumes'])
+        self.mox.StubOutWithMock(self.compute.volume_manager,
+                                 'remove_compute_volume')
+        for v in i_ref['volumes']:
+            self.compute.volume_manager.remove_compute_volume(c, v['id'])
+        self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+        self.compute.driver.unfilter_instance(i_ref)
+
+        fixed_ip = fixed_ip_ref['address']
+        dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn(fixed_ip)
+        dbmock.fixed_ip_update(c, fixed_ip, {'host': i_ref['host']})
+
+        fl_ip = floating_ip_ref['address']
+        dbmock.instance_get_floating_address(c, i_ref['id']).AndReturn(fl_ip)
+        dbmock.floating_ip_get_by_address(c, fl_ip).AndReturn(floating_ip_ref)
+        dbmock.floating_ip_update(c, floating_ip_ref['address'],
+                                  {'host': i_ref['host']})
+        dbmock.instance_update(c, i_ref['id'],
+                               {'state_description': 'running',
+                                'state': power_state.RUNNING,
+                                'host': i_ref['host']})
+        for v in i_ref['volumes']:
+            dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+
+        self.compute.db = dbmock
+        self.mox.ReplayAll()
+        ret = self.compute.post_live_migration(c, i_ref, i_ref['host'])
+        self.assertEqual(ret, None)
+        self.mox.ResetAll()
+
+    def test_post_live_migration_no_floating_ip(self):
+        """
+        post_live_migration works as expected
+        (in case the instance doesn't have a floating ip).
+        """
+        i_ref = self._get_dummy_instance()
+        i_ref.__setitem__('volumes', [])
+        fixed_ip_ref = {'id': 1, 'address': '1.1.1.1'}
+        floating_ip_ref = {'id': 1, 'address': '1.1.1.1'}
+        c = context.get_admin_context()
+
+        dbmock = self.mox.CreateMock(db)
+        dbmock.volume_get_all_by_instance(c, i_ref['id']).AndReturn([])
+        self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+        self.compute.driver.unfilter_instance(i_ref)
+
+        fixed_ip = fixed_ip_ref['address']
+        dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn(fixed_ip)
+        dbmock.fixed_ip_update(c, fixed_ip, {'host': i_ref['host']})
+
+        dbmock.instance_get_floating_address(c, i_ref['id']).AndReturn(None)
+        dbmock.instance_update(c, i_ref['id'],
+                               {'state_description': 'running',
+                                'state': power_state.RUNNING,
+                                'host': i_ref['host']})
+        for v in i_ref['volumes']:
+            dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+
+        self.compute.db = dbmock
+        self.mox.ReplayAll()
+        ret = self.compute.post_live_migration(c, i_ref, i_ref['host'])
+        self.assertEqual(ret, None)
+        self.mox.ResetAll()
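Reviewer note (illustrative, not part of the patch): the next test relies on post_live_migration treating a missing floating IP as a warning rather than a failure. A sketch of the lookup that the mocked db calls suggest, assuming exception.NotFound is the signal; this is inferred from the expectations, not copied from the manager:

    try:
        address = db.instance_get_floating_address(ctxt, i_ref['id'])
        if not address:
            LOG.info(_('floating_ip is not found for %s'), i_ref.name)
        else:
            floating_ip_ref = db.floating_ip_get_by_address(ctxt, address)
            db.floating_ip_update(ctxt, floating_ip_ref['address'],
                                  {'host': i_ref['host']})
    except exception.NotFound:
        # a missing floating ip is logged, not fatal; cleanup continues
        LOG.info(_('floating_ip is not found for %s'), i_ref.name)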
+    def test_post_live_migration_no_floating_ip_with_exception(self):
+        """
+        post_live_migration works as expected
+        (in case the instance doesn't have a floating ip and an
+        exception is raised).
+        """
+        i_ref = self._get_dummy_instance()
+        i_ref.__setitem__('volumes', [])
+        fixed_ip_ref = {'id': 1, 'address': '1.1.1.1'}
+        floating_ip_ref = {'id': 1, 'address': '1.1.1.1'}
+        c = context.get_admin_context()
+
+        dbmock = self.mox.CreateMock(db)
+        dbmock.volume_get_all_by_instance(c, i_ref['id']).AndReturn([])
+        self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+        self.compute.driver.unfilter_instance(i_ref)
+
+        fixed_ip = fixed_ip_ref['address']
+        dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn(fixed_ip)
+        dbmock.fixed_ip_update(c, fixed_ip, {'host': i_ref['host']})
+        dbmock.instance_get_floating_address(c, i_ref['id']).\
+            AndRaise(exception.NotFound())
+
+        self.mox.StubOutWithMock(compute_manager.LOG, 'info')
+        compute_manager.LOG.info(_('post_live_migration() is started..'))
+        compute_manager.LOG.info(_('floating_ip is not found for %s'),
+                                 i_ref.name)
+        # Only the first 2 messages are checked.
+        compute_manager.LOG.info(mox.IgnoreArg())
+        compute_manager.LOG.info(mox.IgnoreArg())
+
+        self.mox.StubOutWithMock(db, 'instance_update')
+        dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+                                                'state': power_state.RUNNING,
+                                                'host': i_ref['host']})
+        self.mox.StubOutWithMock(db, 'volume_update')
+        for v in i_ref['volumes']:
+            dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+
+        self.compute.db = dbmock
+        self.mox.ReplayAll()
+        ret = self.compute.post_live_migration(c, i_ref, i_ref['host'])
         self.assertEqual(ret, None)
         self.mox.ResetAll()
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 708b427c..e31e66c3 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -80,10 +80,10 @@ class SchedulerTestCase(test.TestCase):
                                   'args': {'num': 7}})
         self.mox.ReplayAll()
         scheduler.named_method(ctxt, 'topic', num=7)
-
+
     def test_show_host_resource_host_not_exit(self):
         """
-        A testcase of driver.has_enough_resource
+        A testcase of driver.show_host_resource
         given host does not exists.
        """
         scheduler = manager.SchedulerManager()
@@ -95,26 +95,26 @@ class SchedulerTestCase(test.TestCase):
            AndReturn([])

         self.mox.ReplayAll()
-        result = scheduler.show_host_resource(ctxt, dest) 
+        result = scheduler.show_host_resource(ctxt, dest)
         # ret should be dict
         keys = ['ret', 'msg']
         c1 = list(set(result.keys())) == list(set(keys))
         c2 = not result['ret']
         c3 = result['msg'].find('No such Host or not compute node') <= 0
-        self.assertTrue( c1 and c2 and c3)
+        self.assertTrue(c1 and c2 and c3)
         self.mox.UnsetStubs()

     def test_show_host_resource_no_project(self):
         """
-        A testcase of driver.show_host_resource
+        A testcase of driver.show_host_resource
         no instance stays on the given host
        """
         scheduler = manager.SchedulerManager()
         dest = 'dummydest'
         ctxt = context.get_admin_context()
-        r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100,
-              'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10}
-        service_ref = {'id':1, 'host':dest}
+        r0 = {'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+              'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10}
+        service_ref = {'id': 1, 'host': dest}
         service_ref.update(r0)

         self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
@@ -122,35 +122,35 @@ class SchedulerTestCase(test.TestCase):
            AndReturn([(service_ref, 0)])
         manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
            AndReturn([])
-
+
         self.mox.ReplayAll()
-        result = scheduler.show_host_resource(ctxt, dest) 
+        result = scheduler.show_host_resource(ctxt, dest)
         # ret should be dict
         keys = ['ret', 'phy_resource', 'usage']
         c1 = list(set(result.keys())) == list(set(keys))
         c2 = result['ret']
         c3 = result['phy_resource'] == r0
         c4 = result['usage'] == {}
-        self.assertTrue( c1 and c2 and c3 and c4)
+        self.assertTrue(c1 and c2 and c3 and c4)
         self.mox.UnsetStubs()

     def test_show_host_resource_works_correctly(self):
         """
-        A testcase of driver.show_host_resource
+        A testcase of driver.show_host_resource
         to make sure everything finished with no error.
""" scheduler = manager.SchedulerManager() dest = 'dummydest' ctxt = context.get_admin_context() - r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100, - 'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10} - r1 = {'vcpus':10, 'memory_mb':4, 'local_gb':20} - r2 = {'vcpus':10, 'memory_mb':20, 'local_gb':30} - service_ref = {'id':1, 'host':dest} + r0 = {'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, + 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10} + r1 = {'vcpus': 10, 'memory_mb': 4, 'local_gb': 20} + r2 = {'vcpus': 10, 'memory_mb': 20, 'local_gb': 30} + service_ref = {'id': 1, 'host': dest} service_ref.update(r0) - instance_ref2 = {'id':2, 'project_id':'p-01', 'host':'dummy'} + instance_ref2 = {'id': 2, 'project_id': 'p-01', 'host': 'dummy'} instance_ref2.update(r1) - instance_ref3 = {'id':3, 'project_id':'p-02', 'host':'dummy'} + instance_ref3 = {'id': 3, 'project_id': 'p-02', 'host': 'dummy'} instance_ref3.update(r2) self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True) @@ -165,9 +165,9 @@ class SchedulerTestCase(test.TestCase): ctxt, dest, p).AndReturn(r2['memory_mb']) manager.db.instance_get_disk_sum_by_host_and_project( ctxt, dest, p).AndReturn(r2['local_gb']) - + self.mox.ReplayAll() - result = scheduler.show_host_resource(ctxt, dest) + result = scheduler.show_host_resource(ctxt, dest) # ret should be dict keys = ['ret', 'phy_resource', 'usage'] c1 = list(set(result.keys())) == list(set(keys)) @@ -176,7 +176,7 @@ class SchedulerTestCase(test.TestCase): c4 = result['usage'].keys() == ['p-01', 'p-02'] c5 = result['usage']['p-01'] == r2 c6 = result['usage']['p-02'] == r2 - self.assertTrue( c1 and c2 and c3 and c4 and c5 and c6) + self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6) self.mox.UnsetStubs() @@ -498,8 +498,8 @@ class SimpleDriverTestCase(test.TestCase): driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-00000001', 'host':'dummy', - 'volumes':[{'id':1}, {'id':2}]} + i_ref = {'id': 1, 'hostname': 'i-00000001', 'host': 'dummy', + 'volumes': [{'id': 1}, {'id': 2}]} dest = 'dummydest' self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) @@ -518,10 +518,9 @@ class SimpleDriverTestCase(test.TestCase): driver.db.volume_update(mox.IgnoreArg(), v['id'], {'status': 'migrating'}) self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs={'instance_id':i_ref['id'], 'dest':dest} + kwargs = {'instance_id': i_ref['id'], 'dest': dest} rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']), - {"method": 'live_migration', - "args": kwargs}) + {"method": 'live_migration', "args": kwargs}) self.mox.ReplayAll() self.scheduler.live_migration(ctxt, topic, @@ -538,7 +537,7 @@ class SimpleDriverTestCase(test.TestCase): driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[]} + i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy', 'volumes': []} dest = 'dummydest' self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) @@ -553,10 +552,9 @@ class SimpleDriverTestCase(test.TestCase): driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'], power_state.PAUSED, 'migrating') self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs={'instance_id':i_ref['id'], 'dest':dest} + kwargs = {'instance_id': i_ref['id'], 'dest': dest} rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']), - {"method": 'live_migration', - "args": kwargs}) + {"method": 

         self.mox.ReplayAll()
         self.scheduler.live_migration(ctxt, topic,
@@ -565,15 +563,15 @@ class SimpleDriverTestCase(test.TestCase):

     def test_live_migraiton_src_check_instance_not_running(self):
         """
-        A testcase of driver._live_migration_src_check.
+        A testcase of driver._live_migration_src_check.
         The instance given by instance_id is not running.
        """
         ctxt = context.get_admin_context()
         topic = FLAGS.compute_topic
         dest = 'dummydest'
-        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
-                 'volumes':[], 'state_description':'migrating',
-                 'state':power_state.RUNNING}
+        i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
+                 'volumes': [], 'state_description': 'migrating',
+                 'state': power_state.RUNNING}

         self.mox.ReplayAll()
         try:
@@ -584,21 +582,21 @@ class SimpleDriverTestCase(test.TestCase):

     def test_live_migraiton_src_check_volume_node_not_alive(self):
         """
-        A testcase of driver._live_migration_src_check.
-        Volume node is not alive if any volumes are attached to
+        A testcase of driver._live_migration_src_check.
+        Volume node is not alive if any volumes are attached to
         the given instance.
        """
         dest = 'dummydest'
         ctxt = context.get_admin_context()
         topic = FLAGS.compute_topic
-        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
-                 'volumes':[{'id':1}, {'id':2}],
-                 'state_description':'running', 'state':power_state.RUNNING}
+        i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
+                 'volumes': [{'id': 1}, {'id': 2}],
+                 'state_description': 'running', 'state': power_state.RUNNING}

         self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
         driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'volume').\
            AndReturn([])
-
+
         self.mox.ReplayAll()
         try:
            self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
@@ -608,19 +606,19 @@ class SimpleDriverTestCase(test.TestCase):

     def test_live_migraiton_src_check_volume_node_not_alive(self):
         """
-        A testcase of driver._live_migration_src_check.
+        A testcase of driver._live_migration_src_check.
         The testcase make sure src-compute node is alive.
        """
         dest = 'dummydest'
         ctxt = context.get_admin_context()
         topic = FLAGS.compute_topic
-        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
-                 'state_description':'running', 'state':power_state.RUNNING}
+        i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy', 'volumes': [],
+                 'state_description': 'running', 'state': power_state.RUNNING}

         self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
         driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\
            AndReturn([])
-
+
         self.mox.ReplayAll()
         try:
            self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
@@ -630,15 +628,15 @@ class SimpleDriverTestCase(test.TestCase):

     def test_live_migraiton_src_check_works_correctly(self):
         """
-        A testcase of driver._live_migration_src_check.
+        A testcase of driver._live_migration_src_check.
         The testcase make sure everything finished with no error.
""" driver_i = self.scheduler.driver dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[], - 'state_description':'running', 'state':power_state.RUNNING} + i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy', 'volumes': [], + 'state_description': 'running', 'state': power_state.RUNNING} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('host', i_ref['host']) @@ -648,7 +646,7 @@ class SimpleDriverTestCase(test.TestCase): AndReturn([service_ref]) self.mox.StubOutWithMock(driver_i, 'service_is_up') driver_i.service_is_up(service_ref).AndReturn(True) - + self.mox.ReplayAll() ret = driver_i._live_migration_src_check(ctxt, i_ref) self.assertTrue(ret == None) @@ -656,14 +654,14 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migraiton_dest_check_service_not_exists(self): """ - A testcase of driver._live_migration_dst_check. + A testcase of driver._live_migration_dst_check. Destination host does not exist. """ driver_i = self.scheduler.driver dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('host', i_ref['host']) @@ -671,7 +669,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([]) - + self.mox.ReplayAll() try: driver_i._live_migration_dest_check(ctxt, i_ref, dest) @@ -681,14 +679,14 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migraiton_dest_check_service_isnot_compute(self): """ - A testcase of driver._live_migration_dst_check. + A testcase of driver._live_migration_dst_check. Destination host does not provide compute. """ driver_i = self.scheduler.driver dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('host', i_ref['host']) @@ -697,7 +695,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) - + self.mox.ReplayAll() try: driver_i._live_migration_dest_check(ctxt, i_ref, dest) @@ -707,13 +705,13 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migraiton_dest_check_service_not_alive(self): """ - A testcase of driver._live_migration_dst_check. + A testcase of driver._live_migration_dst_check. Destination host compute service is not alive. 
""" dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('host', i_ref['host']) @@ -724,7 +722,7 @@ class SimpleDriverTestCase(test.TestCase): AndReturn([service_ref]) self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up') self.scheduler.driver.service_is_up(service_ref).AndReturn(False) - + self.mox.ReplayAll() try: self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest) @@ -734,13 +732,13 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migraiton_dest_check_service_same_host(self): """ - A testcase of driver._live_migration_dst_check. + A testcase of driver._live_migration_dst_check. Destination host is same as src host. """ dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'} + i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummydest'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('host', i_ref['host']) @@ -751,23 +749,24 @@ class SimpleDriverTestCase(test.TestCase): AndReturn([service_ref]) self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up') self.scheduler.driver.service_is_up(service_ref).AndReturn(True) - + self.mox.ReplayAll() try: self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest) except exception.Invalid, e: - self.assertTrue(e.message.find('is running now. choose other host') >= 0) + msg = 'is running now. choose other host' + self.assertTrue(e.message.find(msg) >= 0) self.mox.UnsetStubs() def test_live_migraiton_dest_check_service_works_correctly(self): """ - A testcase of driver._live_migration_dst_check. + A testcase of driver._live_migration_dst_check. The testcase make sure everything finished with no error. """ dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'} + i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummydest'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('host', i_ref['host']) @@ -780,23 +779,24 @@ class SimpleDriverTestCase(test.TestCase): self.scheduler.driver.service_is_up(service_ref).AndReturn(True) self.mox.StubOutWithMock(self.scheduler.driver, 'has_enough_resource') self.scheduler.driver.has_enough_resource(mox.IgnoreArg(), i_ref, dest) - + self.mox.ReplayAll() try: self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest) except exception.Invalid, e: - self.assertTrue(e.message.find('is running now. choose other host') >= 0) + msg = 'is running now. choose other host' + self.assertTrue(e.message.find(msg) >= 0) self.mox.UnsetStubs() def test_live_migraiton_common_check_service_dest_not_exists(self): """ - A testcase of driver._live_migration_common_check. + A testcase of driver._live_migration_common_check. Destination host does not exist. 
""" dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'} driver_i = self.scheduler.driver self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') @@ -804,7 +804,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([]) - + self.mox.ReplayAll() try: self.scheduler.driver._live_migration_common_check(ctxt, @@ -816,14 +816,15 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migraiton_common_check_service_orig_not_exists(self): """ - A testcase of driver._live_migration_common_check. + A testcase of driver._live_migration_common_check. Original host(an instance launched on) does not exist. """ dest = 'dummydest' driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'launched_on':'h1'} + i_ref = {'id': 1, 'hostname': 'i-01', + 'host': 'dummy', 'launched_on': 'h1'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('topic', 'compute') @@ -837,7 +838,7 @@ class SimpleDriverTestCase(test.TestCase): driver.db.service_get_all_by_host(mox.IgnoreArg(), i_ref['launched_on']).\ AndReturn([]) - + self.mox.ReplayAll() try: self.scheduler.driver._live_migration_common_check(ctxt, @@ -850,15 +851,15 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migraiton_common_check_service_different_hypervisor(self): """ - A testcase of driver._live_migration_common_check. + A testcase of driver._live_migration_common_check. Original host and dest host has different hypervisor type. """ dest = 'dummydest' driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', - 'host':'dummy', 'launched_on':'h1'} + i_ref = {'id': 1, 'hostname': 'i-01', + 'host': 'dummy', 'launched_on': 'h1'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('topic', 'compute') @@ -875,7 +876,7 @@ class SimpleDriverTestCase(test.TestCase): driver.db.service_get_all_by_host(mox.IgnoreArg(), i_ref['launched_on']).\ AndReturn([service_ref2]) - + self.mox.ReplayAll() try: self.scheduler.driver._live_migration_common_check(ctxt, @@ -888,15 +889,15 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migraiton_common_check_service_different_version(self): """ - A testcase of driver._live_migration_common_check. + A testcase of driver._live_migration_common_check. Original host and dest host has different hypervisor version. 
""" dest = 'dummydest' driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', - 'host':'dummy', 'launched_on':'h1'} + i_ref = {'id': 1, 'hostname': 'i-01', + 'host': 'dummy', 'launched_on': 'h1'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('topic', 'compute') @@ -913,7 +914,7 @@ class SimpleDriverTestCase(test.TestCase): driver.db.service_get_all_by_host(mox.IgnoreArg(), i_ref['launched_on']).\ AndReturn([service_ref2]) - + self.mox.ReplayAll() try: self.scheduler.driver._live_migration_common_check(ctxt, @@ -926,15 +927,15 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migraiton_common_check_service_checking_cpuinfo_fail(self): """ - A testcase of driver._live_migration_common_check. + A testcase of driver._live_migration_common_check. Original host and dest host has different hypervisor version. """ dest = 'dummydest' driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', - 'host':'dummy', 'launched_on':'h1'} + i_ref = {'id': 1, 'hostname': 'i-01', + 'host': 'dummy', 'launched_on': 'h1'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('topic', 'compute') @@ -958,7 +959,7 @@ class SimpleDriverTestCase(test.TestCase): {"method": 'compare_cpu', "args": {'cpu_info': service_ref2['cpu_info']}}).\ AndRaise(rpc.RemoteError('doesnt have compatibility to', '', '')) - + self.mox.ReplayAll() try: self.scheduler.driver._live_migration_common_check(ctxt, @@ -971,15 +972,15 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migraiton_common_check_service_works_correctly(self): """ - A testcase of driver._live_migration_common_check. + A testcase of driver._live_migration_common_check. The testcase make sure everything finished with no error. """ dest = 'dummydest' driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic - i_ref = {'id':1, 'hostname':'i-01', - 'host':'dummy', 'launched_on':'h1'} + i_ref = {'id': 1, 'hostname': 'i-01', + 'host': 'dummy', 'launched_on': 'h1'} service_ref = models.Service() service_ref.__setitem__('id', 1) service_ref.__setitem__('topic', 'compute') @@ -1002,7 +1003,7 @@ class SimpleDriverTestCase(test.TestCase): driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), {"method": 'compare_cpu', "args": {'cpu_info': service_ref2['cpu_info']}}) - + self.mox.ReplayAll() ret = self.scheduler.driver._live_migration_common_check(ctxt, i_ref, @@ -1012,20 +1013,21 @@ class SimpleDriverTestCase(test.TestCase): def test_has_enough_resource_lack_resource_memory(self): """ - A testcase of driver.has_enough_resource. + A testcase of driver.has_enough_resource. 
         Lack of memory_mb.(boundary check)
        """
         dest = 'dummydest'
         ctxt = context.get_admin_context()
         topic = FLAGS.compute_topic
-        service_ref = {'id':1, 'memory_mb':32, 'memory_mb_used':12, 'local_gb':100}
-        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
-                 'vcpus':5, 'memory_mb':20, 'local_gb':10}
+        service_ref = {'id': 1, 'memory_mb': 32,
+                       'memory_mb_used': 12, 'local_gb': 100}
+        i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
+                 'vcpus': 5, 'memory_mb': 20, 'local_gb': 10}

         self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
         driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
            AndReturn([service_ref])
-
+
         self.mox.ReplayAll()
         try:
            self.scheduler.driver.has_enough_resource(ctxt, i_ref, dest)
@@ -1037,20 +1039,20 @@ class SimpleDriverTestCase(test.TestCase):

     def test_has_enough_resource_works_correctly(self):
         """
-        A testcase of driver.has_enough_resource
+        A testcase of driver.has_enough_resource
         to make sure everything finished with no error.
        """
         dest = 'dummydest'
         ctxt = context.get_admin_context()
         topic = FLAGS.compute_topic
-        service_ref = {'id':1, 'memory_mb':120, 'memory_mb_used':32}
-        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
-                 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+        service_ref = {'id': 1, 'memory_mb': 120, 'memory_mb_used': 32}
+        i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
+                 'vcpus': 5, 'memory_mb': 8, 'local_gb': 10}

         self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
         driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
            AndReturn([service_ref])
-
+
         self.mox.ReplayAll()
         ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref, dest)
         self.assertTrue(ret == None)
@@ -1066,7 +1068,7 @@ class SimpleDriverTestCase(test.TestCase):
         ctxt = context.get_admin_context()
         topic = FLAGS.compute_topic
         fpath = '/test/20110127120000'
-        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+        i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}

         self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
         driver.rpc.call(mox.IgnoreArg(),
@@ -1077,8 +1079,8 @@ class SimpleDriverTestCase(test.TestCase):
         driver.logging.error(msg % FLAGS.instances_path)

         self.mox.ReplayAll()
-        self.assertRaises(rpc.RemoteError,
-                          driver_i.mounted_on_same_shared_storage,
+        self.assertRaises(rpc.RemoteError,
+                          driver_i.mounted_on_same_shared_storage,
                           ctxt, i_ref, dest)
         self.mox.UnsetStubs()

@@ -1092,27 +1094,26 @@ class SimpleDriverTestCase(test.TestCase):
         ctxt = context.get_admin_context()
         topic = FLAGS.compute_topic
         fpath = '/test/20110127120000'
-        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+        i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}

         self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
         driver.rpc.call(mox.IgnoreArg(),
                         db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
                         {"method": 'mktmpfile'}).AndReturn(fpath)
-        driver.rpc.call(mox.IgnoreArg(),
+        driver.rpc.call(mox.IgnoreArg(),
                         db.queue_get_for(ctxt, FLAGS.compute_topic, i_ref['host']),
-                        {"method": 'confirm_tmpfile', "args":{'path':fpath}}).\
-            AndRaise(rpc.RemoteError('','',''))
+                        {"method": 'confirm_tmpfile', "args": {'path': fpath}}).\
+            AndRaise(rpc.RemoteError('', '', ''))
         self.mox.StubOutWithMock(driver.logging, 'error')
         msg = _("Cannot create tmpfile at %s to confirm shared storage.")
         driver.logging.error(msg % FLAGS.instances_path)

         self.mox.ReplayAll()
-        self.assertRaises(rpc.RemoteError,
-                          driver_i.mounted_on_same_shared_storage,
+        self.assertRaises(rpc.RemoteError,
+                          driver_i.mounted_on_same_shared_storage,
                           ctxt, i_ref, dest)
         self.mox.UnsetStubs()
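Reviewer note (illustrative, not part of the patch): the mounted_on_same_shared_storage cases encode a tmpfile handshake: create a file on the destination via mktmpfile, then ask the source host to confirm_tmpfile the same path; shared storage is proven when both hosts see it. A schematic sketch of that flow, with method names from the hunks and control flow simplified:

    def mounted_on_same_shared_storage(ctxt, i_ref, dest):
        """Create a file from dest, then confirm it is visible on src."""
        dst_topic = db.queue_get_for(ctxt, FLAGS.compute_topic, dest)
        src_topic = db.queue_get_for(ctxt, FLAGS.compute_topic, i_ref['host'])
        fpath = rpc.call(ctxt, dst_topic, {"method": 'mktmpfile'})
        # raises rpc.RemoteError if the path is absent on the source,
        # i.e. the two hosts do not share FLAGS.instances_path
        rpc.call(ctxt, src_topic,
                 {"method": 'confirm_tmpfile', "args": {'path': fpath}})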
-    def test_mounted_on_same_shared_storage_works_correctly(self):
+    def test_mounted_on_same_shared_storage_works_correctly(self):
         """
         A testcase of driver.mounted_on_same_shared_storage
@@ -1122,15 +1123,17 @@ class SimpleDriverTestCase(test.TestCase):
         ctxt = context.get_admin_context()
         topic = FLAGS.compute_topic
         fpath = '/test/20110127120000'
-        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+        i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}

         self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
         driver.rpc.call(mox.IgnoreArg(),
                         db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest),
                         {"method": 'mktmpfile'}).AndReturn(fpath)
-        driver.rpc.call(mox.IgnoreArg(),
-                        db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, i_ref['host']),
-                        {"method": 'confirm_tmpfile', "args":{'path':fpath}})
+        driver.rpc.call(mox.IgnoreArg(),
+                        db.queue_get_for(mox.IgnoreArg(),
+                                         FLAGS.compute_topic,
+                                         i_ref['host']),
+                        {"method": 'confirm_tmpfile', "args": {'path': fpath}})

         self.mox.ReplayAll()
         ret = self.scheduler.driver.mounted_on_same_shared_storage(ctxt,
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 0e734046..8ed726c2 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -38,6 +38,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager')
 libvirt = None
 libxml2 = None

+
 class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
@@ -48,12 +49,12 @@ class LibvirtConnTestCase(test.TestCase):
         try:
            pjs = self.manager.get_projects()
            pjs = [p for p in pjs if p.name == 'fake']
-            if 0 != len(pjs): 
+            if 0 != len(pjs):
                self.manager.delete_project(pjs[0])

            users = self.manager.get_users()
            users = [u for u in users if u.name == 'fake']
-            if 0 != len(users): 
+            if 0 != len(users):
                self.manager.delete_user(users[0])
         except Exception, e:
            pass
@@ -75,13 +76,13 @@ class LibvirtConnTestCase(test.TestCase):
                    'bridge': 'br101',
                    'instance_type': 'm1.small'}

-    def _driver_dependent_test_setup(self): 
+    def _driver_dependent_test_setup(self):
         """
         Setup method.
-        Call this method at the top of each testcase method, 
+        Call this method at the top of each testcase method,
         if the testcase is necessary libvirt and cheetah.
         """
-        try :
+        try:
            global libvirt
            global libxml2
            libvirt_conn.libvirt = __import__('libvirt')
@@ -93,13 +94,14 @@ class LibvirtConnTestCase(test.TestCase):
                  """using driver-dependent library Cheetah/libvirt/libxml2.""")
            raise e

-        # inebitable mocks for calling
+        # inevitable mocks for calling
         #nova.virt.libvirt_conn.LibvirtConnection.__init__
         obj = utils.import_object(FLAGS.firewall_driver)
         fwmock = self.mox.CreateMock(obj)
         self.mox.StubOutWithMock(libvirt_conn, 'utils',
                                 use_mock_anything=True)
-        libvirt_conn.utils.import_object(FLAGS.firewall_driver).AndReturn(fwmock)
+        libvirt_conn.utils.import_object(FLAGS.firewall_driver).\
+            AndReturn(fwmock)
         return fwmock

     def test_xml_and_uri_no_ramdisk_no_kernel(self):
@@ -241,8 +243,9 @@ class LibvirtConnTestCase(test.TestCase):
                             expected_result,
                             '%s failed common check %d' % (xml, i))

-    # This test is supposed to make sure we don't override a specifically set uri
-    #
+    # This test is supposed to make sure we don't
+    # override a specifically set uri
+    #
     # Deliberately not just assigning this string to FLAGS.libvirt_uri and
     # checking against that later on. This way we make sure the
     # implementation doesn't fiddle around with the FLAGS.
@@ -256,27 +259,26 @@ class LibvirtConnTestCase(test.TestCase):

     def test_get_vcpu_total(self):
         """
-        Check if get_vcpu_total returns appropriate cpu value
+        Check if get_vcpu_total returns appropriate cpu value
         Connection/OS/driver differenct does not matter for this method,
         everyone can execute for checking.
        """
-        try: 
+        try:
            self._driver_dependent_test_setup()
-        except: 
-            return 
+        except:
+            return

         self.mox.ReplayAll()
         conn = libvirt_conn.LibvirtConnection(False)
         self.assertTrue(0 < conn.get_vcpu_total())
         self.mox.UnsetStubs()

-
     def test_get_memory_mb_total(self):
         """Check if get_memory_mb returns appropriate memory value"""
-        try: 
+        try:
            self._driver_dependent_test_setup()
-        except: 
-            return 
+        except:
+            return

         self.mox.ReplayAll()
         conn = libvirt_conn.LibvirtConnection(False)
@@ -285,8 +287,7 @@ class LibvirtConnTestCase(test.TestCase):

     def test_get_local_gb_total(self):
         """Check if get_local_gb_total returns appropriate disk value"""
-        # Note(masumotok): cannot test b/c FLAGS.instances_path is
-        # inevitable for this test..
+        # Note(masumotok): leave this b/c FLAGS.instances_path is inevitable..
         #try:
         #    self._driver_dependent_test_setup()
         #except:
@@ -305,8 +306,9 @@ class LibvirtConnTestCase(test.TestCase):
         except:
            return

-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
-        libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1,2])
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+                                 '_conn', use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1, 2])
         vdmock = self.mox.CreateMock(libvirt.virDomain)
         self.mox.StubOutWithMock(vdmock, "vcpus", use_mock_anything=True)
         vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]])
@@ -318,7 +320,7 @@ class LibvirtConnTestCase(test.TestCase):

         self.mox.ReplayAll()
         conn = libvirt_conn.LibvirtConnection(False)
-        self.assertTrue( conn.get_vcpu_used() == 4)
+        self.assertTrue(conn.get_vcpu_used() == 4)
         self.mox.UnsetStubs()

     def test_get_memory_mb_used(self):
@@ -335,8 +337,7 @@ class LibvirtConnTestCase(test.TestCase):

     def test_get_local_gb_used(self):
         """Check if get_local_gb_total returns appropriate disk value"""
-        # Note(masumotok): cannot test b/c FLAGS.instances_path is
-        # inevitable for this test..
+        # Note(masumotok): leave this b/c FLAGS.instances_path is inevitable
         #try:
         #    self._driver_dependent_test_setup()
         #except:
@@ -353,22 +354,23 @@ class LibvirtConnTestCase(test.TestCase):
         Check if get_cpu_info works correctly.
         (in case libvirt.getCapabilities() works correctly)
(in case libvirt.getCapabilities() works correctly) """ - xml=("""x86_64Nehalem""" - """Intel""" - """""" - """""" - """""" - """""" - """""" - """""" - """""") + xml = ("""x86_64Nehalem""" + """Intel""" + """""" + """""" + """""" + """""" + """""" + """""" + """""") - try: + try: self._driver_dependent_test_setup() - except: - return - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) + except: + return + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + '_conn', use_mock_anything=True) libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml) self.mox.ReplayAll() @@ -382,33 +384,34 @@ class LibvirtConnTestCase(test.TestCase): in case libvirt.getCapabilities() returns wrong xml (in case of xml doesnt have tag) """ - xml=("""x86_64Nehalem""" - """Intel""" - """""" - """""" - """""" - """""" - """""" - """""" - """""") + xml = ("""x86_64Nehalem""" + """Intel""" + """""" + """""" + """""" + """""" + """""" + """""" + """""") - try: + try: self._driver_dependent_test_setup() - except: - return - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) + except: + return + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + '_conn', use_mock_anything=True) libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml) self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) - try: + try: conn.get_cpu_info() except exception.Invalid, e: - c1 = ( 0 <= e.message.find('Invalid xml') ) + c1 = (0 <= e.message.find('Invalid xml')) self.assertTrue(c1) self.mox.UnsetStubs() - + def test_get_cpu_info_inappropreate_xml2(self): """ Check if get_cpu_info raises exception @@ -416,30 +419,31 @@ class LibvirtConnTestCase(test.TestCase): (in case of xml doesnt have inproper tag meaning missing "socket" attribute) """ - xml=("""x86_64Nehalem""" - """Intel""" - """""" - """""" - """""" - """""" - """""" - """""" - """""") + xml = ("""x86_64Nehalem""" + """Intel""" + """""" + """""" + """""" + """""" + """""" + """""" + """""") - try: + try: self._driver_dependent_test_setup() - except: - return - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) + except: + return + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + '_conn', use_mock_anything=True) libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml) self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) - try: + try: conn.get_cpu_info() except exception.Invalid, e: - c1 = ( 0 <= e.message.find('Invalid xml: topology') ) + c1 = (0 <= e.message.find('Invalid xml: topology')) self.assertTrue(c1) self.mox.UnsetStubs() @@ -461,10 +465,10 @@ class LibvirtConnTestCase(test.TestCase): driver(different depends on environment), only dictionary keys are checked. """ - try: + try: self._driver_dependent_test_setup() - except: - return + except: + return def dic_key_check(dic): validkey = ['vcpus', 'memory_mb', 'local_gb', @@ -474,7 +478,8 @@ class LibvirtConnTestCase(test.TestCase): host = 'foo' binary = 'nova-compute' - service_ref = {'id':1, 'host':host, 'binary':binary, 'topic':'compute'} + service_ref = {'id': 1, 'host': host, 'binary': binary, + 'topic': 'compute'} self.mox.StubOutWithMock(db, 'service_get_all_by_topic') db.service_get_all_by_topic(mox.IgnoreMox(), 'compute').\ @@ -493,10 +498,10 @@ class LibvirtConnTestCase(test.TestCase): This testcase confirms if no record found on Service table, exception can be raised. 
""" - try: + try: self._driver_dependent_test_setup() - except: - return + except: + return host = 'foo' binary = 'nova-compute' @@ -511,7 +516,7 @@ class LibvirtConnTestCase(test.TestCase): conn.update_available_resource(host) except exception.Invalid, e: msg = 'Cannot insert compute manager specific info' - c1 = ( 0 <= e.message.find(msg)) + c1 = (0 <= e.message.find(msg)) self.assertTrue(c1) self.mox.ResetAll() @@ -523,17 +528,20 @@ class LibvirtConnTestCase(test.TestCase): """"sockets":"%s"}, "features":[%s]}""") cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') - try: + try: self._driver_dependent_test_setup() - except: - return + except: + return - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) - libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).AndReturn(1) + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + '_conn', + use_mock_anything=True) + libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), + 0).AndReturn(1) self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) - self.assertTrue( None== conn.compare_cpu(cpu_info)) + self.assertTrue(None == conn.compare_cpu(cpu_info)) self.mox.UnsetStubs() def test_compare_cpu_raises_exception(self): @@ -546,14 +554,14 @@ class LibvirtConnTestCase(test.TestCase): """"sockets":"%s"}, "features":[%s]}""") cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') - try: + try: self._driver_dependent_test_setup() - except: - return + except: + return self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) - libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\ + libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\ AndRaise(libvirt.libvirtError('ERR')) self.mox.ReplayAll() @@ -569,14 +577,14 @@ class LibvirtConnTestCase(test.TestCase): """"sockets":"%s"}, "features":[%s]}""") cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') - try: + try: self._driver_dependent_test_setup() - except: - return + except: + return self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) - libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\ + libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\ AndRaise(exception.Invalid('ERR')) self.mox.ReplayAll() @@ -590,10 +598,10 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = models.Instance() instance_ref.__setitem__('id', 1) - try: + try: nwmock, fwmock = self._driver_dependent_test_setup() - except: - return + except: + return nwmock.setup_basic_filtering(mox.IgnoreArg()) fwmock.prepare_instance_filter(instance_ref) @@ -613,10 +621,10 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = models.Instance() instance_ref.__setitem__('id', 1) - try: + try: nwmock, fwmock = self._driver_dependent_test_setup() - except: - return + except: + return nwmock.setup_basic_filtering(mox.IgnoreArg()) fwmock.prepare_instance_filter(instance_ref) @@ -629,10 +637,10 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) - try: + try: conn.ensure_filtering_rules_for_instance(instance_ref) except exception.Error, e: - c1 = ( 0<=e.message.find('Timeout migrating for')) + c1 = (0 <= e.message.find('Timeout migrating for')) self.assertTrue(c1) self.mox.UnsetStubs() @@ -641,62 +649,57 @@ class LibvirtConnTestCase(test.TestCase): class dummyCall(object): f = None - def start(self, interval=0, now=False): + + def 
@@ -523,17 +528,20 @@ class LibvirtConnTestCase(test.TestCase):
              """"sockets":"%s"}, "features":[%s]}""")
         cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
 
-        try:
+        try:
             self._driver_dependent_test_setup()
-        except:
-            return
+        except:
+            return
 
-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
-        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).AndReturn(1)
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+                                 '_conn',
+                                 use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),
+                                                        0).AndReturn(1)
         self.mox.ReplayAll()
 
         conn = libvirt_conn.LibvirtConnection(False)
-        self.assertTrue( None== conn.compare_cpu(cpu_info))
+        self.assertTrue(None == conn.compare_cpu(cpu_info))
         self.mox.UnsetStubs()
 
     def test_compare_cpu_raises_exception(self):
@@ -546,14 +554,14 @@ class LibvirtConnTestCase(test.TestCase):
              """"sockets":"%s"}, "features":[%s]}""")
         cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
 
-        try:
+        try:
             self._driver_dependent_test_setup()
-        except:
-            return
+        except:
+            return
 
         self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
                                  use_mock_anything=True)
-        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
+        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\
             AndRaise(libvirt.libvirtError('ERR'))
         self.mox.ReplayAll()
 
@@ -569,14 +577,14 @@ class LibvirtConnTestCase(test.TestCase):
              """"sockets":"%s"}, "features":[%s]}""")
         cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
 
-        try:
+        try:
             self._driver_dependent_test_setup()
-        except:
-            return
+        except:
+            return
 
         self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
                                  use_mock_anything=True)
-        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
+        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\
             AndRaise(exception.Invalid('ERR'))
         self.mox.ReplayAll()
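The compare_cpu() testcases above fix only the call contract: the JSON built from the template t goes in, libvirt's compareCPU() decides, and both libvirt.libvirtError and exception.Invalid propagate to the caller. A sketch under those assumptions; the rebuilt XML is deliberately simplified and this is not the branch's exact code:

    import json


    def compare_cpu_sketch(conn, cpu_info):
        # conn is a libvirt connection; cpu_info is the JSON string the
        # template t above produces.
        info = json.loads(cpu_info)
        xml = ('<cpu><arch>%(arch)s</arch><model>%(model)s</model>'
               '<vendor>%(vendor)s</vendor></cpu>' % info)
        # compareCPU() raises libvirt.libvirtError on failure by itself;
        # a non-positive return value means the CPUs are incompatible.
        if conn.compareCPU(xml, 0) <= 0:
            raise Exception('CPU does not have compatibility.')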
@@ -590,10 +598,10 @@ class LibvirtConnTestCase(test.TestCase):
         instance_ref = models.Instance()
         instance_ref.__setitem__('id', 1)
 
-        try:
+        try:
             nwmock, fwmock = self._driver_dependent_test_setup()
-        except:
-            return
+        except:
+            return
 
         nwmock.setup_basic_filtering(mox.IgnoreArg())
         fwmock.prepare_instance_filter(instance_ref)
@@ -613,10 +621,10 @@ class LibvirtConnTestCase(test.TestCase):
         instance_ref = models.Instance()
         instance_ref.__setitem__('id', 1)
 
-        try:
+        try:
             nwmock, fwmock = self._driver_dependent_test_setup()
-        except:
-            return
+        except:
+            return
 
         nwmock.setup_basic_filtering(mox.IgnoreArg())
         fwmock.prepare_instance_filter(instance_ref)
@@ -629,10 +637,10 @@ class LibvirtConnTestCase(test.TestCase):
 
         self.mox.ReplayAll()
         conn = libvirt_conn.LibvirtConnection(False)
-        try:
+        try:
             conn.ensure_filtering_rules_for_instance(instance_ref)
         except exception.Error, e:
-            c1 = ( 0<=e.message.find('Timeout migrating for'))
+            c1 = (0 <= e.message.find('Timeout migrating for'))
             self.assertTrue(c1)
         self.mox.UnsetStubs()
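The two ensure_filtering_rules_for_instance() testcases above imply set-up-then-poll behaviour that gives up with 'Timeout migrating for ...'. Sketched below with assumed helper names and an assumed retry bound; only the setup calls and the timeout message are taken from the tests themselves:

    import time


    def ensure_filtering_rules_sketch(nwfilter, fwdriver, instance_ref,
                                      max_retry=10):
        # instance_filter_exists() and max_retry are assumptions, not
        # confirmed API.
        nwfilter.setup_basic_filtering(instance_ref)
        fwdriver.prepare_instance_filter(instance_ref)
        for _count in range(max_retry):
            # Wait until the filter really exists on this host, since a
            # migrating instance must arrive with its rules in place.
            if fwdriver.instance_filter_exists(instance_ref):
                return
            time.sleep(1)
        raise Exception('Timeout migrating for %s' % instance_ref.name)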
@@ -641,62 +649,57 @@ class LibvirtConnTestCase(test.TestCase):
 
         class dummyCall(object):
             f = None
-            def start(self, interval=0, now=False):
+
+            def start(self, interval=0, now=False):
                 pass
 
-        instance_ref = models.Instance()
-        instance_ref.__setitem__('id', 1)
-        dest = 'desthost'
+        i_ref = models.Instance()
+        i_ref.__setitem__('id', 1)
+        i_ref.__setitem__('host', 'dummy')
         ctxt = context.get_admin_context()
 
-        try:
+        try:
             self._driver_dependent_test_setup()
-        except:
-            return
+        except:
+            return
 
         self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
                                  use_mock_anything=True)
         vdmock = self.mox.CreateMock(libvirt.virDomain)
         self.mox.StubOutWithMock(vdmock, "migrateToURI",
                                  use_mock_anything=True)
-        vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(),
+        vdmock.migrateToURI(FLAGS.live_migration_uri % i_ref['host'],
+                            mox.IgnoreArg(),
                             None, FLAGS.live_migration_bandwidth).\
                             AndReturn(None)
-        libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
+        libvirt_conn.LibvirtConnection._conn.lookupByName(i_ref.name).\
             AndReturn(vdmock)
-        # below description is also ok.
-        #self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection._conn,
-        #                         "lookupByName", use_mock_anything=True)
-
         libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall())
-
         self.mox.ReplayAll()
         conn = libvirt_conn.LibvirtConnection(False)
-        ret = conn._live_migration(ctxt, instance_ref, dest)
+        # Not setting post_method/recover_method in this testcase.
+        ret = conn._live_migration(ctxt, i_ref, i_ref['host'], '', '')
         self.assertTrue(ret == None)
         self.mox.UnsetStubs()
 
     def test_live_migration_raises_exception(self):
         """
         _live_migration raises exception, then this testcase confirms
-        state_description/state for the instances/volumes are recovered.
+        the recover method is called.
         """
-        class Instance(models.NovaBase):
-            id = 0
-            volumes = None
-            name = 'name'
-
+        i_ref = models.Instance()
+        i_ref.__setitem__('id', 1)
+        i_ref.__setitem__('host', 'dummy')
         ctxt = context.get_admin_context()
-        dest = 'desthost'
-        instance_ref = Instance()
-        instance_ref.__setitem__('id', 1)
-        instance_ref.__setitem__('volumes', [{'id':1}, {'id':2}])
 
-        try:
+        def dummy_recover_method(c, instance):
+            pass
+
+        try:
             nwmock, fwmock = self._driver_dependent_test_setup()
-        except:
-            return
+        except:
+            return
 
         self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
                                  use_mock_anything=True)
@@ -709,167 +712,18 @@ class LibvirtConnTestCase(test.TestCase):
-        libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
+        libvirt_conn.LibvirtConnection._conn.lookupByName(i_ref.name).\
             AndReturn(vdmock)
         self.mox.StubOutWithMock(db, 'instance_set_state')
-        db.instance_set_state(ctxt, instance_ref['id'],
+        db.instance_set_state(ctxt, i_ref['id'],
                               power_state.RUNNING, 'running')
         self.mox.StubOutWithMock(db, 'volume_update')
-        for v in instance_ref.volumes:
+        for v in i_ref.volumes:
-            db.volume_update(ctxt, v['id'], {'status': 'in-use'}).\
-                InAnyOrder('g1')
-
-        self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
-        self.assertRaises(libvirt.libvirtError,
-                          conn._live_migration,
-                          ctxt, instance_ref, dest)
-        self.mox.UnsetStubs()
-
-    def test_post_live_migration_working_correctly(self):
-        """_post_live_migration works as expected correctly """
-
-        dest = 'dummydest'
-        ctxt = context.get_admin_context()
-        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
-                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
-                        'volumes':[{'id':1}, {'id':2} ]}
-        network_ref = {'id':1, 'host':dest}
-        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
-
-        try:
-            nwmock, fwmock = self._driver_dependent_test_setup()
-        except:
-            return
-        fwmock.unfilter_instance(instance_ref)
-
-        fixed_ip = instance_ref['fixed_ip']
-        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
-        db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
-        self.mox.StubOutWithMock(db, 'fixed_ip_update')
-        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
-        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
-        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
-
-        fl_ip = instance_ref['floating_ip']
-        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
-        db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(fl_ip)
-        self.mox.StubOutWithMock(db, 'floating_ip_get_by_address')
-        db.floating_ip_get_by_address(ctxt, instance_ref['floating_ip']).\
-            AndReturn(floating_ip_ref)
-        self.mox.StubOutWithMock(db, 'floating_ip_update')
-        db.floating_ip_update(ctxt, floating_ip_ref['address'], {'host': dest})
-
-        self.mox.StubOutWithMock(db, 'instance_update')
-        db.instance_update(ctxt, instance_ref['id'],
-                           {'state_description': 'running',
-                            'state': power_state.RUNNING, 'host': dest})
-        self.mox.StubOutWithMock(db, 'volume_update')
-        for v in instance_ref['volumes']:
             db.volume_update(ctxt, v['id'], {'status': 'in-use'})
 
         self.mox.ReplayAll()
         conn = libvirt_conn.LibvirtConnection(False)
-        conn._post_live_migration( ctxt, instance_ref, dest)
-        self.mox.UnsetStubs()
-
-    def test_post_live_migration_no_floating_ip(self):
-        """
-        _post_live_migration works as expected correctly
-        (in case instance doesnt have floaitng ip)
-        """
-        dest = 'dummydest'
-        ctxt = context.get_admin_context()
-        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
-                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
-                        'volumes':[{'id':1}, {'id':2} ]}
-        network_ref = {'id':1, 'host':dest}
-        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
-
-        try:
-            nwmock, fwmock = self._driver_dependent_test_setup()
-        except:
-            return
-        fwmock.unfilter_instance(instance_ref)
-
-        fixed_ip = instance_ref['fixed_ip']
-        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
-        db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
-        self.mox.StubOutWithMock(db, 'fixed_ip_update')
-        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
-        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
-        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
-
-        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
-        db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(None)
-        self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
-        libvirt_conn.LOG.info(_('post livemigration operation is started..'))
-        libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
-                              instance_ref['hostname'])
-        # Checking last messages are ignored. may be no need to check so strictly?
-        libvirt_conn.LOG.info(mox.IgnoreArg())
-        libvirt_conn.LOG.info(mox.IgnoreArg())
-
-        self.mox.StubOutWithMock(db, 'instance_update')
-        db.instance_update(ctxt, instance_ref['id'],
-                           {'state_description': 'running',
-                            'state': power_state.RUNNING,
-                            'host': dest})
-        self.mox.StubOutWithMock(db, 'volume_update')
-        for v in instance_ref['volumes']:
-            db.volume_update(ctxt, v['id'], {'status': 'in-use'})
-
-        self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
-        conn._post_live_migration( ctxt, instance_ref, dest)
-        self.mox.UnsetStubs()
-
-    def test_post_live_migration_no_floating_ip_with_exception(self):
-        """
-        _post_live_migration works as expected correctly
-        (in case instance doesnt have floaitng ip, and raise exception)
-        """
-        dest = 'dummydest'
-        ctxt = context.get_admin_context()
-        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
-                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
-                        'volumes':[{'id':1}, {'id':2} ]}
-        network_ref = {'id':1, 'host':dest}
-        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
-
-        try:
-            nwmock, fwmock = self._driver_dependent_test_setup()
-        except:
-            return
-        fwmock.unfilter_instance(instance_ref)
-
-        fixed_ip = instance_ref['fixed_ip']
-        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
-        db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
-        self.mox.StubOutWithMock(db, 'fixed_ip_update')
-        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
-        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
-        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
-
-        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
-        db.instance_get_floating_address(ctxt, instance_ref['id']).\
-            AndRaise(exception.NotFound())
-        self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
-        libvirt_conn.LOG.info(_('post livemigration operation is started..'))
-        libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
-                              instance_ref['hostname'])
-        # the last message is ignored. may be no need to check so strictly?
-        libvirt_conn.LOG.info(mox.IgnoreArg())
-        libvirt_conn.LOG.info(mox.IgnoreArg())
-
-        self.mox.StubOutWithMock(db, 'instance_update')
-        db.instance_update(ctxt, instance_ref['id'],
-                           {'state_description': 'running',
-                            'state': power_state.RUNNING, 'host': dest})
-        self.mox.StubOutWithMock(db, 'volume_update')
-        for v in instance_ref['volumes']:
-            db.volume_update(ctxt, v['id'], {'status': 'in-use'})
-
-        self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
-        conn._post_live_migration( ctxt, instance_ref, dest)
+        self.assertRaises(libvirt.libvirtError,
+                          conn._live_migration,
+                          ctxt, i_ref, i_ref['host'],
+                          '', dummy_recover_method)
         self.mox.UnsetStubs()
 
     def tearDown(self):
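The hunks above rework _live_migration() to take post_method/recover_method callbacks, which is why the three _post_live_migration testcases could be deleted wholesale: recovery and post-migration bookkeeping (re-pointing fixed/floating IPs, instance state and volume statuses at the destination, as the deleted mocks asserted) become the callbacks' job instead of inline db fix-ups. A sketch of the control flow the new tests imply; the LoopingCall-driven wait before post_method is elided, and all names besides migrateToURI() are illustrative:

    def live_migration_sketch(conn, ctxt, instance_ref, dest, uri_template,
                              bandwidth, post_method, recover_method):
        # Failure of any step hands control to recover_method and
        # re-raises, which is exactly what
        # test_live_migration_raises_exception asserts.
        try:
            dom = conn.lookupByName(instance_ref.name)
            # migrateToURI(duri, flags, dname, bandwidth); flags of 0 is a
            # placeholder, the real driver computes them.
            dom.migrateToURI(uri_template % dest, 0, None, bandwidth)
        except Exception:
            recover_method(ctxt, instance_ref)
            raise
        # The real driver polls (e.g. via utils.LoopingCall) until the
        # domain has left this host before declaring success.
        post_method(ctxt, instance_ref, dest)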
@@ -1181,4 +1035,3 @@ class NWFilterTestCase(test.TestCase):
         self.fw.apply_instance_filter(instance)
         _ensure_all_called()
         self.teardown_security_group()
-
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index b35cf4eb..6ae075ca 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -177,12 +177,13 @@ class VolumeTestCase(test.TestCase):
         pass
 
 
-class AOETestCase(test.TestCase):
-    """Test Case for AOEDriver"""
+class DriverTestCase(test.TestCase):
+    """Base Test class for Drivers."""
+    driver_name = "nova.volume.driver.FakeAOEDriver"
 
     def setUp(self):
-        super(AOETestCase, self).setUp()
-        self.flags(volume_driver='nova.volume.driver.AOEDriver',
+        super(DriverTestCase, self).setUp()
+        self.flags(volume_driver=self.driver_name,
                    logging_default_format_string="%(message)s")
         self.volume = utils.import_object(FLAGS.volume_manager)
         self.context = context.get_admin_context()
@@ -201,9 +202,30 @@
         inst = {}
         self.instance_id = db.instance_create(self.context, inst)['id']
 
+    def tearDown(self):
+        super(DriverTestCase, self).tearDown()
+
+    def _attach_volume(self):
+        """Attach volumes to an instance. This function also sets
+           a fake log message."""
+        return []
+
+    def _detach_volume(self, volume_id_list):
+        """Detach volumes from an instance."""
+        for volume_id in volume_id_list:
+            db.volume_detached(self.context, volume_id)
+            self.volume.delete_volume(self.context, volume_id)
+
+
+class AOETestCase(DriverTestCase):
+    """Test Case for AOEDriver"""
+    driver_name = "nova.volume.driver.AOEDriver"
+
+    def setUp(self):
+        super(AOETestCase, self).setUp()
+
     def tearDown(self):
         super(AOETestCase, self).tearDown()
-        db.instance_destroy(self.context, self.instance_id)
 
     def _attach_volume(self):
         """Attach volumes to an instance. This function also sets
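With the DriverTestCase extraction above, a driver's test class only needs to supply driver_name and its own _attach_volume(); setUp()/tearDown() and _detach_volume() are inherited. A hypothetical subclass for illustration (FakeISCSIDriver is an invented path, not one this diff adds):

    class SomeDriverTestCase(DriverTestCase):
        """Hypothetical subclass showing the intended reuse pattern."""
        driver_name = "nova.volume.driver.FakeISCSIDriver"

        def _attach_volume(self):
            # Create and attach fake volumes here, returning their ids so
            # the inherited _detach_volume() can clean them up.
            return []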
@@ -212,7 +234,7 @@ class AOETestCase(test.TestCase):
         for index in xrange(3):
             vol = {}
             vol['size'] = 0
-            volume_id = db.volume_create(context.get_admin_context(),
+            volume_id = db.volume_create(self.context,
                                          vol)['id']
             self.volume.create_volume(self.context, volume_id)
 
@@ -230,12 +252,6 @@ class AOETestCase(test.TestCase):
 
         return volume_id_list
 
-    def _detach_volume(self, volume_id_list):
-        """Detach volumes from an instance."""
-        for volume_id in volume_id_list:
-            db.volume_detached(self.context, volume_id)
-            self.volume.delete_volume(self.context, volume_id)
-
     def test_check_for_export_with_no_volume(self):
         """No log message when no volume is attached to an instance."""
         self.stream.truncate(0)
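The AOETestCase changes below replace the old log-message assertion with an exception carrying a detailed message. The driver-side behaviour being exercised, sketched with illustrative names; the exact shell command used to confirm a vblade export is an assumption, since the driver change itself is not part of this diff:

    def aoe_check_for_export_sketch(db, driver, context, instance_id):
        # Sketch only: every attached volume must have a live vblade
        # process for its shelf/blade pair, otherwise an error naming the
        # volume is raised, as the testcase below expects.
        instance_ref = db.instance_get(context, instance_id)
        for volume in instance_ref['volumes']:
            volume_id = volume['id']
            (shelf_id, blade_id) = db.volume_get_shelf_and_blade(context,
                                                                 volume_id)
            try:
                # Hypothetical check; the real command is driver-specific.
                driver._execute("sudo vblade-persist ls e%s.%s"
                                % (shelf_id, blade_id))
            except Exception:
                raise Exception("Cannot confirm exported volume "
                                "id:%(volume_id)s. vblade process for "
                                "e%(shelf_id)s.%(blade_id)s isn't running."
                                % locals())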
@@ -262,10 +278,95 @@ class AOETestCase(test.TestCase):
         (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
                                                              volume_id_list[0])
 
+        msg_is_match = False
+        self.stream.truncate(0)
+        try:
+            self.volume.check_for_export(self.context, self.instance_id)
+        except exception.ProcessExecutionError, e:
+            volume_id = volume_id_list[0]
+            msg = _("""Cannot confirm exported volume id:%(volume_id)s."""
+                    """vblade process for e%(shelf_id)s.%(blade_id)s """
+                    """isn't running.""") % locals()
+            msg_is_match = (0 <= e.message.find(msg))
+
+        self.assertTrue(msg_is_match)
+        self._detach_volume(volume_id_list)
+
+
+class ISCSITestCase(DriverTestCase):
+    """Test Case for ISCSIDriver"""
+    driver_name = "nova.volume.driver.ISCSIDriver"
+
+    def setUp(self):
+        super(ISCSITestCase, self).setUp()
+
+    def tearDown(self):
+        super(ISCSITestCase, self).tearDown()
+
+    def _attach_volume(self):
+        """Attach volumes to an instance. This function also sets
+           a fake log message."""
+        volume_id_list = []
+        for index in xrange(3):
+            vol = {}
+            vol['size'] = 0
+            vol_ref = db.volume_create(self.context, vol)
+            self.volume.create_volume(self.context, vol_ref['id'])
+            vol_ref = db.volume_get(self.context, vol_ref['id'])
+
+            # each volume has a different mountpoint
+            mountpoint = "/dev/sd" + chr((ord('b') + index))
+            db.volume_attached(self.context, vol_ref['id'], self.instance_id,
+                               mountpoint)
+            #iscsi_target = db.volume_allocate_iscsi_target(self.context,
+            #                                               vol_ref['id'],
+            #                                               vol_ref['host'])
+            volume_id_list.append(vol_ref['id'])
+
+        return volume_id_list
+
+    def test_check_for_export_with_no_volume(self):
+        """No log message when no volume is attached to an instance."""
         self.stream.truncate(0)
         self.volume.check_for_export(self.context, self.instance_id)
-        self.assertEqual(self.stream.getvalue(),
-                         _("vblade process for e%s.%s isn't running.\n")
-                         % (shelf_id, blade_id))
+        self.assertEqual(self.stream.getvalue(), '')
+
+    def test_check_for_export_with_all_volume_exported(self):
+        """No log message when all the iSCSI targets are exported."""
+        volume_id_list = self._attach_volume()
+
+        self.mox.StubOutWithMock(self.volume.driver, '_execute')
+        for i in volume_id_list:
+            tid = db.volume_get_iscsi_target_num(self.context, i)
+            self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
+                                        % locals())
+
+        self.stream.truncate(0)
+        self.mox.ReplayAll()
+        self.volume.check_for_export(self.context, self.instance_id)
+        self.assertEqual(self.stream.getvalue(), '')
+        self.mox.UnsetStubs()
+
+        self._detach_volume(volume_id_list)
+
+    def test_check_for_export_with_some_volume_missing(self):
+        """Output a warning message when some volumes are not recognized
+           by ietd."""
+        volume_id_list = self._attach_volume()
+
+        # the first volume's iSCSI target isn't running
+        tid = db.volume_get_iscsi_target_num(self.context,
+                                             volume_id_list[0])
+        self.mox.StubOutWithMock(self.volume.driver, '_execute')
+        self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
+                                    % locals()).AndRaise(
+                                        exception.ProcessExecutionError())
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.ProcessExecutionError,
+                          self.volume.check_for_export,
+                          self.context,
+                          self.instance_id)
+        msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
+        self.assertTrue(0 <= self.stream.getvalue().find(msg))
+        self.mox.UnsetStubs()
 
         self._detach_volume(volume_id_list)
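Finally, the iSCSI counterpart that ISCSITestCase above pins down: confirm each attached volume's target through ietadm, log which volume cannot be confirmed, and let ProcessExecutionError propagate. A sketch under the same caveats, with illustrative names rather than the branch's actual driver code:

    def iscsi_check_for_export_sketch(db, driver, context, instance_id):
        # Sketch only.  The ietadm invocation is taken verbatim from the
        # test expectations above; everything else is illustrative.
        instance_ref = db.instance_get(context, instance_id)
        for volume in instance_ref['volumes']:
            tid = db.volume_get_iscsi_target_num(context, volume['id'])
            try:
                driver._execute("sudo ietadm --op show --tid=%(tid)d"
                                % locals())
            except Exception:
                # The test greps the fake log for exactly this message.
                print "Cannot confirm exported volume id:%s." % volume['id']
                raise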