From ce6874b0a3c45e5e4c974844f12fcf9371ab82fe Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 27 Jan 2011 20:39:33 +0900 Subject: [PATCH 01/18] adding testcode --- bin/nova-api | 2 + nova/tests/test_compute.py | 305 +++++++++++++++ nova/tests/test_scheduler.py | 722 +++++++++++++++++++++++++++++++++++ nova/tests/test_virt.py | 520 ++++++++++++++++++++++++- 4 files changed, 1546 insertions(+), 3 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 7b4fbeab..fba09889 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -37,11 +37,13 @@ gettext.install('nova', unicode=1) from nova import flags from nova import log as logging from nova import wsgi +from nova import utils logging.basicConfig() LOG = logging.getLogger('nova.api') LOG.setLevel(logging.DEBUG) +utils.default_flagfile() FLAGS = flags.FLAGS API_ENDPOINTS = ['ec2', 'osapi'] diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 09f6ee94..344c2d2b 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -20,6 +20,7 @@ Tests For Compute """ import datetime +import mox from nova import compute from nova import context @@ -27,9 +28,12 @@ from nova import db from nova import exception from nova import flags from nova import log as logging +from nova import rpc from nova import test from nova import utils from nova.auth import manager +from nova.compute import manager as compute_manager +from nova.compute import power_state LOG = logging.getLogger('nova.tests.compute') @@ -219,3 +223,304 @@ class ComputeTestCase(test.TestCase): self.assertEqual(ret_val, None) self.compute.terminate_instance(self.context, instance_id) + + def test_update_service_exception(self): + """nova-compute updates Serivce table on DB like below. + nova.service.Serivce.start -> + nova.compute.ComputeManager.update_service. + This testcase confirms if no record found on Service + table, exception can be raised. + """ + host = 'foo' + binary = 'nova-compute' + dbmock = self.mox.CreateMock(db) + dbmock.service_get_by_args(mox.IgnoreArg(), + mox.StrContains(host), + mox.StrContains(binary)).\ + AndRaise(exception.NotFound()) + self.compute.db = dbmock + self.mox.ReplayAll() + try: + self.compute.update_service('dummy', host, binary) + except exception.Invalid, e: + msg = 'Cannot insert compute manager specific info' + c1 = ( 0 <= e.message.find(msg)) + self.assertTrue(c1) + self.mox.ResetAll() + + def test_update_service_success(self): + """nova-compute updates Serivce table on DB like below. + nova.service.Serivce.start -> + nova.compute.ComputeManager.update_service. + In this method, vcpus/memory_mb/local_gb/hypervisor_type/ + hypervisor_version/cpu_info should be changed. + Based on this specification, this testcase confirms + if this method finishes successfully, + meaning self.db.service_update is called with dictinary + + {'vcpu':aaa, 'memory_mb':bbb, 'local_gb':ccc, + 'hypervisor_type':ddd, 'hypervisor_version':eee, + 'cpu_info':fff} + + Since each value of above dict can be obtained through + driver(different depends on environment), + only dictionary keys are checked. 
+ """ + + def dic_key_check(dic): + validkey = ['vcpus', 'memory_mb', 'local_gb', + 'hypervisor_type', 'hypervisor_version', 'cpu_info'] + return (list(set(validkey)) == list(set(dic.keys()))) + + host = 'foo' + binary = 'nova-compute' + service_ref = {'id':1, 'binary':'nova-compute', 'topic':'compute'} + dbmock = self.mox.CreateMock(db) + dbmock.service_get_by_args(mox.IgnoreArg(), + mox.StrContains(host), + mox.StrContains(binary)).\ + AndReturn(service_ref) + dbmock.service_update(mox.IgnoreArg(), + service_ref['id'], + mox.Func(dic_key_check)) + + self.compute.db = dbmock + self.mox.ReplayAll() + try: + self.compute.update_service('dummy', host, binary) + except exception.Invalid, e: + msg = 'Cannot insert compute manager specific info' + c1 = ( 0 <= e.message.find(msg)) + self.assertTrue(c1) + self.mox.ResetAll() + + def _setup_other_managers(self): + self.volume_manager = utils.import_object(FLAGS.volume_manager) + self.network_manager = utils.import_object(FLAGS.network_manager) + self.compute_driver = utils.import_object(FLAGS.compute_driver) + + def test_pre_live_migration_instance_has_no_fixed_ip(self): + """ + if instances that are intended to be migrated doesnt have fixed_ip + (not happens usually), pre_live_migration has to raise Exception. + """ + instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], + 'hostname':'i-000000001'} + c = context.get_admin_context() + i_id = instance_ref['id'] + + dbmock = self.mox.CreateMock(db) + dbmock.instance_get(c, i_id).AndReturn(instance_ref) + dbmock.instance_get_fixed_address(c, i_id).AndReturn(None) + + self.compute.db = dbmock + self.mox.ReplayAll() + self.assertRaises(exception.NotFound, + self.compute.pre_live_migration, + c, instance_ref['id']) + self.mox.ResetAll() + + def test_pre_live_migration_instance_has_volume(self): + """if any volumes are attached to the instances that are + intended to be migrated, setup_compute_volume must be + called because aoe module should be inserted at destination + host. This testcase checks on it. + """ + instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], + 'hostname':'i-000000001'} + c = context.get_admin_context() + i_id=instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + volmock = self.mox.CreateMock(self.volume_manager) + netmock = self.mox.CreateMock(self.network_manager) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, i_id).AndReturn(instance_ref) + dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy') + for i in range(len(instance_ref['volumes'])): + vid = instance_ref['volumes'][i]['id'] + volmock.setup_compute_volume(c, vid).InAnyOrder('g1') + netmock.setup_compute_network(c, instance_ref['id']) + drivermock.ensure_filtering_rules_for_instance(instance_ref) + + self.compute.db = dbmock + self.compute.volume_manager = volmock + self.compute.network_manager = netmock + self.compute.driver = drivermock + + self.mox.ReplayAll() + ret = self.compute.pre_live_migration(c, i_id) + self.assertEqual(ret, None) + self.mox.ResetAll() + + def test_pre_live_migration_instance_has_no_volume(self): + """if any volumes are not attached to the instances that are + intended to be migrated, log message should be appears + because administrator can proove instance conditions before + live_migration if any trouble occurs. 
+ """ + instance_ref={'id':1, 'volumes':[], 'hostname':'i-20000001'} + c = context.get_admin_context() + i_id = instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + netmock = self.mox.CreateMock(self.network_manager) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, i_id).AndReturn(instance_ref) + dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy') + self.mox.StubOutWithMock(compute_manager.LOG, 'info') + compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname']) + netmock.setup_compute_network(c, i_id) + drivermock.ensure_filtering_rules_for_instance(instance_ref) + + self.compute.db = dbmock + self.compute.network_manager = netmock + self.compute.driver = drivermock + + self.mox.ReplayAll() + ret = self.compute.pre_live_migration(c, i_id) + self.assertEqual(ret, None) + self.mox.ResetAll() + + def test_live_migration_instance_has_volume(self): + """Any volumes are mounted by instances to be migrated are found, + vblade health must be checked before starting live-migration. + And that is checked by check_for_export(). + This testcase confirms check_for_export() is called. + """ + instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], 'hostname':'i-00000001'} + c = context.get_admin_context() + dest='dummydest' + i_id = instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref) + self.mox.StubOutWithMock(rpc, 'call') + rpc.call(c, FLAGS.volume_topic, + {"method": "check_for_export", + "args": {'instance_id': i_id}}).InAnyOrder('g1') + rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest), + {"method": "pre_live_migration", + "args": {'instance_id': i_id}}).InAnyOrder('g1') + + self.compute.db = dbmock + self.compute.driver = drivermock + self.mox.ReplayAll() + ret = self.compute.live_migration(c, i_id, dest) + self.assertEqual(ret, None) + self.mox.ResetAll() + + def test_live_migration_instance_has_volume_and_exception(self): + """In addition to test_live_migration_instance_has_volume testcase, + this testcase confirms if any exception raises from check_for_export(). + Then, valid seaquence of this method should recovering instance/volumes + status(ex. 
instance['state_description'] is changed from 'migrating' + -> 'running', was changed by scheduler) + """ + instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], + 'hostname':'i-000000001'} + dest='dummydest' + c = context.get_admin_context() + i_id = instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref) + self.mox.StubOutWithMock(rpc, 'call') + rpc.call(c, FLAGS.volume_topic, + {"method": "check_for_export", + "args": {'instance_id': i_id}}).InAnyOrder('g1') + compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest) + dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic) + rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest), + {"method": "pre_live_migration", + "args": {'instance_id': i_id}}).\ + InAnyOrder('g1').AndRaise(rpc.RemoteError('du', 'mm', 'y')) + self.mox.StubOutWithMock(compute_manager.LOG, 'error') + compute_manager.LOG.error('Pre live migration for %s failed at %s', + instance_ref['hostname'], dest) + dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running') + for i in range(len(instance_ref['volumes'])): + vid = instance_ref['volumes'][i]['id'] + dbmock.volume_update(c, vid, {'status': 'in-use'}) + + self.compute.db = dbmock + self.compute.driver = drivermock + self.mox.ReplayAll() + self.assertRaises(rpc.RemoteError, + self.compute.live_migration, + c, i_id, dest) + self.mox.ResetAll() + + def test_live_migration_instance_has_no_volume_and_exception(self): + """Simpler than test_live_migration_instance_has_volume_and_exception""" + + instance_ref={'id':1, 'volumes':[], 'hostname':'i-000000001'} + dest='dummydest' + c = context.get_admin_context() + i_id = instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref) + self.mox.StubOutWithMock(rpc, 'call') + compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest) + dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic) + rpc.call(c, compute_topic, + {"method": "pre_live_migration", + "args": {'instance_id': i_id}}).\ + AndRaise(rpc.RemoteError('du', 'mm', 'y')) + self.mox.StubOutWithMock(compute_manager.LOG, 'error') + compute_manager.LOG.error('Pre live migration for %s failed at %s', + instance_ref['hostname'], dest) + dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running') + + self.compute.db = dbmock + self.compute.driver = drivermock + self.mox.ReplayAll() + self.assertRaises(rpc.RemoteError, + self.compute.live_migration, + c, i_id, dest) + self.mox.ResetAll() + + def test_live_migration_instance_has_volume(self): + """Simpler version than test_live_migration_instance_has_volume.""" + instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], + 'hostname':'i-000000001'} + c = context.get_admin_context() + dest='dummydest' + i_id = instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, i_id).AndReturn(instance_ref) + self.mox.StubOutWithMock(rpc, 'call') + rpc.call(c, FLAGS.volume_topic, + {"method": "check_for_export", + "args": {'instance_id': i_id}}).InAnyOrder('g1') + compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest) + dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic) + rpc.call(c, 
compute_topic, + {"method": "pre_live_migration", + "args": {'instance_id': i_id}}).InAnyOrder('g1') + drivermock.live_migration(c, instance_ref, dest) + + self.compute.db = dbmock + self.compute.driver = drivermock + self.mox.ReplayAll() + ret = self.compute.live_migration(c, i_id, dest) + self.assertEqual(ret, None) + self.mox.ResetAll() diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 9d458244..c62bca9b 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -20,10 +20,12 @@ Tests For Scheduler """ import datetime +import mox from mox import IgnoreArg from nova import context from nova import db +from nova import exception from nova import flags from nova import service from nova import test @@ -32,6 +34,8 @@ from nova import utils from nova.auth import manager as auth_manager from nova.scheduler import manager from nova.scheduler import driver +from nova.compute import power_state +from nova.db.sqlalchemy import models FLAGS = flags.FLAGS @@ -75,7 +79,102 @@ class SchedulerTestCase(test.TestCase): 'args': {'num': 7}}) self.mox.ReplayAll() scheduler.named_method(ctxt, 'topic', num=7) + + def test_show_host_resource_host_not_exit(self): + """ + A testcase of driver.has_enough_resource + given host does not exists. + """ + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True) + manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\ + AndReturn([]) + + self.mox.ReplayAll() + result = scheduler.show_host_resource(ctxt, dest) + # ret should be dict + keys = ['ret', 'msg'] + c1 = list(set(result.keys())) == list(set(keys)) + c2 = not result['ret'] + c3 = result['msg'].find('No such Host or not compute node') <= 0 + self.assertTrue( c1 and c2 and c3) + self.mox.UnsetStubs() + + def test_show_host_resource_no_project(self): + """ + A testcase of driver.show_host_resource + no instance stays on the given host + """ + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100} + service_ref = {'id':1, 'host':dest} + service_ref.update(r0) + + self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True) + manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\ + AndReturn([(service_ref, 0)]) + manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([]) + + self.mox.ReplayAll() + result = scheduler.show_host_resource(ctxt, dest) + # ret should be dict + keys = ['ret', 'phy_resource', 'usage'] + c1 = list(set(result.keys())) == list(set(keys)) + c2 = result['ret'] + c3 = result['phy_resource'] == r0 + c4 = result['usage'] == {} + self.assertTrue( c1 and c2 and c3 and c4) + self.mox.UnsetStubs() + + def test_show_host_resource_works_correctly(self): + """ + A testcase of driver.show_host_resource + to make sure everything finished with no error. 
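+
+        The return value is expected to have roughly this shape, using
+        the fixtures r0/r2 defined below:
+
+            {'ret': True,
+             'phy_resource': r0,
+             'usage': {'p-01': r2, 'p-02': r2}}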
+ """ + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100} + r1 = {'vcpus':10, 'memory_mb':4, 'local_gb':20} + r2 = {'vcpus':10, 'memory_mb':20, 'local_gb':30} + service_ref = {'id':1, 'host':dest} + service_ref.update(r0) + instance_ref2 = {'id':2, 'project_id':'p-01', 'host':'dummy'} + instance_ref2.update(r1) + instance_ref3 = {'id':3, 'project_id':'p-02', 'host':'dummy'} + instance_ref3.update(r1) + + self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True) + manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\ + AndReturn([(service_ref, 0)]) + manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([instance_ref2, instance_ref3]) + for p in ['p-01', 'p-02']: + manager.db.instance_get_vcpu_sum_by_host_and_project( + ctxt, dest, p).AndReturn(r2['vcpus']) + manager.db.instance_get_memory_sum_by_host_and_project( + ctxt, dest, p).AndReturn(r2['memory_mb']) + manager.db.instance_get_disk_sum_by_host_and_project( + ctxt, dest, p).AndReturn(r2['local_gb']) + + self.mox.ReplayAll() + result = scheduler.show_host_resource(ctxt, dest) + # ret should be dict + keys = ['ret', 'phy_resource', 'usage'] + c1 = list(set(result.keys())) == list(set(keys)) + c2 = result['ret'] + c3 = result['phy_resource'] == r0 + c4 = result['usage'].keys() == ['p-01', 'p-02'] + c5 = result['usage']['p-01'] == r2 + c6 = result['usage']['p-02'] == r2 + self.assertTrue( c1 and c2 and c3 and c4 and c5 and c6) + self.mox.UnsetStubs() class ZoneSchedulerTestCase(test.TestCase): """Test case for zone scheduler""" @@ -384,3 +483,626 @@ class SimpleDriverTestCase(test.TestCase): volume2.delete_volume(self.context, volume_id) volume1.kill() volume2.kill() + + def test_scheduler_live_migraiton_with_volume(self): + """ + driver.scheduler_live_migration finishes successfully + (volumes are attached to instances) + This testcase make sure schedule_live_migration + changes instance state from 'running' -> 'migrating' + """ + driver_i = self.scheduler.driver + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-00000001', 'host':'dummy', + 'volumes':[{'id':1}, {'id':2}]} + dest = 'dummydest' + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + # must be IgnoreArg() because scheduler changes ctxt's memory address + driver.db.instance_get(mox.IgnoreArg(), i_ref['id']).AndReturn(i_ref) + + self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') + driver_i._live_migration_src_check(mox.IgnoreArg(), i_ref) + self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') + driver_i._live_migration_dest_check(mox.IgnoreArg(), i_ref, dest) + self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') + driver_i._live_migration_common_check(mox.IgnoreArg(), i_ref, dest) + driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'], + power_state.PAUSED, 'migrating') + for v in i_ref['volumes']: + driver.db.volume_update(mox.IgnoreArg(), v['id'], + {'status': 'migrating'}) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + kwargs={'instance_id':i_ref['id'], 'dest':dest} + rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']), + {"method": 'live_migration', + "args": kwargs}) + + self.mox.ReplayAll() + self.scheduler.live_migration(ctxt, topic, + instance_id=i_ref['id'], dest=dest) + self.mox.UnsetStubs() + + def test_scheduler_live_migraiton_no_volume(self): + """ + driver.scheduler_live_migration finishes successfully 
+ (volumes are attached to instances) + This testcase make sure schedule_live_migration + changes instance state from 'running' -> 'migrating' + """ + driver_i = self.scheduler.driver + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[]} + dest = 'dummydest' + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + # must be IgnoreArg() because scheduler changes ctxt's memory address + driver.db.instance_get(mox.IgnoreArg(), i_ref['id']).AndReturn(i_ref) + self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') + driver_i._live_migration_src_check(mox.IgnoreArg(), i_ref) + self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') + driver_i._live_migration_dest_check(mox.IgnoreArg(), i_ref, dest) + self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') + driver_i._live_migration_common_check(mox.IgnoreArg(), i_ref, dest) + driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'], + power_state.PAUSED, 'migrating') + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + kwargs={'instance_id':i_ref['id'], 'dest':dest} + rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']), + {"method": 'live_migration', + "args": kwargs}) + + self.mox.ReplayAll() + self.scheduler.live_migration(ctxt, topic, + instance_id=i_ref['id'], dest=dest) + self.mox.UnsetStubs() + + def test_live_migraiton_src_check_instance_not_running(self): + """ + A testcase of driver._live_migration_src_check. + The instance given by instance_id is not running. + """ + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + dest = 'dummydest' + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'volumes':[], 'state_description':'migrating', + 'state':power_state.RUNNING} + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_src_check(ctxt, i_ref) + except exception.Invalid, e: + self.assertTrue(e.message.find('is not running') > 0) + self.mox.UnsetStubs() + + def test_live_migraiton_src_check_volume_node_not_alive(self): + """ + A testcase of driver._live_migration_src_check. + Volume node is not alive if any volumes are attached to + the given instance. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'volumes':[{'id':1}, {'id':2}], + 'state_description':'running', 'state':power_state.RUNNING} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'volume').\ + AndReturn([]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_src_check(ctxt, i_ref) + except exception.Invalid, e: + self.assertTrue(e.message.find('volume node is not alive') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_src_check_volume_node_not_alive(self): + """ + A testcase of driver._live_migration_src_check. + The testcase make sure src-compute node is alive. 
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[], + 'state_description':'running', 'state':power_state.RUNNING} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\ + AndReturn([]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_src_check(ctxt, i_ref) + except exception.Invalid, e: + self.assertTrue(e.message.find('is not alive') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_src_check_works_correctly(self): + """ + A testcase of driver._live_migration_src_check. + The testcase make sure everything finished with no error. + """ + driver_i = self.scheduler.driver + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[], + 'state_description':'running', 'state':power_state.RUNNING} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\ + AndReturn([service_ref]) + self.mox.StubOutWithMock(driver_i, 'service_is_up') + driver_i.service_is_up(service_ref).AndReturn(True) + + self.mox.ReplayAll() + ret = driver_i._live_migration_src_check(ctxt, i_ref) + self.assertTrue(ret == None) + self.mox.UnsetStubs() + + def test_live_migraiton_dest_check_service_not_exists(self): + """ + A testcase of driver._live_migration_dst_check. + Destination host does not exist. + """ + driver_i = self.scheduler.driver + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([]) + + self.mox.ReplayAll() + try: + driver_i._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('does not exists') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_dest_check_service_isnot_compute(self): + """ + A testcase of driver._live_migration_dst_check. + Destination host does not provide compute. + """ + driver_i = self.scheduler.driver + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + service_ref.__setitem__('topic', 'api') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + + self.mox.ReplayAll() + try: + driver_i._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('must be compute node') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_dest_check_service_not_alive(self): + """ + A testcase of driver._live_migration_dst_check. + Destination host compute service is not alive. 
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + service_ref.__setitem__('topic', 'compute') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up') + self.scheduler.driver.service_is_up(service_ref).AndReturn(False) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('is not alive') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_dest_check_service_same_host(self): + """ + A testcase of driver._live_migration_dst_check. + Destination host is same as src host. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + service_ref.__setitem__('topic', 'compute') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up') + self.scheduler.driver.service_is_up(service_ref).AndReturn(True) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('is running now. choose other host') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_dest_check_service_works_correctly(self): + """ + A testcase of driver._live_migration_dst_check. + The testcase make sure everything finished with no error. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + service_ref.__setitem__('topic', 'compute') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up') + self.scheduler.driver.service_is_up(service_ref).AndReturn(True) + self.mox.StubOutWithMock(self.scheduler.driver, 'has_enough_resource') + self.scheduler.driver.has_enough_resource(mox.IgnoreArg(), i_ref, dest) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('is running now. choose other host') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_dest_not_exists(self): + """ + A testcase of driver._live_migration_common_check. + Destination host does not exist. 
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('does not exists') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_orig_not_exists(self): + """ + A testcase of driver._live_migration_common_check. + Original host(an instance launched on) does not exist. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('host', i_ref['host']) + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except exception.Invalid, e: + msg = 'where instance was launched at) does not exists' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_different_hypervisor(self): + """ + A testcase of driver._live_migration_common_check. + Original host and dest host has different hypervisor type. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', + 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('hypervisor_type', 'kvm') + service_ref2 = models.Service() + service_ref2.__setitem__('id', 2) + service_ref2.__setitem__('hypervisor_type', 'xen') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([service_ref2]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except exception.Invalid, e: + msg = 'Different hypervisor type' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_different_version(self): + """ + A testcase of driver._live_migration_common_check. + Original host and dest host has different hypervisor version. 
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', + 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('hypervisor_version', 12000) + service_ref2 = models.Service() + service_ref2.__setitem__('id', 2) + service_ref2.__setitem__('hypervisor_version', 12001) + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([service_ref2]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except exception.Invalid, e: + msg = 'Older hypervisor version' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_checking_cpuinfo_fail(self): + """ + A testcase of driver._live_migration_common_check. + Original host and dest host has different hypervisor version. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', + 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('hypervisor_version', 12000) + service_ref2 = models.Service() + service_ref2.__setitem__('id', 2) + service_ref2.__setitem__('hypervisor_version', 12000) + service_ref2.__setitem__('cpuinfo', 'info') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([service_ref2]) + driver.db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest) + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), + {"method": 'compare_cpu', + "args": {'cpu_info': service_ref2['cpu_info']}}).\ + AndRaise(rpc.RemoteError('doesnt have compatibility to', '', '')) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except rpc.RemoteError, e: + msg = 'doesnt have compatibility to' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_works_correctly(self): + """ + A testcase of driver._live_migration_common_check. + The testcase make sure everything finished with no error. 
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', + 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('hypervisor_version', 12000) + service_ref2 = models.Service() + service_ref2.__setitem__('id', 2) + service_ref2.__setitem__('hypervisor_version', 12000) + service_ref2.__setitem__('cpuinfo', 'info') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([service_ref2]) + driver.db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest) + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), + {"method": 'compare_cpu', + "args": {'cpu_info': service_ref2['cpu_info']}}) + + self.mox.ReplayAll() + ret = self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + self.assertTrue(ret == None) + self.mox.UnsetStubs() + + def test_has_enough_resource_lack_resource_vcpu(self): + """ + A testcase of driver.has_enough_resource. + Lack of vcpu.(boundary check) + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} + i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':6, 'memory_mb':8, 'local_gb':10} + i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([i_ref2, i_ref3]) + + self.mox.ReplayAll() + try: + self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + except exception.NotEmpty, e: + msg = 'is not capable to migrate' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_has_enough_resource_lack_resource_memory(self): + """ + A testcase of driver.has_enough_resource. + Lack of memory_mb.(boundary check) + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} + i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':16, 'local_gb':10} + i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([i_ref2, i_ref3]) + + self.mox.ReplayAll() + try: + self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + except exception.NotEmpty, e: + msg = 'is not capable to migrate' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + self.mox.UnsetStubs() + + def test_has_enough_resource_lack_resource_disk(self): + """ + A testcase of driver.has_enough_resource. 
+ Lack of local_gb.(boundary check) + """ + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} + i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':80} + i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([i_ref2, i_ref3]) + + self.mox.ReplayAll() + try: + self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + except exception.NotEmpty, e: + msg = 'is not capable to migrate' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_has_enough_resource_works_correctly(self): + """ + A testcase of driver.has_enough_resource + to make sure everything finished with no error. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} + i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([i_ref2, i_ref3]) + + self.mox.ReplayAll() + ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + self.assertTrue(ret == None) + self.mox.UnsetStubs() diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index afdc89ba..177e8f02 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -14,21 +14,29 @@ # License for the specific language governing permissions and limitations # under the License. +import mox + from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom from nova import context from nova import db +from nova import exception from nova import flags from nova import test +from nova import logging from nova import utils from nova.api.ec2 import cloud from nova.auth import manager +from nova.db.sqlalchemy import models +from nova.compute import power_state from nova.virt import libvirt_conn FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') +libvirt = None +libxml2 = None class LibvirtConnTestCase(test.TestCase): def setUp(self): @@ -52,6 +60,38 @@ class LibvirtConnTestCase(test.TestCase): 'bridge': 'br101', 'instance_type': 'm1.small'} + def _driver_dependent_test_setup(self): + """ + Setup method. + Call this method at the top of each testcase method, + if the testcase is necessary libvirt and cheetah. 
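+
+        Expected usage at the top of a testcase method (as done
+        throughout this class):
+
+            try:
+                nwmock, fwmock = self._driver_dependent_test_setup()
+            except:
+                return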
+ """ + try : + global libvirt + global libxml2 + libvirt_conn.libvirt = __import__('libvirt') + libvirt_conn.libxml2 = __import__('libxml2') + libvirt_conn._late_load_cheetah() + libvirt = __import__('libvirt') + except ImportError, e: + logging.warn("""This test has not been done since """ + """using driver-dependent library Cheetah/libvirt/libxml2.""") + raise e + + # inebitable mocks for calling + #nova.virt.libvirt_conn.LibvirtConnection.__init__ + nwmock = self.mox.CreateMock(libvirt_conn.NWFilterFirewall) + self.mox.StubOutWithMock(libvirt_conn, 'NWFilterFirewall', + use_mock_anything=True) + libvirt_conn.NWFilterFirewall(mox.IgnoreArg()).AndReturn(nwmock) + + obj = utils.import_object(FLAGS.firewall_driver) + fwmock = self.mox.CreateMock(obj) + self.mox.StubOutWithMock(libvirt_conn, 'utils', + use_mock_anything=True) + libvirt_conn.utils.import_object(FLAGS.firewall_driver).AndReturn(fwmock) + return nwmock, fwmock + def test_xml_and_uri_no_ramdisk_no_kernel(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, @@ -188,9 +228,8 @@ class LibvirtConnTestCase(test.TestCase): expected_result, '%s failed common check %d' % (xml, i)) - # This test is supposed to make sure we don't override a specifically - # set uri - # + # This test is supposed to make sure we don't override a specifically set uri + # # Deliberately not just assigning this string to FLAGS.libvirt_uri and # checking against that later on. This way we make sure the # implementation doesn't fiddle around with the FLAGS. @@ -202,6 +241,480 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, testuri) + def test_get_memory_mb(self): + """ + Check if get_memory_mb returns memory value + Connection/OS/driver differenct does not matter for this method, + so everyone can execute for checking. + """ + try: + self._driver_dependent_test_setup() + except: + return + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertTrue(0 < conn.get_memory_mb()) + self.mox.UnsetStubs() + + def test_get_cpu_info_works_correctly(self): + """ + Check if get_cpu_info works correctly. 
+ (in case libvirt.getCapabilities() works correctly) + """ + xml=("""x86_64Nehalem""" + """Intel""" + """""" + """""" + """""" + """""" + """""" + """""" + """""") + + try: + self._driver_dependent_test_setup() + except: + return + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) + libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml) + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertTrue(0 < len(conn.get_cpu_info())) + self.mox.UnsetStubs() + + def test_get_cpu_info_inappropreate_xml(self): + """ + Check if get_cpu_info raises exception + in case libvirt.getCapabilities() returns wrong xml + (in case of xml doesnt have tag) + """ + xml=("""x86_64Nehalem""" + """Intel""" + """""" + """""" + """""" + """""" + """""" + """""" + """""") + + try: + self._driver_dependent_test_setup() + except: + return + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) + libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml) + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + try: + conn.get_cpu_info() + except exception.Invalid, e: + c1 = ( 0 <= e.message.find('Invalid xml') ) + self.assertTrue(c1) + self.mox.UnsetStubs() + + def test_get_cpu_info_inappropreate_xml2(self): + """ + Check if get_cpu_info raises exception + in case libvirt.getCapabilities() returns wrong xml + (in case of xml doesnt have inproper tag + meaning missing "socket" attribute) + """ + xml=("""x86_64Nehalem""" + """Intel""" + """""" + """""" + """""" + """""" + """""" + """""" + """""") + + try: + self._driver_dependent_test_setup() + except: + return + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) + libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml) + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + try: + conn.get_cpu_info() + except exception.Invalid, e: + c1 = ( 0 <= e.message.find('Invalid xml: topology') ) + self.assertTrue(c1) + self.mox.UnsetStubs() + + def test_compare_cpu_works_correctly(self): + """Calling libvirt.compute_cpu() and works correctly """ + + t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ + """"topology":{"cores":"%s", "threads":"%s", """ + """"sockets":"%s"}, "features":[%s]}""") + cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') + + try: + self._driver_dependent_test_setup() + except: + return + + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True) + libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).AndReturn(1) + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertTrue( None== conn.compare_cpu(cpu_info)) + self.mox.UnsetStubs() + + def test_compare_cpu_raises_exception(self): + """ + Libvirt-related exception occurs when calling + libvirt.compare_cpu(). 
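+        The cpu_info argument is the JSON string built from the
+        template below, matching the format get_cpu_info() produces:
+
+            {"arch": "x86", "model": "model", "vendor": "vendor",
+             "topology": {"cores": "2", "threads": "1", "sockets": "4"},
+             "features": ["tm"]}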
+ """ + t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ + """"topology":{"cores":"%s", "threads":"%s", """ + """"sockets":"%s"}, "features":[%s]}""") + cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') + + try: + self._driver_dependent_test_setup() + except: + return + + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', + use_mock_anything=True) + libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\ + AndRaise(libvirt.libvirtError('ERR')) + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertRaises(libvirt.libvirtError, conn.compare_cpu, cpu_info) + self.mox.UnsetStubs() + + def test_compare_cpu_no_compatibility(self): + """libvirt.compare_cpu() return less than 0.(no compatibility)""" + + t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ + """"topology":{"cores":"%s", "threads":"%s", """ + """"sockets":"%s"}, "features":[%s]}""") + cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') + + try: + self._driver_dependent_test_setup() + except: + return + + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', + use_mock_anything=True) + libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\ + AndRaise(exception.Invalid('ERR')) + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertRaises(exception.Invalid, conn.compare_cpu, cpu_info) + self.mox.UnsetStubs() + + def test_ensure_filtering_rules_for_instance_works_correctly(self): + """ensure_filtering_rules_for_instance works as expected correctly""" + + instance_ref = models.Instance() + instance_ref.__setitem__('id', 1) + + try: + nwmock, fwmock = self._driver_dependent_test_setup() + except: + return + + nwmock.setup_basic_filtering(mox.IgnoreArg()) + fwmock.prepare_instance_filter(instance_ref) + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', + use_mock_anything=True) + n = 'nova-instance-%s' % instance_ref.name + libvirt_conn.LibvirtConnection._conn.nwfilterLookupByName(n) + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + conn.ensure_filtering_rules_for_instance(instance_ref) + self.mox.UnsetStubs() + + def test_ensure_filtering_rules_for_instance_timeout(self): + """ensure_filtering_fules_for_instance finishes with timeout""" + + instance_ref = models.Instance() + instance_ref.__setitem__('id', 1) + + try: + nwmock, fwmock = self._driver_dependent_test_setup() + except: + return + + nwmock.setup_basic_filtering(mox.IgnoreArg()) + fwmock.prepare_instance_filter(instance_ref) + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', + use_mock_anything=True) + n = 'nova-instance-%s' % instance_ref.name + for i in range(FLAGS.live_migration_timeout_sec * 2): + libvirt_conn.LibvirtConnection._conn.\ + nwfilterLookupByName(n).AndRaise(libvirt.libvirtError('ERR')) + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + try: + conn.ensure_filtering_rules_for_instance(instance_ref) + except exception.Error, e: + c1 = ( 0<=e.message.find('Timeout migrating for')) + self.assertTrue(c1) + self.mox.UnsetStubs() + + def test_live_migration_works_correctly(self): + """_live_migration works as expected correctly """ + + class dummyCall(object): + f = None + def start(self, interval=0, now=False): + pass + + instance_ref = models.Instance() + instance_ref.__setitem__('id', 1) + dest = 'desthost' + ctxt = context.get_admin_context() + + try: + self._driver_dependent_test_setup() + except: + return + + 
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', + use_mock_anything=True) + vdmock = self.mox.CreateMock(libvirt.virDomain) + self.mox.StubOutWithMock(vdmock, "migrateToURI", + use_mock_anything=True) + vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(), + None, FLAGS.live_migration_bandwidth).\ + AndReturn(None) + libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\ + AndReturn(vdmock) + # below description is also ok. + #self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection._conn, + # "lookupByName", use_mock_anything=True) + + libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall()) + + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + ret = conn._live_migration(ctxt, instance_ref, dest) + self.assertTrue(ret == None) + self.mox.UnsetStubs() + + def test_live_migration_raises_exception(self): + """ + _live_migration raises exception, then this testcase confirms + state_description/state for the instances/volumes are recovered. + """ + class Instance(models.NovaBase): + id = 0 + volumes = None + name = 'name' + + ctxt = context.get_admin_context() + dest = 'desthost' + instance_ref = Instance() + instance_ref.__setitem__('id', 1) + instance_ref.__setitem__('volumes', [{'id':1}, {'id':2}]) + + try: + nwmock, fwmock = self._driver_dependent_test_setup() + except: + return + + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', + use_mock_anything=True) + vdmock = self.mox.CreateMock(libvirt.virDomain) + self.mox.StubOutWithMock(vdmock, "migrateToURI", + use_mock_anything=True) + vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(), + None, FLAGS.live_migration_bandwidth).\ + AndRaise(libvirt.libvirtError('ERR')) + libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\ + AndReturn(vdmock) + self.mox.StubOutWithMock(db, 'instance_set_state') + db.instance_set_state(ctxt, instance_ref['id'], + power_state.RUNNING, 'running') + self.mox.StubOutWithMock(db, 'volume_update') + for v in instance_ref.volumes: + db.volume_update(ctxt, v['id'], {'status': 'in-use'}).\ + InAnyOrder('g1') + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertRaises(libvirt.libvirtError, + conn._live_migration, + ctxt, instance_ref, dest) + self.mox.UnsetStubs() + + def test_post_live_migration_working_correctly(self): + """_post_live_migration works as expected correctly """ + + dest = 'dummydest' + ctxt = context.get_admin_context() + instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest, + 'fixed_ip':'dummyip', 'floating_ip':'dummyflip', + 'volumes':[{'id':1}, {'id':2} ]} + network_ref = {'id':1, 'host':dest} + floating_ip_ref = {'id':1, 'address':'1.1.1.1'} + + try: + nwmock, fwmock = self._driver_dependent_test_setup() + except: + return + fwmock.unfilter_instance(instance_ref) + + fixed_ip = instance_ref['fixed_ip'] + self.mox.StubOutWithMock(db, 'instance_get_fixed_address') + db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip) + self.mox.StubOutWithMock(db, 'fixed_ip_update') + db.fixed_ip_update(ctxt, fixed_ip, {'host': dest}) + self.mox.StubOutWithMock(db, 'fixed_ip_get_network') + db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref) + self.mox.StubOutWithMock(db, 'network_update') + db.network_update(ctxt, network_ref['id'], {'host': dest}) + + fl_ip = instance_ref['floating_ip'] + self.mox.StubOutWithMock(db, 'instance_get_floating_address') + db.instance_get_floating_address(ctxt, 
instance_ref['id']).AndReturn(fl_ip) + self.mox.StubOutWithMock(db, 'floating_ip_get_by_address') + db.floating_ip_get_by_address(ctxt, instance_ref['floating_ip']).\ + AndReturn(floating_ip_ref) + self.mox.StubOutWithMock(db, 'floating_ip_update') + db.floating_ip_update(ctxt, floating_ip_ref['address'], {'host': dest}) + + self.mox.StubOutWithMock(db, 'instance_update') + db.instance_update(ctxt, instance_ref['id'], + {'state_description': 'running', + 'state': power_state.RUNNING, 'host': dest}) + self.mox.StubOutWithMock(db, 'volume_update') + for v in instance_ref['volumes']: + db.volume_update(ctxt, v['id'], {'status': 'in-use'}) + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + conn._post_live_migration( ctxt, instance_ref, dest) + self.mox.UnsetStubs() + + def test_post_live_migration_no_floating_ip(self): + """ + _post_live_migration works as expected correctly + (in case instance doesnt have floaitng ip) + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest, + 'fixed_ip':'dummyip', 'floating_ip':'dummyflip', + 'volumes':[{'id':1}, {'id':2} ]} + network_ref = {'id':1, 'host':dest} + floating_ip_ref = {'id':1, 'address':'1.1.1.1'} + + try: + nwmock, fwmock = self._driver_dependent_test_setup() + except: + return + fwmock.unfilter_instance(instance_ref) + + fixed_ip = instance_ref['fixed_ip'] + self.mox.StubOutWithMock(db, 'instance_get_fixed_address') + db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip) + self.mox.StubOutWithMock(db, 'fixed_ip_update') + db.fixed_ip_update(ctxt, fixed_ip, {'host': dest}) + self.mox.StubOutWithMock(db, 'fixed_ip_get_network') + db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref) + self.mox.StubOutWithMock(db, 'network_update') + db.network_update(ctxt, network_ref['id'], {'host': dest}) + + self.mox.StubOutWithMock(db, 'instance_get_floating_address') + db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(None) + self.mox.StubOutWithMock(libvirt_conn.LOG, 'info') + libvirt_conn.LOG.info(_('post livemigration operation is started..')) + libvirt_conn.LOG.info(_('floating_ip is not found for %s'), + instance_ref['hostname']) + # Checking last messages are ignored. may be no need to check so strictly? 
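+        # (mox fails on any call that was not recorded, so the two
+        # IgnoreArg() expectations below absorb the remaining log
+        # lines.)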
+        libvirt_conn.LOG.info(mox.IgnoreArg())
+        libvirt_conn.LOG.info(mox.IgnoreArg())
+
+        self.mox.StubOutWithMock(db, 'instance_update')
+        db.instance_update(ctxt, instance_ref['id'],
+                           {'state_description': 'running',
+                            'state': power_state.RUNNING,
+                            'host': dest})
+        self.mox.StubOutWithMock(db, 'volume_update')
+        for v in instance_ref['volumes']:
+            db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        conn._post_live_migration(ctxt, instance_ref, dest)
+        self.mox.UnsetStubs()
+
+    def test_post_live_migration_no_floating_ip_with_exception(self):
+        """
+        _post_live_migration works as expected
+        (when the instance has no floating ip and an exception is
+        raised)
+        """
+        dest = 'dummydest'
+        ctxt = context.get_admin_context()
+        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+                        'volumes':[{'id':1}, {'id':2}]}
+        network_ref = {'id':1, 'host':dest}
+        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+        try:
+            nwmock, fwmock = self._driver_dependent_test_setup()
+        except:
+            return
+        fwmock.unfilter_instance(instance_ref)
+
+        fixed_ip = instance_ref['fixed_ip']
+        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+        db.instance_get_fixed_address(ctxt,
+                                      instance_ref['id']).AndReturn(fixed_ip)
+        self.mox.StubOutWithMock(db, 'fixed_ip_update')
+        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+        self.mox.StubOutWithMock(db, 'network_update')
+        db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+        db.instance_get_floating_address(ctxt, instance_ref['id']).\
+           AndRaise(exception.NotFound())
+        self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
+        libvirt_conn.LOG.info(_('post livemigration operation is started..'))
+        libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
+                              instance_ref['hostname'])
+        # The remaining messages are matched loosely; checking them
+        # strictly may not be necessary.
+        libvirt_conn.LOG.info(mox.IgnoreArg())
+        libvirt_conn.LOG.info(mox.IgnoreArg())
+
+        self.mox.StubOutWithMock(db, 'instance_update')
+        db.instance_update(ctxt, instance_ref['id'],
+                           {'state_description': 'running',
+                            'state': power_state.RUNNING, 'host': dest})
+        self.mox.StubOutWithMock(db, 'volume_update')
+        for v in instance_ref['volumes']:
+            db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        conn._post_live_migration(ctxt, instance_ref, dest)
+        self.mox.UnsetStubs()
+
     def tearDown(self):
         super(LibvirtConnTestCase, self).tearDown()
         self.manager.delete_project(self.project)
@@ -475,3 +988,4 @@ class NWFilterTestCase(test.TestCase):
         self.fw.prepare_instance_filter(instance)
         _ensure_all_called()
         self.teardown_security_group()
+

From 6c30ae7611ee807f02e110e2fd9743b4786e205a Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Mon, 31 Jan 2011 18:41:10 +0900
Subject: [PATCH 02/18] 1. Discard nova-manage host list
 Reason: nova-manage service list can replace it.
 Changes: nova-manage

2. Fix inappropriate design in resource checking.
 Reason: nova.scheduler.driver.has_enough_resource was designed
 inappropriately: it checked total memory rather than free memory.
 Free memory needs to be registered in the database (periodically),
 but periodic updates may flood the database with requests when there
 are many compute nodes.
Currently, since memory information is only used by this feature, we take
the approach that administrators have to manually execute nova-manage to
make compute nodes update their own memory information.

Changes:
nova.db.sqlalchemy.models
- Adding memory_mb_used, local_gb_used and vcpus_used columns to Service.
  (local_gb and vcpus are just for admins' reference for now)

nova.compute.manager
- Changing nova.compute.manager.update_service.
  The Service table columns have changed, so the updating method must be
  changed as well.
- Adding nova.compute.manager.update_available_resource,
  a responder to the admin's request that lets compute nodes update their
  memory information.

nova.virt.libvirt_conn
nova.virt.xenapi_conn
nova.virt.fake
- Adding getter methods for memory_mb_used/local_gb_used/vcpus_used.

nova-manage
- A request method to let compute nodes update their own memory info.
---
 bin/nova-manage              |  92 ++++++++--------
 nova/rpc.py                  |   3 +
 nova/tests/test_compute.py   |  55 ++++++++--
 nova/tests/test_scheduler.py | 203 ++++++++++++++++++++---------------
 nova/tests/test_virt.py      |  86 ++++++++++++++-
 5 files changed, 300 insertions(+), 139 deletions(-)

diff --git a/bin/nova-manage b/bin/nova-manage
index 1ad3120b..2831e273 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -499,50 +499,6 @@ class InstanceCommands(object):
         print msg
 
 
-class HostCommands(object):
-    """Class for mangaging host(physical nodes)."""
-
-    def list(self):
-        """describe host list."""
-
-        # To supress msg: No handlers could be found for logger "amqplib"
-        logging.basicConfig()
-
-        service_refs = db.service_get_all(context.get_admin_context())
-        hosts = [h['host'] for h in service_refs]
-        hosts = list(set(hosts))
-        for host in hosts:
-            print host
-
-    def show(self, host):
-        """describe cpu/memory/hdd info for host."""
-
-        result = rpc.call(context.get_admin_context(),
-                          FLAGS.scheduler_topic,
-                          {"method": "show_host_resource",
-                           "args": {"host": host}})
-
-        # Checking result msg format is necessary, that will have done
-        # when this feture is included in API.
-        if type(result) != dict:
-            print 'Unexpected error occurs'
-        elif not result['ret']:
-            print '%s' % result['msg']
-        else:
-            cpu = result['phy_resource']['vcpus']
-            mem = result['phy_resource']['memory_mb']
-            hdd = result['phy_resource']['local_gb']
-
-            print 'HOST\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
-            print '%s\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
-            for p_id, val in result['usage'].items():
-                print '%s\t%s\t\t%s\t%s\t%s' % (host,
-                                                p_id,
-                                                val['vcpus'],
-                                                val['memory_mb'],
-                                                val['local_gb'])
-
-
 class ServiceCommands(object):
     """Enable and disable running services"""
 
@@ -587,6 +543,53 @@ class ServiceCommands(object):
         return
         db.service_update(ctxt, svc['id'], {'disabled': True})
 
+    def describeresource(self, host):
+        """describe cpu/memory/hdd info for host."""
+
+        result = rpc.call(context.get_admin_context(),
+                          FLAGS.scheduler_topic,
+                          {"method": "show_host_resource",
+                           "args": {"host": host}})
+
+        # Checking result msg format is necessary, that will have done
+        # when this feture is included in API.
+ if type(result) != dict: + print 'Unexpected error occurs' + elif not result['ret']: + print '%s' % result['msg'] + else: + cpu = result['phy_resource']['vcpus'] + mem = result['phy_resource']['memory_mb'] + hdd = result['phy_resource']['local_gb'] + cpu_u = result['phy_resource']['vcpus_used'] + mem_u = result['phy_resource']['memory_mb_used'] + hdd_u = result['phy_resource']['local_gb_used'] + + print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)' + print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd) + print '%s(used)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u) + for p_id, val in result['usage'].items(): + print '%s\t\t%s\t\t%s\t%s\t%s' % (host, + p_id, + val['vcpus'], + val['memory_mb'], + val['local_gb']) + + def updateresource(self, host): + """update available vcpu/memory/disk info for host.""" + + ctxt = context.get_admin_context() + service_refs = db.service_get_all_by_host(ctxt, host) + if len(service_refs) <= 0: + raise exception.Invalid(_('%s does not exists.') % host) + + service_refs = [s for s in service_refs if s['topic'] == 'compute'] + if len(service_refs) <= 0: + raise exception.Invalid(_('%s is not compute node.') % host) + + result = rpc.call(ctxt, db.queue_get_for(ctxt, FLAGS.compute_topic, host), + {"method": "update_available_resource"}) + class LogCommands(object): def request(self, request_id, logfile='/var/log/nova.log'): @@ -606,7 +609,6 @@ CATEGORIES = [ ('floating', FloatingIpCommands), ('network', NetworkCommands), ('instance', InstanceCommands), - ('host', HostCommands), ('service', ServiceCommands), ('log', LogCommands)] diff --git a/nova/rpc.py b/nova/rpc.py index 49b11602..cf400407 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -250,6 +250,9 @@ def msg_reply(msg_id, reply=None, failure=None): try: publisher.send({'result': reply, 'failure': failure}) except TypeError: + print '>>>>>>>>>>>>>>>>>>' + print reply + print '>>>>>>>>>>>>>>>>>>' publisher.send( {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 344c2d2b..8d3ac315 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -268,7 +268,8 @@ class ComputeTestCase(test.TestCase): """ def dic_key_check(dic): - validkey = ['vcpus', 'memory_mb', 'local_gb', + validkey = ['vcpus', 'memory_mb', 'local_gb', + 'vcpus_used', 'memory_mb_used', 'local_gb_used', 'hypervisor_type', 'hypervisor_version', 'cpu_info'] return (list(set(validkey)) == list(set(dic.keys()))) @@ -286,13 +287,55 @@ class ComputeTestCase(test.TestCase): self.compute.db = dbmock self.mox.ReplayAll() + self.compute.update_service('dummy', host, binary) + self.mox.ResetAll() + + def test_update_available_resource_exception(self): + """a testcase of update_available_resource raises exception""" + host = 'foo' + binary = 'nova-compute' + ctxt = context.get_admin_context() + dbmock = self.mox.CreateMock(db) + dbmock.service_get_by_args(mox.IgnoreArg(), + mox.StrContains(host), + mox.StrContains(binary)).\ + AndRaise(exception.NotFound()) + self.compute.db = dbmock + self.compute.host = host + self.mox.ReplayAll() try: - self.compute.update_service('dummy', host, binary) + self.compute.update_available_resource(ctxt) except exception.Invalid, e: - msg = 'Cannot insert compute manager specific info' + msg = 'Cannot update resource info.' 
c1 = ( 0 <= e.message.find(msg)) self.assertTrue(c1) - self.mox.ResetAll() + self.mox.UnsetStubs() + + def test_update_available_resource_success(self): + """a testcase of update_available_resource finishes with no errors""" + + def dic_key_check(dic): + validkey = [ 'vcpus_avail', 'memory_mb_avail', 'local_gb_avail'] + return (list(set(validkey)) == list(set(dic.keys()))) + + host = 'foo' + binary = 'nova-compute' + ctxt = context.get_admin_context() + service_ref = {'id':1, 'binary':'nova-compute', 'topic':'compute'} + dbmock = self.mox.CreateMock(db) + dbmock.service_get_by_args(mox.IgnoreArg(), + mox.StrContains(host), + mox.StrContains(binary)).\ + AndReturn(service_ref) + dbmock.service_update(mox.IgnoreArg(), + service_ref['id'], + mox.Func(dic_key_check)) + + self.compute.db = dbmock + self.compute.host = host + self.mox.ReplayAll() + self.compute.update_available_resource(ctxt) + self.mox.UnsetStubs() def _setup_other_managers(self): self.volume_manager = utils.import_object(FLAGS.volume_manager) @@ -444,7 +487,7 @@ class ComputeTestCase(test.TestCase): rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest), {"method": "pre_live_migration", "args": {'instance_id': i_id}}).\ - InAnyOrder('g1').AndRaise(rpc.RemoteError('du', 'mm', 'y')) + InAnyOrder('g1').AndRaise(rpc.RemoteError('', '', '')) self.mox.StubOutWithMock(compute_manager.LOG, 'error') compute_manager.LOG.error('Pre live migration for %s failed at %s', instance_ref['hostname'], dest) @@ -480,7 +523,7 @@ class ComputeTestCase(test.TestCase): rpc.call(c, compute_topic, {"method": "pre_live_migration", "args": {'instance_id': i_id}}).\ - AndRaise(rpc.RemoteError('du', 'mm', 'y')) + AndRaise(rpc.RemoteError('', '', '')) self.mox.StubOutWithMock(compute_manager.LOG, 'error') compute_manager.LOG.error('Pre live migration for %s failed at %s', instance_ref['hostname'], dest) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index c62bca9b..36d99d66 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -41,6 +41,7 @@ from nova.db.sqlalchemy import models FLAGS = flags.FLAGS flags.DECLARE('max_cores', 'nova.scheduler.simple') flags.DECLARE('stub_network', 'nova.compute.manager') +flags.DECLARE('instances_path', 'nova.compute.manager') class TestDriver(driver.Scheduler): @@ -111,7 +112,8 @@ class SchedulerTestCase(test.TestCase): scheduler = manager.SchedulerManager() dest = 'dummydest' ctxt = context.get_admin_context() - r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100} + r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100, + 'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10} service_ref = {'id':1, 'host':dest} service_ref.update(r0) @@ -140,7 +142,8 @@ class SchedulerTestCase(test.TestCase): scheduler = manager.SchedulerManager() dest = 'dummydest' ctxt = context.get_admin_context() - r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100} + r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100, + 'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10} r1 = {'vcpus':10, 'memory_mb':4, 'local_gb':20} r2 = {'vcpus':10, 'memory_mb':20, 'local_gb':30} service_ref = {'id':1, 'host':dest} @@ -148,7 +151,7 @@ class SchedulerTestCase(test.TestCase): instance_ref2 = {'id':2, 'project_id':'p-01', 'host':'dummy'} instance_ref2.update(r1) instance_ref3 = {'id':3, 'project_id':'p-02', 'host':'dummy'} - instance_ref3.update(r1) + instance_ref3.update(r2) self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True) manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\ @@ -176,6 +179,7 
@@ class SchedulerTestCase(test.TestCase): self.assertTrue( c1 and c2 and c3 and c4 and c5 and c6) self.mox.UnsetStubs() + class ZoneSchedulerTestCase(test.TestCase): """Test case for zone scheduler""" def setUp(self): @@ -495,7 +499,7 @@ class SimpleDriverTestCase(test.TestCase): ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-00000001', 'host':'dummy', - 'volumes':[{'id':1}, {'id':2}]} + 'volumes':[{'id':1}, {'id':2}]} dest = 'dummydest' self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) @@ -793,7 +797,10 @@ class SimpleDriverTestCase(test.TestCase): ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + driver_i = self.scheduler.driver + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([]) @@ -813,6 +820,7 @@ class SimpleDriverTestCase(test.TestCase): Original host(an instance launched on) does not exist. """ dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'launched_on':'h1'} @@ -821,6 +829,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref.__setitem__('topic', 'compute') service_ref.__setitem__('host', i_ref['host']) + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -844,6 +854,7 @@ class SimpleDriverTestCase(test.TestCase): Original host and dest host has different hypervisor type. """ dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', @@ -856,6 +867,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref2.__setitem__('id', 2) service_ref2.__setitem__('hypervisor_type', 'xen') + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -879,6 +892,7 @@ class SimpleDriverTestCase(test.TestCase): Original host and dest host has different hypervisor version. """ dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', @@ -891,6 +905,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref2.__setitem__('id', 2) service_ref2.__setitem__('hypervisor_version', 12001) + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -914,6 +930,7 @@ class SimpleDriverTestCase(test.TestCase): Original host and dest host has different hypervisor version. 
""" dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', @@ -927,6 +944,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref2.__setitem__('hypervisor_version', 12000) service_ref2.__setitem__('cpuinfo', 'info') + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -956,6 +975,7 @@ class SimpleDriverTestCase(test.TestCase): The testcase make sure everything finished with no error. """ dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', @@ -969,6 +989,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref2.__setitem__('hypervisor_version', 12000) service_ref2.__setitem__('cpuinfo', 'info') + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -988,36 +1010,6 @@ class SimpleDriverTestCase(test.TestCase): self.assertTrue(ret == None) self.mox.UnsetStubs() - def test_has_enough_resource_lack_resource_vcpu(self): - """ - A testcase of driver.has_enough_resource. - Lack of vcpu.(boundary check) - """ - dest = 'dummydest' - ctxt = context.get_admin_context() - topic = FLAGS.compute_topic - service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} - i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', - 'vcpus':6, 'memory_mb':8, 'local_gb':10} - i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - - self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) - driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([service_ref]) - driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([i_ref2, i_ref3]) - - self.mox.ReplayAll() - try: - self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) - except exception.NotEmpty, e: - msg = 'is not capable to migrate' - self.assertTrue(e.message.find(msg) >= 0) - self.mox.UnsetStubs() - def test_has_enough_resource_lack_resource_memory(self): """ A testcase of driver.has_enough_resource. 
@@ -1026,60 +1018,23 @@ class SimpleDriverTestCase(test.TestCase): dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} - i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':16, 'local_gb':10} - i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} + service_ref = {'id':1, 'memory_mb':32, 'memory_mb_used':12, 'local_gb':100} + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':20, 'local_gb':10} self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) - driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([i_ref2, i_ref3]) self.mox.ReplayAll() try: - self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + self.scheduler.driver.has_enough_resource(ctxt, i_ref, dest) except exception.NotEmpty, e: msg = 'is not capable to migrate' self.assertTrue(e.message.find(msg) >= 0) self.mox.UnsetStubs() self.mox.UnsetStubs() - def test_has_enough_resource_lack_resource_disk(self): - """ - A testcase of driver.has_enough_resource. - Lack of local_gb.(boundary check) - """ - scheduler = manager.SchedulerManager() - dest = 'dummydest' - ctxt = context.get_admin_context() - topic = FLAGS.compute_topic - service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} - i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':80} - i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - - self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) - driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([service_ref]) - driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([i_ref2, i_ref3]) - - self.mox.ReplayAll() - try: - self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) - except exception.NotEmpty, e: - msg = 'is not capable to migrate' - self.assertTrue(e.message.find(msg) >= 0) - self.mox.UnsetStubs() - def test_has_enough_resource_works_correctly(self): """ A testcase of driver.has_enough_resource @@ -1088,21 +1043,101 @@ class SimpleDriverTestCase(test.TestCase): dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} - i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + service_ref = {'id':1, 'memory_mb':120, 'memory_mb_used':32} + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'vcpus':5, 'memory_mb':8, 'local_gb':10} self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) - driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([i_ref2, i_ref3]) self.mox.ReplayAll() - ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref, dest) + self.assertTrue(ret == None) + self.mox.UnsetStubs() + + def 
test_mounted_on_same_shared_storage_cannot_make_tmpfile(self): + """ + A testcase of driver.mounted_on_same_shared_storage + checks log message when dest host cannot make tmpfile. + """ + dest = 'dummydest' + driver_i = self.scheduler.driver + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + fpath = '/test/20110127120000' + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + {"method": 'mktmpfile'}).AndRaise(rpc.RemoteError('', '', '')) + self.mox.StubOutWithMock(driver.logging, 'error') + msg = _("Cannot create tmpfile at %s to confirm shared storage.") + driver.logging.error(msg % FLAGS.instances_path) + + self.mox.ReplayAll() + self.assertRaises(rpc.RemoteError, + driver_i.mounted_on_same_shared_storage, + ctxt, i_ref, dest) + self.mox.UnsetStubs() + + def test_mounted_on_same_shared_storage_cannot_comfirm_tmpfile(self): + """ + A testcase of driver.mounted_on_same_shared_storage + checks log message when src host cannot comfirm tmpfile. + """ + dest = 'dummydest' + driver_i = self.scheduler.driver + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + fpath = '/test/20110127120000' + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + {"method": 'mktmpfile'}).AndReturn(fpath) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(ctxt, FLAGS.compute_topic, i_ref['host']), + {"method": 'exists', "args":{'path':fpath}}).\ + AndRaise(rpc.RemoteError('','','')) + self.mox.StubOutWithMock(driver.logging, 'error') + msg = _("Cannot create tmpfile at %s to confirm shared storage.") + driver.logging.error(msg % FLAGS.instances_path) + + self.mox.ReplayAll() + self.assertRaises(rpc.RemoteError, + driver_i.mounted_on_same_shared_storage, + ctxt, i_ref, dest) + self.mox.UnsetStubs() + + + def test_mounted_on_same_shared_storage_works_correctly(self): + """ + A testcase of driver.mounted_on_same_shared_storage + to make sure everything finished with no error. 
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + fpath = '/test/20110127120000' + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest), + {"method": 'mktmpfile'}).AndReturn(fpath) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, i_ref['host']), + {"method": 'exists', "args":{'path':fpath}}) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest), + {"method": 'remove', "args":{'path':fpath}}) + + self.mox.ReplayAll() + ret = self.scheduler.driver.mounted_on_same_shared_storage(ctxt, + i_ref, + dest) self.assertTrue(ret == None) self.mox.UnsetStubs() diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 177e8f02..2828bace 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -241,11 +241,11 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, testuri) - def test_get_memory_mb(self): + def test_get_vcpu_total(self): """ - Check if get_memory_mb returns memory value + Check if get_vcpu_total returns appropriate cpu value Connection/OS/driver differenct does not matter for this method, - so everyone can execute for checking. + everyone can execute for checking. """ try: self._driver_dependent_test_setup() @@ -254,9 +254,87 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) - self.assertTrue(0 < conn.get_memory_mb()) + self.assertTrue(0 < conn.get_vcpu_total()) self.mox.UnsetStubs() + + def test_get_memory_mb_total(self): + """Check if get_memory_mb returns appropriate memory value""" + try: + self._driver_dependent_test_setup() + except: + return + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertTrue(0 < conn.get_memory_mb_total()) + self.mox.UnsetStubs() + + def test_get_local_gb_total(self): + """Check if get_local_gb_total returns appropriate disk value""" + # Note(masumotok): cannot test b/c FLAGS.instances_path is + # inevitable for this test.. 
+        #try:
+        #    self._driver_dependent_test_setup()
+        #except:
+        #    return
+        #
+        #self.mox.ReplayAll()
+        #conn = libvirt_conn.LibvirtConnection(False)
+        #self.assertTrue(0 < conn.get_local_gb_total())
+        #self.mox.UnsetStubs()
+        pass
+
+    def test_get_vcpu_used(self):
+        """Check if get_local_gb_total returns appropriate disk value"""
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1,2])
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "vcpus", use_mock_anything=True)
+        vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]])
+        vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]])
+        libvirt_conn.LibvirtConnection._conn.lookupByID(mox.IgnoreArg()).\
+            AndReturn(vdmock)
+        libvirt_conn.LibvirtConnection._conn.lookupByID(mox.IgnoreArg()).\
+            AndReturn(vdmock)
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertTrue(conn.get_vcpu_used() == 4)
+        self.mox.UnsetStubs()
+
+    def test_get_memory_mb_used(self):
+        """Check if get_memory_mb returns appropriate memory value"""
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertTrue(0 < conn.get_memory_mb_used())
+        self.mox.UnsetStubs()
+
+    def test_get_local_gb_used(self):
+        """Check if get_local_gb_total returns appropriate disk value"""
+        # Note(masumotok): cannot test b/c FLAGS.instances_path is
+        # inevitable for this test..
+        #try:
+        #    self._driver_dependent_test_setup()
+        #except:
+        #    return
+
+        #self.mox.ReplayAll()
+        #conn = libvirt_conn.LibvirtConnection(False)
+        #self.assertTrue(0 < conn.get_local_gb_used())
+        #self.mox.UnsetStubs()
+        pass
+
     def test_get_cpu_info_works_correctly(self):
         """
         Check if get_cpu_info works correctly.

From a98c52b6b38e1bd0efcc38eb6d66e2338a3e4e8d Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Fri, 18 Feb 2011 14:15:04 +0900
Subject: [PATCH 03/18] Fixed based on reviewer's comments.

1. Erase wrapper functions (remove/exists/mktempfile) from nova.utils.
2. nova-manage service describeresource (-> describe_resource)
3. nova-manage service updateresource (-> update_resource)
4. Erase "my mistake" print statements.

Additional changes are made at:
1. nova.image.s3.show
2. nova.compute.api.create
because instances cannot be launched without these changes.
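For orientation before the diff, a minimal sketch of what the renamed
update_resource command does. rpc.call, db.queue_get_for and the
"update_available_resource" method name are the ones used in the patch;
the surrounding scaffolding is illustrative only, with error handling
elided:

    from nova import context
    from nova import db
    from nova import flags
    from nova import rpc

    FLAGS = flags.FLAGS

    def update_resource(host):
        """Ask one compute host to refresh its usage columns."""
        ctxt = context.get_admin_context()
        # Route the request to the per-host compute queue; the compute
        # manager answers by re-reading vcpus_used/memory_mb_used/
        # local_gb_used from its virt driver and writing them back to
        # the services table (see update_available_resource, PATCH 02).
        rpc.call(ctxt,
                 db.queue_get_for(ctxt, FLAGS.compute_topic, host),
                 {"method": "update_available_resource"})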
--- bin/nova-manage | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 7336a582..0bfe0d96 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -574,7 +574,7 @@ class ServiceCommands(object): return db.service_update(ctxt, svc['id'], {'disabled': True}) - def describeresource(self, host): + def describe_resource(self, host): """describe cpu/memory/hdd info for host.""" result = rpc.call(context.get_admin_context(), @@ -606,7 +606,7 @@ class ServiceCommands(object): val['memory_mb'], val['local_gb']) - def updateresource(self, host): + def update_resource(self, host): """update available vcpu/memory/disk info for host.""" ctxt = context.get_admin_context() @@ -618,9 +618,9 @@ class ServiceCommands(object): if len(service_refs) <= 0: raise exception.Invalid(_('%s is not compute node.') % host) - result = rpc.call(ctxt, - db.queue_get_for(ctxt, FLAGS.compute_topic, host), - {"method": "update_available_resource"}) + rpc.call(ctxt, + db.queue_get_for(ctxt, FLAGS.compute_topic, host), + {"method": "update_available_resource"}) class LogCommands(object): From ad3365473814cc117afddf40401cf922b1f129c0 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 22 Feb 2011 23:55:03 +0900 Subject: [PATCH 04/18] Fixed based on reviewer's comment. 1. Change docstrings format 2. Fix comment grammer mistake, etc --- bin/nova-api | 2 - bin/nova-dhcpbridge | 1 - bin/nova-manage | 28 +++- nova/tests/test_compute.py | 75 +++------ nova/tests/test_scheduler.py | 141 ++++++---------- nova/tests/test_virt.py | 307 +++++++++++++++-------------------- 6 files changed, 222 insertions(+), 332 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 59466a8c..11176a02 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -38,13 +38,11 @@ from nova import flags from nova import log as logging from nova import version from nova import wsgi -from nova import utils logging.basicConfig() LOG = logging.getLogger('nova.api') LOG.setLevel(logging.DEBUG) -utils.default_flagfile() FLAGS = flags.FLAGS API_ENDPOINTS = ['ec2', 'osapi'] diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index fb04a484..d38ba254 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -125,7 +125,6 @@ def main(): LOG.debug(msg) globals()[action + '_lease'](mac, ip, hostname, interface) else: - open('/tmp/aaa', 'w+').write('-- %s' % interface) print init_leases(interface) if __name__ == "__main__": diff --git a/bin/nova-manage b/bin/nova-manage index 696ce0ca..49246fcc 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -548,7 +548,12 @@ class InstanceCommands(object): """Class for mangaging VM instances.""" def live_migration(self, ec2_id, dest): - """Migrates a running instance to a new machine.""" + """Migrates a running instance to a new machine. + + :param ec2_id: instance id which comes from euca-describe-instance. + :param dest: destination host name. + + """ ctxt = context.get_admin_context() instance_id = ec2_id_to_id(ec2_id) @@ -569,9 +574,8 @@ class InstanceCommands(object): "dest": dest, "topic": FLAGS.compute_topic}}) - msg = 'Migration of %s initiated. ' % ec2_id - msg += 'Check its progress using euca-describe-instances.' - print msg + print _('Migration of %s initiated.' 
+ 'Check its progress using euca-describe-instances.') % ec2_id class ServiceCommands(object): @@ -619,15 +623,17 @@ class ServiceCommands(object): db.service_update(ctxt, svc['id'], {'disabled': True}) def describe_resource(self, host): - """describe cpu/memory/hdd info for host.""" + """Describes cpu/memory/hdd info for host. + + :param host: hostname. + + """ result = rpc.call(context.get_admin_context(), FLAGS.scheduler_topic, - {"method": "show_host_resource", + {"method": "show_host_resources", "args": {"host": host}}) - # Checking result msg format is necessary, that will have done - # when this feture is included in API. if type(result) != dict: print 'Unexpected error occurs' print '[Result]', result @@ -650,7 +656,11 @@ class ServiceCommands(object): val['local_gb']) def update_resource(self, host): - """update available vcpu/memory/disk info for host.""" + """Updates available vcpu/memory/disk info for host. + + :param host: hostname. + + """ ctxt = context.get_admin_context() service_refs = db.service_get_all_by_host(ctxt, host) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 74cb82ee..3c88d186 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -314,10 +314,7 @@ class ComputeTestCase(test.TestCase): self.compute_driver = utils.import_object(FLAGS.compute_driver) def test_pre_live_migration_instance_has_no_fixed_ip(self): - """ - if instances that are intended to be migrated doesnt have fixed_ip - (not happens usually), pre_live_migration has to raise Exception. - """ + """Confirm raising exception if instance doesn't have fixed_ip.""" instance_ref = self._get_dummy_instance() c = context.get_admin_context() i_id = instance_ref['id'] @@ -331,14 +328,9 @@ class ComputeTestCase(test.TestCase): self.assertRaises(exception.NotFound, self.compute.pre_live_migration, c, instance_ref['id']) - self.mox.ResetAll() def test_pre_live_migration_instance_has_volume(self): - """if any volumes are attached to the instances that are - intended to be migrated, setup_compute_volume must be - called because aoe module should be inserted at destination - host. This testcase checks on it. - """ + """Confirm setup_compute_volume is called when volume is mounted.""" i_ref = self._get_dummy_instance() c = context.get_admin_context() @@ -364,14 +356,9 @@ class ComputeTestCase(test.TestCase): self.mox.ReplayAll() ret = self.compute.pre_live_migration(c, i_ref['id']) self.assertEqual(ret, None) - self.mox.ResetAll() def test_pre_live_migration_instance_has_no_volume(self): - """if any volumes are not attached to the instances that are - intended to be migrated, log message should be appears - because administrator can proove instance conditions before - live_migration if any trouble occurs. - """ + """Confirm log meg when instance doesn't mount any volumes.""" i_ref = self._get_dummy_instance() i_ref.__setitem__('volumes', []) c = context.get_admin_context() @@ -395,14 +382,14 @@ class ComputeTestCase(test.TestCase): self.mox.ReplayAll() ret = self.compute.pre_live_migration(c, i_ref['id']) self.assertEqual(ret, None) - self.mox.ResetAll() def test_pre_live_migration_setup_compute_node_fail(self): - """setup_compute_node sometimes fail since concurrent request - comes to iptables and iptables complains. Then this method - tries to retry, but raise exception in case of over - max_retry_count. this method confirms raising exception. + """Confirm operation setup_compute_network() fails. + + It retries and raise exception when timeout exceeded. 
+ """ + i_ref = self._get_dummy_instance() c = context.get_admin_context() @@ -427,14 +414,9 @@ class ComputeTestCase(test.TestCase): self.assertRaises(exception.ProcessExecutionError, self.compute.pre_live_migration, c, i_ref['id']) - self.mox.ResetAll() - def test_live_migration_instance_has_volume(self): - """Any volumes are mounted by instances to be migrated are found, - vblade health must be checked before starting live-migration. - And that is checked by check_for_export(). - This testcase confirms check_for_export() is called. - """ + def test_live_migration_works_correctly_with_volume(self): + """Confirm check_for_export to confirm volume health check.""" i_ref = self._get_dummy_instance() c = context.get_admin_context() topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) @@ -457,15 +439,9 @@ class ComputeTestCase(test.TestCase): self.mox.ReplayAll() ret = self.compute.live_migration(c, i_ref['id'], i_ref['host']) self.assertEqual(ret, None) - self.mox.ResetAll() - def test_live_migration_instance_has_volume_and_exception(self): - """In addition to test_live_migration_instance_has_volume testcase, - this testcase confirms if any exception raises from - check_for_export(). Then, valid seaquence of this method should - recovering instance/volumes status(ex. instance['state_description'] - is changed from 'migrating' -> 'running', was changed by scheduler) - """ + def test_live_migration_dest_raises_exception(self): + """Confirm exception when pre_live_migration fails.""" i_ref = self._get_dummy_instance() c = context.get_admin_context() topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) @@ -484,20 +460,16 @@ class ComputeTestCase(test.TestCase): 'state': power_state.RUNNING, 'host': i_ref['host']}) for v in i_ref['volumes']: - dbmock.volume_update(c, v['id'], {'status': 'in-use', - 'host': i_ref['host']}) + dbmock.volume_update(c, v['id'], {'status': 'in-use'}) self.compute.db = dbmock self.mox.ReplayAll() self.assertRaises(rpc.RemoteError, self.compute.live_migration, c, i_ref['id'], i_ref['host']) - self.mox.ResetAll() - def test_live_migration_instance_has_no_volume_and_exception(self): - """Simpler than - test_live_migration_instance_has_volume_and_exception - """ + def test_live_migration_dest_raises_exception_no_volume(self): + """Same as above test(input pattern is different) """ i_ref = self._get_dummy_instance() i_ref.__setitem__('volumes', []) c = context.get_admin_context() @@ -520,10 +492,9 @@ class ComputeTestCase(test.TestCase): self.assertRaises(rpc.RemoteError, self.compute.live_migration, c, i_ref['id'], i_ref['host']) - self.mox.ResetAll() - def test_live_migration_instance_has_no_volume(self): - """Simpler than test_live_migration_instance_has_volume.""" + def test_live_migration_works_correctly_no_volume(self): + """Confirm live_migration() works as expected correctly.""" i_ref = self._get_dummy_instance() i_ref.__setitem__('volumes', []) c = context.get_admin_context() @@ -545,11 +516,9 @@ class ComputeTestCase(test.TestCase): self.mox.ReplayAll() ret = self.compute.live_migration(c, i_ref['id'], i_ref['host']) self.assertEqual(ret, None) - self.mox.ResetAll() def test_post_live_migration_working_correctly(self): - """post_live_migration works as expected correctly """ - + """Confirm post_live_migration() works as expected correctly.""" dest = 'desthost' flo_addr = '1.2.1.2' @@ -579,19 +548,15 @@ class ComputeTestCase(test.TestCase): # executing self.mox.ReplayAll() ret = self.compute.post_live_migration(c, i_ref, dest) - 
self.mox.UnsetStubs() # make sure every data is rewritten to dest i_ref = db.instance_get(c, i_ref['id']) c1 = (i_ref['host'] == dest) - v_ref = db.volume_get(c, v_ref['id']) - c2 = (v_ref['host'] == dest) - c3 = False flo_refs = db.floating_ip_get_all_by_host(c, dest) - c3 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr) + c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr) # post operaton - self.assertTrue(c1 and c2 and c3) + self.assertTrue(c1 and c2) db.instance_destroy(c, instance_id) db.volume_destroy(c, v_ref['id']) db.floating_ip_destroy(c, flo_addr) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 729bcb58..30110684 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -108,22 +108,21 @@ class SchedulerTestCase(test.TestCase): self.mox.ReplayAll() scheduler.named_method(ctxt, 'topic', num=7) - def test_show_host_resource_host_not_exit(self): - """ - A testcase of driver.has_enough_resource - given host does not exists. - """ + def test_show_host_resources_host_not_exit(self): + """A host given as an argument does not exists.""" + scheduler = manager.SchedulerManager() dest = 'dummydest' ctxt = context.get_admin_context() try: - scheduler.show_host_resource(ctxt, dest) + scheduler.show_host_resources(ctxt, dest) except exception.NotFound, e: c1 = (0 <= e.message.find('does not exist or not compute node')) self.assertTrue(c1) def _dic_is_equal(self, dic1, dic2, keys=None): + """Compares 2 dictionary contents(Helper method)""" if not keys: keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used', 'memory_mb_used', 'local_gb_used'] @@ -133,16 +132,14 @@ class SchedulerTestCase(test.TestCase): return False return True - def test_show_host_resource_no_project(self): - """ - A testcase of driver.show_host_resource - no instance stays on the given host - """ + def test_show_host_resources_no_project(self): + """No instance are running on the given host.""" + scheduler = manager.SchedulerManager() ctxt = context.get_admin_context() s_ref = self._create_compute_service() - result = scheduler.show_host_resource(ctxt, s_ref['host']) + result = scheduler.show_host_resources(ctxt, s_ref['host']) # result checking c1 = ('resource' in result and 'usage' in result) @@ -152,11 +149,9 @@ class SchedulerTestCase(test.TestCase): self.assertTrue(c1 and c2 and c3) db.service_destroy(ctxt, s_ref['id']) - def test_show_host_resource_works_correctly(self): - """ - A testcase of driver.show_host_resource - to make sure everything finished with no error. 
- """ + def test_show_host_resources_works_correctly(self): + """show_host_resources() works correctly as expected.""" + scheduler = manager.SchedulerManager() ctxt = context.get_admin_context() s_ref = self._create_compute_service() @@ -164,7 +159,7 @@ class SchedulerTestCase(test.TestCase): i_ref2 = self._create_instance(project_id='p-02', vcpus=3, host=s_ref['host']) - result = scheduler.show_host_resource(ctxt, s_ref['host']) + result = scheduler.show_host_resources(ctxt, s_ref['host']) c1 = ('resource' in result and 'usage' in result) compute_service = s_ref['compute_service'][0] @@ -284,6 +279,7 @@ class SimpleDriverTestCase(test.TestCase): return db.volume_create(self.context, vol)['id'] def _create_compute_service(self, **kwargs): + """Create a compute service.""" dic = {'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0, 'availability_zone': 'dummyzone'} @@ -698,13 +694,13 @@ class SimpleDriverTestCase(test.TestCase): volume1.kill() volume2.kill() - def test_scheduler_live_migraiton_with_volume(self): - """ - driver.scheduler_live_migration finishes successfully - (volumes are attached to instances) - This testcase make sure schedule_live_migration - changes instance state from 'running' -> 'migrating' + def test_scheduler_live_migration_with_volume(self): + """scheduler_live_migration() works correctly as expected. + + Also, checks instance state is changed from 'running' -> 'migrating'. + """ + instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) dic = {'instance_id': instance_id, 'size': 1} @@ -737,11 +733,9 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.volume_destroy(self.context, v_ref['id']) - def test_live_migraiton_src_check_instance_not_running(self): - """ - A testcase of driver._live_migration_src_check. - The instance given by instance_id is not running. - """ + def test_live_migration_src_check_instance_not_running(self): + """The instance given by instance_id is not running.""" + instance_id = self._create_instance(state_description='migrating') i_ref = db.instance_get(self.context, instance_id) @@ -754,12 +748,9 @@ class SimpleDriverTestCase(test.TestCase): self.assertTrue(c) db.instance_destroy(self.context, instance_id) - def test_live_migraiton_src_check_volume_node_not_alive(self): - """ - A testcase of driver._live_migration_src_check. - Volume node is not alive if any volumes are attached to - the given instance. - """ + def test_live_migration_src_check_volume_node_not_alive(self): + """Raise exception when volume node is not alive.""" + instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) dic = {'instance_id': instance_id, 'size': 1} @@ -782,11 +773,8 @@ class SimpleDriverTestCase(test.TestCase): db.service_destroy(self.context, s_ref['id']) db.volume_destroy(self.context, v_ref['id']) - def test_live_migraiton_src_check_compute_node_not_alive(self): - """ - A testcase of driver._live_migration_src_check. - The testcase make sure src-compute node is alive. 
- """ + def test_live_migration_src_check_compute_node_not_alive(self): + """Confirms src-compute node is alive.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) t = datetime.datetime.utcnow() - datetime.timedelta(10) @@ -803,11 +791,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_src_check_works_correctly(self): - """ - A testcase of driver._live_migration_src_check. - The testcase make sure everything finished with no error. - """ + def test_live_migration_src_check_works_correctly(self): + """Confirms this method finishes with no error.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) s_ref = self._create_compute_service(host=i_ref['host']) @@ -819,11 +804,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_dest_check_not_alive(self): - """ - A testcase of driver._live_migration_dst_check. - Destination host does not exist. - """ + def test_live_migration_dest_check_not_alive(self): + """Confirms exception raises in case dest host does not exist.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) t = datetime.datetime.utcnow() - datetime.timedelta(10) @@ -841,11 +823,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_dest_check_service_same_host(self): - """ - A testcase of driver._live_migration_dst_check. - Destination host is same as src host. - """ + def test_live_migration_dest_check_service_same_host(self): + """Confirms exceptioin raises in case dest and src is same host.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) s_ref = self._create_compute_service(host=i_ref['host']) @@ -861,11 +840,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_dest_check_service_lack_memory(self): - """ - A testcase of driver._live_migration_dst_check. - destination host doesnt have enough memory. - """ + def test_live_migration_dest_check_service_lack_memory(self): + """Confirms exception raises when dest doesn't have enough memory.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) s_ref = self._create_compute_service(host='somewhere', @@ -882,11 +858,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_dest_check_service_works_correctly(self): - """ - A testcase of driver._live_migration_dst_check. - The testcase make sure everything finished with no error. - """ + def test_live_migration_dest_check_service_works_correctly(self): + """Confirms method finishes with no error.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) s_ref = self._create_compute_service(host='somewhere', @@ -899,13 +872,11 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_common_check_service_orig_not_exists(self): - """ - A testcase of driver._live_migration_common_check. 
- Destination host does not exist. - """ + def test_live_migration_common_check_service_orig_not_exists(self): + """Destination host does not exist.""" + dest = 'dummydest' - # mocks for live_migraiton_common_check() + # mocks for live_migration_common_check() instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) t1 = datetime.datetime.utcnow() - datetime.timedelta(10) @@ -929,18 +900,15 @@ class SimpleDriverTestCase(test.TestCase): i_ref, dest) except exception.Invalid, e: - c = (e.message.find('does not exists') >= 0) + c = (e.message.find('does not exist') >= 0) self.assertTrue(c) self.mox.UnsetStubs() db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_common_check_service_different_hypervisor(self): - """ - A testcase of driver._live_migration_common_check. - Original host and dest host has different hypervisor type. - """ + def test_live_migration_common_check_service_different_hypervisor(self): + """Original host and dest host has different hypervisor type.""" dest = 'dummydest' instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) @@ -969,11 +937,8 @@ class SimpleDriverTestCase(test.TestCase): db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) - def test_live_migraiton_common_check_service_different_version(self): - """ - A testcase of driver._live_migration_common_check. - Original host and dest host has different hypervisor version. - """ + def test_live_migration_common_check_service_different_version(self): + """Original host and dest host has different hypervisor version.""" dest = 'dummydest' instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) @@ -1003,11 +968,9 @@ class SimpleDriverTestCase(test.TestCase): db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) - def test_live_migraiton_common_check_service_checking_cpuinfo_fail(self): - """ - A testcase of driver._live_migration_common_check. - Original host and dest host has different hypervisor version. 
- """ + def test_live_migration_common_check_checking_cpuinfo_fail(self): + """Raise excetion when original host doen't have compatible cpu.""" + dest = 'dummydest' instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) @@ -1025,7 +988,7 @@ class SimpleDriverTestCase(test.TestCase): rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), {"method": 'compare_cpu', "args": {'cpu_info': s_ref2['compute_service'][0]['cpu_info']}}).\ - AndRaise(rpc.RemoteError('doesnt have compatibility to', '', '')) + AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) self.mox.ReplayAll() try: @@ -1033,7 +996,7 @@ class SimpleDriverTestCase(test.TestCase): i_ref, dest) except rpc.RemoteError, e: - c = (e.message.find(_('doesnt have compatibility to')) >= 0) + c = (e.message.find(_("doesn't have compatibility to")) >= 0) self.assertTrue(c) self.mox.UnsetStubs() diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 8ed726c2..91bdfcc5 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -23,8 +23,8 @@ from nova import context from nova import db from nova import exception from nova import flags -from nova import test from nova import logging +from nova import test from nova import utils from nova.api.ec2 import cloud from nova.auth import manager @@ -76,12 +76,12 @@ class LibvirtConnTestCase(test.TestCase): 'bridge': 'br101', 'instance_type': 'm1.small'} - def _driver_dependent_test_setup(self): - """ - Setup method. - Call this method at the top of each testcase method, - if the testcase is necessary libvirt and cheetah. - """ + def _driver_dependant_test_setup(self): + """Call this method at the top of each testcase method. + + Checks if libvirt and cheetah, etc is installed. + Otherwise, skip testing.""" + try: global libvirt global libxml2 @@ -92,10 +92,9 @@ class LibvirtConnTestCase(test.TestCase): except ImportError, e: logging.warn("""This test has not been done since """ """using driver-dependent library Cheetah/libvirt/libxml2.""") - raise e + raise # inebitable mocks for calling - #nova.virt.libvirt_conn.LibvirtConnection.__init__ obj = utils.import_object(FLAGS.firewall_driver) fwmock = self.mox.CreateMock(obj) self.mox.StubOutWithMock(libvirt_conn, 'utils', @@ -258,51 +257,31 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(uri, testuri) def test_get_vcpu_total(self): - """ - Check if get_vcpu_total returns appropriate cpu value - Connection/OS/driver differenct does not matter for this method, - everyone can execute for checking. - """ + """Check if get_vcpu_total returns appropriate cpu value.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(0 < conn.get_vcpu_total()) - self.mox.UnsetStubs() def test_get_memory_mb_total(self): - """Check if get_memory_mb returns appropriate memory value""" + """Check if get_memory_mb returns appropriate memory value.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(0 < conn.get_memory_mb_total()) - self.mox.UnsetStubs() - - def test_get_local_gb_total(self): - """Check if get_local_gb_total returns appropriate disk value""" - # Note(masumotok): leave this b/c FLAGS.instances_path is inevitable.. 
- #try: - # self._driver_dependent_test_setup() - #except: - # return - # - #self.mox.ReplayAll() - #conn = libvirt_conn.LibvirtConnection(False) - #self.assertTrue(0 < conn.get_local_gb_total()) - #self.mox.UnsetStubs() - pass def test_get_vcpu_used(self): - """Check if get_local_gb_total returns appropriate disk value""" + """Check if get_local_gb_total returns appropriate disk value.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -321,52 +300,45 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(conn.get_vcpu_used() == 4) - self.mox.UnsetStubs() def test_get_memory_mb_used(self): - """Check if get_memory_mb returns appropriate memory value""" + """Check if get_memory_mb returns appropriate memory value.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(0 < conn.get_memory_mb_used()) - self.mox.UnsetStubs() - - def test_get_local_gb_used(self): - """Check if get_local_gb_total returns appropriate disk value""" - # Note(masumotok): leave this b/c FLAGS.instances_path is inevitable - #try: - # self._driver_dependent_test_setup() - #except: - # return - - #self.mox.ReplayAll() - #conn = libvirt_conn.LibvirtConnection(False) - #self.assertTrue(0 < conn.get_local_gb_used()) - #self.mox.UnsetStubs() - pass def test_get_cpu_info_works_correctly(self): - """ - Check if get_cpu_info works correctly. - (in case libvirt.getCapabilities() works correctly) - """ - xml = ("""x86_64Nehalem""" - """Intel""" - """""" - """""" - """""" - """""" - """""" - """""" - """""") + """Check if get_cpu_info works correctly as expected.""" + xml = """ + x86_64 + Nehalem + Intel + + + + + + + + + + + + + + + + + + """ try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, @@ -376,27 +348,34 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(0 < len(conn.get_cpu_info())) - self.mox.UnsetStubs() def test_get_cpu_info_inappropreate_xml(self): - """ - Check if get_cpu_info raises exception - in case libvirt.getCapabilities() returns wrong xml - (in case of xml doesnt have tag) - """ - xml = ("""x86_64Nehalem""" - """Intel""" - """""" - """""" - """""" - """""" - """""" - """""" - """""") + """Raise exception if given xml is inappropriate.""" + xml = """ + x86_64 + Nehalem + Intel + + + + + + + + + + + + + + + + + + """ try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, @@ -409,29 +388,34 @@ class LibvirtConnTestCase(test.TestCase): conn.get_cpu_info() except exception.Invalid, e: c1 = (0 <= e.message.find('Invalid xml')) - self.assertTrue(c1) - self.mox.UnsetStubs() + self.assertTrue(c1) def test_get_cpu_info_inappropreate_xml2(self): - """ - Check if get_cpu_info raises exception - in case libvirt.getCapabilities() returns wrong xml - (in case of xml doesnt have inproper tag - meaning missing "socket" attribute) - """ - xml = ("""x86_64Nehalem""" - """Intel""" - """""" - """""" - """""" - """""" - """""" - """""" - """""") + """Raise exception if given xml is inappropriate(topology tag).""" + xml = """ + x86_64 + Nehalem + Intel + + + + + + + + + + + + + + + + + """ try: - 
self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, @@ -444,29 +428,12 @@ class LibvirtConnTestCase(test.TestCase): conn.get_cpu_info() except exception.Invalid, e: c1 = (0 <= e.message.find('Invalid xml: topology')) - self.assertTrue(c1) - self.mox.UnsetStubs() + self.assertTrue(c1) def test_update_available_resource_works_correctly(self): - """ - In this method, vcpus/memory_mb/local_gb/vcpu_used/ - memory_mb_used/local_gb_used/hypervisor_type/ - hypervisor_version/cpu_info should be changed. - Based on this specification, this testcase confirms - if this method finishes successfully, - meaning self.db.service_update must be called with dictinary - - {'vcpu':aaa, 'memory_mb':bbb, 'local_gb':ccc, - 'vcpu_used':aaa, 'memory_mb_used':bbb, 'local_gb_sed':ccc, - 'hypervisor_type':ddd, 'hypervisor_version':eee, - 'cpu_info':fff} - - Since each value of above dict can be obtained through - driver(different depends on environment), - only dictionary keys are checked. - """ + """Confirm compute_service table is updated successfully.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -478,7 +445,9 @@ class LibvirtConnTestCase(test.TestCase): host = 'foo' binary = 'nova-compute' - service_ref = {'id': 1, 'host': host, 'binary': binary, + service_ref = {'id': 1, + 'host': host, + 'binary': binary, 'topic': 'compute'} self.mox.StubOutWithMock(db, 'service_get_all_by_topic') @@ -491,15 +460,11 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) conn.update_available_resource(host) - self.mox.UnsetStubs() def test_update_resource_info_raise_exception(self): - """ - This testcase confirms if no record found on Service - table, exception can be raised. - """ + """Raise exception if no recorde found on services table.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -518,18 +483,19 @@ class LibvirtConnTestCase(test.TestCase): msg = 'Cannot insert compute manager specific info' c1 = (0 <= e.message.find(msg)) self.assertTrue(c1) - self.mox.ResetAll() def test_compare_cpu_works_correctly(self): - """Calling libvirt.compute_cpu() and works correctly """ - - t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ - """"topology":{"cores":"%s", "threads":"%s", """ - """"sockets":"%s"}, "features":[%s]}""") - cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') + """Calling libvirt.compute_cpu() and works correctly.""" + t = {} + t['arch'] = 'x86' + t['model'] = 'model' + t['vendor'] = 'Intel' + t['topology'] = {'cores': "2", "threads": "1", "sockets": "4"} + t['features'] = ["tm"] + cpu_info = utils.dumps(t) try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -542,20 +508,19 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(None == conn.compare_cpu(cpu_info)) - self.mox.UnsetStubs() def test_compare_cpu_raises_exception(self): - """ - Libvirt-related exception occurs when calling - libvirt.compare_cpu(). 
- """ - t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ - """"topology":{"cores":"%s", "threads":"%s", """ - """"sockets":"%s"}, "features":[%s]}""") - cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') + """Libvirt-related exception occurs when calling compare_cpu().""" + t = {} + t['arch'] = 'x86' + t['model'] = 'model' + t['vendor'] = 'Intel' + t['topology'] = {'cores': "2", "threads": "1", "sockets": "4"} + t['features'] = ["tm"] + cpu_info = utils.dumps(t) try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -567,18 +532,19 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn.compare_cpu, cpu_info) - self.mox.UnsetStubs() def test_compare_cpu_no_compatibility(self): - """libvirt.compare_cpu() return less than 0.(no compatibility)""" - - t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ - """"topology":{"cores":"%s", "threads":"%s", """ - """"sockets":"%s"}, "features":[%s]}""") - cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') + """Libvirt.compare_cpu() return less than 0.(no compatibility).""" + t = {} + t['arch'] = 'x86' + t['model'] = 'model' + t['vendor'] = 'Intel' + t['topology'] = {'cores': "2", "threads": "1", "sockets": "4"} + t['features'] = ["tm"] + cpu_info = utils.dumps(t) try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -590,16 +556,14 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertRaises(exception.Invalid, conn.compare_cpu, cpu_info) - self.mox.UnsetStubs() def test_ensure_filtering_rules_for_instance_works_correctly(self): - """ensure_filtering_rules_for_instance works as expected correctly""" - + """ensure_filtering_rules_for_instance() works successfully.""" instance_ref = models.Instance() instance_ref.__setitem__('id', 1) try: - nwmock, fwmock = self._driver_dependent_test_setup() + nwmock, fwmock = self._driver_dependant_test_setup() except: return @@ -613,16 +577,14 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) conn.ensure_filtering_rules_for_instance(instance_ref) - self.mox.UnsetStubs() def test_ensure_filtering_rules_for_instance_timeout(self): - """ensure_filtering_fules_for_instance finishes with timeout""" - + """ensure_filtering_fules_for_instance() finishes with timeout.""" instance_ref = models.Instance() instance_ref.__setitem__('id', 1) try: - nwmock, fwmock = self._driver_dependent_test_setup() + nwmock, fwmock = self._driver_dependant_test_setup() except: return @@ -642,11 +604,9 @@ class LibvirtConnTestCase(test.TestCase): except exception.Error, e: c1 = (0 <= e.message.find('Timeout migrating for')) self.assertTrue(c1) - self.mox.UnsetStubs() def test_live_migration_works_correctly(self): - """_live_migration works as expected correctly """ - + """_live_migration() works as expected correctly.""" class dummyCall(object): f = None @@ -659,7 +619,7 @@ class LibvirtConnTestCase(test.TestCase): ctxt = context.get_admin_context() try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -681,13 +641,9 @@ class LibvirtConnTestCase(test.TestCase): # Not setting post_method/recover_method in this testcase. 
         ret = conn._live_migration(ctxt, i_ref, i_ref['host'], '', '')
         self.assertTrue(ret == None)
-        self.mox.UnsetStubs()
 
     def test_live_migration_raises_exception(self):
-        """
-        _live_migration raises exception, then this testcase confirms
-        recovered method is called.
-        """
+        """Confirms recover method is called when exceptions are raised."""
         i_ref = models.Instance()
         i_ref.__setitem__('id', 1)
         i_ref.__setitem__('host', 'dummy')
@@ -697,7 +653,7 @@ class LibvirtConnTestCase(test.TestCase):
             pass
 
         try:
-            nwmock, fwmock = self._driver_dependent_test_setup()
+            nwmock, fwmock = self._driver_dependant_test_setup()
         except:
             return
@@ -724,7 +680,6 @@ class LibvirtConnTestCase(test.TestCase):
                           conn._mlive_migration,
                           ctxt, instance_ref, dest,
                           '', dummy_recover_method)
-        self.mox.UnsetStubs()
 
     def tearDown(self):
         super(LibvirtConnTestCase, self).tearDown()

From 684b5a81fe9a1d97083344e3548821f86ed2df67 Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Wed, 23 Feb 2011 00:15:39 +0900
Subject: [PATCH 05/18] Fix tiny mistakes! (remove unnecessary comment, etc)

---
 nova/tests/test_scheduler.py | 2 +-
 nova/tests/test_volume.py    | 3 ---
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 30110684..47a6d0e8 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -258,10 +258,10 @@ class SimpleDriverTestCase(test.TestCase):
         inst['project_id'] = self.project.id
         inst['instance_type'] = 'm1.tiny'
         inst['mac_address'] = utils.generate_mac()
+        inst['vcpus'] = kwargs.get('vcpus', 1)
         inst['ami_launch_index'] = 0
         inst['availability_zone'] = kwargs.get('availability_zone', None)
         inst['host'] = kwargs.get('host', 'dummy')
-        inst['vcpus'] = kwargs.get('vcpus', 4)
         inst['memory_mb'] = kwargs.get('memory_mb', 20)
         inst['local_gb'] = kwargs.get('local_gb', 30)
         inst['launched_on'] = kwargs.get('launghed_on', 'dummy')
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index 6ae075ca..e8b4ceee 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -318,9 +318,6 @@ class ISCSITestCase(DriverTestCase):
             mountpoint = "/dev/sd" + chr((ord('b') + index))
             db.volume_attached(self.context, vol_ref['id'], self.instance_id,
                                mountpoint)
-            #iscsi_target = db.volume_allocate_iscsi_target(self.context,
-            #                                               vol_ref['id'],
-            #                                               vol_ref['host'])
             volume_id_list.append(vol_ref['id'])
 
         return volume_id_list

From f31e49ad7dab13f7c388135fc1c69e7066eb40db Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Wed, 23 Feb 2011 01:20:39 +0900
Subject: [PATCH 06/18] Fixed some docstrings

---
 nova/tests/test_scheduler.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 47a6d0e8..71e524bc 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -60,6 +60,7 @@ class SchedulerTestCase(test.TestCase):
         self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
 
     def _create_compute_service(self):
+        """Create compute-manager(ComputeService and Service record)."""
         ctxt = context.get_admin_context()
         dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
                'report_count': 0, 'availability_zone': 'dummyzone'}
@@ -150,7 +151,7 @@ class SchedulerTestCase(test.TestCase):
         db.service_destroy(ctxt, s_ref['id'])
 
     def test_show_host_resources_works_correctly(self):
-        """show_host_resources() works correctly as expected."""
+        """Show_host_resources() works correctly as expected."""
         scheduler = manager.SchedulerManager()
         ctxt = context.get_admin_context()

From 6ebe2a9cac23453d444c4820564621406dc81d1d Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Tue, 1 Mar 2011 18:32:57 +0900
Subject: [PATCH 07/18] test_compute is changed b/c the instance_types import
 was missing

---
 nova/tests/test_compute.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 3c88d186..2a18dd47 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -32,6 +32,7 @@ from nova import rpc
 from nova import test
 from nova import utils
 from nova.auth import manager
+from nova.compute import instance_types
 from nova.compute import manager as compute_manager
 from nova.compute import power_state
 from nova.db.sqlalchemy import models

From 9d9c0f7d2ca3b28c6c9163e315adc3ed501290e0 Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Tue, 1 Mar 2011 18:53:02 +0900
Subject: [PATCH 08/18] In the previous commit, I forgot to remove a conflict
 marker - fixed it.

---
 nova/tests/test_compute.py | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 75fbc932..3438719f 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -309,7 +309,13 @@ class ComputeTestCase(test.TestCase):
         self.compute.terminate_instance(self.context, instance_id)
 
-<<<<<<< TREE
+    def test_get_by_flavor_id(self):
+        type = instance_types.get_by_flavor_id(1)
+        self.assertEqual(type, 'm1.tiny')
+
+        type = instance_types.get_by_flavor_id("1")
+        self.assertEqual(type, 'm1.tiny')
+
     def _setup_other_managers(self):
         self.volume_manager = utils.import_object(FLAGS.volume_manager)
         self.network_manager = utils.import_object(FLAGS.network_manager)
         self.compute_driver = utils.import_object(FLAGS.compute_driver)
@@ -562,11 +568,3 @@ class ComputeTestCase(test.TestCase):
         db.instance_destroy(c, instance_id)
         db.volume_destroy(c, v_ref['id'])
         db.floating_ip_destroy(c, flo_addr)
-=======
-    def test_get_by_flavor_id(self):
-        type = instance_types.get_by_flavor_id(1)
-        self.assertEqual(type, 'm1.tiny')
-
-        type = instance_types.get_by_flavor_id("1")
-        self.assertEqual(type, 'm1.tiny')
->>>>>>> MERGE-SOURCE

From 7829c50f8392dc61971fb1cc26ee51ef88f563d1 Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Thu, 3 Mar 2011 22:54:11 +0900
Subject: [PATCH 09/18] Fixed based on reviewer's comments. Main changes are
 below.

1. Rename nova.compute.manager.ComputeManager.mktmpfile for better naming.
2. Several test cases in tests/test_virt.py are removed, because they only
   work in a libvirt environment. Only db-related test code remains.
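
For reference, the renamed check runs as a three-step RPC handshake between
the scheduler and the two compute hosts, which is what the updated
test_scheduler.py below expects. A minimal sketch of that flow, using
rpc.call() and db.queue_get_for() exactly as the tests stub them; the
wrapper function and its name are illustrative assumptions, not the patched
driver code itself:

    # Illustrative wrapper only: the method names and RPC message shapes
    # are taken from the test expectations below.
    from nova import db
    from nova import rpc

    def mounted_on_same_shared_storage(context, topic, src, dest):
        # 1. dest writes a test file on what it believes is the shared
        #    instances_path and returns the generated filename.
        filename = rpc.call(context,
                            db.queue_get_for(context, topic, dest),
                            {"method": 'create_shared_storage_test_file'})
        try:
            # 2. src looks for the same file; if it is visible on both
            #    hosts, the storage really is shared.
            rpc.call(context,
                     db.queue_get_for(context, topic, src),
                     {"method": 'check_shared_storage_test_file',
                      "args": {'filename': filename}})
        finally:
            # 3. dest always cleans the test file up afterwards.
            rpc.call(context,
                     db.queue_get_for(context, topic, dest),
                     {"method": 'cleanup_shared_storage_test_file',
                      "args": {'filename': filename}})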
--- nova/tests/test_scheduler.py | 14 +- nova/tests/test_virt.py | 371 +++++++---------------------------- 2 files changed, 82 insertions(+), 303 deletions(-) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index c4e4d148..62db42b1 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -661,7 +661,6 @@ class SimpleDriverTestCase(test.TestCase): self.scheduler.live_migration(self.context, FLAGS.compute_topic, instance_id=instance_id, dest=i_ref['host']) - self.mox.UnsetStubs() i_ref = db.instance_get(self.context, instance_id) self.assertTrue(i_ref['state_description'] == 'migrating') @@ -824,10 +823,15 @@ class SimpleDriverTestCase(test.TestCase): topic = FLAGS.compute_topic driver.rpc.call(mox.IgnoreArg(), db.queue_get_for(self.context, topic, dest), - {"method": 'mktmpfile'}).AndReturn(fpath) + {"method": 'create_shared_storage_test_file'}).AndReturn(fpath) driver.rpc.call(mox.IgnoreArg(), db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']), - {"method": 'confirm_tmpfile', "args": {'filename': fpath}}) + {"method": 'check_shared_storage_test_file', + "args": {'filename': fpath}}) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), topic, dest), + {"method": 'cleanup_shared_storage_test_file', + "args": {'filename': fpath}}) self.mox.ReplayAll() try: @@ -838,7 +842,6 @@ class SimpleDriverTestCase(test.TestCase): c = (e.message.find('does not exist') >= 0) self.assertTrue(c) - self.mox.UnsetStubs() db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -867,7 +870,6 @@ class SimpleDriverTestCase(test.TestCase): c = (e.message.find(_('Different hypervisor type')) >= 0) self.assertTrue(c) - self.mox.UnsetStubs() db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) @@ -898,7 +900,6 @@ class SimpleDriverTestCase(test.TestCase): c = (e.message.find(_('Older hypervisor version')) >= 0) self.assertTrue(c) - self.mox.UnsetStubs() db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) @@ -934,7 +935,6 @@ class SimpleDriverTestCase(test.TestCase): c = (e.message.find(_("doesn't have compatibility to")) >= 0) self.assertTrue(c) - self.mox.UnsetStubs() db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index f46b5950..17b80c29 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -14,7 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. -import libvirt import mox from xml.etree.ElementTree import fromstring as xml_to_tree @@ -60,6 +59,7 @@ class LibvirtConnTestCase(test.TestCase): admin=True) self.project = self.manager.create_project('fake', 'fake', 'fake') self.network = utils.import_object(FLAGS.network_manager) + self.context = context.get_admin_context() FLAGS.instances_path = '' self.call_libvirt_dependant_setup = False @@ -73,22 +73,52 @@ class LibvirtConnTestCase(test.TestCase): 'bridge': 'br101', 'instance_type': 'm1.small'} - def libvirt_dependant_setup(self): - """A setup method of LibvirtConnection dependent test.""" - # try to connect libvirt. if fail, skip test. 
- self.call_libvirt_dependant_setup = True - try: - libvirt.openReadOnly('qemu:///system') - except libvirt.libvirtError: - return - return libvirt_conn.get_connection(False) + def create_fake_libvirt_mock(self, **kwargs): + """Defining mocks for LibvirtConnection(libvirt is not used).""" - def libvirt_dependant_teardown(self): - """teardown method of LibvirtConnection dependent test.""" - if self.call_libvirt_dependant_setup: - libvirt_conn.libvirt = None - libvirt_conn.libxml2 = None - self.call_libvirt_dependant_setup = False + # A fake libvirt.virtConnect + class FakeLibvirtConnection(object): + def getVersion(self): + return 12003 + + def getType(self): + return 'qemu' + + def getCapabilities(self): + return 'qemu' + + def listDomainsID(self): + return [] + + def getCapabilitied(self): + return + + # A fake libvirt_conn.IptablesFirewallDriver + class FakeIptablesFirewallDriver(object): + def __init__(self, **kwargs): + pass + + # Creating mocks + fake = FakeLibvirtConnection() + fakeip = FakeIptablesFirewallDriver + # Customizing above fake if necessary + for key, val in kwargs.items(): + fake.__setattr__(key, val) + + # Inevitable mocks for libvirt_conn.LibvirtConnection + self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class') + libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') + libvirt_conn.LibvirtConnection._conn = fake + + def create_service(self, **kwargs): + service_ref = {'host': kwargs.get('host', 'dummy'), + 'binary': 'nova-compute', + 'topic': 'compute', + 'report_count': 0, + 'availability_zone': 'zone'} + + return db.service_create(context.get_admin_context(), service_ref) def test_xml_and_uri_no_ramdisk_no_kernel(self): instance_data = dict(self.test_instance) @@ -244,306 +274,55 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(uri, testuri) db.instance_destroy(user_context, instance_ref['id']) - def test_get_vcpu_used(self): - """Check if get_local_gb_total returns appropriate disk value.""" - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1, 2]) - vdmock = self.mox.CreateMock(libvirt.virDomain) - self.mox.StubOutWithMock(vdmock, "vcpus") - vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]]) - vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]]) - arg = mox.IgnoreArg() - libvirt_conn.LibvirtConnection._conn.lookupByID(arg).AndReturn(vdmock) - libvirt_conn.LibvirtConnection._conn.lookupByID(arg).AndReturn(vdmock) + def test_update_available_resource_works_correctly(self): + """Confirm compute_service table is updated successfully.""" + org_path = FLAGS.instances_path = '' + FLAGS.instances_path = '.' 
+ + service_ref = self.create_service(host='dummy') + self.create_fake_libvirt_mock() + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + 'get_cpu_info') + libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) - self.assertTrue(conn.get_vcpu_used() == 4) - - def test_get_cpu_info_inappropreate_xml(self): - """Raise exception if given xml is inappropriate.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - xml = """ - x86_64 - Nehalem - Intel - - - - - - - - - - - - - - - - - - """ - - self.mox.StubOutWithMock(conn._conn, 'getCapabilities') - conn._conn.getCapabilities().AndReturn(xml) - - self.mox.ReplayAll() - try: - conn.get_cpu_info() - except exception.Invalid, e: - c1 = (0 <= e.message.find('Invalid xml')) - self.assertTrue(c1) - - def test_get_cpu_info_inappropreate_xml2(self): - """Raise exception if given xml is inappropriate(topology tag).""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - xml = """ - x86_64 - Nehalem - Intel - - - - - - - - - - - - - - - - - """ - self.mox.StubOutWithMock(conn._conn, 'getCapabilities') - conn._conn.getCapabilities().AndReturn(xml) - - self.mox.ReplayAll() - try: - conn.get_cpu_info() - except exception.Invalid, e: - c1 = (0 <= e.message.find('Invalid xml: topology')) - self.assertTrue(c1) - - def test_update_available_resource_works_correctly(self): - """Confirm compute_service table is updated successfully.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - host = 'dummy' - zone = 'dummyzone' - ctxt = context.get_admin_context() - org_path = FLAGS.instances_path = '' - FLAGS.instances_path = '.' - - service_ref = db.service_create(ctxt, - {'host': host, - 'binary': 'nova-compute', - 'topic': 'compute', - 'report_count': 0, - 'availability_zone': zone}) - conn.update_available_resource(ctxt, host) - - service_ref = db.service_get(ctxt, service_ref['id']) - print service_ref['compute_service'] + conn.update_available_resource(self.context, 'dummy') + service_ref = db.service_get(self.context, service_ref['id']) compute_service = service_ref['compute_service'][0] + c1 = (compute_service['vcpus'] > 0) c2 = (compute_service['memory_mb'] > 0) c3 = (compute_service['local_gb'] > 0) - # vcpu_used is checked at test_get_vcpu_used. - c4 = (compute_service['memory_mb_used'] > 0) - c5 = (compute_service['local_gb_used'] > 0) - c6 = (len(compute_service['hypervisor_type']) > 0) - c7 = (compute_service['hypervisor_version'] > 0) + c4 = (compute_service['vcpus_used'] == 0) + c5 = (compute_service['memory_mb_used'] > 0) + c6 = (compute_service['local_gb_used'] > 0) + c7 = (len(compute_service['hypervisor_type']) > 0) + c8 = (compute_service['hypervisor_version'] > 0) - self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7) + self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7 and c8) - db.service_destroy(ctxt, service_ref['id']) + db.service_destroy(self.context, service_ref['id']) FLAGS.instances_path = org_path - def test_update_resource_info_raise_exception(self): + def test_update_resource_info_no_compute_record_found(self): """Raise exception if no recorde found on services table.""" - host = 'dummy' org_path = FLAGS.instances_path = '' FLAGS.instances_path = '.' 
- try: - conn = libvirt_conn.LibvirtConnection(False) - conn.update_available_resource(context.get_admin_context(), host) - except exception.Invalid, e: - msg = 'Cannot update compute manager specific info' - c1 = (0 <= e.message.find(msg)) - self.assertTrue(c1) + self.create_fake_libvirt_mock() + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertRaises(exception.Invalid, + conn.update_available_resource, + self.context, 'dummy') + FLAGS.instances_path = org_path - def test_compare_cpu_works_correctly(self): - """Calling libvirt.compute_cpu() and works correctly.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - host = 'dummy' - zone = 'dummyzone' - ctxt = context.get_admin_context() - org_path = FLAGS.instances_path = '' - FLAGS.instances_path = '.' - - service_ref = db.service_create(ctxt, - {'host': host, - 'binary': 'nova-compute', - 'topic': 'compute', - 'report_count': 0, - 'availability_zone': zone}) - conn.update_available_resource(ctxt, host) - service_ref = db.service_get(ctxt, service_ref['id']) - ret = conn.compare_cpu(service_ref['compute_service'][0]['cpu_info']) - self.assertTrue(ret == None) - - db.service_destroy(ctxt, service_ref['id']) - FLAGS.instances_path = org_path - - def test_compare_cpu_no_compatibility(self): - """Libvirt.compare_cpu() return less than 0.(no compatibility).""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - t = {} - t['arch'] = 'x86' - t['model'] = 'model' - t['vendor'] = 'Intel' - t['topology'] = {'cores': "2", "threads": "1", "sockets": "4"} - t['features'] = ["tm"] - cpu_info = utils.dumps(t) - self.mox.StubOutWithMock(conn._conn, 'compareCPU') - conn._conn.compareCPU(mox.IgnoreArg(), 0).AndReturn(0) - - self.mox.ReplayAll() - self.assertRaises(exception.Invalid, conn.compare_cpu, cpu_info) - - def test_ensure_filtering_rules_for_instance_works_correctly(self): - """ensure_filtering_rules_for_instance() works successfully.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - instance_ref = models.Instance() - instance_ref.__setitem__('id', 1) - fwdriver = conn.firewall_driver - - self.mox.StubOutWithMock(fwdriver, 'setup_basic_filtering') - fwdriver.setup_basic_filtering(instance_ref) - self.mox.StubOutWithMock(fwdriver, 'prepare_instance_filter') - fwdriver.prepare_instance_filter(instance_ref) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - n = 'nova-instance-%s' % instance_ref.name - conn._conn.nwfilterLookupByName(n) - - self.mox.ReplayAll() - conn.ensure_filtering_rules_for_instance(instance_ref) - - def test_ensure_filtering_rules_for_instance_timeout(self): - """ensure_filtering_fules_for_instance() finishes with timeout.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - instance_ref = models.Instance() - instance_ref.__setitem__('id', 1) - fwdriver = conn.firewall_driver - - self.mox.StubOutWithMock(fwdriver, 'setup_basic_filtering') - fwdriver.setup_basic_filtering(instance_ref) - self.mox.StubOutWithMock(fwdriver, 'prepare_instance_filter') - fwdriver.prepare_instance_filter(instance_ref) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - n = 'nova-instance-%s' % instance_ref.name - for i in range(FLAGS.live_migration_retry_count): - conn._conn.nwfilterLookupByName(n).\ - AndRaise(libvirt.libvirtError('ERR')) - - self.mox.ReplayAll() - try: - conn.ensure_filtering_rules_for_instance(instance_ref) - except exception.Error, e: - c1 = (0 <= e.message.find('Timeout migrating for')) - 
self.assertTrue(c1)
-
-    def test_live_migration_works_correctly(self):
-        """_live_migration() works as expected correctly."""
-        conn = self.libvirt_dependant_setup()
-        if not conn:
-            return
-
-        class dummyCall(object):
-            f = None
-
-            def start(self, interval=0, now=False):
-                pass
-
-        i_ref = models.Instance()
-        i_ref.__setitem__('id', 1)
-        ctxt = context.get_admin_context()
-
-        vdmock = self.mox.CreateMock(libvirt.virDomain)
-        self.mox.StubOutWithMock(vdmock, "migrateToURI")
-        vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
-                            mox.IgnoreArg(),
-                            None, FLAGS.live_migration_bandwidth).\
-                            AndReturn(None)
-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
-        conn._conn.lookupByName(i_ref.name).AndReturn(vdmock)
-        self.mox.StubOutWithMock(libvirt_conn.utils, 'LoopingCall')
-        libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall())
-
-        self.mox.ReplayAll()
-        # Nothing to do with setting post_method/recover_method or not.
-        ret = conn._live_migration(ctxt, i_ref, 'dest', '', '')
-        self.assertTrue(ret == None)
-
-    def test_live_migration_raises_exception(self):
-        """Confirms recover method is called when exceptions are raised."""
-        conn = self.libvirt_dependant_setup()
-        if not conn:
-            return
-
-        i_ref = models.Instance()
-        i_ref.__setitem__('id', 1)
-        ctxt = context.get_admin_context()
-
-        def dummy_recover_method(c, instance, host=None):
-            pass
-
-        vdmock = self.mox.CreateMock(libvirt.virDomain)
-        self.mox.StubOutWithMock(vdmock, "migrateToURI")
-        vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
-                            mox.IgnoreArg(),
-                            None, FLAGS.live_migration_bandwidth).\
-                            AndRaise(libvirt.libvirtError('ERR'))
-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
-        conn._conn.lookupByName(i_ref.name).AndReturn(vdmock)
-
-        self.mox.ReplayAll()
-        self.assertRaises(libvirt.libvirtError,
-                          conn._live_migration,
-                          ctxt, i_ref, 'dest',
-                          '', dummy_recover_method)
-
     def tearDown(self):
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
         super(LibvirtConnTestCase, self).tearDown()
-        self.libvirt_dependant_teardown()
 
 
 class IptablesFirewallTestCase(test.TestCase):

From 32ba88238a222dfc15937bccb89864899129857a Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Fri, 4 Mar 2011 01:09:21 +0900
Subject: [PATCH 10/18] Merged to trunk rev 757. Main changes are below.

1. Rename db table ComputeService -> ComputeNode.
2. The nova-manage option instance_type is reserved and we cannot use the
   option instance, so change instance -> vm.
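
For reference, after this rename a host's resources are recorded in the
ComputeNode table, one row per nova-compute service. A minimal sketch of
building such a record the way the updated tests below do; the service_id
link and the literal values are illustrative assumptions, only the key
names come from the tests:

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()
    s_ref = db.service_create(ctxt, {'host': 'dummy',
                                     'binary': 'nova-compute',
                                     'topic': 'compute',
                                     'report_count': 0,
                                     'availability_zone': 'dummyzone'})
    # Formerly db.compute_service_create(); the dict keys are unchanged
    # by the rename, only the table (and so the db API name) moves.
    db.compute_node_create(ctxt, {'service_id': s_ref['id'],  # assumed link
                                  'vcpus': 16,
                                  'memory_mb': 32,
                                  'local_gb': 100,
                                  'vcpus_used': 16,
                                  'memory_mb_used': 32,
                                  'local_gb_used': 10,
                                  'hypervisor_type': 'qemu',
                                  'hypervisor_version': 12003,
                                  'cpu_info': ''})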
--- bin/nova-manage | 4 ++-- nova/tests/test_scheduler.py | 16 ++++++++-------- nova/tests/test_virt.py | 20 ++++++++++---------- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index f41950cd..d782f602 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -546,7 +546,7 @@ class NetworkCommands(object): network.dns) -class InstanceCommands(object): +class VmCommands(object): """Class for mangaging VM instances.""" def live_migration(self, ec2_id, dest): @@ -831,7 +831,7 @@ CATEGORIES = [ ('fixed', FixedIpCommands), ('floating', FloatingIpCommands), ('network', NetworkCommands), - ('instance', InstanceCommands), + ('vm', VmCommands), ('service', ServiceCommands), ('log', LogCommands), ('db', DbCommands), diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 62db42b1..711b66af 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -60,7 +60,7 @@ class SchedulerTestCase(test.TestCase): self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') def _create_compute_service(self): - """Create compute-manager(ComputeService and Service record).""" + """Create compute-manager(ComputeNode and Service record).""" ctxt = context.get_admin_context() dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0, 'availability_zone': 'dummyzone'} @@ -71,7 +71,7 @@ class SchedulerTestCase(test.TestCase): 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, 'cpu_info': ''} - db.compute_service_create(ctxt, dic) + db.compute_node_create(ctxt, dic) return db.service_get(ctxt, s_ref['id']) @@ -144,8 +144,8 @@ class SchedulerTestCase(test.TestCase): # result checking c1 = ('resource' in result and 'usage' in result) - compute_service = s_ref['compute_service'][0] - c2 = self._dic_is_equal(result['resource'], compute_service) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) c3 = result['usage'] == {} self.assertTrue(c1 and c2 and c3) db.service_destroy(ctxt, s_ref['id']) @@ -163,8 +163,8 @@ class SchedulerTestCase(test.TestCase): result = scheduler.show_host_resources(ctxt, s_ref['host']) c1 = ('resource' in result and 'usage' in result) - compute_service = s_ref['compute_service'][0] - c2 = self._dic_is_equal(result['resource'], compute_service) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) c3 = result['usage'].keys() == ['p-01', 'p-02'] keys = ['vcpus', 'memory_mb', 'local_gb'] c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) @@ -301,7 +301,7 @@ class SimpleDriverTestCase(test.TestCase): dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) - db.compute_service_create(self.context, dic) + db.compute_node_create(self.context, dic) return db.service_get(self.context, s_ref['id']) def test_doesnt_report_disabled_hosts_as_up(self): @@ -923,7 +923,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), {"method": 'compare_cpu', - "args": {'cpu_info': s_ref2['compute_service'][0]['cpu_info']}}).\ + "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) self.mox.ReplayAll() diff --git 
a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 17b80c29..aac55a89 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -275,7 +275,7 @@ class LibvirtConnTestCase(test.TestCase):
         db.instance_destroy(user_context, instance_ref['id'])
 
     def test_update_available_resource_works_correctly(self):
-        """Confirm compute_service table is updated successfully."""
+        """Confirm compute_node table is updated successfully."""
         org_path = FLAGS.instances_path = ''
         FLAGS.instances_path = '.'
@@ -289,16 +289,16 @@ class LibvirtConnTestCase(test.TestCase):
         conn = libvirt_conn.LibvirtConnection(False)
         conn.update_available_resource(self.context, 'dummy')
         service_ref = db.service_get(self.context, service_ref['id'])
-        compute_service = service_ref['compute_service'][0]
+        compute_node = service_ref['compute_node'][0]
 
-        c1 = (compute_service['vcpus'] > 0)
-        c2 = (compute_service['memory_mb'] > 0)
-        c3 = (compute_service['local_gb'] > 0)
-        c4 = (compute_service['vcpus_used'] == 0)
-        c5 = (compute_service['memory_mb_used'] > 0)
-        c6 = (compute_service['local_gb_used'] > 0)
-        c7 = (len(compute_service['hypervisor_type']) > 0)
-        c8 = (compute_service['hypervisor_version'] > 0)
+        c1 = (compute_node['vcpus'] > 0)
+        c2 = (compute_node['memory_mb'] > 0)
+        c3 = (compute_node['local_gb'] > 0)
+        c4 = (compute_node['vcpus_used'] == 0)
+        c5 = (compute_node['memory_mb_used'] > 0)
+        c6 = (compute_node['local_gb_used'] > 0)
+        c7 = (len(compute_node['hypervisor_type']) > 0)
+        c8 = (compute_node['hypervisor_version'] > 0)
 
         self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7 and c8)

From ae0fbfe18ec42c446bbe4ed47b48e6f7751433e0 Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Sat, 5 Mar 2011 00:57:08 +0900
Subject: [PATCH 11/18] Fixed based on reviewer's comments. Main changes are
 below.

1. get_vcpu_total()/get_memory_mb()/get_memory_mb_used() are changed for
   users in non-Linux environments.
2. Test code added to test_virt.

---
 nova/tests/test_virt.py | 163 +++++++++++++++++++++++++++++++++-------
 1 file changed, 136 insertions(+), 27 deletions(-)

diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index aac55a89..5bb31659 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -15,6 +15,7 @@
 # under the License.
 
 import mox
+import sys
 from xml.etree.ElementTree import fromstring as xml_to_tree
 from xml.dom.minidom import parseString as xml_to_dom
@@ -27,11 +28,15 @@ from nova import test
 from nova import utils
 from nova.api.ec2 import cloud
 from nova.auth import manager
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
 from nova.db.sqlalchemy import models
 from nova.virt import libvirt_conn
 
+libvirt = None
 FLAGS = flags.FLAGS
 flags.DECLARE('instances_path', 'nova.compute.manager')
+flags.DECLARE('compute_driver', 'nova.compute.manager')
 
 
 class LibvirtConnTestCase(test.TestCase):
@@ -73,31 +78,36 @@ class LibvirtConnTestCase(test.TestCase):
                               'bridge': 'br101',
                               'instance_type': 'm1.small'}
 
+    def lazy_load_library_exists(self):
+        """Check if libvirt is available."""
+        # try to connect libvirt. if fail, skip test.
+ try: + import libvirt + import libxml2 + except ImportError: + return False + global libvirt + libvirt = __import__('libvirt') + libvirt_conn.libvirt = __import__('libvirt') + libvirt_conn.libxml2 = __import__('libxml2') + return True + def create_fake_libvirt_mock(self, **kwargs): """Defining mocks for LibvirtConnection(libvirt is not used).""" - # A fake libvirt.virtConnect + # A fake libvirt.virConnect class FakeLibvirtConnection(object): - def getVersion(self): - return 12003 - - def getType(self): - return 'qemu' - - def getCapabilities(self): - return 'qemu' - - def listDomainsID(self): - return [] - - def getCapabilitied(self): - return + pass # A fake libvirt_conn.IptablesFirewallDriver class FakeIptablesFirewallDriver(object): + def __init__(self, **kwargs): pass + def setattr(self, key, val): + self.__setattr__(key, val) + # Creating mocks fake = FakeLibvirtConnection() fakeip = FakeIptablesFirewallDriver @@ -274,33 +284,54 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(uri, testuri) db.instance_destroy(user_context, instance_ref['id']) - def test_update_available_resource_works_correctly(self): + def tes1t_update_available_resource_works_correctly(self): """Confirm compute_node table is updated successfully.""" org_path = FLAGS.instances_path = '' FLAGS.instances_path = '.' + # Prepare mocks + def getVersion(): + return 12003 + + def getType(): + return 'qemu' + + def listDomainsID(): + return [] + service_ref = self.create_service(host='dummy') - self.create_fake_libvirt_mock() + self.create_fake_libvirt_mock(getVersion=getVersion, + getType=getType, + listDomainsID=listDomainsID) self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, 'get_cpu_info') libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') + # Start test self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) conn.update_available_resource(self.context, 'dummy') service_ref = db.service_get(self.context, service_ref['id']) compute_node = service_ref['compute_node'][0] - c1 = (compute_node['vcpus'] > 0) - c2 = (compute_node['memory_mb'] > 0) - c3 = (compute_node['local_gb'] > 0) - c4 = (compute_node['vcpus_used'] == 0) - c5 = (compute_node['memory_mb_used'] > 0) - c6 = (compute_node['local_gb_used'] > 0) - c7 = (len(compute_node['hypervisor_type']) > 0) - c8 = (compute_node['hypervisor_version'] > 0) - - self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7 and c8) + if sys.platform.upper() == 'LINUX2': + self.assertTrue(compute_node['vcpus'] > 0) + self.assertTrue(compute_node['memory_mb'] > 0) + self.assertTrue(compute_node['local_gb'] > 0) + self.assertTrue(compute_node['vcpus_used'] == 0) + self.assertTrue(compute_node['memory_mb_used'] > 0) + self.assertTrue(compute_node['local_gb_used'] > 0) + self.assertTrue(len(compute_node['hypervisor_type']) > 0) + self.assertTrue(compute_node['hypervisor_version'] > 0) + else: + self.assertTrue(compute_node['vcpus'] > 0) + self.assertTrue(compute_node['memory_mb'] == 0) + self.assertTrue(compute_node['local_gb'] > 0) + self.assertTrue(compute_node['vcpus_used'] == 0) + self.assertTrue(compute_node['memory_mb_used'] == 0) + self.assertTrue(compute_node['local_gb_used'] > 0) + self.assertTrue(len(compute_node['hypervisor_type']) > 0) + self.assertTrue(compute_node['hypervisor_version'] > 0) db.service_destroy(self.context, service_ref['id']) FLAGS.instances_path = org_path @@ -319,6 +350,84 @@ class LibvirtConnTestCase(test.TestCase): FLAGS.instances_path = org_path + def 
test_ensure_filtering_rules_for_instance_timeout(self): + """ensure_filtering_fules_for_instance() finishes with timeout.""" + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Preparing mocks + def fake_none(self): + return + + def fake_raise(self): + raise libvirt.libvirtError('ERR') + + self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise) + instance_ref = db.instance_create(self.context, self.test_instance) + + # Start test + self.mox.ReplayAll() + try: + conn = libvirt_conn.LibvirtConnection(False) + conn.firewall_driver.setattr('setup_basic_filtering', fake_none) + conn.firewall_driver.setattr('prepare_instance_filter', fake_none) + conn.ensure_filtering_rules_for_instance(instance_ref) + except exception.Error, e: + c1 = (0 <= e.message.find('Timeout migrating for')) + self.assertTrue(c1) + + db.instance_destroy(self.context, instance_ref['id']) + + def test_live_migration_raises_exception(self): + """Confirms recover method is called when exceptions are raised.""" + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Preparing data + self.compute = utils.import_object(FLAGS.compute_manager) + instance_dict = {'host': 'fake', 'state': power_state.RUNNING, + 'state_description': 'running'} + instance_ref = db.instance_create(self.context, self.test_instance) + instance_ref = db.instance_update(self.context, instance_ref['id'], + instance_dict) + vol_dict = {'status': 'migrating', 'size': 1} + volume_ref = db.volume_create(self.context, vol_dict) + db.volume_attached(self.context, volume_ref['id'], instance_ref['id'], + '/dev/fake') + + # Preparing mocks + vdmock = self.mox.CreateMock(libvirt.virDomain) + self.mox.StubOutWithMock(vdmock, "migrateToURI") + vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest', + mox.IgnoreArg(), + None, FLAGS.live_migration_bandwidth).\ + AndRaise(libvirt.libvirtError('ERR')) + + def fake_lookup(instance_name): + if instance_name == instance_ref.name: + return vdmock + + self.create_fake_libvirt_mock(lookupByName=fake_lookup) + + # Start test + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertRaises(libvirt.libvirtError, + conn._live_migration, + self.context, instance_ref, 'dest', '', + self.compute.recover_live_migration) + + instance_ref = db.instance_get(self.context, instance_ref['id']) + self.assertTrue(instance_ref['state_description'] == 'running') + self.assertTrue(instance_ref['state'] == power_state.RUNNING) + volume_ref = db.volume_get(self.context, volume_ref['id']) + self.assertTrue(volume_ref['status'] == 'in-use') + + db.volume_destroy(self.context, volume_ref['id']) + db.instance_destroy(self.context, instance_ref['id']) + def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) From 3afc07abb418024a92eaee884b4fa205a592068b Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Sat, 5 Mar 2011 01:07:12 +0900 Subject: [PATCH 12/18] delete unnecessary DECLARE --- nova/tests/test_virt.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 5bb31659..7ea8c0fb 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -36,7 +36,6 @@ from nova.virt import libvirt_conn libvirt = None FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') -flags.DECLARE('compute_driver', 'nova.compute.manager') class LibvirtConnTestCase(test.TestCase): From 0c55c2ddbef4d0c41f702a60a2a21b0506f69c6d Mon Sep 17 00:00:00 2001 
From: Soren Hansen Date: Mon, 7 Mar 2011 15:12:26 +0100 Subject: [PATCH 13/18] Make "dhcpbridge init" output correctly formatted leases information. --- bin/nova-dhcpbridge | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 3dd9de36..7ef51feb 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -94,7 +94,7 @@ def init_leases(interface): """Get the list of hosts for an interface.""" ctxt = context.get_admin_context() network_ref = db.network_get_by_bridge(ctxt, interface) - return linux_net.get_dhcp_hosts(ctxt, network_ref['id']) + return linux_net.get_dhcp_leases(ctxt, network_ref['id']) def main(): From 12036e81acb2595074fe2dcca6eaadb492aa626c Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 10 Mar 2011 13:30:52 +0900 Subject: [PATCH 14/18] fixed based on reviewer's comment. --- bin/nova-manage | 8 ++++---- nova/tests/test_compute.py | 24 ++++++++++++------------ nova/tests/test_scheduler.py | 5 +++-- nova/tests/test_virt.py | 6 +++--- nova/tests/test_volume.py | 7 ++++--- 5 files changed, 26 insertions(+), 24 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index d782f602..f9e4fa8d 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -567,7 +567,7 @@ class VmCommands(object): if (FLAGS.volume_driver != 'nova.volume.driver.AOEDriver' and \ FLAGS.volume_driver != 'nova.volume.driver.ISCSIDriver'): msg = _("Support only AOEDriver and ISCSIDriver. Sorry!") - raise exception.Error(msg) + raise exception.Error(msg) rpc.call(ctxt, FLAGS.scheduler_topic, @@ -637,8 +637,8 @@ class ServiceCommands(object): "args": {"host": host}}) if type(result) != dict: - print 'Unexpected error occurs' - print '[Result]', result + print _('An unexpected error has occurred.') + print _('[Result]'), result else: cpu = result['resource']['vcpus'] mem = result['resource']['memory_mb'] @@ -667,7 +667,7 @@ class ServiceCommands(object): ctxt = context.get_admin_context() service_refs = db.service_get_all_by_host(ctxt, host) if len(service_refs) <= 0: - raise exception.Invalid(_('%s does not exists.') % host) + raise exception.Invalid(_('%s does not exist.') % host) service_refs = [s for s in service_refs if s['topic'] == 'compute'] if len(service_refs) <= 0: diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 85c2c948..71899ba9 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -89,14 +89,14 @@ class ComputeTestCase(test.TestCase): Use this when any testcase executed later than test_run_terminate """ vol1 = models.Volume() - vol1.__setitem__('id', 1) + vol1['id'] = 1 vol2 = models.Volume() - vol2.__setitem__('id', 2) + vol2['id'] = 2 instance_ref = models.Instance() - instance_ref.__setitem__('id', 1) - instance_ref.__setitem__('volumes', [vol1, vol2]) - instance_ref.__setitem__('hostname', 'i-00000001') - instance_ref.__setitem__('host', 'dummy') + instance_ref['id'] = 1 + instance_ref['volumes'] = [vol1, vol2] + instance_ref['hostname'] = 'i-00000001' + instance_ref['host'] = 'dummy' return instance_ref def test_create_instance_defaults_display_name(self): @@ -114,9 +114,9 @@ class ComputeTestCase(test.TestCase): """Make sure create associates security groups""" group = self._create_group() instance_ref = models.Instance() - instance_ref.__setitem__('id', 1) - instance_ref.__setitem__('volumes', [{'id': 1}, {'id': 2}]) - instance_ref.__setitem__('hostname', 'i-00000001') + instance_ref['id'] = 1 + instance_ref['volumes'] = [{'id': 1}, {'id': 2}] + instance_ref['hostname'] = 
'i-00000001' return instance_ref def test_create_instance_defaults_display_name(self): @@ -390,7 +390,7 @@ class ComputeTestCase(test.TestCase): def test_pre_live_migration_instance_has_no_volume(self): """Confirm log meg when instance doesn't mount any volumes.""" i_ref = self._get_dummy_instance() - i_ref.__setitem__('volumes', []) + i_ref['volumes'] = [] c = context.get_admin_context() self._setup_other_managers() @@ -501,7 +501,7 @@ class ComputeTestCase(test.TestCase): def test_live_migration_dest_raises_exception_no_volume(self): """Same as above test(input pattern is different) """ i_ref = self._get_dummy_instance() - i_ref.__setitem__('volumes', []) + i_ref['volumes'] = [] c = context.get_admin_context() topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) @@ -526,7 +526,7 @@ class ComputeTestCase(test.TestCase): def test_live_migration_works_correctly_no_volume(self): """Confirm live_migration() works as expected correctly.""" i_ref = self._get_dummy_instance() - i_ref.__setitem__('volumes', []) + i_ref['volumes'] = [] c = context.get_admin_context() topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 711b66af..8ac02c5a 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -119,7 +119,8 @@ class SchedulerTestCase(test.TestCase): try: scheduler.show_host_resources(ctxt, dest) except exception.NotFound, e: - c1 = (0 <= e.message.find('does not exist or not compute node')) + c1 = (e.message.find(_("does not exist or is not a " + "compute node.")) >= 0) self.assertTrue(c1) def _dic_is_equal(self, dic1, dic2, keys=None): @@ -786,7 +787,7 @@ class SimpleDriverTestCase(test.TestCase): i_ref, 'somewhere') except exception.NotEmpty, e: - c = (e.message.find('is not capable to migrate') >= 0) + c = (e.message.find('Unable to migrate') >= 0) self.assertTrue(c) db.instance_destroy(self.context, instance_id) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 7ea8c0fb..ee41ae73 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -283,7 +283,7 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(uri, testuri) db.instance_destroy(user_context, instance_ref['id']) - def tes1t_update_available_resource_works_correctly(self): + def test_update_available_resource_works_correctly(self): """Confirm compute_node table is updated successfully.""" org_path = FLAGS.instances_path = '' FLAGS.instances_path = '.' 
@@ -314,7 +314,7 @@ class LibvirtConnTestCase(test.TestCase):
         compute_node = service_ref['compute_node'][0]
 
         if sys.platform.upper() == 'LINUX2':
-            self.assertTrue(compute_node['vcpus'] > 0)
+            self.assertTrue(compute_node['vcpus'] >= 0)
             self.assertTrue(compute_node['memory_mb'] > 0)
             self.assertTrue(compute_node['local_gb'] > 0)
             self.assertTrue(compute_node['vcpus_used'] == 0)
@@ -323,7 +323,7 @@ class LibvirtConnTestCase(test.TestCase):
             self.assertTrue(len(compute_node['hypervisor_type']) > 0)
             self.assertTrue(compute_node['hypervisor_version'] > 0)
         else:
-            self.assertTrue(compute_node['vcpus'] > 0)
+            self.assertTrue(compute_node['vcpus'] >= 0)
             self.assertTrue(compute_node['memory_mb'] == 0)
             self.assertTrue(compute_node['local_gb'] > 0)
             self.assertTrue(compute_node['vcpus_used'] == 0)
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index e8b4ceee..d88e363d 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -284,9 +284,10 @@ class AOETestCase(DriverTestCase):
             self.volume.check_for_export(self.context, self.instance_id)
         except exception.ProcessExecutionError, e:
             volume_id = volume_id_list[0]
-            msg = _("""Cannot confirm exported volume id:%(volume_id)s."""
-                    """vblade process for e%(shelf_id)s.%(blade_id)s """
-                    """isn't running.""") % locals()
+            msg = _("Cannot confirm exported volume id:%(volume_id)s. "
+                    "vblade process for e%(shelf_id)s.%(blade_id)s "
+                    "isn't running.") % locals()
+
             msg_is_match = (0 <= e.message.find(msg))
             self.assertTrue(msg_is_match)

From 330fee95b4bea58c609c659b9fbb0185f3407025 Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Thu, 10 Mar 2011 21:13:07 +0100
Subject: [PATCH 15/18] Add basic test case.

---
 nova/tests/test_cloud.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index cf8ee7ef..db7c15ae 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -353,6 +353,18 @@ class CloudTestCase(test.TestCase):
         self.assertEqual('', img.metadata['description'])
         shutil.rmtree(pathdir)
 
+    def test_metadata_works_without_kernel_and_ramdisk(self):
+        inst = db.instance_create(self.context, {'host': self.compute.host,
+                                                 'vcpus': 2,
+                                                 'image_id': '123456',
+                                                 'user_data': ''})
+        fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
+        try:
+            self.cloud.get_metadata(fixed)
+        finally:
+            self.network.deallocate_fixed_ip(self.context, fixed)
+            db.instance_destroy(self.context, inst['id'])
+
     def test_update_of_instance_display_fields(self):
         inst = db.instance_create(self.context, {})
         ec2_id = ec2utils.id_to_ec2_id(inst['id'])

From 1c56220a4e28f396fdd090b8a236221e588a2784 Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Fri, 11 Mar 2011 17:55:28 +0100
Subject: [PATCH 16/18] Remove broken test.

At least this way, it'll actually fix the problem and be mergeable.

---
 nova/tests/test_cloud.py | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index db7c15ae..cf8ee7ef 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -353,18 +353,6 @@ class CloudTestCase(test.TestCase):
         self.assertEqual('', img.metadata['description'])
         shutil.rmtree(pathdir)
 
-    def test_metadata_works_without_kernel_and_ramdisk(self):
-        inst = db.instance_create(self.context, {'host': self.compute.host,
-                                                 'vcpus': 2,
-                                                 'image_id': '123456',
-                                                 'user_data': ''})
-        fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
-        try:
-            self.cloud.get_metadata(fixed)
-        finally:
-            self.network.deallocate_fixed_ip(self.context, fixed)
-            db.instance_destroy(self.context, inst['id'])
-
     def test_update_of_instance_display_fields(self):
         inst = db.instance_create(self.context, {})
         ec2_id = ec2utils.id_to_ec2_id(inst['id'])

From a00a7e7f405adb5b295b45bc73f8abc2cc817780 Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Mon, 14 Mar 2011 14:06:10 +0100
Subject: [PATCH 17/18] Add a unit test

---
 nova/tests/test_network.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index ce1c7721..b7a76be8 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -20,6 +20,7 @@ Unit Tests for network code
 """
 import IPy
 import os
+import time
 
 from nova import context
 from nova import db
@@ -29,6 +30,7 @@ from nova import log as logging
 from nova import test
 from nova import utils
 from nova.auth import manager
+from nova.network import linux_net
 
 FLAGS = flags.FLAGS
 LOG = logging.getLogger('nova.tests.network')
@@ -321,6 +323,31 @@ class NetworkTestCase(test.TestCase):
                          network['id'])
         self.assertEqual(ip_count, num_available_ips)
 
+    def test_dhcp_lease_output(self):
+        admin_ctxt = context.get_admin_context()
+        address = self._create_address(0, self.instance_id)
+        lease_ip(address)
+        network_ref = db.network_get_by_instance(admin_ctxt, self.instance_id)
+        leases = linux_net.get_dhcp_leases(context.get_admin_context(),
+                                           network_ref['id'])
+        for line in leases.split('\n'):
+            seconds, mac, ip, hostname, client_id = line.split(' ')
+            self.assertTrue(int(seconds) > time.time(), 'Lease expires in '
+                                                        'the past')
+            octets = mac.split(':')
+            self.assertEqual(len(octets), 6, "Wrong number of octets "
+                                             "in %s" % (mac,))
+            for octet in octets:
+                self.assertEqual(len(octet), 2, "Oddly sized octet: %s"
+                                                % (octet,))
+                # This will throw an exception if the octet is invalid
+                int(octet, 16)
+
+            # And this will raise an exception in case of an invalid IP
+            IPy.IP(ip)
+
+        release_ip(address)
+
 
 def is_allocated_in_project(address, project_id):
     """Returns true if address is in specified project"""

From a546b868c8c3ad3d72c508823ed2fa7685987440 Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Mon, 14 Mar 2011 21:10:11 +0100
Subject: [PATCH 18/18] Include cpuinfo.xml.template in tarball.

---
 MANIFEST.in | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MANIFEST.in b/MANIFEST.in
index 2ceed34f..bf30d154 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -25,6 +25,7 @@ include nova/db/sqlalchemy/migrate_repo/migrate.cfg
 include nova/db/sqlalchemy/migrate_repo/README
 include nova/virt/interfaces.template
 include nova/virt/libvirt*.xml.template
+include nova/virt/cpuinfo.xml.template
 include nova/tests/CA/
 include nova/tests/CA/cacert.pem
 include nova/tests/CA/private/