Merged to trunk rev 757. Main changes are below.
1. Rename the db table ComputeService -> ComputeNode.
2. In nova-manage, the option name instance_type is reserved, so the option instance cannot be used; rename instance -> vm.
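For reference, a minimal sketch of what call sites look like after the rename (the API names come from the hunks below; the service_id link and the concrete field values are illustrative assumptions, not part of this commit):

from nova import context
from nova import db

ctxt = context.get_admin_context()

# The Service table is untouched; only the per-host resource table and its
# accessors are renamed.
s_ref = db.service_create(ctxt, {'host': 'dummy', 'binary': 'nova-compute',
                                 'topic': 'compute', 'report_count': 0,
                                 'availability_zone': 'dummyzone'})

# Old: db.compute_service_create(ctxt, dic)
# New: db.compute_node_create(ctxt, dic)
db.compute_node_create(ctxt, {'service_id': s_ref['id'],  # assumed FK column
                              'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
                              'vcpus_used': 16, 'memory_mb_used': 32,
                              'local_gb_used': 10, 'hypervisor_type': 'qemu',
                              'hypervisor_version': 12003, 'cpu_info': ''})

# The backref on the service record is renamed as well:
# s_ref['compute_service'][0] -> s_ref['compute_node'][0]
compute_node = db.service_get(ctxt, s_ref['id'])['compute_node'][0]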
@@ -546,7 +546,7 @@ class NetworkCommands(object):
                                                   network.dns)


-class InstanceCommands(object):
+class VmCommands(object):
     """Class for mangaging VM instances."""

     def live_migration(self, ec2_id, dest):
@@ -831,7 +831,7 @@ CATEGORIES = [
     ('fixed', FixedIpCommands),
     ('floating', FloatingIpCommands),
     ('network', NetworkCommands),
-    ('instance', InstanceCommands),
+    ('vm', VmCommands),
     ('service', ServiceCommands),
     ('log', LogCommands),
     ('db', DbCommands),
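As a usage note, here is a simplified, hedged sketch of how the renamed category is dispatched, so the old "nova-manage instance live_migration ..." spelling becomes "nova-manage vm live_migration <ec2_id> <dest>" (the real bin/nova-manage also does flag parsing, fuzzy matching of names, and help output, which are omitted here):

import sys


class VmCommands(object):
    """Stand-in for the VmCommands class shown in the first hunk."""

    def live_migration(self, ec2_id, dest):
        print('would live-migrate %s to %s' % (ec2_id, dest))


CATEGORIES = [('vm', VmCommands)]  # subset of the list shown above


def main():
    # nova-manage <category> <action> [args...]: the first argument selects a
    # class from CATEGORIES, the second a method on it, and the rest become
    # positional arguments.
    category, action = sys.argv[1], sys.argv[2]
    command_class = dict(CATEGORIES)[category]       # 'vm' -> VmCommands
    getattr(command_class(), action)(*sys.argv[3:])  # live_migration(ec2_id, dest)


if __name__ == '__main__':
    main()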
@@ -60,7 +60,7 @@ class SchedulerTestCase(test.TestCase):
         self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')

     def _create_compute_service(self):
-        """Create compute-manager(ComputeService and Service record)."""
+        """Create compute-manager(ComputeNode and Service record)."""
         ctxt = context.get_admin_context()
         dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
                'report_count': 0, 'availability_zone': 'dummyzone'}
@@ -71,7 +71,7 @@ class SchedulerTestCase(test.TestCase):
                'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
                'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
                'cpu_info': ''}
-        db.compute_service_create(ctxt, dic)
+        db.compute_node_create(ctxt, dic)

         return db.service_get(ctxt, s_ref['id'])

@@ -144,8 +144,8 @@ class SchedulerTestCase(test.TestCase):

         # result checking
         c1 = ('resource' in result and 'usage' in result)
-        compute_service = s_ref['compute_service'][0]
-        c2 = self._dic_is_equal(result['resource'], compute_service)
+        compute_node = s_ref['compute_node'][0]
+        c2 = self._dic_is_equal(result['resource'], compute_node)
         c3 = result['usage'] == {}
         self.assertTrue(c1 and c2 and c3)
         db.service_destroy(ctxt, s_ref['id'])
@@ -163,8 +163,8 @@ class SchedulerTestCase(test.TestCase):
         result = scheduler.show_host_resources(ctxt, s_ref['host'])

         c1 = ('resource' in result and 'usage' in result)
-        compute_service = s_ref['compute_service'][0]
-        c2 = self._dic_is_equal(result['resource'], compute_service)
+        compute_node = s_ref['compute_node'][0]
+        c2 = self._dic_is_equal(result['resource'], compute_node)
         c3 = result['usage'].keys() == ['p-01', 'p-02']
         keys = ['vcpus', 'memory_mb', 'local_gb']
         c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys)
@@ -301,7 +301,7 @@ class SimpleDriverTestCase(test.TestCase):
         dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)
         dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')
         dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)
-        db.compute_service_create(self.context, dic)
+        db.compute_node_create(self.context, dic)
         return db.service_get(self.context, s_ref['id'])

     def test_doesnt_report_disabled_hosts_as_up(self):
@@ -923,7 +923,7 @@ class SimpleDriverTestCase(test.TestCase):
         self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
         rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
                  {"method": 'compare_cpu',
-                  "args": {'cpu_info': s_ref2['compute_service'][0]['cpu_info']}}).\
+                  "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
                  AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))

         self.mox.ReplayAll()
@@ -275,7 +275,7 @@ class LibvirtConnTestCase(test.TestCase):
         db.instance_destroy(user_context, instance_ref['id'])

     def test_update_available_resource_works_correctly(self):
-        """Confirm compute_service table is updated successfully."""
+        """Confirm compute_node table is updated successfully."""
         org_path = FLAGS.instances_path = ''
         FLAGS.instances_path = '.'

@@ -289,16 +289,16 @@ class LibvirtConnTestCase(test.TestCase):
         conn = libvirt_conn.LibvirtConnection(False)
         conn.update_available_resource(self.context, 'dummy')
         service_ref = db.service_get(self.context, service_ref['id'])
-        compute_service = service_ref['compute_service'][0]
+        compute_node = service_ref['compute_node'][0]

-        c1 = (compute_service['vcpus'] > 0)
-        c2 = (compute_service['memory_mb'] > 0)
-        c3 = (compute_service['local_gb'] > 0)
-        c4 = (compute_service['vcpus_used'] == 0)
-        c5 = (compute_service['memory_mb_used'] > 0)
-        c6 = (compute_service['local_gb_used'] > 0)
-        c7 = (len(compute_service['hypervisor_type']) > 0)
-        c8 = (compute_service['hypervisor_version'] > 0)
+        c1 = (compute_node['vcpus'] > 0)
+        c2 = (compute_node['memory_mb'] > 0)
+        c3 = (compute_node['local_gb'] > 0)
+        c4 = (compute_node['vcpus_used'] == 0)
+        c5 = (compute_node['memory_mb_used'] > 0)
+        c6 = (compute_node['local_gb_used'] > 0)
+        c7 = (len(compute_node['hypervisor_type']) > 0)
+        c8 = (compute_node['hypervisor_version'] > 0)

         self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7 and c8)
