Cleanup and removal of unused code in scheduler unit tests

This removes unused scheduler test code: unnecessary stubs as well as fake
methods and classes that are no longer referenced.

Change-Id: Ife979bca1699fc06e5a40264ba3e8173b9e0143b
Author: Hans Lindgren
Committed: 2014-10-31 08:50:02 +01:00
parent a9a04eec29
commit f90885bacb
2 changed files with 0 additions and 242 deletions
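
For context on the stubbing style being cleaned up: the removed helpers rely on mox (for example, StubOutWithMock inside mox_host_manager_db_calls), while the tests that remain in the diff below use mock.patch.object, as in test_select_destination. The following is a minimal, self-contained sketch of that mock-based pattern; it is not part of this change, and FakeDBAPI plus the compute-node dicts are hypothetical stand-ins for nova.db and the COMPUTE_NODES fixture. It also uses the stdlib unittest.mock, whereas the Nova tests import the external mock library.

# Illustrative sketch only -- not part of this commit.  FakeDBAPI and
# FAKE_COMPUTE_NODES are stand-ins for nova.db and the COMPUTE_NODES fixture.
import unittest
from unittest import mock

FAKE_COMPUTE_NODES = [
    {'host': 'host1', 'free_ram_mb': 512},
    {'host': 'host2', 'free_ram_mb': 1024},
]


class FakeDBAPI(object):
    """Stand-in for the db module stubbed by mox_host_manager_db_calls."""

    def compute_node_get_all(self, context):
        raise NotImplementedError('patched out in tests')


class HostManagerPatchingExample(unittest.TestCase):
    def test_get_all_compute_nodes(self):
        db_api = FakeDBAPI()
        # mock.patch.object replaces the attribute for the duration of the
        # with block and restores it afterwards, so no module-level fake
        # class or mox.StubOutWithMock bookkeeping is needed.
        with mock.patch.object(db_api, 'compute_node_get_all',
                               return_value=FAKE_COMPUTE_NODES) as get_all:
            nodes = db_api.compute_node_get_all('fake-context')
        get_all.assert_called_once_with('fake-context')
        self.assertEqual(FAKE_COMPUTE_NODES, nodes)


if __name__ == '__main__':
    unittest.main()

Because the patch is scoped to the with block and call expectations are asserted explicitly, this style leaves nothing behind for a later cleanup like the one in this commit.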


@@ -16,13 +16,7 @@
Fakes For Scheduler tests.
"""
from mox3 import mox
from oslo_serialization import jsonutils
from nova.compute import vm_states
from nova import db
from nova import objects
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
NUMA_TOPOLOGY = objects.NUMATopology(
@@ -63,209 +57,9 @@ COMPUTE_NODES = [
host='fake'),
]
COMPUTE_NODES_METRICS = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=512, free_ram_mb=512, vcpus_used=1,
free_disk_gb=512, local_gb_used=0, updated_at=None,
service=dict(host='host1', disabled=False),
host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 512,
'timestamp': None,
'source': 'host1'
},
{'name': 'bar',
'value': 1.0,
'timestamp': None,
'source': 'host1'
},
])),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_gb=1024, local_gb_used=0, updated_at=None,
service=dict(host='host2', disabled=True),
host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 1024,
'timestamp': None,
'source': 'host2'
},
{'name': 'bar',
'value': 2.0,
'timestamp': None,
'source': 'host2'
},
])),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
free_disk_gb=3072, local_gb_used=0, updated_at=None,
service=dict(host='host3', disabled=False),
host='host3', hypervisor_hostname='node3', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 3072,
'timestamp': None,
'source': 'host3'
},
{'name': 'bar',
'value': 1.0,
'timestamp': None,
'source': 'host3'
},
])),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_gb=8192, local_gb_used=0, updated_at=None,
service=dict(host='host4', disabled=False),
host='host4', hypervisor_hostname='node4', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 8192,
'timestamp': None,
'source': 'host4'
},
{'name': 'bar',
'value': 0,
'timestamp': None,
'source': 'host4'
},
])),
dict(id=5, local_gb=768, memory_mb=768, vcpus=8,
disk_available_least=768, free_ram_mb=768, vcpus_used=0,
free_disk_gb=768, local_gb_used=0, updated_at=None,
service=dict(host='host5', disabled=False),
host='host5', hypervisor_hostname='node5', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 768,
'timestamp': None,
'source': 'host5'
},
{'name': 'bar',
'value': 0,
'timestamp': None,
'source': 'host5'
},
{'name': 'zot',
'value': 1,
'timestamp': None,
'source': 'host5'
},
])),
dict(id=6, local_gb=2048, memory_mb=2048, vcpus=8,
disk_available_least=2048, free_ram_mb=2048, vcpus_used=0,
free_disk_gb=2048, local_gb_used=0, updated_at=None,
service=dict(host='host6', disabled=False),
host='host6', hypervisor_hostname='node6', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
metrics=jsonutils.dumps([{'name': 'foo',
'value': 2048,
'timestamp': None,
'source': 'host6'
},
{'name': 'bar',
'value': 0,
'timestamp': None,
'source': 'host6'
},
{'name': 'zot',
'value': 2,
'timestamp': None,
'source': 'host6'
},
])),
]
INSTANCES = [
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
host='host1', node='node1'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
host='host2', node='node2'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
host='host2', node='node2'),
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host='host3', node='node3'),
# Broken host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host=None),
# No matching host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host='host5', node='node5'),
]
class FakeFilterScheduler(filter_scheduler.FilterScheduler):
def __init__(self, *args, **kwargs):
super(FakeFilterScheduler, self).__init__(*args, **kwargs)
self.host_manager = host_manager.HostManager()
class FakeHostManager(host_manager.HostManager):
"""host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0
host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536
host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072
host4: free_ram_mb=8192 free_disk_gb=8192
"""
def __init__(self):
super(FakeHostManager, self).__init__()
self.service_states = {
'host1': {
'compute': {'host_memory_free': 1073741824},
},
'host2': {
'compute': {'host_memory_free': 2147483648},
},
'host3': {
'compute': {'host_memory_free': 3221225472},
},
'host4': {
'compute': {'host_memory_free': 999999999},
},
}
class FakeHostState(host_manager.HostState):
def __init__(self, host, node, attribute_dict):
super(FakeHostState, self).__init__(host, node)
for (key, val) in attribute_dict.iteritems():
setattr(self, key, val)
class FakeInstance(object):
def __init__(self, context=None, params=None):
"""Create a test instance. Returns uuid."""
self.context = context
i = self._create_fake_instance(params=params)
self.uuid = i['uuid']
def _create_fake_instance(self, params=None):
"""Create a test instance."""
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type_id'] = 2
inst['ami_launch_index'] = 0
inst.update(params)
return db.instance_create(self.context, inst)
class FakeComputeAPI(object):
def create_db_entry_for_new_instance(self, *args, **kwargs):
pass
def mox_host_manager_db_calls(mock, context):
mock.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)


@@ -18,22 +18,14 @@ Tests For Scheduler
"""
import mock
from oslo_config import cfg
from nova.compute import api as compute_api
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova.scheduler import driver
from nova.scheduler import manager
from nova import servicegroup
from nova import test
from nova.tests.unit import fake_server_actions
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit.scheduler import fakes
CONF = cfg.CONF
class SchedulerManagerTestCase(test.NoDBTestCase):
@@ -46,7 +38,6 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
def setUp(self):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_cls_name)
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
@@ -59,16 +50,6 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
manager = self.manager
self.assertIsInstance(manager.driver, self.driver_cls)
def _mox_schedule_method_helper(self, method_name):
# Make sure the method exists that we're going to test call
def stub_method(*args, **kwargs):
pass
setattr(self.manager.driver, method_name, stub_method)
self.mox.StubOutWithMock(self.manager.driver,
method_name)
def test_select_destination(self):
with mock.patch.object(self.manager, 'select_destinations'
) as select_destinations:
@@ -97,23 +78,6 @@ class SchedulerTestCase(test.NoDBTestCase):
def setUp(self):
super(SchedulerTestCase, self).setUp()
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
def fake_show(meh, context, id, **kwargs):
if id:
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
'status': 'active',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
else:
raise exception.ImageNotFound(image_id=id)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.image_service = glance.get_default_image_service()
self.driver = self.driver_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'