Merge "Re-enable a lot of cinder scheduler tests"

Jenkins
2013-08-26 18:07:29 +00:00
committed by Gerrit Code Review
7 changed files with 44 additions and 101 deletions

cinder/scheduler/filter_scheduler.py

@@ -223,7 +223,7 @@ class FilterScheduler(driver.Scheduler):
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
return None
return []
LOG.debug(_("Filtered %s") % hosts)
# weighted_host = WeightedHost() ... the best
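The early return in the no-hosts branch changes from None to an empty list, so downstream code that iterates or weighs the filtered hosts sees an empty sequence instead of needing a separate None check. A minimal sketch of that calling pattern, with illustrative names rather than the actual cinder methods:

def schedule_sketch(host_manager, hosts, filter_properties):
    # Returning [] keeps the "no valid host" case iterable, so the
    # weighing step below simply yields nothing instead of failing on None.
    filtered = host_manager.get_filtered_hosts(hosts, filter_properties)
    if not filtered:
        return []
    # A real scheduler would weigh the filtered hosts here; sorting by an
    # assumed free_capacity_gb attribute stands in for that step.
    return sorted(filtered,
                  key=lambda h: getattr(h, 'free_capacity_gb', 0),
                  reverse=True)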

cinder/tests/scheduler/fakes.py

@@ -24,21 +24,6 @@ from cinder.scheduler import filter_scheduler
from cinder.scheduler import host_manager
VOLUME_SERVICES = [
dict(id=1, host='host1', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host3', topic='volume', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
dict(id=4, host='host4', topic='volume', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
# service on host5 is disabled
dict(id=5, host='host5', topic='volume', disabled=True,
availability_zone='zone4', updated_at=timeutils.utcnow()),
]
class FakeFilterScheduler(filter_scheduler.FilterScheduler):
def __init__(self, *args, **kwargs):
super(FakeFilterScheduler, self).__init__(*args, **kwargs)
@@ -79,5 +64,19 @@ class FakeHostState(host_manager.HostState):
def mox_host_manager_db_calls(mock, context):
mock.StubOutWithMock(db, 'service_get_all_by_topic')
services = [
dict(id=1, host='host1', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host3', topic='volume', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
dict(id=4, host='host4', topic='volume', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
# service on host5 is disabled
dict(id=5, host='host5', topic='volume', disabled=True,
availability_zone='zone4', updated_at=timeutils.utcnow()),
]
db.service_get_all_by_topic(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(VOLUME_SERVICES)
mox.IgnoreArg()).AndReturn(services)
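The helper now records its stub against a local services list instead of the module-level VOLUME_SERVICES constant removed in the hunk above. For readers unfamiliar with mox, the flow is stub, record, replay: StubOutWithMock swaps out db.service_get_all_by_topic, AndReturn records the expected call and its canned result, and the test calls ReplayAll() before exercising the scheduler. A self-contained sketch of that pattern with made-up names, not the cinder test code:

import mox


class FakeDB(object):
    def service_get_all_by_topic(self, context, topic):
        raise NotImplementedError('replaced by the stub during the test')


def demo_mox_pattern():
    m = mox.Mox()
    db = FakeDB()
    # Record phase: declare the expected call and its canned return value.
    m.StubOutWithMock(db, 'service_get_all_by_topic')
    db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn([{'host': 'host1'}])
    m.ReplayAll()
    # Replay phase: the code under test makes the recorded call.
    services = db.service_get_all_by_topic(None, 'volume')
    m.VerifyAll()
    m.UnsetStubs()
    return services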

cinder/tests/scheduler/test_capacity_weigher.py

@@ -48,8 +48,6 @@ class CapacityWeigherTestCase(test.TestCase):
self.mox.ResetAll()
return host_states
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts()
@@ -63,8 +61,6 @@ class CapacityWeigherTestCase(test.TestCase):
self.assertEqual(weighed_host.weight, 921.0)
self.assertEqual(weighed_host.obj.host, 'host1')
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_weight_multiplier1(self):
self.flags(capacity_weight_multiplier=-1.0)
hostinfo_list = self._get_all_hosts()
@@ -79,8 +75,6 @@ class CapacityWeigherTestCase(test.TestCase):
self.assertEqual(weighed_host.weight, -190.0)
self.assertEqual(weighed_host.obj.host, 'host4')
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_weight_multiplier2(self):
self.flags(capacity_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts()
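The expected weights follow from scaling each host's free space by capacity_weight_multiplier: a positive multiplier favours the host with the most free capacity (spreading), a negative one favours the fullest host (stacking). Reading the asserted weights above as free GiB times the multiplier is an assumption about the weigher, not its literal code; a toy illustration under that assumption:

def capacity_weight_sketch(free_capacity_gb, multiplier=1.0):
    # Assumed rule: weight is free space scaled by the configured multiplier.
    return free_capacity_gb * multiplier


free_gb = {'host1': 921.0, 'host4': 190.0}   # figures taken from the asserts above
spread_winner = max(free_gb, key=lambda h: capacity_weight_sketch(free_gb[h], 1.0))
stack_winner = max(free_gb, key=lambda h: capacity_weight_sketch(free_gb[h], -1.0))
# spread_winner == 'host1' (most free), stack_winner == 'host4' (least free)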

cinder/tests/scheduler/test_filter_scheduler.py

@@ -30,17 +30,11 @@ from cinder.tests.scheduler import test_scheduler
from cinder.tests import utils as test_utils
def fake_get_filtered_hosts(hosts, filter_properties):
return list(hosts)
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
driver_cls = filter_scheduler.FilterScheduler
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed (try setup.py develop')
def test_create_volume_no_hosts(self):
"""
Ensure empty hosts & child_zones result in NoValidHosts exception.
@@ -58,8 +52,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.NoValidHost, sched.schedule_create_volume,
fake_context, request_spec, {})
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed (try setup.py develop')
def test_create_volume_non_admin(self):
"""Test creating an instance locally using run_instance, passing
a non-admin context. DB actions should work.
@@ -85,29 +77,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
fake_context, request_spec, {})
self.assertTrue(self.was_admin)
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed (try setup.py develop')
def test_schedule_happy_day(self):
"""Make sure there's nothing glaringly wrong with _schedule()
by doing a happy day pass through.
"""
self.next_weight = 1.0
def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
@@ -129,8 +108,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.InvalidParameterValue,
fakes.FakeFilterScheduler)
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed (try setup.py develop')
def test_retry_disabled(self):
# Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=1)
@@ -147,8 +124,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# should not have retry info in the populated filter properties:
self.assertFalse("retry" in filter_properties)
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed (try setup.py develop')
def test_retry_attempt_one(self):
# Test retry logic on initial scheduling attempt.
self.flags(scheduler_max_attempts=2)
@@ -165,8 +140,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(1, num_attempts)
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed (try setup.py develop')
def test_retry_attempt_two(self):
# Test retry logic when re-scheduling.
self.flags(scheduler_max_attempts=2)
@@ -229,29 +202,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(1024, host_state.total_capacity_gb)
def _host_passes_filters_setup(self):
self.next_weight = 1.0
def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
self.mox.ReplayAll()
return (sched, fake_context)
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed (try setup.py develop')
def test_host_passes_filters_happy_day(self):
"""Do a successful pass through of with host_passes_filters()."""
sched, ctx = self._host_passes_filters_setup()
@@ -262,8 +222,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
ret_host = sched.host_passes_filters(ctx, 'host1', request_spec, {})
self.assertEqual(ret_host.host, 'host1')
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed (try setup.py develop')
def test_host_passes_filters_no_capacity(self):
"""Fail the host due to insufficient capacity."""
sched, ctx = self._host_passes_filters_setup()
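The retry tests above turn on one question: does _schedule put a retry dict into filter_properties? With scheduler_max_attempts=1 re-scheduling is off and no retry key appears; with 2, the first pass records num_attempts=1 and a re-schedule bumps it. A rough sketch of that bookkeeping, mirroring the assertions rather than the real method:

def populate_retry_sketch(filter_properties, max_attempts):
    # Assumed behaviour: when only one attempt is allowed, re-scheduling
    # is disabled and no retry info is tracked.
    if max_attempts == 1:
        return filter_properties
    retry = filter_properties.setdefault('retry',
                                         {'num_attempts': 0, 'hosts': []})
    retry['num_attempts'] += 1
    return filter_properties


# test_retry_disabled: no retry info when max_attempts == 1
assert 'retry' not in populate_retry_sketch({}, max_attempts=1)
# test_retry_attempt_one: the first scheduling pass records one attempt
assert populate_retry_sketch({}, max_attempts=2)['retry']['num_attempts'] == 1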

cinder/tests/scheduler/test_host_filters.py

@@ -78,8 +78,6 @@ class HostFiltersTestCase(test.TestCase):
return ret_value
self.stubs.Set(utils, 'service_is_up', fake_service_is_up)
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
@@ -91,8 +89,6 @@ class HostFiltersTestCase(test.TestCase):
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_fails(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
@@ -105,8 +101,6 @@ class HostFiltersTestCase(test.TestCase):
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_passes_infinite(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
@@ -118,8 +112,6 @@ class HostFiltersTestCase(test.TestCase):
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_passes_unknown(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
@@ -131,8 +123,6 @@ class HostFiltersTestCase(test.TestCase):
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_retry_filter_disabled(self):
# Test case where retry/re-scheduling is disabled.
filt_cls = self.class_map['RetryFilter']()
@@ -140,8 +130,6 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_retry_filter_pass(self):
# Node not previously tried.
filt_cls = self.class_map['RetryFilter']()
@@ -150,8 +138,6 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@testtools.skipIf(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_retry_filter_fail(self):
# Node was already tried.
filt_cls = self.class_map['RetryFilter']()
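The CapacityFilter cases above exercise one rule: the host passes when it reports enough free space for the requested volume, and capacities reported as 'infinite' or 'unknown' are given the benefit of the doubt. A small approximation of that check, not the filter's exact code:

def capacity_filter_passes(free_capacity_gb, requested_size_gb):
    # Assumed rule matching the passes / fails / passes_infinite /
    # passes_unknown cases above.
    if free_capacity_gb in ('infinite', 'unknown'):
        return True
    return free_capacity_gb >= requested_size_gb


assert capacity_filter_passes(200, 100)       # enough free space
assert not capacity_filter_passes(50, 100)    # too little free space
assert capacity_filter_passes('infinite', 100)
assert capacity_filter_passes('unknown', 100)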

cinder/tests/scheduler/test_host_manager.py

@@ -143,26 +143,39 @@ class HostManagerTestCase(test.TestCase):
self.mox.StubOutWithMock(host_manager.LOG, 'warn')
self.mox.StubOutWithMock(host_manager.utils, 'service_is_up')
ret_services = fakes.VOLUME_SERVICES
db.service_get_all_by_topic(context, topic).AndReturn(ret_services)
host_manager.utils.service_is_up(ret_services[0]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[1]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[2]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[3]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[4]).AndReturn(True)
services = [
dict(id=1, host='host1', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host3', topic='volume', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
dict(id=4, host='host4', topic='volume', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
# service on host5 is disabled
dict(id=5, host='host5', topic='volume', disabled=True,
availability_zone='zone4', updated_at=timeutils.utcnow()),
]
db.service_get_all_by_topic(context, topic).AndReturn(services)
host_manager.utils.service_is_up(services[0]).AndReturn(True)
host_manager.utils.service_is_up(services[1]).AndReturn(True)
host_manager.utils.service_is_up(services[2]).AndReturn(True)
host_manager.utils.service_is_up(services[3]).AndReturn(True)
host_manager.utils.service_is_up(services[4]).AndReturn(True)
# Disabled service
host_manager.LOG.warn("volume service is down or disabled. "
"(host: host5)")
db.service_get_all_by_topic(context, topic).AndReturn(ret_services)
host_manager.utils.service_is_up(ret_services[0]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[1]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[2]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[3]).AndReturn(False)
db.service_get_all_by_topic(context, topic).AndReturn(services)
host_manager.utils.service_is_up(services[0]).AndReturn(True)
host_manager.utils.service_is_up(services[1]).AndReturn(True)
host_manager.utils.service_is_up(services[2]).AndReturn(True)
host_manager.utils.service_is_up(services[3]).AndReturn(False)
# Stopped service
host_manager.LOG.warn("volume service is down or disabled. "
"(host: host4)")
host_manager.utils.service_is_up(ret_services[4]).AndReturn(True)
host_manager.utils.service_is_up(services[4]).AndReturn(True)
# Disabled service
host_manager.LOG.warn("volume service is down or disabled. "
"(host: host5)")
@@ -174,7 +187,7 @@ class HostManagerTestCase(test.TestCase):
self.assertEqual(len(host_state_map), 4)
# Check that service is up
for i in xrange(4):
volume_node = fakes.VOLUME_SERVICES[i]
volume_node = services[i]
host = volume_node['host']
self.assertEqual(host_state_map[host].service,
volume_node)
@@ -184,7 +197,7 @@ class HostManagerTestCase(test.TestCase):
self.assertEqual(len(host_state_map), 3)
for i in xrange(3):
volume_node = fakes.VOLUME_SERVICES[i]
volume_node = services[i]
host = volume_node['host']
self.assertEqual(host_state_map[host].service,
volume_node)
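get_all_host_states keeps only hosts whose volume service is both enabled and up, which is why the disabled host5 is warned about in both rounds and host4 drops out once service_is_up returns False, leaving 4 and then 3 entries in host_state_map. The liveness check itself is stubbed in this test; in OpenStack it is typically a heartbeat-age comparison, sketched below on that assumption rather than from the cinder utils code:

import datetime


def service_is_up_sketch(service, service_down_time=60):
    # Assumed heartbeat rule: a service counts as up when its last
    # updated_at timestamp is recent enough.
    elapsed = datetime.datetime.utcnow() - service['updated_at']
    return elapsed.total_seconds() <= service_down_time


def usable_hosts_sketch(services):
    # Mirrors the test expectation: skip disabled or stale services.
    return [s['host'] for s in services
            if not s['disabled'] and service_is_up_sketch(s)]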

cinder/tests/utils.py

@@ -26,13 +26,6 @@ def get_test_admin_context():
return context.get_admin_context()
def is_cinder_installed():
if os.path.exists('../../cinder.cinder.egg-info'):
return True
else:
return False
def create_volume(ctxt,
host='test_host',
display_name='test_volume',