Refactor test_scheduler into unit tests
Also adds a test for utils.service_is_up(), which used to live in the scheduler. Additionally fixes a minor live-migration bug spotted while writing these tests: the code checked the 'launched_on' host when it should check the instance's current host.

Change-Id: I964abb767d619afbd90da549b267d8a9d7c31fc1
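For context on that bug fix, a minimal sketch with hypothetical values: after a live migration, an instance's `launched_on` field still records the host it was first booted on, while `host` tracks where it currently runs, so the source-host lookup has to use the latter.

```python
from nova import db

# Hypothetical instance record after an earlier live migration:
instance_ref = {'launched_on': 'nodeA',  # host it was first booted on
                'host': 'nodeB'}         # host it is running on now

# Verify the *current* source host exists ('context' assumed in scope):
oservice_refs = db.service_get_all_compute_by_host(context,
                                                   instance_ref['host'])
```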
@@ -344,7 +344,7 @@ class Scheduler(object):
         # Checking original host( where instance was launched at) exists.
         try:
             oservice_refs = db.service_get_all_compute_by_host(context,
-                                           instance_ref['launched_on'])
+                                           instance_ref['host'])
         except exception.NotFound:
             raise exception.SourceHostUnavailable()
         oservice_ref = oservice_refs[0]['compute_node'][0]
@@ -406,7 +406,6 @@ class Scheduler(object):
         :param dest: destination host

         """
-
         # Getting total available memory of host
         avail = self._get_compute_info(context, dest, 'memory_mb')

@@ -466,7 +465,7 @@ class Scheduler(object):
                                                       instance_ref['host'])
             ret = rpc.call(context, topic,
                            {"method": 'get_instance_disk_info',
-                            "args": {'instance_name': instance_ref.name}})
+                            "args": {'instance_name': instance_ref['name']}})
             disk_infos = utils.loads(ret)
         except rpc.RemoteError:
             LOG.exception(_("host %(dest)s is not compatible with "
@@ -94,6 +94,11 @@ class FakeHostState(host_manager.HostState):
             setattr(self, key, val)


+class FakeComputeAPI(object):
+    def create_db_entry_for_new_instance(self, *args, **kwargs):
+        pass
+
+
 def mox_host_manager_db_calls(mox, context):
     mox.StubOutWithMock(db, 'compute_node_get_all')
     mox.StubOutWithMock(db, 'instance_get_all')
@@ -1,3 +1,5 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
 #
@@ -16,24 +18,29 @@
 Tests For Chance Scheduler.
 """

-from nova import test
+import random
+
+from nova import context
+from nova import exception
+from nova.scheduler import driver
 from nova.scheduler import chance
+from nova.tests.scheduler import test_scheduler


-class ChanceSchedulerTestCase(test.TestCase):
+class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
     """Test case for Chance Scheduler."""

-    def test_filter_hosts_avoid_matches(self):
+    driver_cls = chance.ChanceScheduler
+
+    def test_filter_hosts_avoid(self):
         """Test to make sure _filter_hosts() filters original hosts if
         avoid_original_host is True."""

-        sched = chance.ChanceScheduler()
-
         hosts = ['host1', 'host2', 'host3']
         request_spec = dict(instance_properties=dict(host='host2'))
         filter_properties = {'ignore_hosts': ['host2']}

-        filtered = sched._filter_hosts(request_spec, hosts,
+        filtered = self.driver._filter_hosts(request_spec, hosts,
                 filter_properties=filter_properties)
         self.assertEqual(filtered, ['host1', 'host3'])
@@ -41,12 +48,133 @@ class ChanceSchedulerTestCase(test.TestCase):
         """Test to make sure _filter_hosts() does not filter original
         hosts if avoid_original_host is False."""

-        sched = chance.ChanceScheduler()
-
         hosts = ['host1', 'host2', 'host3']
         request_spec = dict(instance_properties=dict(host='host2'))
         filter_properties = {'ignore_hosts': []}

-        filtered = sched._filter_hosts(request_spec, hosts,
+        filtered = self.driver._filter_hosts(request_spec, hosts,
                 filter_properties=filter_properties)
         self.assertEqual(filtered, hosts)

+    def test_basic_schedule_run_instance(self):
+        ctxt = context.RequestContext('fake', 'fake', False)
+        ctxt_elevated = 'fake-context-elevated'
+        fake_args = (1, 2, 3)
+        fake_kwargs = {'fake_kwarg1': 'fake_value1',
+                       'fake_kwarg2': 'fake_value2'}
+        instance_opts = {'fake_opt1': 'meow'}
+        request_spec = {'num_instances': 2,
+                        'instance_properties': instance_opts}
+        instance1 = {'uuid': 'fake-uuid1'}
+        instance2 = {'uuid': 'fake-uuid2'}
+        instance1_encoded = {'uuid': 'fake-uuid1', '_is_precooked': False}
+        instance2_encoded = {'uuid': 'fake-uuid2', '_is_precooked': False}
+
+        # create_instance_db_entry() usually does this, but we're
+        # stubbing it.
+        def _add_uuid1(ctxt, request_spec):
+            request_spec['instance_properties']['uuid'] = 'fake-uuid1'
+
+        def _add_uuid2(ctxt, request_spec):
+            request_spec['instance_properties']['uuid'] = 'fake-uuid2'
+
+        self.mox.StubOutWithMock(ctxt, 'elevated')
+        self.mox.StubOutWithMock(self.driver, 'hosts_up')
+        self.mox.StubOutWithMock(random, 'random')
+        self.mox.StubOutWithMock(self.driver, 'create_instance_db_entry')
+        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
+        self.mox.StubOutWithMock(driver, 'encode_instance')
+
+        ctxt.elevated().AndReturn(ctxt_elevated)
+        # instance 1
+        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
+                ['host1', 'host2', 'host3', 'host4'])
+        random.random().AndReturn(.5)
+        self.driver.create_instance_db_entry(ctxt,
+                request_spec).WithSideEffects(_add_uuid1).AndReturn(
+                instance1)
+        driver.cast_to_compute_host(ctxt, 'host3', 'run_instance',
+                instance_uuid=instance1['uuid'], **fake_kwargs)
+        driver.encode_instance(instance1).AndReturn(instance1_encoded)
+        # instance 2
+        ctxt.elevated().AndReturn(ctxt_elevated)
+        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
+                ['host1', 'host2', 'host3', 'host4'])
+        random.random().AndReturn(.2)
+        self.driver.create_instance_db_entry(ctxt,
+                request_spec).WithSideEffects(_add_uuid2).AndReturn(
+                instance2)
+        driver.cast_to_compute_host(ctxt, 'host1', 'run_instance',
+                instance_uuid=instance2['uuid'], **fake_kwargs)
+        driver.encode_instance(instance2).AndReturn(instance2_encoded)
+
+        self.mox.ReplayAll()
+        result = self.driver.schedule_run_instance(ctxt, request_spec,
+                *fake_args, **fake_kwargs)
+        expected = [instance1_encoded, instance2_encoded]
+        self.assertEqual(result, expected)
+
+    def test_basic_schedule_run_instance_no_hosts(self):
+        ctxt = context.RequestContext('fake', 'fake', False)
+        ctxt_elevated = 'fake-context-elevated'
+        fake_args = (1, 2, 3)
+        fake_kwargs = {'fake_kwarg1': 'fake_value1',
+                       'fake_kwarg2': 'fake_value2'}
+        instance_opts = 'fake_instance_opts'
+        request_spec = {'num_instances': 2,
+                        'instance_properties': instance_opts}
+
+        self.mox.StubOutWithMock(ctxt, 'elevated')
+        self.mox.StubOutWithMock(self.driver, 'hosts_up')
+
+        # instance 1
+        ctxt.elevated().AndReturn(ctxt_elevated)
+        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([])
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.NoValidHost,
+                self.driver.schedule_run_instance, ctxt, request_spec,
+                *fake_args, **fake_kwargs)
+
+    def test_basic_schedule_fallback(self):
+        ctxt = context.RequestContext('fake', 'fake', False)
+        ctxt_elevated = 'fake-context-elevated'
+        topic = 'fake_topic'
+        method = 'fake_method'
+        fake_args = (1, 2, 3)
+        fake_kwargs = {'fake_kwarg1': 'fake_value1',
+                       'fake_kwarg2': 'fake_value2'}
+
+        self.mox.StubOutWithMock(ctxt, 'elevated')
+        self.mox.StubOutWithMock(self.driver, 'hosts_up')
+        self.mox.StubOutWithMock(random, 'random')
+        self.mox.StubOutWithMock(driver, 'cast_to_host')
+
+        ctxt.elevated().AndReturn(ctxt_elevated)
+        self.driver.hosts_up(ctxt_elevated, topic).AndReturn(
+                ['host1', 'host2', 'host3', 'host4'])
+        random.random().AndReturn(.5)
+        driver.cast_to_host(ctxt, topic, 'host3', method, **fake_kwargs)
+
+        self.mox.ReplayAll()
+        self.driver.schedule(ctxt, topic, method, *fake_args, **fake_kwargs)
+
+    def test_basic_schedule_fallback_no_hosts(self):
+        ctxt = context.RequestContext('fake', 'fake', False)
+        ctxt_elevated = 'fake-context-elevated'
+        topic = 'fake_topic'
+        method = 'fake_method'
+        fake_args = (1, 2, 3)
+        fake_kwargs = {'fake_kwarg1': 'fake_value1',
+                       'fake_kwarg2': 'fake_value2'}
+
+        self.mox.StubOutWithMock(ctxt, 'elevated')
+        self.mox.StubOutWithMock(self.driver, 'hosts_up')
+
+        ctxt.elevated().AndReturn(ctxt_elevated)
+        self.driver.hosts_up(ctxt_elevated, topic).AndReturn([])
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.NoValidHost,
+                self.driver.schedule, ctxt, topic, method,
+                *fake_args, **fake_kwargs)
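The expected casts above ('host3' when random.random() returns .5, 'host1' for .2) follow from how ChanceScheduler presumably picks a host: scale the random float by the host count and index. A minimal sketch of that selection, consistent with the mox expectations (not the driver's literal code):

```python
import random


def pick_host(hosts):
    # With 4 hosts: 0.5 * 4 -> index 2 -> 'host3'; 0.2 * 4 -> index 0 -> 'host1'.
    return hosts[int(random.random() * len(hosts))]
```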
nova/tests/scheduler/test_multi_scheduler.py (new file, 107 lines)
@@ -0,0 +1,107 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Multi Scheduler
+"""
+
+from nova.scheduler import driver
+from nova.scheduler import multi
+from nova.tests.scheduler import test_scheduler
+
+
+class FakeComputeScheduler(driver.Scheduler):
+    is_fake_compute = True
+
+    def schedule_run_instance(self, *args, **kwargs):
+        pass
+
+    def schedule_start_instance(self, *args, **kwargs):
+        pass
+
+    def schedule(self, *args, **kwargs):
+        pass
+
+
+class FakeVolumeScheduler(driver.Scheduler):
+    is_fake_volume = True
+
+    def schedule_create_volume(self, *args, **kwargs):
+        pass
+
+    def schedule_create_volumes(self, *args, **kwargs):
+        pass
+
+    def schedule(self, *args, **kwargs):
+        pass
+
+
+class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
+    """Test case for multi driver"""
+
+    driver_cls = multi.MultiScheduler
+
+    def setUp(self):
+        super(MultiDriverTestCase, self).setUp()
+        base_name = 'nova.tests.scheduler.test_multi_scheduler.%s'
+        compute_cls_name = base_name % 'FakeComputeScheduler'
+        volume_cls_name = base_name % 'FakeVolumeScheduler'
+        self.flags(compute_scheduler_driver=compute_cls_name,
+                volume_scheduler_driver=volume_cls_name)
+        self._manager = multi.MultiScheduler()
+
+    def test_drivers_inited(self):
+        mgr = self._manager
+        self.assertEqual(len(mgr.drivers), 2)
+        self.assertTrue(mgr.drivers['compute'].is_fake_compute)
+        self.assertTrue(mgr.drivers['volume'].is_fake_volume)
+
+    def test_proxy_calls(self):
+        mgr = self._manager
+        compute_driver = mgr.drivers['compute']
+        volume_driver = mgr.drivers['volume']
+
+        test_methods = {compute_driver: ['run_instance', 'start_instance'],
+                        volume_driver: ['create_volume', 'create_volumes']}
+
+        for driver, methods in test_methods.iteritems():
+            for method in methods:
+                mgr_func = getattr(mgr, 'schedule_' + method)
+                driver_func = getattr(driver, 'schedule_' + method)
+                self.assertEqual(mgr_func, driver_func)
+
+    def test_schedule_fallback_proxy(self):
+        mgr = self._manager
+
+        self.mox.StubOutWithMock(mgr.drivers['compute'], 'schedule')
+        self.mox.StubOutWithMock(mgr.drivers['volume'], 'schedule')
+
+        ctxt = 'fake_context'
+        method = 'fake_method'
+        fake_args = (1, 2, 3)
+        fake_kwargs = {'fake_kwarg1': 'fake_value1',
+                       'fake_kwarg2': 'fake_value2'}
+
+        mgr.drivers['compute'].schedule(ctxt, 'compute', method,
+                *fake_args, **fake_kwargs)
+        mgr.drivers['volume'].schedule(ctxt, 'volume', method,
+                *fake_args, **fake_kwargs)
+
+        self.mox.ReplayAll()
+        mgr.schedule(ctxt, 'compute', method, *fake_args, **fake_kwargs)
+        mgr.schedule(ctxt, 'volume', method, *fake_args, **fake_kwargs)
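The three tests above pin down the shape of MultiScheduler: a drivers dict keyed by topic, schedule_* lookups that resolve to the sub-drivers' own bound methods (the bound-method equality check in test_proxy_calls), and a topic-dispatched schedule() fallback. A minimal sketch under those assumptions, not necessarily the driver's literal implementation:

```python
from nova import flags
from nova import utils
from nova.scheduler import driver

FLAGS = flags.FLAGS


class MultiScheduler(driver.Scheduler):
    """Sketch: one sub-driver per topic, schedule_* proxied through."""

    def __init__(self):
        super(MultiScheduler, self).__init__()
        self.drivers = {
            'compute': utils.import_object(FLAGS.compute_scheduler_driver),
            'volume': utils.import_object(FLAGS.volume_scheduler_driver)}

    def __getattr__(self, key):
        # Proxy schedule_run_instance, schedule_create_volume, etc. to
        # whichever sub-driver defines them, so getattr(mgr, name) is the
        # sub-driver's bound method, as test_proxy_calls expects.
        if not key.startswith('schedule_'):
            raise AttributeError(key)
        for subdriver in self.drivers.itervalues():
            if hasattr(subdriver, key):
                return getattr(subdriver, key)
        raise AttributeError(key)

    def schedule(self, context, topic, method, *args, **kwargs):
        # Fallback dispatch by topic, as test_schedule_fallback_proxy expects.
        return self.drivers[topic].schedule(context, topic, method,
                                            *args, **kwargs)
```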
[Diff for one file suppressed because it is too large.]
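The suppressed diff is presumably nova/tests/scheduler/test_scheduler.py, where the shared base class gains its driver_cls hook: the subclasses in this commit (chance, multi, VSA) set driver_cls and then use self.driver and self.context. A minimal sketch of that base, inferred from those usages (names and the admin-context choice are assumptions):

```python
from nova import context
from nova import test
from nova.scheduler import driver


class SchedulerTestCase(test.TestCase):
    """Base for scheduler driver tests; subclasses override driver_cls."""

    driver_cls = driver.Scheduler

    def setUp(self):
        super(SchedulerTestCase, self).setUp()
        self.driver = self.driver_cls()
        # Assumption: an admin context, since the VSA tests call
        # self.context.elevated().
        self.context = context.get_admin_context()
```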
@@ -13,18 +13,18 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import nova
-
 from nova import context
+from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import rpc
+from nova.scheduler import vsa as vsa_sched
 from nova import test
+from nova.tests.scheduler import test_scheduler
 from nova import utils
 from nova.volume import volume_types

-from nova.scheduler import vsa as vsa_sched
-
 FLAGS = flags.FLAGS
 LOG = logging.getLogger('nova.tests.scheduler.vsa')
@@ -50,7 +50,32 @@ class FakeVsaMostAvailCapacityScheduler(
     pass


-class VsaSchedulerTestCase(test.TestCase):
+class VsaSchedulerTestCase(test_scheduler.SchedulerTestCase):
+
+    driver_cls = FakeVsaLeastUsedScheduler
+
+    def setUp(self):
+        super(VsaSchedulerTestCase, self).setUp()
+
+        self.host_num = 10
+        self.drive_type_num = 5
+
+        self.stubs.Set(rpc, 'cast', fake_rpc_cast)
+        self.stubs.Set(self.driver,
+                '_get_service_states', self._fake_get_service_states)
+        self.stubs.Set(self.driver,
+                '_provision_volume', self._fake_provision_volume)
+        self.stubs.Set(db, 'vsa_update', self._fake_vsa_update)
+
+        self.stubs.Set(db, 'volume_get', self._fake_volume_get)
+        self.stubs.Set(db, 'volume_update', self._fake_volume_update)
+
+        self.created_types_lst = []
+
+    def tearDown(self):
+        for name in self.created_types_lst:
+            volume_types.purge(self.context.elevated(), name)
+        super(VsaSchedulerTestCase, self).tearDown()

     def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
         volume_params = []
@@ -58,7 +83,7 @@ class VsaSchedulerTestCase(test.TestCase):

             name = 'name_' + str(i)
             try:
-                volume_types.create(self.context, name,
+                volume_types.create(self.context.elevated(), name,
                         extra_specs={'type': 'vsa_drive',
                                      'drive_name': name,
                                      'drive_type': 'type_' + str(drive_ix),
@@ -205,35 +230,6 @@ class VsaSchedulerTestCase(test.TestCase):
     def _fake_service_is_up_False(self, service):
         return False

-    def setUp(self, sched_class=None):
-        super(VsaSchedulerTestCase, self).setUp()
-        self.context = context.get_admin_context()
-
-        if sched_class is None:
-            self.sched = FakeVsaLeastUsedScheduler()
-        else:
-            self.sched = sched_class
-
-        self.host_num = 10
-        self.drive_type_num = 5
-
-        self.stubs.Set(rpc, 'cast', fake_rpc_cast)
-        self.stubs.Set(self.sched,
-                '_get_service_states', self._fake_get_service_states)
-        self.stubs.Set(self.sched,
-                '_provision_volume', self._fake_provision_volume)
-        self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)
-
-        self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
-        self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
-
-        self.created_types_lst = []
-
-    def tearDown(self):
-        for name in self.created_types_lst:
-            volume_types.purge(self.context, name)
-        super(VsaSchedulerTestCase, self).tearDown()
-
     def test_vsa_sched_create_volumes_simple(self):
         global scheduled_volumes
         scheduled_volumes = []
@@ -245,7 +241,7 @@ class VsaSchedulerTestCase(test.TestCase):
         prev = self._generate_default_service_states()
         request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)

-        self.sched.schedule_create_volumes(self.context,
+        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)

@@ -271,7 +267,7 @@ class VsaSchedulerTestCase(test.TestCase):
                                             init_num_drives=1)
         request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
         self.assertRaises(exception.NoValidHost,
-                          self.sched.schedule_create_volumes,
+                          self.driver.schedule_create_volumes,
                           self.context,
                           request_spec,
                           availability_zone=None)
@@ -288,7 +284,7 @@ class VsaSchedulerTestCase(test.TestCase):
         request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)

         self.assertRaises(exception.NoValidHost,
-                          self.sched.schedule_create_volumes,
+                          self.driver.schedule_create_volumes,
                           self.context,
                           request_spec,
                           availability_zone=None)
@@ -311,7 +307,7 @@ class VsaSchedulerTestCase(test.TestCase):
         request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)

         self.assertRaises(exception.NoValidHost,
-                          self.sched.schedule_create_volumes,
+                          self.driver.schedule_create_volumes,
                           self.context,
                           request_spec,
                           availability_zone=None)
@@ -326,13 +322,13 @@ class VsaSchedulerTestCase(test.TestCase):
         request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)

         self.stubs.UnsetAll()
-        self.stubs.Set(self.sched,
+        self.stubs.Set(self.driver,
                 '_get_service_states', self._fake_get_service_states)
-        self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
-        self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+        self.stubs.Set(db, 'volume_create', self._fake_volume_create)
+        self.stubs.Set(db, 'volume_update', self._fake_volume_update)
         self.stubs.Set(rpc, 'cast', fake_rpc_cast)

-        self.sched.schedule_create_volumes(self.context,
+        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)

@@ -346,7 +342,7 @@ class VsaSchedulerTestCase(test.TestCase):
                                             init_num_drives=1)
         request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)

-        self.sched.schedule_create_volumes(self.context,
+        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)

@@ -356,13 +352,13 @@ class VsaSchedulerTestCase(test.TestCase):

         new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)

-        self.sched.schedule_create_volumes(self.context,
+        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)
         self._print_service_states()

         self.assertRaises(exception.NoValidHost,
-                          self.sched.schedule_create_volumes,
+                          self.driver.schedule_create_volumes,
                           self.context,
                           new_request,
                           availability_zone=None)
@@ -379,26 +375,26 @@ class VsaSchedulerTestCase(test.TestCase):
         request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)

         self.assertRaises(exception.HostBinaryNotFound,
-                          self.sched.schedule_create_volumes,
-                          self.context,
+                          self.driver.schedule_create_volumes,
+                          self.context.elevated(),
                           request_spec,
                           availability_zone="nova:host_5")

-        self.stubs.Set(nova.db,
+        self.stubs.Set(db,
                 'service_get_by_args', self._fake_service_get_by_args)
         self.stubs.Set(utils,
                 'service_is_up', self._fake_service_is_up_False)

         self.assertRaises(exception.WillNotSchedule,
-                          self.sched.schedule_create_volumes,
-                          self.context,
+                          self.driver.schedule_create_volumes,
+                          self.context.elevated(),
                           request_spec,
                           availability_zone="nova:host_5")

         self.stubs.Set(utils,
                 'service_is_up', self._fake_service_is_up_True)

-        self.sched.schedule_create_volumes(self.context,
+        self.driver.schedule_create_volumes(self.context.elevated(),
                                            request_spec,
                                            availability_zone="nova:host_5")
@@ -419,7 +415,7 @@ class VsaSchedulerTestCase(test.TestCase):
         request_spec = self._get_vol_creation_request(num_vols=3,
                                                       drive_ix=3,
                                                       size=50)
-        self.sched.schedule_create_volumes(self.context,
+        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)

@@ -459,13 +455,13 @@ class VsaSchedulerTestCase(test.TestCase):
             LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
             return {'id': volume_id, 'availability_zone': 'nova:host_3'}

-        self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
-        self.stubs.Set(nova.db,
-                'service_get_by_args', self._fake_service_get_by_args)
+        self.stubs.Set(db, 'volume_get', _fake_volume_get_az)
+        self.stubs.Set(db, 'service_get_by_args',
+                self._fake_service_get_by_args)
         self.stubs.Set(utils,
                 'service_is_up', self._fake_service_is_up_True)

-        self.sched.schedule_create_volume(self.context,
+        self.driver.schedule_create_volume(self.context.elevated(),
                 123, availability_zone=None)

         self.assertEqual(scheduled_volume['id'], 123)
@@ -480,10 +476,10 @@ class VsaSchedulerTestCase(test.TestCase):
         global_volume['volume_type_id'] = None

         self.assertRaises(exception.NoValidHost,
-                          self.sched.schedule_create_volume,
-                          self.context,
-                          123,
-                          availability_zone=None)
+                          self.driver.schedule_create_volume,
+                          self.context,
+                          123,
+                          availability_zone=None)

     def test_vsa_sched_create_single_volume(self):
         global scheduled_volume
@@ -500,7 +496,7 @@ class VsaSchedulerTestCase(test.TestCase):

         drive_ix = 2
         name = 'name_' + str(drive_ix)
-        volume_types.create(self.context, name,
+        volume_types.create(self.context.elevated(), name,
                 extra_specs={'type': 'vsa_drive',
                              'drive_name': name,
                              'drive_type': 'type_' + str(drive_ix),
@@ -511,7 +507,7 @@ class VsaSchedulerTestCase(test.TestCase):
         global_volume['volume_type_id'] = volume_type['id']
         global_volume['size'] = 0

-        self.sched.schedule_create_volume(self.context,
+        self.driver.schedule_create_volume(self.context,
                 123, availability_zone=None)

         self.assertEqual(scheduled_volume['id'], 123)
@@ -520,12 +516,7 @@ class VsaSchedulerTestCase(test.TestCase):

 class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):

-    def setUp(self):
-        super(VsaSchedulerTestCaseMostAvail, self).setUp(
-                FakeVsaMostAvailCapacityScheduler())
-
-    def tearDown(self):
-        super(VsaSchedulerTestCaseMostAvail, self).tearDown()
+    driver_cls = FakeVsaMostAvailCapacityScheduler

     def test_vsa_sched_create_single_volume(self):
         global scheduled_volume
@@ -542,7 +533,7 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):

         drive_ix = 2
         name = 'name_' + str(drive_ix)
-        volume_types.create(self.context, name,
+        volume_types.create(self.context.elevated(), name,
                 extra_specs={'type': 'vsa_drive',
                              'drive_name': name,
                              'drive_type': 'type_' + str(drive_ix),
@@ -553,7 +544,7 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
         global_volume['volume_type_id'] = volume_type['id']
         global_volume['size'] = 0

-        self.sched.schedule_create_volume(self.context,
+        self.driver.schedule_create_volume(self.context,
                 123, availability_zone=None)

         self.assertEqual(scheduled_volume['id'], 123)
@@ -572,7 +563,7 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):

         self._print_service_states()

-        self.sched.schedule_create_volumes(self.context,
+        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)

@@ -603,7 +594,7 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
         request_spec = self._get_vol_creation_request(num_vols=3,
                                                       drive_ix=3,
                                                       size=50)
-        self.sched.schedule_create_volumes(self.context,
+        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)

@@ -645,3 +645,37 @@ class DeprecationTest(test.TestCase):

         # Make sure that did *not* generate a warning
         self.assertEqual(self.warn, None)
+
+    def test_service_is_up(self):
+        fts_func = datetime.datetime.fromtimestamp
+        fake_now = 1000
+        down_time = 5
+
+        self.flags(service_down_time=down_time)
+        self.mox.StubOutWithMock(utils, 'utcnow')
+
+        # Up (equal)
+        utils.utcnow().AndReturn(fts_func(fake_now))
+        service = {'updated_at': fts_func(fake_now - down_time),
+                   'created_at': fts_func(fake_now - down_time)}
+        self.mox.ReplayAll()
+        result = utils.service_is_up(service)
+        self.assertTrue(result)
+
+        self.mox.ResetAll()
+        # Up
+        utils.utcnow().AndReturn(fts_func(fake_now))
+        service = {'updated_at': fts_func(fake_now - down_time + 1),
+                   'created_at': fts_func(fake_now - down_time + 1)}
+        self.mox.ReplayAll()
+        result = utils.service_is_up(service)
+        self.assertTrue(result)
+
+        self.mox.ResetAll()
+        # Down
+        utils.utcnow().AndReturn(fts_func(fake_now))
+        service = {'updated_at': fts_func(fake_now - down_time - 1),
+                   'created_at': fts_func(fake_now - down_time - 1)}
+        self.mox.ReplayAll()
+        result = utils.service_is_up(service)
+        self.assertFalse(result)
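The three cases above pin down the semantics of utils.service_is_up(): a service counts as up while its last heartbeat is at most service_down_time seconds old, with the boundary case counting as up. A minimal sketch of that behavior (the real helper lives in the nova utils module; this is not its literal code):

```python
import datetime

from nova import flags
from nova import utils

FLAGS = flags.FLAGS


def service_is_up(service):
    """Up iff the last heartbeat is no older than service_down_time."""
    last_heartbeat = service['updated_at'] or service['created_at']
    elapsed = utils.utcnow() - last_heartbeat
    return elapsed <= datetime.timedelta(seconds=FLAGS.service_down_time)
```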