Remove scheduler RPC API version 1.x

As with the compute RPC API, we're unlikely to still work well with
RPC clients using the older 1.x versions because of DB schema changes.

In that case, we may as well remove 1.x support in Folsom and rip out
the potentially buggy backwards compat code. This should also make
backporting fixes from Grizzly easier.
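
For context, the backwards compat code being ripped out is the usual
two-callback dispatcher pattern (see the create_rpc_dispatcher() hunk in
the manager diff below, which this commit deletes). A minimal, hedged
sketch of that pattern with stand-in names, not nova's actual
RpcDispatcher implementation:

# Hedged sketch only: stand-in classes approximating the 1.x/2.0 compat
# shim this commit deletes (nova's real dispatcher lives in
# nova.openstack.common.rpc.dispatcher; all names below are illustrative).

class StubDispatcher(object):
    """Route a call to the first callback whose major RPC_API_VERSION
    matches the version stamped on the incoming message."""

    def __init__(self, callbacks):
        self.callbacks = callbacks

    def dispatch(self, context, version, method, **kwargs):
        wanted = int(version.split('.')[0])
        for callback in self.callbacks:
            if int(callback.RPC_API_VERSION.split('.')[0]) == wanted:
                return getattr(callback, method)(context, **kwargs)
        raise RuntimeError('no callback supports RPC version %s' % version)

class V2Proxy(object):
    """2.0-only facade mapping the strict new signature onto the
    permissive 1.x manager method."""

    RPC_API_VERSION = '2.0'

    def __init__(self, manager):
        self.manager = manager

    def live_migration(self, context, instance, dest,
                       block_migration, disk_over_commit):
        return self.manager.live_migration(
            context, dest, instance=instance,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)

# Before this commit the manager advertised RPC_API_VERSION = '1.7' and
# paired itself with the proxy; afterwards the proxy is gone and the
# manager itself is 2.0-only.
def build_dispatcher(manager):
    return StubDispatcher([manager, V2Proxy(manager)])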

Deployers following trunk can upgrade all their nodes to use the version
2.0 API before deploying this commit.
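
At the RPC layer, "using the version 2.0 API" simply means the client
stamps its messages with version 2.0 and drops the kwargs that only
existed for 1.x callers (reservations, topic, instance_id, update_db).
A hypothetical, simplified client-side cast, not the actual
nova.scheduler.rpcapi code:

# Hypothetical sketch of a 2.0-pinned cast; rpc_cast stands in for
# nova's rpc.cast(context, topic, msg) and is passed in by the caller.

def run_instance_cast(rpc_cast, context, topic, request_spec,
                      admin_password, injected_files, requested_networks,
                      is_first_time, filter_properties):
    msg = {
        'method': 'run_instance',
        'version': '2.0',  # a scheduler node still on 1.x will reject this
        'args': {
            'request_spec': request_spec,
            'admin_password': admin_password,
            'injected_files': injected_files,
            'requested_networks': requested_networks,
            'is_first_time': is_first_time,
            'filter_properties': filter_properties,
            # no 'reservations' or 'topic': those were 1.x-only arguments
        },
    }
    rpc_cast(context, topic, msg)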

Change-Id: Iee099751bda9637da5e134357d28e89d5fba9895
Mark McLoughlin 2012-08-29 11:18:04 +01:00
parent 86e5aad160
commit d1ed5376aa
10 changed files with 86 additions and 664 deletions

View File

@@ -56,22 +56,11 @@ class ChanceScheduler(driver.Scheduler):
return hosts[int(random.random() * len(hosts))]
def schedule(self, context, topic, method, *_args, **kwargs):
"""Picks a host that is up at random."""
filter_properties = kwargs.get('filter_properties', {})
host = self._schedule(context, topic, None, filter_properties)
driver.cast_to_host(context, topic, host, method, **kwargs)
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties, reservations):
filter_properties):
"""Create and run an instance or instances"""
if 'instance_uuids' not in request_spec:
return self._legacy_schedule_run_instance(context, request_spec,
admin_password, injected_files, requested_networks,
is_first_time, filter_properties, reservations)
instances = []
instance_uuids = request_spec.get('instance_uuids')
for num, instance_uuid in enumerate(instance_uuids):
@@ -90,37 +79,9 @@ class ChanceScheduler(driver.Scheduler):
instances.append(driver.encode_instance(updated_instance))
return instances
def _legacy_schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties, reservations):
num_instances = request_spec.get('num_instances', 1)
instances = []
for num in xrange(num_instances):
host = self._schedule(context, 'compute', request_spec,
filter_properties)
request_spec['instance_properties']['launch_index'] = num
instance = self.create_instance_db_entry(context, request_spec,
reservations)
updated_instance = driver.instance_update_db(context,
instance['uuid'], host)
self.compute_rpcapi.run_instance(context,
instance=updated_instance, host=host,
requested_networks=requested_networks,
injected_files=injected_files,
admin_password=admin_password, is_first_time=is_first_time,
request_spec=request_spec,
filter_properties=filter_properties)
instances.append(driver.encode_instance(updated_instance))
# So if we loop around, create_instance_db_entry will actually
# create a new entry, instead of assume it's been created
# already
del request_spec['instance_properties']['uuid']
return instances
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations=None):
reservations):
"""Select a target for resize."""
host = self._schedule(context, 'compute', request_spec,
filter_properties)
@@ -129,6 +90,8 @@ class ChanceScheduler(driver.Scheduler):
def schedule_create_volume(self, context, volume_id, snapshot_id,
reservations):
self.schedule(context, FLAGS.volume_topic, 'create_volume',
volume_id=volume_id, snapshot_id=snapshot_id,
reservations=reservations)
"""Picks a host that is up at random."""
host = self._schedule(context, FLAGS.volume_topic, None, {})
driver.cast_to_host(context, FLAGS.volume_topic, host, 'create_volume',
volume_id=volume_id, snapshot_id=snapshot_id,
reservations=reservations)

View File

@@ -161,35 +161,9 @@ class Scheduler(object):
for service in services
if utils.service_is_up(service)]
def create_instance_db_entry(self, context, request_spec, reservations):
"""Create instance DB entry based on request_spec"""
base_options = request_spec['instance_properties']
if base_options.get('uuid'):
# Instance was already created before calling scheduler
return db.instance_get_by_uuid(context, base_options['uuid'])
image = request_spec['image']
instance_type = request_spec.get('instance_type')
security_group = request_spec.get('security_group', 'default')
block_device_mapping = request_spec.get('block_device_mapping', [])
instance = self.compute_api.create_db_entry_for_new_instance(
context, instance_type, image, base_options,
security_group, block_device_mapping)
if reservations:
quota.QUOTAS.commit(context, reservations)
# NOTE(comstud): This needs to be set for the generic exception
# checking in scheduler manager, so that it'll set this instance
# to ERROR properly.
base_options['uuid'] = instance['uuid']
return instance
def schedule(self, context, topic, method, *_args, **_kwargs):
"""Must override schedule method for scheduler to work."""
raise NotImplementedError(_("Must implement a fallback schedule"))
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations=None):
reservations):
"""Must override schedule_prep_resize method for scheduler to work."""
msg = _("Driver must implement schedule_prep_resize")
raise NotImplementedError(msg)
@@ -197,7 +171,7 @@ class Scheduler(object):
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties, reservations):
filter_properties):
"""Must override schedule_run_instance method for scheduler to work."""
msg = _("Driver must implement schedule_run_instance")
raise NotImplementedError(msg)
@@ -207,13 +181,11 @@ class Scheduler(object):
msg = _("Driver must implement schedule_create_volune")
raise NotImplementedError(msg)
def schedule_live_migration(self, context, dest,
block_migration=False, disk_over_commit=False,
instance=None, instance_id=None):
def schedule_live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
"""Live migration scheduling method.
:param context:
:param instance_id: (deprecated)
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, block_migration.
@@ -225,9 +197,6 @@
Then scheduler send request that host.
"""
# Check we can do live migration
if not instance:
instance = db.instance_get(context, instance_id)
self._live_migration_src_check(context, instance)
self._live_migration_dest_check(context, instance, dest)
self._live_migration_common_check(context, instance, dest)

View File

@@ -42,15 +42,6 @@ class FilterScheduler(driver.Scheduler):
self.cost_function_cache = {}
self.options = scheduler_options.SchedulerOptions()
def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one
best-suited host for this request.
NOTE: We're only focused on compute instances right now,
so this method will always raise NoValidHost()."""
msg = _("No host selection for %s defined.") % topic
raise exception.NoValidHost(reason=msg)
def schedule_create_volume(self, context, volume_id, snapshot_id,
reservations):
# NOTE: We're only focused on compute instances right now,
@@ -61,17 +52,13 @@ class FilterScheduler(driver.Scheduler):
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties, reservations):
filter_properties):
"""This method is called from nova.compute.api to provision
an instance. We first create a build plan (a list of WeightedHosts)
and then provision.
Returns a list of the instances created.
"""
if 'instance_uuids' not in request_spec:
return self._legacy_schedule_run_instance(context, request_spec,
admin_password, injected_files, requested_networks,
is_first_time, filter_properties, reservations)
elevated = context.elevated()
instance_uuids = request_spec.get('instance_uuids')
num_instances = len(instance_uuids)
@@ -114,60 +101,9 @@ class FilterScheduler(driver.Scheduler):
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.end', notifier.INFO, payload)
def _legacy_schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties, reservations):
elevated = context.elevated()
num_instances = request_spec.get('num_instances', 1)
LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
locals())
payload = dict(request_spec=request_spec)
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.start', notifier.INFO, payload)
weighted_hosts = self._schedule(context, "compute", request_spec,
filter_properties)
if not weighted_hosts:
raise exception.NoValidHost(reason="")
# NOTE(comstud): Make sure we do not pass this through. It
# contains an instance of RpcContext that cannot be serialized.
filter_properties.pop('context', None)
instances = []
for num in xrange(num_instances):
if not weighted_hosts:
break
weighted_host = weighted_hosts.pop(0)
request_spec['instance_properties']['launch_index'] = num
instance = self._provision_resource(elevated, weighted_host,
request_spec,
filter_properties,
requested_networks,
injected_files, admin_password,
is_first_time,
reservations=reservations)
# scrub retry host list in case we're scheduling multiple
# instances:
retry = filter_properties.get('retry', {})
retry['hosts'] = []
if instance:
instances.append(instance)
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.end', notifier.INFO, payload)
return instances
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations=None):
reservations):
"""Select a target for resize.
Selects a target host for the instance, post-resize, and casts
@@ -186,13 +122,8 @@ class FilterScheduler(driver.Scheduler):
def _provision_resource(self, context, weighted_host, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, reservations=None,
instance_uuid=None):
admin_password, is_first_time, instance_uuid=None):
"""Create the requested resource in this Zone."""
if reservations:
instance = self.create_instance_db_entry(context, request_spec,
reservations)
instance_uuid = instance['uuid']
# Add a retry entry for the selected compute host:
self._add_retry_host(filter_properties, weighted_host.host_state.host)
@@ -213,16 +144,6 @@ class FilterScheduler(driver.Scheduler):
injected_files=injected_files,
admin_password=admin_password, is_first_time=is_first_time)
if reservations:
inst = driver.encode_instance(updated_instance, local=True)
# So if another instance is created, create_instance_db_entry will
# actually create a new entry, instead of assume it's been created
# already
del request_spec['instance_properties']['uuid']
return inst
def _add_retry_host(self, filter_properties, host):
"""Add a retry entry for the selected compute host. In the event that
the request gets re-scheduled, this entry will signal that the given

View File

@@ -35,7 +35,6 @@ from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova import quota
@@ -54,7 +53,7 @@ QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
RPC_API_VERSION = '1.7'
RPC_API_VERSION = '2.0'
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
@@ -62,41 +61,8 @@ class SchedulerManager(manager.Manager):
self.driver = importutils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(*args, **kwargs)
def create_rpc_dispatcher(self):
"""Get the rpc dispatcher for this manager.
Return a dispatcher which can call out to either SchedulerManager
or _V2SchedulerManagerProxy depending on the RPC API version.
"""
return rpc_dispatcher.RpcDispatcher([self,
_V2SchedulerManagerProxy(self)])
def __getattr__(self, key):
"""Converts all method calls to use the schedule method"""
# NOTE(russellb) Because of what this is doing, we must be careful
# when changing the API of the scheduler drivers, as that changes
# the rpc API as well, and the version should be updated accordingly.
# NOTE(markmc): This remains only for backwards compat support
# and can be removed when we bump the major version
return functools.partial(self._schedule, key)
def get_host_list(self, context):
"""Get a list of hosts from the HostManager.
Currently unused, but left for backwards compatibility.
"""
raise rpc_common.RPCException(message=_('Deprecated in version 1.0'))
def get_service_capabilities(self, context):
"""Get the normalized set of capabilities for this zone.
Has been unused since pre-essex, but remains for rpc API 1.X
completeness.
"""
raise rpc_common.RPCException(message=_('Deprecated in version 1.0'))
def update_service_capabilities(self, context, service_name=None,
host=None, capabilities=None, **kwargs):
def update_service_capabilities(self, context, service_name,
host, capabilities):
"""Process a capability update from a service node."""
if capabilities is None:
capabilities = {}
@@ -113,87 +79,45 @@ class SchedulerManager(manager.Manager):
{'vm_state': vm_states.ERROR},
context, ex, {})
def live_migration(self, context, dest,
block_migration=False, disk_over_commit=False,
instance=None, instance_id=None, topic=None):
def live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
try:
return self.driver.schedule_live_migration(
context, dest,
block_migration, disk_over_commit,
instance, instance_id)
context, instance, dest,
block_migration, disk_over_commit)
except Exception as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify('live_migration',
{'vm_state': vm_states.ERROR},
context, ex, {})
def _schedule(self, method, context, topic, *args, **kwargs):
"""Tries to call schedule_* method on the driver to retrieve host.
Falls back to schedule(context, topic) if method doesn't exist.
"""
driver_method_name = 'schedule_%s' % method
try:
driver_method = getattr(self.driver, driver_method_name)
args = (context,) + args
except AttributeError, e:
LOG.warning(_("Driver Method %(driver_method_name)s missing: "
"%(e)s. Reverting to schedule()") % locals())
driver_method = self.driver.schedule
args = (context, topic, method) + args
# Scheduler methods are responsible for casting.
try:
return driver_method(*args, **kwargs)
except Exception as ex:
with excutils.save_and_reraise_exception():
request_spec = kwargs.get('request_spec', {})
self._set_vm_state_and_notify(method,
{'vm_state': vm_states.ERROR},
context, ex, request_spec)
def run_instance(self, context, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties, reservations=None, topic=None):
filter_properties):
"""Tries to call schedule_run_instance on the driver.
Sets instance vm_state to ERROR on exceptions
"""
try:
result = self.driver.schedule_run_instance(context,
return self.driver.schedule_run_instance(context,
request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties,
reservations)
return result
requested_networks, is_first_time, filter_properties)
except exception.NoValidHost as ex:
# don't re-raise
self._set_vm_state_and_notify('run_instance',
{'vm_state': vm_states.ERROR},
context, ex, request_spec)
if reservations:
QUOTAS.rollback(context, reservations)
except Exception as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify('run_instance',
{'vm_state': vm_states.ERROR},
context, ex, request_spec)
if reservations:
QUOTAS.rollback(context, reservations)
# FIXME(comstud): Remove 'update_db' in a future version. It's only
# here for rpcapi backwards compatibility.
def prep_resize(self, context, image, request_spec, filter_properties,
update_db=None, instance=None, instance_uuid=None,
instance_type=None, instance_type_id=None,
reservations=None, topic=None):
instance, instance_type, reservations):
"""Tries to call schedule_prep_resize on the driver.
Sets instance vm_state to ACTIVE on NoHostFound
Sets vm_state to ERROR on other exceptions
"""
if not instance:
instance = db.instance_get_by_uuid(context, instance_uuid)
if not instance_type:
instance_type = db.instance_type_get(context, instance_type_id)
try:
kwargs = {
'context': context,
@@ -328,67 +252,3 @@ class SchedulerManager(manager.Manager):
@manager.periodic_task
def _expire_reservations(self, context):
QUOTAS.expire(context)
class _V2SchedulerManagerProxy(object):
RPC_API_VERSION = '2.0'
# Notes:
# - remove get_host_list()
# - remove get_service_capabilities()
# - add explicit live_migration() method
# - remove __getattr__ magic which is replaced by schedule()
def __init__(self, manager):
self.manager = manager
def create_volume(self, context, volume_id, snapshot_id, reservations):
return self.manager.create_volume(
context, volume_id, snapshot_id, reservations)
# Remove instance_id, require instance
# Remove topic
# Make block_migration and disk_over_commit required
def live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
return self.manager.live_migration(
context, dest, instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit,
instance_id=None)
# Remove update_db
# Remove instance_uuid, require instance
# Remove instance_type_id, require instance_type
# Remove topic
# Make reservations required
def prep_resize(self, context, image, request_spec, filter_properties,
instance, instance_type, reservations):
return self.manager.prep_resize(
context, image=image, request_spec=request_spec,
filter_properties=filter_properties,
instance=instance, instance_type=instance_type,
reservations=reservations, topic=None,
update_db=None, instance_uuid=None, instance_type_id=None)
# Remove reservations and topic
# Require instance_uuids in request_spec
def run_instance(self, context, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties):
return self.manager.run_instance(
context, request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties,
reservations=None, topic=None)
def show_host_resources(self, context, host):
return self.manager.show_host_resources(context, host)
# remove kwargs
# require service_name, host and capabilities
def update_service_capabilities(self, context, service_name,
host, capabilities):
return self.manager.update_service_capabilities(
context, service_name=service_name, host=host,
capabilities=capabilities)

View File

@@ -65,11 +65,6 @@ class MultiScheduler(driver.Scheduler):
'volume': volume_driver,
'default': default_driver}
def schedule(self, context, topic, method, *_args, **_kwargs):
driver = self.drivers.get(topic, self.drivers['default'])
return driver.schedule(context, topic,
method, *_args, **_kwargs)
def schedule_run_instance(self, *args, **kwargs):
return self.drivers['compute'].schedule_run_instance(*args, **kwargs)

View File

@@ -46,13 +46,15 @@ FLAGS.register_opts(simple_scheduler_opts)
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
def schedule_run_instance(self, context, request_spec, reservations,
*_args, **_kwargs):
def schedule_run_instance(self, context, request_spec, admin_password,
injected_files, requested_networks,
is_first_time, filter_properties):
deprecated.warn(_('SimpleScheduler now only covers volume scheduling '
'and is deprecated in Folsom. Non-volume functionality in '
'SimpleScheduler has been replaced by FilterScheduler'))
super(SimpleScheduler, self).schedule_run_instance(context,
request_spec, reservations, *_args, **_kwargs)
request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties)
def schedule_create_volume(self, context, volume_id, snapshot_id,
reservations):

View File

@@ -63,27 +63,22 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
ctxt = context.RequestContext('fake', 'fake', False)
ctxt_elevated = 'fake-context-elevated'
fake_args = (1, 2, 3)
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'num_instances': 2,
'instance_properties': instance_opts}
instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
instance1 = {'uuid': 'fake-uuid1'}
instance2 = {'uuid': 'fake-uuid2'}
request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
'instance_properties': instance_opts}
instance1_encoded = {'uuid': 'fake-uuid1', '_is_precooked': False}
instance2_encoded = {'uuid': 'fake-uuid2', '_is_precooked': False}
reservations = ['resv1', 'resv2']
# create_instance_db_entry() usually does this, but we're
# stubbing it.
def _add_uuid1(ctxt, request_spec, reservations):
request_spec['instance_properties']['uuid'] = 'fake-uuid1'
def _add_uuid2(ctxt, request_spec, reservations):
request_spec['instance_properties']['uuid'] = 'fake-uuid2'
def inc_launch_index(*args):
request_spec['instance_properties']['launch_index'] = (
request_spec['instance_properties']['launch_index'] + 1)
self.mox.StubOutWithMock(ctxt, 'elevated')
self.mox.StubOutWithMock(self.driver, 'hosts_up')
self.mox.StubOutWithMock(random, 'random')
self.mox.StubOutWithMock(self.driver, 'create_instance_db_entry')
self.mox.StubOutWithMock(driver, 'encode_instance')
self.mox.StubOutWithMock(driver, 'instance_update_db')
self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
@@ -93,110 +88,39 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.5)
self.driver.create_instance_db_entry(ctxt, request_spec,
reservations).WithSideEffects(_add_uuid1).AndReturn(
instance1)
driver.instance_update_db(ctxt, instance1['uuid'],
'host3').AndReturn(instance1)
'host3').WithSideEffects(inc_launch_index).AndReturn(instance1)
compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
instance=instance1, requested_networks=None,
injected_files=None, admin_password=None, is_first_time=None,
request_spec=request_spec, filter_properties={})
driver.encode_instance(instance1).AndReturn(instance1_encoded)
# instance 2
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.2)
self.driver.create_instance_db_entry(ctxt, request_spec,
reservations).WithSideEffects(_add_uuid2).AndReturn(
instance2)
driver.instance_update_db(ctxt, instance2['uuid'],
'host1').AndReturn(instance2)
'host1').WithSideEffects(inc_launch_index).AndReturn(instance2)
compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
instance=instance2, requested_networks=None,
injected_files=None, admin_password=None, is_first_time=None,
request_spec=request_spec, filter_properties={})
driver.encode_instance(instance2).AndReturn(instance2_encoded)
self.mox.ReplayAll()
result = self.driver.schedule_run_instance(ctxt, request_spec,
None, None, None, None, {}, reservations)
None, None, None, None, {})
expected = [instance1_encoded, instance2_encoded]
self.assertEqual(result, expected)
def test_scheduler_includes_launch_index(self):
ctxt = context.RequestContext('fake', 'fake', False)
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'num_instances': 2,
'instance_properties': instance_opts}
instance1 = {'uuid': 'fake-uuid1'}
instance2 = {'uuid': 'fake-uuid2'}
# create_instance_db_entry() usually does this, but we're
# stubbing it.
def _add_uuid(num):
"""Return a function that adds the provided uuid number."""
def _add_uuid_num(_, spec, reservations):
spec['instance_properties']['uuid'] = 'fake-uuid%d' % num
return _add_uuid_num
def _has_launch_index(expected_index):
"""Return a function that verifies the expected index."""
def _check_launch_index(value):
if 'instance_properties' in value:
if 'launch_index' in value['instance_properties']:
index = value['instance_properties']['launch_index']
if index == expected_index:
return True
return False
return _check_launch_index
self.mox.StubOutWithMock(self.driver, '_schedule')
self.mox.StubOutWithMock(self.driver, 'create_instance_db_entry')
self.mox.StubOutWithMock(driver, 'encode_instance')
self.mox.StubOutWithMock(driver, 'instance_update_db')
self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
# instance 1
self.driver._schedule(ctxt, 'compute', request_spec,
{}).AndReturn('host')
self.driver.create_instance_db_entry(
ctxt, mox.Func(_has_launch_index(0)), None
).WithSideEffects(_add_uuid(1)).AndReturn(instance1)
driver.instance_update_db(ctxt, instance1['uuid'],
'host').AndReturn(instance1)
compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host',
instance=instance1, requested_networks=None,
injected_files=None, admin_password=None, is_first_time=None,
request_spec=request_spec, filter_properties={})
driver.encode_instance(instance1).AndReturn(instance1)
# instance 2
self.driver._schedule(ctxt, 'compute', request_spec,
{}).AndReturn('host')
self.driver.create_instance_db_entry(
ctxt, mox.Func(_has_launch_index(1)), None
).WithSideEffects(_add_uuid(2)).AndReturn(instance2)
driver.instance_update_db(ctxt, instance2['uuid'],
'host').AndReturn(instance2)
compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host',
instance=instance2, requested_networks=None,
injected_files=None, admin_password=None, is_first_time=None,
request_spec=request_spec, filter_properties={})
driver.encode_instance(instance2).AndReturn(instance2)
self.mox.ReplayAll()
self.driver.schedule_run_instance(ctxt, request_spec, None, None,
None, None, {}, None)
def test_basic_schedule_run_instance_no_hosts(self):
ctxt = context.RequestContext('fake', 'fake', False)
ctxt_elevated = 'fake-context-elevated'
fake_args = (1, 2, 3)
instance_opts = 'fake_instance_opts'
request_spec = {'num_instances': 2,
instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
request_spec = {'instance_uuids': ['fake-uuid1'],
'instance_properties': instance_opts}
self.mox.StubOutWithMock(ctxt, 'elevated')
@@ -209,50 +133,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost,
self.driver.schedule_run_instance, ctxt, request_spec,
None, None, None, None, {}, None)
def test_basic_schedule_fallback(self):
ctxt = context.RequestContext('fake', 'fake', False)
ctxt_elevated = 'fake-context-elevated'
topic = 'fake_topic'
method = 'fake_method'
fake_args = (1, 2, 3)
fake_kwargs = {'fake_kwarg1': 'fake_value1',
'fake_kwarg2': 'fake_value2'}
self.mox.StubOutWithMock(ctxt, 'elevated')
self.mox.StubOutWithMock(self.driver, 'hosts_up')
self.mox.StubOutWithMock(random, 'random')
self.mox.StubOutWithMock(driver, 'cast_to_host')
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, topic).AndReturn(
['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.5)
driver.cast_to_host(ctxt, topic, 'host3', method, **fake_kwargs)
self.mox.ReplayAll()
self.driver.schedule(ctxt, topic, method, *fake_args, **fake_kwargs)
def test_basic_schedule_fallback_no_hosts(self):
ctxt = context.RequestContext('fake', 'fake', False)
ctxt_elevated = 'fake-context-elevated'
topic = 'fake_topic'
method = 'fake_method'
fake_args = (1, 2, 3)
fake_kwargs = {'fake_kwarg1': 'fake_value1',
'fake_kwarg2': 'fake_value2'}
self.mox.StubOutWithMock(ctxt, 'elevated')
self.mox.StubOutWithMock(self.driver, 'hosts_up')
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, topic).AndReturn([])
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost,
self.driver.schedule, ctxt, topic, method,
*fake_args, **fake_kwargs)
None, None, None, None, {})
def test_schedule_prep_resize_doesnt_update_host(self):
fake_context = context.RequestContext('user', 'project',
@@ -275,5 +156,5 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance = {'uuid': 'fake-uuid', 'host': 'host1'}
self.driver.schedule_prep_resize(fake_context, {}, {}, {},
instance, {})
instance, {}, None)
self.assertEqual(info['called'], 0)

View File

@@ -49,10 +49,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
fake_context = context.RequestContext('user', 'project')
request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
'ephemeral_gb': 0},
'instance_properties': {'project_id': 1}}
'instance_properties': {'project_id': 1},
'instance_uuids': ['fake-uuid1']}
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec, None, None, None,
None, {}, None)
None, {})
def test_run_instance_non_admin(self):
"""Test creating an instance locally using run_instance, passing
@@ -71,10 +72,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
fake_context = context.RequestContext('user', 'project')
request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
'instance_properties': {'project_id': 1}}
'instance_properties': {'project_id': 1},
'instance_uuids': ['fake-uuid1']}
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec, None, None, None,
None, {}, None)
None, {})
self.assertTrue(self.was_admin)
def test_schedule_bad_topic(self):
@@ -89,7 +91,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
fake_kwargs = {'fake_kwarg1': 'fake_value1',
'fake_kwarg2': 'fake_value2'}
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'num_instances': 2,
request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
'instance_properties': instance_opts}
instance1 = {'uuid': 'fake-uuid1'}
instance2 = {'uuid': 'fake-uuid2'}
@@ -114,22 +116,24 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.mox.StubOutWithMock(self.driver, '_provision_resource')
self.driver._schedule(context_fake, 'compute',
request_spec, {}
request_spec, {}, ['fake-uuid1', 'fake-uuid2']
).AndReturn(['host1', 'host2'])
# instance 1
self.driver._provision_resource(
ctxt, 'host1',
mox.Func(_has_launch_index(0)), {},
None, None, None, None, reservations=None).AndReturn(instance1)
None, None, None, None,
instance_uuid='fake-uuid1').AndReturn(instance1)
# instance 2
self.driver._provision_resource(
ctxt, 'host2',
mox.Func(_has_launch_index(1)), {},
None, None, None, None, reservations=None).AndReturn(instance2)
None, None, None, None,
instance_uuid='fake-uuid2').AndReturn(instance2)
self.mox.ReplayAll()
self.driver.schedule_run_instance(context_fake, request_spec,
None, None, None, None, {}, None)
None, None, None, None, {})
def test_schedule_happy_day(self):
"""Make sure there's nothing glaringly wrong with _schedule()
@@ -191,7 +195,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance = {'uuid': 'fake-uuid', 'host': 'host1'}
sched.schedule_prep_resize(fake_context, {}, {}, {}, instance, {})
sched.schedule_prep_resize(fake_context, {}, {}, {},
instance, {}, None)
self.assertEqual(info['called'], 0)
def test_get_cost_functions(self):

View File

@@ -35,9 +35,6 @@ class FakeComputeScheduler(driver.Scheduler):
def schedule_theoretical(self, *args, **kwargs):
pass
def schedule(self, *args, **kwargs):
pass
class FakeVolumeScheduler(driver.Scheduler):
is_fake_volume = True
@@ -46,9 +43,6 @@ class FakeVolumeScheduler(driver.Scheduler):
super(FakeVolumeScheduler, self).__init__()
self.is_update_caps_called = False
def schedule(self, *args, **kwargs):
pass
class FakeDefaultScheduler(driver.Scheduler):
is_fake_default = True
@@ -57,9 +51,6 @@ class FakeDefaultScheduler(driver.Scheduler):
super(FakeDefaultScheduler, self).__init__()
self.is_update_caps_called = False
def schedule(self, *args, **kwargs):
pass
class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
"""Test case for multi driver"""
@@ -84,31 +75,6 @@ class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
self.assertTrue(mgr.drivers['volume'].is_fake_volume)
self.assertTrue(mgr.drivers['default'].is_fake_default)
def test_schedule_fallback_proxy(self):
mgr = self._manager
self.mox.StubOutWithMock(mgr.drivers['compute'], 'schedule')
self.mox.StubOutWithMock(mgr.drivers['volume'], 'schedule')
self.mox.StubOutWithMock(mgr.drivers['default'], 'schedule')
ctxt = 'fake_context'
method = 'fake_method'
fake_args = (1, 2, 3)
fake_kwargs = {'fake_kwarg1': 'fake_value1',
'fake_kwarg2': 'fake_value2'}
mgr.drivers['compute'].schedule(ctxt, 'compute', method,
*fake_args, **fake_kwargs)
mgr.drivers['volume'].schedule(ctxt, 'volume', method,
*fake_args, **fake_kwargs)
mgr.drivers['default'].schedule(ctxt, 'random_topic', method,
*fake_args, **fake_kwargs)
self.mox.ReplayAll()
mgr.schedule(ctxt, 'compute', method, *fake_args, **fake_kwargs)
mgr.schedule(ctxt, 'volume', method, *fake_args, **fake_kwargs)
mgr.schedule(ctxt, 'random_topic', method, *fake_args, **fake_kwargs)
def test_update_service_capabilities(self):
def fake_update_service_capabilities(self, service, host, caps):
self.is_update_caps_called = True
@@ -159,6 +125,3 @@ class SimpleSchedulerTestCase(MultiDriverTestCase):
self.assertTrue(mgr.drivers['compute'].is_fake_compute)
self.assertTrue(mgr.drivers['volume'] is not None)
self.assertTrue(mgr.drivers['default'].is_fake_default)
def test_proxy_calls(self):
pass

View File

@@ -78,7 +78,7 @@ class SchedulerManagerTestCase(test.TestCase):
host, {})
self.mox.ReplayAll()
result = self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host)
service_name=service_name, host=host, capabilities={})
self.mox.VerifyAll()
self.mox.ResetAll()
@@ -91,29 +91,6 @@ class SchedulerManagerTestCase(test.TestCase):
service_name=service_name, host=host,
capabilities=capabilities)
def test_existing_method(self):
def stub_method(self, *args, **kwargs):
pass
setattr(self.manager.driver, 'schedule_stub_method', stub_method)
self.mox.StubOutWithMock(self.manager.driver,
'schedule_stub_method')
self.manager.driver.schedule_stub_method(self.context,
*self.fake_args, **self.fake_kwargs)
self.mox.ReplayAll()
self.manager.stub_method(self.context, self.topic,
*self.fake_args, **self.fake_kwargs)
def test_missing_method_fallback(self):
self.mox.StubOutWithMock(self.manager.driver, 'schedule')
self.manager.driver.schedule(self.context, self.topic,
'noexist', *self.fake_args, **self.fake_kwargs)
self.mox.ReplayAll()
self.manager.noexist(self.context, self.topic,
*self.fake_args, **self.fake_kwargs)
def test_show_host_resources(self):
host = 'fake_host'
@@ -175,37 +152,6 @@ class SchedulerManagerTestCase(test.TestCase):
self.mox.StubOutWithMock(self.manager.driver,
method_name)
def test_schedule_exeception_changes_state_notifies_and_raises(self):
"""Test that an exception scheduling calls
_set_vm_state_and_notify and reraises
"""
fake_instance_uuid = 'fake-instance-id'
self._mox_schedule_method_helper('schedule_something')
self.mox.StubOutWithMock(self.manager, '_set_vm_state_and_notify')
request_spec = {'instance_properties':
{'uuid': fake_instance_uuid}}
self.fake_kwargs['request_spec'] = request_spec
ex = self.AnException('something happened')
self.manager.driver.schedule_something(self.context,
*self.fake_args, **self.fake_kwargs).AndRaise(ex)
# Adding the context to the args is kind of gnarly, but thats what
# happens. Could be refactored to keep all the context, spec, topic
# stuff a bit cleaner.
self.manager._set_vm_state_and_notify('something',
{'vm_state': vm_states.ERROR}, self.context,
ex, request_spec)
self.mox.ReplayAll()
self.assertRaises(self.AnException, self.manager.something,
self.context, self.topic,
*self.fake_args, **self.fake_kwargs)
def test_run_instance_exception_puts_instance_in_error_state(self):
"""Test that a NoValidHost exception for run_instance puts
the instance in ERROR state and eats the exception.
@@ -221,14 +167,14 @@ class SchedulerManagerTestCase(test.TestCase):
{'uuid': fake_instance_uuid}}
self.manager.driver.schedule_run_instance(self.context,
request_spec, None, None, None, None, {}, None).AndRaise(
request_spec, None, None, None, None, {}).AndRaise(
exception.NoValidHost(reason=""))
db.instance_update_and_get_original(self.context, fake_instance_uuid,
{"vm_state": vm_states.ERROR}).AndReturn((inst, inst))
self.mox.ReplayAll()
self.manager.run_instance(self.context, request_spec,
None, None, None, None, {}, None)
None, None, None, None, {})
def test_prep_resize_no_valid_host_back_in_active_state(self):
"""Test that a NoValidHost exception for prep_resize puts
@@ -260,8 +206,7 @@ class SchedulerManagerTestCase(test.TestCase):
(inst, inst))
self.mox.ReplayAll()
# FIXME(comstud): Remove 'update_db' on future RPC version bump.
self.manager.prep_resize(update_db=False, **kwargs)
self.manager.prep_resize(**kwargs)
def test_prep_resize_exception_host_in_error_state_and_raise(self):
"""Test that a NoValidHost exception for prep_resize puts
@@ -297,10 +242,7 @@ class SchedulerManagerTestCase(test.TestCase):
self.mox.ReplayAll()
# FIXME(comstud): Remove 'update_db' on future RPC version bump.
self.assertRaises(self.AnException, self.manager.prep_resize,
update_db=False,
**kwargs)
self.assertRaises(self.AnException, self.manager.prep_resize, **kwargs)
class SchedulerTestCase(test.TestCase):
@@ -347,48 +289,6 @@ class SchedulerTestCase(test.TestCase):
result = self.driver.hosts_up(self.context, self.topic)
self.assertEqual(result, ['host2'])
def test_create_instance_db_entry(self):
base_options = {'fake_option': 'meow'}
image = 'fake_image'
instance_type = 'fake_instance_type'
security_group = 'fake_security_group'
block_device_mapping = 'fake_block_device_mapping'
request_spec = {'instance_properties': base_options,
'image': image,
'instance_type': instance_type,
'security_group': security_group,
'block_device_mapping': block_device_mapping}
self.mox.StubOutWithMock(self.driver.compute_api,
'create_db_entry_for_new_instance')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
# New entry
fake_instance = {'uuid': 'fake-uuid'}
self.driver.compute_api.create_db_entry_for_new_instance(
self.context, instance_type, image, base_options,
security_group,
block_device_mapping).AndReturn(fake_instance)
self.mox.ReplayAll()
instance = self.driver.create_instance_db_entry(self.context,
request_spec, None)
self.mox.VerifyAll()
self.assertEqual(instance, fake_instance)
# Entry created by compute already
self.mox.ResetAll()
fake_uuid = 'fake-uuid'
base_options['uuid'] = fake_uuid
fake_instance = {'uuid': fake_uuid}
db.instance_get_by_uuid(self.context, fake_uuid).AndReturn(
fake_instance)
self.mox.ReplayAll()
instance = self.driver.create_instance_db_entry(self.context,
request_spec, None)
self.assertEqual(instance, fake_instance)
def _live_migration_instance(self):
volume1 = {'id': 31338}
volume2 = {'id': 31339}
@@ -516,37 +416,28 @@ class SchedulerTestCase(test.TestCase):
def test_live_migration_instance_not_running(self):
"""The instance given by instance_id is not running."""
self.mox.StubOutWithMock(db, 'instance_get')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
instance_id = instance['id']
instance['power_state'] = power_state.NOSTATE
db.instance_get(self.context,
instance_id).AndReturn(instance)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceNotRunning,
self.driver.schedule_live_migration, self.context,
instance_id=instance_id, dest=dest,
block_migration=block_migration)
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_exist(self):
"""Raise exception when src compute node is does not exist."""
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(utils, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
instance_id = instance['id']
db.instance_get(self.context,
instance_id).AndReturn(instance)
# Compute down
db.service_get_all_compute_by_host(self.context,
@@ -556,22 +447,20 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.driver.schedule_live_migration, self.context,
instance_id=instance_id, dest=dest,
block_migration=block_migration)
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_alive(self):
"""Raise exception when src compute node is not alive."""
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(utils, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
instance_id = instance['id']
db.instance_get(self.context,
instance_id).AndReturn(instance)
# Compute down
db.service_get_all_compute_by_host(self.context,
@@ -581,23 +470,21 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.driver.schedule_live_migration, self.context,
instance_id=instance_id, dest=dest,
block_migration=block_migration)
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_compute_dest_not_alive(self):
"""Raise exception when dest compute node is not alive."""
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
self.mox.StubOutWithMock(utils, 'service_is_up')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
instance_id = instance['id']
db.instance_get(self.context,
instance_id).AndReturn(instance)
self.driver._live_migration_src_check(self.context, instance)
db.service_get_all_compute_by_host(self.context,
@@ -608,13 +495,13 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.driver.schedule_live_migration, self.context,
instance_id=instance_id, dest=dest,
block_migration=block_migration)
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_dest_check_service_same_host(self):
"""Confirms exception raises in case dest and src is same host."""
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
self.mox.StubOutWithMock(utils, 'service_is_up')
@@ -622,13 +509,9 @@ class SchedulerTestCase(test.TestCase):
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
instance_id = instance['id']
# make dest same as src
dest = instance['host']
db.instance_get(self.context,
instance_id).AndReturn(instance)
self.driver._live_migration_src_check(self.context, instance)
db.service_get_all_compute_by_host(self.context,
dest).AndReturn(['fake_service3'])
@@ -637,14 +520,13 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.UnableToMigrateToSelf,
self.driver.schedule_live_migration, self.context,
instance_id=instance_id, dest=dest,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=False)
def test_live_migration_dest_check_service_lack_memory(self):
"""Confirms exception raises when dest doesn't have enough memory."""
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
self.mox.StubOutWithMock(utils, 'service_is_up')
@@ -655,9 +537,6 @@ class SchedulerTestCase(test.TestCase):
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
instance_id = instance['id']
db.instance_get(self.context,
instance_id).AndReturn(instance)
self.driver._live_migration_src_check(self.context, instance)
db.service_get_all_compute_by_host(self.context,
@@ -672,13 +551,12 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.MigrationError,
self.driver.schedule_live_migration, self.context,
instance_id=instance_id, dest=dest,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_different_hypervisor_type_raises(self):
"""Confirm live_migration to hypervisor of different type raises"""
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
@@ -690,9 +568,6 @@ class SchedulerTestCase(test.TestCase):
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
instance_id = instance['id']
db.instance_get(self.context,
instance_id).AndReturn(instance)
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
@@ -708,13 +583,12 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
self.driver.schedule_live_migration, self.context,
instance_id=instance_id, dest=dest,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_dest_hypervisor_version_older_raises(self):
"""Confirm live migration to older hypervisor raises"""
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
@@ -726,9 +600,6 @@ class SchedulerTestCase(test.TestCase):
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
instance_id = instance['id']
db.instance_get(self.context,
instance_id).AndReturn(instance)
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
@@ -743,7 +614,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.driver.schedule_live_migration, self.context,
instance_id=instance_id, dest=dest,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
@@ -752,14 +623,6 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
"""Test cases for base scheduler driver class methods
that can't will fail if the driver is changed"""
def test_unimplemented_schedule(self):
fake_args = (1, 2, 3)
fake_kwargs = {'cat': 'meow'}
self.assertRaises(NotImplementedError, self.driver.schedule,
self.context, self.topic, 'schedule_something',
*fake_args, **fake_kwargs)
def test_unimplemented_schedule_run_instance(self):
fake_args = (1, 2, 3)
fake_kwargs = {'cat': 'meow'}
@@ -769,7 +632,7 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
self.assertRaises(NotImplementedError,
self.driver.schedule_run_instance,
self.context, fake_request_spec, None, None, None,
None, None, None)
None, None)
def test_unimplemented_schedule_prep_resize(self):
fake_args = (1, 2, 3)
@@ -780,7 +643,7 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
self.assertRaises(NotImplementedError,
self.driver.schedule_prep_resize,
self.context, {},
fake_request_spec, {}, {}, {})
fake_request_spec, {}, {}, {}, None)
class SchedulerDriverModuleTestCase(test.TestCase):