Merge "Bump the Compute RPC API to version 6.0"

Zuul 2021-03-25 14:15:04 +00:00 committed by Gerrit Code Review
commit 572ae578bb
10 changed files with 766 additions and 515 deletions


@ -525,7 +525,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='5.13')
target = messaging.Target(version='6.0')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@ -571,6 +571,9 @@ class ComputeManager(manager.Manager):
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
# TODO(sbauza): Remove this call once we delete the V5Proxy class
self.additional_endpoints.append(_ComputeV5Proxy(self))
# NOTE(russellb) Load the driver last. It may call back into the
# compute manager via the virtapi, so we want it to be fully
# initialized before that happens.
@ -1561,34 +1564,6 @@ class ComputeManager(manager.Manager):
except exception.InstanceNotFound:
return power_state.NOSTATE
# TODO(stephenfin): Remove this once we bump the compute API to v6.0
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
# TODO(mdragon): perhaps make this variable by console_type?
return 'console.%s' % CONF.console_host
# TODO(stephenfin): Remove this once we bump the compute API to v6.0
@wrap_exception()
def get_console_pool_info(self, context, console_type):
raise NotImplementedError()
# TODO(stephenfin): Remove this as it's nova-network only
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
Synchronize the call because we may still be in the middle of
creating the instance.
"""
pass
def _await_block_device_map_created(self, context, vol_id):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
@ -2092,10 +2067,10 @@ class ComputeManager(manager.Manager):
@reverts_task_state
@wrap_instance_fault
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
filter_properties, accel_uuids, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None, host_list=None, accel_uuids=None):
node=None, limits=None, host_list=None):
@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
@ -2313,6 +2288,8 @@ class ComputeManager(manager.Manager):
port_id, to resource provider UUID that provides resource for that
RequestGroup. Or None if the request_spec was None.
"""
# TODO(sbauza): Remove this conditional once we only support
# RPC API 6.0
if request_spec:
return request_spec.get_request_group_mapping()
else:
@ -3320,8 +3297,7 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec,
accel_uuids=None):
scheduled_node, limits, request_spec, accel_uuids):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@ -3352,8 +3328,7 @@ class ComputeManager(manager.Manager):
:param limits: Overcommit limits set by the scheduler. If a host was
specified by the user, this will be None
:param request_spec: a RequestSpec object used to schedule the instance
:param accel_uuids: a list of cyborg ARQ uuids or None if the RPC API
is <=5.11
:param accel_uuids: a list of cyborg ARQ uuids
"""
# recreate=True means the instance is being evacuated from a failed
@ -4222,12 +4197,6 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.UNRESCUE,
phase=fields.NotificationPhase.END)
# TODO(stephenfin): Remove this once we bump the compute API to v6.0
@wrap_exception()
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
raise NotImplementedError()
@wrap_exception()
@wrap_instance_event(prefix='compute')
@errors_out_migration
@ -4817,7 +4786,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def revert_resize(self, context, instance, migration, request_spec=None):
def revert_resize(self, context, instance, migration, request_spec):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
@ -4909,8 +4878,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def finish_revert_resize(
self, context, instance, migration, request_spec=None):
def finish_revert_resize(self, context, instance, migration, request_spec):
"""Finishes the second half of reverting a resize on the source host.
Bring the original source instance state back (active/shutoff) and
@ -5033,6 +5001,8 @@ class ComputeManager(manager.Manager):
provider_mappings = self._get_request_group_mapping(
request_spec)
else:
# TODO(sbauza): Remove this conditional once we only support RPC
# API 6.0
# NOTE(gibi): The compute RPC is pinned to be older than 5.2
# and therefore request_spec is not sent. We cannot calculate
# the provider mappings. If the instance has ports with
@ -5180,7 +5150,7 @@ class ComputeManager(manager.Manager):
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
def prep_resize(self, context, image, instance, flavor,
request_spec, filter_properties, node,
clean_shutdown, migration, host_list):
"""Initiates the process of moving a running instance to another host.
@ -5206,10 +5176,10 @@ class ComputeManager(manager.Manager):
errors_out_migration_ctxt(migration):
self._send_prep_resize_notifications(
context, instance, fields.NotificationPhase.START,
instance_type)
flavor)
try:
self._prep_resize(context, image, instance,
instance_type, filter_properties,
flavor, filter_properties,
node, migration, request_spec,
clean_shutdown)
except exception.BuildAbortException:
@ -5227,12 +5197,12 @@ class ComputeManager(manager.Manager):
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, instance,
exc_info, instance_type, request_spec,
exc_info, flavor, request_spec,
filter_properties, host_list)
finally:
self._send_prep_resize_notifications(
context, instance, fields.NotificationPhase.END,
instance_type)
flavor)
def _reschedule_resize_or_reraise(self, context, instance, exc_info,
instance_type, request_spec, filter_properties, host_list):
@ -5308,14 +5278,12 @@ class ComputeManager(manager.Manager):
raise exc.with_traceback(exc_info[2])
raise exc
# TODO(stephenfin): Remove unused request_spec parameter in API v6.0
@messaging.expected_exceptions(exception.MigrationPreCheckError)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def prep_snapshot_based_resize_at_dest(
self, ctxt, instance, flavor, nodename, migration, limits,
request_spec):
self, ctxt, instance, flavor, nodename, migration, limits):
"""Performs pre-cross-cell resize resource claim on the dest host.
This runs on the destination host in a cross-cell resize operation
@ -5338,7 +5306,6 @@ class ComputeManager(manager.Manager):
:param nodename: Name of the target compute node
:param migration: nova.objects.Migration object for the operation
:param limits: nova.objects.SchedulerLimits object of resource limits
:param request_spec: nova.objects.RequestSpec object for the operation
:returns: nova.objects.MigrationContext; the migration context created
on the destination host during the resize_claim.
:raises: nova.exception.MigrationPreCheckError if the pre-check
@ -5523,8 +5490,8 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def resize_instance(self, context, instance, image,
migration, instance_type, clean_shutdown,
request_spec=None):
migration, flavor, clean_shutdown,
request_spec):
"""Starts the migration of a running instance to another host.
This is initiated from the destination host's ``prep_resize`` routine
@ -5532,7 +5499,7 @@ class ComputeManager(manager.Manager):
"""
try:
self._resize_instance(context, instance, image, migration,
instance_type, clean_shutdown, request_spec)
flavor, clean_shutdown, request_spec)
except Exception:
with excutils.save_and_reraise_exception():
self._revert_allocation(context, instance, migration)
@ -5791,7 +5758,7 @@ class ComputeManager(manager.Manager):
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
migration, request_spec=None):
migration, request_spec):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
@ -5866,14 +5833,13 @@ class ComputeManager(manager.Manager):
action=fields.NotificationAction.RESIZE_FINISH, phase=phase,
bdms=bdms)
# TODO(stephenfin): Remove unused request_spec parameter in API v6.0
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def finish_snapshot_based_resize_at_dest(
self, ctxt, instance, migration, snapshot_id, request_spec):
self, ctxt, instance, migration, snapshot_id):
"""Finishes the snapshot-based resize at the destination compute.
Sets up block devices and networking on the destination compute and
@ -5890,7 +5856,6 @@ class ComputeManager(manager.Manager):
be "finished".
:param snapshot_id: ID of the image snapshot created for a
non-volume-backed instance, else None.
:param request_spec: nova.objects.RequestSpec object for the operation
"""
LOG.info('Finishing snapshot based resize on destination host %s.',
self.host, instance=instance)
@ -6333,7 +6298,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def shelve_instance(self, context, instance, image_id,
clean_shutdown, accel_uuids=None):
clean_shutdown, accel_uuids):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
@ -6418,7 +6383,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def shelve_offload_instance(self, context, instance, clean_shutdown,
accel_uuids=None):
accel_uuids):
"""Remove a shelved instance from the hypervisor.
This frees up those resources for use by other instances, but may lead
@ -6509,9 +6474,8 @@ class ComputeManager(manager.Manager):
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def unshelve_instance(
self, context, instance, image, filter_properties, node,
request_spec=None, accel_uuids=None):
def unshelve_instance(self, context, instance, image,
filter_properties, node, request_spec, accel_uuids):
"""Unshelve the instance.
:param context: request context
@ -6653,12 +6617,6 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.UNSHELVE,
phase=fields.NotificationPhase.END, bdms=bdms)
# TODO(stephenfin): Remove this in RPC 6.0 since it's nova-network only
@messaging.expected_exceptions(NotImplementedError)
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
raise NotImplementedError()
def _inject_network_info(self, instance, network_info):
"""Inject network info for the given instance."""
LOG.debug('Inject network info', instance=instance)
@ -7907,13 +7865,11 @@ class ComputeManager(manager.Manager):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host)
# TODO(stephenfin): Remove the unused instance argument in RPC version 6.0
@wrap_exception()
def check_instance_shared_storage(self, ctxt, instance, data):
def check_instance_shared_storage(self, ctxt, data):
"""Check if the instance files are shared
:param ctxt: security context
:param instance: dict of instance data
:param data: result of driver.check_instance_shared_storage_local
Returns True if instance disks located on shared storage and
@ -7937,7 +7893,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit,
migration=None, limits=None):
migration, limits):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
@ -8075,18 +8031,14 @@ class ComputeManager(manager.Manager):
LOG.debug('source check data is %s', result)
return result
# TODO(mriedem): Remove the block_migration argument in v6.0 of the compute
# RPC API.
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
def pre_live_migration(self, context, instance, disk, migrate_data):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param disk: disk info of instance
:param migrate_data: A dict or LiveMigrateData object holding data
required for live migration without shared
@ -10157,18 +10109,6 @@ class ComputeManager(manager.Manager):
# NOTE(mriedem): Why don't we pass clean_task_state=True here?
self._set_instance_obj_error_state(instance)
# TODO(stephenfin): Remove this once we bump the compute API to v6.0
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""(REMOVED) Notify hypervisor of change (for hypervisor pools)."""
raise NotImplementedError()
# TODO(stephenfin): Remove this once we bump the compute API to v6.0
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""(REMOVED) Removes a host from a physical hypervisor pool."""
raise NotImplementedError()
def _process_instance_event(self, instance, event):
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
@ -10714,3 +10654,144 @@ class ComputeManager(manager.Manager):
LOG.debug("Updating migrate VIF profile for port %(port_id)s:"
"%(profile)s", {'port_id': port_id,
'profile': profile})
# TODO(sbauza): Remove this proxy class in the X release once we drop the 5.x
# support.
# NOTE(sbauza): This proxy class accepts the existing <=5.13 RPC calls
# from any RPC client, while the new 6.0 RPC calls are served directly
# by the manager.
class _ComputeV5Proxy(object):
target = messaging.Target(version='5.13')
def __init__(self, manager):
self.manager = manager
def __getattr__(self, name):
# NOTE(sbauza): Proxy every method except the 5.x compatibility shims
# defined below.
return getattr(self.manager, name)
# 5.0 support for block_migration argument
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
return self.manager.pre_live_migration(context, instance, disk,
migrate_data)
# 5.1 support for legacy request_spec argument
def prep_resize(self, context, image, instance, instance_type,
request_spec, filter_properties, node,
clean_shutdown, migration, host_list):
if not isinstance(request_spec, objects.RequestSpec):
# Prior to compute RPC API 5.1 conductor would pass a legacy dict
# version of the request spec to compute and since Stein compute
# could be sending that back to conductor on reschedule, so if we
# got a dict convert it to an object.
# TODO(mriedem): We can drop this compat code when we only support
# compute RPC API >=6.0.
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
# We don't have to set the new flavor on the request spec because
# if we got here it was due to a reschedule from the compute and
# the request spec would already have the new flavor in it from the
# else block below.
self.manager.prep_resize(context, image, instance, instance_type,
request_spec, filter_properties, node,
clean_shutdown, migration, host_list)
# 5.2 support for optional request_spec argument
def resize_instance(self, context, instance, image,
migration, instance_type, clean_shutdown,
request_spec=None):
self.manager.resize_instance(context, instance, image,
migration, instance_type, clean_shutdown,
request_spec)
# 5.2 support for optional request_spec argument
def finish_resize(self, context, disk_info, image, instance,
migration, request_spec=None):
self.manager.finish_resize(context, disk_info, image, instance,
migration, request_spec)
# 5.2 support for optional request_spec argument
def revert_resize(self, context, instance, migration, request_spec=None):
self.manager.revert_resize(context, instance, migration, request_spec)
# 5.2 support for optional request_spec argument
def finish_revert_resize(
self, context, instance, migration, request_spec=None):
self.manager.finish_revert_resize(context, instance, migration,
request_spec)
# 5.2 support for optional request_spec argument
# 5.13 support for optional accel_uuids argument
def unshelve_instance(self, context, instance, image, filter_properties,
node, request_spec=None, accel_uuids=None):
self.manager.unshelve_instance(context, instance, image,
filter_properties, node, request_spec,
accel_uuids or [])
# 5.3 support for optional migration and limits arguments
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit,
migration=None, limits=None):
return self.manager.check_can_live_migrate_destination(
ctxt, instance, block_migration, disk_over_commit,
migration, limits)
# 5.11 support for optional accel_uuids argument
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None, host_list=None, accel_uuids=None):
self.manager.build_and_run_instance(
context, instance, image, request_spec,
filter_properties, accel_uuids, admin_password,
injected_files, requested_networks,
security_groups, block_device_mapping,
node, limits, host_list)
# 5.12 support for optional accel_uuids argument
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec,
accel_uuids=None):
self.manager.rebuild_instance(
context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec,
accel_uuids)
# 5.13 support for optional accel_uuids argument
def shelve_instance(self, context, instance, image_id,
clean_shutdown, accel_uuids=None):
self.manager.shelve_instance(context, instance, image_id,
clean_shutdown, accel_uuids)
# 5.13 support for optional accel_uuids argument
def shelve_offload_instance(self, context, instance, clean_shutdown,
accel_uuids=None):
self.manager.shelve_offload_instance(
context, instance, clean_shutdown, accel_uuids)
# 6.0 drop unused request_spec argument
def prep_snapshot_based_resize_at_dest(
self, ctxt, instance, flavor, nodename, migration, limits,
request_spec):
return self.manager.prep_snapshot_based_resize_at_dest(
ctxt, instance, flavor, nodename, migration, limits)
# 6.0 drop unused request_spec argument
def finish_snapshot_based_resize_at_dest(
self, ctxt, instance, migration, snapshot_id, request_spec):
self.manager.finish_snapshot_based_resize_at_dest(
ctxt, instance, migration, snapshot_id)
# 6.0 drop unused instance argument
def check_instance_shared_storage(self, ctxt, instance, data):
return self.manager.check_instance_shared_storage(ctxt, data)
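Taken together, the proxy lets a single compute service answer both majors: the manager itself serves 6.0 while the proxy registered in additional_endpoints serves <=5.13. A minimal sketch of that wiring, assuming a configured oslo.messaging transport; the bootstrap names here are illustrative, not nova's exact service setup code:
import oslo_messaging as messaging
manager = ComputeManager()
# additional_endpoints contains _ComputeV5Proxy(manager), so the server
# exposes a Target(version='6.0') endpoint and a 5.13 endpoint side by
# side; the dispatcher routes each call to a version-compatible endpoint.
endpoints = [manager] + manager.additional_endpoints
server = messaging.get_rpc_server(
    transport, messaging.Target(topic='compute', server='host1'),
    endpoints, executor='eventlet')
server.start()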


@ -383,6 +383,25 @@ class ComputeAPI(object):
* 5.13 - Add accel_uuids (accelerator requests) parameter to
shelve_instance(), shelve_offload_instance() and
unshelve_instance()
... Version 6.0 is functionally equivalent to 5.13, aside from
removing deprecated parameters and methods. Wallaby sends 6.0 by
default, can accept 5.x calls from Victoria nodes, and can be pinned to
5.x for Victoria compatibility. All new changes should go against 6.x.
* 6.0 - Remove 5.x compatibility
* ... - Remove add_aggregate_host()
* ... - Remove remove_aggregate_host()
* ... - Remove get_console_pool_info()
* ... - Remove get_console_topic()
* ... - Remove refresh_instance_security_rules()
* ... - Remove request_spec argument from
prep_snapshot_based_resize_at_dest() and
finish_snapshot_based_resize_at_dest()
* ... - Remove instance argument from check_instance_shared_storage()
* ... - Rename the instance_type argument of prep_resize() to flavor
* ... - Rename the instance_type argument of resize_instance() to
flavor
'''
VERSION_ALIASES = {
@ -400,6 +419,7 @@ class ComputeAPI(object):
'train': '5.3',
'ussuri': '5.11',
'victoria': '5.12',
'wallaby': '6.0',
}
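With the 'wallaby' alias in place, operators performing a rolling upgrade can cap already-upgraded nodes while older computes remain. A hedged nova.conf example using the standard [upgrade_levels] option; an explicit version such as 5.12 works equally well:
[upgrade_levels]
# Pin compute RPC to the Victoria level until every node runs Wallaby,
# then remove the pin (or set it to 'auto') and restart the services.
compute = victoria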
@property
@ -414,7 +434,7 @@ class ComputeAPI(object):
if _ROUTER is None:
with lockutils.lock('compute-rpcapi-router'):
if _ROUTER is None:
target = messaging.Target(topic=RPC_TOPIC, version='5.0')
target = messaging.Target(topic=RPC_TOPIC, version='6.0')
upgrade_level = CONF.upgrade_levels.compute
if upgrade_level == 'auto':
version_cap = self._determine_version_cap(target)
@ -432,6 +452,22 @@ class ComputeAPI(object):
_ROUTER = rpc.ClientRouter(default_client)
return _ROUTER
def _ver(self, ctxt, old):
"""Determine compatibility version.
Use this when a call may be sent as either the current major version
or an equivalent revision of the previous major. It is only safe for
methods whose signatures are identical in both majors. Returns either
old or the current major version.
:param old: The version under the previous major version that should
be sent if we're pinned to it.
"""
client = self.router.client(ctxt)
if client.can_send_version('6.0'):
return '6.0'
else:
return old
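For illustration, a hypothetical caller whose signature is identical in both majors would use the helper like this; 'some_method' is a placeholder, not a real RPC method:
version = self._ver(ctxt, '5.0')   # '6.0' unless capped below it
cctxt = self.router.client(ctxt).prepare(
    server=host, version=version)
cctxt.cast(ctxt, 'some_method', instance=instance)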
@staticmethod
def _determine_version_cap(target):
global LAST_VERSION
@ -515,26 +551,8 @@ class ComputeAPI(object):
serializer=serializer,
call_monitor_timeout=cmt)
# TODO(stephenfin): This is no longer used and can be removed in v6.0
def add_aggregate_host(self, ctxt, host, aggregate, host_param,
slave_info=None):
'''Add aggregate host.
:param ctxt: request context
:param aggregate:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '5.0'
cctxt = self.router.client(ctxt).prepare(
server=host, version=version)
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'add_fixed_ip_to_instance',
@ -545,14 +563,14 @@ class ComputeAPI(object):
kw = {'instance': instance, 'network_id': network_id,
'port_id': port_id, 'requested_ip': requested_ip,
'tag': tag}
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
cctxt = client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'attach_interface', **kw)
def attach_volume(self, ctxt, instance, bdm):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'attach_volume', instance=instance, bdm=bdm)
@ -561,7 +579,7 @@ class ComputeAPI(object):
block_migration, disk_over_commit,
migration, limits):
client = self.router.client(ctxt)
version = '5.3'
version = self._ver(ctxt, '5.3')
kwargs = {
'instance': instance,
'block_migration': block_migration,
@ -579,7 +597,7 @@ class ComputeAPI(object):
return cctxt.call(ctxt, 'check_can_live_migrate_destination', **kwargs)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
source = _compute_host(None, instance)
cctxt = client.prepare(server=source, version=version)
@ -587,18 +605,22 @@ class ComputeAPI(object):
instance=instance,
dest_check_data=dest_check_data)
def check_instance_shared_storage(self, ctxt, instance, data, host=None):
version = '5.0'
cctxt = self.router.client(ctxt).prepare(
def check_instance_shared_storage(self, ctxt, data, instance=None,
host=None):
msg_args = {'data': data}
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
if not client.can_send_version('6.0'):
# The 5.x API still expects the instance argument, so keep passing it
msg_args['instance'] = instance
cctxt = client.prepare(
server=_compute_host(host, instance), version=version)
return cctxt.call(ctxt, 'check_instance_shared_storage',
instance=instance,
data=data)
return cctxt.call(ctxt, 'check_instance_shared_storage', **msg_args)
def confirm_resize(self, ctxt, instance, migration, host,
cast=True):
client = self.router.client(ctxt)
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = client.prepare(
server=_compute_host(host, instance), version=version)
rpc_method = cctxt.cast if cast else cctxt.call
@ -628,7 +650,7 @@ class ComputeAPI(object):
:raises: oslo_messaging.exceptions.MessagingTimeout if the RPC call
times out
"""
version = '5.8'
version = self._ver(ctxt, '5.8')
client = self.router.client(ctxt)
if not client.can_send_version(version):
raise exception.MigrationError(reason=_('Compute too old'))
@ -641,14 +663,14 @@ class ComputeAPI(object):
instance=instance, migration=migration)
def detach_interface(self, ctxt, instance, port_id):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'detach_interface',
instance=instance, port_id=port_id)
def detach_volume(self, ctxt, instance, volume_id, attachment_id=None):
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
cctxt = client.prepare(server=_compute_host(None, instance),
version=version)
@ -667,7 +689,7 @@ class ComputeAPI(object):
}
client = self.router.client(ctxt)
version = '5.2'
version = self._ver(ctxt, '5.2')
if not client.can_send_version(version):
msg_args.pop('request_spec')
@ -686,7 +708,7 @@ class ComputeAPI(object):
}
client = self.router.client(ctxt)
version = '5.2'
version = self._ver(ctxt, '5.2')
if not client.can_send_version(version):
msg_args.pop('request_spec')
@ -723,8 +745,13 @@ class ComputeAPI(object):
:raises: oslo_messaging.exceptions.MessagingTimeout if the pre-check
RPC call times out
"""
msg_args = {'instance': instance,
'migration': migration,
'snapshot_id': snapshot_id}
client = self.router.client(ctxt)
version = '5.7'
version = self._ver(ctxt, '5.7')
if not client.can_send_version('6.0'):
msg_args['request_spec'] = request_spec
if not client.can_send_version(version):
raise exception.MigrationError(reason=_('Compute too old'))
cctxt = client.prepare(
@ -732,9 +759,7 @@ class ComputeAPI(object):
call_monitor_timeout=CONF.rpc_response_timeout,
timeout=CONF.long_rpc_timeout)
return cctxt.call(
ctxt, 'finish_snapshot_based_resize_at_dest',
instance=instance, migration=migration, snapshot_id=snapshot_id,
request_spec=request_spec)
ctxt, 'finish_snapshot_based_resize_at_dest', **msg_args)
def finish_revert_snapshot_based_resize_at_source(
self, ctxt, instance, migration):
@ -759,7 +784,7 @@ class ComputeAPI(object):
:raises: oslo_messaging.exceptions.MessagingTimeout if the RPC call
times out
"""
version = '5.10'
version = self._ver(ctxt, '5.10')
client = self.router.client(ctxt)
if not client.can_send_version(version):
raise exception.MigrationError(reason=_('Compute too old'))
@ -772,77 +797,62 @@ class ComputeAPI(object):
instance=instance, migration=migration)
def get_console_output(self, ctxt, instance, tail_length):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'get_console_output',
instance=instance, tail_length=tail_length)
# TODO(stephenfin): This is no longer used and can be removed in v6.0
def get_console_pool_info(self, ctxt, host, console_type):
version = '5.0'
cctxt = self.router.client(ctxt).prepare(
server=host, version=version)
return cctxt.call(ctxt, 'get_console_pool_info',
console_type=console_type)
# TODO(stephenfin): This is no longer used and can be removed in v6.0
def get_console_topic(self, ctxt, host):
version = '5.0'
cctxt = self.router.client(ctxt).prepare(
server=host, version=version)
return cctxt.call(ctxt, 'get_console_topic')
def get_diagnostics(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'get_diagnostics', instance=instance)
def get_instance_diagnostics(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
cctxt = client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_instance_diagnostics', instance=instance)
def get_vnc_console(self, ctxt, instance, console_type):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'get_vnc_console',
instance=instance, console_type=console_type)
def get_spice_console(self, ctxt, instance, console_type):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'get_spice_console',
instance=instance, console_type=console_type)
def get_rdp_console(self, ctxt, instance, console_type):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'get_rdp_console',
instance=instance, console_type=console_type)
def get_mks_console(self, ctxt, instance, console_type):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'get_mks_console',
instance=instance, console_type=console_type)
def get_serial_console(self, ctxt, instance, console_type):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'get_serial_console',
instance=instance, console_type=console_type)
def validate_console_port(self, ctxt, instance, port, console_type):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'validate_console_port',
@ -858,27 +868,27 @@ class ComputeAPI(object):
:param mode:
:param host: This is the host to send the message to.
'''
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=host, version=version)
return cctxt.call(ctxt, 'host_maintenance_mode',
host=host_param, mode=mode)
def host_power_action(self, ctxt, host, action):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=host, version=version)
return cctxt.call(ctxt, 'host_power_action', action=action)
def inject_network_info(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'inject_network_info', instance=instance)
def live_migration(self, ctxt, instance, dest, block_migration, host,
migration, migrate_data=None):
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
cctxt = client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'live_migration', instance=instance,
@ -886,7 +896,7 @@ class ComputeAPI(object):
migrate_data=migrate_data, migration=migration)
def live_migration_force_complete(self, ctxt, instance, migration):
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
cctxt = client.prepare(
server=_compute_host(migration.source_compute, instance),
@ -894,21 +904,21 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'live_migration_force_complete', instance=instance)
def live_migration_abort(self, ctxt, instance, migration_id):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'live_migration_abort', instance=instance,
migration_id=migration_id)
def pause_instance(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'pause_instance', instance=instance)
def post_live_migration_at_destination(self, ctxt, instance,
block_migration, host):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=host, version=version,
call_monitor_timeout=CONF.rpc_response_timeout,
@ -920,26 +930,32 @@ class ComputeAPI(object):
# the compute RPC API.
def pre_live_migration(self, ctxt, instance, block_migration, disk,
host, migrate_data):
version = '5.0'
version = '6.0'
msg_args = {}
client = self.router.client(ctxt)
if not client.can_send_version(version):
version = '5.0'
# The 5.0 RPC API method still expects the (unused) block_migration
# argument, so send a placeholder
msg_args['block_migration'] = None
cctxt = client.prepare(server=host, version=version,
timeout=CONF.long_rpc_timeout,
call_monitor_timeout=CONF.rpc_response_timeout)
return cctxt.call(ctxt, 'pre_live_migration',
instance=instance,
block_migration=block_migration,
disk=disk, migrate_data=migrate_data)
disk=disk, migrate_data=migrate_data,
**msg_args)
# TODO(mriedem): Drop compat for request_spec being a legacy dict once
# we drop support for 5.x computes.
def prep_resize(self, ctxt, instance, image, instance_type, host,
def prep_resize(self, ctxt, instance, image, flavor, host,
migration, request_spec, filter_properties, node,
clean_shutdown, host_list):
version = '6.0'
# TODO(mriedem): We should pass the ImageMeta object through to the
# compute but that also requires plumbing changes through the resize
# flow for other methods like resize_instance and finish_resize.
image_p = objects_base.obj_to_primitive(image)
msg_args = {'instance': instance,
'instance_type': instance_type,
'flavor': flavor,
'image': image_p,
'request_spec': request_spec,
'filter_properties': filter_properties,
@ -948,11 +964,14 @@ class ComputeAPI(object):
'clean_shutdown': clean_shutdown,
'host_list': host_list}
client = self.router.client(ctxt)
version = '5.1'
if not client.can_send_version(version):
msg_args['request_spec'] = (
request_spec.to_legacy_request_spec_dict())
version = '5.0'
version = '5.1'
del msg_args['flavor']
msg_args['instance_type'] = flavor
if not client.can_send_version(version):
version = '5.0'
msg_args['request_spec'] = (
request_spec.to_legacy_request_spec_dict())
cctxt = client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'prep_resize', **msg_args)
@ -990,17 +1009,22 @@ class ComputeAPI(object):
:raises: oslo_messaging.exceptions.MessagingTimeout if the pre-check
RPC call times out
"""
version = '5.5'
msg_args = {'instance': instance,
'flavor': flavor,
'nodename': nodename,
'migration': migration,
'limits': limits}
version = self._ver(ctxt, '5.5')
client = self.router.client(ctxt)
if not client.can_send_version('6.0'):
msg_args['request_spec'] = request_spec
if not client.can_send_version(version):
raise exception.MigrationPreCheckError(reason=_('Compute too old'))
cctxt = client.prepare(server=destination, version=version,
call_monitor_timeout=CONF.rpc_response_timeout,
timeout=CONF.long_rpc_timeout)
return cctxt.call(ctxt, 'prep_snapshot_based_resize_at_dest',
instance=instance, flavor=flavor, nodename=nodename,
migration=migration, limits=limits,
request_spec=request_spec)
**msg_args)
def prep_snapshot_based_resize_at_source(
self, ctxt, instance, migration, snapshot_id=None):
@ -1028,7 +1052,7 @@ class ComputeAPI(object):
:raises: oslo_messaging.exceptions.MessagingTimeout if the RPC call
times out
"""
version = '5.6'
version = self._ver(ctxt, '5.6')
client = self.router.client(ctxt)
if not client.can_send_version(version):
raise exception.MigrationError(reason=_('Compute too old'))
@ -1042,7 +1066,7 @@ class ComputeAPI(object):
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'reboot_instance',
@ -1068,7 +1092,7 @@ class ComputeAPI(object):
'request_spec': request_spec,
'accel_uuids': accel_uuids
}
version = '5.12'
version = self._ver(ctxt, '5.12')
client = self.router.client(ctxt)
if not client.can_send_version(version):
del msg_args['accel_uuids']
@ -1083,33 +1107,15 @@ class ComputeAPI(object):
recreate=recreate, on_shared_storage=on_shared_storage,
**msg_args)
# TODO(stephenfin): This is no longer used and can be removed in v6.0
def remove_aggregate_host(self, ctxt, host, aggregate, host_param,
slave_info=None):
'''Remove aggregate host.
:param ctxt: request context
:param aggregate:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '5.0'
cctxt = self.router.client(ctxt).prepare(
server=host, version=version)
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
instance=instance, address=address)
def remove_volume_connection(self, ctxt, instance, volume_id, host):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=host, version=version)
return cctxt.call(ctxt, 'remove_volume_connection',
@ -1117,7 +1123,7 @@ class ComputeAPI(object):
def rescue_instance(self, ctxt, instance, rescue_password,
rescue_image_ref=None, clean_shutdown=True):
version = '5.0'
version = self._ver(ctxt, '5.0')
msg_args = {'rescue_password': rescue_password,
'clean_shutdown': clean_shutdown,
'rescue_image_ref': rescue_image_ref,
@ -1127,27 +1133,32 @@ class ComputeAPI(object):
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'rescue_instance', **msg_args)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
def resize_instance(self, ctxt, instance, migration, image, flavor,
request_spec, clean_shutdown=True):
version = '6.0'
msg_args = {'instance': instance, 'migration': migration,
'image': image,
'instance_type': instance_type,
'flavor': flavor,
'clean_shutdown': clean_shutdown,
'request_spec': request_spec,
}
version = '5.2'
client = self.router.client(ctxt)
if not client.can_send_version(version):
msg_args.pop('request_spec')
version = '5.0'
version = self._ver(ctxt, '5.2')
del msg_args['flavor']
msg_args['instance_type'] = flavor
if not client.can_send_version(version):
msg_args.pop('request_spec')
version = '5.0'
cctxt = client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resize_instance', **msg_args)
def resume_instance(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'resume_instance', instance=instance)
@ -1161,7 +1172,7 @@ class ComputeAPI(object):
}
client = self.router.client(ctxt)
version = '5.2'
version = self._ver(ctxt, '5.2')
if not client.can_send_version(version):
msg_args.pop('request_spec')
@ -1190,7 +1201,7 @@ class ComputeAPI(object):
:raises: oslo_messaging.exceptions.MessagingTimeout if the RPC call
times out
"""
version = '5.9'
version = self._ver(ctxt, '5.9')
client = self.router.client(ctxt)
if not client.can_send_version(version):
raise exception.MigrationError(reason=_('Compute too old'))
@ -1205,18 +1216,20 @@ class ComputeAPI(object):
def rollback_live_migration_at_destination(self, ctxt, instance, host,
destroy_disks,
migrate_data):
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
cctxt = client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'rollback_live_migration_at_destination',
instance=instance, destroy_disks=destroy_disks,
migrate_data=migrate_data)
# TODO(sbauza): Remove this once we drop support for 5.x computes
def supports_numa_live_migration(self, ctxt):
"""Returns whether we can send 5.3, needed for NUMA live migration.
"""
client = self.router.client(ctxt)
return client.can_send_version('5.3')
version = self._ver(ctxt, '5.3')
return client.can_send_version(version)
def drop_move_claim_at_destination(self, ctxt, instance, host):
"""Called by the source of a live migration that's being rolled back.
@ -1227,20 +1240,20 @@ class ComputeAPI(object):
dropping the move claim before we drop the migration context from the
instance.
"""
version = '5.3'
version = self._ver(ctxt, '5.3')
client = self.router.client(ctxt)
cctxt = client.prepare(server=host, version=version)
cctxt.call(ctxt, 'drop_move_claim_at_destination', instance=instance)
def set_admin_password(self, ctxt, instance, new_pass):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'set_admin_password',
instance=instance, new_pass=new_pass)
def set_host_enabled(self, ctxt, host, enabled):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=host, version=version,
call_monitor_timeout=CONF.rpc_response_timeout,
@ -1249,7 +1262,7 @@ class ComputeAPI(object):
def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id,
new_attachment_id):
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
kwargs = dict(instance=instance,
old_volume_id=old_volume_id,
@ -1260,7 +1273,7 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'swap_volume', **kwargs)
def get_host_uptime(self, ctxt, host):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=host, version=version)
return cctxt.call(ctxt, 'get_host_uptime')
@ -1272,7 +1285,7 @@ class ComputeAPI(object):
'volume_id': volume_id, 'disk_bus': disk_bus,
'device_type': device_type, 'tag': tag,
'multiattach': multiattach}
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
cctxt = client.prepare(server=_compute_host(None, instance),
version=version,
@ -1282,7 +1295,7 @@ class ComputeAPI(object):
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'backup_instance',
@ -1292,7 +1305,7 @@ class ComputeAPI(object):
rotation=rotation)
def snapshot_instance(self, ctxt, instance, image_id):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'snapshot_instance',
@ -1300,7 +1313,7 @@ class ComputeAPI(object):
image_id=image_id)
def start_instance(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'start_instance', instance=instance)
@ -1308,46 +1321,46 @@ class ComputeAPI(object):
def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
msg_args = {'instance': instance,
'clean_shutdown': clean_shutdown}
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
rpc_method = cctxt.cast if do_cast else cctxt.call
return rpc_method(ctxt, 'stop_instance', **msg_args)
def suspend_instance(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms):
client = self.router.client(ctxt)
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = client.prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'terminate_instance', instance=instance, bdms=bdms)
def unpause_instance(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'unpause_instance', instance=instance)
def unrescue_instance(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'unrescue_instance', instance=instance)
def soft_delete_instance(self, ctxt, instance):
client = self.router.client(ctxt)
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = client.prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'soft_delete_instance', instance=instance)
def restore_instance(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'restore_instance', instance=instance)
@ -1361,7 +1374,7 @@ class ComputeAPI(object):
'accel_uuids': accel_uuids,
}
client = self.router.client(ctxt)
version = '5.13'
version = self._ver(ctxt, '5.13')
if not client.can_send_version(version):
if accel_uuids:
LOG.error("Shelve with accelerators is not supported as "
@ -1382,7 +1395,7 @@ class ComputeAPI(object):
'accel_uuids': accel_uuids,
}
client = self.router.client(ctxt)
version = '5.13'
version = self._ver(ctxt, '5.13')
if not client.can_send_version(version):
msg_kwargs.pop('accel_uuids')
version = '5.0'
@ -1392,7 +1405,7 @@ class ComputeAPI(object):
def unshelve_instance(self, ctxt, instance, host, request_spec, image=None,
filter_properties=None, node=None, accel_uuids=None):
version = '5.13'
version = self._ver(ctxt, '5.13')
msg_kwargs = {
'instance': instance,
'image': image,
@ -1416,7 +1429,7 @@ class ComputeAPI(object):
def volume_snapshot_create(self, ctxt, instance, volume_id,
create_info):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance,
@ -1424,7 +1437,7 @@ class ComputeAPI(object):
def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
delete_info):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance,
@ -1433,9 +1446,10 @@ class ComputeAPI(object):
def external_instance_event(self, ctxt, instances, events, host=None):
instance = instances[0]
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(host, instance),
version='5.0')
version=version)
cctxt.cast(ctxt, 'external_instance_event', instances=instances,
events=events)
@ -1462,7 +1476,7 @@ class ComputeAPI(object):
"accel_uuids": accel_uuids,
}
client = self.router.client(ctxt)
version = '5.11'
version = self._ver(ctxt, '5.11')
if not client.can_send_version(version):
kwargs.pop('accel_uuids')
version = '5.0'
@ -1470,36 +1484,27 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'build_and_run_instance', **kwargs)
def quiesce_instance(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
return cctxt.call(ctxt, 'quiesce_instance', instance=instance)
def unquiesce_instance(self, ctxt, instance, mapping=None):
version = '5.0'
version = self._ver(ctxt, '5.0')
cctxt = self.router.client(ctxt).prepare(
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'unquiesce_instance', instance=instance,
mapping=mapping)
# TODO(stephenfin): Remove this as it's nova-network only
def refresh_instance_security_rules(self, ctxt, instance, host):
version = '5.0'
client = self.router.client(ctxt)
cctxt = client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'refresh_instance_security_rules',
instance=instance)
def trigger_crash_dump(self, ctxt, instance):
version = '5.0'
version = self._ver(ctxt, '5.0')
client = self.router.client(ctxt)
cctxt = client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.cast(ctxt, "trigger_crash_dump", instance=instance)
def cache_images(self, ctxt, host, image_ids):
version = '5.4'
version = self._ver(ctxt, '5.4')
client = self.router.client(ctxt)
if not client.can_send_version(version):
raise exception.NovaException('Compute RPC version pin does not '


@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 55
SERVICE_VERSION = 56
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@ -197,6 +197,8 @@ SERVICE_VERSION_HISTORY = (
# Version 55: Compute RPC v5.13:
# Add support for qos interface attach
{'compute_rpc': '5.13'},
# Version 56: Compute RPC v6.0:
{'compute_rpc': '6.0'},
)
# This is used to raise an error at service startup if older than N-1 computes
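A simplified sketch (not the exact nova code) of how this history feeds the automatic RPC cap: the minimum nova-compute service version recorded in the database indexes into SERVICE_VERSION_HISTORY to find the newest RPC version every running compute understands.
def rpc_cap_for(min_service_version):
    # Entry n of SERVICE_VERSION_HISTORY describes service version n.
    return SERVICE_VERSION_HISTORY[min_service_version]['compute_rpc']
assert rpc_cap_for(55) == '5.13'
assert rpc_cap_for(56) == '6.0'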


@ -86,7 +86,7 @@ class StubComputeRPCAPI(compute_rpcapi.ComputeAPI):
@property
def router(self):
with lockutils.lock('compute-rpcapi-router'):
target = messaging.Target(topic='compute', version='5.0')
target = messaging.Target(topic='compute', version='6.0')
version_cap = self.version
serializer = objects_base.NovaObjectSerializer()
rpc.get_client(target, version_cap, serializer)


@ -479,9 +479,21 @@ class NUMALiveMigrationLegacyBase(NUMALiveMigrationPositiveBase):
if pin_source:
src_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
'5.2')
else:
# Since the RPC API was bumped to 6.0, the compute service must be
# pinned to the latest 5.x version here to exercise the legacy
# behaviours.
# TODO(sbauza): Remove this cruft
src_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
'5.13')
if pin_cond:
cond_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
'5.2')
else:
# Since the RPC API was bumped to 6.0, the compute service must be
# pinned to the latest 5.x version here to exercise the legacy
# behaviours.
# TODO(sbauza): Remove this cruft
cond_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
'5.13')
self.assertEqual(
not pin_source,


@ -80,6 +80,6 @@ class ComputeManagerTestCase(test.TestCase):
filter_properties)
self.compute.manager.build_and_run_instance(
self.context, instance, {}, request_spec,
filter_properties, block_device_mapping=[])
filter_properties, accel_uuids=[], block_device_mapping=[])
_test()
self.assertIn('Preserve this', instance.fault.message)


@ -1532,7 +1532,8 @@ class ComputeTestCase(BaseTestCase,
try:
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
{}, [],
block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
@ -1548,7 +1549,8 @@ class ComputeTestCase(BaseTestCase,
try:
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
{}, [],
block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
@ -1564,7 +1566,7 @@ class ComputeTestCase(BaseTestCase,
filter_properties = {'limits': {'memory_mb': None}}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties,
filter_properties, [],
block_device_mapping=[])
cn = self.rt.compute_nodes[NODENAME]
self.assertEqual(999999999999, cn.memory_mb_used)
@ -1577,7 +1579,7 @@ class ComputeTestCase(BaseTestCase,
filter_properties = {'limits': {'disk_gb': None}}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
filter_properties, [], block_device_mapping=[])
def test_create_multiple_instances_then_starve(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
@ -1587,7 +1589,7 @@ class ComputeTestCase(BaseTestCase,
"ephemeral_gb": 128}}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[], limits=limits)
{}, [], block_device_mapping=[], limits=limits)
cn = self.rt.compute_nodes[NODENAME]
self.assertEqual(1024, cn.memory_mb_used)
@ -1597,7 +1599,7 @@ class ComputeTestCase(BaseTestCase,
"ephemeral_gb": 256}}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[], limits=limits)
{}, [], block_device_mapping=[], limits=limits)
self.assertEqual(3072, cn.memory_mb_used)
self.assertEqual(768, cn.local_gb_used)
@ -1605,7 +1607,7 @@ class ComputeTestCase(BaseTestCase,
"ephemeral_gb": 8192}}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance,
{}, {}, {}, block_device_mapping=[], limits=limits)
{}, {}, {}, [], block_device_mapping=[], limits=limits)
# NOTE(danms): Since we no longer claim memory and disk, this should
# complete normally. In reality, this would have been rejected by
@ -1646,7 +1648,7 @@ class ComputeTestCase(BaseTestCase,
limits = {'memory_mb': oversub_limit_mb}
filter_properties = {'limits': limits}
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
filter_properties, [], block_device_mapping=[])
cn = self.rt.compute_nodes[NODENAME]
self.assertEqual(instance_mb, cn.memory_mb_used)
@ -1674,7 +1676,8 @@ class ComputeTestCase(BaseTestCase,
filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}
self.compute.build_and_run_instance(self.context, instance,
{}, {}, filter_properties, block_device_mapping=[])
{}, {}, filter_properties, [],
block_device_mapping=[])
def test_create_instance_with_oversubscribed_disk(self):
# Test passing of oversubscribed disk policy from the scheduler.
@ -1697,7 +1700,7 @@ class ComputeTestCase(BaseTestCase,
limits = {'disk_gb': oversub_limit_gb}
filter_properties = {'limits': limits}
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
filter_properties, [], block_device_mapping=[])
cn = self.rt.compute_nodes[NODENAME]
self.assertEqual(instance_gb, cn.local_gb_used)
@ -1706,7 +1709,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj({'node': None})
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
@ -1717,7 +1720,7 @@ class ComputeTestCase(BaseTestCase,
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
@ -1757,7 +1760,7 @@ class ComputeTestCase(BaseTestCase,
try:
self.compute.build_and_run_instance(self.context, instance, {},
{}, {}, block_device_mapping=[])
{}, {}, [], block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
@ -1771,7 +1774,7 @@ class ComputeTestCase(BaseTestCase,
try:
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
{}, [], block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
@ -1803,7 +1806,8 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, image={},
request_spec={}, block_device_mapping=[],
request_spec={}, accel_uuids=[],
block_device_mapping=[],
filter_properties={}, requested_networks=[],
injected_files=None, admin_password=None,
node=None)
@ -1826,7 +1830,8 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, request_spec={},
filter_properties={}, requested_networks=[],
filter_properties={}, accel_uuids=[],
requested_networks=[],
injected_files=None, admin_password=None,
node=None, block_device_mapping=[], image={})
# check state is failed even after the periodic poll
@ -1849,7 +1854,8 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, request_spec={},
filter_properties={}, requested_networks=[],
filter_properties={}, accel_uuids=[],
requested_networks=[],
injected_files=None, admin_password=None,
block_device_mapping=[], image={}, node=None)
# check state is failed even after the periodic poll
@ -1876,7 +1882,8 @@ class ComputeTestCase(BaseTestCase,
) as (mock_deallocate, mock_spawn):
mock_spawn.side_effect = fake
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
{}, [],
block_device_mapping=[])
mock_deallocate.assert_called_with(mock.ANY, mock.ANY, None)
self.assertTrue(mock_spawn.called)
@ -1888,7 +1895,8 @@ class ComputeTestCase(BaseTestCase,
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = exception.InstanceNotFound(instance_id=1)
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
{}, [],
block_device_mapping=[])
self.assertTrue(mock_save.called)
def test_run_instance_bails_on_deleting_instance(self):
@ -1901,7 +1909,8 @@ class ComputeTestCase(BaseTestCase,
expected={'task_state': 'bar'},
actual={'task_state': 'foo'})
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
{}, [],
block_device_mapping=[])
self.assertTrue(mock_save.called)
def test_can_terminate_on_error_state(self):
@ -1921,7 +1930,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
@ -1947,7 +1956,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
@ -2022,7 +2031,7 @@ class ComputeTestCase(BaseTestCase,
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
@ -2035,7 +2044,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
@ -2055,7 +2064,7 @@ class ComputeTestCase(BaseTestCase,
self.assertIsNone(instance['deleted_at'])
launch = timeutils.utcnow()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.refresh()
self.assertGreater(instance['launched_at'].replace(tzinfo=None),
launch)
@ -2075,7 +2084,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
@ -2099,7 +2108,7 @@ class ComputeTestCase(BaseTestCase,
# Ensure instance can be stopped.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
{}, [], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
inst_uuid = instance['uuid']
@ -2121,7 +2130,7 @@ class ComputeTestCase(BaseTestCase,
# Ensure instance can be started.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
extra = ['system_metadata', 'metadata']
@ -2161,7 +2170,7 @@ class ComputeTestCase(BaseTestCase,
instance.save()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF,
"vm_state": vm_states.SHELVED})
@ -2186,7 +2195,7 @@ class ComputeTestCase(BaseTestCase,
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
extra = ['system_metadata', 'metadata']
@ -2220,7 +2229,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.task_state = task_states.RESCUING
instance.save()
@ -2246,7 +2255,7 @@ class ComputeTestCase(BaseTestCase,
mock_context.return_value = self.context
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
fake_notifier.NOTIFICATIONS = []
instance.task_state = task_states.RESCUING
@ -2300,7 +2309,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
fake_notifier.NOTIFICATIONS = []
instance.task_state = task_states.UNRESCUING
@ -2444,7 +2453,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
extra = ['system_metadata', 'metadata']
inst_obj = objects.Instance.get_by_uuid(self.context,
instance['uuid'],
@ -2488,7 +2497,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
extra = ['system_metadata', 'metadata']
inst_obj = objects.Instance.get_by_uuid(self.context,
instance['uuid'],
@ -2508,7 +2517,7 @@ class ComputeTestCase(BaseTestCase,
ctxt = context.get_admin_context()
mock_context.return_value = ctxt
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
instance.task_state = task_states.PAUSING
instance.save()
fake_notifier.NOTIFICATIONS = []
@ -2552,7 +2561,7 @@ class ComputeTestCase(BaseTestCase,
mock_context.return_value = context
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.task_state = task_states.SUSPENDING
instance.save()
self.compute.suspend_instance(context, instance)
@ -2579,7 +2588,7 @@ class ComputeTestCase(BaseTestCase,
# Ensure vm_state is ERROR when suspend error occurs.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
with mock.patch.object(self.compute.driver, 'suspend',
side_effect=test.TestingException):
@ -2596,7 +2605,7 @@ class ComputeTestCase(BaseTestCase,
# restore to original value if suspend is not implemented by driver
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
with mock.patch.object(self.compute.driver, 'suspend',
side_effect=NotImplementedError('suspend test')):
@ -2612,7 +2621,7 @@ class ComputeTestCase(BaseTestCase,
# ensure rescued instance can be suspended and resumed.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.vm_state = vm_states.RESCUED
instance.task_state = task_states.SUSPENDING
@ -2640,7 +2649,7 @@ class ComputeTestCase(BaseTestCase,
bdms = block_device_obj.block_device_make_list(self.context, [])
mock_get_bdms.return_value = bdms
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.task_state = task_states.SUSPENDING
instance.save()
self.compute.suspend_instance(self.context, instance)
@ -2666,7 +2675,7 @@ class ComputeTestCase(BaseTestCase,
# ACTIVE state
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.vm_state = vm_states.SUSPENDED
instance.task_state = task_states.RESUMING
@ -2681,7 +2690,7 @@ class ComputeTestCase(BaseTestCase,
# Ensure vm_state is ERROR when resume error occurs.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.task_state = task_states.SUSPENDING
instance.save()
self.compute.suspend_instance(self.context, instance)
@ -2704,7 +2713,7 @@ class ComputeTestCase(BaseTestCase,
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(
@ -2734,7 +2743,7 @@ class ComputeTestCase(BaseTestCase,
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(
@ -2786,7 +2795,7 @@ class ComputeTestCase(BaseTestCase,
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(
@ -2805,7 +2814,7 @@ class ComputeTestCase(BaseTestCase,
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(
@ -2825,7 +2834,7 @@ class ComputeTestCase(BaseTestCase,
image_ref = instance['image_ref']
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
time_fixture.advance_time_delta(cur_time - old_time)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
@ -3322,7 +3331,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
self.compute.inject_network_info(self.context, instance=instance)
self.assertTrue(called['inject'])
self.compute.terminate_instance(self.context, instance, [])
@ -3331,7 +3340,7 @@ class ComputeTestCase(BaseTestCase,
# Ensure instance can be snapshotted.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
instance.save()
return instance
@ -3647,7 +3656,7 @@ class ComputeTestCase(BaseTestCase,
# Make sure we can get console output from instance.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
output = self.compute.get_console_output(self.context,
instance=instance, tail_length=None)
@ -3671,7 +3680,7 @@ class ComputeTestCase(BaseTestCase,
# Make sure we can get console output from instance.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
output = self.compute.get_console_output(self.context,
instance=instance, tail_length=2)
@ -3687,7 +3696,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_console_output, self.context,
@ -3710,7 +3719,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_console_output, self.context,
@ -3731,7 +3740,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
# Try with the full instance
console = self.compute.get_vnc_console(self.context, 'novnc',
@ -3837,7 +3846,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_vnc_console,
@ -3858,7 +3867,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_vnc_console,
@ -3878,7 +3887,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_vnc_console,
@ -3899,7 +3908,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
# Try with the full instance
console = self.compute.get_spice_console(self.context, 'spice-html5',
@ -3921,7 +3930,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_spice_console,
@ -3943,7 +3952,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_spice_console,
@ -3963,7 +3972,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_spice_console,
@ -3984,7 +3993,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
# Try with the full instance
console = self.compute.get_rdp_console(self.context, 'rdp-html5',
@ -4006,7 +4015,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_rdp_console,
@ -4027,7 +4036,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
self.assertRaises(messaging.ExpectedException,
self.compute.get_rdp_console,
@ -4149,7 +4158,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
diagnostics = self.compute.get_diagnostics(self.context,
instance=instance)
@ -4160,7 +4169,7 @@ class ComputeTestCase(BaseTestCase,
# Make sure we can get diagnostics for an instance.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
diagnostics = self.compute.get_instance_diagnostics(self.context,
instance=instance)
@ -4242,6 +4251,7 @@ class ComputeTestCase(BaseTestCase,
filter_properties={},
image={'name':
expected_image_name},
accel_uuids=[],
block_device_mapping=[])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
instance.refresh()
@ -4298,7 +4308,7 @@ class ComputeTestCase(BaseTestCase,
build_inst_abort)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
self.assertGreaterEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type, 'compute.instance.create.start')
@ -4323,7 +4333,7 @@ class ComputeTestCase(BaseTestCase,
build_inst_fail)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
self.assertGreaterEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
@ -4347,7 +4357,8 @@ class ComputeTestCase(BaseTestCase,
build_inst_fail)
self.compute.build_and_run_instance(
self.context, instance, {}, {}, {}, block_device_mapping=[])
self.context, instance, {}, {}, {}, [],
block_device_mapping=[])
self.assertGreaterEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
@ -4372,7 +4383,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
fake_notifier.NOTIFICATIONS = []
time_fixture.advance_time_delta(cur_time - old_time)
self.compute.terminate_instance(self.context, instance, [])
@ -4418,7 +4429,7 @@ class ComputeTestCase(BaseTestCase,
@mock.patch.object(self.compute.network_api, 'deallocate_for_instance')
def _do_test(mock_deallocate, mock_allocate):
self.compute.build_and_run_instance(self.context, instance, {},
{}, {}, block_device_mapping=[])
{}, {}, [], block_device_mapping=[])
instance.refresh()
self.assertEqual(vm_states.ERROR, instance.vm_state)
@ -4492,7 +4503,7 @@ class ComputeTestCase(BaseTestCase,
mock_prep.side_effect = messaging.RemoteError('', '', '')
self.compute.build_and_run_instance(
self.context, instance, {}, {}, {}, block_device_mapping=[])
self.context, instance, {}, {}, {}, [], block_device_mapping=[])
self.compute.terminate_instance(self.context, instance, [])
mock_prep.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
@ -4583,7 +4594,7 @@ class ComputeTestCase(BaseTestCase,
'request_spec': {}}),
("prep_resize", task_states.RESIZE_PREP,
{'image': {},
'instance_type': instance_type,
'flavor': instance_type,
'request_spec': {},
'filter_properties': {},
'node': None,
@ -4593,7 +4604,7 @@ class ComputeTestCase(BaseTestCase,
("resize_instance", task_states.RESIZE_PREP,
{'migration': migration,
'image': {},
'instance_type': {},
'flavor': {},
'clean_shutdown': True,
'request_spec': {}}),
("pause_instance", task_states.PAUSING),
@ -4684,7 +4695,7 @@ class ComputeTestCase(BaseTestCase,
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
flavor=instance_type,
image={}, request_spec={},
filter_properties={}, node=None,
migration=None, clean_shutdown=True,
@ -4926,7 +4937,7 @@ class ComputeTestCase(BaseTestCase,
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
flavor=instance_type,
image={}, request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
@ -4943,7 +4954,7 @@ class ComputeTestCase(BaseTestCase,
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={},
instance_type=jsonutils.to_primitive(instance_type),
flavor=jsonutils.to_primitive(instance_type),
clean_shutdown=True, request_spec=request_spec)
# assert bdm is unchanged
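The other rename threaded through these hunks: prep_resize and resize_instance now accept the flavor under the kwarg name flavor instead of the legacy instance_type. The value and semantics are unchanged; a sketch of the caller-side update, reusing the fixture names above:

# Before (RPC 5.x): prep_resize(..., instance_type=instance_type, ...)
# After (RPC 6.0): same call, renamed kwarg.
self.compute.prep_resize(self.context, instance=instance,
                         flavor=instance_type, image={},
                         request_spec=request_spec, filter_properties={},
                         node=None, clean_shutdown=True, migration=None,
                         host_list=[])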
@ -5015,7 +5026,7 @@ class ComputeTestCase(BaseTestCase,
request_spec = objects.RequestSpec()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
flavor=instance_type,
image={},
request_spec=request_spec,
filter_properties={},
@ -5066,7 +5077,7 @@ class ComputeTestCase(BaseTestCase,
time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time))
inst_ref = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, inst_ref, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
time_fixture.advance_time_delta(cur_time - old_time)
fake_notifier.NOTIFICATIONS = []
@ -5138,7 +5149,7 @@ class ComputeTestCase(BaseTestCase,
flavor_id = new_type['flavorid']
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {},
request_spec, {},
request_spec, {}, [],
block_device_mapping=[])
instance.host = 'foo'
@ -5146,7 +5157,7 @@ class ComputeTestCase(BaseTestCase,
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=new_type, image={},
flavor=new_type, image={},
request_spec={}, filter_properties={}, node=None,
clean_shutdown=True, migration=None, host_list=[])
@ -5154,7 +5165,7 @@ class ComputeTestCase(BaseTestCase,
self.context.elevated(),
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={}, instance_type=new_type,
migration=migration, image={}, flavor=new_type,
clean_shutdown=True, request_spec=request_spec)
time_fixture.advance_time_delta(cur_time - old_time)
fake_notifier.NOTIFICATIONS = []
@ -5197,7 +5208,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
time_fixture.advance_time_delta(cur_time - old_time)
fake_notifier.NOTIFICATIONS = []
@ -5206,7 +5217,7 @@ class ComputeTestCase(BaseTestCase,
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=self.default_flavor, image={},
flavor=self.default_flavor, image={},
request_spec={}, filter_properties={}, node=None,
clean_shutdown=True, migration=None, host_list=[])
db.migration_get_by_instance_and_status(self.context.elevated(),
@ -5253,13 +5264,13 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.host = None
instance.save()
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
self.context, instance=instance,
instance_type=self.default_flavor, image={},
flavor=self.default_flavor, image={},
request_spec={},
filter_properties={}, node=None,
clean_shutdown=True, migration=mock.Mock(),
@ -5279,11 +5290,11 @@ class ComputeTestCase(BaseTestCase,
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=self.default_flavor, image={},
flavor=self.default_flavor, image={},
request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
@ -5298,7 +5309,7 @@ class ComputeTestCase(BaseTestCase,
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
instance_type=jsonutils.to_primitive(
flavor=jsonutils.to_primitive(
self.default_flavor),
clean_shutdown=True, request_spec=request_spec)
# NOTE(comstud): error path doesn't use objects, so our object
@ -5320,11 +5331,11 @@ class ComputeTestCase(BaseTestCase,
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=self.default_flavor, image={},
flavor=self.default_flavor, image={},
request_spec=request_spec,
filter_properties={}, node=None,
migration=None, host_list=[],
@ -5339,7 +5350,7 @@ class ComputeTestCase(BaseTestCase,
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
instance_type=jsonutils.to_primitive(
flavor=jsonutils.to_primitive(
self.default_flavor),
clean_shutdown=True, request_spec=request_spec)
# NOTE(comstud): error path doesn't use objects, so our object
@ -5356,11 +5367,11 @@ class ComputeTestCase(BaseTestCase,
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=self.default_flavor, image={},
flavor=self.default_flavor, image={},
request_spec=request_spec, filter_properties={}, node=None,
clean_shutdown=True, migration=None, host_list=[])
@ -5395,7 +5406,7 @@ class ComputeTestCase(BaseTestCase,
mock_check_is_bfv):
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={},
instance_type=jsonutils.to_primitive(self.default_flavor),
flavor=jsonutils.to_primitive(self.default_flavor),
clean_shutdown=clean_shutdown, request_spec=request_spec)
mock_notify_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
@ -5455,7 +5466,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.build_and_run_instance(self.context, instance, {},
request_spec, {},
block_device_mapping=[])
[], block_device_mapping=[])
# Confirm the instance size before the resize starts
instance.refresh()
@ -5475,7 +5486,7 @@ class ComputeTestCase(BaseTestCase,
new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_ref,
flavor=new_instance_type_ref,
image={}, request_spec=request_spec,
filter_properties={}, node=None, clean_shutdown=True,
migration=None, host_list=None)
@ -5502,7 +5513,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
instance_type=new_instance_type_ref,
flavor=new_instance_type_ref,
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
@ -5792,7 +5803,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.build_and_run_instance(self.context, instance, {},
request_spec, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.refresh()
flavor = objects.Flavor.get_by_id(self.context,
@ -5812,7 +5823,7 @@ class ComputeTestCase(BaseTestCase,
new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_ref,
flavor=new_instance_type_ref,
image={}, request_spec=request_spec,
filter_properties={}, node=None,
migration=None, clean_shutdown=True, host_list=[])
@ -5838,7 +5849,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
instance_type=new_instance_type_ref,
flavor=new_instance_type_ref,
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
@ -5943,12 +5954,12 @@ class ComputeTestCase(BaseTestCase,
self.compute.build_and_run_instance(self.context, instance, {},
request_spec, {},
block_device_mapping=[])
[], block_device_mapping=[])
new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_ref,
flavor=new_instance_type_ref,
image={}, request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
@ -5966,7 +5977,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
instance_type=new_instance_type_ref,
flavor=new_instance_type_ref,
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
@ -6007,11 +6018,11 @@ class ComputeTestCase(BaseTestCase,
request_spec = objects.RequestSpec()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=self.default_flavor,
flavor=self.default_flavor,
image={},
request_spec=request_spec,
filter_properties={},
@ -6025,7 +6036,7 @@ class ComputeTestCase(BaseTestCase,
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
instance_type=jsonutils.to_primitive(
flavor=jsonutils.to_primitive(
self.default_flavor),
clean_shutdown=True, request_spec=request_spec)
# NOTE(comstud): error path doesn't use objects, so our object
@ -6064,7 +6075,6 @@ class ComputeTestCase(BaseTestCase,
with mock.patch.object(self.compute.network_api,
'setup_networks_on_host') as mock_setup:
ret = self.compute.pre_live_migration(c, instance=instance,
block_migration=False,
disk=None,
migrate_data=migrate_data)
self.assertIs(migrate_data, ret)
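pre_live_migration also drops its standalone block_migration flag in 6.0; the test above now calls it with only disk and migrate_data. A hedged sketch of the updated call, assuming any block-migration detail now travels inside the migrate_data object rather than as a separate boolean:

# Before (5.x): pre_live_migration(ctxt, instance, block_migration,
#                                  disk, migrate_data)
# After (6.0): the separate flag is gone; migrate_data is assumed to
# carry whatever block-migration state the driver needs.
ret = self.compute.pre_live_migration(c, instance=instance, disk=None,
                                      migrate_data=migrate_data)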
@ -6729,7 +6739,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
@ -8289,7 +8299,7 @@ class ComputeTestCase(BaseTestCase,
mock_snapshot_get.side_effect = [{'status': 'creating'},
{'status': 'available'}] * 2
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
self.compute.quiesce_instance(self.context, instance)
self.compute.unquiesce_instance(self.context, instance, mapping)
self.compute.terminate_instance(self.context, instance, [])
@ -8310,7 +8320,7 @@ class ComputeTestCase(BaseTestCase,
with mock.patch.object(self.compute.driver, 'spawn') as mock_spawn:
mock_spawn.side_effect = test.TestingException('Preserve this')
self.compute.build_and_run_instance(
self.context, instance, {}, {}, {},
self.context, instance, {}, {}, {}, [],
block_device_mapping=[])
self.assertEqual('Preserve this', instance.fault.message)
@ -8463,7 +8473,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance_obj(params, services=True)
instance_uuid = instance['uuid']
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance.refresh()
self.assertIsNone(instance['task_state'])
@ -8907,7 +8917,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance_obj()
instance_uuid = instance['uuid']
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance = objects.Instance.get_by_uuid(self.context,
instance_uuid)
@ -8969,7 +8979,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance_obj(params={'image_ref': ''})
self.stub_out('nova.tests.fixtures.GlanceFixture.show', self.fake_show)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"vm_state": vm_states.ERROR,
@ -9177,7 +9187,7 @@ class ComputeAPITestCase(BaseTestCase):
with mock.patch.object(self.compute, '_prep_block_device'):
self.compute.build_and_run_instance(self.context,
volume_backed_inst_1, {}, {}, {},
volume_backed_inst_1, {}, {}, {}, [],
block_device_mapping=[])
self.assertRaises(exception.InstanceNotRescuable,
@ -9200,7 +9210,7 @@ class ComputeAPITestCase(BaseTestCase):
with mock.patch.object(self.compute, '_prep_block_device'):
self.compute.build_and_run_instance(self.context,
volume_backed_inst_2, {}, {}, {},
volume_backed_inst_2, {}, {}, {}, [],
block_device_mapping=[])
self.assertRaises(exception.InstanceNotRescuable,
@ -9814,7 +9824,7 @@ class ComputeAPITestCase(BaseTestCase):
params={'architecture': ''})
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
instance = db.instance_get_by_uuid(self.context,
instance['uuid'])
self.assertNotEqual(instance['architecture'], 'Unknown')
@ -11595,7 +11605,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stub_out('nova.compute.manager.ComputeManager._prep_block_device',
mock.MagicMock())
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
[], block_device_mapping=[])
self.compute.terminate_instance(self.context, instance, bdms)
@ -11606,7 +11616,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_inject_network_info(self):
instance = self._create_fake_instance_obj(params={'host': CONF.host})
self.compute.build_and_run_instance(self.context,
instance, {}, {}, {}, block_device_mapping=[])
instance, {}, {}, {}, [], block_device_mapping=[])
instance = self.compute_api.get(self.context, instance['uuid'])
self.compute_api.inject_network_info(self.context, instance)
@ -13155,7 +13165,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
self.compute.prep_resize(self.context, image=None,
instance=inst_obj,
instance_type=self.instance_type,
flavor=self.instance_type,
request_spec=self.request_spec,
filter_properties={}, migration=mock.Mock(),
node=None,
@ -13562,7 +13572,7 @@ class EvacuateHostTestCase(BaseTestCase):
db.instance_update(self.context, self.inst.uuid,
{"task_state": task_states.SCHEDULING})
self.compute.build_and_run_instance(self.context,
self.inst, {}, {}, {}, block_device_mapping=[])
self.inst, {}, {}, {}, [], block_device_mapping=[])
self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',
lambda *a, **kw: True)
@ -13725,7 +13735,8 @@ class ComputeInjectedFilesTestCase(BaseTestCase):
def _test(self, injected_files, decoded_files):
self.expected = decoded_files
self.compute.build_and_run_instance(self.context, self.instance, {},
{}, {}, block_device_mapping=[],
{}, {}, [],
block_device_mapping=[],
injected_files=injected_files)
def test_injected_none(self):
@ -13758,7 +13769,7 @@ class ComputeInjectedFilesTestCase(BaseTestCase):
self.assertRaises(exception.Base64Exception,
self.compute.build_and_run_instance,
self.context, self.instance, {}, {}, {},
self.context, self.instance, {}, {}, {}, [],
block_device_mapping=[],
injected_files=injected_files)


@ -718,7 +718,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.build_and_run_instance(self.context, instance,
mock.sentinel.image,
mock.sentinel.request_spec,
{})
{}, [])
self.assertEqual(3, mock_sem.__enter__.call_count)
def test_max_concurrent_builds_limited(self):
@ -6092,6 +6092,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
'hosts': [[self.compute.host,
'fake-node']]}}
self.resource_provider_mapping = None
self.accel_uuids = []
self.useFixture(fixtures.SpawnIsSynchronousFixture())
@ -6480,6 +6481,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
accel_uuids=self.accel_uuids,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
@ -6506,6 +6508,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
accel_uuids=self.accel_uuids,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=[objects.NetworkRequest(
@ -6555,6 +6558,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
accel_uuids=self.accel_uuids,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
@ -6614,6 +6618,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
accel_uuids=self.accel_uuids,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
@ -6706,7 +6711,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits, host_list=fake_host_list)
limits=self.limits, host_list=fake_host_list,
accel_uuids=self.accel_uuids)
mock_build_and_run.assert_called_once_with(self.context,
instance,
@ -6750,7 +6756,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits, host_list=fake_host_list)
limits=self.limits, host_list=fake_host_list,
accel_uuids=self.accel_uuids)
mock_build_and_run.assert_called_once_with(self.context,
instance,
@ -6804,7 +6811,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits, host_list=fake_host_list)
limits=self.limits, host_list=fake_host_list,
accel_uuids=self.accel_uuids)
mock_build_and_run.assert_called_once_with(self.context,
instance,
@ -6852,6 +6860,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance,
self.image, request_spec={},
filter_properties={},
accel_uuids=[],
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
@ -6905,6 +6914,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
accel_uuids=self.accel_uuids,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
@ -6958,6 +6968,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
accel_uuids=self.accel_uuids,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
@ -7016,6 +7027,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
accel_uuids=self.accel_uuids,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
@ -7073,7 +7085,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
instance = objects.Instance(uuid=uuids.instance)
for i in range(0, 10):
self.compute.build_and_run_instance(self.context, instance, None,
None, None)
None, None, [])
self.assertEqual(10, mock_failed.call_count)
@ -7086,7 +7098,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
instance = objects.Instance(uuid=uuids.instance)
for i in range(0, 10):
self.compute.build_and_run_instance(self.context, instance, None,
None, None)
None, None, [])
mock_failed.assert_not_called()
@ -7110,7 +7122,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
instance = objects.Instance(uuid=uuids.instance)
for i in range(0, 10):
self.compute.build_and_run_instance(self.context, instance, None,
None, None)
None, None, [])
self.assertEqual(2, mock_failed.call_count)
self.assertEqual(8, mock_succeeded.call_count)
@ -7125,7 +7137,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
instance = objects.Instance(uuid=uuids.instance)
for i in range(0, 10):
self.compute.build_and_run_instance(self.context, instance, None,
None, None)
None, None, [])
self.assertEqual(10, mock_failed.call_count)
mock_succeeded.assert_not_called()
@ -7151,7 +7163,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.assertRaises(test.TestingException,
self.compute.build_and_run_instance,
self.context, instance, None,
None, None)
None, None, [])
self.assertEqual(10, mock_failed.call_count)
mock_succeeded.assert_not_called()
@ -7368,6 +7380,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
accel_uuids=self.accel_uuids,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
@ -7407,7 +7420,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping,
self.node, self.limits, self.filter_properties,
self.accel_uuids)
request_spec=[], accel_uuids=self.accel_uuids)
mock_save.assert_called_once_with()
mock_notify.assert_has_calls([
@ -8380,7 +8393,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.TestResizeError, self.compute.resize_instance,
context=self.context, instance=self.instance, image=self.image,
migration=self.migration,
instance_type='type', clean_shutdown=True,
flavor='type', clean_shutdown=True,
request_spec=objects.RequestSpec())
# Assert that we set the migration to an error state
@ -8402,7 +8415,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
exception.ResizeError, self.compute.resize_instance,
context=self.context, instance=self.instance, image=self.image,
migration=self.migration,
instance_type='type', clean_shutdown=True,
flavor='type', clean_shutdown=True,
request_spec=objects.RequestSpec())
# Assert the instance vm_state was unchanged.
@ -8422,7 +8435,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.TestResizeError, self.compute.resize_instance,
context=self.context, instance=self.instance, image=self.image,
migration=self.migration,
instance_type='type', clean_shutdown=True,
flavor='type', clean_shutdown=True,
request_spec=objects.RequestSpec())
# Assert that we did not set the migration to an error state
@ -9112,7 +9125,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
connector = compute.driver.get_volume_connector(instance)
r = compute.pre_live_migration(self.context, instance,
False, {}, migrate_data)
{}, migrate_data)
mock_notify_about_inst.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
@ -9186,7 +9199,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertRaises(test.TestingException,
compute.pre_live_migration,
self.context, instance, False, {}, migrate_data)
self.context, instance, {}, migrate_data)
self.assertEqual(vol1_orig_attachment_id, vol1_bdm.attachment_id)
self.assertEqual(vol2_orig_attachment_id, vol2_bdm.attachment_id)
@ -9249,7 +9262,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertRaises(test.TestingException,
compute.pre_live_migration,
self.context, instance, False, {}, migrate_data)
self.context, instance, {}, migrate_data)
self.assertEqual(2, mock_vol_api.attachment_create.call_count)
@ -10815,7 +10828,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
# Setup mocks.
flavor = self.instance.flavor
limits = objects.SchedulerLimits()
request_spec = objects.RequestSpec()
# resize_claim normally sets instance.migration_context and returns
# a MoveClaim which is a context manager. Rather than deal with
# mocking a context manager we just set the migration_context on the
@ -10830,7 +10842,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
# Run the code.
mc = self.compute.prep_snapshot_based_resize_at_dest(
self.context, self.instance, flavor, 'nodename',
self.migration, limits, request_spec)
self.migration, limits)
self.assertIs(mc, self.instance.migration_context)
# Assert the mock calls.
_send_prep_resize_notifications.assert_has_calls([
@ -10854,7 +10866,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
# Setup mocks.
flavor = self.instance.flavor
limits = objects.SchedulerLimits()
request_spec = objects.RequestSpec()
ex1 = exception.ConsumerAllocationRetrievalFailed(
consumer_uuid=self.instance.uuid, error='oops')
get_allocs.side_effect = ex1
@ -10870,7 +10881,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
messaging.ExpectedException,
self.compute.prep_snapshot_based_resize_at_dest,
self.context, self.instance, flavor, 'nodename',
self.migration, limits, request_spec)
self.migration, limits)
wrapped_exc = ex2.exc_info[1]
# The original error should be in the MigrationPreCheckError which
# itself is in the ExpectedException.
@ -10904,7 +10915,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
# Setup mocks.
flavor = self.instance.flavor
limits = objects.SchedulerLimits()
request_spec = objects.RequestSpec()
ex1 = exception.ComputeResourcesUnavailable(reason='numa')
with test.nested(
mock.patch.object(self.compute, '_send_prep_resize_notifications'),
@ -10917,7 +10927,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
messaging.ExpectedException,
self.compute.prep_snapshot_based_resize_at_dest,
self.context, self.instance, flavor, 'nodename',
self.migration, limits, request_spec)
self.migration, limits)
wrapped_exc = ex2.exc_info[1]
# The original error should be in the MigrationPreCheckError which
# itself is in the ExpectedException.
@ -11147,7 +11157,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
"""Tests the error handling on the finish_snapshot_based_resize_at_dest
method.
"""
request_spec = objects.RequestSpec()
self.instance.task_state = task_states.RESIZE_MIGRATED
with mock.patch.object(
self.compute, '_finish_snapshot_based_resize_at_dest',
@ -11155,8 +11164,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
ex = self.assertRaises(
test.TestingException,
self.compute.finish_snapshot_based_resize_at_dest,
self.context, self.instance, self.migration, uuids.snapshot_id,
request_spec)
self.context, self.instance, self.migration, uuids.snapshot_id)
# Assert the non-decorator mock calls.
_finish.assert_called_once_with(
self.context, self.instance, self.migration, uuids.snapshot_id)
@ -11194,7 +11202,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
inst_save, apply_migration_context, get_bdms, snapshot_id=None):
"""Happy path test for finish_snapshot_based_resize_at_dest."""
# Setup the fake instance.
request_spec = objects.RequestSpec()
self.instance.task_state = task_states.RESIZE_MIGRATED
nwinfo = network_model.NetworkInfo([
network_model.VIF(id=uuids.port_id)])
@ -11214,8 +11221,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
network_api.get_instance_nw_info.return_value = nwinfo
# Run that big beautiful code!
self.compute.finish_snapshot_based_resize_at_dest(
self.context, self.instance, self.migration, snapshot_id,
request_spec)
self.context, self.instance, self.migration, snapshot_id)
# Check the changes to the instance and migration object.
self.assertEqual(vm_states.RESIZED, self.instance.vm_state)
self.assertIsNone(self.instance.task_state)
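Both snapshot-based resize methods lose their trailing request_spec parameter in 6.0, which is why the tests above stop constructing an unused objects.RequestSpec(). The calls reduce to (fixture names as in the tests):

# Neither destination-side method accepts a request_spec any longer.
mc = self.compute.prep_snapshot_based_resize_at_dest(
    self.context, self.instance, flavor, 'nodename',
    self.migration, limits)
self.compute.finish_snapshot_based_resize_at_dest(
    self.context, self.instance, self.migration, snapshot_id)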


@ -134,9 +134,18 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
if timeout:
prepare_extra_kwargs['timeout'] = timeout
expected_kwargs = kwargs.copy()
# NOTE(sbauza): If expected args are provided, use them as the
# expected kwargs, adding back only the _return_value that is
# passed via kwargs.
if expected_args:
expected_kwargs.update(expected_args)
expected_kwargs = expected_args.copy()
if '_return_value' in kwargs:
# Copy the existing return value
expected_kwargs['_return_value'] = kwargs['_return_value']
# NOTE(sbauza): No expected args were provided, so use the kwargs
# as the expected args too.
else:
expected_kwargs = kwargs.copy()
if 'host_param' in expected_kwargs:
expected_kwargs['host'] = expected_kwargs.pop('host_param')
else:
@ -157,7 +166,10 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
elif 'prepare_server' in kwargs:
# This is the "server" kwarg to the prepare() method so remove it
# from both kwargs that go to the actual RPC method call.
expected_kwargs.pop('prepare_server')
# NOTE(sbauza): When the expected kwargs were copied from
# expected_args, they may not contain a prepare_server entry,
# hence the None default.
expected_kwargs.pop('prepare_server', None)
host = kwargs.pop('prepare_server')
else:
host = kwargs['instance']['host']
@ -191,31 +203,48 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
**prepare_extra_kwargs)
rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs)
def test_add_aggregate_host(self):
self._test_compute_api('add_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={})
def test_add_fixed_ip_to_instance(self):
self._test_compute_api('add_fixed_ip_to_instance', 'cast',
instance=self.fake_instance_obj, network_id='id',
version='5.0')
version='6.0')
def test_attach_interface(self):
self._test_compute_api('attach_interface', 'call',
instance=self.fake_instance_obj, network_id='id',
port_id='id2', version='5.0', requested_ip='192.168.1.50',
port_id='id2', version='6.0', requested_ip='192.168.1.50',
tag='foo')
def test_attach_volume(self):
self._test_compute_api('attach_volume', 'cast',
instance=self.fake_instance_obj, bdm=self.fake_volume_bdm,
version='5.0')
version='6.0')
def test_check_instance_shared_storage(self):
expected_args = {'data': 'foo'}
self._test_compute_api('check_instance_shared_storage', 'call',
expected_args,
instance=self.fake_instance_obj, data='foo',
version='5.0')
version='6.0')
def test_check_instance_shared_storage_old_compute(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = compute_rpcapi.ComputeAPI()
rpcapi.router.client = mock.Mock()
mock_client = mock.MagicMock()
rpcapi.router.client.return_value = mock_client
mock_client.can_send_version.return_value = False
mock_cctx = mock.MagicMock()
mock_client.prepare.return_value = mock_cctx
rpcapi.check_instance_shared_storage(
ctxt, instance=self.fake_instance_obj, data='foo')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('6.0')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
mock_cctx.call.assert_called_with(
ctxt, 'check_instance_shared_storage',
instance=self.fake_instance_obj, data='foo')
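The assertions above pin down the negotiation pattern used throughout the 6.0 client tests: probe whether 6.0 can be sent and, if not, prepare the call context against the old 5.0 interface. A minimal sketch of that shape (hypothetical layout, not the verbatim nova client code; the doubled can_send_version('6.0') assertion suggests the real client probes more than once):

def check_instance_shared_storage(self, ctxt, instance, data, host=None):
    # Hypothetical sketch of the fallback shape the test asserts.
    client = self.router.client(ctxt)
    version = '6.0'
    if not client.can_send_version(version):
        version = '5.0'  # pin to the legacy interface for 5.x computes
    cctxt = client.prepare(server=instance.host, version=version)
    return cctxt.call(ctxt, 'check_instance_shared_storage',
                      instance=instance, data=data)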
def test_confirm_resize_cast(self):
self._test_compute_api('confirm_resize', 'cast',
@ -229,19 +258,19 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_detach_interface(self):
self._test_compute_api('detach_interface', 'cast',
version='5.0', instance=self.fake_instance_obj,
version='6.0', instance=self.fake_instance_obj,
port_id='fake_id')
def test_detach_volume(self):
self._test_compute_api('detach_volume', 'cast',
instance=self.fake_instance_obj, volume_id='id',
attachment_id='fake_id', version='5.0')
attachment_id='fake_id', version='6.0')
def test_finish_resize(self):
self._test_compute_api('finish_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'foo'},
image='image', disk_info='disk_info', host='host',
request_spec=self.fake_request_spec_obj, version='5.2')
request_spec=self.fake_request_spec_obj, version='6.0')
def test_finish_resize_old_compute(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
@ -260,7 +289,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
disk_info='disk_info', host='host',
request_spec=self.fake_request_spec_obj)
mock_client.can_send_version.assert_called_once_with('5.2')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.2')])
mock_client.prepare.assert_called_with(
server='host', version='5.0')
mock_cctx.cast.assert_called_with(
@ -272,7 +302,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
self._test_compute_api('finish_revert_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
host='host', request_spec=self.fake_request_spec_obj,
version='5.2')
version='6.0')
def test_finish_revert_resize_old_compute(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
@ -290,7 +320,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
migration=mock.sentinel.migration, host='host',
request_spec=self.fake_request_spec_obj)
mock_client.can_send_version.assert_called_once_with('5.2')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.2')])
mock_client.prepare.assert_called_with(
server='host', version='5.0')
mock_cctx.cast.assert_called_with(
@ -300,54 +331,47 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_get_console_output(self):
self._test_compute_api('get_console_output', 'call',
instance=self.fake_instance_obj, tail_length='tl',
version='5.0')
def test_get_console_pool_info(self):
self._test_compute_api('get_console_pool_info', 'call',
console_type='type', host='host')
def test_get_console_topic(self):
self._test_compute_api('get_console_topic', 'call', host='host')
version='6.0')
def test_get_diagnostics(self):
self._test_compute_api('get_diagnostics', 'call',
instance=self.fake_instance_obj, version='5.0')
instance=self.fake_instance_obj, version='6.0')
def test_get_instance_diagnostics(self):
expected_args = {'instance': self.fake_instance_obj}
self._test_compute_api('get_instance_diagnostics', 'call',
expected_args, instance=self.fake_instance_obj,
version='5.0')
version='6.0')
def test_get_vnc_console(self):
self._test_compute_api('get_vnc_console', 'call',
instance=self.fake_instance_obj, console_type='type',
version='5.0')
version='6.0')
def test_get_spice_console(self):
self._test_compute_api('get_spice_console', 'call',
instance=self.fake_instance_obj, console_type='type',
version='5.0')
version='6.0')
def test_get_rdp_console(self):
self._test_compute_api('get_rdp_console', 'call',
instance=self.fake_instance_obj, console_type='type',
version='5.0')
version='6.0')
def test_get_serial_console(self):
self._test_compute_api('get_serial_console', 'call',
instance=self.fake_instance_obj, console_type='serial',
version='5.0')
version='6.0')
def test_get_mks_console(self):
self._test_compute_api('get_mks_console', 'call',
instance=self.fake_instance_obj, console_type='webmks',
version='5.0')
version='6.0')
def test_validate_console_port(self):
self._test_compute_api('validate_console_port', 'call',
instance=self.fake_instance_obj, port="5900",
console_type="novnc", version='5.0')
console_type="novnc", version='6.0')
def test_host_maintenance_mode(self):
self._test_compute_api('host_maintenance_mode', 'call',
@ -366,14 +390,14 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
instance=self.fake_instance_obj, dest='dest',
block_migration='blockity_block', host='tsoh',
migration='migration',
migrate_data={}, version='5.0')
migrate_data={}, version='6.0')
def test_live_migration_force_complete(self):
migration = migration_obj.Migration()
migration.id = 1
migration.source_compute = 'fake'
ctxt = context.RequestContext('fake_user', 'fake_project')
version = '5.0'
version = '6.0'
rpcapi = compute_rpcapi.ComputeAPI()
rpcapi.router.client = mock.Mock()
mock_client = mock.MagicMock()
@ -392,13 +416,13 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_live_migration_abort(self):
self._test_compute_api('live_migration_abort', 'cast',
instance=self.fake_instance_obj,
migration_id='1', version='5.0')
migration_id='1', version='6.0')
def test_post_live_migration_at_destination(self):
self.flags(long_rpc_timeout=1234)
self._test_compute_api('post_live_migration_at_destination', 'call',
instance=self.fake_instance_obj,
block_migration='block_migration', host='host', version='5.0',
block_migration='block_migration', host='host', version='6.0',
timeout=1234, call_monitor_timeout=60)
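Tests like this one pin down the long-RPC-timeout plumbing: self.flags(long_rpc_timeout=1234) must surface as timeout=1234 on client.prepare(), next to call_monitor_timeout=60 (the default rpc_response_timeout). A sketch of the client-side pattern being asserted, approximated rather than copied from nova.compute.rpcapi:

from nova import conf

CONF = conf.CONF


def post_live_migration_at_destination(self, ctxt, instance,
                                       block_migration, host):
    # Long-running operation: allow up to long_rpc_timeout overall,
    # while oslo.messaging heartbeats the call every
    # rpc_response_timeout seconds so a dead peer is noticed early.
    client = self.router.client(ctxt)
    cctxt = client.prepare(
        server=host, version='6.0',
        call_monitor_timeout=CONF.rpc_response_timeout,
        timeout=CONF.long_rpc_timeout)
    return cctxt.call(
        ctxt, 'post_live_migration_at_destination',
        instance=instance, block_migration=block_migration)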
def test_pause_instance(self):
@ -413,18 +437,20 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
self._test_compute_api('swap_volume', 'cast',
instance=self.fake_instance_obj, old_volume_id='oldid',
new_volume_id='newid', new_attachment_id=uuids.attachment_id,
version='5.0')
version='6.0')
def test_restore_instance(self):
self._test_compute_api('restore_instance', 'cast',
instance=self.fake_instance_obj, version='5.0')
instance=self.fake_instance_obj, version='6.0')
def test_pre_live_migration(self):
self.flags(long_rpc_timeout=1234)
expected_args = {'instance': self.fake_instance_obj,
'disk': 'disk', 'host': 'host', 'migrate_data': None}
self._test_compute_api('pre_live_migration', 'call',
instance=self.fake_instance_obj,
expected_args, instance=self.fake_instance_obj,
block_migration='block_migration', disk='disk', host='host',
migrate_data=None, version='5.0',
migrate_data=None, version='6.0',
call_monitor_timeout=60, timeout=1234)
def test_supports_numa_live_migration(self):
@ -439,7 +465,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
mock_client.can_send_version.return_value = True
self.assertTrue(rpcapi.supports_numa_live_migration(ctxt))
mock_client.can_send_version.assert_has_calls(
[mock.call('5.3'), mock.call('5.3')])
[mock.call('6.0'), mock.call('5.3'),
mock.call('6.0'), mock.call('6.0')])
def test_check_can_live_migrate_destination(self):
self.flags(long_rpc_timeout=1234)
@ -448,7 +475,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
destination='dest',
block_migration=False,
disk_over_commit=False,
version='5.3', call_monitor_timeout=60,
version='6.0', call_monitor_timeout=60,
migration='migration',
limits='limits',
timeout=1234)
@ -472,6 +499,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
migration='migration',
limits='limits')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.3')])
mock_client.prepare.assert_called_with(server='dest', version='5.0',
call_monitor_timeout=mock.ANY,
timeout=mock.ANY)
@ -483,34 +512,73 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_drop_move_claim_at_destination(self):
self._test_compute_api('drop_move_claim_at_destination', 'call',
instance=self.fake_instance_obj, host='host',
version='5.3', _return_value=None)
version='6.0', _return_value=None)
def test_prep_resize(self):
self._test_compute_api('prep_resize', 'cast',
instance=self.fake_instance_obj,
instance_type=self.fake_flavor_obj,
flavor=self.fake_flavor_obj,
image='fake_image', host='host',
request_spec='fake_spec',
filter_properties={'fakeprop': 'fakeval'},
migration='migration',
node='node', clean_shutdown=True, host_list=None,
version='5.1')
version='6.0')
def test_prep_resize_old_compute(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = compute_rpcapi.ComputeAPI()
rpcapi.router.client = mock.Mock()
mock_client = mock.MagicMock()
rpcapi.router.client.return_value = mock_client
# So we expect that the message is backported and therefore the
# request_spec is dropped
mock_client.can_send_version.return_value = False
mock_cctx = mock.MagicMock()
mock_client.prepare.return_value = mock_cctx
request_spec = objects.RequestSpec()
rpcapi.prep_resize(
ctxt, instance=self.fake_instance_obj,
flavor=self.fake_flavor_obj,
image='fake_image', host='host',
request_spec=request_spec,
filter_properties={'fakeprop': 'fakeval'},
migration='migration',
node='node', clean_shutdown=True, host_list=None)
mock_client.can_send_version.assert_has_calls([
mock.call('6.0'), mock.call('5.1'),
])
mock_client.prepare.assert_called_with(
server='host', version='5.0')
# We expect the cast to use a dict for request_spec and an
# instance_type parameter instead of flavor.
mock_cctx.cast.assert_called_with(
ctxt, 'prep_resize', instance=self.fake_instance_obj,
instance_type=self.fake_flavor_obj,
image='fake_image',
request_spec=request_spec.to_legacy_request_spec_dict(),
filter_properties={'fakeprop': 'fakeval'},
migration='migration',
node='node', clean_shutdown=True, host_list=None)
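The assertions above imply a two-step negotiation in the client. A sketch of what ComputeAPI.prep_resize plausibly does to satisfy them, inferred from this test's assertions rather than quoted from nova:

def prep_resize(self, ctxt, instance, image, flavor, host, migration,
                request_spec, filter_properties, node, clean_shutdown,
                host_list):
    # Sketch of the fallback chain the test above pins down.
    msg_args = {'instance': instance, 'image': image, 'flavor': flavor,
                'request_spec': request_spec,
                'filter_properties': filter_properties,
                'migration': migration, 'node': node,
                'clean_shutdown': clean_shutdown, 'host_list': host_list}
    client = self.router.client(ctxt)
    version = '6.0'
    if not client.can_send_version(version):
        # Pre-6.0 computes still expect the old parameter name.
        version = '5.1'
        msg_args['instance_type'] = msg_args.pop('flavor')
    if not client.can_send_version(version):
        # Pre-5.1 computes expect a legacy dict, not a RequestSpec.
        version = '5.0'
        msg_args['request_spec'] = (
            msg_args['request_spec'].to_legacy_request_spec_dict())
    cctxt = client.prepare(server=host, version=version)
    cctxt.cast(ctxt, 'prep_resize', **msg_args)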
def test_prep_snapshot_based_resize_at_dest(self):
"""Tests happy path for prep_snapshot_based_resize_at_dest rpc call"""
expected_args = {'instance': self.fake_instance_obj,
'flavor': self.fake_flavor_obj,
'nodename': 'node',
'migration': migration_obj.Migration(),
'limits': {},
'destination': 'dest'}
self.flags(long_rpc_timeout=1234)
self._test_compute_api(
'prep_snapshot_based_resize_at_dest', 'call',
expected_args,
# compute method kwargs
instance=self.fake_instance_obj,
flavor=self.fake_flavor_obj,
nodename='node',
migration=migration_obj.Migration(),
limits={},
**expected_args,
request_spec=objects.RequestSpec(),
destination='dest',
# client.prepare kwargs
version='5.5', call_monitor_timeout=60, timeout=1234,
version='6.0', call_monitor_timeout=60, timeout=1234,
# assert the expected return value
_return_value=mock.sentinel.migration_context)
@ -535,6 +603,39 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
destination='dest')
self.assertIn('Compute too old', str(ex))
def test_prep_snapshot_based_resize_at_dest_6_0_non_compat(self):
"""Tests when the destination compute service is not compatible with
the 6.0 RPC API version.
"""
self.flags(long_rpc_timeout=1234)
rpcapi = compute_rpcapi.ComputeAPI()
rpcapi.router.client = mock.Mock()
mock_client = mock.MagicMock()
rpcapi.router.client.return_value = mock_client
mock_client.can_send_version.side_effect = [False, False, True]
mock_cctx = mock.MagicMock()
mock_client.prepare.return_value = mock_cctx
expected_args = {'instance': self.fake_instance_obj,
'flavor': self.fake_flavor_obj,
'nodename': 'node',
'migration': migration_obj.Migration(),
'limits': {},
'request_spec': objects.RequestSpec()}
rpcapi.prep_snapshot_based_resize_at_dest(
self.context,
**expected_args,
destination='dest')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('6.0'),
mock.call('5.5')])
mock_client.prepare.assert_called_with(
server='dest', version='5.5',
call_monitor_timeout=60, timeout=1234)
mock_cctx.call.assert_called_with(
self.context, 'prep_snapshot_based_resize_at_dest',
**expected_args)
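The side_effect list is what drives the negotiation here: each element answers one successive can_send_version() check in order, so [False, False, True] walks the client down from 6.0 to the 5.5 pin. In isolation:

from unittest import mock

client = mock.MagicMock()
# One answer per successive can_send_version() check.
client.can_send_version.side_effect = [False, False, True]

assert client.can_send_version('6.0') is False
assert client.can_send_version('6.0') is False
assert client.can_send_version('5.5') is True
client.can_send_version.assert_has_calls(
    [mock.call('6.0'), mock.call('6.0'), mock.call('5.5')])

Why 6.0 is probed twice is an internal detail of the client (presumably one check when selecting the pin and one elsewhere); the test only cares that the final pin lands on 5.5.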
def test_prep_snapshot_based_resize_at_source(self):
"""Tests happy path for prep_snapshot_based_resize_at_source rpc call
"""
@ -546,7 +647,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
migration=migration_obj.Migration(),
snapshot_id=uuids.snapshot_id,
# client.prepare kwargs
version='5.6', call_monitor_timeout=60, timeout=1234)
version='6.0', call_monitor_timeout=60, timeout=1234)
@mock.patch('nova.rpc.ClientRouter.client')
def test_prep_snapshot_based_resize_at_source_old_compute(
@ -567,16 +668,19 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_finish_snapshot_based_resize_at_dest(self):
"""Tests happy path for finish_snapshot_based_resize_at_dest."""
expected_args = {'instance': self.fake_instance_obj,
'migration': migration_obj.Migration(
dest_compute='dest'),
'snapshot_id': uuids.snapshot_id}
self.flags(long_rpc_timeout=1234)
self._test_compute_api(
'finish_snapshot_based_resize_at_dest', 'call',
expected_args,
# compute method kwargs
instance=self.fake_instance_obj,
migration=migration_obj.Migration(dest_compute='dest'),
snapshot_id=uuids.snapshot_id,
**expected_args,
request_spec=objects.RequestSpec(),
# client.prepare kwargs
version='5.7', prepare_server='dest',
version='6.0', prepare_server='dest',
call_monitor_timeout=60, timeout=1234)
@mock.patch('nova.rpc.ClientRouter.client')
@ -596,6 +700,37 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
request_spec=objects.RequestSpec())
self.assertIn('Compute too old', str(ex))
def test_finish_snapshot_based_resize_at_dest_6_0_non_compat(self):
"""Tests when the destination compute service is not compatible with
the 6.0 RPC API version.
"""
self.flags(long_rpc_timeout=1234)
rpcapi = compute_rpcapi.ComputeAPI()
rpcapi.router.client = mock.Mock()
mock_client = mock.MagicMock()
rpcapi.router.client.return_value = mock_client
mock_client.can_send_version.side_effect = [False, False, True]
mock_cctx = mock.MagicMock()
mock_client.prepare.return_value = mock_cctx
expected_args = {'instance': self.fake_instance_obj,
'migration': migration_obj.Migration(
dest_compute='dest'),
'snapshot_id': uuids.snapshot_id,
'request_spec': objects.RequestSpec()}
rpcapi.finish_snapshot_based_resize_at_dest(
self.context,
**expected_args)
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('6.0'),
mock.call('5.7')])
mock_client.prepare.assert_called_with(
server='dest', version='5.7',
call_monitor_timeout=60, timeout=1234)
mock_cctx.call.assert_called_with(
self.context, 'finish_snapshot_based_resize_at_dest',
**expected_args)
def test_confirm_snapshot_based_resize_at_source(self):
"""Tests happy path for confirm_snapshot_based_resize_at_source."""
self.flags(long_rpc_timeout=1234)
@ -605,7 +740,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
instance=self.fake_instance_obj,
migration=migration_obj.Migration(source_compute='source'),
# client.prepare kwargs
version='5.8', prepare_server='source',
version='6.0', prepare_server='source',
call_monitor_timeout=60, timeout=1234)
@mock.patch('nova.rpc.ClientRouter.client')
@ -632,7 +767,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
instance=self.fake_instance_obj,
migration=migration_obj.Migration(dest_compute='dest'),
# client.prepare kwargs
version='5.9', prepare_server='dest',
version='6.0', prepare_server='dest',
call_monitor_timeout=60, timeout=1234)
@mock.patch('nova.rpc.ClientRouter.client')
@ -660,7 +795,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
instance=self.fake_instance_obj,
migration=migration_obj.Migration(source_compute='source'),
# client.prepare kwargs
version='5.10', prepare_server='source',
version='6.0', prepare_server='source',
call_monitor_timeout=60, timeout=1234)
@mock.patch('nova.rpc.ClientRouter.client')
@ -698,7 +833,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
bdms=[], instance=self.fake_instance_obj, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
limits=None, request_spec=None, accel_uuids=[], version='5.12')
limits=None, request_spec=None, accel_uuids=[], version='6.0')
def test_rebuild_instance_old_rpcapi(self):
# With rpcapi < 5.12, accel_uuids must be dropped in the client call.
@ -730,7 +865,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
node=None, host=None, **rebuild_args)
mock_client.can_send_version.assert_called_once_with('5.12')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.12')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
mock_cctx.cast.assert_called_with( # No accel_uuids
@ -743,44 +879,32 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
self._test_compute_api('reserve_block_device_name', 'call',
instance=self.fake_instance_obj, device='device',
volume_id='id', disk_bus='ide', device_type='cdrom',
tag='foo', multiattach=True, version='5.0',
tag='foo', multiattach=True, version='6.0',
timeout=1234, call_monitor_timeout=60,
_return_value=objects_block_dev.BlockDeviceMapping())
# TODO(stephenfin): Remove this since it's nova-network only
def test_refresh_instance_security_rules(self):
expected_args = {'instance': self.fake_instance_obj}
self._test_compute_api('refresh_instance_security_rules', 'cast',
expected_args, host='fake_host',
instance=self.fake_instance_obj, version='5.0')
def test_remove_aggregate_host(self):
self._test_compute_api('remove_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={})
def test_remove_fixed_ip_from_instance(self):
self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
instance=self.fake_instance_obj, address='addr',
version='5.0')
version='6.0')
def test_remove_volume_connection(self):
self._test_compute_api('remove_volume_connection', 'call',
instance=self.fake_instance_obj, volume_id='id', host='host',
version='5.0')
version='6.0')
def test_rescue_instance(self):
self._test_compute_api('rescue_instance', 'cast',
instance=self.fake_instance_obj, rescue_password='pw',
rescue_image_ref='fake_image_ref',
clean_shutdown=True, version='5.0')
clean_shutdown=True, version='6.0')
def test_resize_instance(self):
self._test_compute_api('resize_instance', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
image='image', instance_type=self.fake_flavor_obj,
image='image', flavor=self.fake_flavor_obj,
clean_shutdown=True, request_spec=self.fake_request_spec_obj,
version='5.2')
version='6.0')
def test_resize_instance_old_compute(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
@ -796,10 +920,11 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
rpcapi.resize_instance(
ctxt, instance=self.fake_instance_obj,
migration=mock.sentinel.migration, image='image',
instance_type='instance_type', clean_shutdown=True,
flavor='instance_type', clean_shutdown=True,
request_spec=self.fake_request_spec_obj)
mock_client.can_send_version.assert_called_once_with('5.2')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.2')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
mock_cctx.cast.assert_called_with(
@ -815,7 +940,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
self._test_compute_api('revert_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
host='host', request_spec=self.fake_request_spec_obj,
version='5.2')
version='6.0')
def test_revert_resize_old_compute(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
@ -833,7 +958,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
migration=mock.sentinel.migration, host='host',
request_spec=self.fake_request_spec_obj)
mock_client.can_send_version.assert_called_once_with('5.2')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.2')])
mock_client.prepare.assert_called_with(
server='host', version='5.0')
mock_cctx.cast.assert_called_with(
@ -843,7 +969,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_set_admin_password(self):
self._test_compute_api('set_admin_password', 'call',
instance=self.fake_instance_obj, new_pass='pw',
version='5.0')
version='6.0')
def test_set_host_enabled(self):
self.flags(long_rpc_timeout=600, rpc_response_timeout=120)
@ -870,12 +996,12 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_stop_instance_cast(self):
self._test_compute_api('stop_instance', 'cast',
instance=self.fake_instance_obj,
clean_shutdown=True, version='5.0')
clean_shutdown=True, version='6.0')
def test_stop_instance_call(self):
self._test_compute_api('stop_instance', 'call',
instance=self.fake_instance_obj,
clean_shutdown=True, version='5.0')
clean_shutdown=True, version='6.0')
def test_suspend_instance(self):
self._test_compute_api('suspend_instance', 'cast',
@ -884,7 +1010,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
instance=self.fake_instance_obj, bdms=[],
version='5.0')
version='6.0')
def test_unpause_instance(self):
self._test_compute_api('unpause_instance', 'cast',
@ -892,12 +1018,12 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_unrescue_instance(self):
self._test_compute_api('unrescue_instance', 'cast',
instance=self.fake_instance_obj, version='5.0')
instance=self.fake_instance_obj, version='6.0')
def test_shelve_instance(self):
self._test_compute_api('shelve_instance', 'cast',
instance=self.fake_instance_obj, image_id='image_id',
clean_shutdown=True, accel_uuids=None, version='5.13')
clean_shutdown=True, accel_uuids=None, version='6.0')
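The old-rpcapi test that follows pins the compatibility behaviour: when the target compute cannot speak 5.13, accel_uuids is silently dropped from the cast. A sketch of the client-side fallback it implies, again inferred from the assertions rather than copied from nova:

def shelve_instance(self, ctxt, instance, image_id, clean_shutdown,
                    accel_uuids):
    msg_args = {'instance': instance, 'image_id': image_id,
                'clean_shutdown': clean_shutdown,
                'accel_uuids': accel_uuids}
    client = self.router.client(ctxt)
    version = '6.0'
    if not client.can_send_version(version):
        version = '5.13'
    if not client.can_send_version(version):
        # Pre-5.13 computes know nothing about accelerators; drop
        # the argument rather than fail the cast.
        version = '5.0'
        msg_args.pop('accel_uuids')
    cctxt = client.prepare(server=instance.host, version=version)
    cctxt.cast(ctxt, 'shelve_instance', **msg_args)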
def test_shelve_instance_old_rpcapi(self):
# With rpcapi < 5.13, accel_uuids must be dropped in the client call.
@ -915,7 +1041,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
accel_uuids=[],
image_id='image_id', clean_shutdown=True)
mock_client.can_send_version.assert_called_once_with('5.13')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.13')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
mock_cctx.cast.assert_called_with( # No accel_uuids
@ -926,7 +1053,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_shelve_offload_instance(self):
self._test_compute_api('shelve_offload_instance', 'cast',
instance=self.fake_instance_obj,
clean_shutdown=True, accel_uuids=None, version='5.13')
clean_shutdown=True, accel_uuids=None, version='6.0')
def test_shelve_offload_instance_old_rpcapi(self):
# With rpcapi < 5.13, accel_uuids must be dropped in the client call.
@ -944,7 +1071,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
clean_shutdown=True,)
mock_client.can_send_version.assert_called_once_with('5.13')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.13')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
mock_cctx.cast.assert_called_with( # No accel_uuids
@ -957,13 +1085,13 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
instance=self.fake_instance_obj, host='host', image='image',
filter_properties={'fakeprop': 'fakeval'}, node='node',
request_spec=self.fake_request_spec_obj, accel_uuids=None,
version='5.13')
version='6.0')
def test_cache_image(self):
self._test_compute_api('cache_images', 'call',
host='host', image_ids=['image'],
call_monitor_timeout=60, timeout=1800,
version='5.4')
version='6.0')
def test_cache_image_pinned(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
@ -992,6 +1120,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
image='image')
mock_client.can_send_version.assert_has_calls([
mock.call('6.0'),
mock.call('5.13'),
mock.call('5.2'),
])
@ -1004,18 +1133,18 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_volume_snapshot_create(self):
self._test_compute_api('volume_snapshot_create', 'cast',
instance=self.fake_instance_obj, volume_id='fake_id',
create_info={}, version='5.0')
create_info={}, version='6.0')
def test_volume_snapshot_delete(self):
self._test_compute_api('volume_snapshot_delete', 'cast',
instance=self.fake_instance_obj, volume_id='fake_id',
snapshot_id='fake_id2', delete_info={}, version='5.0')
snapshot_id='fake_id2', delete_info={}, version='6.0')
def test_external_instance_event(self):
self._test_compute_api('external_instance_event', 'cast',
instances=[self.fake_instance_obj],
events=['event'],
version='5.0')
version='6.0')
def test_build_and_run_instance(self):
# With rpcapi 5.11, when a list of accel_uuids is passed as a param,
@ -1029,7 +1158,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
admin_password='passwd', injected_files=None,
requested_networks=['network1'], security_groups=None,
block_device_mapping=None, node='node', limits=[],
host_list=None, accel_uuids=accel_uuids, version='5.11')
host_list=None, accel_uuids=accel_uuids, version='6.0')
def test_build_and_run_instance_old_rpcapi(self):
# With rpcapi < 5.11, accel_uuids must be dropped in the client call.
@ -1049,7 +1178,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
filter_properties={},
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'])
mock_client.can_send_version.assert_called_once_with('5.11')
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.11')])
mock_client.prepare.assert_called_with(
server='host', version='5.0')
mock_cctx.cast.assert_called_with( # No accel_uuids
@ -1063,15 +1193,15 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_quiesce_instance(self):
self._test_compute_api('quiesce_instance', 'call',
instance=self.fake_instance_obj, version='5.0')
instance=self.fake_instance_obj, version='6.0')
def test_unquiesce_instance(self):
self._test_compute_api('unquiesce_instance', 'cast',
instance=self.fake_instance_obj, mapping=None, version='5.0')
instance=self.fake_instance_obj, mapping=None, version='6.0')
def test_trigger_crash_dump(self):
self._test_compute_api('trigger_crash_dump', 'cast',
instance=self.fake_instance_obj, version='5.0')
instance=self.fake_instance_obj, version='6.0')
@mock.patch('nova.compute.rpcapi.LOG')
@mock.patch('nova.objects.Service.get_minimum_version')

View File
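The remaining hunks update the manager-side shelve tests for the 6.0 signatures: accel_uuids is now a required parameter of the manager methods, so every direct call and every test double (such as fake_soi below) has to accept it. Roughly, the signatures being exercised; decorators and bodies are elided, and the parameter lists are assumptions based on these test calls:

def shelve_offload_instance(self, context, instance, clean_shutdown,
                            accel_uuids):
    """Remove a shelved instance from the hypervisor."""


def unshelve_instance(self, context, instance, image,
                      filter_properties, node, request_spec,
                      accel_uuids):
    """Restore a shelve-offloaded instance on this host."""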

@ -242,7 +242,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.stub_out('nova.objects.Instance.save', stub_instance_save)
self.compute.shelve_offload_instance(self.context, instance,
clean_shutdown=clean_shutdown)
clean_shutdown=clean_shutdown,
accel_uuids=[])
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='shelve_offload', phase='start',
@ -509,7 +510,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
mock_save.side_effect = check_save
self.compute.unshelve_instance(self.context, instance, image=None,
filter_properties=filter_properties, node=node,
request_spec=objects.RequestSpec())
request_spec=objects.RequestSpec(), accel_uuids=[])
mock_notify_instance_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
@ -604,7 +605,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute.unshelve_instance,
self.context, instance, image=None,
filter_properties=filter_properties, node=node,
request_spec=objects.RequestSpec())
request_spec=objects.RequestSpec(),
accel_uuids=[])
mock_notify_instance_action.assert_called_once_with(
self.context, instance, 'fake-mini', action='unshelve',
@ -638,7 +640,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute.unshelve_instance(
self.context, instance, image=None,
filter_properties={}, node='fake-node', request_spec=request_spec)
filter_properties={}, node='fake-node', request_spec=request_spec,
accel_uuids=[])
mock_update_pci.assert_called_once_with(
self.context, self.compute.reportclient, [],
@ -667,7 +670,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.assertRaises(
exception.UnexpectedResourceProviderNameForPCIRequest,
self.compute.unshelve_instance, self.context, instance, image=None,
filter_properties={}, node='fake-node', request_spec=request_spec)
filter_properties={}, node='fake-node', request_spec=request_spec,
accel_uuids=[])
mock_update_pci.assert_called_once_with(
self.context, self.compute.reportclient, [],
@ -724,7 +728,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
data = []
def fake_soi(context, instance, **kwargs):
def fake_soi(context, instance, accel_uuids, **kwargs):
data.append(instance.uuid)
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
@ -753,7 +757,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
data = []
def fake_soi(context, instance, **kwargs):
def fake_soi(context, instance, accel_uuids, **kwargs):
data.append(instance.uuid)
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: