Remove unused function parameters

These were picked up with pylint:

  pylint -d all -e unused-argument nova/compute/manager.py

Note that this returns additional entries, but we can't remove those
because they're either (a) part of the API or (b) actually used by
decorators. To summarize the changes (a before/after sketch of the
pattern follows the list):

- Remove 'context' argument from '_set_instance_obj_error_state'
- Remove 'context' argument from '_retry_reboot'
- Remove 'context' argument from '_get_power_state'
- Remove 'context' argument from '_check_instance_exists'
- Remove 'context' argument from '_update_instance_after_spawn'
- Remove 'dp_name' argument from '_get_bound_arq_resources'
- Remove 'context' argument from '_get_power_off_values'
- Remove 'context' argument from '_power_off_instance'
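
The pattern is identical throughout: drop the parameter from the
signature and update every call site to match. A minimal, hypothetical
sketch of the before/after (the class and driver method names below are
illustrative, not taken from this change):

  class Manager:
      """Hypothetical stand-in for ComputeManager."""

      def __init__(self, driver):
          self.driver = driver

      # Before: 'context' is accepted but never referenced in the body,
      # so 'pylint -d all -e unused-argument' reports it here.
      def _get_power_state_before(self, context, instance):
          return self.driver.get_state(instance)

      # After: the parameter is dropped and call sites change from
      # self._get_power_state(context, instance) to
      # self._get_power_state(instance).
      def _get_power_state(self, instance):
          return self.driver.get_state(instance)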

Change-Id: Ifc8fe3b8cc589d27e9d9f00122dffd7e6ed10b13
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Author: Stephen Finucane <stephenfin@redhat.com>
Date: 2020-05-22 12:57:42 +01:00
Parent: 6ae11a0bcf
Commit: 69079f072d

4 changed files with 149 additions and 167 deletions


@ -604,8 +604,7 @@ class ComputeManager(manager.Manager):
# therefore is not in an availability zone.
instance.availability_zone = None
def _set_instance_obj_error_state(self, context, instance,
clean_task_state=False):
def _set_instance_obj_error_state(self, instance, clean_task_state=False):
try:
instance.vm_state = vm_states.ERROR
if clean_task_state:
@ -941,8 +940,7 @@ class ComputeManager(manager.Manager):
# instance has already been scheduled to this particular host.
LOG.debug("Instance failed to spawn correctly, "
"setting to ERROR state", instance=instance)
self._set_instance_obj_error_state(
context, instance, clean_task_state=True)
self._set_instance_obj_error_state(instance, clean_task_state=True)
return
if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and
@ -953,8 +951,7 @@ class ComputeManager(manager.Manager):
# spawned so set to ERROR state. This is consistent to BUILD
LOG.debug("Instance failed to rebuild correctly, "
"setting to ERROR state", instance=instance)
self._set_instance_obj_error_state(
context, instance, clean_task_state=True)
self._set_instance_obj_error_state(instance, clean_task_state=True)
return
if (instance.vm_state != vm_states.ERROR and
@ -995,12 +992,12 @@ class ComputeManager(manager.Manager):
# we don't want that an exception blocks the init_host
LOG.exception('Failed to complete a deletion',
instance=instance)
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
return
current_power_state = self._get_power_state(context, instance)
try_reboot, reboot_type = self._retry_reboot(context, instance,
current_power_state)
current_power_state = self._get_power_state(instance)
try_reboot, reboot_type = self._retry_reboot(
instance, current_power_state)
if try_reboot:
LOG.debug("Instance in transitional state (%(task_state)s) at "
@ -1092,7 +1089,7 @@ class ComputeManager(manager.Manager):
LOG.exception('Virtual interface plugging failed for instance. '
'The port binding:host_id may need to be manually '
'updated.', instance=instance)
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
return
if instance.task_state == task_states.RESIZE_MIGRATING:
@ -1128,7 +1125,7 @@ class ComputeManager(manager.Manager):
self._reset_live_migration(context, instance)
db_state = instance.power_state
drv_state = self._get_power_state(context, instance)
drv_state = self._get_power_state(instance)
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
@ -1157,9 +1154,9 @@ class ComputeManager(manager.Manager):
# instance to error and attempt to continue.
LOG.warning('Failed to resume instance',
instance=instance)
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
def _retry_reboot(self, context, instance, current_power_state):
def _retry_reboot(self, instance, current_power_state):
current_task_state = instance.task_state
retry_reboot = False
reboot_type = compute_utils.get_reboot_type(current_task_state,
@ -1222,7 +1219,7 @@ class ComputeManager(manager.Manager):
# Note(lpetrut): The event may be delayed, thus not reflecting
# the current instance power state. In that case, ignore the event.
current_power_state = self._get_power_state(context, instance)
current_power_state = self._get_power_state(instance)
if current_power_state == vm_power_state:
LOG.debug('Synchronizing instance power state after lifecycle '
'event "%(event)s"; current vm_state: %(vm_state)s, '
@ -1459,7 +1456,7 @@ class ComputeManager(manager.Manager):
"Instance spawn was interrupted before instance_claim, "
"setting instance to ERROR state", instance=instance)
self._set_instance_obj_error_state(
context, instance, clean_task_state=True)
instance, clean_task_state=True)
def cleanup_host(self):
self.driver.register_event_listener(None)
@ -1494,7 +1491,7 @@ class ComputeManager(manager.Manager):
self.update_available_resource(nova.context.get_admin_context(),
startup=True)
def _get_power_state(self, context, instance):
def _get_power_state(self, instance):
"""Retrieve the power state for the given instance."""
LOG.debug('Checking state', instance=instance)
try:
@ -1512,6 +1509,7 @@ class ComputeManager(manager.Manager):
# TODO(mdragon): perhaps make this variable by console_type?
return 'console.%s' % CONF.console_host
# TODO(stephenfin): Remove this once we bump the compute API to v6.0
@wrap_exception()
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@ -1648,11 +1646,11 @@ class ComputeManager(manager.Manager):
for instance in building_insts:
if timeutils.is_older_than(instance.created_at, timeout):
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
LOG.warning("Instance build timed out. Set to error "
"state.", instance=instance)
def _check_instance_exists(self, context, instance):
def _check_instance_exists(self, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance):
raise exception.InstanceExists(name=instance.name)
@ -1914,9 +1912,9 @@ class ComputeManager(manager.Manager):
# useful details which the standard InvalidBDM error message lacks.
raise exception.InvalidBDM(six.text_type(ex))
def _update_instance_after_spawn(self, context, instance,
def _update_instance_after_spawn(self, instance,
vm_state=vm_states.ACTIVE):
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_state
instance.task_state = None
# NOTE(sean-k-mooney): configdrive.update_instance checks
@ -2170,7 +2168,7 @@ class ComputeManager(manager.Manager):
instance, e, sys.exc_info(),
fault_message=e.kwargs['reason'])
self._nil_out_instance_obj_host_and_node(instance)
self._set_instance_obj_error_state(context, instance,
self._set_instance_obj_error_state(instance,
clean_task_state=True)
return build_results.FAILED
LOG.debug(e.format_message(), instance=instance)
@ -2220,8 +2218,7 @@ class ComputeManager(manager.Manager):
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._nil_out_instance_obj_host_and_node(instance)
self._set_instance_obj_error_state(context, instance,
clean_task_state=True)
self._set_instance_obj_error_state(instance, clean_task_state=True)
return build_results.FAILED
@staticmethod
@ -2444,7 +2441,7 @@ class ComputeManager(manager.Manager):
instance.access_ip_v6 = ip['address']
break
self._update_instance_after_spawn(context, instance)
self._update_instance_after_spawn(instance)
try:
instance.save(expected_task_state=task_states.SPAWNING)
@ -2543,20 +2540,19 @@ class ComputeManager(manager.Manager):
reason=msg)
arqs = []
dp_name = instance.flavor.extra_specs.get('accel:device_profile')
try:
if dp_name:
if instance.flavor.extra_specs.get('accel:device_profile'):
try:
arqs = self._get_bound_arq_resources(
context, dp_name, instance, accel_uuids)
except (Exception, eventlet.timeout.Timeout) as exc:
LOG.exception(exc.format_message())
self._build_resources_cleanup(instance, network_info)
compute_utils.delete_arqs_if_needed(context, instance)
msg = _('Failure getting accelerator requests.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
resources['accel_info'] = arqs
context, instance, accel_uuids)
except (Exception, eventlet.timeout.Timeout) as exc:
LOG.exception(exc.format_message())
self._build_resources_cleanup(instance, network_info)
compute_utils.delete_arqs_if_needed(context, instance)
msg = _('Failure getting accelerator requests.')
raise exception.BuildAbortException(
reason=msg, instance_uuid=instance.uuid)
resources['accel_info'] = arqs
try:
yield resources
except Exception as exc:
@ -2590,7 +2586,7 @@ class ComputeManager(manager.Manager):
# Call Cyborg to delete accelerator requests
compute_utils.delete_arqs_if_needed(context, instance)
def _get_bound_arq_resources(self, context, dp_name, instance, arq_uuids):
def _get_bound_arq_resources(self, context, instance, arq_uuids):
"""Get bound accelerator requests.
The ARQ binding was kicked off in the conductor as an async
@ -2602,7 +2598,6 @@ class ComputeManager(manager.Manager):
[1] https://review.opendev.org/#/c/631244/46/nova/compute/
manager.py@2627
:param dp_name: Device profile name. Caller ensures this is valid.
:param instance: instance object
:param arq_uuids: List of accelerator request (ARQ) UUIDs.
:returns: List of ARQs for which bindings have completed,
@ -2727,9 +2722,9 @@ class ComputeManager(manager.Manager):
with excutils.save_and_reraise_exception():
LOG.error('Failed to deallocate network for instance. '
'Error: %s', ex, instance=instance)
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
def _get_power_off_values(self, context, instance, clean_shutdown):
def _get_power_off_values(self, instance, clean_shutdown):
"""Get the timing configuration for powering down this instance."""
if clean_shutdown:
timeout = compute_utils.get_value_from_system_metadata(instance,
@ -2742,10 +2737,10 @@ class ComputeManager(manager.Manager):
return timeout, retry_interval
def _power_off_instance(self, context, instance, clean_shutdown=True):
def _power_off_instance(self, instance, clean_shutdown=True):
"""Power off an instance on this host."""
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
timeout, retry_interval = self._get_power_off_values(
instance, clean_shutdown)
self.driver.power_off(instance, timeout, retry_interval)
def _shutdown_instance(self, context, instance,
@ -2990,7 +2985,7 @@ class ComputeManager(manager.Manager):
with excutils.save_and_reraise_exception():
LOG.exception('Setting instance vm_state to ERROR',
instance=instance)
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
do_terminate_instance(instance, bdms)
@ -3006,7 +3001,7 @@ class ComputeManager(manager.Manager):
@utils.synchronized(instance.uuid)
def do_stop_instance():
current_power_state = self._get_power_state(context, instance)
current_power_state = self._get_power_state(instance)
LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, current VM '
@ -3038,8 +3033,8 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.POWER_OFF,
phase=fields.NotificationPhase.START)
self._power_off_instance(context, instance, clean_shutdown)
instance.power_state = self._get_power_state(context, instance)
self._power_off_instance(instance, clean_shutdown)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_states.STOPPED
instance.task_state = None
instance.save(expected_task_state=expected_task_state)
@ -3090,7 +3085,7 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.POWER_ON,
phase=fields.NotificationPhase.START)
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
@ -3149,7 +3144,7 @@ class ComputeManager(manager.Manager):
# Fallback to just powering off the instance if the
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save(expected_task_state=[task_states.SOFT_DELETING])
@ -3170,7 +3165,7 @@ class ComputeManager(manager.Manager):
# Fallback to just powering on the instance if the hypervisor
# doesn't implement the restore method
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESTORING)
@ -3205,7 +3200,7 @@ class ComputeManager(manager.Manager):
if evacuate:
detach_block_devices(context, bdms)
else:
self._power_off_instance(context, instance, clean_shutdown=True)
self._power_off_instance(instance, clean_shutdown=True)
detach_block_devices(context, bdms)
self.driver.destroy(context, instance,
network_info=network_info,
@ -3454,7 +3449,7 @@ class ComputeManager(manager.Manager):
if not self.driver.capabilities.get("supports_evacuate", False):
raise exception.InstanceEvacuateNotSupported
self._check_instance_exists(context, instance)
self._check_instance_exists(instance)
if on_shared_storage is None:
LOG.debug('on_shared_storage is not provided, using driver '
@ -3514,7 +3509,7 @@ class ComputeManager(manager.Manager):
phase=fields.NotificationPhase.START,
bdms=bdms)
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
@ -3588,7 +3583,7 @@ class ComputeManager(manager.Manager):
# NOTE(rpodolyaka): driver doesn't provide specialized version
# of rebuild, fall back to the default implementation
self._rebuild_default_impl(**kwargs)
self._update_instance_after_spawn(context, instance)
self._update_instance_after_spawn(instance)
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
if orig_vm_state == vm_states.STOPPED:
@ -3689,7 +3684,7 @@ class ComputeManager(manager.Manager):
bdms=bdms
)
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.save(expected_task_state=expected_states)
if instance.power_state != power_state.RUNNING:
@ -3730,7 +3725,7 @@ class ComputeManager(manager.Manager):
exc_info = sys.exc_info()
# if the reboot failed but the VM is running don't
# put it into an error state
new_power_state = self._get_power_state(context, instance)
new_power_state = self._get_power_state(instance)
if new_power_state == power_state.RUNNING:
LOG.warning('Reboot failed but instance is running',
instance=instance)
@ -3748,10 +3743,10 @@ class ComputeManager(manager.Manager):
else:
LOG.error('Cannot reboot instance: %s', error,
instance=instance)
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
if not new_power_state:
new_power_state = self._get_power_state(context, instance)
new_power_state = self._get_power_state(instance)
try:
instance.power_state = new_power_state
instance.vm_state = new_vm_state
@ -3827,7 +3822,7 @@ class ComputeManager(manager.Manager):
expected_task_state):
context = context.elevated()
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
try:
instance.save()
@ -3979,7 +3974,7 @@ class ComputeManager(manager.Manager):
"""
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
current_power_state = self._get_power_state(instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
@ -4088,7 +4083,7 @@ class ComputeManager(manager.Manager):
phase=fields.NotificationPhase.START)
try:
self._power_off_instance(context, instance, clean_shutdown)
self._power_off_instance(instance, clean_shutdown)
self.driver.rescue(context, instance, network_info,
rescue_image_meta, admin_password,
@ -4096,7 +4091,7 @@ class ComputeManager(manager.Manager):
except Exception as e:
LOG.exception("Error trying to Rescue Instance",
instance=instance)
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
raise exception.InstanceNotRescuable(
instance_id=instance.uuid,
reason=_("Driver Error: %s") % e)
@ -4106,7 +4101,7 @@ class ComputeManager(manager.Manager):
instance.vm_state = vm_states.RESCUED
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESCUING)
@ -4138,7 +4133,7 @@ class ComputeManager(manager.Manager):
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.save(expected_task_state=task_states.UNRESCUING)
self._notify_about_instance_usage(context,
@ -4441,7 +4436,7 @@ class ComputeManager(manager.Manager):
# Delete port bindings for the source host.
self._confirm_snapshot_based_resize_delete_port_bindings(
ctxt, instance, migration)
ctxt, instance)
# Delete volume attachments for the source host.
self._delete_volume_attachments(ctxt, instance.get_bdms())
@ -4456,13 +4451,12 @@ class ComputeManager(manager.Manager):
migration.save()
def _confirm_snapshot_based_resize_delete_port_bindings(
self, ctxt, instance, migration):
self, ctxt, instance):
"""Delete port bindings for the source host when confirming
snapshot-based resize on the source host."
:param ctxt: nova auth RequestContext
:param instance: Instance object that was resized/cold migrated
:param migration: Migration object for the resize/cold migrate
"""
LOG.debug('Deleting port bindings for source host.',
instance=instance)
@ -4728,8 +4722,7 @@ class ComputeManager(manager.Manager):
instance.drop_migration_context()
# If the original vm_state was STOPPED, set it back to STOPPED.
vm_state = vm_states.ACTIVE if power_on else vm_states.STOPPED
self._update_instance_after_spawn(
ctxt, instance, vm_state=vm_state)
self._update_instance_after_spawn(instance, vm_state=vm_state)
instance.save(expected_task_state=[task_states.RESIZE_REVERTING])
finally:
# Complete any volume attachments so the volumes are in-use. We
@ -5034,7 +5027,7 @@ class ComputeManager(manager.Manager):
filter_properties = {}
if not instance.host:
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
msg = _('Instance has no source host')
raise exception.MigrationError(reason=msg)
@ -5260,6 +5253,7 @@ class ComputeManager(manager.Manager):
# not re-scheduling
six.reraise(*exc_info)
# TODO(stephenfin): Remove unused request_spec parameter in API v6.0
@messaging.expected_exceptions(exception.MigrationPreCheckError)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@ -5411,11 +5405,11 @@ class ComputeManager(manager.Manager):
# potentially running in two places.
LOG.debug('Stopping instance', instance=instance)
try:
self._power_off_instance(ctxt, instance)
self._power_off_instance(instance)
except Exception as e:
LOG.exception('Failed to power off instance.', instance=instance)
raise exception.InstancePowerOffFailure(reason=six.text_type(e))
instance.power_state = self._get_power_state(ctxt, instance)
instance.power_state = self._get_power_state(instance)
# If a snapshot image ID was provided, we need to snapshot the guest
# disk image and upload it to the image service.
@ -5516,8 +5510,8 @@ class ComputeManager(manager.Manager):
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
timeout, retry_interval = self._get_power_off_values(
instance, clean_shutdown)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
@ -5817,6 +5811,7 @@ class ComputeManager(manager.Manager):
action=fields.NotificationAction.RESIZE_FINISH, phase=phase,
bdms=bdms)
# TODO(stephenfin): Remove unused request_spec parameter in API v6.0
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@ -5902,8 +5897,7 @@ class ComputeManager(manager.Manager):
migration.status = 'finished'
migration.save()
self._update_instance_after_spawn(
ctxt, instance, vm_state=vm_states.RESIZED)
self._update_instance_after_spawn(instance, vm_state=vm_states.RESIZED)
# Setting the host/node values will make the ResourceTracker continue
# to track usage for this instance on this host.
instance.host = migration.dest_compute
@ -6020,7 +6014,7 @@ class ComputeManager(manager.Manager):
network_info = self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
self._inject_network_info(context, instance, network_info)
self._inject_network_info(instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
@ -6043,7 +6037,7 @@ class ComputeManager(manager.Manager):
network_info = self.network_api.remove_fixed_ip_from_instance(context,
instance,
address)
self._inject_network_info(context, instance, network_info)
self._inject_network_info(instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
@ -6066,7 +6060,7 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.PAUSE,
phase=fields.NotificationPhase.START)
self.driver.pause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_states.PAUSED
instance.task_state = None
instance.save(expected_task_state=task_states.PAUSING)
@ -6088,7 +6082,7 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.UNPAUSE,
phase=fields.NotificationPhase.START)
self.driver.unpause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.UNPAUSING)
@ -6186,7 +6180,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
current_power_state = self._get_power_state(instance)
if current_power_state == power_state.RUNNING:
LOG.info("Retrieving diagnostics", instance=instance)
return self.driver.get_diagnostics(instance)
@ -6201,7 +6195,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_fault
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
current_power_state = self._get_power_state(instance)
if current_power_state == power_state.RUNNING:
LOG.info("Retrieving diagnostics", instance=instance)
return self.driver.get_instance_diagnostics(instance)
@ -6229,7 +6223,7 @@ class ComputeManager(manager.Manager):
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.suspend(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_states.SUSPENDED
instance.task_state = None
instance.save(expected_task_state=task_states.SUSPENDING)
@ -6265,7 +6259,7 @@ class ComputeManager(manager.Manager):
self.driver.resume(context, instance, network_info,
block_device_info)
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
# We default to the ACTIVE state for backwards compatibility
instance.vm_state = instance.system_metadata.pop('old_vm_state',
@ -6336,7 +6330,7 @@ class ComputeManager(manager.Manager):
# running.
if instance.power_state == power_state.PAUSED:
clean_shutdown = False
self._power_off_instance(context, instance, clean_shutdown)
self._power_off_instance(instance, clean_shutdown)
self.driver.snapshot(context, instance, image_id, update_task_state)
instance.system_metadata['shelved_at'] = timeutils.utcnow().isoformat()
@ -6346,7 +6340,7 @@ class ComputeManager(manager.Manager):
instance.task_state = None
if CONF.shelved_offload_time == 0:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.power_state = self._get_power_state(context, instance)
instance.power_state = self._get_power_state(instance)
instance.save(expected_task_state=[
task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING])
@ -6394,8 +6388,8 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
phase=fields.NotificationPhase.START, bdms=bdms)
self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
self._power_off_instance(instance, clean_shutdown)
current_power_state = self._get_power_state(instance)
network_info = self.network_api.get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
@ -6562,7 +6556,7 @@ class ComputeManager(manager.Manager):
image['id'])
self._unshelve_instance_key_restore(instance, scrubbed_keys)
self._update_instance_after_spawn(context, instance)
self._update_instance_after_spawn(instance)
# Delete system_metadata for a shelved instance
compute_utils.remove_shelved_keys_from_system_metadata(instance)
@ -6573,6 +6567,7 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.UNSHELVE,
phase=fields.NotificationPhase.END, bdms=bdms)
# TODO(stephenfin): Remove this in RPC 6.0 since it's nova-network only
@messaging.expected_exceptions(NotImplementedError)
@wrap_instance_fault
def reset_network(self, context, instance):
@ -6580,20 +6575,19 @@ class ComputeManager(manager.Manager):
LOG.debug('Reset network', instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance, network_info):
def _inject_network_info(self, instance, network_info):
"""Inject network info for the given instance."""
LOG.debug('Inject network info', instance=instance)
LOG.debug('network_info to inject: |%s|', network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
self.driver.inject_network_info(instance, network_info)
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
network_info = self.network_api.get_instance_nw_info(context, instance)
self._inject_network_info(context, instance, network_info)
self._inject_network_info(instance, network_info)
@messaging.expected_exceptions(NotImplementedError,
exception.ConsoleNotAvailable,
@ -7536,6 +7530,7 @@ class ComputeManager(manager.Manager):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host)
# TODO(stephenfin): Remove the unused instance argument in RPC version 6.0
@wrap_exception()
def check_instance_shared_storage(self, ctxt, instance, data):
"""Check if the instance files are shared
@ -8051,7 +8046,7 @@ class ComputeManager(manager.Manager):
# first refresh instance as it may have got updated by
# post_live_migration_at_destination
instance.refresh()
self._set_instance_obj_error_state(context, instance,
self._set_instance_obj_error_state(instance,
clean_task_state=True)
@wrap_exception()
@ -8496,7 +8491,7 @@ class ComputeManager(manager.Manager):
'destination host.', instance=instance)
finally:
# Restore instance state and update host
current_power_state = self._get_power_state(context, instance)
current_power_state = self._get_power_state(instance)
node_name = None
prev_host = instance.host
try:
@ -9856,7 +9851,7 @@ class ComputeManager(manager.Manager):
instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception():
# NOTE(mriedem): Why don't we pass clean_task_state=True here?
self._set_instance_obj_error_state(context, instance)
self._set_instance_obj_error_state(instance)
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):


@ -3021,7 +3021,7 @@ class ComputeTestCase(BaseTestCase,
mock.call(econtext, instance, 'fake-mini', action='reboot',
phase='start', bdms=bdms)]
ps_call_list = [mock.call(econtext, instance)]
ps_call_list = [mock.call(instance)]
db_call_list = [mock.call(econtext, instance['uuid'],
{'task_state': task_pending,
'expected_task_state': expected_tasks,
@ -3061,12 +3061,12 @@ class ComputeTestCase(BaseTestCase,
# Power state should be updated again
if not fail_reboot or fail_running:
new_power_state = fake_power_state2
ps_call_list.append(mock.call(econtext, instance))
ps_call_list.append(mock.call(instance))
mock_get_power.side_effect = chain(mock_get_power.side_effect,
[fake_power_state2])
else:
new_power_state = fake_power_state3
ps_call_list.append(mock.call(econtext, instance))
ps_call_list.append(mock.call(instance))
mock_get_power.side_effect = chain(mock_get_power.side_effect,
[fake_power_state3])
@ -5495,8 +5495,8 @@ class ComputeTestCase(BaseTestCase,
self.context, instance, bdms='fake_bdms')
mock_terminate_vol_conn.assert_called_once_with(self.context,
instance, 'fake_bdms')
mock_get_power_off_values.assert_called_once_with(self.context,
instance, clean_shutdown)
mock_get_power_off_values.assert_called_once_with(
instance, clean_shutdown)
self.assertEqual(migration.dest_compute, instance.host)
self.compute.terminate_instance(self.context, instance, [])
@ -8082,8 +8082,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.handle_events(event.LifecycleEvent(uuid, lifecycle_event))
mock_get.assert_called_once_with(mock.ANY,
test.ContainKeyValue('uuid', uuid))
mock_get.assert_called_once_with(test.ContainKeyValue('uuid', uuid))
if actual_state == vm_power_state:
mock_sync.assert_called_once_with(mock.ANY,
test.ContainKeyValue('uuid', uuid),


@ -1190,7 +1190,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.patch.object(self.compute, '_set_instance_obj_error_state')
) as (get_admin_context, get_nw_info, plug_vifs, set_error_state):
self.compute._init_instance(self.context, instance)
set_error_state.assert_called_once_with(self.context, instance)
set_error_state.assert_called_once_with(instance)
def _test__validate_pinning_configuration(self, supports_pcpus=True):
instance_1 = fake_instance.fake_instance_obj(
@ -1288,9 +1288,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
with mock.patch.object(self.compute.driver,
'get_info',
side_effect=exception.InstanceNotFound(instance_id=1)):
self.assertEqual(self.compute._get_power_state(self.context,
instance),
power_state.NOSTATE)
self.assertEqual(
power_state.NOSTATE,
self.compute._get_power_state(instance),
)
def test__get_power_state_NotFound(self):
instance = fake_instance.fake_instance_obj(
@ -1301,7 +1302,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.NotFound()):
self.assertRaises(exception.NotFound,
self.compute._get_power_state,
self.context, instance)
instance)
@mock.patch.object(manager.ComputeManager, '_get_power_state')
@mock.patch.object(fake_driver.FakeDriver, 'plug_vifs')
@ -1327,13 +1328,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_get_inst.return_value = 'fake-bdm'
mock_resume.side_effect = test.TestingException
self.compute._init_instance('fake-context', instance)
mock_get_power.assert_has_calls([mock.call(mock.ANY, instance),
mock.call(mock.ANY, instance)])
mock_get_power.assert_has_calls([mock.call(instance),
mock.call(instance)])
mock_plug.assert_called_once_with(instance, mock.ANY)
mock_get_inst.assert_called_once_with(mock.ANY, instance)
mock_resume.assert_called_once_with(mock.ANY, instance, mock.ANY,
'fake-bdm')
mock_set_inst.assert_called_once_with(mock.ANY, instance)
mock_set_inst.assert_called_once_with(instance)
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@ -1467,8 +1468,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_get_by_uuid.return_value = instance
mock_delete_instance.side_effect = test.TestingException('test')
self.compute._init_instance(self.context, instance)
mock_set_instance_error_state.assert_called_once_with(
self.context, instance)
mock_set_instance_error_state.assert_called_once_with(instance)
def _test_init_instance_reverts_crashed_migrations(self,
old_vm_state=None):
@ -1512,8 +1512,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute._init_instance(self.context, instance)
mock_get_mig.assert_called_with(self.context, 42, instance.uuid)
mock_retry.assert_called_once_with(self.context, instance,
power_state.SHUTDOWN)
mock_retry.assert_called_once_with(instance, power_state.SHUTDOWN)
mock_get_nw.assert_called_once_with()
mock_plug.assert_called_once_with(instance, [])
mock_get_inst.assert_called_once_with(self.context, instance)
@ -1771,9 +1770,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
uuid=uuids.instance,
vm_state=vm_states.ACTIVE,
task_state=task_states.POWERING_OFF)
self.compute._power_off_instance(
self.context, instance,
clean_shutdown=True)
self.compute._power_off_instance(instance, clean_shutdown=True)
mock_power_off.assert_called_once_with(
instance,
CONF.shutdown_timeout,
@ -2015,7 +2012,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
with mock.patch.object(self.compute.driver, 'get_info') as mock_info:
mock_info.return_value = hardware.InstanceInfo(
state=power_state.SHUTDOWN)
res = self.compute._get_power_state(self.context, instance)
res = self.compute._get_power_state(instance)
mock_info.assert_called_once_with(instance, use_cache=False)
self.assertEqual(res, power_state.SHUTDOWN)
@ -3933,7 +3930,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.task_state = task_states.REBOOT_PENDING
instance.vm_state = vm_states.ACTIVE
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance, power_state.RUNNING)
instance, power_state.RUNNING)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'SOFT')
@ -3943,7 +3940,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.task_state = task_states.REBOOT_PENDING_HARD
instance.vm_state = vm_states.ACTIVE
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance, power_state.RUNNING)
instance, power_state.RUNNING)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
@ -3952,7 +3949,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.uuid = uuids.instance
instance.task_state = task_states.REBOOT_STARTED
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance, power_state.NOSTATE)
instance, power_state.NOSTATE)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
@ -3961,7 +3958,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.uuid = uuids.instance
instance.task_state = task_states.REBOOT_STARTED_HARD
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance, power_state.NOSTATE)
instance, power_state.NOSTATE)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
@ -3970,7 +3967,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.uuid = uuids.instance
instance.task_state = task_states.REBOOT_STARTED_HARD
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance, power_state.RUNNING)
instance, power_state.RUNNING)
self.assertFalse(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
@ -3979,7 +3976,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.uuid = uuids.instance
instance.task_state = 'bar'
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance, power_state.RUNNING)
instance, power_state.RUNNING)
self.assertFalse(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
@ -4348,7 +4345,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
]
notify_instance_usage.assert_has_calls(notify_calls)
power_off_instance.assert_called_once_with(self.context, instance,
power_off_instance.assert_called_once_with(instance,
clean_shutdown)
driver_rescue.assert_called_once_with(
@ -4439,7 +4436,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
power_state_mock.assert_called_once_with(self.context, instance)
power_state_mock.assert_called_once_with(instance)
driver_mock.assert_called_once_with(instance, 'fake-pass')
instance_save_mock.assert_called_once_with(
expected_task_state=task_states.UPDATING_PASSWORD)
@ -4464,7 +4461,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, instance, None)
# make our assertions
power_state_mock.assert_called_once_with(self.context, instance)
power_state_mock.assert_called_once_with(instance)
instance_save_mock.assert_called_once_with(
expected_task_state=task_states.UPDATING_PASSWORD)
add_fault_mock.assert_called_once_with(
@ -4843,7 +4840,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
raise test.TestingException('test')
self.assertRaises(test.TestingException, do_test)
set_error.assert_called_once_with(self.context, instance)
set_error.assert_called_once_with(instance)
@mock.patch('nova.compute.manager.ComputeManager.'
'_detach_volume')
@ -4964,8 +4961,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.call(self.context, instance, 'fake-mini',
action='power_off', phase='end'),
])
power_off_mock.assert_called_once_with(
self.context, instance, True)
power_off_mock.assert_called_once_with(instance, True)
save_mock.assert_called_once_with(
expected_task_state=[task_states.POWERING_OFF, None])
self.assertEqual(power_state.SHUTDOWN, instance.power_state)
@ -5366,7 +5362,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, instance,
network_info=None, block_device_info=None)
mock_power_off.assert_called_once_with(
self.context, instance, clean_shutdown=True)
instance, clean_shutdown=True)
def test_do_rebuild_instance_check_trusted_certs(self):
"""Tests the scenario that we're rebuilding an instance with
@ -5551,7 +5547,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING)
with mock.patch.object(instance, 'save'):
self.compute._set_instance_obj_error_state(self.context, instance,
self.compute._set_instance_obj_error_state(instance,
clean_task_state=True)
self.assertEqual(vm_states.ERROR, instance.vm_state)
self.assertIsNone(instance.task_state)
@ -5560,7 +5556,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING)
with mock.patch.object(instance, 'save'):
self.compute._set_instance_obj_error_state(self.context, instance)
self.compute._set_instance_obj_error_state(instance)
self.assertEqual(vm_states.ERROR, instance.vm_state)
self.assertEqual(task_states.SPAWNING, instance.task_state)
@ -6079,8 +6075,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
resources = self._test_accel_build_resources(arq_uuids)
mock_get_arqs.assert_called_once_with(self.context,
dp_name, self.instance, arq_uuids)
mock_get_arqs.assert_called_once_with(
self.context, self.instance, arq_uuids)
self.assertEqual(sorted(resources['accel_info']), sorted(arq_list))
@mock.patch.object(virt_driver.ComputeDriver,
@ -6117,7 +6113,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_get_arqs.return_value = arq_list
ret_arqs = self.compute._get_bound_arq_resources(
self.context, dp_name, self.instance, arq_uuids)
self.context, self.instance, arq_uuids)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
@ -6147,7 +6143,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_get_arqs.side_effect = [arq_list, arq_list]
ret_arqs = self.compute._get_bound_arq_resources(
self.context, dp_name, self.instance, arq_uuids=None)
self.context, self.instance, arq_uuids=None)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
@ -6179,7 +6175,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_get_arqs.side_effect = [[], arq_list]
ret_arqs = self.compute._get_bound_arq_resources(
self.context, dp_name, self.instance, arq_uuids)
self.context, self.instance, arq_uuids)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
@ -6210,7 +6206,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.assertRaises(eventlet_timeout.Timeout,
self.compute._get_bound_arq_resources,
self.context, dp_name, self.instance, arq_uuids)
self.context, self.instance, arq_uuids)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
@ -6239,7 +6235,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.assertRaises(exception.AcceleratorRequestOpFailed,
self.compute._get_bound_arq_resources,
self.context, dp_name, self.instance, arq_uuids)
self.context, self.instance, arq_uuids)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
@ -6447,8 +6443,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_add.assert_called_once_with(self.context, self.instance,
mock.ANY, mock.ANY)
mock_nil.assert_called_once_with(self.instance)
mock_set.assert_called_once_with(self.context, self.instance,
clean_task_state=True)
mock_set.assert_called_once_with(self.instance, clean_task_state=True)
@mock.patch.object(objects.InstanceActionEvent,
'event_finish_with_failure')
@ -6728,8 +6723,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_add.assert_called_once_with(self.context, self.instance,
mock.ANY, mock.ANY, fault_message=mock.ANY)
mock_nil.assert_called_once_with(self.instance)
mock_set.assert_called_once_with(self.context, self.instance,
clean_task_state=True)
mock_set.assert_called_once_with(self.instance, clean_task_state=True)
@mock.patch.object(objects.InstanceActionEvent,
'event_finish_with_failure')
@ -6865,8 +6859,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
if set_error:
mock_add.assert_called_once_with(self.context, self.instance,
mock.ANY, mock.ANY)
mock_set.assert_called_once_with(self.context,
self.instance, clean_task_state=True)
mock_set.assert_called_once_with(
self.instance, clean_task_state=True)
mock_build_run.assert_called_once_with(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
@ -9455,8 +9449,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
)
get_instance_nw_info.assert_called_once_with(self.context,
self.instance)
_get_power_state.assert_called_once_with(self.context,
self.instance)
_get_power_state.assert_called_once_with(self.instance)
_get_compute_info.assert_called_once_with(self.context,
self.compute.host)
rt_mock.allocate_pci_devices_for_instance.assert_called_once_with(
@ -10809,8 +10802,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
mock.call(self.context, self.instance, get_bdms.return_value,
get_instance_nw_info.return_value,
fields.NotificationPhase.END)])
_power_off_instance.assert_called_once_with(
self.context, self.instance)
_power_off_instance.assert_called_once_with(self.instance)
self.assertEqual(power_state.SHUTDOWN, self.instance.power_state)
if snapshot_id is None:
_snapshot_for_resize.assert_not_called()
@ -10862,8 +10854,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
exception.InstancePowerOffFailure,
self.compute._prep_snapshot_based_resize_at_source,
self.context, self.instance, self.migration)
_power_off_instance.assert_called_once_with(
self.context, self.instance)
_power_off_instance.assert_called_once_with(self.instance)
@mock.patch('nova.objects.Instance.get_bdms',
return_value=objects.BlockDeviceMappingList())
@ -11388,7 +11379,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
block_device_info=None, destroy_disks=True, destroy_vifs=False)
# Ports and volumes were cleaned up.
mock_delete_bindings.assert_called_once_with(
self.context, self.instance, self.migration)
self.context, self.instance)
mock_delete_vols.assert_called_once_with(
self.context, mock_get_bdms.return_value)
# Move claim and migration context were dropped.
@ -11410,7 +11401,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.compute.network_api,
'cleanup_instance_network_on_host') as cleanup_networks:
self.compute._confirm_snapshot_based_resize_delete_port_bindings(
self.context, self.instance, self.migration)
self.context, self.instance)
cleanup_networks.assert_called_once_with(
self.context, self.instance, self.compute.host)
@ -11425,7 +11416,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
port_id=uuids.port_id, host=self.compute.host)
) as cleanup_networks:
self.compute._confirm_snapshot_based_resize_delete_port_bindings(
self.context, self.instance, self.migration)
self.context, self.instance)
cleanup_networks.assert_called_once_with(
self.context, self.instance, self.compute.host)
self.assertIn('Failed to delete port bindings from source host',
@ -11438,7 +11429,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
side_effect=test.TestingException('neutron down')
) as cleanup_networks:
self.assertRaises(test.TestingException, func,
self.context, self.instance, self.migration)
self.context, self.instance)
cleanup_networks.assert_called_once_with(
self.context, self.instance, self.compute.host)
@ -11746,7 +11737,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
# Assert final DB cleanup for the instance.
mock_drop_mig_context.assert_called_once_with()
mock_update_after_spawn.assert_called_once_with(
self.context, self.instance, vm_state=vm_states.STOPPED)
self.instance, vm_state=vm_states.STOPPED)
mock_inst_save.assert_has_calls([
mock.call(expected_task_state=[task_states.RESIZE_REVERTING])] * 2)
# And finally that the volume attachments were completed.


@ -139,8 +139,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
mock.call(self.context, instance, 'shelve.start'),
mock.call(self.context, instance, 'shelve.end')]
mock_power_off_call_list = []
mock_get_power_state_call_list = [
mock.call(self.context, instance)]
mock_get_power_state_call_list = [mock.call(instance)]
if clean_shutdown:
if guest_power_state == power_state.PAUSED:
@ -157,8 +156,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
mock.call(self.context, instance, 'shelve_offload.start'),
mock.call(self.context, instance, 'shelve_offload.end')])
mock_power_off_call_list.append(mock.call(instance, 0, 0))
mock_get_power_state_call_list.append(mock.call(self.context,
instance))
mock_get_power_state_call_list.append(mock.call(instance))
mock_notify_instance_usage.assert_has_calls(
mock_notify_instance_usage_call_list)
@ -252,8 +250,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
# instance.host is replaced with host because
# original instance.host is clear after
# ComputeManager.shelve_offload_instance execute
mock_get_power_state.assert_called_once_with(
self.context, instance)
mock_get_power_state.assert_called_once_with(instance)
mock_update_resource_tracker.assert_called_once_with(self.context,
instance)
mock_delete_alloc.assert_called_once_with(self.context, instance)
@ -369,7 +366,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
block_device_info='fake_bdm')
self.mock_get_allocations.assert_called_once_with(self.context,
instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
mock_get_power_state.assert_called_once_with(instance)
self.assertNotIn('shelved_at', instance.system_metadata)
self.assertNotIn('shelved_image_id', instance.system_metadata)
@ -472,7 +469,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
allocations={}, network_info=[], block_device_info='fake_bdm')
self.mock_get_allocations.assert_called_once_with(self.context,
instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
mock_get_power_state.assert_called_once_with(instance)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')