diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 89bbed9c5c84..c815d40cc041 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -328,7 +328,7 @@ def delete_image_on_error(function):
     return decorated_function
 
 
-# TODO(danms): Remove me after havana
+# TODO(danms): Remove me after Icehouse
 def object_compat(function):
     """Wraps a method that expects a new-world instance
 
@@ -419,7 +419,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
 class ComputeManager(manager.SchedulerDependentManager):
     """Manages the running instances from creation to destruction."""
 
-    RPC_API_VERSION = '2.48'
+    RPC_API_VERSION = '3.0'
 
     def __init__(self, compute_driver=None, *args, **kwargs):
         """Load configuration options and connect to the hypervisor."""
@@ -451,12 +451,6 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.use_legacy_block_device_info = \
             self.driver.need_legacy_block_device_info
 
-    def create_rpc_dispatcher(self, backdoor_port=None, additional_apis=None):
-        additional_apis = additional_apis or []
-        additional_apis.append(_ComputeV3Proxy(self))
-        return super(ComputeManager, self).create_rpc_dispatcher(
-                backdoor_port, additional_apis)
-
     def _get_resource_tracker(self, nodename):
         rt = self._resource_tracker_dict.get(nodename)
         if not rt:
@@ -804,12 +798,6 @@ class ComputeManager(manager.SchedulerDependentManager):
         except exception.NotFound:
             return power_state.NOSTATE
 
-    # NOTE(russellb) This method can be removed in 3.0 of this API. It is
-    # deprecated in favor of the method in the base API.
-    def get_backdoor_port(self, context):
-        """Return backdoor port for eventlet_backdoor."""
-        return self.backdoor_port
-
     def get_console_topic(self, context):
         """Retrieves the console host for a project on this host.
 
@@ -1792,10 +1780,10 @@ class ComputeManager(manager.SchedulerDependentManager):
     @reverts_task_state
     @wrap_instance_event
     @wrap_instance_fault
-    def run_instance(self, context, instance, request_spec=None,
-                     filter_properties=None, requested_networks=None,
-                     injected_files=None, admin_password=None,
-                     is_first_time=False, node=None, legacy_bdm_in_spec=True):
+    def run_instance(self, context, instance, request_spec,
+                     filter_properties, requested_networks,
+                     injected_files, admin_password,
+                     is_first_time, node, legacy_bdm_in_spec):
 
         if filter_properties is None:
             filter_properties = {}
@@ -1963,17 +1951,12 @@ class ComputeManager(manager.SchedulerDependentManager):
                                  quotas, system_meta)
 
-    @object_compat
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_event
     @wrap_instance_fault
-    def terminate_instance(self, context, instance, bdms=None,
-                           reservations=None):
+    def terminate_instance(self, context, instance, bdms, reservations):
         """Terminate an instance on this host."""
-        # NOTE(danms): remove this compatibility in the future
-        if not bdms:
-            bdms = self._get_instance_volume_bdms(context, instance)
 
         @utils.synchronized(instance['uuid'])
         def do_terminate_instance(instance, bdms):
@@ -1992,7 +1975,6 @@ class ComputeManager(manager.SchedulerDependentManager):
     # NOTE(johannes): This is probably better named power_off_instance
     # so it matches the driver method, but because of other issues, we
     # can't use that name in grizzly.
-    @object_compat
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_event
@@ -2019,7 +2001,6 @@ class ComputeManager(manager.SchedulerDependentManager):
     # NOTE(johannes): This is probably better named power_on_instance
     # so it matches the driver method, but because of other issues, we
    # can't use that name in grizzly.
-    @object_compat
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_event
@@ -2035,12 +2016,11 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance.save(expected_task_state=task_states.POWERING_ON)
         self._notify_about_instance_usage(context, instance, "power_on.end")
 
-    @object_compat
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_event
     @wrap_instance_fault
-    def soft_delete_instance(self, context, instance, reservations=None):
+    def soft_delete_instance(self, context, instance, reservations):
         """Soft delete an instance on this host."""
 
         if context.is_admin and context.project_id != instance['project_id']:
@@ -2102,8 +2082,8 @@ class ComputeManager(manager.SchedulerDependentManager):
     @wrap_instance_event
     @wrap_instance_fault
     def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
-                         injected_files, new_pass, orig_sys_metadata=None,
-                         bdms=None, recreate=False, on_shared_storage=False):
+                         injected_files, new_pass, orig_sys_metadata,
+                         bdms, recreate, on_shared_storage):
         """Destroy and re-make this instance.
 
         A 'rebuild' effectively purges all existing data from the system and
@@ -2247,7 +2227,9 @@ class ComputeManager(manager.SchedulerDependentManager):
                               vm_state=vm_states.ACTIVE,
                               task_state=task_states.POWERING_OFF,
                               progress=0)
-            self.stop_instance(context, instance=instance)
+            inst_obj = instance_obj.Instance.get_by_uuid(context,
+                                                         instance['uuid'])
+            self.stop_instance(context, inst_obj)
 
         self._notify_about_instance_usage(
                 context, instance, "rebuild.end",
@@ -2280,15 +2262,12 @@ class ComputeManager(manager.SchedulerDependentManager):
             # Manager-detach
             self.detach_volume(context, volume_id, instance)
 
-    @object_compat
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_event
     @wrap_instance_fault
-    def reboot_instance(self, context, instance,
-                        block_device_info=None,
-                        network_info=None,
-                        reboot_type="SOFT"):
+    def reboot_instance(self, context, instance, block_device_info,
+                        reboot_type):
         """Reboot an instance on this host."""
         context = context.elevated()
         LOG.audit(_("Rebooting instance"), context=context, instance=instance)
@@ -2381,39 +2360,17 @@ class ComputeManager(manager.SchedulerDependentManager):
                                      task_states.IMAGE_BACKUP)
         self._rotate_backups(context, instance, backup_type, rotation)
 
-    # FIXME(comstud): Remove 'image_type', 'backup_type', and 'rotation'
-    # on next major RPC version bump.
-    @object_compat
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_fault
     @delete_image_on_error
-    def snapshot_instance(self, context, image_id, instance,
-                          image_type=None, backup_type=None,
-                          rotation=None):
+    def snapshot_instance(self, context, image_id, instance):
         """Snapshot an instance on this host.
 
         :param context: security context
         :param instance: an Instance dict
         :param image_id: glance.db.sqlalchemy.models.Image.Id
-        The following params are for RPC versions prior to 2.39 where
-            this method also handled backups:
-        :param image_type: snapshot | backup
-        :param backup_type: daily | weekly
-        :param rotation: int representing how many backups to keep around;
-            None if rotation shouldn't be used (as in the case of snapshots)
         """
-        if image_type is not None:
-            # Old RPC version
-            if image_type == 'backup':
-                if rotation < 0:
-                    raise exception.RotationRequiredForBackup()
-                self._snapshot_instance(context, image_id, instance,
-                                        task_states.IMAGE_BACKUP)
-                self._rotate_backups(context, instance, backup_type, rotation)
-                return
-            if rotation:
-                raise exception.ImageRotationNotAllowed()
         self._snapshot_instance(context, image_id, instance,
                                 task_states.IMAGE_SNAPSHOT)
 
@@ -2518,7 +2475,7 @@ class ComputeManager(manager.SchedulerDependentManager):
     @reverts_task_state
     @wrap_instance_event
     @wrap_instance_fault
-    def set_admin_password(self, context, instance, new_pass=None):
+    def set_admin_password(self, context, instance, new_pass):
         """Set the root/admin password for an instance on this host.
 
         This is generally only called by API password resets after an
@@ -2626,7 +2583,7 @@ class ComputeManager(manager.SchedulerDependentManager):
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_event
-    def rescue_instance(self, context, instance, rescue_password=None):
+    def rescue_instance(self, context, instance, rescue_password):
         """
         Rescue an instance on this host.
         :param rescue_password: password to set on rescue instance
@@ -2731,12 +2688,10 @@ class ComputeManager(manager.SchedulerDependentManager):
         return sys_meta, instance_type
 
-    @object_compat
     @wrap_exception()
     @wrap_instance_event
     @wrap_instance_fault
-    def confirm_resize(self, context, instance, reservations=None,
-                       migration=None, migration_id=None):
+    def confirm_resize(self, context, instance, reservations, migration):
 
         @utils.synchronized(instance['uuid'])
         def do_confirm_resize(context, instance, migration_id):
@@ -2745,6 +2700,8 @@ class ComputeManager(manager.SchedulerDependentManager):
             LOG.debug(_("Going to confirm migration %s") % migration_id,
                       context=context, instance=instance)
             try:
+                # TODO(russellb) Why are we sending the migration object just
+                # to turn around and look it up from the db again?
                 migration = migration_obj.Migration.get_by_id(
                     context.elevated(), migration_id)
             except exception.MigrationNotFound:
@@ -2777,8 +2734,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             self._confirm_resize(context, instance, reservations=reservations,
                                  migration=migration)
 
-        migration_id = migration_id if migration_id else migration.id
-        do_confirm_resize(context, instance, migration_id)
+        do_confirm_resize(context, instance, migration.id)
 
     def _confirm_resize(self, context, instance, reservations=None,
                         migration=None):
@@ -2836,23 +2792,17 @@ class ComputeManager(manager.SchedulerDependentManager):
             self._quota_commit(context, reservations)
 
-    @object_compat
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_event
     @wrap_instance_fault
-    def revert_resize(self, context, instance, migration=None,
-                      migration_id=None, reservations=None):
+    def revert_resize(self, context, instance, migration, reservations):
         """Destroys the new instance on the destination machine.
 
         Reverts the model changes, and powers on the old instance on the
         source machine.
""" - if not migration: - migration = migration_obj.Migration.get_by_id( - context.elevated(), migration_id) - # NOTE(comstud): A revert_resize is essentially a resize back to # the old size, so we need to send a usage event here. self.conductor_api.notify_usage_exists( @@ -2889,23 +2839,17 @@ class ComputeManager(manager.SchedulerDependentManager): migration, migration.source_compute, reservations=reservations) - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault - def finish_revert_resize(self, context, instance, reservations=None, - migration=None, migration_id=None): + def finish_revert_resize(self, context, instance, reservations, migration): """Finishes the second half of reverting a resize. Bring the original source instance state back (active/shutoff) and revert the resized attributes in the database. """ - if not migration: - migration = migration_obj.Migration.get_by_id( - context.elevated(), migration_id) - with self._error_out_instance_on_exception(context, instance.uuid, reservations): network_info = self._get_instance_nw_info(context, instance) @@ -3018,14 +2962,12 @@ class ComputeManager(manager.SchedulerDependentManager): self.compute_rpcapi.resize_instance(context, instance, claim.migration, image, instance_type, reservations) - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def prep_resize(self, context, image, instance, instance_type, - reservations=None, request_spec=None, - filter_properties=None, node=None): + reservations, request_spec, filter_properties, node): """Initiates the process of moving a running instance to another host. Possibly changes the RAM and disk size in the process. @@ -3102,18 +3044,13 @@ class ComputeManager(manager.SchedulerDependentManager): # not re-scheduling raise exc_info[0], exc_info[1], exc_info[2] - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def resize_instance(self, context, instance, image, - reservations=None, migration=None, migration_id=None, - instance_type=None): + reservations, migration, instance_type): """Starts the migration of a running instance to another host.""" - if not migration: - migration = migration_obj.Migration.get_by_id( - context.elevated(), migration_id) with self._error_out_instance_on_exception(context, instance.uuid, reservations): if not instance_type: @@ -3241,22 +3178,18 @@ class ComputeManager(manager.SchedulerDependentManager): context, instance, "finish_resize.end", network_info=network_info) - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def finish_resize(self, context, disk_info, image, instance, - reservations=None, migration=None, migration_id=None): + reservations, migration): """Completes the migration process. Sets up the newly transferred disk and turns on the instance at its new host machine. 
""" - if not migration: - migration = migration_obj.Migration.get_by_id( - context.elevated(), migration_id) try: self._finish_resize(context, instance, migration, disk_info, image) @@ -3325,7 +3258,6 @@ class ComputeManager(manager.SchedulerDependentManager): self._notify_about_instance_usage( context, instance, "delete_ip.end", network_info=network_info) - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @@ -3343,7 +3275,6 @@ class ComputeManager(manager.SchedulerDependentManager): instance.save(expected_task_state=task_states.PAUSING) self._notify_about_instance_usage(context, instance, 'pause.end') - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @@ -3362,9 +3293,10 @@ class ComputeManager(manager.SchedulerDependentManager): self._notify_about_instance_usage(context, instance, 'unpause.end') @wrap_exception() - def host_power_action(self, context, host=None, action=None): + def host_power_action(self, context, action): """Reboots, shuts down or powers up the host.""" - return self.driver.host_power_action(host, action) + # TODO(russellb) Remove the unused host parameter from the driver API + return self.driver.host_power_action(None, action) @wrap_exception() def host_maintenance_mode(self, context, host, mode): @@ -3374,9 +3306,10 @@ class ComputeManager(manager.SchedulerDependentManager): return self.driver.host_maintenance_mode(host, mode) @wrap_exception() - def set_host_enabled(self, context, host=None, enabled=None): + def set_host_enabled(self, context, enabled): """Sets the specified host's ability to accept new instances.""" - return self.driver.set_host_enabled(host, enabled) + # TODO(russellb) Remove the unused host parameter from the driver API + return self.driver.set_host_enabled(None, enabled) @wrap_exception() def get_host_uptime(self, context): @@ -3393,7 +3326,6 @@ class ComputeManager(manager.SchedulerDependentManager): instance=instance) return self.driver.get_diagnostics(instance) - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @@ -3412,7 +3344,6 @@ class ComputeManager(manager.SchedulerDependentManager): instance.save(expected_task_state=task_states.SUSPENDING) self._notify_about_instance_usage(context, instance, 'suspend') - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @@ -3528,7 +3459,7 @@ class ComputeManager(manager.SchedulerDependentManager): @reverts_task_state @wrap_instance_event @wrap_instance_fault - def unshelve_instance(self, context, instance, image=None): + def unshelve_instance(self, context, instance, image): """Unshelve the instance. 
:param context: request context @@ -3588,7 +3519,6 @@ class ComputeManager(manager.SchedulerDependentManager): instance.save(expected_task_state=task_states.SPAWNING) self._notify_about_instance_usage(context, instance, 'unshelve.end') - @object_compat @reverts_task_state @wrap_instance_fault def reset_network(self, context, instance): @@ -3608,7 +3538,6 @@ class ComputeManager(manager.SchedulerDependentManager): network_info) return network_info - @object_compat @wrap_instance_fault def inject_network_info(self, context, instance): """Inject network info, but don't return the info.""" @@ -3616,7 +3545,7 @@ class ComputeManager(manager.SchedulerDependentManager): @wrap_exception() @wrap_instance_fault - def get_console_output(self, context, instance, tail_length=None): + def get_console_output(self, context, instance, tail_length): """Send the console output for the given instance.""" context = context.elevated() LOG.audit(_("Get console output"), context=context, @@ -3724,7 +3653,7 @@ class ComputeManager(manager.SchedulerDependentManager): @reverts_task_state @wrap_instance_fault def reserve_block_device_name(self, context, instance, device, - volume_id=None): + volume_id): @utils.synchronized(instance['uuid']) def do_reserve(): @@ -3986,7 +3915,7 @@ class ComputeManager(manager.SchedulerDependentManager): pass def attach_interface(self, context, instance, network_id, port_id, - requested_ip=None): + requested_ip): """Use hotplug to add an network adapter to an instance.""" network_info = self.network_api.allocate_port_for_instance( context, instance, port_id, network_id, requested_ip) @@ -4041,11 +3970,9 @@ class ComputeManager(manager.SchedulerDependentManager): """ return self.driver.check_instance_shared_storage_remote(ctxt, data) - @object_compat @wrap_exception() def check_can_live_migrate_destination(self, ctxt, instance, - block_migration=False, - disk_over_commit=False): + block_migration, disk_over_commit): """Check if it is possible to execute live migration. This runs checks on the destination host, and then calls @@ -4074,7 +4001,6 @@ class ComputeManager(manager.SchedulerDependentManager): migrate_data.update(dest_check_data['migrate_data']) return migrate_data - @object_compat @wrap_exception() def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): """Check if it is possible to execute live migration. @@ -4100,9 +4026,8 @@ class ComputeManager(manager.SchedulerDependentManager): dest_check_data) @wrap_exception() - def pre_live_migration(self, context, instance, - block_migration=False, disk=None, - migrate_data=None): + def pre_live_migration(self, context, instance, block_migration, disk, + migrate_data): """Preparations for live migration at dest host. :param context: security context @@ -4147,8 +4072,8 @@ class ComputeManager(manager.SchedulerDependentManager): return pre_live_migration_data @wrap_exception() - def live_migration(self, context, dest, instance, - block_migration=False, migrate_data=None): + def live_migration(self, context, dest, instance, block_migration, + migrate_data): """Executing live migration. :param context: security context @@ -4287,7 +4212,7 @@ class ComputeManager(manager.SchedulerDependentManager): @wrap_exception() def post_live_migration_at_destination(self, context, instance, - block_migration=False): + block_migration): """Post operations for live migration . 
:param context: security context @@ -5123,12 +5048,8 @@ class ComputeManager(manager.SchedulerDependentManager): @aggregate_object_compat @wrap_exception() - def add_aggregate_host(self, context, host, slave_info=None, - aggregate=None, aggregate_id=None): + def add_aggregate_host(self, context, aggregate, host, slave_info): """Notify hypervisor of change (for hypervisor pools).""" - if not aggregate: - aggregate_obj.Aggregate.get_by_id(context, aggregate_id) - try: self.driver.add_to_aggregate(context, aggregate, host, slave_info=slave_info) @@ -5144,12 +5065,8 @@ class ComputeManager(manager.SchedulerDependentManager): @aggregate_object_compat @wrap_exception() - def remove_aggregate_host(self, context, host, slave_info=None, - aggregate=None, aggregate_id=None): + def remove_aggregate_host(self, context, host, slave_info, aggregate): """Removes a host from a physical hypervisor pool.""" - if not aggregate: - aggregate_obj.Aggregate.get_by_id(context, aggregate_id) - try: self.driver.remove_from_aggregate(context, aggregate, host, slave_info=slave_info) @@ -5224,267 +5141,3 @@ class ComputeManager(manager.SchedulerDependentManager): instance.cleaned = True with utils.temporary_mutation(context, read_deleted='yes'): instance.save(context) - - -class _ComputeV3Proxy(object): - - RPC_API_VERSION = '3.0' - - def __init__(self, manager): - self.manager = manager - - def add_aggregate_host(self, ctxt, aggregate, host, slave_info): - return self.manager.add_aggregate_host(ctxt, aggregate=aggregate, - host=host, slave_info=slave_info) - - def add_fixed_ip_to_instance(self, ctxt, network_id, instance): - return self.manager.add_fixed_ip_to_instance(ctxt, network_id, - instance) - - def attach_interface(self, ctxt, instance, network_id, port_id, - requested_ip): - return self.manager.attach_interface(ctxt, instance, network_id, - port_id, requested_ip) - - def attach_volume(self, ctxt, volume_id, mountpoint, instance): - return self.manager.attach_volume(ctxt, volume_id, mountpoint, - instance) - - def change_instance_metadata(self, ctxt, diff, instance): - return self.manager.change_instance_metadata(ctxt, diff, instance) - - def check_can_live_migrate_destination(self, ctxt, - instance, block_migration, disk_over_commit): - return self.manager.check_can_live_migrate_destination(ctxt, instance, - block_migration, disk_over_commit) - - def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): - return self.manager.check_can_live_migrate_source(ctxt, instance, - dest_check_data) - - def check_instance_shared_storage(self, ctxt, instance, data): - return self.manager.check_instance_shared_storage(ctxt, instance, data) - - def confirm_resize(self, ctxt, instance, reservations, migration): - return self.manager.confirm_resize(ctxt, instance=instance, - reservations=reservations, migration=migration) - - def detach_interface(self, ctxt, instance, port_id): - return self.manager.detach_interface(ctxt, instance, port_id) - - def detach_volume(self, ctxt, volume_id, instance): - return self.manager.detach_volume(ctxt, volume_id, instance) - - def finish_resize(self, ctxt, instance, migration, image, disk_info, - reservations): - return self.manager.finish_resize(ctxt, instance=instance, - migration=migration, image=image, - disk_info=disk_info, reservations=reservations) - - def finish_revert_resize(self, ctxt, instance, migration, reservations): - return self.manager.finish_revert_resize(ctxt, instance=instance, - migration=migration, reservations=reservations) - - def 
get_console_output(self, ctxt, instance, tail_length): - return self.manager.get_console_output(ctxt, instance, tail_length) - - def get_console_pool_info(self, ctxt, console_type): - return self.manager.get_console_pool_info(ctxt, console_type) - - def get_console_topic(self, ctxt): - return self.manager.get_console_topic(ctxt) - - def get_diagnostics(self, ctxt, instance): - return self.manager.get_diagnostics(ctxt, instance) - - def get_vnc_console(self, ctxt, console_type, instance): - return self.manager.get_vnc_console(ctxt, console_type, instance) - - def get_spice_console(self, ctxt, console_type, instance): - return self.manager.get_spice_console(ctxt, console_type, instance) - - def validate_console_port(self, ctxt, instance, port, console_type): - return self.manager.validate_console_port(ctxt, instance, port, - console_type) - - def host_maintenance_mode(self, ctxt, host, mode): - return self.manager.host_maintenance_mode(ctxt, host, mode) - - def host_power_action(self, ctxt, action): - return self.manager.host_power_action(ctxt, None, action) - - def inject_file(self, ctxt, instance, path, file_contents): - return self.manager.inject_file(ctxt, instance, path, file_contents) - - def inject_network_info(self, ctxt, instance): - return self.manager.inject_network_info(ctxt, instance) - - def live_migration(self, ctxt, instance, dest, block_migration, - migrate_data): - return self.manager.live_migration(ctxt, instance, dest, - block_migration, migrate_data) - - def pause_instance(self, ctxt, instance): - return self.manager.pause_instance(ctxt, instance) - - def post_live_migration_at_destination(self, ctxt, instance, - block_migration): - return self.manager.post_live_migration_at_destination(ctxt, instance, - block_migration) - - def pre_live_migration(self, ctxt, instance, block_migration, disk, - migrate_data): - return self.manager.pre_live_migration(ctxt, instance, block_migration, - disk, migrate_data) - - def prep_resize(self, ctxt, image, instance, instance_type, - reservations, request_spec, - filter_properties, node): - return self.manager.prep_resize(ctxt, image=image, - instance=instance, instance_type=instance_type, - reservations=reservations, request_spec=request_spec, - filter_properties=filter_properties, node=node) - - def reboot_instance(self, ctxt, instance, block_device_info, - reboot_type): - return self.manager.reboot_instance(ctxt, instance=instance, - block_device_info=block_device_info, reboot_type=reboot_type) - - def rebuild_instance(self, ctxt, instance, orig_image_ref, image_ref, - injected_files, new_pass, orig_sys_metadata, bdms, recreate, - on_shared_storage): - return self.manager.rebuild_instance(ctxt, instance, - orig_image_ref, image_ref, injected_files, new_pass, - orig_sys_metadata, bdms, recreate, - on_shared_storage) - - def refresh_provider_fw_rules(self, ctxt): - return self.manager.refresh_provider_fw_rules(ctxt) - - def remove_aggregate_host(self, ctxt, aggregate, host, slave_info): - return self.manager.remove_aggregate_host(ctxt, aggregate=aggregate, - host=host, slave_info=slave_info) - - def remove_fixed_ip_from_instance(self, ctxt, address, instance): - return self.manager.remove_fixed_ip_from_instance(ctxt, address, - instance) - - def remove_volume_connection(self, ctxt, instance, volume_id): - return self.manager.remove_volume_connection(ctxt, instance, volume_id) - - def rescue_instance(self, ctxt, instance, rescue_password): - return self.manager.rescue_instance(ctxt, instance, rescue_password) - - def reset_network(self, 
ctxt, instance): - return self.manager.reset_network(ctxt, instance) - - def resize_instance(self, ctxt, instance, image, reservations, migration, - instance_type): - return self.manager.resize_instance(ctxt, instance=instance, - image=image, reservations=reservations, migration=migration, - instance_type=instance_type) - - def resume_instance(self, ctxt, instance): - return self.manager.resume_instance(ctxt, instance) - - def revert_resize(self, ctxt, instance, migration, reservations): - return self.manager.revert_resize(ctxt, instance=instance, - migration=migration, reservations=reservations) - - def rollback_live_migration_at_destination(self, ctxt, instance): - return self.manager.rollback_live_migration_at_destination(ctxt, - instance) - - def run_instance(self, ctxt, instance, request_spec, - filter_properties, requested_networks, - injected_files, admin_password, - is_first_time, node, legacy_bdm_in_spec): - return self.manager.run_instance(ctxt, instance, request_spec, - filter_properties, requested_networks, - injected_files, admin_password, - is_first_time, node, legacy_bdm_in_spec) - - def set_admin_password(self, ctxt, instance, new_pass): - return self.manager.set_admin_password(ctxt, instance, new_pass) - - def set_host_enabled(self, ctxt, enabled): - return self.manager.set_host_enabled(ctxt, enabled=enabled) - - def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id): - return self.manager.swap_volume(ctxt, instance, old_volume_id, - new_volume_id) - - def get_host_uptime(self, ctxt): - return self.manager.get_host_uptime(ctxt) - - def reserve_block_device_name(self, ctxt, instance, device, volume_id): - return self.manager.reserve_block_device_name(ctxt, instance, device, - volume_id) - - def live_snapshot_instance(self, ctxt, instance, image_id): - return self.manager.live_snapshot_instance(ctxt, instance, image_id) - - def backup_instance(self, ctxt, image_id, instance, backup_type, - rotation): - return self.manager.backup_instance(ctxt, image_id, instance, - backup_type, rotation) - - def snapshot_instance(self, ctxt, instance, image_id): - return self.manager.snapshot_instance(ctxt, instance=instance, - image_id=image_id) - - def start_instance(self, ctxt, instance): - return self.manager.start_instance(ctxt, instance) - - def stop_instance(self, ctxt, instance): - return self.manager.stop_instance(ctxt, instance) - - def suspend_instance(self, ctxt, instance): - return self.manager.suspend_instance(ctxt, instance) - - def terminate_instance(self, ctxt, instance, bdms, reservations): - return self.manager.terminate_instance(ctxt, instance=instance, - bdms=bdms, reservations=reservations) - - def unpause_instance(self, ctxt, instance): - return self.manager.unpause_instance(ctxt, instance) - - def unrescue_instance(self, ctxt, instance): - return self.manager.unrescue_instance(ctxt, instance) - - def soft_delete_instance(self, ctxt, instance, reservations): - return self.manager.soft_delete_instance(ctxt, instance=instance, - reservations=reservations) - - def restore_instance(self, ctxt, instance): - return self.manager.restore_instance(ctxt, instance) - - def shelve_instance(self, ctxt, instance, image_id): - return self.manager.shelve_instance(ctxt, instance, image_id) - - def shelve_offload_instance(self, ctxt, instance): - return self.manager.shelve_offload_instance(ctxt, instance) - - def unshelve_instance(self, ctxt, instance, image): - return self.manager.unshelve_instance(ctxt, instance, image) - - def volume_snapshot_create(self, ctxt, instance, 
volume_id, - create_info): - return self.manager.volume_snapshot_create(ctxt, instance, volume_id, - create_info) - - def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id, - delete_info): - return self.manager.volume_snapshot_delete(ctxt, instance, volume_id, - snapshot_id, delete_info) - - def refresh_security_group_rules(self, ctxt, security_group_id): - return self.manager.refresh_security_group_rules(ctxt, - security_group_id) - - def refresh_security_group_members(self, ctxt, security_group_id): - return self.manager.refresh_security_group_members(ctxt, - security_group_id) - - def refresh_instance_security_rules(self, ctxt, instance): - return self.manager.refresh_instance_security_rules(ctxt, instance) diff --git a/nova/manager.py b/nova/manager.py index 68c8f9449d12..710fa2ff3153 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -59,7 +59,6 @@ from nova import baserpc from nova.db import base from nova import notifier from nova.objects import base as objects_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import periodic_task from nova.openstack.common.rpc import dispatcher as rpc_dispatcher @@ -153,16 +152,3 @@ class SchedulerDependentManager(Manager): if not isinstance(capabilities, list): capabilities = [capabilities] self.last_capabilities = capabilities - - def publish_service_capabilities(self, context): - """Pass data back to the scheduler. - - Called at a periodic interval. And also called via rpc soon after - the start of the scheduler. - """ - #NOTE(jogo): this is now deprecated, unused and can be removed in - #V3.0 of compute RPCAPI - if self.last_capabilities: - LOG.debug(_('Notifying Schedulers of capabilities ...')) - self.scheduler_rpcapi.update_service_capabilities(context, - self.service_name, self.host, self.last_capabilities) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 461bb3f86532..eaae237d8360 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -1037,9 +1037,24 @@ class ComputeTestCase(BaseTestCase): def test_fn(_self, context, instance): self.assertIsInstance(instance, instance_obj.Instance) self.assertEqual(instance.uuid, db_inst['uuid']) - test_fn(None, self.context, instance=db_inst) + def test_create_instance_with_img_ref_associates_config_drive(self): + # Make sure create associates a config drive. + + instance = jsonutils.to_primitive(self._create_fake_instance( + params={'config_drive': '1234', })) + + try: + self.compute.run_instance(self.context, instance, {}, {}, + [], None, None, True, None, False) + instances = db.instance_get_all(self.context) + instance = instances[0] + + self.assertTrue(instance['config_drive']) + finally: + db.instance_destroy(self.context, instance['uuid']) + def test_create_instance_associates_config_drive(self): # Make sure create associates a config drive. 
@@ -1047,7 +1062,8 @@ class ComputeTestCase(BaseTestCase): params={'config_drive': '1234', })) try: - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, + [], None, None, True, None, False) instances = db.instance_get_all(self.context) instance = instances[0] @@ -1062,8 +1078,8 @@ class ComputeTestCase(BaseTestCase): params = {"memory_mb": 999999999999} filter_properties = {'limits': {'memory_mb': None}} instance = self._create_fake_instance(params) - self.compute.run_instance(self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance(self.context, instance, {}, + filter_properties, [], None, None, True, None, False) self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used']) def test_create_instance_unlimited_disk(self): @@ -1073,8 +1089,8 @@ class ComputeTestCase(BaseTestCase): "ephemeral_gb": 99999999999} filter_properties = {'limits': {'disk_gb': None}} instance = self._create_fake_instance(params) - self.compute.run_instance(self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance(self.context, instance, {}, + filter_properties, [], None, None, True, None, False) def test_create_multiple_instances_then_starve(self): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) @@ -1082,23 +1098,23 @@ class ComputeTestCase(BaseTestCase): filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}} params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128} instance = self._create_fake_instance(params) - self.compute.run_instance(self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance(self.context, instance, {}, + filter_properties, [], None, None, True, None, False) self.assertEqual(1024, self.rt.compute_node['memory_mb_used']) self.assertEqual(256, self.rt.compute_node['local_gb_used']) params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256} instance = self._create_fake_instance(params) - self.compute.run_instance(self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance(self.context, instance, {}, + filter_properties, [], None, None, True, None, False) self.assertEqual(3072, self.rt.compute_node['memory_mb_used']) self.assertEqual(768, self.rt.compute_node['local_gb_used']) params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192} instance = self._create_fake_instance(params) self.assertRaises(exception.ComputeResourcesUnavailable, - self.compute.run_instance, self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance, self.context, instance, + {}, filter_properties, [], None, None, True, None, False) def test_create_multiple_instance_with_neutron_port(self): instance_type = flavors.get_default_flavor() @@ -1135,8 +1151,8 @@ class ComputeTestCase(BaseTestCase): limits = {'memory_mb': oversub_limit_mb} filter_properties = {'limits': limits} - self.compute.run_instance(self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance(self.context, instance, {}, + filter_properties, [], None, None, True, None, False) self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used']) @@ -1163,8 +1179,8 @@ class ComputeTestCase(BaseTestCase): filter_properties = {'limits': {'memory_mb': oversub_limit_mb}} self.assertRaises(exception.ComputeResourcesUnavailable, - self.compute.run_instance, self.context, instance=instance, - 
filter_properties=filter_properties) + self.compute.run_instance, self.context, instance, {}, + filter_properties, [], None, None, True, None, False) def test_create_instance_with_oversubscribed_cpu(self): # Test passing of oversubscribed cpu policy from the scheduler. @@ -1183,8 +1199,8 @@ class ComputeTestCase(BaseTestCase): params = {"memory_mb": 10, "root_gb": 1, "ephemeral_gb": 1, "vcpus": 2} instance = self._create_fake_instance(params) - self.compute.run_instance(self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance(self.context, instance, {}, + filter_properties, [], None, None, True, None, False) self.assertEqual(2, self.rt.compute_node['vcpus_used']) @@ -1192,8 +1208,8 @@ class ComputeTestCase(BaseTestCase): params = {"memory_mb": 10, "root_gb": 1, "ephemeral_gb": 1, "vcpus": 1} instance = self._create_fake_instance(params) - self.compute.run_instance(self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance(self.context, instance, {}, + filter_properties, [], None, None, True, None, False) self.assertEqual(3, self.rt.compute_node['vcpus_used']) @@ -1212,8 +1228,8 @@ class ComputeTestCase(BaseTestCase): limits = {'vcpu': 3} filter_properties = {'limits': limits} self.assertRaises(exception.ComputeResourcesUnavailable, - self.compute.run_instance, self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance, self.context, instance, {}, + filter_properties, [], None, None, True, None, False) def test_create_instance_with_oversubscribed_disk(self): # Test passing of oversubscribed disk policy from the scheduler. @@ -1235,8 +1251,8 @@ class ComputeTestCase(BaseTestCase): limits = {'disk_gb': oversub_limit_gb} filter_properties = {'limits': limits} - self.compute.run_instance(self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance(self.context, instance, {}, + filter_properties, [], None, None, True, None, False) self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used']) @@ -1262,13 +1278,14 @@ class ComputeTestCase(BaseTestCase): limits = {'disk_gb': oversub_limit_gb} filter_properties = {'limits': limits} self.assertRaises(exception.ComputeResourcesUnavailable, - self.compute.run_instance, self.context, instance=instance, - filter_properties=filter_properties) + self.compute.run_instance, self.context, instance, {}, + filter_properties, [], None, None, True, None, False) def test_create_instance_without_node_param(self): instance = self._create_fake_instance({'node': None}) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instances = db.instance_get_all(self.context) instance = instances[0] @@ -1278,7 +1295,8 @@ class ComputeTestCase(BaseTestCase): # Create instance with no image provided. 
params = {'image_ref': ''} instance = self._create_fake_instance(params) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self._assert_state({'vm_state': vm_states.ACTIVE, 'task_state': None}) @@ -1300,8 +1318,8 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(self.compute, '_instance_update', _instance_update) try: - self.compute.run_instance(self.context, instance=instance, - is_first_time=True) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instances = db.instance_get_all(self.context) instance = instances[0] @@ -1314,8 +1332,8 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) try: - self.compute.run_instance(self.context, instance=instance, - is_first_time=True) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instances = db.instance_get_all(self.context) instance = instances[0] @@ -1346,7 +1364,11 @@ class ComputeTestCase(BaseTestCase): '_prep_block_device', fake) instance = self._create_fake_instance() self.assertRaises(test.TestingException, self.compute.run_instance, - self.context, instance=instance) + self.context, instance=instance, request_spec={}, + filter_properties={}, requested_networks=[], + injected_files=None, admin_password=None, + is_first_time=True, node=None, + legacy_bdm_in_spec=False) #check state is failed even after the periodic poll self._assert_state({'vm_state': vm_states.ERROR, 'task_state': None}) @@ -1365,7 +1387,11 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(self.compute.driver, 'spawn', fake) instance = self._create_fake_instance() self.assertRaises(test.TestingException, self.compute.run_instance, - self.context, instance=instance) + self.context, instance=instance, request_spec={}, + filter_properties={}, requested_networks=[], + injected_files=None, admin_password=None, + is_first_time=True, node=None, + legacy_bdm_in_spec=False) #check state is failed even after the periodic poll self._assert_state({'vm_state': vm_states.ERROR, 'task_state': None}) @@ -1389,7 +1415,8 @@ class ComputeTestCase(BaseTestCase): self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) def test_run_instance_bails_on_missing_instance(self): # Make sure that run_instance() will quickly ignore a deleted instance @@ -1401,7 +1428,8 @@ class ComputeTestCase(BaseTestCase): raise exception.InstanceNotFound(instance_id='foo') self.stubs.Set(self.compute, '_instance_update', fake_instance_update) - self.compute.run_instance(self.context, instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertIn('instance_update', called) def test_run_instance_bails_on_deleting_instance(self): @@ -1415,7 +1443,8 @@ class ComputeTestCase(BaseTestCase): expected='scheduling', actual='deleting') self.stubs.Set(self.compute, '_instance_update', fake_instance_update) - self.compute.run_instance(self.context, instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertIn('instance_update', called) def test_run_instance_bails_on_missing_instance_2(self): @@ -1429,7 +1458,8 @@ class ComputeTestCase(BaseTestCase): 
self.stubs.Set(self.compute, '_default_block_device_names', fake_default_block_device_names) - self.compute.run_instance(self.context, instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertIn('default_block_device_names', called) def test_can_terminate_on_error_state(self): @@ -1437,7 +1467,7 @@ class ComputeTestCase(BaseTestCase): #check failed to schedule --> terminate params = {'vm_state': vm_states.ERROR} instance = self._create_fake_instance_obj(params=params) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, instance, [], []) self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, self.context, instance['uuid']) # Double check it's not there for admins, either. @@ -1448,13 +1478,15 @@ class ComputeTestCase(BaseTestCase): # Make sure it is possible to run and terminate instance. instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instances = db.instance_get_all(self.context) LOG.info(_("Running instances: %s"), instances) self.assertEqual(len(instances), 1) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) instances = db.instance_get_all(self.context) LOG.info(_("After terminating instances: %s"), instances) @@ -1473,7 +1505,8 @@ class ComputeTestCase(BaseTestCase): """ instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instances = db.instance_get_all(self.context) LOG.info(_("Running instances: %s"), instances) @@ -1502,7 +1535,8 @@ class ComputeTestCase(BaseTestCase): self.compute_api.attach_volume(self.context, instance, 1, '/dev/vdc') - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) instances = db.instance_get_all(self.context) LOG.info(_("After terminating instances: %s"), instances) @@ -1518,11 +1552,13 @@ class ComputeTestCase(BaseTestCase): """ params = {'image_ref': ''} instance = self._create_fake_instance_obj(params) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self._assert_state({'vm_state': vm_states.ACTIVE, 'task_state': None}) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) instances = db.instance_get_all(self.context) self.assertEqual(len(instances), 0) @@ -1530,7 +1566,8 @@ class ComputeTestCase(BaseTestCase): # This is as reported in LP bug 1008875 instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instances = db.instance_get_all(self.context) LOG.info(_("Running instances: %s"), instances) @@ -1545,7 +1582,8 @@ class ComputeTestCase(BaseTestCase): ) self.mox.ReplayAll() - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + 
self._objectify(instance), [], []) instances = db.instance_get_all(self.context) LOG.info(_("After terminating instances: %s"), instances) @@ -1555,7 +1593,8 @@ class ComputeTestCase(BaseTestCase): # This is as reported in LP bug 1192893 instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instances = db.instance_get_all(self.context) LOG.info(_("Running instances: %s"), instances) @@ -1569,7 +1608,8 @@ class ComputeTestCase(BaseTestCase): ) self.mox.ReplayAll() - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) instances = db.instance_get_all(self.context) LOG.info(_("After terminating instances: %s"), instances) @@ -1582,13 +1622,15 @@ class ComputeTestCase(BaseTestCase): self.assertIsNone(instance['launched_at']) self.assertIsNone(instance['deleted_at']) launch = timeutils.utcnow() - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertTrue(instance['launched_at'] > launch) self.assertIsNone(instance['deleted_at']) terminate = timeutils.utcnow() self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(instance)) + self._objectify(instance), [], []) + with utils.temporary_mutation(self.context, read_deleted='only'): instance = db.instance_get_by_uuid(self.context, instance['uuid']) @@ -1598,7 +1640,8 @@ class ComputeTestCase(BaseTestCase): def test_run_terminate_deallocate_net_failure_sets_error_state(self): instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instances = db.instance_get_all(self.context) LOG.info(_("Running instances: %s"), instances) @@ -1611,7 +1654,8 @@ class ComputeTestCase(BaseTestCase): _fake_deallocate_network) try: - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) except test.TestingException: pass @@ -1621,7 +1665,8 @@ class ComputeTestCase(BaseTestCase): def test_stop(self): # Ensure instance can be stopped. instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.POWERING_OFF}) inst_uuid = instance['uuid'] @@ -1630,12 +1675,14 @@ class ComputeTestCase(BaseTestCase): inst_uuid, expected_attrs=extra) self.compute.stop_instance(self.context, instance=inst_obj) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_start(self): # Ensure instance can be started. 
instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.POWERING_OFF}) extra = ['system_metadata', 'metadata'] @@ -1647,12 +1694,14 @@ class ComputeTestCase(BaseTestCase): inst_obj.task_state = task_states.POWERING_ON inst_obj.save(self.context) self.compute.start_instance(self.context, instance=inst_obj) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_stop_start_no_image(self): params = {'image_ref': ''} instance = self._create_fake_instance_obj(params) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.POWERING_OFF}) extra = ['system_metadata', 'metadata'] @@ -1664,7 +1713,8 @@ class ComputeTestCase(BaseTestCase): inst_obj.task_state = task_states.POWERING_ON inst_obj.save(self.context) self.compute.start_instance(self.context, instance=inst_obj) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_rescue(self): # Ensure instance can be rescued and unrescued. @@ -1686,18 +1736,20 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) db.instance_update(self.context, instance_uuid, {"task_state": task_states.RESCUING}) - self.compute.rescue_instance(self.context, instance=instance) + self.compute.rescue_instance(self.context, instance, None) self.assertTrue(called['rescued']) db.instance_update(self.context, instance_uuid, {"task_state": task_states.UNRESCUING}) self.compute.unrescue_instance(self.context, instance=instance) self.assertTrue(called['unrescued']) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_rescue_notifications(self): # Ensure notifications on instance rescue. @@ -1708,12 +1760,13 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) fake_notifier.NOTIFICATIONS = [] db.instance_update(self.context, instance_uuid, {"task_state": task_states.RESCUING}) - self.compute.rescue_instance(self.context, instance=instance) + self.compute.rescue_instance(self.context, instance, None) expected_notifications = ['compute.instance.exists', 'compute.instance.rescue.start', @@ -1738,7 +1791,8 @@ class ComputeTestCase(BaseTestCase): msg = fake_notifier.NOTIFICATIONS[1] self.assertIn('rescue_image_name', msg.payload) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_unrescue_notifications(self): # Ensure notifications on instance rescue. 
@@ -1749,7 +1803,8 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) fake_notifier.NOTIFICATIONS = [] db.instance_update(self.context, instance_uuid, @@ -1776,7 +1831,8 @@ class ComputeTestCase(BaseTestCase): image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_rescue_handle_err(self): # If the driver fails to rescue, instance state should remain the same @@ -1818,7 +1874,8 @@ class ComputeTestCase(BaseTestCase): fake_driver_power_on) instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) extra = ['system_metadata', 'metadata'] inst_obj = instance_obj.Instance.get_by_uuid(self.context, instance['uuid'], @@ -1827,7 +1884,8 @@ class ComputeTestCase(BaseTestCase): inst_obj.save(self.context) self.compute.start_instance(self.context, instance=inst_obj) self.assertTrue(called['power_on']) - self.compute.terminate_instance(self.context, instance=inst_obj) + self.compute.terminate_instance(self.context, + self._objectify(inst_obj), [], []) def test_power_off(self): # Ensure instance can be powered off. @@ -1841,7 +1899,8 @@ class ComputeTestCase(BaseTestCase): fake_driver_power_off) instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) extra = ['system_metadata', 'metadata'] inst_obj = instance_obj.Instance.get_by_uuid(self.context, instance['uuid'], @@ -1850,14 +1909,18 @@ class ComputeTestCase(BaseTestCase): inst_obj.save(self.context) self.compute.stop_instance(self.context, instance=inst_obj) self.assertTrue(called['power_off']) - self.compute.terminate_instance(self.context, instance=inst_obj) + self.compute.terminate_instance(self.context, + self._objectify(inst_obj), [], []) def test_pause(self): # Ensure instance can be paused and unpaused. 
- instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) - db.instance_update(self.context, instance['uuid'], - {"task_state": task_states.PAUSING}) + instance = self._create_fake_instance() + self.compute.run_instance(self.context, + jsonutils.to_primitive(instance), {}, {}, [], None, None, True, + None, False) + instance = self._objectify(instance) + instance.task_state = task_states.PAUSING + instance.save() fake_notifier.NOTIFICATIONS = [] self.compute.pause_instance(self.context, instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) @@ -1867,8 +1930,8 @@ class ComputeTestCase(BaseTestCase): msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.pause.end') - db.instance_update(self.context, instance['uuid'], - {"task_state": task_states.UNPAUSING}) + instance.task_state = task_states.UNPAUSING + instance.save() fake_notifier.NOTIFICATIONS = [] self.compute.unpause_instance(self.context, instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) @@ -1878,19 +1941,21 @@ class ComputeTestCase(BaseTestCase): msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.unpause.end') - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, instance, [], []) def test_suspend(self): # ensure instance can be suspended and resumed. instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) - db.instance_update(self.context, instance['uuid'], - {"task_state": task_states.SUSPENDING}) - self.compute.suspend_instance(self.context, instance=instance) - db.instance_update(self.context, instance['uuid'], - {"task_state": task_states.RESUMING}) - self.compute.resume_instance(self.context, instance=instance) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) + instance = self._objectify(instance) + instance.task_state = task_states.SUSPENDING + instance.save() + self.compute.suspend_instance(self.context, instance) + instance.task_state = task_states.RESUMING + instance.save() + self.compute.resume_instance(self.context, instance) + self.compute.terminate_instance(self.context, instance, [], []) def test_suspend_error(self): # Ensure vm_state is ERROR when suspend error occurs. 
@@ -1900,7 +1965,8 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertRaises(test.TestingException, self.compute.suspend_instance, self.context, @@ -1908,7 +1974,7 @@ class ComputeTestCase(BaseTestCase): instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertEqual(instance['vm_state'], vm_states.ERROR) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(instance)) + self._objectify(instance), [], []) def test_suspend_not_implemented(self): # Ensure expected exception is raised and the vm_state of instance @@ -1920,7 +1986,8 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) instance_state = instance['vm_state'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertRaises(NotImplementedError, self.compute.suspend_instance, self.context, @@ -1928,7 +1995,7 @@ class ComputeTestCase(BaseTestCase): instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertEqual(instance_state, instance['vm_state']) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(instance)) + self._objectify(instance), [], []) def test_rebuild(self): # Ensure instance can be rebuilt. @@ -1936,16 +2003,19 @@ class ComputeTestCase(BaseTestCase): image_ref = instance['image_ref'] sys_metadata = db.instance_system_metadata_get(self.context, instance['uuid']) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) - self.compute.rebuild_instance(self.context, instance, + self.compute.rebuild_instance(self.context, self._objectify(instance), image_ref, image_ref, injected_files=[], new_pass="new_password", orig_sys_metadata=sys_metadata, - bdms=[]) - self.compute.terminate_instance(self.context, instance=instance) + bdms=[], recreate=False, + on_shared_storage=False) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_rebuild_no_image(self): # Ensure instance can be rebuilt when started with no image. @@ -1953,14 +2023,17 @@ class ComputeTestCase(BaseTestCase): instance = self._create_fake_instance_obj(params) sys_metadata = db.instance_system_metadata_get(self.context, instance['uuid']) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) - self.compute.rebuild_instance(self.context, instance, + self.compute.rebuild_instance(self.context, self._objectify(instance), '', '', injected_files=[], new_pass="new_password", - orig_sys_metadata=sys_metadata) - self.compute.terminate_instance(self.context, instance=instance) + orig_sys_metadata=sys_metadata, bdms=[], + recreate=False, on_shared_storage=False) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_rebuild_launched_at_time(self): # Ensure instance can be rebuilt. 
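rebuild_instance follows suit: with the 3.0 signature the formerly optional arguments are always supplied, so the tests now pass orig_sys_metadata, bdms, recreate and on_shared_storage on every call. A compact sketch of the converted call shape, under the same fixture assumptions (image_ref is the instance's current image reference, as in test_rebuild):

    sys_metadata = db.instance_system_metadata_get(self.context,
                                                   instance['uuid'])
    self.compute.rebuild_instance(self.context, self._objectify(instance),
                                  image_ref, image_ref,
                                  injected_files=[],
                                  new_pass="new_password",
                                  orig_sys_metadata=sys_metadata,
                                  bdms=[], recreate=False,
                                  on_shared_storage=False)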
@@ -1971,19 +2044,22 @@ class ComputeTestCase(BaseTestCase): instance_uuid = instance['uuid'] image_ref = instance['image_ref'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) timeutils.set_time_override(cur_time) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) - self.compute.rebuild_instance(self.context, instance, + self.compute.rebuild_instance(self.context, self._objectify(instance), image_ref, image_ref, injected_files=[], new_pass="new_password", - bdms=[]) + orig_sys_metadata={}, + bdms=[], recreate=False, + on_shared_storage=False) instance = db.instance_get_by_uuid(self.context, instance_uuid,) self.assertEqual(cur_time, instance['launched_at']) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(instance)) + self._objectify(instance), [], []) def test_rebuild_with_injected_files(self): # Ensure instance can be rebuilt with injected files. @@ -2006,13 +2082,15 @@ class ComputeTestCase(BaseTestCase): instance['uuid']) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) - self.compute.rebuild_instance(self.context, instance, + self.compute.rebuild_instance(self.context, self._objectify(instance), image_ref, image_ref, injected_files=injected_files, new_pass="new_password", orig_sys_metadata=sys_metadata, - bdms=[]) - self.compute.terminate_instance(self.context, instance=instance) + bdms=[], recreate=False, + on_shared_storage=False) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def _test_reboot(self, soft, test_delete=False, test_unrescue=False, @@ -2241,7 +2319,8 @@ class ComputeTestCase(BaseTestCase): def test_set_admin_password(self): # Ensure instance can have its admin password set. instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) db.instance_update(self.context, instance['uuid'], {'task_state': task_states.UPDATING_PASSWORD}) @@ -2249,19 +2328,20 @@ class ComputeTestCase(BaseTestCase): self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE) self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD) - self.compute.set_admin_password(self.context, instance=instance) + self.compute.set_admin_password(self.context, instance, None) inst_ref = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE) self.assertIsNone(inst_ref['task_state']) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(inst_ref)) + self._objectify(inst_ref), [], []) def test_set_admin_password_bad_state(self): # Test setting password while instance is rebuilding. 
instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) db.instance_update(self.context, instance['uuid'], { "power_state": power_state.NOSTATE, }) @@ -2285,8 +2365,9 @@ class ComputeTestCase(BaseTestCase): self.assertRaises(exception.InstancePasswordSetFailed, self.compute.set_admin_password, self.context, - instance=instance) - self.compute.terminate_instance(self.context, instance=instance) + instance, None) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state, expected_task_state, @@ -2305,7 +2386,8 @@ class ComputeTestCase(BaseTestCase): fake_driver_set_pass) instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) db.instance_update(self.context, instance['uuid'], {'task_state': task_states.UPDATING_PASSWORD}) @@ -2318,14 +2400,15 @@ class ComputeTestCase(BaseTestCase): self.assertRaises(expected_exception, self.compute.set_admin_password, self.context, - instance=jsonutils.to_primitive(inst_ref)) + instance=jsonutils.to_primitive(inst_ref), + new_pass=None) inst_ref = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertEqual(inst_ref['vm_state'], expected_vm_state) self.assertEqual(inst_ref['task_state'], expected_task_state) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(inst_ref)) + self._objectify(inst_ref), [], []) def test_set_admin_password_driver_not_authorized(self): """ @@ -2364,11 +2447,13 @@ class ComputeTestCase(BaseTestCase): fake_driver_inject_file) instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.compute.inject_file(self.context, "/tmp/test", "File Contents", instance=instance) self.assertTrue(called['inject']) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_inject_network_info(self): # Ensure we can inject network info. @@ -2381,11 +2466,13 @@ class ComputeTestCase(BaseTestCase): fake_driver_inject_network) instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) inst_obj = self._objectify(instance) self.compute.inject_network_info(self.context, instance=inst_obj) self.assertTrue(called['inject']) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_reset_network(self): # Ensure we can reset networking on an instance. 
@@ -2398,19 +2485,22 @@ class ComputeTestCase(BaseTestCase): fake_driver_reset_network) instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.compute.reset_network(self.context, instance=self._objectify(instance)) self.assertEqual(called['count'], 1) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def _get_snapshotting_instance(self): # Ensure instance can be snapshotted. instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) instance = db.instance_update( self.context, instance['uuid'], {"task_state": task_states.IMAGE_SNAPSHOT}) @@ -2489,22 +2579,26 @@ class ComputeTestCase(BaseTestCase): def test_console_output(self): # Make sure we can get console output from instance. instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) output = self.compute.get_console_output(self.context, - instance=instance) + instance=instance, tail_length=None) self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE') - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_console_output_tail(self): # Make sure we can get console output from instance. instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) output = self.compute.get_console_output(self.context, instance=instance, tail_length=2) self.assertEqual(output, 'ANOTHER\nLAST LINE') - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_novnc_vnc_console(self): # Make sure we can a vnc console for an instance. 
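The console-output tests show the same explicitness for get_console_output: tail_length is passed even when the whole log is wanted, which suggests the 3.0 signature drops its default. A short sketch under the same fixture assumptions; None returns the full fake output, an integer returns that many trailing lines:

    output = self.compute.get_console_output(self.context, instance=instance,
                                              tail_length=None)
    tail = self.compute.get_console_output(self.context, instance=instance,
                                           tail_length=2)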
@@ -2512,14 +2606,16 @@ class ComputeTestCase(BaseTestCase): self.flags(enabled=False, group='spice') instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) # Try with the full instance console = self.compute.get_vnc_console(self.context, 'novnc', instance=instance) self.assertTrue(console) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_validate_console_port_vnc(self): self.flags(vnc_enabled=True) @@ -2575,12 +2671,14 @@ class ComputeTestCase(BaseTestCase): self.flags(enabled=False, group='spice') instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) console = self.compute.get_vnc_console(self.context, 'xvpvnc', instance=instance) self.assertTrue(console) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_invalid_vnc_console_type(self): # Raise useful error if console type is an unrecognised string. @@ -2588,7 +2686,8 @@ class ComputeTestCase(BaseTestCase): self.flags(enabled=False, group='spice') instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertRaises(rpc_common.ClientException, self.compute.get_vnc_console, @@ -2600,7 +2699,8 @@ class ComputeTestCase(BaseTestCase): self.compute.get_vnc_console, self.context, 'invalid', instance=instance) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_missing_vnc_console_type(self): # Raise useful error is console type is None. @@ -2608,7 +2708,8 @@ class ComputeTestCase(BaseTestCase): self.flags(enabled=False, group='spice') instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertRaises(rpc_common.ClientException, self.compute.get_vnc_console, @@ -2620,7 +2721,8 @@ class ComputeTestCase(BaseTestCase): self.compute.get_vnc_console, self.context, None, instance=instance) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_spicehtml5_spice_console(self): # Make sure we can a spice console for an instance. 
@@ -2628,14 +2730,16 @@ class ComputeTestCase(BaseTestCase): self.flags(enabled=True, group='spice') instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) # Try with the full instance console = self.compute.get_spice_console(self.context, 'spice-html5', instance=instance) self.assertTrue(console) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_invalid_spice_console_type(self): # Raise useful error if console type is an unrecognised string @@ -2643,7 +2747,8 @@ class ComputeTestCase(BaseTestCase): self.flags(enabled=True, group='spice') instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertRaises(rpc_common.ClientException, self.compute.get_spice_console, @@ -2655,7 +2760,8 @@ class ComputeTestCase(BaseTestCase): self.compute.get_spice_console, self.context, 'invalid', instance=instance) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_missing_spice_console_type(self): # Raise useful error is console type is None @@ -2663,7 +2769,8 @@ class ComputeTestCase(BaseTestCase): self.flags(enabled=True, group='spice') instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertRaises(rpc_common.ClientException, self.compute.get_spice_console, @@ -2675,7 +2782,8 @@ class ComputeTestCase(BaseTestCase): self.compute.get_spice_console, self.context, None, instance=instance) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_vnc_console_instance_not_ready(self): self.flags(vnc_enabled=True) @@ -2735,12 +2843,14 @@ class ComputeTestCase(BaseTestCase): } instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) diagnostics = self.compute.get_diagnostics(self.context, instance=instance) self.assertEqual(diagnostics, expected_diagnostic) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_add_fixed_ip_usage_notification(self): def dummy(*args, **kwargs): @@ -2760,7 +2870,8 @@ class ComputeTestCase(BaseTestCase): instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_remove_fixed_ip_usage_notification(self): def dummy(*args, **kwargs): @@ -2780,13 +2891,15 @@ class ComputeTestCase(BaseTestCase): instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def 
test_run_instance_usage_notification(self): # Ensure run instance generates appropriate usage notification. instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) inst_ref = db.instance_get_by_uuid(self.context, instance_uuid) msg = fake_notifier.NOTIFICATIONS[0] @@ -2816,7 +2929,7 @@ class ComputeTestCase(BaseTestCase): self.assertEqual(payload['image_ref_url'], image_ref_url) self.assertEqual('Success', payload['message']) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(inst_ref)) + self._objectify(inst_ref), [], []) def test_run_instance_end_notification_on_abort(self): # Test that an end notif is sent if the build is aborted @@ -2829,7 +2942,8 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(self.compute, '_build_instance', build_inst_abort) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.create.start') @@ -2852,7 +2966,8 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(self.compute, '_build_instance', build_inst_fail) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2) msg = fake_notifier.NOTIFICATIONS[0] @@ -2875,7 +2990,8 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(self.compute, '_build_instance', build_inst_fail) self.assertRaises(test.TestingException, self.compute.run_instance, - self.context, instance=instance) + self.context, instance, {}, {}, [], None, None, True, None, + False) self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2) msg = fake_notifier.NOTIFICATIONS[0] @@ -2895,10 +3011,12 @@ class ComputeTestCase(BaseTestCase): timeutils.set_time_override(old_time) instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) fake_notifier.NOTIFICATIONS = [] timeutils.set_time_override(cur_time) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4) @@ -2933,12 +3051,14 @@ class ComputeTestCase(BaseTestCase): def test_run_instance_existing(self): # Ensure failure when running an instance that already exists. 
instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, [], None, + None, True, None, False) self.assertRaises(exception.InstanceExists, self.compute.run_instance, - self.context, - instance=instance) - self.compute.terminate_instance(self.context, instance=instance) + self.context, instance, {}, {}, [], None, None, True, + None, False) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_run_instance_queries_macs(self): # run_instance should ask the driver for node mac addresses and pass @@ -2960,7 +3080,8 @@ class ComputeTestCase(BaseTestCase): self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance") self.compute.driver.macs_for_instance(instance).AndReturn(macs) self.mox.ReplayAll() - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) def test_instance_set_to_error_on_uncaught_exception(self): # Test that instance is set to error state when exception is raised. @@ -2988,15 +3109,15 @@ class ComputeTestCase(BaseTestCase): self.assertRaises(rpc_common.RemoteError, self.compute.run_instance, - self.context, - instance=instance) + self.context, instance, {}, {}, None, None, None, + True, None, False) instance = db.instance_get_by_uuid(context.get_admin_context(), instance['uuid']) self.assertEqual(vm_states.ERROR, instance['vm_state']) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(instance)) + self._objectify(instance), [], []) def test_delete_instance_succedes_on_volume_fail(self): instance = self._create_fake_instance_obj() @@ -3096,7 +3217,8 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(self.compute, '_delete_instance', fake_delete_instance) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertEqual(instance['vm_state'], vm_states.ERROR) @@ -3113,9 +3235,11 @@ class ComputeTestCase(BaseTestCase): self.assertRaises(rpc.common.RemoteError, self.compute.run_instance, - self.context, instance=instance) + self.context, instance, {}, {}, None, None, None, + True, None, False) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_lock(self): # FIXME(comstud): This test is such crap. This is testing @@ -3126,7 +3250,8 @@ class ComputeTestCase(BaseTestCase): # have the decorator. 
instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) non_admin_context = context.RequestContext(None, None, @@ -3163,7 +3288,7 @@ class ComputeTestCase(BaseTestCase): self.compute_api.reboot(self.context, inst_obj, 'SOFT') check_task_state(task_states.REBOOTING) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(instance)) + self._objectify(instance), [], []) def _check_locked_by(self, instance_uuid, locked_by): instance = db.instance_get_by_uuid(self.context, instance_uuid) @@ -3184,7 +3309,8 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) # Ensure that an admin can override the owner lock inst_obj = self._objectify(instance) @@ -3206,7 +3332,8 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) # Ensure that an admin can upgrade the lock and that # the owner can no longer unlock @@ -3257,29 +3384,50 @@ class ComputeTestCase(BaseTestCase): def test_state_revert(self): # ensure that task_state is reverted after a failed operation. + migration = migration_obj.Migration() + migration.new_instance_type_id = '1' + actions = [ - ("reboot_instance", task_states.REBOOTING), + ("reboot_instance", task_states.REBOOTING, + {'block_device_info': [], + 'reboot_type': 'SOFT'}), ("stop_instance", task_states.POWERING_OFF), ("start_instance", task_states.POWERING_ON), - ("terminate_instance", task_states.DELETING), - ("soft_delete_instance", task_states.SOFT_DELETING), + ("terminate_instance", task_states.DELETING, + {'bdms': [], + 'reservations': []}), + ("soft_delete_instance", task_states.SOFT_DELETING, + {'reservations': []}), ("restore_instance", task_states.RESTORING), ("rebuild_instance", task_states.REBUILDING, {'orig_image_ref': None, 'image_ref': None, 'injected_files': [], - 'new_pass': ''}), - ("set_admin_password", task_states.UPDATING_PASSWORD), - ("rescue_instance", task_states.RESCUING), + 'new_pass': '', + 'orig_sys_metadata': {}, + 'bdms': [], + 'recreate': False, + 'on_shared_storage': False}), + ("set_admin_password", task_states.UPDATING_PASSWORD, + {'new_pass': None}), + ("rescue_instance", task_states.RESCUING, + {'rescue_password': None}), ("unrescue_instance", task_states.UNRESCUING), ("revert_resize", task_states.RESIZE_REVERTING, - {'migration_id': None}), + {'migration': migration, + 'reservations': []}), ("prep_resize", task_states.RESIZE_PREP, {'image': {}, - 'instance_type': {}}), + 'instance_type': {}, + 'reservations': [], + 'request_spec': {}, + 'filter_properties': {}, + 'node': None}), ("resize_instance", task_states.RESIZE_PREP, - {'migration_id': None, - 'image': {}}), + {'migration': migration, + 'image': {}, + 'reservations': [], + 'instance_type': {}}), ("pause_instance", task_states.PAUSING), ("unpause_instance", task_states.UNPAUSING), ("suspend_instance", task_states.SUSPENDING), @@ -3291,6 +3439,7 @@ class ComputeTestCase(BaseTestCase): 
'revert_resize', 'confirm_resize' ] + self._stub_out_resize_network_methods() instance = self._create_fake_instance() inst_obj = instance_obj.Instance._from_db_object( self.context, instance_obj.Instance(), instance, @@ -3334,8 +3483,8 @@ class ComputeTestCase(BaseTestCase): def test_quotas_succesful_delete(self): instance = jsonutils.to_primitive(self._create_fake_instance()) resvs = self._ensure_quota_reservations_committed(True, True) - self.compute.terminate_instance(self.context, instance, - bdms=None, reservations=resvs) + self.compute.terminate_instance(self.context, + self._objectify(instance), bdms=[], reservations=resvs) def test_quotas_failed_delete(self): instance = jsonutils.to_primitive(self._create_fake_instance()) @@ -3349,12 +3498,12 @@ class ComputeTestCase(BaseTestCase): resvs = self._ensure_quota_reservations_rolledback(True, True) self.assertRaises(test.TestingException, self.compute.terminate_instance, - self.context, instance, - bdms=None, reservations=resvs) + self.context, self._objectify(instance), + bdms=[], reservations=resvs) def test_quotas_succesful_soft_delete(self): - instance = jsonutils.to_primitive(self._create_fake_instance( - params=dict(task_state=task_states.SOFT_DELETING))) + instance = self._objectify(self._create_fake_instance( + params=dict(task_state=task_states.SOFT_DELETING))) resvs = self._ensure_quota_reservations_committed(True, True) self.compute.soft_delete_instance(self.context, instance, reservations=resvs) @@ -3381,8 +3530,8 @@ class ComputeTestCase(BaseTestCase): # Termination should be successful, but quota reservations # rolled back because the instance was in SOFT_DELETED state. resvs = self._ensure_quota_reservations_rolledback() - self.compute.terminate_instance(self.context, instance, - bdms=None, reservations=resvs) + self.compute.terminate_instance(self.context, + self._objectify(instance), bdms=[], reservations=resvs) def _stub_out_resize_network_methods(self): def fake(cls, ctxt, instance, *args, **kwargs): @@ -3410,7 +3559,8 @@ class ComputeTestCase(BaseTestCase): instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, - image={}) + image={}, reservations=[], request_spec={}, + filter_properties={}, node=None) instance.task_state = task_states.RESIZE_MIGRATED instance.save() @@ -3595,7 +3745,8 @@ class ComputeTestCase(BaseTestCase): instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, - image={}) + image={}, reservations=[], request_spec={}, + filter_properties={}, node=None) # fake out detach for prep_resize (and later terminate) def fake_terminate_connection(self, context, volume, connector): @@ -3609,7 +3760,7 @@ class ComputeTestCase(BaseTestCase): self.context.elevated(), instance.uuid, 'pre-migrating') self.compute.resize_instance(self.context, instance=instance, - migration=migration, image={}, + migration=migration, image={}, reservations=[], instance_type=jsonutils.to_primitive(instance_type)) # assert bdm is unchanged @@ -3665,7 +3816,8 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(cinder.API, "detach", fake_detach) # clean up - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_finish_resize_handles_error(self): # Make sure we don't leave the instance in RESIZE on error. 
@@ -3687,7 +3839,9 @@ class ComputeTestCase(BaseTestCase): self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, - image={}, reservations=reservations) + image={}, reservations=reservations, + request_spec={}, filter_properties={}, + node=None) migration = migration_obj.Migration.get_by_instance_and_status( self.context.elevated(), @@ -3712,7 +3866,8 @@ class ComputeTestCase(BaseTestCase): cur_time = datetime.datetime(2012, 12, 21, 12, 21) timeutils.set_time_override(old_time) inst_ref = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=inst_ref) + self.compute.run_instance(self.context, inst_ref, {}, {}, None, None, + None, True, None, False) timeutils.set_time_override(cur_time) fake_notifier.NOTIFICATIONS = [] @@ -3731,12 +3886,13 @@ class ComputeTestCase(BaseTestCase): db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) self.compute.rebuild_instance(self.context, - jsonutils.to_primitive(instance), + self._objectify(instance), image_ref, new_image_ref, injected_files=[], new_pass=password, orig_sys_metadata=orig_sys_metadata, - bdms=[]) + bdms=[], recreate=False, + on_shared_storage=False) instance = db.instance_get_by_uuid(self.context, inst_ref['uuid']) @@ -3773,7 +3929,7 @@ class ComputeTestCase(BaseTestCase): self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time)) self.assertEqual(payload['image_ref_url'], new_image_ref_url) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(inst_ref)) + self._objectify(inst_ref), [], []) def test_finish_resize_instance_notification(self): # Ensure notifications on instance migrate/resize. @@ -3786,14 +3942,16 @@ class ComputeTestCase(BaseTestCase): new_type_id = new_type['id'] flavor_id = new_type['flavorid'] instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) instance.host = 'foo' instance.task_state = task_states.RESIZE_PREP instance.save() self.compute.prep_resize(self.context, instance=instance, - instance_type=new_type, image={}) + instance_type=new_type, image={}, reservations=[], + request_spec={}, filter_properties={}, node=None) self._stub_out_resize_network_methods() @@ -3801,12 +3959,13 @@ class ComputeTestCase(BaseTestCase): self.context.elevated(), instance.uuid, 'pre-migrating') self.compute.resize_instance(self.context, instance=instance, - migration=migration, image={}, instance_type=new_type) + migration=migration, image={}, instance_type=new_type, + reservations=[]) timeutils.set_time_override(cur_time) fake_notifier.NOTIFICATIONS = [] self.compute.finish_resize(self.context, - migration=migration, + migration=migration, reservations=[], disk_info={}, image={}, instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) @@ -3830,7 +3989,8 @@ class ComputeTestCase(BaseTestCase): self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time)) image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) - self.compute.terminate_instance(self.context, instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_resize_instance_notification(self): # Ensure notifications on instance migrate/resize. 
@@ -3839,7 +3999,8 @@ class ComputeTestCase(BaseTestCase): timeutils.set_time_override(old_time) instance = self._create_fake_instance_obj() - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) timeutils.set_time_override(cur_time) fake_notifier.NOTIFICATIONS = [] @@ -3849,7 +4010,8 @@ class ComputeTestCase(BaseTestCase): instance_type = flavors.get_default_flavor() self.compute.prep_resize(self.context, instance=instance, - instance_type=instance_type, image={}) + instance_type=instance_type, image={}, reservations=[], + request_spec={}, filter_properties={}, node=None) db.migration_get_by_instance_and_status(self.context.elevated(), instance.uuid, 'pre-migrating') @@ -3879,7 +4041,8 @@ class ComputeTestCase(BaseTestCase): self.assertIn('launched_at', payload) image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_prep_resize_instance_migration_error_on_same_host(self): """Ensure prep_resize raise a migration error if destination is set on @@ -3892,7 +4055,8 @@ class ComputeTestCase(BaseTestCase): reservations = self._ensure_quota_reservations_rolledback() instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) instance.host = self.compute.host instance.save() instance_type = flavors.get_default_flavor() @@ -3900,8 +4064,10 @@ class ComputeTestCase(BaseTestCase): self.assertRaises(exception.MigrationError, self.compute.prep_resize, self.context, instance=instance, instance_type=instance_type, image={}, - reservations=reservations) - self.compute.terminate_instance(self.context, instance=instance) + reservations=reservations, request_spec={}, + filter_properties={}, node=None) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_prep_resize_instance_migration_error_on_none_host(self): """Ensure prep_resize raises a migration error if destination host is @@ -3912,7 +4078,8 @@ class ComputeTestCase(BaseTestCase): reservations = self._ensure_quota_reservations_rolledback() instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) instance.host = None instance.save() instance_type = flavors.get_default_flavor() @@ -3920,8 +4087,10 @@ class ComputeTestCase(BaseTestCase): self.assertRaises(exception.MigrationError, self.compute.prep_resize, self.context, instance=instance, instance_type=instance_type, image={}, - reservations=reservations) - self.compute.terminate_instance(self.context, instance=instance) + reservations=reservations, request_spec={}, + filter_properties={}, node=None) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_resize_instance_driver_error(self): # Ensure instance status set to Error on resize error. 
@@ -3938,12 +4107,14 @@ class ComputeTestCase(BaseTestCase): reservations = self._ensure_quota_reservations_rolledback() instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) instance.host = 'foo' instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, - reservations=reservations) + reservations=reservations, request_spec={}, + filter_properties={}, node=None) instance.task_state = task_states.RESIZE_PREP instance.save() migration = migration_obj.Migration.get_by_instance_and_status( @@ -3960,7 +4131,8 @@ class ComputeTestCase(BaseTestCase): # is not updated. Refresh and compare against the DB. instance.refresh() self.assertEqual(instance.vm_state, vm_states.ERROR) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_resize_instance_driver_rollback(self): # Ensure instance status set to Running after rollback. @@ -3975,12 +4147,14 @@ class ComputeTestCase(BaseTestCase): instance_type = flavors.get_default_flavor() reservations = self._ensure_quota_reservations_rolledback() instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) instance.host = 'foo' instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, - reservations=reservations) + reservations=reservations, request_spec={}, + filter_properties={}, node=None) instance.task_state = task_states.RESIZE_PREP instance.save() @@ -3998,7 +4172,8 @@ class ComputeTestCase(BaseTestCase): instance.refresh() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertIsNone(instance.task_state) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_resize_instance(self): # Ensure instance can be migrated/resized. 
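The resize conversions thread the same explicit arguments through the whole path: prep_resize gains reservations, request_spec, filter_properties and node, while resize_instance and finish_resize take the Migration object together with reservations. A condensed sketch of the sequence the converted tests use, assuming the fixture helpers plus the flavors, migration_obj and jsonutils imports already present in this module:

    instance_type = flavors.get_default_flavor()
    self.compute.prep_resize(self.context, instance=instance,
                             instance_type=instance_type, image={},
                             reservations=[], request_spec={},
                             filter_properties={}, node=None)
    migration = migration_obj.Migration.get_by_instance_and_status(
        self.context.elevated(), instance.uuid, 'pre-migrating')
    self.compute.resize_instance(self.context, instance=instance,
                                 migration=migration, image={},
                                 reservations=[],
                                 instance_type=jsonutils.to_primitive(
                                     instance_type))
    self.compute.finish_resize(self.context, migration=migration,
                               reservations=[], disk_info={}, image={},
                               instance=instance)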
@@ -4006,11 +4181,13 @@ class ComputeTestCase(BaseTestCase): instance_type = flavors.get_default_flavor() instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) instance.host = 'foo' instance.save() self.compute.prep_resize(self.context, instance=instance, - instance_type=instance_type, image={}) + instance_type=instance_type, image={}, reservations=[], + request_spec={}, filter_properties={}, node=None) # verify 'old_vm_state' was set on system_metadata instance.refresh() @@ -4027,10 +4204,11 @@ class ComputeTestCase(BaseTestCase): instance.uuid, 'pre-migrating') self.compute.resize_instance(self.context, instance=instance, - migration=migration, image={}, + migration=migration, image={}, reservations=[], instance_type=jsonutils.to_primitive(instance_type)) self.assertEqual(migration.dest_compute, instance.host) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def _test_confirm_resize(self, power_on): # Common test case method for confirm_resize @@ -4064,7 +4242,8 @@ class ComputeTestCase(BaseTestCase): reservations = self._ensure_quota_reservations_committed() instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) # Confirm the instance size before the resize starts instance.refresh() @@ -4082,7 +4261,8 @@ class ComputeTestCase(BaseTestCase): self.compute.prep_resize(self.context, instance=instance, instance_type=new_instance_type_p, - image={}, reservations=reservations) + image={}, reservations=reservations, request_spec={}, + filter_properties={}, node=None) migration = migration_obj.Migration.get_by_instance_and_status( self.context.elevated(), @@ -4096,9 +4276,10 @@ class ComputeTestCase(BaseTestCase): self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, + reservations=[], instance_type=new_instance_type_p) self.compute.finish_resize(self.context, - migration=migration, + migration=migration, reservations=[], disk_info={}, image={}, instance=instance) # Prove that the instance size is now the new size @@ -4122,7 +4303,8 @@ class ComputeTestCase(BaseTestCase): self.assertEqual(old_vm_state, instance.vm_state) self.assertIsNone(instance.task_state) self.assertEqual(p_state, instance.power_state) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_confirm_resize_from_active(self): self._test_confirm_resize(power_on=True) @@ -4167,7 +4349,8 @@ class ComputeTestCase(BaseTestCase): reservations = self._ensure_quota_reservations_committed() instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) instance.refresh() instance_type_ref = db.flavor_get(self.context, @@ -4186,7 +4369,8 @@ class ComputeTestCase(BaseTestCase): self.compute.prep_resize(self.context, instance=instance, instance_type=new_instance_type_p, - image={}, reservations=reservations) + image={}, reservations=reservations, request_spec={}, + filter_properties={}, node=None) migration = 
migration_obj.Migration.get_by_instance_and_status( self.context.elevated(), @@ -4200,9 +4384,10 @@ class ComputeTestCase(BaseTestCase): self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, + reservations=[], instance_type=new_instance_type_p) self.compute.finish_resize(self.context, - migration=migration, + migration=migration, reservations=[], disk_info={}, image={}, instance=instance) # Prove that the instance size is now the new size @@ -4307,14 +4492,17 @@ class ComputeTestCase(BaseTestCase): reservations = self._ensure_quota_reservations_rolledback() instance = self._create_fake_instance_obj() instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) instance.refresh() instance_type = flavors.get_default_flavor() self.assertRaises(exception.MigrationError, self.compute.prep_resize, self.context, instance=instance, instance_type=instance_type, image={}, - reservations=reservations) - self.compute.terminate_instance(self.context, instance) + reservations=reservations, request_spec={}, + filter_properties={}, node=None) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_resize_instance_handles_migration_error(self): # Ensure vm_state is ERROR when error occurs. @@ -4330,12 +4518,15 @@ class ComputeTestCase(BaseTestCase): instance_type = flavors.get_default_flavor() instance_p = obj_base.obj_to_primitive(instance) - self.compute.run_instance(self.context, instance=instance_p) + self.compute.run_instance(self.context, instance_p, {}, {}, None, None, + None, True, None, False) instance.host = 'foo' instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, - image={}, reservations=reservations) + image={}, reservations=reservations, + request_spec={}, filter_properties={}, + node=None) migration = migration_obj.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') @@ -4350,7 +4541,8 @@ class ComputeTestCase(BaseTestCase): # is not updated. Refresh and compare against the DB. instance.refresh() self.assertEqual(instance.vm_state, vm_states.ERROR) - self.compute.terminate_instance(self.context, instance=instance) + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) def test_pre_live_migration_instance_has_no_fixed_ip(self): # Confirm that no exception is raised if there is no fixed ip on @@ -4400,7 +4592,7 @@ class ComputeTestCase(BaseTestCase): self.mox.ReplayAll() migrate_data = {'is_shared_storage': False} ret = self.compute.pre_live_migration(c, instance=instance, - block_migration=False, + block_migration=False, disk=None, migrate_data=migrate_data) self.assertIsNone(ret) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) @@ -4467,7 +4659,7 @@ class ComputeTestCase(BaseTestCase): self.assertRaises(test.TestingException, self.compute.live_migration, c, dest=dest_host, block_migration=True, - instance=instance) + instance=instance, migrate_data={}) def test_live_migration_works_correctly(self): # Confirm live_migration() works as expected correctly. 
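The live-migration hunks apply the same rule: disk, block_migration and migrate_data are passed explicitly rather than left to server-side defaults. A minimal sketch of the converted calls, with c standing in for an elevated context as in the surrounding tests; the trailing False on post_live_migration_at_destination maps to block_migration, inferred from the rest of this patch:

    migrate_data = {'is_shared_storage': False}
    self.compute.pre_live_migration(c, instance=instance,
                                    block_migration=False, disk=None,
                                    migrate_data=migrate_data)
    self.compute.live_migration(c, dest=dest, instance=instance,
                                block_migration=False,
                                migrate_data=migrate_data)
    self.compute.post_live_migration_at_destination(c, instance, False)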
@@ -4508,6 +4700,7 @@ class ComputeTestCase(BaseTestCase): ret = self.compute.live_migration(c, dest=dest, instance=instance, + block_migration=False, migrate_data=migrate_data) self.assertIsNone(ret) @@ -4657,7 +4850,7 @@ class ComputeTestCase(BaseTestCase): self.mox.ReplayAll() self.compute.post_live_migration_at_destination(self.admin_ctxt, - self.instance) + self.instance, False) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] @@ -4728,7 +4921,8 @@ class ComputeTestCase(BaseTestCase): # Detect when a vm is terminated behind the scenes. instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) instances = db.instance_get_all(self.context) LOG.info(_("Running instances: %s"), instances) @@ -5750,7 +5944,7 @@ class ComputeTestCase(BaseTestCase): instance.save() self.compute.confirm_resize(self.context, instance=instance, - migration=migration) + migration=migration, reservations=[]) instance.refresh() self.assertEqual(vm_states.ACTIVE, instance['vm_state']) @@ -5864,7 +6058,8 @@ class ComputeAPITestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance(params, services=True)) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertIsNone(instance['task_state']) @@ -6213,7 +6408,8 @@ class ComputeAPITestCase(BaseTestCase): def _test_rebuild(self, vm_state): instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertIsNone(instance['task_state']) @@ -6270,7 +6466,8 @@ class ComputeAPITestCase(BaseTestCase): instance = jsonutils.to_primitive( self._create_fake_instance(params={'image_ref': ''})) self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) db.instance_update(self.context, instance['uuid'], {"vm_state": vm_states.ERROR, @@ -6290,7 +6487,8 @@ class ComputeAPITestCase(BaseTestCase): self._create_fake_instance(params={'image_ref': ''})) instance_uuid = instance['uuid'] self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) self.compute_api.rebuild(self.context, instance, '', 'new_password') instance = db.instance_get_by_uuid(self.context, instance_uuid) @@ -6432,7 +6630,8 @@ class ComputeAPITestCase(BaseTestCase): # Ensure instance can have its admin password set. 
instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) inst_ref = db.instance_get_by_uuid(self.context, instance_uuid) self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE) @@ -6451,12 +6650,13 @@ class ComputeAPITestCase(BaseTestCase): task_states.UPDATING_PASSWORD) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(inst_ref)) + self._objectify(inst_ref), [], []) def test_rescue_unrescue(self): instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertEqual(instance['vm_state'], vm_states.ACTIVE) @@ -6479,7 +6679,7 @@ class ComputeAPITestCase(BaseTestCase): self.assertEqual(instance['task_state'], task_states.UNRESCUING) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(instance)) + self._objectify(instance), [], []) def test_rescue_volume_backed(self): # Instance started without an image @@ -6509,9 +6709,11 @@ class ComputeAPITestCase(BaseTestCase): self.stubs.Set(cinder.API, 'get', fake_volume_get) self.compute.run_instance(self.context, - instance=volume_backed_inst_1) + volume_backed_inst_1, {}, {}, None, None, + None, True, None, False) self.compute.run_instance(self.context, - instance=volume_backed_inst_2) + volume_backed_inst_2, {}, {}, None, None, + None, True, None, False) self.assertRaises(exception.InstanceNotRescuable, self.compute_api.rescue, self.context, @@ -6521,9 +6723,9 @@ class ComputeAPITestCase(BaseTestCase): volume_backed_inst_2) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(volume_backed_inst_1)) + self._objectify(volume_backed_inst_1), [], []) self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(volume_backed_inst_2)) + self._objectify(volume_backed_inst_2), [], []) def test_get(self): # Test get instance. 
@@ -7147,7 +7349,7 @@ class ComputeAPITestCase(BaseTestCase): db.block_device_mapping_destroy(self.context, bdm['id']) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.compute.terminate_instance(self.context, - jsonutils.to_primitive(instance)) + self._objectify(instance), [], []) def test_check_and_transform_bdm(self): base_options = {'root_device_name': 'vdb', @@ -7330,7 +7532,8 @@ class ComputeAPITestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance( params={'architecture': ''})) try: - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, + None, None, True, None, False) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertNotEqual(instance['architecture'], 'Unknown') @@ -7771,7 +7974,11 @@ class ComputeAPITestCase(BaseTestCase): fake_terminate_connection) # Kill the instance and check that it was detached - self.compute.terminate_instance(admin, instance=instance) + bdms = db.block_device_mapping_get_all_by_instance(admin, + instance['uuid']) + self.compute.terminate_instance(admin, self._objectify(instance), bdms, + []) + self.assertTrue(result["detached"]) def test_terminate_deletes_all_bdms(self): @@ -7796,9 +8003,12 @@ class ComputeAPITestCase(BaseTestCase): self.stubs.Set(self.compute, 'volume_api', mox.MockAnything()) self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything()) - self.compute.run_instance(self.context, instance=instance) + self.compute.run_instance(self.context, instance, {}, {}, None, None, + None, True, None, False) + + self.compute.terminate_instance(self.context, + self._objectify(instance), [], []) - self.compute.terminate_instance(self.context, instance=instance) bdms = db.block_device_mapping_get_all_by_instance(admin, instance['uuid']) self.assertEqual(len(bdms), 0) @@ -7806,7 +8016,8 @@ class ComputeAPITestCase(BaseTestCase): def test_inject_network_info(self): instance = self._create_fake_instance(params={'host': CONF.host}) self.compute.run_instance(self.context, - instance=jsonutils.to_primitive(instance)) + jsonutils.to_primitive(instance), {}, {}, None, None, + None, True, None, False) instance = self.compute_api.get(self.context, instance['uuid'], want_objects=True) self.compute_api.inject_network_info(self.context, instance) @@ -7818,7 +8029,8 @@ class ComputeAPITestCase(BaseTestCase): def test_reset_network(self): instance = self._create_fake_instance() self.compute.run_instance(self.context, - instance=jsonutils.to_primitive(instance)) + jsonutils.to_primitive(instance), {}, {}, None, None, + None, True, None, False) instance = self.compute_api.get(self.context, instance['uuid'], want_objects=True) self.compute_api.reset_network(self.context, instance) @@ -7845,7 +8057,8 @@ class ComputeAPITestCase(BaseTestCase): instance = self._create_fake_instance() self.compute.run_instance(self.context, - instance=jsonutils.to_primitive(instance)) + jsonutils.to_primitive(instance), {}, {}, None, None, + None, True, None, False) instance = self.compute_api.get(self.context, instance['uuid']) security_group_name = self._create_group()['name'] @@ -8511,8 +8724,8 @@ class ComputeAggrTestCase(BaseTestCase): self.stubs.Set(self.compute.driver, "add_to_aggregate", fake_driver_add_to_aggregate) - self.compute.add_aggregate_host(self.context, "host", - aggregate=jsonutils.to_primitive(self.aggr)) + self.compute.add_aggregate_host(self.context, host="host", + aggregate=jsonutils.to_primitive(self.aggr), 
slave_info=None) self.assertTrue(fake_driver_add_to_aggregate.called) def test_remove_aggregate_host(self): @@ -8525,7 +8738,8 @@ class ComputeAggrTestCase(BaseTestCase): fake_driver_remove_from_aggregate) self.compute.remove_aggregate_host(self.context, - aggregate=jsonutils.to_primitive(self.aggr), host="host") + aggregate=jsonutils.to_primitive(self.aggr), host="host", + slave_info=None) self.assertTrue(fake_driver_remove_from_aggregate.called) def test_add_aggregate_host_passes_slave_info_to_driver(self): @@ -8538,7 +8752,7 @@ class ComputeAggrTestCase(BaseTestCase): self.stubs.Set(self.compute.driver, "add_to_aggregate", driver_add_to_aggregate) - self.compute.add_aggregate_host(self.context, "the_host", + self.compute.add_aggregate_host(self.context, host="the_host", slave_info="SLAVE_INFO", aggregate=jsonutils.to_primitive(self.aggr)) @@ -9046,14 +9260,16 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase): mox.IgnoreArg()).AndRaise(test.TestingException("Original")) self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None, - inst_obj, mox.IgnoreArg(), self.instance_type, None, None, - None) + inst_obj, mox.IgnoreArg(), self.instance_type, [], {}, + {}) self.mox.ReplayAll() self.compute.prep_resize(self.context, image=None, instance=inst_obj, - instance_type=self.instance_type) + instance_type=self.instance_type, + reservations=[], request_spec={}, + filter_properties={}, node=None) def test_reschedule_fails_with_exception(self): """Original exception should be raised if the _reschedule method @@ -9173,9 +9389,11 @@ class EvacuateHostTestCase(BaseTestCase): orig_image_ref = None image_ref = None injected_files = None + bdms = db.block_device_mapping_get_all_by_instance(self.context, + self.inst_ref['uuid']) self.compute.rebuild_instance( - self.context, self.inst_ref, orig_image_ref, image_ref, - injected_files, 'newpass', recreate=True, + self.context, self._objectify(self.inst_ref), orig_image_ref, + image_ref, injected_files, 'newpass', {}, bdms, recreate=True, on_shared_storage=on_shared_storage) def test_rebuild_on_host_updated_target(self): @@ -9329,7 +9547,8 @@ class EvacuateHostTestCase(BaseTestCase): """Rebuild if instance exists raises an exception.""" db.instance_update(self.context, self.inst_ref['uuid'], {"task_state": task_states.SCHEDULING}) - self.compute.run_instance(self.context, instance=self.inst_ref) + self.compute.run_instance(self.context, self.inst_ref, {}, {}, + [], None, None, True, None, False) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.assertRaises(exception.InstanceExists, @@ -9358,8 +9577,8 @@ class ComputeInjectedFilesTestCase(BaseTestCase): def _test(self, injected_files, decoded_files): self.expected = decoded_files - self.compute.run_instance(self.context, self.instance, - injected_files=injected_files) + self.compute.run_instance(self.context, self.instance, {}, {}, [], + injected_files, None, True, None, False) def test_injected_none(self): # test an input of None for injected_files @@ -9390,7 +9609,8 @@ class ComputeInjectedFilesTestCase(BaseTestCase): ] self.assertRaises(exception.Base64Exception, self.compute.run_instance, - self.context, self.instance, injected_files=injected_files) + self.context, self.instance, {}, {}, [], injected_files, None, + True, None, False) def test_reschedule(self): # test that rescheduling is done with original encoded files @@ -9413,8 +9633,8 @@ class ComputeInjectedFilesTestCase(BaseTestCase): self.stubs.Set(self.compute.driver, 'spawn', spawn_explode) 
         self.stubs.Set(self.compute, '_reschedule_or_error', _roe)
-        self.compute.run_instance(self.context, self.instance,
-                                  injected_files=expected)
+        self.compute.run_instance(self.context, self.instance, {}, {}, [],
+                                  expected, None, True, None, False)
 
 
 class CheckConfigDriveTestCase(test.TestCase):
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index b55a2459aa7a..264e944facdd 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -419,14 +419,13 @@ class UsageInfoTestCase(test.TestCase):
     def test_notify_usage_exists(self):
         # Ensure 'exists' notification generates appropriate usage data.
         instance_id = self._create_instance()
-        instance = db.instance_get(self.context, instance_id)
+        instance = instance_obj.Instance.get_by_id(self.context, instance_id)
         # Set some system metadata
         sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2',
                         'other_data': 'meow'}
-        db.instance_system_metadata_update(self.context, instance['uuid'],
-                sys_metadata, False)
-        instance = db.instance_get(self.context, instance_id)
+        instance.system_metadata.update(sys_metadata)
+        instance.save()
         compute_utils.notify_usage_exists(
             notify.get_notifier('compute'), self.context, instance)
         self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
@@ -452,23 +451,23 @@ class UsageInfoTestCase(test.TestCase):
                          {'md_key1': 'val1', 'md_key2': 'val2'})
         image_ref_url = "%s/images/1" % glance.generate_glance_url()
         self.assertEqual(payload['image_ref_url'], image_ref_url)
-        self.compute.terminate_instance(self.context,
-                jsonutils.to_primitive(instance))
+        self.compute.terminate_instance(self.context, instance, [], [])
 
     def test_notify_usage_exists_deleted_instance(self):
         # Ensure 'exists' notification generates appropriate usage data.
         instance_id = self._create_instance()
-        instance = db.instance_get(self.context, instance_id)
+        instance = instance_obj.Instance.get_by_id(self.context, instance_id,
+                expected_attrs=['metadata', 'system_metadata'])
         # Set some system metadata
         sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2',
                         'other_data': 'meow'}
-        db.instance_system_metadata_update(self.context, instance['uuid'],
-                sys_metadata, False)
-        self.compute.terminate_instance(self.context,
-                jsonutils.to_primitive(instance))
-        instance = db.instance_get(self.context.elevated(read_deleted='yes'),
-                                   instance_id)
+        instance.system_metadata.update(sys_metadata)
+        instance.save()
+        self.compute.terminate_instance(self.context, instance, [], [])
+        instance = instance_obj.Instance.get_by_id(
+            self.context.elevated(read_deleted='yes'), instance_id,
+            expected_attrs=['system_metadata'])
         compute_utils.notify_usage_exists(
             notify.get_notifier('compute'), self.context, instance)
         msg = fake_notifier.NOTIFICATIONS[-1]
@@ -497,9 +496,9 @@ class UsageInfoTestCase(test.TestCase):
     def test_notify_usage_exists_instance_not_found(self):
         # Ensure 'exists' notification generates appropriate usage data.
         instance_id = self._create_instance()
-        instance = db.instance_get(self.context, instance_id)
-        self.compute.terminate_instance(self.context,
-                jsonutils.to_primitive(instance))
+        instance = instance_obj.Instance.get_by_id(self.context, instance_id,
+            expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+        self.compute.terminate_instance(self.context, instance, [], [])
         compute_utils.notify_usage_exists(
             notify.get_notifier('compute'), self.context, instance)
         msg = fake_notifier.NOTIFICATIONS[-1]
@@ -526,17 +525,15 @@ class UsageInfoTestCase(test.TestCase):
 
     def test_notify_about_instance_usage(self):
         instance_id = self._create_instance()
-        instance = db.instance_get(self.context, instance_id)
+        instance = instance_obj.Instance.get_by_id(self.context, instance_id,
+            expected_attrs=['metadata', 'system_metadata', 'info_cache'])
         # Set some system metadata
         sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2',
                         'other_data': 'meow'}
+        instance.system_metadata.update(sys_metadata)
+        instance.save()
         extra_usage_info = {'image_name': 'fake_name'}
-        db.instance_system_metadata_update(self.context, instance['uuid'],
-                sys_metadata, False)
-        # NOTE(russellb) Make sure our instance has the latest system_metadata
-        # in it.
-        instance = db.instance_get(self.context, instance_id)
 
         compute_utils.notify_about_instance_usage(
             notify.get_notifier('compute'), self.context, instance,
             'create.start',
@@ -563,8 +560,7 @@ class UsageInfoTestCase(test.TestCase):
         self.assertEqual(payload['image_name'], 'fake_name')
         image_ref_url = "%s/images/1" % glance.generate_glance_url()
         self.assertEqual(payload['image_ref_url'], image_ref_url)
-        self.compute.terminate_instance(self.context,
-                jsonutils.to_primitive(instance))
+        self.compute.terminate_instance(self.context, instance, [], [])
 
     def test_notify_about_aggregate_update_with_id(self):
         # Set aggregate payload
diff --git a/nova/tests/compute/test_shelve.py b/nova/tests/compute/test_shelve.py
index db8e396b53b8..a13d32f86d94 100644
--- a/nova/tests/compute/test_shelve.py
+++ b/nova/tests/compute/test_shelve.py
@@ -31,7 +31,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
     def test_shelve(self):
         CONF.shelved_offload_time = -1
         db_instance = jsonutils.to_primitive(self._create_fake_instance())
-        self.compute.run_instance(self.context, instance=db_instance)
+        self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
+                None, True, None, False)
         instance = instance_obj.Instance.get_by_uuid(
             self.context, db_instance['uuid'],
             expected_attrs=['metadata', 'system_metadata'])
@@ -81,7 +82,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
 
     def test_shelve_volume_backed(self):
         db_instance = jsonutils.to_primitive(self._create_fake_instance())
-        self.compute.run_instance(self.context, instance=db_instance)
+        self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
+                None, True, None, False)
         instance = instance_obj.Instance.get_by_uuid(
             self.context, db_instance['uuid'],
             expected_attrs=['metadata', 'system_metadata'])
@@ -123,7 +125,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
 
     def test_unshelve(self):
         db_instance = jsonutils.to_primitive(self._create_fake_instance())
-        self.compute.run_instance(self.context, instance=db_instance)
+        self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
+                None, True, None, False)
         instance = instance_obj.Instance.get_by_uuid(
             self.context, db_instance['uuid'],
             expected_attrs=['metadata', 'system_metadata'])
@@ -202,7 +205,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
         cur_time = timeutils.utcnow()
         cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
         timeutils.set_time_override(cur_time)
-        self.compute.run_instance(self.context, instance=db_instance)
+        self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
+                None, True, None, False)
         instance = instance_obj.Instance.get_by_uuid(
             self.context, db_instance['uuid'],
             expected_attrs=['metadata', 'system_metadata'])
@@ -261,7 +265,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
 
     def test_shelved_poll_none_exist(self):
         instance = jsonutils.to_primitive(self._create_fake_instance())
-        self.compute.run_instance(self.context, instance=instance)
+        self.compute.run_instance(self.context, instance, {}, {}, [], None,
+                None, True, None, False)
         self.mox.StubOutWithMock(self.compute.driver, 'destroy')
         self.mox.StubOutWithMock(timeutils, 'is_older_than')
         self.mox.ReplayAll()
@@ -269,7 +274,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
 
     def test_shelved_poll_not_timedout(self):
         instance = jsonutils.to_primitive(self._create_fake_instance())
-        self.compute.run_instance(self.context, instance=instance)
+        self.compute.run_instance(self.context, instance, {}, {}, [], None,
+                None, True, None, False)
         sys_meta = utils.metadata_to_dict(instance['system_metadata'])
         shelved_time = timeutils.utcnow()
         timeutils.set_time_override(shelved_time)
@@ -284,10 +290,12 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
 
     def test_shelved_poll_timedout(self):
         active_instance = jsonutils.to_primitive(self._create_fake_instance())
-        self.compute.run_instance(self.context, instance=active_instance)
+        self.compute.run_instance(self.context, active_instance, {}, {}, [],
+                None, None, True, None, False)
 
         instance = jsonutils.to_primitive(self._create_fake_instance())
-        self.compute.run_instance(self.context, instance=instance)
+        self.compute.run_instance(self.context, instance, {}, {}, [], None,
+                None, True, None, False)
         sys_meta = utils.metadata_to_dict(instance['system_metadata'])
         shelved_time = timeutils.utcnow()
         timeutils.set_time_override(shelved_time)
@@ -313,7 +321,8 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
         fake_instance = self._create_fake_instance({'display_name': 'vm01'})
         instance = jsonutils.to_primitive(fake_instance)
         instance_uuid = instance['uuid']
-        self.compute.run_instance(self.context, instance=instance)
+        self.compute.run_instance(self.context, instance, {}, {}, [], None,
+                None, True, None, False)
 
         self.assertIsNone(instance['task_state'])
 
@@ -345,7 +354,8 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
         # Ensure instance can be unshelved.
         instance = jsonutils.to_primitive(self._create_fake_instance())
         instance_uuid = instance['uuid']
-        self.compute.run_instance(self.context, instance=instance)
+        self.compute.run_instance(self.context, instance, {}, {}, [], None,
+                None, True, None, False)
 
         self.assertIsNone(instance['task_state'])
 
diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py
index 419a4038941e..385e83c376a3 100644
--- a/nova/tests/virt/xenapi/test_xenapi.py
+++ b/nova/tests/virt/xenapi/test_xenapi.py
@@ -3126,8 +3126,9 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
 
         self.assertRaises(exception.AggregateError,
                           self.compute.add_aggregate_host,
-                          self.context, "fake_host",
-                          aggregate=jsonutils.to_primitive(self.aggr))
+                          self.context, host="fake_host",
+                          aggregate=jsonutils.to_primitive(self.aggr),
+                          slave_info=None)
         excepted = db.aggregate_get(self.context, self.aggr['id'])
         self.assertEqual(excepted['metadetails'][pool_states.KEY],
                          pool_states.ERROR)
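
The test hunks above repeat the same positional argument lists because the 3.0 ComputeManager signatures no longer default any RPC arguments. As a reading aid only, here is a minimal sketch (not part of this patch) of how those positional lists map onto the new run_instance and terminate_instance signatures; the helper names _run_instance_v3 and _terminate_instance_v3 are hypothetical and do not exist in the tree.

# Hypothetical helpers illustrating the 3.0 calling convention used in the
# updated tests. They are a sketch, not helpers introduced by this change.

def _run_instance_v3(compute, context, instance, **overrides):
    """Call ComputeManager.run_instance with every 3.0 argument explicit.

    The positional order after `instance` is: request_spec,
    filter_properties, requested_networks, injected_files, admin_password,
    is_first_time, node, legacy_bdm_in_spec.
    """
    args = {'request_spec': {}, 'filter_properties': {},
            'requested_networks': None, 'injected_files': None,
            'admin_password': None, 'is_first_time': True,
            'node': None, 'legacy_bdm_in_spec': False}
    args.update(overrides)
    return compute.run_instance(context, instance,
                                args['request_spec'],
                                args['filter_properties'],
                                args['requested_networks'],
                                args['injected_files'],
                                args['admin_password'],
                                args['is_first_time'],
                                args['node'],
                                args['legacy_bdm_in_spec'])


def _terminate_instance_v3(compute, context, instance, bdms=None,
                           reservations=None):
    """Call ComputeManager.terminate_instance with explicit bdms/reservations."""
    return compute.terminate_instance(context, instance,
                                      bdms or [], reservations or [])

With such helpers, a call site like the ones above could read
_run_instance_v3(self.compute, self.context, instance, injected_files=files)
instead of repeating the full positional list; the tests in this patch simply
spell the arguments out inline.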