diff --git a/nova/tests/functional/api_sample_tests/test_instance_actions.py b/nova/tests/functional/api_sample_tests/test_instance_actions.py
index 796ca1ce5026..785575fde86b 100644
--- a/nova/tests/functional/api_sample_tests/test_instance_actions.py
+++ b/nova/tests/functional/api_sample_tests/test_instance_actions.py
@@ -34,7 +34,7 @@ class ServerActionsSampleJsonTest(test_servers.ServersSampleBase,
         response_data = api_samples_test_base.pretty_data(response.content)
         actions = api_samples_test_base.objectify(response_data)
         self.action_stop = actions['instanceActions'][0]
-        self._wait_for_state_change(self.api, {'id': self.uuid}, 'SHUTOFF')
+        self._wait_for_state_change({'id': self.uuid}, 'SHUTOFF')

     def _get_subs(self):
         return {
diff --git a/nova/tests/functional/api_sample_tests/test_multinic.py b/nova/tests/functional/api_sample_tests/test_multinic.py
index 6990aa47a239..526e4dc6b5e5 100644
--- a/nova/tests/functional/api_sample_tests/test_multinic.py
+++ b/nova/tests/functional/api_sample_tests/test_multinic.py
@@ -32,14 +32,14 @@ class MultinicSampleJsonTest(integrated_helpers.InstanceHelperMixin,

     def _boot_a_server(self, expected_status='ACTIVE', extra_params=None):
         server = self._build_minimal_create_server_request(
-            self.api, 'MultinicSampleJsonTestServer')
+            'MultinicSampleJsonTestServer')
         if extra_params:
             server.update(extra_params)
         created_server = self.api.post_server({'server': server})

         # Wait for it to finish being created
-        found_server = self._wait_for_state_change(self.api, created_server,
+        found_server = self._wait_for_state_change(created_server,
                                                    expected_status)

         return found_server
diff --git a/nova/tests/functional/compute/test_init_host.py b/nova/tests/functional/compute/test_init_host.py
index 4840536d2366..d4c1ba28b57b 100644
--- a/nova/tests/functional/compute/test_init_host.py
+++ b/nova/tests/functional/compute/test_init_host.py
@@ -40,10 +40,10 @@ class ComputeManagerInitHostTestCase(
         # Create a server, it does not matter on which host it lands.
         name = 'test_migrate_disk_and_power_off_crash_finish_revert_migration'
         server = self._build_minimal_create_server_request(
-            self.api, name, image_uuid=fake_image.get_valid_image_id(),
+            name, image_uuid=fake_image.get_valid_image_id(),
             networks='auto')
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # Save the source hostname for assertions later.
         source_host = server['OS-EXT-SRV-ATTR:host']
@@ -66,8 +66,7 @@ class ComputeManagerInitHostTestCase(
         self.admin_api.post_server_action(server['id'], {'migrate': None})
         # Now wait for the task_state to be reset to None during
         # _init_instance.
-        server = self._wait_for_server_parameter(
-            self.admin_api, server, {
+        server = self._wait_for_server_parameter(server, {
                 'status': 'ACTIVE',
                 'OS-EXT-STS:task_state': None,
                 'OS-EXT-SRV-ATTR:host': source_host
@@ -158,7 +157,7 @@ class TestComputeRestartInstanceStuckInBuild(
         # instance_claim() to stop it. This is less realistic but it works in
         # the test env.
         server_req = self._build_minimal_create_server_request(
-            self.api, 'interrupted-server', flavor_id=self.flavor1['id'],
+            'interrupted-server', flavor_id=self.flavor1['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
@@ -170,7 +169,7 @@ class TestComputeRestartInstanceStuckInBuild(
             mock_instance_claim.side_effect = sleep_forever

             server = self.api.post_server({'server': server_req})
-            self._wait_for_state_change(self.admin_api, server, 'BUILD')
+            self._wait_for_state_change(server, 'BUILD')

             # the instance.create.start is the closest thing to the
             # instance_claim call we can wait for in the test
@@ -182,7 +181,7 @@ class TestComputeRestartInstanceStuckInBuild(

         # We expect that the instance is pushed to ERROR state during the
         # compute restart.
-        self._wait_for_state_change(self.admin_api, server, 'ERROR')
+        self._wait_for_state_change(server, 'ERROR')
         mock_log.assert_called_with(
             'Instance spawn was interrupted before instance_claim, setting '
             'instance to ERROR state',
diff --git a/nova/tests/functional/compute/test_live_migration.py b/nova/tests/functional/compute/test_live_migration.py
index e630a40cb5a8..1966b3d13d6b 100644
--- a/nova/tests/functional/compute/test_live_migration.py
+++ b/nova/tests/functional/compute/test_live_migration.py
@@ -69,7 +69,7 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
                         'uuid': uuids.working_volume,
                         'source_type': 'volume',
                         'destination_type': 'volume'}]}})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         source = server['OS-EXT-SRV-ATTR:host']
         if source == self.compute.host:
@@ -87,7 +87,7 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
         self.stub_out('nova.volume.cinder.API.attachment_delete',
                       stub_attachment_delete)
         self.api.post_server_action(server['id'], post)
-        self._wait_for_server_parameter(self.api, server,
+        self._wait_for_server_parameter(server,
                                         {'OS-EXT-SRV-ATTR:host': dest,
                                          'status': 'ACTIVE'})
         self.assertEqual(2, stub_attachment_delete.call_count)
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index a50caeb72606..c1e40c7177f9 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -73,11 +73,14 @@ def generate_new_element(items, prefix, numeric=False):


 class InstanceHelperMixin(object):
-    def _wait_for_server_parameter(self, admin_api, server, expected_params,
-                                   max_retries=10):
+
+    def _wait_for_server_parameter(
+            self, server, expected_params, max_retries=10, api=None):
+        api = api or getattr(self, 'admin_api', self.api)
+
         retry_count = 0
         while True:
-            server = admin_api.get_server(server['id'])
+            server = api.get_server(server['id'])
             if all([server[attr] == expected_params[attr]
                     for attr in expected_params]):
                 break
@@ -90,22 +93,21 @@ class InstanceHelperMixin(object):

         return server

-    def _wait_for_state_change(self, admin_api, server, expected_status,
-                               max_retries=10):
+    def _wait_for_state_change(self, server, expected_status, max_retries=10):
         return self._wait_for_server_parameter(
-            admin_api, server, {'status': expected_status}, max_retries)
+            server, {'status': expected_status}, max_retries)
+
+    def _build_minimal_create_server_request(
+            self, name=None, image_uuid=None, flavor_id=None, networks=None,
+            az=None, host=None):

-    def _build_minimal_create_server_request(self, api, name=None,
-                                             image_uuid=None, flavor_id=None,
-                                             networks=None, az=None,
-                                             host=None):
         server = {}

         if not image_uuid:
             # NOTE(takashin): In API version 2.36, image APIs were deprecated.
             # In API version 2.36 or greater, self.api.get_images() returns
             # a 404 error. In that case, 'image_uuid' should be specified.
-            image_uuid = api.get_images()[0]['id']
+            image_uuid = self.api.get_images()[0]['id']
         server['imageRef'] = image_uuid

         if not name:
@@ -115,7 +117,7 @@ class InstanceHelperMixin(object):

         if not flavor_id:
             # Set a valid flavorId
-            flavor_id = api.get_flavors()[0]['id']
+            flavor_id = self.api.get_flavors()[0]['id']
         server['flavorRef'] = 'http://fake.server/%s' % flavor_id

         if networks is not None:
@@ -142,40 +144,43 @@ class InstanceHelperMixin(object):
         return

     def _wait_for_action_fail_completion(
-            self, server, expected_action, event_name, api=None):
+            self, server, expected_action, event_name):
         """Polls instance action events for the given instance, action and
         action event name until it finds the action event with an error
         result.
         """
-        if api is None:
-            api = self.api
         return self._wait_for_instance_action_event(
-            api, server, expected_action, event_name, event_result='error')
+            server, expected_action, event_name, event_result='error')

     def _wait_for_instance_action_event(
-            self, api, server, action_name, event_name, event_result):
+            self, server, action_name, event_name, event_result):
         """Polls the instance action events for the given instance, action,
         event, and event result until it finds the event.
         """
+        api = getattr(self, 'admin_api', self.api)
+
         actions = []
         events = []
         for attempt in range(10):
             actions = api.get_instance_actions(server['id'])
             # The API returns the newest event first
             for action in actions:
-                if action['action'] == action_name:
-                    events = (
-                        api.api_get(
-                            '/servers/%s/os-instance-actions/%s' %
-                            (server['id'], action['request_id'])
-                        ).body['instanceAction']['events'])
-                    # Look for the action event being in error state.
-                    for event in events:
-                        result = event['result']
-                        if (event['event'] == event_name and
-                                result is not None and
-                                result.lower() == event_result.lower()):
-                            return event
+                if action['action'] != action_name:
+                    continue
+
+                events = api.api_get(
+                    '/servers/%s/os-instance-actions/%s' % (
+                        server['id'], action['request_id'])
+                ).body['instanceAction']['events']
+
+                # Look for the action event being in error state.
+                for event in events:
+                    result = event['result']
+                    if (event['event'] == event_name and
+                            result is not None and
+                            result.lower() == event_result.lower()):
+                        return event
+
             # We didn't find the completion event yet, so wait a bit.
             time.sleep(0.5)
@@ -192,9 +197,8 @@ class InstanceHelperMixin(object):
         :param action: Either "resize" or "migrate" instance action.
         :param error_in_tb: Some expected part of the error event traceback.
         """
-        api = self.admin_api if hasattr(self, 'admin_api') else self.api
         event = self._wait_for_action_fail_completion(
-            server, action, 'conductor_migrate_server', api=api)
+            server, action, 'conductor_migrate_server')
         self.assertIn(error_in_tb, event['traceback'])

     def _wait_for_migration_status(self, server, expected_statuses):
@@ -202,9 +206,7 @@ class InstanceHelperMixin(object):
         for the given server, else the test fails. The migration record, if
         found, is returned.
         """
-        api = getattr(self, 'admin_api', None)
-        if api is None:
-            api = self.api
+        api = getattr(self, 'admin_api', self.api)

         statuses = [status.lower() for status in expected_statuses]
         for attempt in range(10):
@@ -296,10 +298,14 @@ class _IntegratedTestBase(test.TestCase):
             self.api = self.api_fixture.admin_api
         else:
             self.api = self.api_fixture.api
+        self.admin_api = self.api_fixture.admin_api

         if hasattr(self, 'microversion'):
             self.api.microversion = self.microversion
+            if not self.ADMIN_API:
+                self.admin_api.microversion = self.microversion
+
     def get_unused_server_name(self):
         servers = self.api.get_servers()
         server_names = [server['name'] for server in servers]
@@ -725,14 +731,13 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
         :return: the API representation of the booted instance
         """
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=flavor['id'],
+            'some-server', flavor_id=flavor['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks=networks)
         server_req['availability_zone'] = 'nova:%s' % source_hostname
         LOG.info('booting on %s', source_hostname)
         created_server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(
-            self.admin_api, created_server, 'ACTIVE')
+        server = self._wait_for_state_change(created_server, 'ACTIVE')

         # Verify that our source host is what the server ended up on
         self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])
@@ -846,7 +851,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
     def _move_and_check_allocations(self, server, request, old_flavor,
                                     new_flavor, source_rp_uuid, dest_rp_uuid):
         self.api.post_server_action(server['id'], request)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')

         def _check_allocation():
             self.assertFlavorMatchesUsage(source_rp_uuid, old_flavor)
@@ -908,7 +913,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
             }
         }
         self.api.post_server_action(server['id'], resize_req)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')

         self.assertFlavorMatchesUsage(rp_uuid, old_flavor, new_flavor)

@@ -978,15 +983,15 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):

     def _confirm_resize(self, server):
         self.api.post_server_action(server['id'], {'confirmResize': None})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_instance_action_event(
-            self.api, server, instance_actions.CONFIRM_RESIZE,
+            server, instance_actions.CONFIRM_RESIZE,
             'compute_confirm_resize', 'success')
         return server

     def _revert_resize(self, server):
         self.api.post_server_action(server['id'], {'revertResize': None})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_migration_status(server, ['reverted'])
         # Note that the migration status is changed to "reverted" in the
         # dest host revert_resize method but the allocations are cleaned up
diff --git a/nova/tests/functional/libvirt/test_shared_resource_provider.py b/nova/tests/functional/libvirt/test_shared_resource_provider.py
index 8d056aadec58..31daa5387e4b 100644
--- a/nova/tests/functional/libvirt/test_shared_resource_provider.py
+++ b/nova/tests/functional/libvirt/test_shared_resource_provider.py
@@ -77,7 +77,7 @@ class SharedStorageProviderUsageTestCase(
         }
         # create server
         server = self.api.post_server(server_req_body)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # get shared_rp and cn_rp usages
         shared_rp_usages = self._get_provider_usages(shared_RP['uuid'])
@@ -135,7 +135,7 @@ class SharedStorageProviderUsageTestCase(
         }
         # create server
         server = self.api.post_server(server_req_body)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         rebuild_image_ref = (
             nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
@@ -152,7 +152,7 @@ class SharedStorageProviderUsageTestCase(
         self.api.api_post('/servers/%s/action' % server['id'],
                           rebuild_req_body)
         self._wait_for_server_parameter(
-            self.api, server, {'OS-EXT-STS:task_state': None})
+            server, {'OS-EXT-STS:task_state': None})

         # get shared_rp and cn_rp usages
         shared_rp_usages = self._get_provider_usages(shared_rp_uuid)
@@ -198,7 +198,7 @@ class SharedStorageProviderUsageTestCase(
         }
         # create server
         server = self.api.post_server(server_req_body)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         rebuild_image_ref = (
             nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
@@ -216,7 +216,7 @@ class SharedStorageProviderUsageTestCase(
                           rebuild_req_body)
         # Look for the failed rebuild action.
         self._wait_for_action_fail_completion(
-            server, instance_actions.REBUILD, 'rebuild_server', self.admin_api)
+            server, instance_actions.REBUILD, 'rebuild_server')
         # Assert the server image_ref was rolled back on failure.
         server = self.api.get_server(server['id'])
         self.assertEqual(org_image_id, server['image']['id'])
diff --git a/nova/tests/functional/libvirt/test_vpmem.py b/nova/tests/functional/libvirt/test_vpmem.py
index 399444b99343..1fe8efd50aed 100644
--- a/nova/tests/functional/libvirt/test_vpmem.py
+++ b/nova/tests/functional/libvirt/test_vpmem.py
@@ -126,7 +126,7 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):

     def _create_server(self, flavor_id, hostname):
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=flavor_id,
+            'some-server', flavor_id=flavor_id,
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         server_req['availability_zone'] = 'nova:%s' % hostname
@@ -173,22 +173,22 @@ class VPMEMTests(VPMEMTestBase):

         # Boot two servers with pmem
         server1 = self._create_server(self.flavor, self.compute1.host)
-        self._wait_for_state_change(self.api, server1, 'ACTIVE')
+        self._wait_for_state_change(server1, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server1['id'], cn1_uuid)
         server2 = self._create_server(self.flavor, self.compute1.host)
-        self._wait_for_state_change(self.api, server2, 'ACTIVE')
+        self._wait_for_state_change(server2, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server2['id'], cn1_uuid)

         # 'SMALL' VPMEM resource has used up
         server3 = self._create_server(self.flavor, self.compute1.host)
-        self._wait_for_state_change(self.api, server3, 'ERROR')
+        self._wait_for_state_change(server3, 'ERROR')

         # Delete server2, one 'SMALL' VPMEM will be released
         self._delete_server(server2)
         server3 = self._create_server(self.flavor, self.compute1.host)
-        self._wait_for_state_change(self.api, server3, 'ACTIVE')
+        self._wait_for_state_change(server3, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server3['id'], cn1_uuid)
@@ -237,29 +237,29 @@ class VPMEMResizeTests(VPMEMTestBase):

         # Boot one server with pmem, then resize the server
         server = self._create_server(self.flavor1, self.compute1.host)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)

         # Revert resize
         self._resize_server(server, self.flavor2)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn2_uuid)
         self._revert_resize(server)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)

         # Confirm resize
         self._resize_server(server, self.flavor2)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn2_uuid)
         self._confirm_resize(server)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn2_uuid)
@@ -272,29 +272,29 @@ class VPMEMResizeTests(VPMEMTestBase):

         # Boot one server with pmem, then resize the server
         server = self._create_server(self.flavor1, self.compute1.host)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)

         # Revert resize
         self._resize_server(server, self.flavor2)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)
         self._revert_resize(server)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)

         # Confirm resize
         self._resize_server(server, self.flavor2)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)
         self._confirm_resize(server)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)
diff --git a/nova/tests/functional/notification_sample_tests/notification_sample_base.py b/nova/tests/functional/notification_sample_tests/notification_sample_base.py
index 7bfb754b428a..a491af33ca2c 100644
--- a/nova/tests/functional/notification_sample_tests/notification_sample_base.py
+++ b/nova/tests/functional/notification_sample_tests/notification_sample_base.py
@@ -220,7 +220,7 @@ class NotificationSampleTestBase(test.TestCase,
                 actual=fake_notifier.VERSIONED_NOTIFICATIONS.pop(0))

         server = self._build_minimal_create_server_request(
-            self.api, 'some-server',
+            'some-server',
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             flavor_id=flavor_id)
@@ -246,7 +246,7 @@ class NotificationSampleTestBase(test.TestCase,
         self.assertTrue(created_server['id'])

         # Wait for it to finish being created
-        found_server = self._wait_for_state_change(self.api, created_server,
+        found_server = self._wait_for_state_change(created_server,
                                                    expected_status)

         found_server['reservation_id'] = reservation_id
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index 641056efa474..c5b96a0ac6a4 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -59,7 +59,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
             fake_notifier.reset()
             action(server)
             # Ensure that instance is in active state after an action
-            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+            self._wait_for_state_change(server, 'ACTIVE')

     @mock.patch('nova.compute.manager.ComputeManager.'
                 '_live_migration_cleanup_flags', return_value=[True, False])
@@ -188,13 +188,13 @@ class TestInstanceNotificationSampleWithMultipleCompute(
         }

         self.admin_api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'MIGRATING')
+        self._wait_for_state_change(server, 'MIGRATING')

         migrations = self._wait_and_get_migrations(server)

         self.admin_api.delete_migration(server['id'], migrations[0]['id'])
         self._wait_for_notification('instance.live_migration_abort.start')
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         # NOTE(gibi): the intance.live_migration_rollback notification emitted
         # after the instance.live_migration_abort notification so we have to
         # wait for the rollback to ensure we can assert both notifications
@@ -261,10 +261,8 @@ class TestInstanceNotificationSampleWithMultipleCompute(
         }

         self.admin_api.post_server_action(server['id'], evacuate)
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='REBUILD')
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='REBUILD')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         notifications = self._get_notifications('instance.evacuate')
         self.assertEqual(1, len(notifications),
@@ -286,7 +284,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
         }

         self.admin_api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'MIGRATING')
+        self._wait_for_state_change(server, 'MIGRATING')

         migrations = self._wait_and_get_migrations(server)
         migration_id = migrations[0]['id']
@@ -390,7 +388,7 @@ class TestInstanceNotificationSample(
             fake_notifier.reset()
             action(server)
             # Ensure that instance is in active state after an action
-            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+            self._wait_for_state_change(server, 'ACTIVE')
             # if the test step did not raised then we consider the step as
             # succeeded. We drop the logs to avoid causing subunit parser
@@ -520,10 +518,8 @@ class TestInstanceNotificationSample(
             }
         }
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='REBUILD')
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='REBUILD')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         notifications = self._get_notifications('instance.exists')
         self._verify_notification(
@@ -751,11 +747,9 @@ class TestInstanceNotificationSample(

     def _test_power_off_on_server(self, server):
         self.api.post_server_action(server['id'], {'os-stop': {}})
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='SHUTOFF')
+        self._wait_for_state_change(server, expected_status='SHUTOFF')
         self.api.post_server_action(server['id'], {'os-start': {}})
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -788,8 +782,7 @@ class TestInstanceNotificationSample(
     def _test_shelve_and_shelve_offload_server(self, server):
         self.flags(shelved_offload_time=-1)
         self.api.post_server_action(server['id'], {'shelve': {}})
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='SHELVED')
+        self._wait_for_state_change(server, expected_status='SHELVED')

         self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -812,7 +805,7 @@ class TestInstanceNotificationSample(
         # we can unshelve to make sure that the unshelve.start notification
         # payload is stable as the compute manager first sets the instance
         # state then a bit later sets the instance.host to None.
-        self._wait_for_server_parameter(self.api, server,
+        self._wait_for_server_parameter(server,
                                         {'status': 'SHELVED_OFFLOADED',
                                          'OS-EXT-SRV-ATTR:host': None})

@@ -832,7 +825,7 @@ class TestInstanceNotificationSample(
             actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])

         self.api.post_server_action(server['id'], {'unshelve': None})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_notification('instance.unshelve.end')

     def _test_unshelve_server(self, server):
@@ -844,13 +837,13 @@ class TestInstanceNotificationSample(
         # we can unshelve to make sure that the unshelve.start notification
         # payload is stable as the compute manager first sets the instance
         # state then a bit later sets the instance.host to None.
-        self._wait_for_server_parameter(self.api, server,
+        self._wait_for_server_parameter(server,
                                         {'status': 'SHELVED_OFFLOADED',
                                          'OS-EXT-SRV-ATTR:host': None})

         post = {'unshelve': None}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_notification('instance.unshelve.end')
         self.assertEqual(9, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -870,11 +863,11 @@ class TestInstanceNotificationSample(
     def _test_suspend_resume_server(self, server):
         post = {'suspend': {}}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'SUSPENDED')
+        self._wait_for_state_change(server, 'SUSPENDED')

         post = {'resume': None}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Four versioned notification are generated.
         # 0. instance-suspend-start
@@ -913,10 +906,10 @@ class TestInstanceNotificationSample(

     def _test_pause_unpause_server(self, server):
         self.api.post_server_action(server['id'], {'pause': {}})
-        self._wait_for_state_change(self.api, server, 'PAUSED')
+        self._wait_for_state_change(server, 'PAUSED')

         self.api.post_server_action(server['id'], {'unpause': {}})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Four versioned notifications are generated
         # 0. instance-pause-start
@@ -997,7 +990,7 @@ class TestInstanceNotificationSample(
             }
         }
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')

         self._pop_and_verify_dest_select_notification(server['id'],
             replacements={
@@ -1034,7 +1027,7 @@ class TestInstanceNotificationSample(
         # the following is the revert server request
         post = {'revertResize': None}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1170,7 +1163,7 @@ class TestInstanceNotificationSample(
         self.addCleanup(patcher.stop)
         patcher.start()
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, expected_status='ERROR')
+        self._wait_for_state_change(server, expected_status='ERROR')
         self._wait_for_notification('compute.exception')
         # There should be the following notifications after scheduler's
         # select_destination notifications:
@@ -1248,10 +1241,8 @@ class TestInstanceNotificationSample(
         self.api.post_server_action(server['id'], post)
         # Before going back to ACTIVE state
         # server state need to be changed to REBUILD state
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='REBUILD')
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='REBUILD')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         self._pop_and_verify_dest_select_notification(server['id'],
             replacements={
@@ -1345,10 +1336,8 @@ class TestInstanceNotificationSample(
         self.api.post_server_action(server['id'], post)
         # Before going back to ACTIVE state
         # server state need to be changed to REBUILD state
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='REBUILD')
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='REBUILD')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         self._pop_and_verify_dest_select_notification(server['id'],
             replacements={
@@ -1434,7 +1423,7 @@ class TestInstanceNotificationSample(
         }
         self.api.post_server_action(server['id'], post)
         mock_rebuild.side_effect = _virtual_interface_create_failed
-        self._wait_for_state_change(self.api, server, expected_status='ERROR')
+        self._wait_for_state_change(server, expected_status='ERROR')
         notification = self._get_notifications('instance.rebuild.error')
         self.assertEqual(1, len(notification),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1455,11 +1444,11 @@ class TestInstanceNotificationSample(
     def _test_restore_server(self, server):
         self.flags(reclaim_instance_interval=30)
         self.api.delete_server(server['id'])
-        self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
+        self._wait_for_state_change(server, 'SOFT_DELETED')
         # we don't want to test soft_delete here
         fake_notifier.reset()
         self.api.post_server_action(server['id'], {'restore': {}})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1641,12 +1630,12 @@ class TestInstanceNotificationSample(
         self.flags(allow_resize_to_same_host=True)
         post = {'resize': {'flavorRef': '2'}}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         fake_notifier.reset()

         post = {'confirmResize': None}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1733,7 +1722,7 @@ class TestInstanceNotificationSample(
             }
         }
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'RESCUE')
+        self._wait_for_state_change(server, 'RESCUE')

         # 0. instance.rescue.start
         # 1. instance.exists
@@ -1759,7 +1748,7 @@ class TestInstanceNotificationSample(
             'unrescue': None
         }
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1779,7 +1768,7 @@ class TestInstanceNotificationSample(
     def _test_soft_delete_server(self, server):
         self.flags(reclaim_instance_interval=30)
         self.api.delete_server(server['id'])
-        self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
+        self._wait_for_state_change(server, 'SOFT_DELETED')

         self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1948,9 +1937,9 @@ class TestInstanceNotificationSample(

     def _test_lock_unlock_instance(self, server):
         self.api.post_server_action(server['id'], {'lock': {}})
-        self._wait_for_server_parameter(self.api, server, {'locked': True})
+        self._wait_for_server_parameter(server, {'locked': True})
         self.api.post_server_action(server['id'], {'unlock': {}})
-        self._wait_for_server_parameter(self.api, server, {'locked': False})
+        self._wait_for_server_parameter(server, {'locked': False})
         # Two versioned notifications are generated
         # 0. instance-lock
         # 1. instance-unlock
@@ -1973,9 +1962,9 @@ class TestInstanceNotificationSample(
     def _test_lock_unlock_instance_with_reason(self, server):
         self.api.post_server_action(
             server['id'], {'lock': {"locked_reason": "global warming"}})
-        self._wait_for_server_parameter(self.api, server, {'locked': True})
+        self._wait_for_server_parameter(server, {'locked': True})
         self.api.post_server_action(server['id'], {'unlock': {}})
-        self._wait_for_server_parameter(self.api, server, {'locked': False})
+        self._wait_for_server_parameter(server, {'locked': False})
         # Two versioned notifications are generated
         # 0. instance-lock
         # 1. instance-unlock
diff --git a/nova/tests/functional/regressions/test_bug_1404867.py b/nova/tests/functional/regressions/test_bug_1404867.py
index ba8e90613294..fa15dc900615 100644
--- a/nova/tests/functional/regressions/test_bug_1404867.py
+++ b/nova/tests/functional/regressions/test_bug_1404867.py
@@ -55,7 +55,7 @@ class DeleteWithReservedVolumes(integrated_helpers._IntegratedTestBase,
                 ]
             }
         })
-        return self._wait_for_state_change(self.api, server, 'ERROR')
+        return self._wait_for_state_change(server, 'ERROR')

     def test_delete_with_reserved_volumes_new(self):
         self.cinder = self.useFixture(
diff --git a/nova/tests/functional/regressions/test_bug_1669054.py b/nova/tests/functional/regressions/test_bug_1669054.py
index 1945ad923055..1054ae9dbda2 100644
--- a/nova/tests/functional/regressions/test_bug_1669054.py
+++ b/nova/tests/functional/regressions/test_bug_1669054.py
@@ -42,7 +42,7 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
         flavor1 = flavors[0]['id']
         server = self._build_server(flavor1)
         server = self.api.post_server({'server': server})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Start up another compute service so we can resize.
         host2 = self.start_service('compute', host='host2')
@@ -51,10 +51,10 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
         flavor2 = flavors[1]['id']
         req = {'resize': {'flavorRef': flavor2}}
         self.api.post_server_action(server['id'], req)
-        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
         self.api.post_server_action(server['id'], {'confirmResize': None})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Disable the host on which the server is now running (host2).
         host2.stop()
@@ -62,7 +62,7 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
         # Now try to evacuate the server back to the original source compute.
         req = {'evacuate': {'onSharedStorage': False}}
         self.api.post_server_action(server['id'], req)
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # The evacuate flow in the compute manager is annoying in that it
         # sets the instance status to ACTIVE before updating the host, so we
         # have to wait for the migration record to be 'done' to avoid a race.
diff --git a/nova/tests/functional/regressions/test_bug_1679750.py b/nova/tests/functional/regressions/test_bug_1679750.py
index 0d3e7e1d74d6..81d709435aad 100644
--- a/nova/tests/functional/regressions/test_bug_1679750.py
+++ b/nova/tests/functional/regressions/test_bug_1679750.py
@@ -81,10 +81,10 @@ class TestLocalDeleteAllocations(test.TestCase,
         self.assertEqual(0, usage)

         # Create a server.
-        server = self._build_minimal_create_server_request(self.api,
+        server = self._build_minimal_create_server_request(
             'local-delete-test', self.image_id, self.flavor_id, 'none')
         server = self.admin_api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Assert usages are non zero now.
         usages_during = self._get_usages(placement_api, rp_uuid)
@@ -136,10 +136,10 @@ class TestLocalDeleteAllocations(test.TestCase,
         self.assertEqual(0, usage)

         # Create a server.
-        server = self._build_minimal_create_server_request(self.api,
+        server = self._build_minimal_create_server_request(
             'local-delete-test', self.image_id, self.flavor_id, 'none')
         server = self.admin_api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Assert usages are non zero now.
         usages_during = self._get_usages(placement_api, rp_uuid)
diff --git a/nova/tests/functional/regressions/test_bug_1682693.py b/nova/tests/functional/regressions/test_bug_1682693.py
index 6f466c1b5813..28d8cc30bfb1 100644
--- a/nova/tests/functional/regressions/test_bug_1682693.py
+++ b/nova/tests/functional/regressions/test_bug_1682693.py
@@ -59,10 +59,10 @@ class ServerTagsFilteringTest(test.TestCase,
         for x in range(2):
             server = self.api.post_server(
                 dict(server=self._build_minimal_create_server_request(
-                    self.api, 'test-list-server-tag-filters%i' % x, image_id,
+                    'test-list-server-tag-filters%i' % x, image_id,
                     networks='none')))
             self.addCleanup(self.api.delete_server, server['id'])
-            server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+            server = self._wait_for_state_change(server, 'ACTIVE')
             self.servers.append(server)

         # now apply two tags to the first server
diff --git a/nova/tests/functional/regressions/test_bug_1689692.py b/nova/tests/functional/regressions/test_bug_1689692.py
index 1b6cf48d835e..3a1d4fe425df 100644
--- a/nova/tests/functional/regressions/test_bug_1689692.py
+++ b/nova/tests/functional/regressions/test_bug_1689692.py
@@ -66,10 +66,10 @@ class ServerListLimitMarkerCell0Test(test.TestCase,
         for x in range(3):
             server = self.api.post_server(
                 dict(server=self._build_minimal_create_server_request(
-                    self.api, 'test-list-server-limit%i' % x, self.image_id,
+                    'test-list-server-limit%i' % x, self.image_id,
                     networks='none')))
             self.addCleanup(self.api.delete_server, server['id'])
-            self._wait_for_state_change(self.api, server, 'ERROR')
+            self._wait_for_state_change(server, 'ERROR')

         servers = self.api.get_servers()
         self.assertEqual(3, len(servers))
diff --git a/nova/tests/functional/regressions/test_bug_1702454.py b/nova/tests/functional/regressions/test_bug_1702454.py
index cfa764fb28d0..874a11eb2abd 100644
--- a/nova/tests/functional/regressions/test_bug_1702454.py
+++ b/nova/tests/functional/regressions/test_bug_1702454.py
@@ -95,11 +95,11 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
         # We first create the instance
         server = self.admin_api.post_server(
             dict(server=self._build_minimal_create_server_request(
-                self.api, 'my-pretty-instance-to-evacuate', self.image_id,
+                'my-pretty-instance-to-evacuate', self.image_id,
                 networks='none')))
         server_id = server['id']
         self.addCleanup(self.api.delete_server, server_id)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # We need to get instance details for knowing its host
         server = self.admin_api.get_server(server_id)
@@ -125,7 +125,7 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
         }
         self.admin_api.post_server_action(server['id'], evacuate)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         server = self.admin_api.get_server(server_id)

         # Yeepee, that works!
diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py
index 164c9eb04edc..bee0829a0aea 100644
--- a/nova/tests/functional/regressions/test_bug_1713783.py
+++ b/nova/tests/functional/regressions/test_bug_1713783.py
@@ -84,13 +84,12 @@ class FailedEvacuateStateTests(test.TestCase,

     def _boot_a_server(self):
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=self.flavor1['id'],
+            'some-server', flavor_id=self.flavor1['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         LOG.info('booting on %s', self.hostname)
         created_server = self.api.post_server({'server': server_req})
-        return self._wait_for_state_change(
-            self.api, created_server, 'ACTIVE')
+        return self._wait_for_state_change(created_server, 'ACTIVE')

     def test_evacuate_no_valid_host(self):
         # Boot a server
@@ -110,7 +109,7 @@ class FailedEvacuateStateTests(test.TestCase,

         self._wait_for_notification_event_type('compute_task.rebuild_server')

-        server = self._wait_for_state_change(self.api, server, 'ERROR')
+        server = self._wait_for_state_change(server, 'ERROR')
         self.assertEqual(self.hostname, server['OS-EXT-SRV-ATTR:host'])

         # Check migrations
diff --git a/nova/tests/functional/regressions/test_bug_1718455.py b/nova/tests/functional/regressions/test_bug_1718455.py
index a166d4bfb534..0687bbe28c1e 100644
--- a/nova/tests/functional/regressions/test_bug_1718455.py
+++ b/nova/tests/functional/regressions/test_bug_1718455.py
@@ -70,7 +70,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(

     def _boot_servers(self, num_servers=1):
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=self.flavor1['id'],
+            'some-server', flavor_id=self.flavor1['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         server_req.update({'min_count': str(num_servers),
@@ -81,7 +81,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
         servers = self.api.get_servers(detail=True,
             search_opts={'reservation_id': reservation_id})
         for idx, server in enumerate(servers):
-            servers[idx] = self._wait_for_state_change(self.api, server,
+            servers[idx] = self._wait_for_state_change(server,
                                                        'ACTIVE')
         return servers
diff --git a/nova/tests/functional/regressions/test_bug_1718512.py b/nova/tests/functional/regressions/test_bug_1718512.py
index a4fb73dc3d86..65fb9adca40f 100644
--- a/nova/tests/functional/regressions/test_bug_1718512.py
+++ b/nova/tests/functional/regressions/test_bug_1718512.py
@@ -103,9 +103,9 @@ class TestRequestSpecRetryReschedule(test.TestCase,
         # create the instance which should go to host1
         server = self.admin_api.post_server(
             dict(server=self._build_minimal_create_server_request(
-                self.api, 'test_resize_with_reschedule_then_live_migrate',
+                'test_resize_with_reschedule_then_live_migrate',
                 self.image_id, flavor_id=flavor1['id'], networks='none')))
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])

         # Stub out the resize to fail on host2, which will trigger a reschedule
@@ -116,17 +116,17 @@ class TestRequestSpecRetryReschedule(test.TestCase,
         # on host3.
         data = {'resize': {'flavorRef': flavor2['id']}}
         self.api.post_server_action(server['id'], data)
-        server = self._wait_for_state_change(self.admin_api, server,
+        server = self._wait_for_state_change(server,
                                              'VERIFY_RESIZE')
         self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])
         self.api.post_server_action(server['id'], {'confirmResize': None})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Now live migrate the server to host2 specifically, which previously
         # failed the resize attempt but here it should pass.
         data = {'os-migrateLive': {'host': 'host2', 'block_migration': 'auto'}}
         self.admin_api.post_server_action(server['id'], data)
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
         # NOTE(mriedem): The instance status effectively goes to ACTIVE before
         # the migration status is changed to "completed" since
diff --git a/nova/tests/functional/regressions/test_bug_1719730.py b/nova/tests/functional/regressions/test_bug_1719730.py
index 1cf09df8b9ae..054c6a535bf3 100644
--- a/nova/tests/functional/regressions/test_bug_1719730.py
+++ b/nova/tests/functional/regressions/test_bug_1719730.py
@@ -104,8 +104,7 @@ class TestRescheduleWithServerGroup(test.TestCase,
         hints = {'group': created_group['id']}
         created_server = self.api.post_server({'server': server,
                                                'os:scheduler_hints': hints})
-        found_server = self._wait_for_state_change(self.admin_api,
-                                                   created_server, 'ACTIVE')
+        found_server = self._wait_for_state_change(created_server, 'ACTIVE')
         # Assert that the host is not the failed host.
         self.assertNotEqual(self.failed_host,
                             found_server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/functional/regressions/test_bug_1732947.py b/nova/tests/functional/regressions/test_bug_1732947.py
index 1d780fa8784b..97f207f44297 100644
--- a/nova/tests/functional/regressions/test_bug_1732947.py
+++ b/nova/tests/functional/regressions/test_bug_1732947.py
@@ -64,7 +64,7 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase,
             }
         }
         server = self.api.post_server(server_req_body)
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # For a volume-backed server, the image ref will be an empty string
         # in the server response.
         self.assertEqual('', server['image'])
diff --git a/nova/tests/functional/regressions/test_bug_1735407.py b/nova/tests/functional/regressions/test_bug_1735407.py
index 846915ab3a2d..3488b1d6a05b 100644
--- a/nova/tests/functional/regressions/test_bug_1735407.py
+++ b/nova/tests/functional/regressions/test_bug_1735407.py
@@ -99,13 +99,11 @@ class TestParallelEvacuationWithServerGroup(
         hints = {'group': group['id']}
         created_server1 = self.api.post_server({'server': server,
                                                 'os:scheduler_hints': hints})
-        server1 = self._wait_for_state_change(self.api,
-                                              created_server1, 'ACTIVE')
+        server1 = self._wait_for_state_change(created_server1, 'ACTIVE')
         created_server2 = self.api.post_server({'server': server,
                                                 'os:scheduler_hints': hints})
-        server2 = self._wait_for_state_change(self.api,
-                                              created_server2, 'ACTIVE')
+        server2 = self._wait_for_state_change(created_server2, 'ACTIVE')

         # assert that the anti-affinity policy is enforced during the boot
         self.assertNotEqual(server1['OS-EXT-SRV-ATTR:host'],
@@ -134,9 +132,9 @@ class TestParallelEvacuationWithServerGroup(
         fake_notifier.wait_for_versioned_notifications(
             'instance.rebuild.start', n_events=1)
         server1 = self._wait_for_server_parameter(
-            self.api, server1, {'OS-EXT-STS:task_state': None})
+            server1, {'OS-EXT-STS:task_state': None})
         server2 = self._wait_for_server_parameter(
-            self.api, server2, {'OS-EXT-STS:task_state': None})
+            server2, {'OS-EXT-STS:task_state': None})

         # NOTE(gibi): The instance.host set _after_ the instance state and
         # tast_state is set back to normal so it is not enough to wait for
diff --git a/nova/tests/functional/regressions/test_bug_1741125.py b/nova/tests/functional/regressions/test_bug_1741125.py
index 4e2480014b95..73606ebf5a0c 100644
--- a/nova/tests/functional/regressions/test_bug_1741125.py
+++ b/nova/tests/functional/regressions/test_bug_1741125.py
@@ -48,13 +48,13 @@ class TestServerResizeReschedule(integrated_helpers.ProviderUsageBaseTestCase):
         supplied host_list, and does not call the scheduler.
         """
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=self.flavor1['id'],
+            'some-server', flavor_id=self.flavor1['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')

         self.first_attempt = True
         created_server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(self.api, created_server,
+        server = self._wait_for_state_change(created_server,
                                              'ACTIVE')

         actual_prep_resize = compute_manager.ComputeManager._prep_resize
@@ -74,7 +74,7 @@ class TestServerResizeReschedule(integrated_helpers.ProviderUsageBaseTestCase):
         data = {"resize": {"flavorRef": self.flavor2['id']}}
         self.api.post_server_action(server_uuid, data)

-        server = self._wait_for_state_change(self.api, created_server,
+        server = self._wait_for_state_change(created_server,
                                              'VERIFY_RESIZE')
         self.assertEqual(self.flavor2['name'],
                          server['flavor']['original_name'])
diff --git a/nova/tests/functional/regressions/test_bug_1741307.py b/nova/tests/functional/regressions/test_bug_1741307.py
index c829aa0cd205..33212848bb78 100644
--- a/nova/tests/functional/regressions/test_bug_1741307.py
+++ b/nova/tests/functional/regressions/test_bug_1741307.py
@@ -77,11 +77,11 @@ class TestResizeWithNoAllocationScheduler(

     def test_resize(self):
         # Create our server without networking just to keep things simple.
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test-resize', flavor_id=self.old_flavor['id'],
+            'test-resize', flavor_id=self.old_flavor['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         original_host = server['OS-EXT-SRV-ATTR:host']
         target_host = 'host1' if original_host == 'host2' else 'host2'
@@ -95,8 +95,7 @@ class TestResizeWithNoAllocationScheduler(
         self.api.post_server_action(server['id'], post)

         # Poll the server until the resize is done.
-        server = self._wait_for_state_change(
-            self.api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         # Assert that the server was migrated to the other host.
         self.assertEqual(target_host, server['OS-EXT-SRV-ATTR:host'])
         # Confirm the resize.
diff --git a/nova/tests/functional/regressions/test_bug_1746483.py b/nova/tests/functional/regressions/test_bug_1746483.py
index 675720123781..3471a5822687 100644
--- a/nova/tests/functional/regressions/test_bug_1746483.py
+++ b/nova/tests/functional/regressions/test_bug_1746483.py
@@ -90,7 +90,7 @@ class TestBootFromVolumeIsolatedHostsFilter(
         # networks='none'.
         with utils.temporary_mutation(self.api, microversion='2.37'):
             server = self.api.post_server(server_req_body)
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # NOTE(mriedem): The instance is successfully scheduled but since
         # the image_id from the volume_image_metadata isn't stored in the
         # RequestSpec.image.id, and restrict_isolated_hosts_to_isolated_images
diff --git a/nova/tests/functional/regressions/test_bug_1764556.py b/nova/tests/functional/regressions/test_bug_1764556.py
index 2a9cce0c50c2..151c977e3a02 100644
--- a/nova/tests/functional/regressions/test_bug_1764556.py
+++ b/nova/tests/functional/regressions/test_bug_1764556.py
@@ -62,13 +62,12 @@ class InstanceListWithDeletedServicesTestCase(
     def _migrate_server(self, server, target_host):
         self.admin_api.api_post('/servers/%s/action' % server['id'],
                                 {'migrate': None})
-        server = self._wait_for_state_change(
-            self.admin_api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self.assertEqual(target_host, server['OS-EXT-SRV-ATTR:host'])
         self.admin_api.api_post('/servers/%s/action' % server['id'],
                                 {'confirmResize': None},
                                 check_response_status=[204])
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         return server

     def test_instance_list_deleted_service_with_no_uuid(self):
@@ -87,10 +86,10 @@ class InstanceListWithDeletedServicesTestCase(

         # Create an instance which will be on host1 since it's the only host.
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test_instance_list_deleted_service_with_no_uuid',
+            'test_instance_list_deleted_service_with_no_uuid',
             image_uuid=self.image_id, networks='none')
         server = self.api.post_server({'server': server_req})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Now we start a 2nd compute which is "upgraded" (has a uuid) and
         # we'll migrate the instance to that host.
diff --git a/nova/tests/functional/regressions/test_bug_1764883.py b/nova/tests/functional/regressions/test_bug_1764883.py
index 2c77a72e56aa..3b79f9872315 100644
--- a/nova/tests/functional/regressions/test_bug_1764883.py
+++ b/nova/tests/functional/regressions/test_bug_1764883.py
@@ -94,7 +94,7 @@ class TestEvacuationWithSourceReturningDuringRebuild(
                           'imageRef': self.image_id,
                           'flavorRef': self.flavor_id}
         server_response = self.api.post_server({'server': server_request})
-        server = self._wait_for_state_change(self.api, server_response,
+        server = self._wait_for_state_change(server_response,
                                              'ACTIVE')

         # Record where the instance is running before forcing the service down
@@ -106,7 +106,7 @@ class TestEvacuationWithSourceReturningDuringRebuild(
         self.api.post_server_action(server['id'], {'evacuate': {}})

         # Wait for the instance to go into an ACTIVE state
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         server = self.api.get_server(server['id'])
         host = server['OS-EXT-SRV-ATTR:host']
         migrations = self.api.get_migrations()
diff --git a/nova/tests/functional/regressions/test_bug_1780373.py b/nova/tests/functional/regressions/test_bug_1780373.py
index 07abcd83917e..fde04a2b8915 100644
--- a/nova/tests/functional/regressions/test_bug_1780373.py
+++ b/nova/tests/functional/regressions/test_bug_1780373.py
@@ -60,7 +60,7 @@ class TestMultiCreateServerGroupMemberOverQuota(
         multi-create POST /servers request.
         """
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test_multi_create_server_group_members_over_quota',
+            'test_multi_create_server_group_members_over_quota',
             image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
             networks='none')
         server_req['min_count'] = 3
@@ -88,7 +88,7 @@ class TestMultiCreateServerGroupMemberOverQuota(
         self.useFixture(nova_fixtures.NoopConductorFixture())
         for x in range(3):
             server_req = self._build_minimal_create_server_request(
-                self.api, 'test_concurrent_request_%s' % x,
+                'test_concurrent_request_%s' % x,
                 image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
                 networks='none')
             hints = {'group': self.created_group['id']}
diff --git a/nova/tests/functional/regressions/test_bug_1781286.py b/nova/tests/functional/regressions/test_bug_1781286.py
index 857fa81d6e92..00f8f7ea7d96 100644
--- a/nova/tests/functional/regressions/test_bug_1781286.py
+++ b/nova/tests/functional/regressions/test_bug_1781286.py
@@ -78,7 +78,7 @@ class RescheduleBuildAvailabilityZoneUpCall(
         self.stub_out('nova.compute.manager.ComputeManager.'
                       'build_and_run_instance', wrap_bari)
         server = self._build_minimal_create_server_request(
-            self.api, 'test_server_create_reschedule_blocked_az_up_call')
+            'test_server_create_reschedule_blocked_az_up_call')
         server = self.api.post_server({'server': server})
         # Because we poisoned AggregateList.get_by_host after hitting the
         # compute service we have to wait for the notification that the build
@@ -88,7 +88,7 @@ class RescheduleBuildAvailabilityZoneUpCall(
         # build_and_run_instance twice so we have more than one instance of
         # the mock that needs to be stopped.
         mock.patch.stopall()
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # We should have rescheduled and the instance AZ should be set from the
         # Selection object. Since neither compute host is in an AZ, the server
         # is in the default AZ from config.
@@ -148,9 +148,9 @@ class RescheduleMigrateAvailabilityZoneUpCall(
         self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
                       wrap_prep_resize)
         server = self._build_minimal_create_server_request(
-            self.api, 'test_migrate_reschedule_blocked_az_up_call')
+            'test_migrate_reschedule_blocked_az_up_call')
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         original_host = server['OS-EXT-SRV-ATTR:host']

         # Now cold migrate the server to the other host.
@@ -164,7 +164,7 @@ class RescheduleMigrateAvailabilityZoneUpCall(
         # twice so we have more than one instance of the mock that needs to be
         # stopped.
         mock.patch.stopall()
-        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         final_host = server['OS-EXT-SRV-ATTR:host']
         self.assertNotIn(final_host, [original_host, self.rescheduled])
         # We should have rescheduled and the instance AZ should be set from the
diff --git a/nova/tests/functional/regressions/test_bug_1781710.py b/nova/tests/functional/regressions/test_bug_1781710.py
index 01d4a6b92db2..61f9a0f0a6fc 100644
--- a/nova/tests/functional/regressions/test_bug_1781710.py
+++ b/nova/tests/functional/regressions/test_bug_1781710.py
@@ -103,7 +103,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,

         # Now create two servers in that group.
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test_anti_affinity_multi_create',
+            'test_anti_affinity_multi_create',
             image_uuid=image_fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
             networks='none')
         server_req['min_count'] = 2
@@ -115,8 +115,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
         # Now wait for both servers to be ACTIVE and get the host on which
         # each server was built.
         for server in self.api.get_servers(detail=False):
-            server = self._wait_for_state_change(
-                self.admin_api, server, 'ACTIVE')
+            server = self._wait_for_state_change(server, 'ACTIVE')
             selected_hosts.add(server['OS-EXT-SRV-ATTR:host'])

         # Assert that each server is on a separate host.
diff --git a/nova/tests/functional/regressions/test_bug_1784353.py b/nova/tests/functional/regressions/test_bug_1784353.py
index 20be65f8a103..9d0115982cf3 100644
--- a/nova/tests/functional/regressions/test_bug_1784353.py
+++ b/nova/tests/functional/regressions/test_bug_1784353.py
@@ -76,7 +76,7 @@ class TestRescheduleWithVolumesAttached(

         server_response = self.api.post_server({'server': server_request})
         server_id = server_response['id']
-        self._wait_for_state_change(self.api, server_response, 'ACTIVE')
+        self._wait_for_state_change(server_response, 'ACTIVE')

         attached_volume_ids = self.cinder.volume_ids_for_instance(server_id)
         self.assertIn(volume_id, attached_volume_ids)
         self.assertEqual(1, len(self.cinder.volume_to_attachment))
diff --git a/nova/tests/functional/regressions/test_bug_1794996.py b/nova/tests/functional/regressions/test_bug_1794996.py
index 3b5802789444..ee0756e603f8 100644
--- a/nova/tests/functional/regressions/test_bug_1794996.py
+++ b/nova/tests/functional/regressions/test_bug_1794996.py
@@ -57,8 +57,7 @@ class TestEvacuateDeleteServerRestartOriginalCompute(
             server['id'], post)
         expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname,
                            'status': 'ACTIVE'}
-        server = self._wait_for_server_parameter(self.api, server,
-                                                 expected_params)
+        server = self._wait_for_server_parameter(server, expected_params)

         # Expect to have allocation and usages on both computes as the
         # source compute is still down
diff --git a/nova/tests/functional/regressions/test_bug_1797580.py b/nova/tests/functional/regressions/test_bug_1797580.py
index b752a9e8dc1a..08e318c6f8fe 100644
--- a/nova/tests/functional/regressions/test_bug_1797580.py
+++ b/nova/tests/functional/regressions/test_bug_1797580.py
@@ -65,31 +65,30 @@ class ColdMigrateTargetHostThenLiveMigrateTest(

     def test_cold_migrate_target_host_then_live_migrate(self):
         # Create a server, it doesn't matter on which host it builds.
         server = self._build_minimal_create_server_request(
-            self.api, 'test_cold_migrate_target_host_then_live_migrate',
+            'test_cold_migrate_target_host_then_live_migrate',
             image_uuid=image_fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
             networks='none')
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         original_host = server['OS-EXT-SRV-ATTR:host']
         target_host = 'host1' if original_host == 'host2' else 'host2'

         # Cold migrate the server to the specific target host.
         migrate_req = {'migrate': {'host': target_host}}
         self.admin_api.post_server_action(server['id'], migrate_req)
-        server = self._wait_for_state_change(
-            self.admin_api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')

         # Confirm the resize so the server stays on the target host.
         confim_req = {'confirmResize': None}
         self.admin_api.post_server_action(server['id'], confim_req)
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Attempt to live migrate the server but don't specify a host so the
         # scheduler has to pick one.
         live_migrate_req = {
             'os-migrateLive': {'host': None, 'block_migration': 'auto'}}
         self.admin_api.post_server_action(server['id'], live_migrate_req)
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # The live migration should have been successful and the server is now
         # back on the original host.
         self.assertEqual(original_host, server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/functional/regressions/test_bug_1806064.py b/nova/tests/functional/regressions/test_bug_1806064.py
index 00af275e4459..19c6455b3b34 100644
--- a/nova/tests/functional/regressions/test_bug_1806064.py
+++ b/nova/tests/functional/regressions/test_bug_1806064.py
@@ -103,7 +103,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
                       stub_check_num_instances_quota)

         server = self.api.post_server(server)
-        server = self._wait_for_state_change(self.api, server, 'ERROR')
+        server = self._wait_for_state_change(server, 'ERROR')
         # At this point, the build request should be gone and the instance
         # should have been created in cell1.
         context = nova_context.get_admin_context()
diff --git a/nova/tests/functional/regressions/test_bug_1806515.py b/nova/tests/functional/regressions/test_bug_1806515.py
index 9c03b3c5b3ed..ae093c6f2a22 100644
--- a/nova/tests/functional/regressions/test_bug_1806515.py
+++ b/nova/tests/functional/regressions/test_bug_1806515.py
@@ -57,7 +57,7 @@ class ShowErrorServerWithTags(test.TestCase,
                 'imageRef': self.image_id
             }
         })
-        return self._wait_for_state_change(self.api, server, 'ERROR')
+        return self._wait_for_state_change(server, 'ERROR')

     def test_show_server_tag_in_error(self):
         # Create a server which should go to ERROR state because we don't
diff --git a/nova/tests/functional/regressions/test_bug_1815153.py b/nova/tests/functional/regressions/test_bug_1815153.py
index 8e461322c0b9..ea93e7ea4b66 100644
--- a/nova/tests/functional/regressions/test_bug_1815153.py
+++ b/nova/tests/functional/regressions/test_bug_1815153.py
@@ -82,11 +82,11 @@ class NonPersistentFieldNotResetTest(
     def _create_server(self):
         # Create a server, it doesn't matter on which host it builds.
         server = self._build_minimal_create_server_request(
-            self.api, 'sample-server',
+            'sample-server',
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         return server

@@ -156,8 +156,7 @@ class NonPersistentFieldNotResetTest(
             server['id'], {'evacuate': {'host': target_host}})
         expected_params = {'OS-EXT-SRV-ATTR:host': original_host,
                            'status': 'ERROR'}
-        server = self._wait_for_server_parameter(self.api, server,
-                                                 expected_params)
+        server = self._wait_for_server_parameter(server, expected_params)

         # Make sure 'is_bfv' is set.
         reqspec = objects.RequestSpec.get_by_instance_uuid(self.ctxt,
diff --git a/nova/tests/functional/regressions/test_bug_1823370.py b/nova/tests/functional/regressions/test_bug_1823370.py
index ffbbf1183b95..9edf756bc9b9 100644
--- a/nova/tests/functional/regressions/test_bug_1823370.py
+++ b/nova/tests/functional/regressions/test_bug_1823370.py
@@ -59,7 +59,7 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase,
         # weight.
         server = self._build_server(self.api.get_flavors()[0]['id'])
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])

         # Disable the host on which the server is now running.
@@ -72,5 +72,5 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase, req = {'evacuate': {'onSharedStorage': False}} self.api.post_server_action(server['id'], req) self._wait_for_migration_status(server, ['done']) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host']) diff --git a/nova/tests/functional/regressions/test_bug_1825020.py b/nova/tests/functional/regressions/test_bug_1825020.py index 492d9081dbde..8b8c47422db0 100644 --- a/nova/tests/functional/regressions/test_bug_1825020.py +++ b/nova/tests/functional/regressions/test_bug_1825020.py @@ -67,14 +67,14 @@ class VolumeBackedResizeDiskDown(test.TestCase, }] } server = self.api.post_server({'server': server}) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # Now try to resize the server with the flavor that has smaller disk. # This should be allowed since the server is volume-backed and the # disk size in the flavor shouldn't matter. data = {'resize': {'flavorRef': flavor1['id']}} self.api.post_server_action(server['id'], data) - self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + self._wait_for_state_change(server, 'VERIFY_RESIZE') # Now confirm the resize just to complete the operation. self.api.post_server_action(server['id'], {'confirmResize': None}) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') diff --git a/nova/tests/functional/regressions/test_bug_1825034.py b/nova/tests/functional/regressions/test_bug_1825034.py index ea9acee58c21..77f48adb2c0a 100644 --- a/nova/tests/functional/regressions/test_bug_1825034.py +++ b/nova/tests/functional/regressions/test_bug_1825034.py @@ -57,7 +57,7 @@ class FillVirtualInterfaceListMigration( 'imageRef': fake_image.get_valid_image_id() } }) - return self._wait_for_state_change(self.api, server, 'ACTIVE') + return self._wait_for_state_change(server, 'ACTIVE') def test_fill_vifs_migration(self): # Create a test server. diff --git a/nova/tests/functional/regressions/test_bug_1825537.py b/nova/tests/functional/regressions/test_bug_1825537.py index ec3326e3cd63..e6989a48cafc 100644 --- a/nova/tests/functional/regressions/test_bug_1825537.py +++ b/nova/tests/functional/regressions/test_bug_1825537.py @@ -40,7 +40,7 @@ class FinishResizeErrorAllocationCleanupTestCase( # to avoid a race we need to wait for the migration status to change # to 'error' which happens after the fault is recorded. self._wait_for_migration_status(server, ['error']) - server = self._wait_for_state_change(self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') # The server should be pointing at $dest_host because resize_instance # will have updated the host/node value on the instance before casting # to the finish_resize method on the dest compute. diff --git a/nova/tests/functional/regressions/test_bug_1830747.py b/nova/tests/functional/regressions/test_bug_1830747.py index 177048396eab..0ecb4eb963e5 100644 --- a/nova/tests/functional/regressions/test_bug_1830747.py +++ b/nova/tests/functional/regressions/test_bug_1830747.py @@ -87,11 +87,11 @@ class MissingReqSpecInstanceGroupUUIDTestCase( # Create a server in the group which should land on host1 due to our # custom weigher. 
server = self._build_minimal_create_server_request( - self.api, 'test_cold_migrate_reschedule') + 'test_cold_migrate_reschedule') body = dict(server=server) body['os:scheduler_hints'] = {'group': group_id} server = self.api.post_server(body) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host']) # Verify the group uuid is set in the request spec. @@ -129,8 +129,7 @@ class MissingReqSpecInstanceGroupUUIDTestCase( with mock.patch.dict(host1_driver.capabilities, supports_migrate_to_same_host=False): self.api.post_server_action(server['id'], {'migrate': None}) - server = self._wait_for_state_change( - self.api, server, 'VERIFY_RESIZE') + server = self._wait_for_state_change(server, 'VERIFY_RESIZE') self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host']) # The RequestSpec.instance_group.uuid should still be set. diff --git a/nova/tests/functional/regressions/test_bug_1835822.py b/nova/tests/functional/regressions/test_bug_1835822.py index f2ff3e63ce02..d627cd8fa788 100644 --- a/nova/tests/functional/regressions/test_bug_1835822.py +++ b/nova/tests/functional/regressions/test_bug_1835822.py @@ -57,30 +57,29 @@ class RegressionTest1835822( if server_args: basic_server.update(server_args) server = self.api.post_server({'server': basic_server}) - return self._wait_for_state_change(self.api, server, 'ACTIVE') + return self._wait_for_state_change(server, 'ACTIVE') def _hard_reboot_server(self, active_server): args = {"reboot": {"type": "HARD"}} self.api.api_post('servers/%s/action' % active_server['id'], args) fake_notifier.wait_for_versioned_notifications('instance.reboot.end') - return self._wait_for_state_change(self.api, active_server, 'ACTIVE') + return self._wait_for_state_change(active_server, 'ACTIVE') def _rebuild_server(self, active_server): args = {"rebuild": {"imageRef": self.image_ref_1}} self.api.api_post('servers/%s/action' % active_server['id'], args) fake_notifier.wait_for_versioned_notifications('instance.rebuild.end') - return self._wait_for_state_change(self.api, active_server, 'ACTIVE') + return self._wait_for_state_change(active_server, 'ACTIVE') def _shelve_server(self, active_server): self.api.post_server_action(active_server['id'], {'shelve': {}}) - return self._wait_for_state_change( - self.api, active_server, 'SHELVED_OFFLOADED') + return self._wait_for_state_change(active_server, 'SHELVED_OFFLOADED') def _unshelve_server(self, shelved_server): self.api.post_server_action(shelved_server['id'], {'unshelve': {}}) - return self._wait_for_state_change(self.api, shelved_server, 'ACTIVE') + return self._wait_for_state_change(shelved_server, 'ACTIVE') # ---------------------------- tests ---------------------------- def test_create_server_with_config_drive(self): diff --git a/nova/tests/functional/regressions/test_bug_1837955.py b/nova/tests/functional/regressions/test_bug_1837955.py index f52ba9b817f8..2dae7203a8c6 100644 --- a/nova/tests/functional/regressions/test_bug_1837955.py +++ b/nova/tests/functional/regressions/test_bug_1837955.py @@ -80,11 +80,11 @@ class BuildRescheduleClaimFailsTestCase( # Now that our stub is in place, try to create a server and wait for it # to go to ERROR status. 
server = self._build_minimal_create_server_request( - self.api, 'test_build_reschedule_alt_host_alloc_fails', + 'test_build_reschedule_alt_host_alloc_fails', image_uuid=fake_image.get_valid_image_id(), networks=[{'port': self.neutron.port_1['id']}]) server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') # Wait for the MaxRetriesExceeded fault to be recorded. # set_vm_state_and_notify sets the vm_state to ERROR before the fault diff --git a/nova/tests/functional/regressions/test_bug_1843090.py b/nova/tests/functional/regressions/test_bug_1843090.py index eac9a931d5d5..71572828b0f0 100644 --- a/nova/tests/functional/regressions/test_bug_1843090.py +++ b/nova/tests/functional/regressions/test_bug_1843090.py @@ -43,12 +43,12 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase): self.flags(compute=version_cap, group='upgrade_levels') server_req = self._build_minimal_create_server_request( - self.api, 'server1', + 'server1', networks=[], image_uuid=fake_image.get_valid_image_id(), flavor_id=self.flavor1['id']) server = self.api.post_server({'server': server_req}) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') orig_claim = nova.compute.resource_tracker.ResourceTracker.resize_claim claim_calls = [] @@ -76,12 +76,10 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase): # We expect that the instance is on host3 as the scheduler # selected host2 due to our weigher and the cold migrate failed # there and re-scheduled to host3 where it succeeded. - self._wait_for_server_parameter( - self.api, server, - { - 'OS-EXT-SRV-ATTR:host': 'host3', - 'OS-EXT-STS:task_state': None, - 'status': 'VERIFY_RESIZE'}) + self._wait_for_server_parameter(server, { + 'OS-EXT-SRV-ATTR:host': 'host3', + 'OS-EXT-STS:task_state': None, + 'status': 'VERIFY_RESIZE'}) # we ensure that there was a failed and then a successful claim call self.assertEqual(['host2', 'host3'], claim_calls) diff --git a/nova/tests/functional/regressions/test_bug_1845291.py b/nova/tests/functional/regressions/test_bug_1845291.py index 382971b5fc29..bdfa460a0c15 100644 --- a/nova/tests/functional/regressions/test_bug_1845291.py +++ b/nova/tests/functional/regressions/test_bug_1845291.py @@ -68,11 +68,9 @@ class ForcedHostMissingReScheduleTestCase( # We expect that the instance re-scheduled but successfully ended # up on the second destination host. - self._wait_for_server_parameter( - self.api, server, - { - 'OS-EXT-STS:task_state': None, - 'status': 'VERIFY_RESIZE'}) + self._wait_for_server_parameter(server, { + 'OS-EXT-STS:task_state': None, + 'status': 'VERIFY_RESIZE'}) # we ensure that there was a failed and then a successful claim call self.assertEqual(2, len(claim_calls)) diff --git a/nova/tests/functional/regressions/test_bug_1848343.py b/nova/tests/functional/regressions/test_bug_1848343.py index 63dc38166c0d..8c2735ceef12 100644 --- a/nova/tests/functional/regressions/test_bug_1848343.py +++ b/nova/tests/functional/regressions/test_bug_1848343.py @@ -37,10 +37,10 @@ class DeletedServerAllocationRevertTest( source host and target host. 
""" server = self._build_minimal_create_server_request( - self.api, name, image_uuid=fake_image.get_valid_image_id(), + name, image_uuid=fake_image.get_valid_image_id(), networks='none') server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') source_host = server['OS-EXT-SRV-ATTR:host'] target_host = 'host2' if source_host == 'host1' else 'host1' return server, source_host, target_host @@ -127,7 +127,7 @@ class DeletedServerAllocationRevertTest( # action event after the task rollback happens. self._wait_for_action_fail_completion( server, instance_actions.LIVE_MIGRATION, - 'conductor_live_migrate_instance', api=self.api) + 'conductor_live_migrate_instance') self._assert_no_allocations(server) def test_migrate_on_compute_fail(self): @@ -155,6 +155,5 @@ class DeletedServerAllocationRevertTest( # when the instance is deleted so just wait for the failed instance # action event after the allocation revert happens. self._wait_for_action_fail_completion( - server, instance_actions.MIGRATE, 'compute_prep_resize', - api=self.api) + server, instance_actions.MIGRATE, 'compute_prep_resize') self._assert_no_allocations(server) diff --git a/nova/tests/functional/regressions/test_bug_1849165.py b/nova/tests/functional/regressions/test_bug_1849165.py index 5939a8c40405..f2a7f82ee9db 100644 --- a/nova/tests/functional/regressions/test_bug_1849165.py +++ b/nova/tests/functional/regressions/test_bug_1849165.py @@ -52,11 +52,9 @@ class UpdateResourceMigrationRaceTest( server['id'], {'os-migrateLive': {'host': None, 'block_migration': 'auto'}}) - self._wait_for_server_parameter( - self.api, server, - { - 'OS-EXT-STS:task_state': None, - 'status': 'ACTIVE'}) + self._wait_for_server_parameter(server, { + 'OS-EXT-STS:task_state': None, + 'status': 'ACTIVE'}) # NOTE(efried): This was bug 1849165 where # _populate_assigned_resources raised a TypeError because it tried diff --git a/nova/tests/functional/regressions/test_bug_1849409.py b/nova/tests/functional/regressions/test_bug_1849409.py index db024052e77f..d3d05eb98654 100644 --- a/nova/tests/functional/regressions/test_bug_1849409.py +++ b/nova/tests/functional/regressions/test_bug_1849409.py @@ -42,10 +42,10 @@ class ListDeletedServersWithMarker(test.TestCase, def test_list_deleted_servers_with_marker(self): # Create a server. server = self._build_minimal_create_server_request( - self.api, 'test_list_deleted_servers_with_marker', + 'test_list_deleted_servers_with_marker', image_uuid=fake_image.get_valid_image_id()) server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Now delete the server and wait for it to be gone. 
self.api.delete_server(server['id']) self._wait_until_deleted(server) diff --git a/nova/tests/functional/test_aggregates.py b/nova/tests/functional/test_aggregates.py index 524ea900c01e..b0d277012ace 100644 --- a/nova/tests/functional/test_aggregates.py +++ b/nova/tests/functional/test_aggregates.py @@ -215,13 +215,12 @@ class AggregateRequestFiltersTest( flavor_id = flavor_id or self.flavors[0]['id'] image_uuid = image_id or '155d900f-4e14-4e4c-a73d-069cbf4541e6' server_req = self._build_minimal_create_server_request( - self.api, 'test-instance', flavor_id=flavor_id, + 'test-instance', flavor_id=flavor_id, image_uuid=image_uuid, networks='none', az=az) created_server = self.api.post_server({'server': server_req}) - server = self._wait_for_state_change( - self.admin_api, created_server, end_status) + server = self._wait_for_state_change(created_server, end_status) return server @@ -330,8 +329,7 @@ class AggregatePostTest(AggregateRequestFiltersTest): # Configure for the SOFT_DELETED scenario. self.flags(reclaim_instance_interval=300) self.api.delete_server(server['id']) - server = self._wait_for_state_change( - self.admin_api, server, 'SOFT_DELETED') + server = self._wait_for_state_change(server, 'SOFT_DELETED') self.assertRaisesRegex( client.OpenStackApiException, 'One or more hosts contain instances in this zone.', @@ -876,7 +874,7 @@ class TestAggregateMultiTenancyIsolationFilter( aggregate """ # Create a tenant-isolated aggregate for the non-admin user. - user_api = self.useFixture( + self.api = self.useFixture( nova_fixtures.OSAPIFixture(api_version='v2.1', project_id=uuids.non_admin)).api agg_id = self.admin_api.post_aggregate( @@ -901,13 +899,12 @@ class TestAggregateMultiTenancyIsolationFilter( spy_get_filtered_hosts) # Create a server for the admin - should only have one host candidate. server_req = self._build_minimal_create_server_request( - self.admin_api, 'test_aggregate_multitenancy_isolation_filter-admin', networks='none') # requires microversion 2.37 server_req = {'server': server_req} with utils.temporary_mutation(self.admin_api, microversion='2.37'): server = self.admin_api.post_server(server_req) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Assert it's not on host2 which is isolated to the non-admin tenant. self.assertNotEqual('host2', server['OS-EXT-SRV-ATTR:host']) self.assertEqual(1, len(self.filtered_hosts)) @@ -917,13 +914,12 @@ class TestAggregateMultiTenancyIsolationFilter( # up on host2 because the other host, which is not isolated to the # aggregate, is still a candidate. 
server_req = self._build_minimal_create_server_request( - user_api, 'test_aggregate_multitenancy_isolation_filter-user', networks='none') # requires microversion 2.37 server_req = {'server': server_req} - with utils.temporary_mutation(user_api, microversion='2.37'): - server = user_api.post_server(server_req) - self._wait_for_state_change(user_api, server, 'ACTIVE') + with utils.temporary_mutation(self.api, microversion='2.37'): + server = self.api.post_server(server_req) + self._wait_for_state_change(server, 'ACTIVE') self.assertEqual(2, len(self.filtered_hosts)) @@ -1028,10 +1024,10 @@ class AggregateMultiTenancyIsolationColdMigrateTest( """ img = nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID server_req_body = self._build_minimal_create_server_request( - self.api, 'test_cold_migrate_server', image_uuid=img, + 'test_cold_migrate_server', image_uuid=img, networks='none') server = self.api.post_server({'server': server_req_body}) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Ensure the server ended up in host2 or host3 original_host = server['OS-EXT-SRV-ATTR:host'] self.assertNotEqual('host1', original_host) @@ -1039,8 +1035,7 @@ class AggregateMultiTenancyIsolationColdMigrateTest( # in the same tenant-isolated aggregate. self.admin_api.api_post( '/servers/%s/action' % server['id'], {'migrate': None}) - server = self._wait_for_state_change( - self.admin_api, server, 'VERIFY_RESIZE') + server = self._wait_for_state_change(server, 'VERIFY_RESIZE') # Ensure the server is on the other host in the same aggregate. expected_host = 'host3' if original_host == 'host2' else 'host2' self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host']) diff --git a/nova/tests/functional/test_availability_zones.py b/nova/tests/functional/test_availability_zones.py index b4d0b18994c6..c6bf461c7642 100644 --- a/nova/tests/functional/test_availability_zones.py +++ b/nova/tests/functional/test_availability_zones.py @@ -74,10 +74,10 @@ class TestAvailabilityZoneScheduling( def _create_server(self, name): # Create a server, it doesn't matter which host it ends up in. server_body = self._build_minimal_create_server_request( - self.api, name, image_uuid=fake_image.get_valid_image_id(), + name, image_uuid=fake_image.get_valid_image_id(), flavor_id=self.flavor1, networks='none') server = self.api.post_server({'server': server_body}) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') original_host = server['OS-EXT-SRV-ATTR:host'] # Assert the server has the AZ set (not None or 'nova'). expected_zone = 'zone1' if original_host == 'host1' else 'zone2' @@ -153,7 +153,7 @@ class TestAvailabilityZoneScheduling( # Resize the server which should move it to the other zone. self.api.post_server_action( server['id'], {'resize': {'flavorRef': self.flavor2}}) - server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + server = self._wait_for_state_change(server, 'VERIFY_RESIZE') # Now the server should be in the other AZ. new_zone = 'zone2' if original_host == 'host1' else 'zone1' @@ -161,5 +161,5 @@ class TestAvailabilityZoneScheduling( # Revert the resize and the server should be back in the original AZ. 
self.api.post_server_action(server['id'], {'revertResize': None}) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') self._assert_instance_az(server, original_az) diff --git a/nova/tests/functional/test_boot_from_volume.py b/nova/tests/functional/test_boot_from_volume.py index 8de80f0ed35a..3bed0831812f 100644 --- a/nova/tests/functional/test_boot_from_volume.py +++ b/nova/tests/functional/test_boot_from_volume.py @@ -79,7 +79,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin, server['block_device_mapping_v2'] = [bdm] created_server = self.api.post_server({"server": server}) server_id = created_server['id'] - self._wait_for_state_change(self.api, created_server, 'ACTIVE') + self._wait_for_state_change(created_server, 'ACTIVE') # Check that hypervisor local disk reporting is still 0 self._verify_zero_local_gb_used() @@ -94,7 +94,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin, # Resize post_data = {'resize': {'flavorRef': flavor_id_alt}} self.api.post_server_action(server_id, post_data) - self._wait_for_state_change(self.api, created_server, 'VERIFY_RESIZE') + self._wait_for_state_change(created_server, 'VERIFY_RESIZE') # Check that hypervisor local disk reporting is still 0 self._verify_zero_local_gb_used() @@ -106,7 +106,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin, # Confirm the resize post_data = {'confirmResize': None} self.api.post_server_action(server_id, post_data) - self._wait_for_state_change(self.api, created_server, 'ACTIVE') + self._wait_for_state_change(created_server, 'ACTIVE') # Check that hypervisor local disk reporting is still 0 self._verify_zero_local_gb_used() @@ -118,7 +118,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin, # Shelve post_data = {'shelve': None} self.api.post_server_action(server_id, post_data) - self._wait_for_state_change(self.api, created_server, + self._wait_for_state_change(created_server, 'SHELVED_OFFLOADED') # Check that hypervisor local disk reporting is still 0 @@ -131,7 +131,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin, # Unshelve post_data = {'unshelve': None} self.api.post_server_action(server_id, post_data) - self._wait_for_state_change(self.api, created_server, 'ACTIVE') + self._wait_for_state_change(created_server, 'ACTIVE') # Check that hypervisor local disk reporting is still 0 self._verify_zero_local_gb_used() @@ -146,7 +146,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin, image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6' post_data = {'rebuild': {'imageRef': image_uuid}} self.api.post_server_action(server_id, post_data) - self._wait_for_state_change(self.api, created_server, 'ACTIVE') + self._wait_for_state_change(created_server, 'ACTIVE') # Check that hypervisor local disk reporting is still 0 self._verify_zero_local_gb_used() @@ -161,7 +161,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin, """ self.flags(max_local_block_devices=0) server = self._build_minimal_create_server_request( - self.admin_api, 'test_max_local_block_devices_0_force_bfv') + 'test_max_local_block_devices_0_force_bfv') ex = self.assertRaises(api_client.OpenStackApiException, self.admin_api.post_server, {'server': server}) @@ -203,7 +203,7 @@ class BootFromVolumeLargeRequestTest(test.TestCase, image1 = 'a2459075-d96c-40d5-893e-577ff92e721c' image2 = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' server = self._build_minimal_create_server_request( - 
self.api, 'test_boot_from_volume_10_servers_255_volumes_2_images') + 'test_boot_from_volume_10_servers_255_volumes_2_images') server.pop('imageRef') server['min_count'] = 10 bdms = [] diff --git a/nova/tests/functional/test_conf_max_attach_disk_devices.py b/nova/tests/functional/test_conf_max_attach_disk_devices.py index 607fec6e633b..a6dcb0c2283f 100644 --- a/nova/tests/functional/test_conf_max_attach_disk_devices.py +++ b/nova/tests/functional/test_conf_max_attach_disk_devices.py @@ -51,7 +51,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin, 'destination_type': 'volume'} server['block_device_mapping_v2'] = [bdm] created_server = self.api.post_server({"server": server}) - self._wait_for_state_change(self.api, created_server, 'ACTIVE') + self._wait_for_state_change(created_server, 'ACTIVE') def test_boot_from_volume_plus_attach_max_exceeded(self): # Set the maximum to 1, boot from 1 volume, and attach one volume. @@ -72,7 +72,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin, created_server = self.api.post_server({"server": server}) server_id = created_server['id'] # Server should go into ERROR state - self._wait_for_state_change(self.api, created_server, 'ERROR') + self._wait_for_state_change(created_server, 'ERROR') # Verify the instance fault server = self.api.get_server(server_id) # If anything fails during _prep_block_device, a 500 internal server @@ -95,7 +95,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin, server = self._build_server(flavor_id='1') created_server = self.api.post_server({"server": server}) server_id = created_server['id'] - self._wait_for_state_change(self.api, created_server, 'ACTIVE') + self._wait_for_state_change(created_server, 'ACTIVE') # Attach one volume, should pass. vol_id = '9a695496-44aa-4404-b2cc-ccab2501f87e' self.api.post_server_volume( diff --git a/nova/tests/functional/test_cross_az_attach.py b/nova/tests/functional/test_cross_az_attach.py index 1fe11d9ceea8..df12747c937c 100644 --- a/nova/tests/functional/test_cross_az_attach.py +++ b/nova/tests/functional/test_cross_az_attach.py @@ -60,7 +60,6 @@ class CrossAZAttachTestCase(test.TestCase, """ self.flags(cross_az_attach=False, group='cinder') server = self._build_minimal_create_server_request( - self.api, 'test_cross_az_attach_false_boot_from_volume_no_az_specified') del server['imageRef'] # Do not need imageRef for boot from volume. 
server['block_device_mapping_v2'] = [{ @@ -70,7 +69,7 @@ class CrossAZAttachTestCase(test.TestCase, 'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL }] server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone']) def test_cross_az_attach_false_data_volume_no_az_specified(self): @@ -82,7 +81,6 @@ class CrossAZAttachTestCase(test.TestCase, """ self.flags(cross_az_attach=False, group='cinder') server = self._build_minimal_create_server_request( - self.api, 'test_cross_az_attach_false_data_volume_no_az_specified') # Note that we use the legacy block_device_mapping parameter rather # than block_device_mapping_v2 because that will create an implicit @@ -95,7 +93,7 @@ class CrossAZAttachTestCase(test.TestCase, 'volume_id': nova_fixtures.CinderFixture.SWAP_OLD_VOL }] server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone']) def test_cross_az_attach_false_boot_from_volume_default_zone_match(self): @@ -106,7 +104,6 @@ class CrossAZAttachTestCase(test.TestCase, self.flags(cross_az_attach=False, group='cinder') self.flags(default_schedule_zone=self.az) server = self._build_minimal_create_server_request( - self.api, 'test_cross_az_attach_false_boot_from_volume_default_zone_match') del server['imageRef'] # Do not need imageRef for boot from volume. server['block_device_mapping_v2'] = [{ @@ -116,7 +113,7 @@ class CrossAZAttachTestCase(test.TestCase, 'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL }] server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone']) def test_cross_az_attach_false_bfv_az_specified_mismatch(self): @@ -126,7 +123,7 @@ class CrossAZAttachTestCase(test.TestCase, """ self.flags(cross_az_attach=False, group='cinder') server = self._build_minimal_create_server_request( - self.api, 'test_cross_az_attach_false_bfv_az_specified_mismatch', + 'test_cross_az_attach_false_bfv_az_specified_mismatch', az='london') del server['imageRef'] # Do not need imageRef for boot from volume. 
server['block_device_mapping_v2'] = [{ @@ -150,7 +147,7 @@ class CrossAZAttachTestCase(test.TestCase, """ self.flags(cross_az_attach=False, group='cinder') server = self._build_minimal_create_server_request( - self.api, 'test_cross_az_attach_false_no_volumes', az=self.az) + 'test_cross_az_attach_false_no_volumes', az=self.az) server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone']) diff --git a/nova/tests/functional/test_cross_cell_migrate.py b/nova/tests/functional/test_cross_cell_migrate.py index 6e2f554c738b..9ccf4d9b93ee 100644 --- a/nova/tests/functional/test_cross_cell_migrate.py +++ b/nova/tests/functional/test_cross_cell_migrate.py @@ -127,7 +127,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase): }] image_uuid = fake_image.get_valid_image_id() server = self._build_minimal_create_server_request( - self.api, 'test_cross_cell_resize', + 'test_cross_cell_resize', image_uuid=image_uuid, flavor_id=flavor['id'], networks=networks) @@ -146,7 +146,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase): server.pop('imageRef', None) server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # For volume-backed make sure there is one attachment to start. if volume_backed: self.assertEqual(1, self._count_volume_attachments(server['id']), @@ -200,7 +200,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase): if stopped: # Stop the server before resizing it. self.api.post_server_action(server['id'], {'os-stop': None}) - self._wait_for_state_change(self.api, server, 'SHUTOFF') + self._wait_for_state_change(server, 'SHUTOFF') # Before resizing make sure quota usage is only 1 for total instances. self.assert_quota_usage(expected_num_instances=1) @@ -222,7 +222,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase): self.api.post_server_action(server['id'], body) # Wait for the server to be resized and then verify the host has # changed to be the host in the other cell. - server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + server = self._wait_for_state_change(server, 'VERIFY_RESIZE') self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host']) # Assert that the instance is only listed one time from the API (to # make sure it's not listed out of both cells). @@ -487,8 +487,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase): # The server should go to ERROR state with a fault record and # the API should still be showing the server from the source cell # because the instance mapping was not updated. - server = self._wait_for_server_parameter( - self.admin_api, server, + server = self._wait_for_server_parameter(server, {'status': 'ERROR', 'OS-EXT-STS:task_state': None}) # The migration should be in 'error' status. @@ -511,12 +510,12 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase): # Now hard reboot the server in the source cell and it should go back # to ACTIVE. 
self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}}) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # Now retry the resize without the fault in the target host to make # sure things are OK (no duplicate entry errors in the target DB). self.api.post_server_action(server['id'], body) - self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE') + self._wait_for_state_change(server, 'VERIFY_RESIZE') def _assert_instance_not_in_cell(self, cell_name, server_id): cell = self.cell_mappings[cell_name] @@ -567,8 +566,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase): # The server should go to ERROR state with a fault record and # the API should still be showing the server from the source cell # because the instance mapping was not updated. - server = self._wait_for_server_parameter( - self.admin_api, server, + server = self._wait_for_server_parameter(server, {'status': 'ERROR', 'OS-EXT-STS:task_state': None}) # The migration should be in 'error' status. @@ -585,9 +583,9 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase): # Now hard reboot the server in the source cell and it should go back # to ACTIVE. self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}}) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # Now retry the resize without the fault in the target host to make # sure things are OK (no duplicate entry errors in the target DB). self.api.post_server_action(server['id'], body) - self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE') + self._wait_for_state_change(server, 'VERIFY_RESIZE') diff --git a/nova/tests/functional/test_json_filter.py b/nova/tests/functional/test_json_filter.py index 95fe3d74f1b7..d9f0bbcd8e59 100644 --- a/nova/tests/functional/test_json_filter.py +++ b/nova/tests/functional/test_json_filter.py @@ -60,10 +60,10 @@ class JsonFilterTestCase(integrated_helpers.ProviderUsageBaseTestCase): # custom HostNameWeigher, host1 would be chosen. query = jsonutils.dumps(['=', '$hypervisor_hostname', 'host2']) server = self._build_minimal_create_server_request( - self.api, 'test_filter_on_hypervisor_hostname') + 'test_filter_on_hypervisor_hostname') request = {'server': server, 'os:scheduler_hints': {'query': query}} server = self.api.post_server(request) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Since we request host2 the server should be there despite host1 being # weighed higher. self.assertEqual( diff --git a/nova/tests/functional/test_multiattach.py b/nova/tests/functional/test_multiattach.py index e2db364aedd6..42193c2f2391 100644 --- a/nova/tests/functional/test_multiattach.py +++ b/nova/tests/functional/test_multiattach.py @@ -51,7 +51,7 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase, 'boot_index': 0 }] server = self.api.post_server({'server': create_req}) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # Make sure the volume is attached to the first server. 
attachments = self.api.api_get( '/servers/%s/os-volume_attachments' % server['id']).body[ @@ -65,7 +65,7 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase, flavor_id='1', image='155d900f-4e14-4e4c-a73d-069cbf4541e6') create_req['networks'] = 'none' server2 = self.api.post_server({'server': create_req}) - self._wait_for_state_change(self.api, server2, 'ACTIVE') + self._wait_for_state_change(server2, 'ACTIVE') # Attach the volume to the second server. self.api.api_post('/servers/%s/os-volume_attachments' % server2['id'], {'volumeAttachment': {'volumeId': volume_id}}) diff --git a/nova/tests/functional/test_nova_manage.py b/nova/tests/functional/test_nova_manage.py index 7c209403f5cd..43beae0bafa6 100644 --- a/nova/tests/functional/test_nova_manage.py +++ b/nova/tests/functional/test_nova_manage.py @@ -414,7 +414,7 @@ class TestNovaManagePlacementHealAllocations( provider uuid """ server_req = self._build_minimal_create_server_request( - self.api, 'some-server', flavor_id=flavor['id'], + 'some-server', flavor_id=flavor['id'], image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', networks='none') server_req['availability_zone'] = 'nova:%s' % hostname @@ -428,8 +428,7 @@ class TestNovaManagePlacementHealAllocations( }] server_req['imageRef'] = '' created_server = self.api.post_server({'server': server_req}) - server = self._wait_for_state_change( - self.admin_api, created_server, 'ACTIVE') + server = self._wait_for_state_change(created_server, 'ACTIVE') # Verify that our source host is what the server ended up on self.assertEqual(hostname, server['OS-EXT-SRV-ATTR:host']) @@ -564,8 +563,7 @@ class TestNovaManagePlacementHealAllocations( # The server status goes to SHELVED_OFFLOADED before the host/node # is nulled out in the compute service, so we also have to wait for # that so we don't race when we run heal_allocations. 
- server = self._wait_for_server_parameter( - self.admin_api, server, + server = self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': None, 'status': 'SHELVED_OFFLOADED'}) result = self.cli.heal_allocations(verbose=True) self.assertEqual(4, result, self.output.getvalue()) @@ -788,7 +786,7 @@ class TestNovaManagePlacementHealPortAllocations( server = self._create_server( flavor=self.flavor, networks=[{'port': port['id']} for port in ports]) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # This is a hack to simulate that we have a server that is missing # allocation for its port @@ -1482,23 +1480,23 @@ class TestDBArchiveDeletedRowsMultiCell(integrated_helpers.InstanceHelperMixin, # Boot a server to cell1 server_ids = {} server = self._build_minimal_create_server_request( - self.api, 'cell1-server', az='nova:host1') + 'cell1-server', az='nova:host1') created_server = self.api.post_server({'server': server}) - self._wait_for_state_change(self.api, created_server, 'ACTIVE') + self._wait_for_state_change(created_server, 'ACTIVE') server_ids['cell1'] = created_server['id'] # Boot a server to cell2 server = self._build_minimal_create_server_request( - self.api, 'cell2-server', az='nova:host2') + 'cell2-server', az='nova:host2') created_server = self.api.post_server({'server': server}) - self._wait_for_state_change(self.api, created_server, 'ACTIVE') + self._wait_for_state_change(created_server, 'ACTIVE') server_ids['cell2'] = created_server['id'] # Boot a server to cell0 (cause ERROR state prior to schedule) server = self._build_minimal_create_server_request( - self.api, 'cell0-server') + 'cell0-server') # Flavor m1.xlarge cannot be fulfilled server['flavorRef'] = 'http://fake.server/5' created_server = self.api.post_server({'server': server}) - self._wait_for_state_change(self.api, created_server, 'ERROR') + self._wait_for_state_change(created_server, 'ERROR') server_ids['cell0'] = created_server['id'] # Verify all the servers are in the databases for cell_name, server_id in server_ids.items(): diff --git a/nova/tests/functional/test_policy.py b/nova/tests/functional/test_policy.py index 23658d5fa166..bf0fa3241417 100644 --- a/nova/tests/functional/test_policy.py +++ b/nova/tests/functional/test_policy.py @@ -69,9 +69,9 @@ class HostStatusPolicyTestCase(test.TestCase, # Starting with microversion 2.37 the networks field is required. kwargs['networks'] = networks server = self._build_minimal_create_server_request( - self.api, 'test_host_status_unknown_only', **kwargs) + 'test_host_status_unknown_only', **kwargs) server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') return server @staticmethod @@ -94,12 +94,12 @@ class HostStatusPolicyTestCase(test.TestCase, server = self._get_server(admin_func()) # We need to wait for ACTIVE if this was a post rebuild server action, # else a subsequent rebuild request will fail with a 409 in the API. - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # Verify admin can see the host status UP. self.assertEqual('UP', server['host_status']) # Get server as normal non-admin user. 
server = self._get_server(func()) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # Verify non-admins do not receive the host_status field because it is # not UNKNOWN. self.assertNotIn('host_status', server) diff --git a/nova/tests/functional/test_scheduler.py b/nova/tests/functional/test_scheduler.py index 9b50bc5809e1..d9953cdbbc22 100644 --- a/nova/tests/functional/test_scheduler.py +++ b/nova/tests/functional/test_scheduler.py @@ -50,8 +50,7 @@ class MultiCellSchedulerTestCase(test.TestCase, self.addCleanup(fake_image.FakeImageService_reset) def _test_create_and_migrate(self, expected_status, az=None): - server = self._build_minimal_create_server_request(self.api, - 'some-server', + server = self._build_minimal_create_server_request('some-server', az=az) post = {'server': server} # If forcing the server onto a host we have to use the admin API. @@ -59,8 +58,7 @@ created_server = api.post_server(post) # Wait for it to finish being created - found_server = self._wait_for_state_change( - self.admin_api, created_server, 'ACTIVE') + found_server = self._wait_for_state_change(created_server, 'ACTIVE') return self.admin_api.api_post( '/servers/%s/action' % found_server['id'], {'migrate': None}, diff --git a/nova/tests/functional/test_server_external_events.py b/nova/tests/functional/test_server_external_events.py index 0e25b6f10e0b..7e97152535bd 100644 --- a/nova/tests/functional/test_server_external_events.py +++ b/nova/tests/functional/test_server_external_events.py @@ -28,12 +28,11 @@ class ServerExternalEventsTestV276( flavors = self.api.get_flavors() server_req = self._build_minimal_create_server_request( - self.api, "some-server", flavor_id=flavors[0]["id"], + "some-server", flavor_id=flavors[0]["id"], image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6", networks='none') created_server = self.api.post_server({'server': server_req}) - self.server = self._wait_for_state_change( - self.api, created_server, 'ACTIVE') + self.server = self._wait_for_state_change(created_server, 'ACTIVE') self.power_off = {'name': 'power-update', 'tag': 'POWER_OFF', 'server_uuid': self.server["id"]} @@ -50,7 +49,7 @@ class ServerExternalEventsTestV276( expected_params = {'OS-EXT-STS:task_state': None, 'OS-EXT-STS:vm_state': vm_states.STOPPED, 'OS-EXT-STS:power_state': power_state.SHUTDOWN} - server = self._wait_for_server_parameter(self.api, self.server, + server = self._wait_for_server_parameter(self.server, expected_params) msg = ' with target power state POWER_OFF.' self.assertIn(msg, self.stdlog.logger.output) @@ -79,8 +78,7 @@ class ServerExternalEventsTestV276( expected_params = {'OS-EXT-STS:task_state': None, 'OS-EXT-STS:vm_state': vm_states.ACTIVE, 'OS-EXT-STS:power_state': power_state.RUNNING} - server = self._wait_for_server_parameter(self.api, self.server, - expected_params) + server = self._wait_for_server_parameter(self.server, expected_params) msg = ' with target power state POWER_ON.' self.assertIn(msg, self.stdlog.logger.output) # Test if this is logged in the instance action list. diff --git a/nova/tests/functional/test_server_faults.py b/nova/tests/functional/test_server_faults.py index 67853ed4af7a..b1440b9aa5f3 100644 --- a/nova/tests/functional/test_server_faults.py +++ b/nova/tests/functional/test_server_faults.py @@ -55,18 +55,18 @@ class ServerFaultTestCase(test.TestCase, """ # Create the server with the non-admin user. 
server = self._build_minimal_create_server_request( - self.api, 'test_server_fault_non_nova_exception', + 'test_server_fault_non_nova_exception', image_uuid=fake_image.get_valid_image_id(), networks=[{'port': nova_fixtures.NeutronFixture.port_1['id']}]) server = self.api.post_server({'server': server}) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Stop the server before rebooting it so that after the driver.reboot # method raises an exception, the fake driver does not report the # instance power state as running - that will make the compute manager # set the instance vm_state to error. self.api.post_server_action(server['id'], {'os-stop': None}) - server = self._wait_for_state_change(self.admin_api, server, 'SHUTOFF') + server = self._wait_for_state_change(server, 'SHUTOFF') # Stub out the compute driver reboot method to raise a non-nova # exception to simulate some error from the underlying hypervisor @@ -83,8 +83,8 @@ class ServerFaultTestCase(test.TestCase, # decorator runs before the reverts_task_state decorator so we will # be sure the fault is set on the server. server = self._wait_for_server_parameter( - self.api, server, {'status': 'ERROR', - 'OS-EXT-STS:task_state': None}) + server, {'status': 'ERROR', 'OS-EXT-STS:task_state': None}, + api=self.api) mock_reboot.assert_called_once() # The server fault from the non-admin user API response should not # have details in it. diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py index 9560c6d590e4..8684a4313111 100644 --- a/nova/tests/functional/test_server_group.py +++ b/nova/tests/functional/test_server_group.py @@ -90,7 +90,7 @@ class ServerGroupTestBase(test.TestCase, expected_status='ACTIVE', flavor=None, az=None): server = self._build_minimal_create_server_request( - self.api, 'some-server', + 'some-server', image_uuid='a2459075-d96c-40d5-893e-577ff92e721c', networks=[], az=az) if flavor: @@ -103,7 +103,7 @@ class ServerGroupTestBase(test.TestCase, # Wait for it to finish being created found_server = self._wait_for_state_change( - self.admin_api, created_server, expected_status) + created_server, expected_status) return found_server @@ -323,8 +323,7 @@ class ServerGroupTestV21(ServerGroupTestBase): '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}} self.api.post_server_action(servers[1]['id'], post) - rebuilt_server = self._wait_for_state_change( - self.admin_api, servers[1], 'ACTIVE') + rebuilt_server = self._wait_for_state_change(servers[1], 'ACTIVE') self.assertEqual(post['rebuild']['imageRef'], rebuilt_server.get('image')['id']) @@ -369,7 +368,7 @@ class ServerGroupTestV21(ServerGroupTestBase): post = {'migrate': {}} self.admin_api.post_server_action(servers[1]['id'], post) migrated_server = self._wait_for_state_change( - self.admin_api, servers[1], 'VERIFY_RESIZE') + servers[1], 'VERIFY_RESIZE') self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'], migrated_server['OS-EXT-SRV-ATTR:host']) @@ -384,7 +383,7 @@ class ServerGroupTestV21(ServerGroupTestBase): server1_old_host = servers[1]['OS-EXT-SRV-ATTR:host'] self.admin_api.post_server_action(servers[1]['id'], post) migrated_server = self._wait_for_state_change( - self.admin_api, servers[1], 'VERIFY_RESIZE') + servers[1], 'VERIFY_RESIZE') self.assertEqual(server1_old_host, migrated_server['OS-EXT-SRV-ATTR:host']) @@ -424,8 +423,7 @@ class ServerGroupTestV21(ServerGroupTestBase): post = {'evacuate': {'onSharedStorage': False}} 
self.admin_api.post_server_action(servers[1]['id'], post) self._wait_for_migration_status(servers[1], ['done']) - evacuated_server = self._wait_for_state_change( - self.admin_api, servers[1], 'ACTIVE') + evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE') # check that the server is evacuated to another host self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'], @@ -447,7 +445,7 @@ class ServerGroupTestV21(ServerGroupTestBase): self.admin_api.post_server_action(servers[1]['id'], post) self._wait_for_migration_status(servers[1], ['error']) server_after_failed_evac = self._wait_for_state_change( - self.admin_api, servers[1], 'ERROR') + servers[1], 'ERROR') # assert that after a failed evac the server is active on the same host # as before @@ -467,7 +465,7 @@ class ServerGroupTestV21(ServerGroupTestBase): self.admin_api.post_server_action(servers[1]['id'], post) self._wait_for_migration_status(servers[1], ['error']) server_after_failed_evac = self._wait_for_state_change( - self.admin_api, servers[1], 'ERROR') + servers[1], 'ERROR') # assert that after a failed evac the server is active on the same host # as before @@ -608,8 +606,7 @@ class ServerGroupTestV215(ServerGroupTestV21): post = {'evacuate': {}} self.admin_api.post_server_action(servers[1]['id'], post) self._wait_for_migration_status(servers[1], ['done']) - evacuated_server = self._wait_for_state_change( - self.admin_api, servers[1], 'ACTIVE') + evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE') # check that the server is evacuated self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'], @@ -633,7 +630,7 @@ class ServerGroupTestV215(ServerGroupTestV21): self.admin_api.post_server_action(servers[1]['id'], post) self._wait_for_migration_status(servers[1], ['error']) server_after_failed_evac = self._wait_for_state_change( - self.admin_api, servers[1], 'ERROR') + servers[1], 'ERROR') # assert that after a failed evac the server is active on the same host # as before @@ -653,7 +650,7 @@ class ServerGroupTestV215(ServerGroupTestV21): self.admin_api.post_server_action(servers[1]['id'], post) self._wait_for_migration_status(servers[1], ['error']) server_after_failed_evac = self._wait_for_state_change( - self.admin_api, servers[1], 'ERROR') + servers[1], 'ERROR') # assert that after a failed evac the server is active on the same host # as before @@ -767,7 +764,7 @@ class ServerGroupTestV215(ServerGroupTestV21): post = {'migrate': {}} self.admin_api.post_server_action(servers[1]['id'], post) migrated_server = self._wait_for_state_change( - self.admin_api, servers[1], 'VERIFY_RESIZE') + servers[1], 'VERIFY_RESIZE') return [migrated_server['OS-EXT-SRV-ATTR:host'], servers[0]['OS-EXT-SRV-ATTR:host']] @@ -794,8 +791,7 @@ class ServerGroupTestV215(ServerGroupTestV21): post = {'evacuate': {}} self.admin_api.post_server_action(servers[1]['id'], post) self._wait_for_migration_status(servers[1], ['done']) - evacuated_server = self._wait_for_state_change( - self.admin_api, servers[1], 'ACTIVE') + evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE') # Note(gibi): need to get the server again as the state of the instance # goes to ACTIVE first then the host of the instance changes to the @@ -974,7 +970,6 @@ class TestAntiAffinityLiveMigration(test.TestCase, servers = [] for x in range(2): server = self._build_minimal_create_server_request( - self.api, 'test_serial_no_valid_host_then_pass_with_third_host-%d' % x, networks='none') # Add the group hint so the server is created in our group. 
@@ -986,8 +981,7 @@ class TestAntiAffinityLiveMigration(test.TestCase, with utils.temporary_mutation(self.api, microversion='2.37'): server = self.api.post_server(server_req) servers.append( - self._wait_for_state_change( - self.admin_api, server, 'ACTIVE')) + self._wait_for_state_change(server, 'ACTIVE')) # Make sure each server is on a unique host. hosts = set([svr['OS-EXT-SRV-ATTR:host'] for svr in servers]) @@ -1021,7 +1015,7 @@ class TestAntiAffinityLiveMigration(test.TestCase, # should work this time. self.start_service('compute', host='host3') self.admin_api.post_server_action(server['id'], body) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Now the server should be on host3 since that was the only available # host for the live migration. self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host']) diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py index 7c56bf95fd6a..4f7717e4c6c9 100644 --- a/nova/tests/functional/test_servers.py +++ b/nova/tests/functional/test_servers.py @@ -73,11 +73,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase): self.computes = {} super(ServersTestBase, self).setUp() - def _wait_for_server_parameter(self, admin_api, server, expected_params, + def _wait_for_server_parameter(self, server, expected_params, max_retries=10): + api = getattr(self, 'admin_api', self.api) retry_count = 0 while True: - server = admin_api.get_server(server['id']) + server = api.get_server(server['id']) if all([server[attr] == expected_params[attr] for attr in expected_params]): break @@ -92,7 +93,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase): def _wait_for_state_change(self, server, expected_status, max_retries=10): return self._wait_for_server_parameter( - self.api, server, {'status': expected_status}, max_retries) + server, {'status': expected_status}, max_retries) # TODO(stephenfin): Remove this once we subclass 'InstanceHelperMixin' def _wait_until_deleted(self, server): @@ -1404,7 +1405,7 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase, } } server = self.api.post_server(server_req_body) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # Disable the host we're on so ComputeFilter would have ruled it out # normally @@ -1434,10 +1435,7 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase, rebuild_req_body, check_response_status=[500]) # Look for the failed rebuild action. self._wait_for_action_fail_completion( - server, instance_actions.REBUILD, 'rebuild_server', - # Before microversion 2.51 events are only returned for instance - # actions if you're an admin. - self.api_fixture.admin_api) + server, instance_actions.REBUILD, 'rebuild_server') # Assert the server image_ref was rolled back on failure. 
server = self.api.get_server(server['id']) self.assertEqual(original_image_ref, server['image']['id']) @@ -1515,7 +1513,7 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase, } } server = self.api.post_server(server_req_body) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') flavor = self.api.api_get('/flavors/1').body['flavor'] @@ -1541,7 +1539,7 @@ self.api.api_post('/servers/%s/action' % server['id'], rebuild_req_body) self._wait_for_server_parameter( - self.api, server, {'OS-EXT-STS:task_state': None}) + server, {'OS-EXT-STS:task_state': None}) # The usage and allocations should not have changed. rp_usages = _get_provider_usages(rp_uuid) @@ -1577,7 +1575,7 @@ } } server = self.api.post_server(server_req_body) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # For a volume-backed server, the image ref will be an empty string # in the server response. self.assertEqual('', server['image']) @@ -1787,7 +1785,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): post = {'confirmResize': None} self.api.post_server_action( server['id'], post, check_response_status=[204]) - server = self._wait_for_state_change(self.api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') # After the confirm fails and the server goes to ERROR, we should have an allocation only on the # destination host @@ -1826,7 +1824,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): # Revert the resize and check the usages post = {'revertResize': None} self.api.post_server_action(server['id'], post) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # Make sure the RequestSpec.flavor matches the original flavor. ctxt = context.get_admin_context() @@ -1908,7 +1906,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): # Revert the resize and check the usages post = {'revertResize': None} self.api.post_server_action(server['id'], post) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') self._run_periodics() @@ -2052,7 +2050,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): } } self.api.post_server_action(server['id'], resize_req) - self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + self._wait_for_state_change(server, 'VERIFY_RESIZE') # There should be resource usage for flavor1 on the source host.
self.assert_hypervisor_usage( @@ -2132,7 +2130,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): # VM to ERROR state, but it should remain on source compute expected_params = {'OS-EXT-SRV-ATTR:host': source_hostname, 'status': 'ERROR'} - server = self._wait_for_server_parameter(self.api, server, + server = self._wait_for_server_parameter(server, expected_params) # Check migrations @@ -2183,7 +2181,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): server, instance_actions.MIGRATE, 'NoValidHost') expected_params = {'OS-EXT-SRV-ATTR:host': source_hostname, 'status': 'ACTIVE'} - self._wait_for_server_parameter(self.api, server, expected_params) + self._wait_for_server_parameter(server, expected_params) self._run_periodics() @@ -2217,7 +2215,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): server['id'], post) expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ACTIVE'} - server = self._wait_for_server_parameter(self.api, server, + server = self._wait_for_server_parameter(server, expected_params) # Expect to have allocation and usages on both computes as the @@ -2292,7 +2290,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): self.api.post_server_action(server['id'], post) expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ACTIVE'} - server = self._wait_for_server_parameter(self.api, server, + server = self._wait_for_server_parameter(server, expected_params) # Run the periodics to show those don't modify allocations. @@ -2397,7 +2395,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): self.api.post_server_action(server['id'], post) expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ACTIVE'} - server = self._wait_for_server_parameter(self.api, server, + server = self._wait_for_server_parameter(server, expected_params) # Run the periodics to show those don't modify allocations. @@ -2474,8 +2472,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): # the migration will fail on the dest node and the instance will # stay ACTIVE and task_state will be set to None. server = self._wait_for_server_parameter( - self.api, server, {'status': 'ACTIVE', - 'OS-EXT-STS:task_state': None}) + server, {'status': 'ACTIVE', 'OS-EXT-STS:task_state': None}) # Run the periodics to show those don't modify allocations. self._run_periodics() @@ -2550,7 +2547,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): self.api.post_server_action(server['id'], {'evacuate': {}}) # the migration will fail on the dest node and the instance will # go into error state - server = self._wait_for_state_change(self.api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') # Run the periodics to show those don't modify allocations. 
self._run_periodics() @@ -2602,7 +2599,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): 'shelve': {} } self.api.post_server_action(server['id'], req) - self._wait_for_state_change(self.api, server, 'SHELVED') + self._wait_for_state_change(server, 'SHELVED') # the host should maintain the existing allocation for this instance # while the instance is shelved self.assertFlavorMatchesUsage(rp_uuid, self.flavor1) @@ -2622,7 +2619,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): 'unshelve': None } self.api.post_server_action(server['id'], req) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # the host should have resource usage as the instance is ACTIVE self.assertFlavorMatchesUsage(source_rp_uuid, self.flavor1) @@ -2639,8 +2636,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): 'shelveOffload': {} } self.api.post_server_action(server['id'], req) - self._wait_for_server_parameter( - self.api, server, {'status': 'SHELVED_OFFLOADED', + self._wait_for_server_parameter(server, {'status': 'SHELVED_OFFLOADED', 'OS-EXT-SRV-ATTR:host': None, 'OS-EXT-AZ:availability_zone': ''}) source_usages = self._get_provider_usages(source_rp_uuid) @@ -2671,7 +2667,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): 'unshelve': None } self.api.post_server_action(server['id'], req) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # unshelving an offloaded instance will call the scheduler so the # instance might end up on a different host current_hostname = server['OS-EXT-SRV-ATTR:host'] @@ -2707,7 +2703,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): 'unshelve': None } self.api.post_server_action(server['id'], req) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # unshelving an offloaded instance will call the scheduler so the # instance might end up on a different host current_hostname = server['OS-EXT-SRV-ATTR:host'] @@ -2746,7 +2742,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): } self.api.post_server_action(server['id'], post) - self._wait_for_server_parameter(self.api, server, + self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ACTIVE'}) @@ -2808,7 +2804,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): } self.api.post_server_action(server['id'], post) - self._wait_for_server_parameter(self.api, server, + self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ACTIVE'}) @@ -2947,7 +2943,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): # The _rollback_live_migration method in the compute manager will reset # the task_state on the instance, so wait for that to happen. 
server = self._wait_for_server_parameter( - self.api, server, {'OS-EXT-STS:task_state': None}) + server, {'OS-EXT-STS:task_state': None}) self.assertEqual(source_hostname, migration['source_compute']) self.assertEqual(dest_hostname, migration['dest_compute']) @@ -3089,14 +3085,13 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): def _server_created_with_host(self): hostname = self.compute1.host server_req = self._build_minimal_create_server_request( - self.api, "some-server", flavor_id=self.flavor1["id"], + "some-server", flavor_id=self.flavor1["id"], image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6", networks='none') server_req['host'] = hostname created_server = self.api.post_server({"server": server_req}) - server = self._wait_for_state_change( - self.api, created_server, "ACTIVE") + server = self._wait_for_state_change(created_server, "ACTIVE") return server def test_live_migration_after_server_created_with_host(self): @@ -3115,7 +3110,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): } self.api.post_server_action(created_server['id'], post) new_server = self._wait_for_server_parameter( - self.api, created_server, {'status': 'ACTIVE'}) + created_server, {'status': 'ACTIVE'}) inst_dest_host = new_server["OS-EXT-SRV-ATTR:host"] self.assertEqual(dest_hostname, inst_dest_host) @@ -3143,7 +3138,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): self.api.post_server_action(created_server['id'], post) expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ACTIVE'} - new_server = self._wait_for_server_parameter(self.api, created_server, + new_server = self._wait_for_server_parameter(created_server, expected_params) inst_dest_host = new_server["OS-EXT-SRV-ATTR:host"] @@ -3165,7 +3160,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): } } self.api.post_server_action(created_server['id'], resize_req) - self._wait_for_state_change(self.api, created_server, 'VERIFY_RESIZE') + self._wait_for_state_change(created_server, 'VERIFY_RESIZE') # Confirm the resize new_server = self._confirm_resize(created_server) @@ -3184,14 +3179,14 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): self.flags(shelved_offload_time=-1) req = {'shelve': {}} self.api.post_server_action(created_server['id'], req) - self._wait_for_state_change(self.api, created_server, 'SHELVED') + self._wait_for_state_change(created_server, 'SHELVED') req = {'shelveOffload': {}} self.api.post_server_action(created_server['id'], req) - self._wait_for_server_parameter( - self.api, created_server, {'status': 'SHELVED_OFFLOADED', - 'OS-EXT-SRV-ATTR:host': None, - 'OS-EXT-AZ:availability_zone': ''}) + self._wait_for_server_parameter(created_server, { + 'status': 'SHELVED_OFFLOADED', + 'OS-EXT-SRV-ATTR:host': None, + 'OS-EXT-AZ:availability_zone': ''}) # unshelve after shelve offload will do scheduling. 
this test case # wants to test the scenario when the scheduler selects a different host @@ -3203,8 +3198,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): req = {'unshelve': None} self.api.post_server_action(created_server['id'], req) - new_server = self._wait_for_state_change( - self.api, created_server, 'ACTIVE') + new_server = self._wait_for_state_change(created_server, 'ACTIVE') inst_dest_host = new_server["OS-EXT-SRV-ATTR:host"] self.assertEqual(dest_hostname, inst_dest_host) @@ -3217,12 +3211,12 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): supplied host_list, and does not call the scheduler. """ server_req = self._build_minimal_create_server_request( - self.api, "some-server", flavor_id=self.flavor1["id"], + "some-server", flavor_id=self.flavor1["id"], image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6", networks='none') created_server = self.api.post_server({"server": server_req}) - server = self._wait_for_state_change(self.api, created_server, + server = self._wait_for_state_change(created_server, "ACTIVE") inst_host = server["OS-EXT-SRV-ATTR:host"] uuid_orig = self._get_provider_uuid_by_host(inst_host) @@ -3274,7 +3268,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): # We will run out of alternates before populate_retry will # raise a MaxRetriesExceeded exception, so the migration will # fail and the server should be in status "ERROR" - server = self._wait_for_state_change(self.api, created_server, + server = self._wait_for_state_change(created_server, "ERROR") # The usage should be unchanged from the original flavor self.assertFlavorMatchesUsage(uuid_orig, self.flavor1) @@ -3285,7 +3279,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): usage = self._get_provider_usages(target_uuid) self.assertEqual(empty_usage, usage) else: - server = self._wait_for_state_change(self.api, created_server, + server = self._wait_for_state_change(created_server, "VERIFY_RESIZE") # Verify that the selected host failed, and was rescheduled to # an alternate host. @@ -3380,7 +3374,7 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase): # Revert the move and check the usages post = {'revertResize': None} self.api.post_server_action(server['id'], post) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') def _check_allocation(): self.assertFlavorMatchesUsage(source_rp_uuid, self.flavor1) @@ -3442,7 +3436,7 @@ class ServerLiveMigrateForceAndAbort( self.api.force_complete_migration(server['id'], migration['id']) - self._wait_for_server_parameter(self.api, server, + self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ACTIVE'}) @@ -3479,7 +3473,7 @@ class ServerLiveMigrateForceAndAbort( migration = self._wait_for_migration_status(server, ['running']) self.api.delete_migration(server['id'], migration['id']) - self._wait_for_server_parameter(self.api, server, + self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': source_hostname, 'status': 'ACTIVE'}) @@ -3546,13 +3540,12 @@ class ServerRescheduleTests(integrated_helpers.ProviderUsageBaseTestCase): rescheduled to another node.
""" server_req = self._build_minimal_create_server_request( - self.api, 'some-server', flavor_id=self.flavor1['id'], + 'some-server', flavor_id=self.flavor1['id'], image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', networks='none') created_server = self.api.post_server({'server': server_req}) - server = self._wait_for_state_change( - self.api, created_server, 'ACTIVE') + server = self._wait_for_state_change(created_server, 'ACTIVE') dest_hostname = server['OS-EXT-SRV-ATTR:host'] failed_hostname = self._other_hostname(dest_hostname) @@ -3577,7 +3570,7 @@ class ServerRescheduleTests(integrated_helpers.ProviderUsageBaseTestCase): """ server_req = self._build_minimal_create_server_request( - self.api, 'some-server', flavor_id=self.flavor1['id'], + 'some-server', flavor_id=self.flavor1['id'], image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', networks='none') @@ -3592,8 +3585,7 @@ class ServerRescheduleTests(integrated_helpers.ProviderUsageBaseTestCase): consumer_uuid=uuids.inst1, error='testing')]): server = self.api.post_server({'server': server_req}) - server = self._wait_for_state_change( - self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self._delete_and_check_allocations(server) @@ -3636,12 +3628,12 @@ class ServerBuildAbortTests(integrated_helpers.ProviderUsageBaseTestCase): from the source node when the build is aborted on that node. """ server_req = self._build_minimal_create_server_request( - self.api, 'some-server', flavor_id=self.flavor1['id'], + 'some-server', flavor_id=self.flavor1['id'], image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', networks='none') created_server = self.api.post_server({'server': server_req}) - self._wait_for_state_change(self.api, created_server, 'ERROR') + self._wait_for_state_change(created_server, 'ERROR') failed_hostname = self.compute1.manager.host @@ -3697,12 +3689,12 @@ class ServerUnshelveSpawnFailTests( 'DISK_GB': 0}, rp_uuid) server_req = self._build_minimal_create_server_request( - self.api, 'unshelve-spawn-fail', flavor_id=self.flavor1['id'], + 'unshelve-spawn-fail', flavor_id=self.flavor1['id'], image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', networks='none') server = self.api.post_server({'server': server_req}) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # assert allocations exist for the host self.assertFlavorMatchesUsage(rp_uuid, self.flavor1) @@ -3710,8 +3702,7 @@ class ServerUnshelveSpawnFailTests( # shelve offload the server self.flags(shelved_offload_time=0) self.api.post_server_action(server['id'], {'shelve': None}) - self._wait_for_server_parameter( - self.api, server, {'status': 'SHELVED_OFFLOADED', + self._wait_for_server_parameter(server, {'status': 'SHELVED_OFFLOADED', 'OS-EXT-SRV-ATTR:host': None}) # assert allocations were removed from the host @@ -3764,7 +3755,7 @@ class ServerSoftDeleteTests(integrated_helpers.ProviderUsageBaseTestCase): def _soft_delete_and_check_allocation(self, server, hostname): self.api.delete_server(server['id']) - server = self._wait_for_state_change(self.api, server, 'SOFT_DELETED') + server = self._wait_for_state_change(server, 'SOFT_DELETED') self._run_periodics() @@ -3842,7 +3833,7 @@ class ServerSoftDeleteTests(integrated_helpers.ProviderUsageBaseTestCase): post = {'restore': {}} self.api.post_server_action(server['id'], post) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # after restore the allocations should be kept 
self.assertFlavorMatchesUsage(rp_uuid, self.flavor1) @@ -3901,11 +3892,11 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase): with nova.utils.temporary_mutation(self.api, microversion='2.35'): image_id = self.api.get_images()[0]['id'] server_req = self._build_minimal_create_server_request( - self.api, 'trait-based-server', + 'trait-based-server', image_uuid=image_id, flavor_id=self.flavor_id, networks='none') server = self.api.post_server({'server': server_req}) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') return server def _create_volume_backed_server(self): @@ -3928,7 +3919,7 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase): } } server = self.api.post_server(server_req_body) - server = self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') return server def test_ephemeral_has_disk_allocation(self): @@ -3985,8 +3976,7 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase): self._start_compute('host2') self.admin_api.post_server_action(server['id'], {'migrate': None}) # Wait for the server to complete the cold migration. - server = self._wait_for_state_change( - self.admin_api, server, 'VERIFY_RESIZE') + server = self._wait_for_state_change(server, 'VERIFY_RESIZE') self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host']) # Confirm the cold migration and check usage and the request spec. self._confirm_resize(server) @@ -4002,7 +3992,7 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase): fake_notifier.stub_notifier(self) self.addCleanup(fake_notifier.reset) self.api.post_server_action(server['id'], {'shelve': None}) - self._wait_for_state_change(self.api, server, 'SHELVED_OFFLOADED') + self._wait_for_state_change(server, 'SHELVED_OFFLOADED') fake_notifier.wait_for_versioned_notifications( 'instance.shelve_offload.end') # The server should not have any allocations since it's not currently @@ -4012,7 +4002,7 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase): # Now unshelve the server and make sure there are still no DISK_GB # allocations for the root disk. self.api.post_server_action(server['id'], {'unshelve': None}) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') allocs = self._get_allocations_by_server_uuid(server['id']) resources = list(allocs.values())[0]['resources'] self.assertEqual(expected_usage, resources['DISK_GB']) @@ -4058,7 +4048,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): """ server_req = self._build_minimal_create_server_request( - self.api, 'trait-based-server', + 'trait-based-server', image_uuid=image_id, flavor_id=flavor_id, networks='none') return self.api.post_server({'server': server_req}) @@ -4107,7 +4097,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): # Create server using flavor with required trait server = self._create_server_with_traits(self.flavor_with_trait['id'], self.image_id_without_trait) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Assert the server ended up on the expected compute host that has # the required trait. 
self.assertEqual(self.compute1.host, server['OS-EXT-SRV-ATTR:host']) @@ -4122,7 +4112,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): self.image_id_without_trait) # The server should go to ERROR state because there is no valid host. - server = self._wait_for_state_change(self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIsNone(server['OS-EXT-SRV-ATTR:host']) # Make sure the failure was due to NoValidHost by checking the fault. self.assertIn('fault', server) @@ -4147,7 +4137,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): self.image_id_without_trait ) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Assert the server ended up on the expected compute host that doesn't # have the forbidden trait. @@ -4165,7 +4155,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): ) # The server should go to ERROR state because there is no valid host. - server = self._wait_for_state_change(self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIsNone(server['OS-EXT-SRV-ATTR:host']) # Make sure the failure was due to NoValidHost by checking the fault. self.assertIn('fault', server) @@ -4184,7 +4174,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): # Create server using only image trait server = self._create_server_with_traits( self.flavor_without_trait['id'], self.image_id_with_trait) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Assert the server ended up on the expected compute host that has # the required trait. self.assertEqual(self.compute2.host, server['OS-EXT-SRV-ATTR:host']) @@ -4203,7 +4193,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): # Create server using flavor and image trait server = self._create_server_with_traits( self.flavor_with_trait['id'], self.image_id_with_trait) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Assert the server ended up on the expected compute host that has # the required trait. self.assertEqual(self.compute2.host, server['OS-EXT-SRV-ATTR:host']) @@ -4226,7 +4216,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): nova_fixtures.CinderFixture. IMAGE_WITH_TRAITS_BACKED_VOL) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Assert the server ended up on the expected compute host that has # the required trait. self.assertEqual(self.compute2.host, server['OS-EXT-SRV-ATTR:host']) @@ -4250,7 +4240,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): nova_fixtures.CinderFixture. IMAGE_WITH_TRAITS_BACKED_VOL) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Assert the server ended up on the expected compute host that has # the required trait. 
self.assertEqual(self.compute2.host, server['OS-EXT-SRV-ATTR:host']) @@ -4268,7 +4258,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): server = self._create_server_with_traits(self.flavor_with_trait['id'], self.image_id_without_trait) # The server should go to ERROR state because there is no valid host. - server = self._wait_for_state_change(self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIsNone(server['OS-EXT-SRV-ATTR:host']) # Make sure the failure was due to NoValidHost by checking the fault. self.assertIn('fault', server) @@ -4287,7 +4277,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): server = self._create_server_with_traits( self.flavor_without_trait['id'], self.image_id_with_trait) # The server should go to ERROR state because there is no valid host. - server = self._wait_for_state_change(self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIsNone(server['OS-EXT-SRV-ATTR:host']) # Make sure the failure was due to NoValidHost by checking the fault. self.assertIn('fault', server) @@ -4302,7 +4292,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): server = self._create_server_with_traits( self.flavor_with_trait['id'], self.image_id_with_trait) # The server should go to ERROR state because there is no valid host. - server = self._wait_for_state_change(self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIsNone(server['OS-EXT-SRV-ATTR:host']) # Make sure the failure was due to NoValidHost by checking the fault. self.assertIn('fault', server) @@ -4322,7 +4312,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): IMAGE_WITH_TRAITS_BACKED_VOL) # The server should go to ERROR state because there is no valid host. - server = self._wait_for_state_change(self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIsNone(server['OS-EXT-SRV-ATTR:host']) # Make sure the failure was due to NoValidHost by checking the fault. 
self.assertIn('fault', server) @@ -4345,7 +4335,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): # create a server without traits on image and with traits on flavor server = self._create_server_with_traits( self.flavor_with_trait['id'], self.image_id_without_trait) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # make the compute node full and ensure rebuild still succeeds inv = {"resource_class": "VCPU", @@ -4361,7 +4351,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): self.api.api_post('/servers/%s/action' % server['id'], rebuild_req_body) self._wait_for_server_parameter( - self.api, server, {'OS-EXT-STS:task_state': None}) + server, {'OS-EXT-STS:task_state': None}) allocs = self._get_allocations_by_server_uuid(server['id']) self.assertIn(rp_uuid, allocs) @@ -4387,7 +4377,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): # create a server without traits on image and with traits on flavor server = self._create_server_with_traits( self.flavor_with_trait['id'], self.image_id_without_trait) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Now rebuild the server with a different image with traits rebuild_req_body = { @@ -4400,7 +4390,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): rebuild_req_body) # Look for the failed rebuild action. self._wait_for_action_fail_completion( - server, instance_actions.REBUILD, 'rebuild_server', self.admin_api) + server, instance_actions.REBUILD, 'rebuild_server') # Assert the server image_ref was rolled back on failure. server = self.api.get_server(server['id']) self.assertEqual(self.image_id_without_trait, server['image']['id']) @@ -4431,7 +4421,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): # create a server with traits in both image and flavor server = self._create_server_with_traits( self.flavor_with_trait['id'], self.image_id_with_trait) - server = self._wait_for_state_change(self.admin_api, server, + server = self._wait_for_state_change(server, 'ACTIVE') # Now rebuild the server with a different image with traits @@ -4443,7 +4433,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): self.api.api_post('/servers/%s/action' % server['id'], rebuild_req_body) self._wait_for_server_parameter( - self.api, server, {'OS-EXT-STS:task_state': None}) + server, {'OS-EXT-STS:task_state': None}) allocs = self._get_allocations_by_server_uuid(server['id']) self.assertIn(rp_uuid, allocs) @@ -4473,7 +4463,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): server = self._create_server_with_traits( self.flavor_with_forbidden_trait['id'], self.image_id_without_trait) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') # Now rebuild the server with a different image with traits rebuild_req_body = { @@ -4486,7 +4476,7 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase): rebuild_req_body) # Look for the failed rebuild action. self._wait_for_action_fail_completion( - server, instance_actions.REBUILD, 'rebuild_server', self.admin_api) + server, instance_actions.REBUILD, 'rebuild_server') # Assert the server image_ref was rolled back on failure.
server = self.api.get_server(server['id']) self.assertEqual(self.image_id_without_trait, server['image']['id']) @@ -4619,7 +4609,7 @@ class ConsumerGenerationConflictTest( def test_create_server_fails_as_placement_reports_consumer_conflict(self): server_req = self._build_minimal_create_server_request( - self.api, 'some-server', flavor_id=self.flavor['id'], + 'some-server', flavor_id=self.flavor['id'], image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', networks='none') @@ -4637,8 +4627,7 @@ class ConsumerGenerationConflictTest( mock_put.return_value = rsp created_server = self.api.post_server({'server': server_req}) - server = self._wait_for_state_change( - self.admin_api, created_server, 'ERROR') + server = self._wait_for_state_change(created_server, 'ERROR') # This is not a conflict that the API user can ever resolve. It is a # serious inconsistency in our database or a bug in the scheduler code @@ -4673,7 +4662,7 @@ class ConsumerGenerationConflictTest( request = {'migrate': None} self.api.post_server_action(server['id'], request, check_response_status=[202]) - self._wait_for_server_parameter(self.admin_api, server, + self._wait_for_server_parameter(server, {'OS-EXT-STS:task_state': None}) # The instance action should have failed with details. @@ -4718,7 +4707,7 @@ class ConsumerGenerationConflictTest( request = {'migrate': None} self.api.post_server_action(server['id'], request, check_response_status=[202]) - self._wait_for_server_parameter(self.admin_api, server, + self._wait_for_server_parameter(server, {'OS-EXT-STS:task_state': None}) self._assert_resize_migrate_action_fail( @@ -4770,7 +4759,7 @@ class ConsumerGenerationConflictTest( post = {'confirmResize': None} self.api.post_server_action( server['id'], post, check_response_status=[204]) - server = self._wait_for_state_change(self.api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIn('Failed to delete allocations', server['fault']['message']) @@ -4821,7 +4810,7 @@ class ConsumerGenerationConflictTest( post = {'revertResize': None} self.api.post_server_action(server['id'], post) - server = self._wait_for_state_change(self.api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertEqual(1, mock_post.call_count) @@ -4873,7 +4862,7 @@ class ConsumerGenerationConflictTest( post = {'revertResize': None} self.api.post_server_action(server['id'], post) - server = self._wait_for_state_change(self.api, server, 'ERROR',) + server = self._wait_for_state_change(server, 'ERROR') self.assertEqual(1, mock_post.call_count) @@ -4932,7 +4921,7 @@ class ConsumerGenerationConflictTest( } self.api.post_server_action(server['id'], post) - server = self._wait_for_state_change(self.api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertEqual(1, mock_put.call_count) @@ -4998,7 +4987,7 @@ class ConsumerGenerationConflictTest( # Nova failed to clean up on the source host. This right now puts # the instance into ERROR state and fails the migration.
- server = self._wait_for_server_parameter(self.api, server, + server = self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ERROR'}) self._wait_for_migration_status(server, ['error']) @@ -5072,7 +5061,7 @@ class ConsumerGenerationConflictTest( post['evacuate']['host'] = dest_hostname self.api.post_server_action(server['id'], post) - server = self._wait_for_state_change(self.api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertEqual(1, mock_put.call_count) @@ -5111,7 +5100,7 @@ class ConsumerGenerationConflictTest( mock_put.return_value = rsp self.api.delete_server(server['id']) - server = self._wait_for_state_change(self.admin_api, server, + server = self._wait_for_state_change(server, 'ERROR') self.assertEqual(1, mock_put.call_count) @@ -5236,7 +5225,7 @@ class ServerMovingTestsWithNestedResourceRequests( self.api.post_server_action(server['id'], post) self._wait_for_migration_status(server, ['error']) - self._wait_for_server_parameter(self.api, server, + self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': source_hostname, 'status': 'ACTIVE'}) self.assertIn('Unable to move instance %s to host host2. The instance ' @@ -5294,7 +5283,7 @@ class ServerMovingTestsWithNestedResourceRequests( self._wait_for_migration_status(server, ['error']) expected_params = {'OS-EXT-SRV-ATTR:host': source_hostname, 'status': 'ACTIVE'} - server = self._wait_for_server_parameter(self.api, server, + server = self._wait_for_server_parameter(server, expected_params) self.assertIn('Unable to move instance %s to host host2. The instance ' 'has complex allocations on the source host so move ' @@ -5416,7 +5405,7 @@ class ServerMovingTestsFromFlatToNested( # blindly copy the source allocation to the destination but on the # destination there is no inventory of CUSTOM_MAGIC on the compute node # provider as that resource is reported on a child provider. - self._wait_for_server_parameter(self.api, server, + self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': 'host1', 'status': 'ACTIVE'}) @@ -5505,7 +5494,7 @@ class ServerMovingTestsFromFlatToNested( # blindly copy the source allocation to the destination but on the # destination there is no inventory of CUSTOM_MAGIC on the compute node # provider as that resource is reported on a child provider. 
- self._wait_for_server_parameter(self.api, server, + self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': 'host1', 'status': 'ACTIVE'}) @@ -5614,7 +5603,7 @@ class PortResourceRequestBasedSchedulingTestBase( def _create_server(self, flavor, networks, host=None): server_req = self._build_minimal_create_server_request( - self.api, 'bandwidth-aware-server', + 'bandwidth-aware-server', image_uuid='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', flavor_id=flavor['id'], networks=networks, host=host) @@ -5786,7 +5775,7 @@ class UnsupportedPortResourceRequestBasedSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'port': self.neutron.port_1['id']}]) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # try to add a port with resource request post = { @@ -5808,7 +5797,7 @@ class UnsupportedPortResourceRequestBasedSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'port': self.neutron.port_1['id']}]) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # the interfaceAttach operation below will result in a new port being # created in the network that is attached. Make sure that neutron @@ -5841,7 +5830,7 @@ class UnsupportedPortResourceRequestBasedSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'uuid': self.neutron.network_1['id']}]) - server = self._wait_for_state_change(self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertEqual(500, server['fault']['code']) self.assertIn('Failed to allocate the network', @@ -5869,7 +5858,7 @@ class UnsupportedPortResourceRequestBasedSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'port': self.neutron.port_1['id']}]) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # We need to simulate that the above server has a port that has # resource request; we cannot boot with such a port but legacy servers @@ -5896,7 +5885,7 @@ class UnsupportedPortResourceRequestBasedSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'port': self.neutron.port_1['id']}]) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # with default config shelve means immediate offload as well req = { @@ -5904,7 +5893,7 @@ class UnsupportedPortResourceRequestBasedSchedulingTest( } self.api.post_server_action(server['id'], req) self._wait_for_server_parameter( - self.api, server, {'status': 'SHELVED_OFFLOADED'}) + server, {'status': 'SHELVED_OFFLOADED'}) # We need to simulate that the above server has a port that has # resource request; we cannot boot with such a port but legacy servers @@ -5930,7 +5919,7 @@ class UnsupportedPortResourceRequestBasedSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'port': self.neutron.port_1['id']}]) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # avoid automatic shelve offloading self.flags(shelved_offload_time=-1) @@ -5938,8 +5927,7 @@ class UnsupportedPortResourceRequestBasedSchedulingTest( 'shelve': {} } self.api.post_server_action(server['id'], req) - self._wait_for_server_parameter( - self.api, server, {'status': 'SHELVED'}) + self._wait_for_server_parameter(server, {'status': 'SHELVED'}) # We need to simulate that the above server has a port that has 
# resource request; we cannot boot with such a port but legacy servers @@ -5947,7 +5935,7 @@ class UnsupportedPortResourceRequestBasedSchedulingTest( self._add_resource_request_to_a_bound_port(self.neutron.port_1['id']) self.api.post_server_action(server['id'], {'unshelve': None}) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') class NonAdminUnsupportedPortResourceRequestBasedSchedulingTest( @@ -5989,7 +5977,7 @@ class PortResourceRequestBasedSchedulingTest( flavor=self.flavor, networks=[{'port': non_qos_port['id']}, {'port': qos_port['id']}]) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') updated_non_qos_port = self.neutron.show_port( non_qos_port['id'])['port'] updated_qos_port = self.neutron.show_port(qos_port['id'])['port'] @@ -6035,7 +6023,7 @@ class PortResourceRequestBasedSchedulingTest( networks=[{'port': ovs_port['id']}, {'port': sriov_port['id']}]) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') ovs_port = self.neutron.show_port(ovs_port['id'])['port'] sriov_port = self.neutron.show_port(sriov_port['id'])['port'] @@ -6078,7 +6066,7 @@ class PortResourceRequestBasedSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'port': port['id']}]) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') allocations = self.placement_api.get( '/allocations/%s' % server['id']).body['allocations'] @@ -6138,7 +6126,7 @@ class PortResourceRequestBasedSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'port': port['id']}]) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') allocations = self.placement_api.get( '/allocations/%s' % server['id']).body['allocations'] @@ -6236,7 +6224,7 @@ class PortResourceRequestBasedSchedulingTest( {'port': sriov_port_with_res_req['id']}, {'port': sriov_port['id']}]) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') sriov_port = self.neutron.show_port(sriov_port['id'])['port'] sriov_port_with_res_req = self.neutron.show_port( @@ -6294,7 +6282,7 @@ class PortResourceRequestBasedSchedulingTest( flavor=self.flavor_with_group_policy, networks=[{'port': sriov_port['id']}]) - self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') sriov_port = self.neutron.show_port(sriov_port['id'])['port'] sriov_binding = sriov_port['binding:profile'] @@ -6320,7 +6308,7 @@ class PortResourceRequestBasedSchedulingTest( # for the port in this case is PF2) it sees the whole host as a # candidate and in our host there is an available VF for the request even # if that is on the wrong PF. - server = self._wait_for_state_change(self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIn( 'Exceeded maximum number of retries.
Exhausted all hosts ' 'available for retrying build failures for instance', @@ -6335,7 +6323,7 @@ class PortResourceRequestBasedSchedulingTest( flavor=self.flavor, networks=[{'port': port['id']}]) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') port = self.neutron.show_port(port['id'])['port'] @@ -6470,7 +6458,7 @@ class ServerMoveWithPortResourceRequestTest( flavor=self.flavor_with_group_policy, networks=[{'port': port['id']} for port in ports], host='host1') - return self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + return self._wait_for_state_change(server, 'ACTIVE') def _delete_server_and_check_allocations( self, server, qos_port, qos_sriov_port): @@ -6520,7 +6508,7 @@ class ServerMoveWithPortResourceRequestTest( self.api.post_server_action(server['id'], {'migrate': None}, check_response_status=[202]) - self._wait_for_server_parameter(self.admin_api, server, + self._wait_for_server_parameter(server, {'OS-EXT-STS:task_state': None}) self._assert_resize_migrate_action_fail( @@ -6584,7 +6572,7 @@ class ServerMoveWithPortResourceRequestTest( side_effect=fake_get_service): self.api.post_server_action(server['id'], {'migrate': None}) - self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + self._wait_for_state_change(server, 'VERIFY_RESIZE') migration_uuid = self.get_migration_uuid_for_instance(server['id']) @@ -6627,7 +6615,7 @@ class ServerMoveWithPortResourceRequestTest( else: self.api.post_server_action(server['id'], {'migrate': None}) - self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + self._wait_for_state_change(server, 'VERIFY_RESIZE') migration_uuid = self.get_migration_uuid_for_instance(server['id']) @@ -6678,7 +6666,7 @@ class ServerMoveWithPortResourceRequestTest( else: self.api.post_server_action(server['id'], {'migrate': None}) - self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + self._wait_for_state_change(server, 'VERIFY_RESIZE') migration_uuid = self.get_migration_uuid_for_instance(server['id']) @@ -6690,7 +6678,7 @@ class ServerMoveWithPortResourceRequestTest( new_flavor=new_flavor) self.api.post_server_action(server['id'], {'revertResize': None}) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # check that allocation is moved back to the source host self._check_allocation( @@ -6759,7 +6747,7 @@ class ServerMoveWithPortResourceRequestTest( server['id'], {'resize': {"flavorRef": new_flavor['id']}}) else: self.api.post_server_action(server['id'], {'migrate': None}) - self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + self._wait_for_state_change(server, 'VERIFY_RESIZE') # ensure that resize is tried on two hosts, so we had a re-schedule self.assertEqual(['host2', 'host3'], prep_resize_calls) @@ -6820,8 +6808,7 @@ class ServerMoveWithPortResourceRequestTest( server['id'], {'resize': {"flavorRef": new_flavor['id']}}) else: self.api.post_server_action(server['id'], {'migrate': None}) - self._wait_for_server_parameter( - self.api, server, + self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': 'host1', 'status': 'ERROR'}) self._wait_for_migration_status(server, ['error']) @@ -6878,8 +6865,7 @@ class ServerMoveWithPortResourceRequestTest( # intentionally not trigger a re-schedule even if there is host3 as an # alternate. 
self.api.post_server_action(server['id'], {'migrate': None}) - server = self._wait_for_server_parameter( - self.api, server, + server = self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': 'host1', # Note that we have to wait for the task_state to be reverted # to None since that happens after the fault is recorded. @@ -6892,8 +6878,7 @@ class ServerMoveWithPortResourceRequestTest( server['fault']['message']) self._wait_for_action_fail_completion( - server, instance_actions.MIGRATE, 'compute_prep_resize', - self.admin_api) + server, instance_actions.MIGRATE, 'compute_prep_resize') fake_notifier.wait_for_versioned_notifications( 'instance.resize_prep.end') @@ -7120,8 +7105,7 @@ class ServerMoveWithPortResourceRequestTest( req['evacuate']['host'] = host self.api.post_server_action(server['id'], req) - self._wait_for_server_parameter( - self.api, server, + self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'}) @@ -7173,8 +7157,7 @@ class ServerMoveWithPortResourceRequestTest( self.api.post_server_action(server['id'], req) # Evacuate does not have a reschedule loop so the evacuate is expected to # simply fail and the server remains on the source host - server = self._wait_for_server_parameter( - self.api, server, + server = self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': 'host1', 'status': 'ACTIVE', 'OS-EXT-STS:task_state': None}) @@ -7231,8 +7214,7 @@ class ServerMoveWithPortResourceRequestTest( # The compute manager on host2 will raise from # _update_pci_request_spec_with_allocated_interface_name self.api.post_server_action(server['id'], {'evacuate': {}}) - server = self._wait_for_server_parameter( - self.api, server, + server = self._wait_for_server_parameter(server, {'OS-EXT-SRV-ATTR:host': 'host1', 'OS-EXT-STS:task_state': None, 'status': 'ERROR'}) @@ -7243,8 +7225,7 @@ class ServerMoveWithPortResourceRequestTest( server['fault']['message']) self._wait_for_action_fail_completion( - server, instance_actions.EVACUATE, 'compute_rebuild_instance', - self.admin_api) + server, instance_actions.EVACUATE, 'compute_rebuild_instance') fake_notifier.wait_for_versioned_notifications( 'instance.rebuild.error') @@ -7284,7 +7265,7 @@ class PortResourceRequestReSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'port': port['id']}]) - server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + server = self._wait_for_state_change(server, 'ACTIVE') updated_port = self.neutron.show_port(port['id'])['port'] dest_hostname = server['OS-EXT-SRV-ATTR:host'] @@ -7356,8 +7337,7 @@ class PortResourceRequestReSchedulingTest( server = self._create_server( flavor=self.flavor, networks=[{'port': port['id']}]) - server = self._wait_for_state_change( - self.admin_api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIn( 'Failed to get traits for resource provider', diff --git a/nova/tests/functional/test_servers_provider_tree.py b/nova/tests/functional/test_servers_provider_tree.py index 6bf60449a094..f2f0f65fda88 100644 --- a/nova/tests/functional/test_servers_provider_tree.py +++ b/nova/tests/functional/test_servers_provider_tree.py @@ -419,11 +419,11 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase): def _create_instance(self, flavor): server_req = self._build_minimal_create_server_request( - self.api, 'some-server', flavor_id=flavor['id'], + 'some-server', flavor_id=flavor['id'], image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', networks='none',
az='nova:host1') inst = self.api.post_server({'server': server_req}) - return self._wait_for_state_change(self.admin_api, inst, 'ACTIVE') + return self._wait_for_state_change(inst, 'ACTIVE') def test_reshape(self): """On startup, virt driver signals it needs to reshape, then does so. diff --git a/nova/tests/functional/wsgi/test_servers.py b/nova/tests/functional/wsgi/test_servers.py index 6e72b6f3f0be..c56003a2e3e7 100644 --- a/nova/tests/functional/wsgi/test_servers.py +++ b/nova/tests/functional/wsgi/test_servers.py @@ -354,7 +354,6 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase( servers_policies.ZERO_DISK_FLAVOR: base_policies.RULE_ADMIN_API}, overwrite=False) server_req = self._build_minimal_create_server_request( - self.api, 'test_create_image_backed_server_with_zero_disk_fails', fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID, self.zero_disk_flavor['id']) @@ -376,7 +375,6 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase( self.start_service('conductor') self.start_service('scheduler') server_req = self._build_minimal_create_server_request( - self.api, 'test_create_volume_backed_server_with_zero_disk_allowed', flavor_id=self.zero_disk_flavor['id']) server_req.pop('imageRef', None) @@ -387,5 +385,5 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase( 'boot_index': 0 }] server = self.admin_api.post_server({'server': server_req}) - server = self._wait_for_state_change(self.api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') self.assertIn('No valid host', server['fault']['message']) diff --git a/nova/tests/functional/wsgi/test_services.py b/nova/tests/functional/wsgi/test_services.py index 0b6401412860..0dc412146675 100644 --- a/nova/tests/functional/wsgi/test_services.py +++ b/nova/tests/functional/wsgi/test_services.py @@ -147,9 +147,9 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase): self.admin_api.post_server_action(server['id'], {'evacuate': {}}) # The host does not change until after the status is changed to ACTIVE # so wait for both parameters. - self._wait_for_server_parameter( - self.admin_api, server, {'status': 'ACTIVE', - 'OS-EXT-SRV-ATTR:host': 'host2'}) + self._wait_for_server_parameter(server, { + 'status': 'ACTIVE', + 'OS-EXT-SRV-ATTR:host': 'host2'}) # Delete the compute service for host1 and check the related # placement resources for that host. self.admin_api.api_delete('/os-services/%s' % service['id']) @@ -324,10 +324,10 @@ class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase): # Try creating a server which should fail because nothing is available. networks = [{'port': self.neutron.port_1['id']}] server_req = self._build_minimal_create_server_request( - self.api, 'test_compute_status_filter', + 'test_compute_status_filter', image_uuid=fake_image.get_valid_image_id(), networks=networks) server = self.api.post_server({'server': server_req}) - server = self._wait_for_state_change(self.api, server, 'ERROR') + server = self._wait_for_state_change(server, 'ERROR') # There should be a NoValidHost fault recorded. self.assertIn('fault', server) self.assertIn('No valid host', server['fault']['message']) @@ -339,7 +339,7 @@ class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase): # Try creating another server and it should be OK. 
server = self.api.post_server({'server': server_req}) - self._wait_for_state_change(self.api, server, 'ACTIVE') + self._wait_for_state_change(server, 'ACTIVE') # Stop, force-down and disable the service so the API cannot call # the compute service to sync the trait.
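Taken together, these conversions leave the functional tests calling the wait helpers without an explicit API-client argument everywhere. For reviewers who want the post-refactor calling convention in one place, a minimal sketch follows; it assumes a test class that mixes in InstanceHelperMixin and exposes the usual self.api/self.admin_api fixtures, and the flavor and image values are placeholders rather than anything added by this patch:

    # Hypothetical test method illustrating the new helper signatures.
    def test_demo_migrate_flow(self):
        server_req = self._build_minimal_create_server_request(
            'refactor-demo-server', flavor_id=self.flavor1['id'],
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            networks='none')
        server = self.api.post_server({'server': server_req})
        # No api/admin_api positional argument any more; the helper
        # resolves the client itself, preferring admin_api when the
        # test case has one.
        server = self._wait_for_state_change(server, 'ACTIVE')
        self.admin_api.post_server_action(server['id'], {'migrate': None})
        # Multi-attribute waits still take a dict of expected values.
        self._wait_for_server_parameter(
            server, {'status': 'VERIFY_RESIZE',
                     'OS-EXT-STS:task_state': None})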