Merge "functional: Remove 'api' parameter"

commit 2e7a0088c2
Zuul 2019-12-12 13:37:05 +00:00, committed by Gerrit Code Review
65 changed files with 449 additions and 519 deletions
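Every hunk below applies the same pattern: call sites stop passing an explicit API client into the InstanceHelperMixin helpers, and the helpers pick a client themselves, preferring admin_api when the test class defines one. A minimal runnable sketch of the pattern, assuming illustrative FakeAPI/HelperMixin/MyTest names that are not Nova code:

class FakeAPI:
    def __init__(self, name):
        self.name = name

    def get_server(self, server_id):
        # Stand-in for the REST client; always reports ACTIVE.
        return {'id': server_id, 'status': 'ACTIVE'}


class HelperMixin:
    def _wait_for_state_change(self, server, expected_status, max_retries=10):
        # New style: no 'api' parameter; prefer admin_api when present,
        # fall back to the regular client otherwise.
        api = getattr(self, 'admin_api', self.api)
        server = api.get_server(server['id'])
        assert server['status'] == expected_status
        return server


class MyTest(HelperMixin):
    api = FakeAPI('user')
    admin_api = FakeAPI('admin')


test = MyTest()
# Old call style: test._wait_for_state_change(test.admin_api, server, 'ACTIVE')
server = test._wait_for_state_change({'id': 'abc'}, 'ACTIVE')  # new style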


@ -34,7 +34,7 @@ class ServerActionsSampleJsonTest(test_servers.ServersSampleBase,
response_data = api_samples_test_base.pretty_data(response.content)
actions = api_samples_test_base.objectify(response_data)
self.action_stop = actions['instanceActions'][0]
self._wait_for_state_change(self.api, {'id': self.uuid}, 'SHUTOFF')
self._wait_for_state_change({'id': self.uuid}, 'SHUTOFF')
def _get_subs(self):
return {


@ -32,14 +32,14 @@ class MultinicSampleJsonTest(integrated_helpers.InstanceHelperMixin,
def _boot_a_server(self, expected_status='ACTIVE', extra_params=None):
server = self._build_minimal_create_server_request(
self.api, 'MultinicSampleJsonTestServer')
'MultinicSampleJsonTestServer')
if extra_params:
server.update(extra_params)
created_server = self.api.post_server({'server': server})
# Wait for it to finish being created
found_server = self._wait_for_state_change(self.api, created_server,
found_server = self._wait_for_state_change(created_server,
expected_status)
return found_server


@ -40,10 +40,10 @@ class ComputeManagerInitHostTestCase(
# Create a server, it does not matter on which host it lands.
name = 'test_migrate_disk_and_power_off_crash_finish_revert_migration'
server = self._build_minimal_create_server_request(
self.api, name, image_uuid=fake_image.get_valid_image_id(),
name, image_uuid=fake_image.get_valid_image_id(),
networks='auto')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Save the source hostname for assertions later.
source_host = server['OS-EXT-SRV-ATTR:host']
@ -66,8 +66,7 @@ class ComputeManagerInitHostTestCase(
self.admin_api.post_server_action(server['id'], {'migrate': None})
# Now wait for the task_state to be reset to None during
# _init_instance.
server = self._wait_for_server_parameter(
self.admin_api, server, {
server = self._wait_for_server_parameter(server, {
'status': 'ACTIVE',
'OS-EXT-STS:task_state': None,
'OS-EXT-SRV-ATTR:host': source_host
@ -158,7 +157,7 @@ class TestComputeRestartInstanceStuckInBuild(
# instance_claim() to stop it. This is less realistic but it works in
# the test env.
server_req = self._build_minimal_create_server_request(
self.api, 'interrupted-server', flavor_id=self.flavor1['id'],
'interrupted-server', flavor_id=self.flavor1['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
@ -170,7 +169,7 @@ class TestComputeRestartInstanceStuckInBuild(
mock_instance_claim.side_effect = sleep_forever
server = self.api.post_server({'server': server_req})
self._wait_for_state_change(self.admin_api, server, 'BUILD')
self._wait_for_state_change(server, 'BUILD')
# the instance.create.start is the closest thing to the
# instance_claim call we can wait for in the test
@ -182,7 +181,7 @@ class TestComputeRestartInstanceStuckInBuild(
# We expect that the instance is pushed to ERROR state during the
# compute restart.
self._wait_for_state_change(self.admin_api, server, 'ERROR')
self._wait_for_state_change(server, 'ERROR')
mock_log.assert_called_with(
'Instance spawn was interrupted before instance_claim, setting '
'instance to ERROR state',


@ -69,7 +69,7 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
'uuid': uuids.working_volume,
'source_type': 'volume',
'destination_type': 'volume'}]}})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
source = server['OS-EXT-SRV-ATTR:host']
if source == self.compute.host:
@ -87,7 +87,7 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
self.stub_out('nova.volume.cinder.API.attachment_delete',
stub_attachment_delete)
self.api.post_server_action(server['id'], post)
self._wait_for_server_parameter(self.api, server,
self._wait_for_server_parameter(server,
{'OS-EXT-SRV-ATTR:host': dest,
'status': 'ACTIVE'})
self.assertEqual(2, stub_attachment_delete.call_count)


@ -73,11 +73,14 @@ def generate_new_element(items, prefix, numeric=False):
class InstanceHelperMixin(object):
def _wait_for_server_parameter(self, admin_api, server, expected_params,
max_retries=10):
def _wait_for_server_parameter(
self, server, expected_params, max_retries=10, api=None):
api = api or getattr(self, 'admin_api', self.api)
retry_count = 0
while True:
server = admin_api.get_server(server['id'])
server = api.get_server(server['id'])
if all([server[attr] == expected_params[attr]
for attr in expected_params]):
break
@ -90,22 +93,21 @@ class InstanceHelperMixin(object):
return server
def _wait_for_state_change(self, admin_api, server, expected_status,
max_retries=10):
def _wait_for_state_change(self, server, expected_status, max_retries=10):
return self._wait_for_server_parameter(
admin_api, server, {'status': expected_status}, max_retries)
server, {'status': expected_status}, max_retries)
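As the hunk above shows, _wait_for_server_parameter keeps an optional api= keyword as an escape hatch (an explicit argument wins, then admin_api, then self.api), while _wait_for_state_change drops the parameter entirely and delegates a one-key comparison. A self-contained sketch of the polling loop, assuming only a get_server callable; the 0.5s interval and the AssertionError are illustrative, and the real helper's retry bookkeeping differs slightly:

import time


def wait_for_params(get_server, server, expected_params,
                    max_retries=10, interval=0.5):
    # Re-fetch the server and compare only the requested attributes,
    # giving up after max_retries attempts.
    for _ in range(max_retries):
        server = get_server(server['id'])
        if all(server[attr] == expected_params[attr]
               for attr in expected_params):
            return server
        time.sleep(interval)
    raise AssertionError('server never matched %s' % (expected_params,))


def wait_for_state(get_server, server, expected_status, max_retries=10):
    # _wait_for_state_change is now just this one-key special case.
    return wait_for_params(get_server, server,
                           {'status': expected_status}, max_retries)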
def _build_minimal_create_server_request(
self, name=None, image_uuid=None, flavor_id=None, networks=None,
az=None, host=None):
def _build_minimal_create_server_request(self, api, name=None,
image_uuid=None, flavor_id=None,
networks=None, az=None,
host=None):
server = {}
if not image_uuid:
# NOTE(takashin): In API version 2.36, image APIs were deprecated.
# In API version 2.36 or greater, self.api.get_images() returns
# a 404 error. In that case, 'image_uuid' should be specified.
image_uuid = api.get_images()[0]['id']
image_uuid = self.api.get_images()[0]['id']
server['imageRef'] = image_uuid
if not name:
@ -115,7 +117,7 @@ class InstanceHelperMixin(object):
if not flavor_id:
# Set a valid flavorId
flavor_id = api.get_flavors()[0]['id']
flavor_id = self.api.get_flavors()[0]['id']
server['flavorRef'] = 'http://fake.server/%s' % flavor_id
if networks is not None:
@ -142,40 +144,43 @@ class InstanceHelperMixin(object):
return
def _wait_for_action_fail_completion(
self, server, expected_action, event_name, api=None):
self, server, expected_action, event_name):
"""Polls instance action events for the given instance, action and
action event name until it finds the action event with an error
result.
"""
if api is None:
api = self.api
return self._wait_for_instance_action_event(
api, server, expected_action, event_name, event_result='error')
server, expected_action, event_name, event_result='error')
def _wait_for_instance_action_event(
self, api, server, action_name, event_name, event_result):
self, server, action_name, event_name, event_result):
"""Polls the instance action events for the given instance, action,
event, and event result until it finds the event.
"""
api = getattr(self, 'admin_api', self.api)
actions = []
events = []
for attempt in range(10):
actions = api.get_instance_actions(server['id'])
# The API returns the newest event first
for action in actions:
if action['action'] == action_name:
events = (
api.api_get(
'/servers/%s/os-instance-actions/%s' %
(server['id'], action['request_id'])
).body['instanceAction']['events'])
# Look for the action event being in error state.
for event in events:
result = event['result']
if (event['event'] == event_name and
result is not None and
result.lower() == event_result.lower()):
return event
if action['action'] != action_name:
continue
events = api.api_get(
'/servers/%s/os-instance-actions/%s' % (
server['id'], action['request_id'])
).body['instanceAction']['events']
# Look for the action event being in error state.
for event in events:
result = event['result']
if (event['event'] == event_name and
result is not None and
result.lower() == event_result.lower()):
return event
# We didn't find the completion event yet, so wait a bit.
time.sleep(0.5)
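Flattened, the restructured loop above reads: skip actions whose name does not match, then scan that action's events for the wanted name with a non-None, case-insensitive result match. A sketch of just that traversal, where events_for stands in for the os-instance-actions API call:

def find_action_event(actions, events_for, action_name, event_name,
                      event_result):
    # Actions arrive newest first, so the first match is the most recent.
    for action in actions:
        if action['action'] != action_name:
            continue
        for event in events_for(action):
            result = event['result']
            if (event['event'] == event_name and
                    result is not None and
                    result.lower() == event_result.lower()):
                return event
    return None  # caller sleeps and retries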
@ -192,9 +197,8 @@ class InstanceHelperMixin(object):
:param action: Either "resize" or "migrate" instance action.
:param error_in_tb: Some expected part of the error event traceback.
"""
api = self.admin_api if hasattr(self, 'admin_api') else self.api
event = self._wait_for_action_fail_completion(
server, action, 'conductor_migrate_server', api=api)
server, action, 'conductor_migrate_server')
self.assertIn(error_in_tb, event['traceback'])
def _wait_for_migration_status(self, server, expected_statuses):
@ -202,9 +206,7 @@ class InstanceHelperMixin(object):
for the given server, else the test fails. The migration record, if
found, is returned.
"""
api = getattr(self, 'admin_api', None)
if api is None:
api = self.api
api = getattr(self, 'admin_api', self.api)
statuses = [status.lower() for status in expected_statuses]
for attempt in range(10):
@ -297,10 +299,14 @@ class _IntegratedTestBase(test.TestCase):
self.api = self.api_fixture.admin_api
else:
self.api = self.api_fixture.api
self.admin_api = self.api_fixture.admin_api
if hasattr(self, 'microversion'):
self.api.microversion = self.microversion
if not self.ADMIN_API:
self.admin_api.microversion = self.microversion
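This setUp change is what makes the api-less helpers safe: self.admin_api is now always bound, so getattr(self, 'admin_api', self.api) always finds a client, and any requested microversion is mirrored onto it. A sketch of the wiring as a free function, assuming only that the fixture exposes .api and .admin_api:

def setup_clients(api_fixture, admin_api_test, microversion=None):
    api = api_fixture.admin_api if admin_api_test else api_fixture.api
    admin_api = api_fixture.admin_api  # always bound now
    if microversion:
        api.microversion = microversion
        if not admin_api_test:
            # When ADMIN_API is set, api *is* admin_api, so this guard
            # avoids configuring the same client twice.
            admin_api.microversion = microversion
    return api, admin_api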
def get_unused_server_name(self):
servers = self.api.get_servers()
server_names = [server['name'] for server in servers]
@ -728,14 +734,13 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
:return: the API representation of the booted instance
"""
server_req = self._build_minimal_create_server_request(
self.api, 'some-server', flavor_id=flavor['id'],
'some-server', flavor_id=flavor['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks=networks)
server_req['availability_zone'] = 'nova:%s' % source_hostname
LOG.info('booting on %s', source_hostname)
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(
self.admin_api, created_server, 'ACTIVE')
server = self._wait_for_state_change(created_server, 'ACTIVE')
# Verify that our source host is what the server ended up on
self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])
@ -849,7 +854,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
def _move_and_check_allocations(self, server, request, old_flavor,
new_flavor, source_rp_uuid, dest_rp_uuid):
self.api.post_server_action(server['id'], request)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
def _check_allocation():
self.assertFlavorMatchesUsage(source_rp_uuid, old_flavor)
@ -911,7 +916,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
}
}
self.api.post_server_action(server['id'], resize_req)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self.assertFlavorMatchesUsage(rp_uuid, old_flavor, new_flavor)
@ -981,15 +986,15 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
def _confirm_resize(self, server):
self.api.post_server_action(server['id'], {'confirmResize': None})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self._wait_for_instance_action_event(
self.api, server, instance_actions.CONFIRM_RESIZE,
server, instance_actions.CONFIRM_RESIZE,
'compute_confirm_resize', 'success')
return server
def _revert_resize(self, server):
self.api.post_server_action(server['id'], {'revertResize': None})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self._wait_for_migration_status(server, ['reverted'])
# Note that the migration status is changed to "reverted" in the
# dest host revert_resize method but the allocations are cleaned up
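The two resize helpers above encode an ordering worth noting: the server reports ACTIVE before the operation's authoritative record lands, so each helper waits for a second signal (an instance-action event for confirm, the migration status for revert). A condensed sketch of that double wait, with the wait_* callables standing in for the mixin methods and the 'confirm_resize' action name standing in for the instance_actions.CONFIRM_RESIZE constant:

def confirm_resize(post_action, wait_state, wait_action_event, server):
    post_action(server['id'], {'confirmResize': None})
    # ACTIVE alone is not enough: also wait for the compute-side
    # confirm event so later allocation assertions do not race.
    server = wait_state(server, 'ACTIVE')
    wait_action_event(server, 'confirm_resize',
                      'compute_confirm_resize', 'success')
    return server


def revert_resize(post_action, wait_state, wait_migration, server):
    post_action(server['id'], {'revertResize': None})
    server = wait_state(server, 'ACTIVE')
    # The migration row flips to 'reverted' on the destination host
    # after the instance goes ACTIVE (see the note above).
    wait_migration(server, ['reverted'])
    return server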


@ -77,7 +77,7 @@ class SharedStorageProviderUsageTestCase(
}
# create server
server = self.api.post_server(server_req_body)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# get shared_rp and cn_rp usages
shared_rp_usages = self._get_provider_usages(shared_RP['uuid'])
@ -135,7 +135,7 @@ class SharedStorageProviderUsageTestCase(
}
# create server
server = self.api.post_server(server_req_body)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
rebuild_image_ref = (
nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
@ -152,7 +152,7 @@ class SharedStorageProviderUsageTestCase(
self.api.api_post('/servers/%s/action' % server['id'],
rebuild_req_body)
self._wait_for_server_parameter(
self.api, server, {'OS-EXT-STS:task_state': None})
server, {'OS-EXT-STS:task_state': None})
# get shared_rp and cn_rp usages
shared_rp_usages = self._get_provider_usages(shared_rp_uuid)
@ -198,7 +198,7 @@ class SharedStorageProviderUsageTestCase(
}
# create server
server = self.api.post_server(server_req_body)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
rebuild_image_ref = (
nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
@ -216,7 +216,7 @@ class SharedStorageProviderUsageTestCase(
rebuild_req_body)
# Look for the failed rebuild action.
self._wait_for_action_fail_completion(
server, instance_actions.REBUILD, 'rebuild_server', self.admin_api)
server, instance_actions.REBUILD, 'rebuild_server')
# Assert the server image_ref was rolled back on failure.
server = self.api.get_server(server['id'])
self.assertEqual(org_image_id, server['image']['id'])


@ -126,7 +126,7 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
def _create_server(self, flavor_id, hostname):
server_req = self._build_minimal_create_server_request(
self.api, 'some-server', flavor_id=flavor_id,
'some-server', flavor_id=flavor_id,
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server_req['availability_zone'] = 'nova:%s' % hostname
@ -173,22 +173,22 @@ class VPMEMTests(VPMEMTestBase):
# Boot two servers with pmem
server1 = self._create_server(self.flavor, self.compute1.host)
self._wait_for_state_change(self.api, server1, 'ACTIVE')
self._wait_for_state_change(server1, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server1['id'], cn1_uuid)
server2 = self._create_server(self.flavor, self.compute1.host)
self._wait_for_state_change(self.api, server2, 'ACTIVE')
self._wait_for_state_change(server2, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server2['id'], cn1_uuid)
# The 'SMALL' VPMEM resource has been used up
server3 = self._create_server(self.flavor, self.compute1.host)
self._wait_for_state_change(self.api, server3, 'ERROR')
self._wait_for_state_change(server3, 'ERROR')
# Delete server2, one 'SMALL' VPMEM will be released
self._delete_server(server2)
server3 = self._create_server(self.flavor, self.compute1.host)
self._wait_for_state_change(self.api, server3, 'ACTIVE')
self._wait_for_state_change(server3, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server3['id'], cn1_uuid)
@ -237,29 +237,29 @@ class VPMEMResizeTests(VPMEMTestBase):
# Boot one server with pmem, then resize the server
server = self._create_server(self.flavor1, self.compute1.host)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
# Revert resize
self._resize_server(server, self.flavor2)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn2_uuid)
self._revert_resize(server)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
# Confirm resize
self._resize_server(server, self.flavor2)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn2_uuid)
self._confirm_resize(server)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn2_uuid)
@ -272,29 +272,29 @@ class VPMEMResizeTests(VPMEMTestBase):
# Boot one server with pmem, then resize the server
server = self._create_server(self.flavor1, self.compute1.host)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
# Revert resize
self._resize_server(server, self.flavor2)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
self._revert_resize(server)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
# Confirm resize
self._resize_server(server, self.flavor2)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
self._confirm_resize(server)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)


@ -220,7 +220,7 @@ class NotificationSampleTestBase(test.TestCase,
actual=fake_notifier.VERSIONED_NOTIFICATIONS.pop(0))
server = self._build_minimal_create_server_request(
self.api, 'some-server',
'some-server',
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=flavor_id)
@ -246,7 +246,7 @@ class NotificationSampleTestBase(test.TestCase,
self.assertTrue(created_server['id'])
# Wait for it to finish being created
found_server = self._wait_for_state_change(self.api, created_server,
found_server = self._wait_for_state_change(created_server,
expected_status)
found_server['reservation_id'] = reservation_id


@ -59,7 +59,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
fake_notifier.reset()
action(server)
# Ensure that instance is in active state after an action
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
@mock.patch('nova.compute.manager.ComputeManager.'
'_live_migration_cleanup_flags', return_value=[True, False])
@ -188,13 +188,13 @@ class TestInstanceNotificationSampleWithMultipleCompute(
}
self.admin_api.post_server_action(server['id'], post)
self._wait_for_state_change(self.api, server, 'MIGRATING')
self._wait_for_state_change(server, 'MIGRATING')
migrations = self._wait_and_get_migrations(server)
self.admin_api.delete_migration(server['id'], migrations[0]['id'])
self._wait_for_notification('instance.live_migration_abort.start')
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# NOTE(gibi): the instance.live_migration_rollback notification is emitted
# after the instance.live_migration_abort notification so we have to
# wait for the rollback to ensure we can assert both notifications
@ -261,10 +261,8 @@ class TestInstanceNotificationSampleWithMultipleCompute(
}
self.admin_api.post_server_action(server['id'], evacuate)
self._wait_for_state_change(self.api, server,
expected_status='REBUILD')
self._wait_for_state_change(self.api, server,
expected_status='ACTIVE')
self._wait_for_state_change(server, expected_status='REBUILD')
self._wait_for_state_change(server, expected_status='ACTIVE')
notifications = self._get_notifications('instance.evacuate')
self.assertEqual(1, len(notifications),
@ -286,7 +284,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
}
self.admin_api.post_server_action(server['id'], post)
self._wait_for_state_change(self.api, server, 'MIGRATING')
self._wait_for_state_change(server, 'MIGRATING')
migrations = self._wait_and_get_migrations(server)
migration_id = migrations[0]['id']
@ -390,7 +388,7 @@ class TestInstanceNotificationSample(
fake_notifier.reset()
action(server)
# Ensure that instance is in active state after an action
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# if the test step did not raise then we consider the step as
# succeeded. We drop the logs to avoid causing subunit parser
@ -520,10 +518,8 @@ class TestInstanceNotificationSample(
}
}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.api, server,
expected_status='REBUILD')
self._wait_for_state_change(self.api, server,
expected_status='ACTIVE')
self._wait_for_state_change(server, expected_status='REBUILD')
self._wait_for_state_change(server, expected_status='ACTIVE')
notifications = self._get_notifications('instance.exists')
self._verify_notification(
@ -751,11 +747,9 @@ class TestInstanceNotificationSample(
def _test_power_off_on_server(self, server):
self.api.post_server_action(server['id'], {'os-stop': {}})
self._wait_for_state_change(self.api, server,
expected_status='SHUTOFF')
self._wait_for_state_change(server, expected_status='SHUTOFF')
self.api.post_server_action(server['id'], {'os-start': {}})
self._wait_for_state_change(self.api, server,
expected_status='ACTIVE')
self._wait_for_state_change(server, expected_status='ACTIVE')
self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
@ -788,8 +782,7 @@ class TestInstanceNotificationSample(
def _test_shelve_and_shelve_offload_server(self, server):
self.flags(shelved_offload_time=-1)
self.api.post_server_action(server['id'], {'shelve': {}})
self._wait_for_state_change(self.api, server,
expected_status='SHELVED')
self._wait_for_state_change(server, expected_status='SHELVED')
self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
@ -812,7 +805,7 @@ class TestInstanceNotificationSample(
# we can unshelve to make sure that the unshelve.start notification
# payload is stable as the compute manager first sets the instance
# state then a bit later sets the instance.host to None.
self._wait_for_server_parameter(self.api, server,
self._wait_for_server_parameter(server,
{'status': 'SHELVED_OFFLOADED',
'OS-EXT-SRV-ATTR:host': None})
@ -832,7 +825,7 @@ class TestInstanceNotificationSample(
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
self.api.post_server_action(server['id'], {'unshelve': None})
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self._wait_for_notification('instance.unshelve.end')
def _test_unshelve_server(self, server):
@ -844,13 +837,13 @@ class TestInstanceNotificationSample(
# we can unshelve to make sure that the unshelve.start notification
# payload is stable as the compute manager first sets the instance
# state then a bit later sets the instance.host to None.
self._wait_for_server_parameter(self.api, server,
self._wait_for_server_parameter(server,
{'status': 'SHELVED_OFFLOADED',
'OS-EXT-SRV-ATTR:host': None})
post = {'unshelve': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self._wait_for_notification('instance.unshelve.end')
self.assertEqual(9, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
@ -870,11 +863,11 @@ class TestInstanceNotificationSample(
def _test_suspend_resume_server(self, server):
post = {'suspend': {}}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.admin_api, server, 'SUSPENDED')
self._wait_for_state_change(server, 'SUSPENDED')
post = {'resume': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Four versioned notification are generated.
# 0. instance-suspend-start
@ -913,10 +906,10 @@ class TestInstanceNotificationSample(
def _test_pause_unpause_server(self, server):
self.api.post_server_action(server['id'], {'pause': {}})
self._wait_for_state_change(self.api, server, 'PAUSED')
self._wait_for_state_change(server, 'PAUSED')
self.api.post_server_action(server['id'], {'unpause': {}})
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Four versioned notifications are generated
# 0. instance-pause-start
@ -997,7 +990,7 @@ class TestInstanceNotificationSample(
}
}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._pop_and_verify_dest_select_notification(server['id'],
replacements={
@ -1034,7 +1027,7 @@ class TestInstanceNotificationSample(
# the following is the revert server request
post = {'revertResize': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
@ -1170,7 +1163,7 @@ class TestInstanceNotificationSample(
self.addCleanup(patcher.stop)
patcher.start()
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.api, server, expected_status='ERROR')
self._wait_for_state_change(server, expected_status='ERROR')
self._wait_for_notification('compute.exception')
# There should be the following notifications after scheduler's
# select_destination notifications:
@ -1248,10 +1241,8 @@ class TestInstanceNotificationSample(
self.api.post_server_action(server['id'], post)
# Before going back to ACTIVE state
# server state needs to be changed to REBUILD state
self._wait_for_state_change(self.api, server,
expected_status='REBUILD')
self._wait_for_state_change(self.api, server,
expected_status='ACTIVE')
self._wait_for_state_change(server, expected_status='REBUILD')
self._wait_for_state_change(server, expected_status='ACTIVE')
self._pop_and_verify_dest_select_notification(server['id'],
replacements={
@ -1345,10 +1336,8 @@ class TestInstanceNotificationSample(
self.api.post_server_action(server['id'], post)
# Before going back to ACTIVE state
# server state needs to be changed to REBUILD state
self._wait_for_state_change(self.api, server,
expected_status='REBUILD')
self._wait_for_state_change(self.api, server,
expected_status='ACTIVE')
self._wait_for_state_change(server, expected_status='REBUILD')
self._wait_for_state_change(server, expected_status='ACTIVE')
self._pop_and_verify_dest_select_notification(server['id'],
replacements={
@ -1434,7 +1423,7 @@ class TestInstanceNotificationSample(
}
self.api.post_server_action(server['id'], post)
mock_rebuild.side_effect = _virtual_interface_create_failed
self._wait_for_state_change(self.api, server, expected_status='ERROR')
self._wait_for_state_change(server, expected_status='ERROR')
notification = self._get_notifications('instance.rebuild.error')
self.assertEqual(1, len(notification),
fake_notifier.VERSIONED_NOTIFICATIONS)
@ -1455,11 +1444,11 @@ class TestInstanceNotificationSample(
def _test_restore_server(self, server):
self.flags(reclaim_instance_interval=30)
self.api.delete_server(server['id'])
self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
self._wait_for_state_change(server, 'SOFT_DELETED')
# we don't want to test soft_delete here
fake_notifier.reset()
self.api.post_server_action(server['id'], {'restore': {}})
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
@ -1641,12 +1630,12 @@ class TestInstanceNotificationSample(
self.flags(allow_resize_to_same_host=True)
post = {'resize': {'flavorRef': '2'}}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
fake_notifier.reset()
post = {'confirmResize': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
@ -1733,7 +1722,7 @@ class TestInstanceNotificationSample(
}
}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.admin_api, server, 'RESCUE')
self._wait_for_state_change(server, 'RESCUE')
# 0. instance.rescue.start
# 1. instance.exists
@ -1759,7 +1748,7 @@ class TestInstanceNotificationSample(
'unrescue': None
}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
@ -1779,7 +1768,7 @@ class TestInstanceNotificationSample(
def _test_soft_delete_server(self, server):
self.flags(reclaim_instance_interval=30)
self.api.delete_server(server['id'])
self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
self._wait_for_state_change(server, 'SOFT_DELETED')
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
@ -1948,9 +1937,9 @@ class TestInstanceNotificationSample(
def _test_lock_unlock_instance(self, server):
self.api.post_server_action(server['id'], {'lock': {}})
self._wait_for_server_parameter(self.api, server, {'locked': True})
self._wait_for_server_parameter(server, {'locked': True})
self.api.post_server_action(server['id'], {'unlock': {}})
self._wait_for_server_parameter(self.api, server, {'locked': False})
self._wait_for_server_parameter(server, {'locked': False})
# Two versioned notifications are generated
# 0. instance-lock
# 1. instance-unlock
@ -1973,9 +1962,9 @@ class TestInstanceNotificationSample(
def _test_lock_unlock_instance_with_reason(self, server):
self.api.post_server_action(
server['id'], {'lock': {"locked_reason": "global warming"}})
self._wait_for_server_parameter(self.api, server, {'locked': True})
self._wait_for_server_parameter(server, {'locked': True})
self.api.post_server_action(server['id'], {'unlock': {}})
self._wait_for_server_parameter(self.api, server, {'locked': False})
self._wait_for_server_parameter(server, {'locked': False})
# Two versioned notifications are generated
# 0. instance-lock
# 1. instance-unlock


@ -55,7 +55,7 @@ class DeleteWithReservedVolumes(integrated_helpers._IntegratedTestBase,
]
}
})
return self._wait_for_state_change(self.api, server, 'ERROR')
return self._wait_for_state_change(server, 'ERROR')
def test_delete_with_reserved_volumes_new(self):
self.cinder = self.useFixture(


@ -42,7 +42,7 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
flavor1 = flavors[0]['id']
server = self._build_server(flavor1)
server = self.api.post_server({'server': server})
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Start up another compute service so we can resize.
host2 = self.start_service('compute', host='host2')
@ -51,10 +51,10 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
flavor2 = flavors[1]['id']
req = {'resize': {'flavorRef': flavor2}}
self.api.post_server_action(server['id'], req)
server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
self.api.post_server_action(server['id'], {'confirmResize': None})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Disable the host on which the server is now running (host2).
host2.stop()
@ -62,7 +62,7 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
# Now try to evacuate the server back to the original source compute.
req = {'evacuate': {'onSharedStorage': False}}
self.api.post_server_action(server['id'], req)
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# The evacuate flow in the compute manager is annoying in that it
# sets the instance status to ACTIVE before updating the host, so we
# have to wait for the migration record to be 'done' to avoid a race.


@ -81,10 +81,10 @@ class TestLocalDeleteAllocations(test.TestCase,
self.assertEqual(0, usage)
# Create a server.
server = self._build_minimal_create_server_request(self.api,
server = self._build_minimal_create_server_request(
'local-delete-test', self.image_id, self.flavor_id, 'none')
server = self.admin_api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Assert usages are non zero now.
usages_during = self._get_usages(placement_api, rp_uuid)
@ -136,10 +136,10 @@ class TestLocalDeleteAllocations(test.TestCase,
self.assertEqual(0, usage)
# Create a server.
server = self._build_minimal_create_server_request(self.api,
server = self._build_minimal_create_server_request(
'local-delete-test', self.image_id, self.flavor_id, 'none')
server = self.admin_api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Assert usages are non zero now.
usages_during = self._get_usages(placement_api, rp_uuid)


@ -59,10 +59,10 @@ class ServerTagsFilteringTest(test.TestCase,
for x in range(2):
server = self.api.post_server(
dict(server=self._build_minimal_create_server_request(
self.api, 'test-list-server-tag-filters%i' % x, image_id,
'test-list-server-tag-filters%i' % x, image_id,
networks='none')))
self.addCleanup(self.api.delete_server, server['id'])
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.servers.append(server)
# now apply two tags to the first server


@ -66,10 +66,10 @@ class ServerListLimitMarkerCell0Test(test.TestCase,
for x in range(3):
server = self.api.post_server(
dict(server=self._build_minimal_create_server_request(
self.api, 'test-list-server-limit%i' % x, self.image_id,
'test-list-server-limit%i' % x, self.image_id,
networks='none')))
self.addCleanup(self.api.delete_server, server['id'])
self._wait_for_state_change(self.api, server, 'ERROR')
self._wait_for_state_change(server, 'ERROR')
servers = self.api.get_servers()
self.assertEqual(3, len(servers))


@ -95,11 +95,11 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
# We first create the instance
server = self.admin_api.post_server(
dict(server=self._build_minimal_create_server_request(
self.api, 'my-pretty-instance-to-evacuate', self.image_id,
'my-pretty-instance-to-evacuate', self.image_id,
networks='none')))
server_id = server['id']
self.addCleanup(self.api.delete_server, server_id)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# We need to get the instance details to know its host
server = self.admin_api.get_server(server_id)
@ -125,7 +125,7 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
}
self.admin_api.post_server_action(server['id'], evacuate)
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
server = self.admin_api.get_server(server_id)
# Yeepee, that works!


@ -84,13 +84,12 @@ class FailedEvacuateStateTests(test.TestCase,
def _boot_a_server(self):
server_req = self._build_minimal_create_server_request(
self.api, 'some-server', flavor_id=self.flavor1['id'],
'some-server', flavor_id=self.flavor1['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
LOG.info('booting on %s', self.hostname)
created_server = self.api.post_server({'server': server_req})
return self._wait_for_state_change(
self.api, created_server, 'ACTIVE')
return self._wait_for_state_change(created_server, 'ACTIVE')
def test_evacuate_no_valid_host(self):
# Boot a server
@ -110,7 +109,7 @@ class FailedEvacuateStateTests(test.TestCase,
self._wait_for_notification_event_type('compute_task.rebuild_server')
server = self._wait_for_state_change(self.api, server, 'ERROR')
server = self._wait_for_state_change(server, 'ERROR')
self.assertEqual(self.hostname, server['OS-EXT-SRV-ATTR:host'])
# Check migrations


@ -70,7 +70,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
def _boot_servers(self, num_servers=1):
server_req = self._build_minimal_create_server_request(
self.api, 'some-server', flavor_id=self.flavor1['id'],
'some-server', flavor_id=self.flavor1['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server_req.update({'min_count': str(num_servers),
@ -81,7 +81,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
for idx, server in enumerate(servers):
servers[idx] = self._wait_for_state_change(self.api, server,
servers[idx] = self._wait_for_state_change(server,
'ACTIVE')
return servers


@ -103,9 +103,9 @@ class TestRequestSpecRetryReschedule(test.TestCase,
# create the instance which should go to host1
server = self.admin_api.post_server(
dict(server=self._build_minimal_create_server_request(
self.api, 'test_resize_with_reschedule_then_live_migrate',
'test_resize_with_reschedule_then_live_migrate',
self.image_id, flavor_id=flavor1['id'], networks='none')))
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])
# Stub out the resize to fail on host2, which will trigger a reschedule
@ -116,17 +116,17 @@ class TestRequestSpecRetryReschedule(test.TestCase,
# on host3.
data = {'resize': {'flavorRef': flavor2['id']}}
self.api.post_server_action(server['id'], data)
server = self._wait_for_state_change(self.admin_api, server,
server = self._wait_for_state_change(server,
'VERIFY_RESIZE')
self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])
self.api.post_server_action(server['id'], {'confirmResize': None})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Now live migrate the server to host2 specifically, which previously
# failed the resize attempt but here it should pass.
data = {'os-migrateLive': {'host': 'host2', 'block_migration': 'auto'}}
self.admin_api.post_server_action(server['id'], data)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
# NOTE(mriedem): The instance status effectively goes to ACTIVE before
# the migration status is changed to "completed" since


@ -104,8 +104,7 @@ class TestRescheduleWithServerGroup(test.TestCase,
hints = {'group': created_group['id']}
created_server = self.api.post_server({'server': server,
'os:scheduler_hints': hints})
found_server = self._wait_for_state_change(self.admin_api,
created_server, 'ACTIVE')
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
# Assert that the host is not the failed host.
self.assertNotEqual(self.failed_host,
found_server['OS-EXT-SRV-ATTR:host'])


@ -64,7 +64,7 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase,
}
}
server = self.api.post_server(server_req_body)
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# For a volume-backed server, the image ref will be an empty string
# in the server response.
self.assertEqual('', server['image'])


@ -99,13 +99,11 @@ class TestParallelEvacuationWithServerGroup(
hints = {'group': group['id']}
created_server1 = self.api.post_server({'server': server,
'os:scheduler_hints': hints})
server1 = self._wait_for_state_change(self.api,
created_server1, 'ACTIVE')
server1 = self._wait_for_state_change(created_server1, 'ACTIVE')
created_server2 = self.api.post_server({'server': server,
'os:scheduler_hints': hints})
server2 = self._wait_for_state_change(self.api,
created_server2, 'ACTIVE')
server2 = self._wait_for_state_change(created_server2, 'ACTIVE')
# assert that the anti-affinity policy is enforced during the boot
self.assertNotEqual(server1['OS-EXT-SRV-ATTR:host'],
@ -134,9 +132,9 @@ class TestParallelEvacuationWithServerGroup(
fake_notifier.wait_for_versioned_notifications(
'instance.rebuild.start', n_events=1)
server1 = self._wait_for_server_parameter(
self.api, server1, {'OS-EXT-STS:task_state': None})
server1, {'OS-EXT-STS:task_state': None})
server2 = self._wait_for_server_parameter(
self.api, server2, {'OS-EXT-STS:task_state': None})
server2, {'OS-EXT-STS:task_state': None})
# NOTE(gibi): The instance.host is set _after_ the instance state and
# task_state are set back to normal so it is not enough to wait for


@ -48,13 +48,13 @@ class TestServerResizeReschedule(integrated_helpers.ProviderUsageBaseTestCase):
supplied host_list, and does not call the scheduler.
"""
server_req = self._build_minimal_create_server_request(
self.api, 'some-server', flavor_id=self.flavor1['id'],
'some-server', flavor_id=self.flavor1['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
self.first_attempt = True
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(self.api, created_server,
server = self._wait_for_state_change(created_server,
'ACTIVE')
actual_prep_resize = compute_manager.ComputeManager._prep_resize
@ -74,7 +74,7 @@ class TestServerResizeReschedule(integrated_helpers.ProviderUsageBaseTestCase):
data = {"resize": {"flavorRef": self.flavor2['id']}}
self.api.post_server_action(server_uuid, data)
server = self._wait_for_state_change(self.api, created_server,
server = self._wait_for_state_change(created_server,
'VERIFY_RESIZE')
self.assertEqual(self.flavor2['name'],
server['flavor']['original_name'])


@ -77,11 +77,11 @@ class TestResizeWithNoAllocationScheduler(
def test_resize(self):
# Create our server without networking just to keep things simple.
server_req = self._build_minimal_create_server_request(
self.api, 'test-resize', flavor_id=self.old_flavor['id'],
'test-resize', flavor_id=self.old_flavor['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
original_host = server['OS-EXT-SRV-ATTR:host']
target_host = 'host1' if original_host == 'host2' else 'host2'
@ -95,8 +95,7 @@ class TestResizeWithNoAllocationScheduler(
self.api.post_server_action(server['id'], post)
# Poll the server until the resize is done.
server = self._wait_for_state_change(
self.api, server, 'VERIFY_RESIZE')
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
# Assert that the server was migrated to the other host.
self.assertEqual(target_host, server['OS-EXT-SRV-ATTR:host'])
# Confirm the resize.


@ -90,7 +90,7 @@ class TestBootFromVolumeIsolatedHostsFilter(
# networks='none'.
with utils.temporary_mutation(self.api, microversion='2.37'):
server = self.api.post_server(server_req_body)
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# NOTE(mriedem): The instance is successfully scheduled but since
# the image_id from the volume_image_metadata isn't stored in the
# RequestSpec.image.id, and restrict_isolated_hosts_to_isolated_images


@ -62,13 +62,12 @@ class InstanceListWithDeletedServicesTestCase(
def _migrate_server(self, server, target_host):
self.admin_api.api_post('/servers/%s/action' % server['id'],
{'migrate': None})
server = self._wait_for_state_change(
self.admin_api, server, 'VERIFY_RESIZE')
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
self.assertEqual(target_host, server['OS-EXT-SRV-ATTR:host'])
self.admin_api.api_post('/servers/%s/action' % server['id'],
{'confirmResize': None},
check_response_status=[204])
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
return server
def test_instance_list_deleted_service_with_no_uuid(self):
@ -87,10 +86,10 @@ class InstanceListWithDeletedServicesTestCase(
# Create an instance which will be on host1 since it's the only host.
server_req = self._build_minimal_create_server_request(
self.api, 'test_instance_list_deleted_service_with_no_uuid',
'test_instance_list_deleted_service_with_no_uuid',
image_uuid=self.image_id, networks='none')
server = self.api.post_server({'server': server_req})
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Now we start a 2nd compute which is "upgraded" (has a uuid) and
# we'll migrate the instance to that host.


@ -94,7 +94,7 @@ class TestEvacuationWithSourceReturningDuringRebuild(
'imageRef': self.image_id,
'flavorRef': self.flavor_id}
server_response = self.api.post_server({'server': server_request})
server = self._wait_for_state_change(self.api, server_response,
server = self._wait_for_state_change(server_response,
'ACTIVE')
# Record where the instance is running before forcing the service down
@ -106,7 +106,7 @@ class TestEvacuationWithSourceReturningDuringRebuild(
self.api.post_server_action(server['id'], {'evacuate': {}})
# Wait for the instance to go into an ACTIVE state
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
server = self.api.get_server(server['id'])
host = server['OS-EXT-SRV-ATTR:host']
migrations = self.api.get_migrations()


@ -60,7 +60,7 @@ class TestMultiCreateServerGroupMemberOverQuota(
multi-create POST /servers request.
"""
server_req = self._build_minimal_create_server_request(
self.api, 'test_multi_create_server_group_members_over_quota',
'test_multi_create_server_group_members_over_quota',
image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
server_req['min_count'] = 3
@ -88,7 +88,7 @@ class TestMultiCreateServerGroupMemberOverQuota(
self.useFixture(nova_fixtures.NoopConductorFixture())
for x in range(3):
server_req = self._build_minimal_create_server_request(
self.api, 'test_concurrent_request_%s' % x,
'test_concurrent_request_%s' % x,
image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
hints = {'group': self.created_group['id']}


@ -78,7 +78,7 @@ class RescheduleBuildAvailabilityZoneUpCall(
self.stub_out('nova.compute.manager.ComputeManager.'
'build_and_run_instance', wrap_bari)
server = self._build_minimal_create_server_request(
self.api, 'test_server_create_reschedule_blocked_az_up_call')
'test_server_create_reschedule_blocked_az_up_call')
server = self.api.post_server({'server': server})
# Because we poisoned AggregateList.get_by_host after hitting the
# compute service we have to wait for the notification that the build
@ -88,7 +88,7 @@ class RescheduleBuildAvailabilityZoneUpCall(
# build_and_run_instance twice so we have more than one instance of
# the mock that needs to be stopped.
mock.patch.stopall()
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# We should have rescheduled and the instance AZ should be set from the
# Selection object. Since neither compute host is in an AZ, the server
# is in the default AZ from config.
@ -148,9 +148,9 @@ class RescheduleMigrateAvailabilityZoneUpCall(
self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
wrap_prep_resize)
server = self._build_minimal_create_server_request(
self.api, 'test_migrate_reschedule_blocked_az_up_call')
'test_migrate_reschedule_blocked_az_up_call')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
original_host = server['OS-EXT-SRV-ATTR:host']
# Now cold migrate the server to the other host.
@ -164,7 +164,7 @@ class RescheduleMigrateAvailabilityZoneUpCall(
# twice so we have more than one instance of the mock that needs to be
# stopped.
mock.patch.stopall()
server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
final_host = server['OS-EXT-SRV-ATTR:host']
self.assertNotIn(final_host, [original_host, self.rescheduled])
# We should have rescheduled and the instance AZ should be set from the


@ -103,7 +103,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
# Now create two servers in that group.
server_req = self._build_minimal_create_server_request(
self.api, 'test_anti_affinity_multi_create',
'test_anti_affinity_multi_create',
image_uuid=image_fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
server_req['min_count'] = 2
@ -115,8 +115,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
# Now wait for both servers to be ACTIVE and get the host on which
# each server was built.
for server in self.api.get_servers(detail=False):
server = self._wait_for_state_change(
self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
selected_hosts.add(server['OS-EXT-SRV-ATTR:host'])
# Assert that each server is on a separate host.


@ -76,7 +76,7 @@ class TestRescheduleWithVolumesAttached(
server_response = self.api.post_server({'server': server_request})
server_id = server_response['id']
self._wait_for_state_change(self.api, server_response, 'ACTIVE')
self._wait_for_state_change(server_response, 'ACTIVE')
attached_volume_ids = self.cinder.volume_ids_for_instance(server_id)
self.assertIn(volume_id, attached_volume_ids)
self.assertEqual(1, len(self.cinder.volume_to_attachment))


@ -57,8 +57,7 @@ class TestEvacuateDeleteServerRestartOriginalCompute(
server['id'], post)
expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname,
'status': 'ACTIVE'}
server = self._wait_for_server_parameter(self.api, server,
expected_params)
server = self._wait_for_server_parameter(server, expected_params)
# Expect to have allocation and usages on both computes as the
# source compute is still down


@ -65,31 +65,30 @@ class ColdMigrateTargetHostThenLiveMigrateTest(
def test_cold_migrate_target_host_then_live_migrate(self):
# Create a server, it doesn't matter on which host it builds.
server = self._build_minimal_create_server_request(
self.api, 'test_cold_migrate_target_host_then_live_migrate',
'test_cold_migrate_target_host_then_live_migrate',
image_uuid=image_fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
original_host = server['OS-EXT-SRV-ATTR:host']
target_host = 'host1' if original_host == 'host2' else 'host2'
# Cold migrate the server to the specific target host.
migrate_req = {'migrate': {'host': target_host}}
self.admin_api.post_server_action(server['id'], migrate_req)
server = self._wait_for_state_change(
self.admin_api, server, 'VERIFY_RESIZE')
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
# Confirm the resize so the server stays on the target host.
confim_req = {'confirmResize': None}
self.admin_api.post_server_action(server['id'], confim_req)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Attempt to live migrate the server but don't specify a host so the
# scheduler has to pick one.
live_migrate_req = {
'os-migrateLive': {'host': None, 'block_migration': 'auto'}}
self.admin_api.post_server_action(server['id'], live_migrate_req)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# The live migration should have been successful and the server is now
# back on the original host.
self.assertEqual(original_host, server['OS-EXT-SRV-ATTR:host'])


@ -103,7 +103,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
stub_check_num_instances_quota)
server = self.api.post_server(server)
server = self._wait_for_state_change(self.api, server, 'ERROR')
server = self._wait_for_state_change(server, 'ERROR')
# At this point, the build request should be gone and the instance
# should have been created in cell1.
context = nova_context.get_admin_context()


@ -57,7 +57,7 @@ class ShowErrorServerWithTags(test.TestCase,
'imageRef': self.image_id
}
})
return self._wait_for_state_change(self.api, server, 'ERROR')
return self._wait_for_state_change(server, 'ERROR')
def test_show_server_tag_in_error(self):
# Create a server which should go to ERROR state because we don't


@ -82,11 +82,11 @@ class NonPersistentFieldNotResetTest(
def _create_server(self):
# Create a server, it doesn't matter on which host it builds.
server = self._build_minimal_create_server_request(
self.api, 'sample-server',
'sample-server',
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
return server
@ -156,8 +156,7 @@ class NonPersistentFieldNotResetTest(
server['id'], {'evacuate': {'host': target_host}})
expected_params = {'OS-EXT-SRV-ATTR:host': original_host,
'status': 'ERROR'}
server = self._wait_for_server_parameter(self.api, server,
expected_params)
server = self._wait_for_server_parameter(server, expected_params)
# Make sure 'is_bfv' is set.
reqspec = objects.RequestSpec.get_by_instance_uuid(self.ctxt,


@ -59,7 +59,7 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase,
# weight.
server = self._build_server(self.api.get_flavors()[0]['id'])
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])
# Disable the host on which the server is now running.
@ -72,5 +72,5 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase,
req = {'evacuate': {'onSharedStorage': False}}
self.api.post_server_action(server['id'], req)
self._wait_for_migration_status(server, ['done'])
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])


@ -67,14 +67,14 @@ class VolumeBackedResizeDiskDown(test.TestCase,
}]
}
server = self.api.post_server({'server': server})
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Now try to resize the server with the flavor that has smaller disk.
# This should be allowed since the server is volume-backed and the
# disk size in the flavor shouldn't matter.
data = {'resize': {'flavorRef': flavor1['id']}}
self.api.post_server_action(server['id'], data)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
# Now confirm the resize just to complete the operation.
self.api.post_server_action(server['id'], {'confirmResize': None})
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')


@ -57,7 +57,7 @@ class FillVirtualInterfaceListMigration(
'imageRef': fake_image.get_valid_image_id()
}
})
return self._wait_for_state_change(self.api, server, 'ACTIVE')
return self._wait_for_state_change(server, 'ACTIVE')
def test_fill_vifs_migration(self):
# Create a test server.


@ -40,7 +40,7 @@ class FinishResizeErrorAllocationCleanupTestCase(
# to avoid a race we need to wait for the migration status to change
# to 'error' which happens after the fault is recorded.
self._wait_for_migration_status(server, ['error'])
server = self._wait_for_state_change(self.admin_api, server, 'ERROR')
server = self._wait_for_state_change(server, 'ERROR')
# The server should be pointing at $dest_host because resize_instance
# will have updated the host/node value on the instance before casting
# to the finish_resize method on the dest compute.

View File

@ -87,11 +87,11 @@ class MissingReqSpecInstanceGroupUUIDTestCase(
# Create a server in the group which should land on host1 due to our
# custom weigher.
server = self._build_minimal_create_server_request(
self.api, 'test_cold_migrate_reschedule')
'test_cold_migrate_reschedule')
body = dict(server=server)
body['os:scheduler_hints'] = {'group': group_id}
server = self.api.post_server(body)
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])
# Verify the group uuid is set in the request spec.
@ -129,8 +129,7 @@ class MissingReqSpecInstanceGroupUUIDTestCase(
with mock.patch.dict(host1_driver.capabilities,
supports_migrate_to_same_host=False):
self.api.post_server_action(server['id'], {'migrate': None})
server = self._wait_for_state_change(
self.api, server, 'VERIFY_RESIZE')
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
# The RequestSpec.instance_group.uuid should still be set.

View File

@ -57,30 +57,29 @@ class RegressionTest1835822(
if server_args:
basic_server.update(server_args)
server = self.api.post_server({'server': basic_server})
return self._wait_for_state_change(self.api, server, 'ACTIVE')
return self._wait_for_state_change(server, 'ACTIVE')
def _hard_reboot_server(self, active_server):
args = {"reboot": {"type": "HARD"}}
self.api.api_post('servers/%s/action' %
active_server['id'], args)
fake_notifier.wait_for_versioned_notifications('instance.reboot.end')
return self._wait_for_state_change(self.api, active_server, 'ACTIVE')
return self._wait_for_state_change(active_server, 'ACTIVE')
def _rebuild_server(self, active_server):
args = {"rebuild": {"imageRef": self.image_ref_1}}
self.api.api_post('servers/%s/action' %
active_server['id'], args)
fake_notifier.wait_for_versioned_notifications('instance.rebuild.end')
return self._wait_for_state_change(self.api, active_server, 'ACTIVE')
return self._wait_for_state_change(active_server, 'ACTIVE')
def _shelve_server(self, active_server):
self.api.post_server_action(active_server['id'], {'shelve': {}})
return self._wait_for_state_change(
self.api, active_server, 'SHELVED_OFFLOADED')
return self._wait_for_state_change(active_server, 'SHELVED_OFFLOADED')
def _unshelve_server(self, shelved_server):
self.api.post_server_action(shelved_server['id'], {'unshelve': {}})
return self._wait_for_state_change(self.api, shelved_server, 'ACTIVE')
return self._wait_for_state_change(shelved_server, 'ACTIVE')
# ---------------------------- tests ----------------------------
def test_create_server_with_config_drive(self):

View File

@ -80,11 +80,11 @@ class BuildRescheduleClaimFailsTestCase(
# Now that our stub is in place, try to create a server and wait for it
# to go to ERROR status.
server = self._build_minimal_create_server_request(
self.api, 'test_build_reschedule_alt_host_alloc_fails',
'test_build_reschedule_alt_host_alloc_fails',
image_uuid=fake_image.get_valid_image_id(),
networks=[{'port': self.neutron.port_1['id']}])
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ERROR')
server = self._wait_for_state_change(server, 'ERROR')
# Wait for the MaxRetriesExceeded fault to be recorded.
# set_vm_state_and_notify sets the vm_state to ERROR before the fault

View File

@ -43,12 +43,12 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
self.flags(compute=version_cap, group='upgrade_levels')
server_req = self._build_minimal_create_server_request(
self.api, 'server1',
'server1',
networks=[],
image_uuid=fake_image.get_valid_image_id(),
flavor_id=self.flavor1['id'])
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
orig_claim = nova.compute.resource_tracker.ResourceTracker.resize_claim
claim_calls = []
@ -76,12 +76,10 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
# We expect that the instance is on host3 as the scheduler
# selected host2 due to our weigher and the cold migrate failed
        # there and re-scheduled to host3 where it succeeded.
self._wait_for_server_parameter(
self.api, server,
{
'OS-EXT-SRV-ATTR:host': 'host3',
'OS-EXT-STS:task_state': None,
'status': 'VERIFY_RESIZE'})
self._wait_for_server_parameter(server, {
'OS-EXT-SRV-ATTR:host': 'host3',
'OS-EXT-STS:task_state': None,
'status': 'VERIFY_RESIZE'})
# we ensure that there was a failed and then a successful claim call
self.assertEqual(['host2', 'host3'], claim_calls)

View File

@ -68,11 +68,9 @@ class ForcedHostMissingReScheduleTestCase(
# We expect that the instance re-scheduled but successfully ended
# up on the second destination host.
self._wait_for_server_parameter(
self.api, server,
{
'OS-EXT-STS:task_state': None,
'status': 'VERIFY_RESIZE'})
self._wait_for_server_parameter(server, {
'OS-EXT-STS:task_state': None,
'status': 'VERIFY_RESIZE'})
# we ensure that there was a failed and then a successful claim call
self.assertEqual(2, len(claim_calls))

View File

@ -37,10 +37,10 @@ class DeletedServerAllocationRevertTest(
source host and target host.
"""
server = self._build_minimal_create_server_request(
self.api, name, image_uuid=fake_image.get_valid_image_id(),
name, image_uuid=fake_image.get_valid_image_id(),
networks='none')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
source_host = server['OS-EXT-SRV-ATTR:host']
target_host = 'host2' if source_host == 'host1' else 'host1'
return server, source_host, target_host
@ -127,7 +127,7 @@ class DeletedServerAllocationRevertTest(
# action event after the task rollback happens.
self._wait_for_action_fail_completion(
server, instance_actions.LIVE_MIGRATION,
'conductor_live_migrate_instance', api=self.api)
'conductor_live_migrate_instance')
self._assert_no_allocations(server)
def test_migrate_on_compute_fail(self):
@ -155,6 +155,5 @@ class DeletedServerAllocationRevertTest(
# when the instance is deleted so just wait for the failed instance
# action event after the allocation revert happens.
self._wait_for_action_fail_completion(
server, instance_actions.MIGRATE, 'compute_prep_resize',
api=self.api)
server, instance_actions.MIGRATE, 'compute_prep_resize')
self._assert_no_allocations(server)
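
_wait_for_action_fail_completion gets the same cleanup in the two call sites above: the api kwarg goes away. Continuing the sketch from earlier inside the same mixin, such a helper can resolve the client the same way and poll os-instance-actions until the named event fails; the response field names below match the compute API's instance-actions documents, but the polling details are assumed:

    def _wait_for_action_fail_completion(self, server, expected_action,
                                         event_name, max_retries=10):
        # Assumed implementation: look for the named event with an
        # 'Error' result under the expected instance action.
        api = getattr(self, 'admin_api', self.api)
        for _ in range(max_retries):
            actions = api.api_get(
                '/servers/%s/os-instance-actions'
                % server['id']).body['instanceActions']
            for action in actions:
                if action['action'] != expected_action:
                    continue
                detail = api.api_get(
                    '/servers/%s/os-instance-actions/%s'
                    % (server['id'], action['request_id'])
                ).body['instanceAction']
                for event in detail.get('events', []):
                    if (event['event'] == event_name
                            and event['result'] == 'Error'):
                        return event
            time.sleep(0.5)
        self.fail('Timed out waiting for %s to fail' % event_name)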

View File

@ -52,11 +52,9 @@ class UpdateResourceMigrationRaceTest(
server['id'],
{'os-migrateLive': {'host': None, 'block_migration': 'auto'}})
self._wait_for_server_parameter(
self.api, server,
{
'OS-EXT-STS:task_state': None,
'status': 'ACTIVE'})
self._wait_for_server_parameter(server, {
'OS-EXT-STS:task_state': None,
'status': 'ACTIVE'})
# NOTE(efried): This was bug 1849165 where
# _populate_assigned_resources raised a TypeError because it tried

View File

@ -42,10 +42,10 @@ class ListDeletedServersWithMarker(test.TestCase,
def test_list_deleted_servers_with_marker(self):
# Create a server.
server = self._build_minimal_create_server_request(
self.api, 'test_list_deleted_servers_with_marker',
'test_list_deleted_servers_with_marker',
image_uuid=fake_image.get_valid_image_id())
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Now delete the server and wait for it to be gone.
self.api.delete_server(server['id'])
self._wait_until_deleted(server)

View File

@ -215,13 +215,12 @@ class AggregateRequestFiltersTest(
flavor_id = flavor_id or self.flavors[0]['id']
image_uuid = image_id or '155d900f-4e14-4e4c-a73d-069cbf4541e6'
server_req = self._build_minimal_create_server_request(
self.api, 'test-instance', flavor_id=flavor_id,
'test-instance', flavor_id=flavor_id,
image_uuid=image_uuid,
networks='none', az=az)
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(
self.admin_api, created_server, end_status)
server = self._wait_for_state_change(created_server, end_status)
return server
@ -330,8 +329,7 @@ class AggregatePostTest(AggregateRequestFiltersTest):
# Configure for the SOFT_DELETED scenario.
self.flags(reclaim_instance_interval=300)
self.api.delete_server(server['id'])
server = self._wait_for_state_change(
self.admin_api, server, 'SOFT_DELETED')
server = self._wait_for_state_change(server, 'SOFT_DELETED')
self.assertRaisesRegex(
client.OpenStackApiException,
'One or more hosts contain instances in this zone.',
@ -876,7 +874,7 @@ class TestAggregateMultiTenancyIsolationFilter(
aggregate
"""
# Create a tenant-isolated aggregate for the non-admin user.
user_api = self.useFixture(
self.api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1',
project_id=uuids.non_admin)).api
agg_id = self.admin_api.post_aggregate(
@ -901,13 +899,12 @@ class TestAggregateMultiTenancyIsolationFilter(
spy_get_filtered_hosts)
# Create a server for the admin - should only have one host candidate.
server_req = self._build_minimal_create_server_request(
self.admin_api,
'test_aggregate_multitenancy_isolation_filter-admin',
networks='none') # requires microversion 2.37
server_req = {'server': server_req}
with utils.temporary_mutation(self.admin_api, microversion='2.37'):
server = self.admin_api.post_server(server_req)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Assert it's not on host2 which is isolated to the non-admin tenant.
self.assertNotEqual('host2', server['OS-EXT-SRV-ATTR:host'])
self.assertEqual(1, len(self.filtered_hosts))
@ -917,13 +914,12 @@ class TestAggregateMultiTenancyIsolationFilter(
# up on host2 because the other host, which is not isolated to the
# aggregate, is still a candidate.
server_req = self._build_minimal_create_server_request(
user_api,
'test_aggregate_multitenancy_isolation_filter-user',
networks='none') # requires microversion 2.37
server_req = {'server': server_req}
with utils.temporary_mutation(user_api, microversion='2.37'):
server = user_api.post_server(server_req)
self._wait_for_state_change(user_api, server, 'ACTIVE')
with utils.temporary_mutation(self.api, microversion='2.37'):
server = self.api.post_server(server_req)
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(2, len(self.filtered_hosts))
@ -1028,10 +1024,10 @@ class AggregateMultiTenancyIsolationColdMigrateTest(
"""
img = nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID
server_req_body = self._build_minimal_create_server_request(
self.api, 'test_cold_migrate_server', image_uuid=img,
'test_cold_migrate_server', image_uuid=img,
networks='none')
server = self.api.post_server({'server': server_req_body})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Ensure the server ended up in host2 or host3
original_host = server['OS-EXT-SRV-ATTR:host']
self.assertNotEqual('host1', original_host)
@ -1039,8 +1035,7 @@ class AggregateMultiTenancyIsolationColdMigrateTest(
# in the same tenant-isolated aggregate.
self.admin_api.api_post(
'/servers/%s/action' % server['id'], {'migrate': None})
server = self._wait_for_state_change(
self.admin_api, server, 'VERIFY_RESIZE')
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
# Ensure the server is on the other host in the same aggregate.
expected_host = 'host3' if original_host == 'host2' else 'host2'
self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
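
A side effect of the cleanup shows in the multitenancy-isolation hunks above: the fixture-created non-admin client moves from a local user_api variable onto self.api so the parameterless helpers can find it. The utils.temporary_mutation calls wrapped around post_server are untouched; they just pin an attribute such as the client's microversion for the duration of a with block. A minimal sketch of such a context manager (the real nova.utils version also copes with dict-like objects and attributes that must be deleted on exit, which is glossed over here):

    import contextlib


    @contextlib.contextmanager
    def temporary_mutation(obj, **kwargs):
        # Swap in the new attribute values, then restore the originals
        # on exit, e.g. pinning a client to microversion 2.37 for a
        # single request.
        originals = {attr: getattr(obj, attr) for attr in kwargs}
        for attr, value in kwargs.items():
            setattr(obj, attr, value)
        try:
            yield
        finally:
            for attr, value in originals.items():
                setattr(obj, attr, value)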

View File

@ -74,10 +74,10 @@ class TestAvailabilityZoneScheduling(
def _create_server(self, name):
# Create a server, it doesn't matter which host it ends up in.
server_body = self._build_minimal_create_server_request(
self.api, name, image_uuid=fake_image.get_valid_image_id(),
name, image_uuid=fake_image.get_valid_image_id(),
flavor_id=self.flavor1, networks='none')
server = self.api.post_server({'server': server_body})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
original_host = server['OS-EXT-SRV-ATTR:host']
# Assert the server has the AZ set (not None or 'nova').
expected_zone = 'zone1' if original_host == 'host1' else 'zone2'
@ -153,7 +153,7 @@ class TestAvailabilityZoneScheduling(
# Resize the server which should move it to the other zone.
self.api.post_server_action(
server['id'], {'resize': {'flavorRef': self.flavor2}})
server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
# Now the server should be in the other AZ.
new_zone = 'zone2' if original_host == 'host1' else 'zone1'
@ -161,5 +161,5 @@ class TestAvailabilityZoneScheduling(
# Revert the resize and the server should be back in the original AZ.
self.api.post_server_action(server['id'], {'revertResize': None})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self._assert_instance_az(server, original_az)

View File

@ -79,7 +79,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
server['block_device_mapping_v2'] = [bdm]
created_server = self.api.post_server({"server": server})
server_id = created_server['id']
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
self._wait_for_state_change(created_server, 'ACTIVE')
# Check that hypervisor local disk reporting is still 0
self._verify_zero_local_gb_used()
@ -94,7 +94,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
# Resize
post_data = {'resize': {'flavorRef': flavor_id_alt}}
self.api.post_server_action(server_id, post_data)
self._wait_for_state_change(self.api, created_server, 'VERIFY_RESIZE')
self._wait_for_state_change(created_server, 'VERIFY_RESIZE')
# Check that hypervisor local disk reporting is still 0
self._verify_zero_local_gb_used()
@ -106,7 +106,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
# Confirm the resize
post_data = {'confirmResize': None}
self.api.post_server_action(server_id, post_data)
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
self._wait_for_state_change(created_server, 'ACTIVE')
# Check that hypervisor local disk reporting is still 0
self._verify_zero_local_gb_used()
@ -118,7 +118,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
# Shelve
post_data = {'shelve': None}
self.api.post_server_action(server_id, post_data)
self._wait_for_state_change(self.api, created_server,
self._wait_for_state_change(created_server,
'SHELVED_OFFLOADED')
# Check that hypervisor local disk reporting is still 0
@ -131,7 +131,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
# Unshelve
post_data = {'unshelve': None}
self.api.post_server_action(server_id, post_data)
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
self._wait_for_state_change(created_server, 'ACTIVE')
# Check that hypervisor local disk reporting is still 0
self._verify_zero_local_gb_used()
@ -146,7 +146,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
post_data = {'rebuild': {'imageRef': image_uuid}}
self.api.post_server_action(server_id, post_data)
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
self._wait_for_state_change(created_server, 'ACTIVE')
# Check that hypervisor local disk reporting is still 0
self._verify_zero_local_gb_used()
@ -161,7 +161,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
"""
self.flags(max_local_block_devices=0)
server = self._build_minimal_create_server_request(
self.admin_api, 'test_max_local_block_devices_0_force_bfv')
'test_max_local_block_devices_0_force_bfv')
ex = self.assertRaises(api_client.OpenStackApiException,
self.admin_api.post_server,
{'server': server})
@ -203,7 +203,7 @@ class BootFromVolumeLargeRequestTest(test.TestCase,
image1 = 'a2459075-d96c-40d5-893e-577ff92e721c'
image2 = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
server = self._build_minimal_create_server_request(
self.api, 'test_boot_from_volume_10_servers_255_volumes_2_images')
'test_boot_from_volume_10_servers_255_volumes_2_images')
server.pop('imageRef')
server['min_count'] = 10
bdms = []

View File

@ -51,7 +51,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin,
'destination_type': 'volume'}
server['block_device_mapping_v2'] = [bdm]
created_server = self.api.post_server({"server": server})
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
self._wait_for_state_change(created_server, 'ACTIVE')
def test_boot_from_volume_plus_attach_max_exceeded(self):
# Set the maximum to 1, boot from 1 volume, and attach one volume.
@ -72,7 +72,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin,
created_server = self.api.post_server({"server": server})
server_id = created_server['id']
# Server should go into ERROR state
self._wait_for_state_change(self.api, created_server, 'ERROR')
self._wait_for_state_change(created_server, 'ERROR')
# Verify the instance fault
server = self.api.get_server(server_id)
# If anything fails during _prep_block_device, a 500 internal server
@ -95,7 +95,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin,
server = self._build_server(flavor_id='1')
created_server = self.api.post_server({"server": server})
server_id = created_server['id']
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
self._wait_for_state_change(created_server, 'ACTIVE')
# Attach one volume, should pass.
vol_id = '9a695496-44aa-4404-b2cc-ccab2501f87e'
self.api.post_server_volume(

View File

@ -60,7 +60,6 @@ class CrossAZAttachTestCase(test.TestCase,
"""
self.flags(cross_az_attach=False, group='cinder')
server = self._build_minimal_create_server_request(
self.api,
'test_cross_az_attach_false_boot_from_volume_no_az_specified')
del server['imageRef'] # Do not need imageRef for boot from volume.
server['block_device_mapping_v2'] = [{
@ -70,7 +69,7 @@ class CrossAZAttachTestCase(test.TestCase,
'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
}]
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])
def test_cross_az_attach_false_data_volume_no_az_specified(self):
@ -82,7 +81,6 @@ class CrossAZAttachTestCase(test.TestCase,
"""
self.flags(cross_az_attach=False, group='cinder')
server = self._build_minimal_create_server_request(
self.api,
'test_cross_az_attach_false_data_volume_no_az_specified')
# Note that we use the legacy block_device_mapping parameter rather
# than block_device_mapping_v2 because that will create an implicit
@ -95,7 +93,7 @@ class CrossAZAttachTestCase(test.TestCase,
'volume_id': nova_fixtures.CinderFixture.SWAP_OLD_VOL
}]
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])
def test_cross_az_attach_false_boot_from_volume_default_zone_match(self):
@ -106,7 +104,6 @@ class CrossAZAttachTestCase(test.TestCase,
self.flags(cross_az_attach=False, group='cinder')
self.flags(default_schedule_zone=self.az)
server = self._build_minimal_create_server_request(
self.api,
'test_cross_az_attach_false_boot_from_volume_default_zone_match')
del server['imageRef'] # Do not need imageRef for boot from volume.
server['block_device_mapping_v2'] = [{
@ -116,7 +113,7 @@ class CrossAZAttachTestCase(test.TestCase,
'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
}]
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])
def test_cross_az_attach_false_bfv_az_specified_mismatch(self):
@ -126,7 +123,7 @@ class CrossAZAttachTestCase(test.TestCase,
"""
self.flags(cross_az_attach=False, group='cinder')
server = self._build_minimal_create_server_request(
self.api, 'test_cross_az_attach_false_bfv_az_specified_mismatch',
'test_cross_az_attach_false_bfv_az_specified_mismatch',
az='london')
del server['imageRef'] # Do not need imageRef for boot from volume.
server['block_device_mapping_v2'] = [{
@ -150,7 +147,7 @@ class CrossAZAttachTestCase(test.TestCase,
"""
self.flags(cross_az_attach=False, group='cinder')
server = self._build_minimal_create_server_request(
self.api, 'test_cross_az_attach_false_no_volumes', az=self.az)
'test_cross_az_attach_false_no_volumes', az=self.az)
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])

View File

@ -127,7 +127,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
}]
image_uuid = fake_image.get_valid_image_id()
server = self._build_minimal_create_server_request(
self.api, 'test_cross_cell_resize',
'test_cross_cell_resize',
image_uuid=image_uuid,
flavor_id=flavor['id'],
networks=networks)
@ -146,7 +146,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
server.pop('imageRef', None)
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# For volume-backed make sure there is one attachment to start.
if volume_backed:
self.assertEqual(1, self._count_volume_attachments(server['id']),
@ -200,7 +200,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
if stopped:
# Stop the server before resizing it.
self.api.post_server_action(server['id'], {'os-stop': None})
self._wait_for_state_change(self.api, server, 'SHUTOFF')
self._wait_for_state_change(server, 'SHUTOFF')
# Before resizing make sure quota usage is only 1 for total instances.
self.assert_quota_usage(expected_num_instances=1)
@ -222,7 +222,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
self.api.post_server_action(server['id'], body)
# Wait for the server to be resized and then verify the host has
# changed to be the host in the other cell.
server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
# Assert that the instance is only listed one time from the API (to
# make sure it's not listed out of both cells).
@ -487,8 +487,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
# The server should go to ERROR state with a fault record and
# the API should still be showing the server from the source cell
# because the instance mapping was not updated.
server = self._wait_for_server_parameter(
self.admin_api, server,
server = self._wait_for_server_parameter(server,
{'status': 'ERROR', 'OS-EXT-STS:task_state': None})
# The migration should be in 'error' status.
@ -511,12 +510,12 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
# Now hard reboot the server in the source cell and it should go back
# to ACTIVE.
self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}})
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Now retry the resize without the fault in the target host to make
# sure things are OK (no duplicate entry errors in the target DB).
self.api.post_server_action(server['id'], body)
self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')
def _assert_instance_not_in_cell(self, cell_name, server_id):
cell = self.cell_mappings[cell_name]
@ -567,8 +566,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
# The server should go to ERROR state with a fault record and
# the API should still be showing the server from the source cell
# because the instance mapping was not updated.
server = self._wait_for_server_parameter(
self.admin_api, server,
server = self._wait_for_server_parameter(server,
{'status': 'ERROR', 'OS-EXT-STS:task_state': None})
# The migration should be in 'error' status.
@ -585,9 +583,9 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
# Now hard reboot the server in the source cell and it should go back
# to ACTIVE.
self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}})
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Now retry the resize without the fault in the target host to make
# sure things are OK (no duplicate entry errors in the target DB).
self.api.post_server_action(server['id'], body)
self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE')
self._wait_for_state_change(server, 'VERIFY_RESIZE')

View File

@ -60,10 +60,10 @@ class JsonFilterTestCase(integrated_helpers.ProviderUsageBaseTestCase):
# custom HostNameWeigher, host1 would be chosen.
query = jsonutils.dumps(['=', '$hypervisor_hostname', 'host2'])
server = self._build_minimal_create_server_request(
self.api, 'test_filter_on_hypervisor_hostname')
'test_filter_on_hypervisor_hostname')
request = {'server': server, 'os:scheduler_hints': {'query': query}}
server = self.api.post_server(request)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Since we request host2 the server should be there despite host1 being
# weighed higher.
self.assertEqual(

View File

@ -51,7 +51,7 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase,
'boot_index': 0
}]
server = self.api.post_server({'server': create_req})
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Make sure the volume is attached to the first server.
attachments = self.api.api_get(
'/servers/%s/os-volume_attachments' % server['id']).body[
@ -65,7 +65,7 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase,
flavor_id='1', image='155d900f-4e14-4e4c-a73d-069cbf4541e6')
create_req['networks'] = 'none'
server2 = self.api.post_server({'server': create_req})
self._wait_for_state_change(self.api, server2, 'ACTIVE')
self._wait_for_state_change(server2, 'ACTIVE')
# Attach the volume to the second server.
self.api.api_post('/servers/%s/os-volume_attachments' % server2['id'],
{'volumeAttachment': {'volumeId': volume_id}})

View File

@ -414,7 +414,7 @@ class TestNovaManagePlacementHealAllocations(
provider uuid
"""
server_req = self._build_minimal_create_server_request(
self.api, 'some-server', flavor_id=flavor['id'],
'some-server', flavor_id=flavor['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server_req['availability_zone'] = 'nova:%s' % hostname
@ -428,8 +428,7 @@ class TestNovaManagePlacementHealAllocations(
}]
server_req['imageRef'] = ''
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(
self.admin_api, created_server, 'ACTIVE')
server = self._wait_for_state_change(created_server, 'ACTIVE')
# Verify that our source host is what the server ended up on
self.assertEqual(hostname, server['OS-EXT-SRV-ATTR:host'])
@ -564,8 +563,7 @@ class TestNovaManagePlacementHealAllocations(
# The server status goes to SHELVED_OFFLOADED before the host/node
# is nulled out in the compute service, so we also have to wait for
# that so we don't race when we run heal_allocations.
server = self._wait_for_server_parameter(
self.admin_api, server,
server = self._wait_for_server_parameter(server,
{'OS-EXT-SRV-ATTR:host': None, 'status': 'SHELVED_OFFLOADED'})
result = self.cli.heal_allocations(verbose=True)
self.assertEqual(4, result, self.output.getvalue())
@ -788,7 +786,7 @@ class TestNovaManagePlacementHealPortAllocations(
server = self._create_server(
flavor=self.flavor,
networks=[{'port': port['id']} for port in ports])
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# This is a hack to simulate that we have a server that is missing
        # an allocation for its port
@ -1482,23 +1480,23 @@ class TestDBArchiveDeletedRowsMultiCell(integrated_helpers.InstanceHelperMixin,
# Boot a server to cell1
server_ids = {}
server = self._build_minimal_create_server_request(
self.api, 'cell1-server', az='nova:host1')
'cell1-server', az='nova:host1')
created_server = self.api.post_server({'server': server})
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
self._wait_for_state_change(created_server, 'ACTIVE')
server_ids['cell1'] = created_server['id']
# Boot a server to cell2
server = self._build_minimal_create_server_request(
self.api, 'cell2-server', az='nova:host2')
'cell2-server', az='nova:host2')
created_server = self.api.post_server({'server': server})
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
self._wait_for_state_change(created_server, 'ACTIVE')
server_ids['cell2'] = created_server['id']
# Boot a server to cell0 (cause ERROR state prior to schedule)
server = self._build_minimal_create_server_request(
self.api, 'cell0-server')
'cell0-server')
# Flavor m1.xlarge cannot be fulfilled
server['flavorRef'] = 'http://fake.server/5'
created_server = self.api.post_server({'server': server})
self._wait_for_state_change(self.api, created_server, 'ERROR')
self._wait_for_state_change(created_server, 'ERROR')
server_ids['cell0'] = created_server['id']
# Verify all the servers are in the databases
for cell_name, server_id in server_ids.items():

View File

@ -69,9 +69,9 @@ class HostStatusPolicyTestCase(test.TestCase,
# Starting with microversion 2.37 the networks field is required.
kwargs['networks'] = networks
server = self._build_minimal_create_server_request(
self.api, 'test_host_status_unknown_only', **kwargs)
'test_host_status_unknown_only', **kwargs)
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
return server
@staticmethod
@ -94,12 +94,12 @@ class HostStatusPolicyTestCase(test.TestCase,
server = self._get_server(admin_func())
# We need to wait for ACTIVE if this was a post rebuild server action,
# else a subsequent rebuild request will fail with a 409 in the API.
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Verify admin can see the host status UP.
self.assertEqual('UP', server['host_status'])
# Get server as normal non-admin user.
server = self._get_server(func())
self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
        # Verify the non-admin does not receive the host_status field because it is
# not UNKNOWN.
self.assertNotIn('host_status', server)

View File

@ -50,8 +50,7 @@ class MultiCellSchedulerTestCase(test.TestCase,
self.addCleanup(fake_image.FakeImageService_reset)
def _test_create_and_migrate(self, expected_status, az=None):
server = self._build_minimal_create_server_request(self.api,
'some-server',
server = self._build_minimal_create_server_request('some-server',
az=az)
post = {'server': server}
# If forcing the server onto a host we have to use the admin API.
@ -59,8 +58,7 @@ class MultiCellSchedulerTestCase(test.TestCase,
created_server = api.post_server(post)
# Wait for it to finish being created
found_server = self._wait_for_state_change(
self.admin_api, created_server, 'ACTIVE')
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
return self.admin_api.api_post(
'/servers/%s/action' % found_server['id'],
{'migrate': None},

View File

@ -28,12 +28,11 @@ class ServerExternalEventsTestV276(
flavors = self.api.get_flavors()
server_req = self._build_minimal_create_server_request(
self.api, "some-server", flavor_id=flavors[0]["id"],
"some-server", flavor_id=flavors[0]["id"],
image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6",
networks='none')
created_server = self.api.post_server({'server': server_req})
self.server = self._wait_for_state_change(
self.api, created_server, 'ACTIVE')
self.server = self._wait_for_state_change(created_server, 'ACTIVE')
self.power_off = {'name': 'power-update',
'tag': 'POWER_OFF',
'server_uuid': self.server["id"]}
@ -50,7 +49,7 @@ class ServerExternalEventsTestV276(
expected_params = {'OS-EXT-STS:task_state': None,
'OS-EXT-STS:vm_state': vm_states.STOPPED,
'OS-EXT-STS:power_state': power_state.SHUTDOWN}
server = self._wait_for_server_parameter(self.api, self.server,
server = self._wait_for_server_parameter(self.server,
expected_params)
msg = ' with target power state POWER_OFF.'
self.assertIn(msg, self.stdlog.logger.output)
@ -79,8 +78,7 @@ class ServerExternalEventsTestV276(
expected_params = {'OS-EXT-STS:task_state': None,
'OS-EXT-STS:vm_state': vm_states.ACTIVE,
'OS-EXT-STS:power_state': power_state.RUNNING}
server = self._wait_for_server_parameter(self.api, self.server,
expected_params)
server = self._wait_for_server_parameter(self.server, expected_params)
msg = ' with target power state POWER_ON.'
self.assertIn(msg, self.stdlog.logger.output)
# Test if this is logged in the instance action list.

View File

@ -55,18 +55,18 @@ class ServerFaultTestCase(test.TestCase,
"""
# Create the server with the non-admin user.
server = self._build_minimal_create_server_request(
self.api, 'test_server_fault_non_nova_exception',
'test_server_fault_non_nova_exception',
image_uuid=fake_image.get_valid_image_id(),
networks=[{'port': nova_fixtures.NeutronFixture.port_1['id']}])
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Stop the server before rebooting it so that after the driver.reboot
# method raises an exception, the fake driver does not report the
# instance power state as running - that will make the compute manager
# set the instance vm_state to error.
self.api.post_server_action(server['id'], {'os-stop': None})
server = self._wait_for_state_change(self.admin_api, server, 'SHUTOFF')
server = self._wait_for_state_change(server, 'SHUTOFF')
# Stub out the compute driver reboot method to raise a non-nova
# exception to simulate some error from the underlying hypervisor
@ -83,8 +83,8 @@ class ServerFaultTestCase(test.TestCase,
# decorator runs before the reverts_task_state decorator so we will
# be sure the fault is set on the server.
server = self._wait_for_server_parameter(
self.api, server, {'status': 'ERROR',
'OS-EXT-STS:task_state': None})
server, {'status': 'ERROR', 'OS-EXT-STS:task_state': None},
api=self.api)
mock_reboot.assert_called_once()
# The server fault from the non-admin user API response should not
# have details in it.

View File

@ -90,7 +90,7 @@ class ServerGroupTestBase(test.TestCase,
expected_status='ACTIVE', flavor=None,
az=None):
server = self._build_minimal_create_server_request(
self.api, 'some-server',
'some-server',
image_uuid='a2459075-d96c-40d5-893e-577ff92e721c', networks=[],
az=az)
if flavor:
@ -103,7 +103,7 @@ class ServerGroupTestBase(test.TestCase,
# Wait for it to finish being created
found_server = self._wait_for_state_change(
self.admin_api, created_server, expected_status)
created_server, expected_status)
return found_server
@ -323,8 +323,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}}
self.api.post_server_action(servers[1]['id'], post)
rebuilt_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
rebuilt_server = self._wait_for_state_change(servers[1], 'ACTIVE')
self.assertEqual(post['rebuild']['imageRef'],
rebuilt_server.get('image')['id'])
@ -369,7 +368,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
post = {'migrate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
migrated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'VERIFY_RESIZE')
servers[1], 'VERIFY_RESIZE')
self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
migrated_server['OS-EXT-SRV-ATTR:host'])
@ -384,7 +383,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
server1_old_host = servers[1]['OS-EXT-SRV-ATTR:host']
self.admin_api.post_server_action(servers[1]['id'], post)
migrated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'VERIFY_RESIZE')
servers[1], 'VERIFY_RESIZE')
self.assertEqual(server1_old_host,
migrated_server['OS-EXT-SRV-ATTR:host'])
@ -424,8 +423,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
post = {'evacuate': {'onSharedStorage': False}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['done'])
evacuated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE')
# check that the server is evacuated to another host
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@ -447,7 +445,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
servers[1], 'ERROR')
        # assert that after a failed evac the server is active on the same host
# as before
@ -467,7 +465,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
servers[1], 'ERROR')
        # assert that after a failed evac the server is active on the same host
# as before
@ -608,8 +606,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['done'])
evacuated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE')
# check that the server is evacuated
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@ -633,7 +630,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
servers[1], 'ERROR')
        # assert that after a failed evac the server is active on the same host
# as before
@ -653,7 +650,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
servers[1], 'ERROR')
        # assert that after a failed evac the server is active on the same host
# as before
@ -767,7 +764,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
post = {'migrate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
migrated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'VERIFY_RESIZE')
servers[1], 'VERIFY_RESIZE')
return [migrated_server['OS-EXT-SRV-ATTR:host'],
servers[0]['OS-EXT-SRV-ATTR:host']]
@ -794,8 +791,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['done'])
evacuated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE')
# Note(gibi): need to get the server again as the state of the instance
# goes to ACTIVE first then the host of the instance changes to the
@ -974,7 +970,6 @@ class TestAntiAffinityLiveMigration(test.TestCase,
servers = []
for x in range(2):
server = self._build_minimal_create_server_request(
self.api,
'test_serial_no_valid_host_then_pass_with_third_host-%d' % x,
networks='none')
# Add the group hint so the server is created in our group.
@ -986,8 +981,7 @@ class TestAntiAffinityLiveMigration(test.TestCase,
with utils.temporary_mutation(self.api, microversion='2.37'):
server = self.api.post_server(server_req)
servers.append(
self._wait_for_state_change(
self.admin_api, server, 'ACTIVE'))
self._wait_for_state_change(server, 'ACTIVE'))
# Make sure each server is on a unique host.
hosts = set([svr['OS-EXT-SRV-ATTR:host'] for svr in servers])
@ -1021,7 +1015,7 @@ class TestAntiAffinityLiveMigration(test.TestCase,
# should work this time.
self.start_service('compute', host='host3')
self.admin_api.post_server_action(server['id'], body)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
server = self._wait_for_state_change(server, 'ACTIVE')
# Now the server should be on host3 since that was the only available
# host for the live migration.
self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])

File diff suppressed because it is too large

View File

@ -419,11 +419,11 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
def _create_instance(self, flavor):
server_req = self._build_minimal_create_server_request(
self.api, 'some-server', flavor_id=flavor['id'],
'some-server', flavor_id=flavor['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none', az='nova:host1')
inst = self.api.post_server({'server': server_req})
return self._wait_for_state_change(self.admin_api, inst, 'ACTIVE')
return self._wait_for_state_change(inst, 'ACTIVE')
def test_reshape(self):
"""On startup, virt driver signals it needs to reshape, then does so.

View File

@ -354,7 +354,6 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
servers_policies.ZERO_DISK_FLAVOR: base_policies.RULE_ADMIN_API},
overwrite=False)
server_req = self._build_minimal_create_server_request(
self.api,
'test_create_image_backed_server_with_zero_disk_fails',
fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
self.zero_disk_flavor['id'])
@ -376,7 +375,6 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
self.start_service('conductor')
self.start_service('scheduler')
server_req = self._build_minimal_create_server_request(
self.api,
'test_create_volume_backed_server_with_zero_disk_allowed',
flavor_id=self.zero_disk_flavor['id'])
server_req.pop('imageRef', None)
@ -387,5 +385,5 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
'boot_index': 0
}]
server = self.admin_api.post_server({'server': server_req})
server = self._wait_for_state_change(self.api, server, 'ERROR')
server = self._wait_for_state_change(server, 'ERROR')
self.assertIn('No valid host', server['fault']['message'])

View File

@ -147,9 +147,9 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
self.admin_api.post_server_action(server['id'], {'evacuate': {}})
# The host does not change until after the status is changed to ACTIVE
# so wait for both parameters.
self._wait_for_server_parameter(
self.admin_api, server, {'status': 'ACTIVE',
'OS-EXT-SRV-ATTR:host': 'host2'})
self._wait_for_server_parameter(server, {
'status': 'ACTIVE',
'OS-EXT-SRV-ATTR:host': 'host2'})
# Delete the compute service for host1 and check the related
# placement resources for that host.
self.admin_api.api_delete('/os-services/%s' % service['id'])
@ -324,10 +324,10 @@ class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase):
# Try creating a server which should fail because nothing is available.
networks = [{'port': self.neutron.port_1['id']}]
server_req = self._build_minimal_create_server_request(
self.api, 'test_compute_status_filter',
'test_compute_status_filter',
image_uuid=fake_image.get_valid_image_id(), networks=networks)
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(self.api, server, 'ERROR')
server = self._wait_for_state_change(server, 'ERROR')
# There should be a NoValidHost fault recorded.
self.assertIn('fault', server)
self.assertIn('No valid host', server['fault']['message'])
@ -339,7 +339,7 @@ class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase):
# Try creating another server and it should be OK.
server = self.api.post_server({'server': server_req})
self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_state_change(server, 'ACTIVE')
# Stop, force-down and disable the service so the API cannot call
# the compute service to sync the trait.
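
The other helper touched throughout is _build_minimal_create_server_request, which likewise loses its leading api argument and is now keyed off the server name. A rough sketch of the post-change builder, assuming the request-body shape used by the call sites in this diff (the default image and flavor lookups and the host handling are illustrative, not taken from the source):

    def _build_minimal_create_server_request(self, name, image_uuid=None,
                                             flavor_id=None, networks=None,
                                             az=None, host=None):
        # Build the minimal POST /servers body the tests need.
        server = {
            'name': name,
            'imageRef': image_uuid or self.api.get_images()[0]['id'],
            'flavorRef': flavor_id or self.api.get_flavors()[0]['id'],
        }
        if networks is not None:
            # 'auto', 'none' or [{'port': ...}]; requires microversion 2.37.
            server['networks'] = networks
        if az is not None:
            server['availability_zone'] = az
        if host is not None:
            # Forcing a host goes through the AZ field as 'nova:<host>'.
            server['availability_zone'] = 'nova:%s' % host
        return server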