Merge "functional: Remove 'api' parameter"

commit 2e7a0088c2
Zuul, 2019-12-12 13:37:05 +00:00 (committed by Gerrit Code Review)
65 changed files with 449 additions and 519 deletions
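
The change applied across all 65 files is mechanical: test helpers such as _wait_for_state_change, _wait_for_server_parameter, and _build_minimal_create_server_request no longer take an explicit API client argument and instead resolve one themselves, preferring the admin API when the test case has one. A minimal before/after sketch of the call-site pattern, taken from the hunks below:

    # Before: every caller passed an API client explicitly.
    server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')

    # After: the helper resolves the client on its own.
    server = self._wait_for_state_change(server, 'ACTIVE')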

@@ -34,7 +34,7 @@ class ServerActionsSampleJsonTest(test_servers.ServersSampleBase,
         response_data = api_samples_test_base.pretty_data(response.content)
         actions = api_samples_test_base.objectify(response_data)
         self.action_stop = actions['instanceActions'][0]
-        self._wait_for_state_change(self.api, {'id': self.uuid}, 'SHUTOFF')
+        self._wait_for_state_change({'id': self.uuid}, 'SHUTOFF')

     def _get_subs(self):
         return {

@@ -32,14 +32,14 @@ class MultinicSampleJsonTest(integrated_helpers.InstanceHelperMixin,
     def _boot_a_server(self, expected_status='ACTIVE', extra_params=None):
         server = self._build_minimal_create_server_request(
-            self.api, 'MultinicSampleJsonTestServer')
+            'MultinicSampleJsonTestServer')
         if extra_params:
             server.update(extra_params)
         created_server = self.api.post_server({'server': server})

         # Wait for it to finish being created
-        found_server = self._wait_for_state_change(self.api, created_server,
+        found_server = self._wait_for_state_change(created_server,
                                                    expected_status)
         return found_server

@@ -40,10 +40,10 @@ class ComputeManagerInitHostTestCase(
         # Create a server, it does not matter on which host it lands.
         name = 'test_migrate_disk_and_power_off_crash_finish_revert_migration'
         server = self._build_minimal_create_server_request(
-            self.api, name, image_uuid=fake_image.get_valid_image_id(),
+            name, image_uuid=fake_image.get_valid_image_id(),
             networks='auto')
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # Save the source hostname for assertions later.
         source_host = server['OS-EXT-SRV-ATTR:host']
@@ -66,8 +66,7 @@ class ComputeManagerInitHostTestCase(
         self.admin_api.post_server_action(server['id'], {'migrate': None})
         # Now wait for the task_state to be reset to None during
         # _init_instance.
-        server = self._wait_for_server_parameter(
-            self.admin_api, server, {
+        server = self._wait_for_server_parameter(server, {
                 'status': 'ACTIVE',
                 'OS-EXT-STS:task_state': None,
                 'OS-EXT-SRV-ATTR:host': source_host
@@ -158,7 +157,7 @@ class TestComputeRestartInstanceStuckInBuild(
         # instance_claim() to stop it. This is less realistic but it works in
         # the test env.
         server_req = self._build_minimal_create_server_request(
-            self.api, 'interrupted-server', flavor_id=self.flavor1['id'],
+            'interrupted-server', flavor_id=self.flavor1['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
@@ -170,7 +169,7 @@ class TestComputeRestartInstanceStuckInBuild(
         mock_instance_claim.side_effect = sleep_forever

         server = self.api.post_server({'server': server_req})
-        self._wait_for_state_change(self.admin_api, server, 'BUILD')
+        self._wait_for_state_change(server, 'BUILD')

         # the instance.create.start is the closest thing to the
         # instance_claim call we can wait for in the test
@@ -182,7 +181,7 @@ class TestComputeRestartInstanceStuckInBuild(
         # We expect that the instance is pushed to ERROR state during the
         # compute restart.
-        self._wait_for_state_change(self.admin_api, server, 'ERROR')
+        self._wait_for_state_change(server, 'ERROR')
         mock_log.assert_called_with(
             'Instance spawn was interrupted before instance_claim, setting '
             'instance to ERROR state',

@@ -69,7 +69,7 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
                 'uuid': uuids.working_volume,
                 'source_type': 'volume',
                 'destination_type': 'volume'}]}})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         source = server['OS-EXT-SRV-ATTR:host']
         if source == self.compute.host:
@@ -87,7 +87,7 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
         self.stub_out('nova.volume.cinder.API.attachment_delete',
                       stub_attachment_delete)
         self.api.post_server_action(server['id'], post)
-        self._wait_for_server_parameter(self.api, server,
+        self._wait_for_server_parameter(server,
                                         {'OS-EXT-SRV-ATTR:host': dest,
                                          'status': 'ACTIVE'})
         self.assertEqual(2, stub_attachment_delete.call_count)

@@ -73,11 +73,14 @@ def generate_new_element(items, prefix, numeric=False):

 class InstanceHelperMixin(object):

-    def _wait_for_server_parameter(self, admin_api, server, expected_params,
-                                   max_retries=10):
+    def _wait_for_server_parameter(
+            self, server, expected_params, max_retries=10, api=None):
+        api = api or getattr(self, 'admin_api', self.api)
+
         retry_count = 0
         while True:
-            server = admin_api.get_server(server['id'])
+            server = api.get_server(server['id'])
             if all([server[attr] == expected_params[attr]
                     for attr in expected_params]):
                 break
@@ -90,22 +93,21 @@ class InstanceHelperMixin(object):
         return server

-    def _wait_for_state_change(self, admin_api, server, expected_status,
-                               max_retries=10):
+    def _wait_for_state_change(self, server, expected_status, max_retries=10):
         return self._wait_for_server_parameter(
-            admin_api, server, {'status': expected_status}, max_retries)
+            server, {'status': expected_status}, max_retries)

-    def _build_minimal_create_server_request(self, api, name=None,
-                                             image_uuid=None, flavor_id=None,
-                                             networks=None, az=None,
-                                             host=None):
+    def _build_minimal_create_server_request(
+            self, name=None, image_uuid=None, flavor_id=None, networks=None,
+            az=None, host=None):
         server = {}

         if not image_uuid:
             # NOTE(takashin): In API version 2.36, image APIs were deprecated.
             # In API version 2.36 or greater, self.api.get_images() returns
             # a 404 error. In that case, 'image_uuid' should be specified.
-            image_uuid = api.get_images()[0]['id']
+            image_uuid = self.api.get_images()[0]['id']
         server['imageRef'] = image_uuid

         if not name:
@@ -115,7 +117,7 @@ class InstanceHelperMixin(object):
         if not flavor_id:
             # Set a valid flavorId
-            flavor_id = api.get_flavors()[0]['id']
+            flavor_id = self.api.get_flavors()[0]['id']
         server['flavorRef'] = 'http://fake.server/%s' % flavor_id

         if networks is not None:
@@ -142,40 +144,43 @@ class InstanceHelperMixin(object):
             return

     def _wait_for_action_fail_completion(
-            self, server, expected_action, event_name, api=None):
+            self, server, expected_action, event_name):
         """Polls instance action events for the given instance, action and
         action event name until it finds the action event with an error
         result.
         """
-        if api is None:
-            api = self.api
         return self._wait_for_instance_action_event(
-            api, server, expected_action, event_name, event_result='error')
+            server, expected_action, event_name, event_result='error')

     def _wait_for_instance_action_event(
-            self, api, server, action_name, event_name, event_result):
+            self, server, action_name, event_name, event_result):
         """Polls the instance action events for the given instance, action,
         event, and event result until it finds the event.
         """
+        api = getattr(self, 'admin_api', self.api)
+
         actions = []
         events = []
         for attempt in range(10):
             actions = api.get_instance_actions(server['id'])
             # The API returns the newest event first
             for action in actions:
-                if action['action'] == action_name:
-                    events = (
-                        api.api_get(
-                            '/servers/%s/os-instance-actions/%s' %
-                            (server['id'], action['request_id'])
-                        ).body['instanceAction']['events'])
-                    # Look for the action event being in error state.
-                    for event in events:
-                        result = event['result']
-                        if (event['event'] == event_name and
-                                result is not None and
-                                result.lower() == event_result.lower()):
-                            return event
+                if action['action'] != action_name:
+                    continue
+
+                events = api.api_get(
+                    '/servers/%s/os-instance-actions/%s' % (
+                        server['id'], action['request_id'])
+                ).body['instanceAction']['events']
+
+                # Look for the action event being in error state.
+                for event in events:
+                    result = event['result']
+                    if (event['event'] == event_name and
+                            result is not None and
+                            result.lower() == event_result.lower()):
+                        return event
+
             # We didn't find the completion event yet, so wait a bit.
             time.sleep(0.5)
@@ -192,9 +197,8 @@ class InstanceHelperMixin(object):
         :param action: Either "resize" or "migrate" instance action.
         :param error_in_tb: Some expected part of the error event traceback.
         """
-        api = self.admin_api if hasattr(self, 'admin_api') else self.api
         event = self._wait_for_action_fail_completion(
-            server, action, 'conductor_migrate_server', api=api)
+            server, action, 'conductor_migrate_server')
         self.assertIn(error_in_tb, event['traceback'])

     def _wait_for_migration_status(self, server, expected_statuses):
@@ -202,9 +206,7 @@ class InstanceHelperMixin(object):
         for the given server, else the test fails. The migration record, if
         found, is returned.
         """
-        api = getattr(self, 'admin_api', None)
-        if api is None:
-            api = self.api
+        api = getattr(self, 'admin_api', self.api)

         statuses = [status.lower() for status in expected_statuses]
         for attempt in range(10):
@@ -297,10 +299,14 @@ class _IntegratedTestBase(test.TestCase):
             self.api = self.api_fixture.admin_api
         else:
             self.api = self.api_fixture.api
+        self.admin_api = self.api_fixture.admin_api

         if hasattr(self, 'microversion'):
             self.api.microversion = self.microversion
+            if not self.ADMIN_API:
+                self.admin_api.microversion = self.microversion

     def get_unused_server_name(self):
         servers = self.api.get_servers()
         server_names = [server['name'] for server in servers]
@@ -728,14 +734,13 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
         :return: the API representation of the booted instance
         """
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=flavor['id'],
+            'some-server', flavor_id=flavor['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks=networks)
         server_req['availability_zone'] = 'nova:%s' % source_hostname
         LOG.info('booting on %s', source_hostname)
         created_server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(
-            self.admin_api, created_server, 'ACTIVE')
+        server = self._wait_for_state_change(created_server, 'ACTIVE')

         # Verify that our source host is what the server ended up on
         self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])
@@ -849,7 +854,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
     def _move_and_check_allocations(self, server, request, old_flavor,
                                     new_flavor, source_rp_uuid, dest_rp_uuid):
         self.api.post_server_action(server['id'], request)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')

         def _check_allocation():
             self.assertFlavorMatchesUsage(source_rp_uuid, old_flavor)
@@ -911,7 +916,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
             }
         }
         self.api.post_server_action(server['id'], resize_req)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')

         self.assertFlavorMatchesUsage(rp_uuid, old_flavor, new_flavor)
@@ -981,15 +986,15 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
     def _confirm_resize(self, server):
         self.api.post_server_action(server['id'], {'confirmResize': None})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_instance_action_event(
-            self.api, server, instance_actions.CONFIRM_RESIZE,
+            server, instance_actions.CONFIRM_RESIZE,
             'compute_confirm_resize', 'success')
         return server

     def _revert_resize(self, server):
         self.api.post_server_action(server['id'], {'revertResize': None})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_migration_status(server, ['reverted'])
         # Note that the migration status is changed to "reverted" in the
         # dest host revert_resize method but the allocations are cleaned up
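
The mechanism behind the removed parameter is the one-line fallback api = api or getattr(self, 'admin_api', self.api) introduced in the mixin above. A self-contained sketch of how that lookup behaves; the class and method names here are illustrative, not nova code:

    class FakeTestCase:
        """Mimics a test case that may or may not define admin_api."""

        def __init__(self, api, admin_api=None):
            self.api = api
            if admin_api is not None:
                self.admin_api = admin_api

        def resolve_api(self, api=None):
            # An explicit argument wins; otherwise prefer admin_api when
            # the attribute exists, else fall back to the regular client.
            return api or getattr(self, 'admin_api', self.api)

    assert FakeTestCase('user', 'admin').resolve_api() == 'admin'
    assert FakeTestCase('user').resolve_api() == 'user'
    assert FakeTestCase('user', 'admin').resolve_api('explicit') == 'explicit'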

@@ -77,7 +77,7 @@ class SharedStorageProviderUsageTestCase(
             }
         # create server
         server = self.api.post_server(server_req_body)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # get shared_rp and cn_rp usages
         shared_rp_usages = self._get_provider_usages(shared_RP['uuid'])
@@ -135,7 +135,7 @@ class SharedStorageProviderUsageTestCase(
             }
         # create server
         server = self.api.post_server(server_req_body)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         rebuild_image_ref = (
             nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
@@ -152,7 +152,7 @@ class SharedStorageProviderUsageTestCase(
         self.api.api_post('/servers/%s/action' % server['id'],
                           rebuild_req_body)
         self._wait_for_server_parameter(
-            self.api, server, {'OS-EXT-STS:task_state': None})
+            server, {'OS-EXT-STS:task_state': None})

         # get shared_rp and cn_rp usages
         shared_rp_usages = self._get_provider_usages(shared_rp_uuid)
@@ -198,7 +198,7 @@ class SharedStorageProviderUsageTestCase(
             }
         # create server
         server = self.api.post_server(server_req_body)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         rebuild_image_ref = (
             nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
@@ -216,7 +216,7 @@ class SharedStorageProviderUsageTestCase(
                           rebuild_req_body)
         # Look for the failed rebuild action.
         self._wait_for_action_fail_completion(
-            server, instance_actions.REBUILD, 'rebuild_server', self.admin_api)
+            server, instance_actions.REBUILD, 'rebuild_server')
         # Assert the server image_ref was rolled back on failure.
         server = self.api.get_server(server['id'])
         self.assertEqual(org_image_id, server['image']['id'])

@@ -126,7 +126,7 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
     def _create_server(self, flavor_id, hostname):
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=flavor_id,
+            'some-server', flavor_id=flavor_id,
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         server_req['availability_zone'] = 'nova:%s' % hostname
@@ -173,22 +173,22 @@ class VPMEMTests(VPMEMTestBase):
         # Boot two servers with pmem
         server1 = self._create_server(self.flavor, self.compute1.host)
-        self._wait_for_state_change(self.api, server1, 'ACTIVE')
+        self._wait_for_state_change(server1, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server1['id'], cn1_uuid)

         server2 = self._create_server(self.flavor, self.compute1.host)
-        self._wait_for_state_change(self.api, server2, 'ACTIVE')
+        self._wait_for_state_change(server2, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server2['id'], cn1_uuid)

         # 'SMALL' VPMEM resource has used up
         server3 = self._create_server(self.flavor, self.compute1.host)
-        self._wait_for_state_change(self.api, server3, 'ERROR')
+        self._wait_for_state_change(server3, 'ERROR')

         # Delete server2, one 'SMALL' VPMEM will be released
         self._delete_server(server2)
         server3 = self._create_server(self.flavor, self.compute1.host)
-        self._wait_for_state_change(self.api, server3, 'ACTIVE')
+        self._wait_for_state_change(server3, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server3['id'], cn1_uuid)
@@ -237,29 +237,29 @@ class VPMEMResizeTests(VPMEMTestBase):
         # Boot one server with pmem, then resize the server
         server = self._create_server(self.flavor1, self.compute1.host)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)

         # Revert resize
         self._resize_server(server, self.flavor2)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn2_uuid)
         self._revert_resize(server)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)

         # Confirm resize
         self._resize_server(server, self.flavor2)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn2_uuid)
         self._confirm_resize(server)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn2_uuid)
@@ -272,29 +272,29 @@ class VPMEMResizeTests(VPMEMTestBase):
         # Boot one server with pmem, then resize the server
         server = self._create_server(self.flavor1, self.compute1.host)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)

         # Revert resize
         self._resize_server(server, self.flavor2)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)
         self._revert_resize(server)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)

         # Confirm resize
         self._resize_server(server, self.flavor2)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)
         self._confirm_resize(server)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
                                        'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
                                       server['id'], cn1_uuid)

@@ -220,7 +220,7 @@ class NotificationSampleTestBase(test.TestCase,
             actual=fake_notifier.VERSIONED_NOTIFICATIONS.pop(0))

         server = self._build_minimal_create_server_request(
-            self.api, 'some-server',
+            'some-server',
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             flavor_id=flavor_id)
@@ -246,7 +246,7 @@ class NotificationSampleTestBase(test.TestCase,
         self.assertTrue(created_server['id'])

         # Wait for it to finish being created
-        found_server = self._wait_for_state_change(self.api, created_server,
+        found_server = self._wait_for_state_change(created_server,
                                                    expected_status)

         found_server['reservation_id'] = reservation_id

@@ -59,7 +59,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
             fake_notifier.reset()
             action(server)
             # Ensure that instance is in active state after an action
-            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+            self._wait_for_state_change(server, 'ACTIVE')

     @mock.patch('nova.compute.manager.ComputeManager.'
                 '_live_migration_cleanup_flags', return_value=[True, False])
@@ -188,13 +188,13 @@ class TestInstanceNotificationSampleWithMultipleCompute(
         }
         self.admin_api.post_server_action(server['id'], post)

-        self._wait_for_state_change(self.api, server, 'MIGRATING')
+        self._wait_for_state_change(server, 'MIGRATING')
         migrations = self._wait_and_get_migrations(server)

         self.admin_api.delete_migration(server['id'], migrations[0]['id'])
         self._wait_for_notification('instance.live_migration_abort.start')
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         # NOTE(gibi): the intance.live_migration_rollback notification emitted
         # after the instance.live_migration_abort notification so we have to
         # wait for the rollback to ensure we can assert both notifications
@@ -261,10 +261,8 @@ class TestInstanceNotificationSampleWithMultipleCompute(
         }
         self.admin_api.post_server_action(server['id'], evacuate)

-        self._wait_for_state_change(self.api, server,
-                                    expected_status='REBUILD')
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='REBUILD')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         notifications = self._get_notifications('instance.evacuate')
         self.assertEqual(1, len(notifications),
@@ -286,7 +284,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
         }
         self.admin_api.post_server_action(server['id'], post)

-        self._wait_for_state_change(self.api, server, 'MIGRATING')
+        self._wait_for_state_change(server, 'MIGRATING')
         migrations = self._wait_and_get_migrations(server)
         migration_id = migrations[0]['id']
@@ -390,7 +388,7 @@ class TestInstanceNotificationSample(
             fake_notifier.reset()
             action(server)
             # Ensure that instance is in active state after an action
-            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+            self._wait_for_state_change(server, 'ACTIVE')

             # if the test step did not raised then we consider the step as
             # succeeded. We drop the logs to avoid causing subunit parser
@@ -520,10 +518,8 @@ class TestInstanceNotificationSample(
             }
         }
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='REBUILD')
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='REBUILD')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         notifications = self._get_notifications('instance.exists')
         self._verify_notification(
@@ -751,11 +747,9 @@ class TestInstanceNotificationSample(
     def _test_power_off_on_server(self, server):
         self.api.post_server_action(server['id'], {'os-stop': {}})
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='SHUTOFF')
+        self._wait_for_state_change(server, expected_status='SHUTOFF')

         self.api.post_server_action(server['id'], {'os-start': {}})
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -788,8 +782,7 @@ class TestInstanceNotificationSample(
     def _test_shelve_and_shelve_offload_server(self, server):
         self.flags(shelved_offload_time=-1)
         self.api.post_server_action(server['id'], {'shelve': {}})
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='SHELVED')
+        self._wait_for_state_change(server, expected_status='SHELVED')

         self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -812,7 +805,7 @@ class TestInstanceNotificationSample(
         # we can unshelve to make sure that the unshelve.start notification
         # payload is stable as the compute manager first sets the instance
         # state then a bit later sets the instance.host to None.
-        self._wait_for_server_parameter(self.api, server,
+        self._wait_for_server_parameter(server,
                                         {'status': 'SHELVED_OFFLOADED',
                                          'OS-EXT-SRV-ATTR:host': None})
@@ -832,7 +825,7 @@ class TestInstanceNotificationSample(
             actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])

         self.api.post_server_action(server['id'], {'unshelve': None})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_notification('instance.unshelve.end')

     def _test_unshelve_server(self, server):
@@ -844,13 +837,13 @@ class TestInstanceNotificationSample(
         # we can unshelve to make sure that the unshelve.start notification
         # payload is stable as the compute manager first sets the instance
         # state then a bit later sets the instance.host to None.
-        self._wait_for_server_parameter(self.api, server,
+        self._wait_for_server_parameter(server,
                                         {'status': 'SHELVED_OFFLOADED',
                                          'OS-EXT-SRV-ATTR:host': None})

         post = {'unshelve': None}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_notification('instance.unshelve.end')
         self.assertEqual(9, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -870,11 +863,11 @@ class TestInstanceNotificationSample(
     def _test_suspend_resume_server(self, server):
         post = {'suspend': {}}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'SUSPENDED')
+        self._wait_for_state_change(server, 'SUSPENDED')

         post = {'resume': None}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Four versioned notification are generated.
         # 0. instance-suspend-start
@@ -913,10 +906,10 @@ class TestInstanceNotificationSample(
     def _test_pause_unpause_server(self, server):
         self.api.post_server_action(server['id'], {'pause': {}})
-        self._wait_for_state_change(self.api, server, 'PAUSED')
+        self._wait_for_state_change(server, 'PAUSED')

         self.api.post_server_action(server['id'], {'unpause': {}})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Four versioned notifications are generated
         # 0. instance-pause-start
@@ -997,7 +990,7 @@ class TestInstanceNotificationSample(
             }
         }
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')

         self._pop_and_verify_dest_select_notification(server['id'],
             replacements={
@@ -1034,7 +1027,7 @@ class TestInstanceNotificationSample(
         # the following is the revert server request
         post = {'revertResize': None}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1170,7 +1163,7 @@ class TestInstanceNotificationSample(
         self.addCleanup(patcher.stop)
         patcher.start()
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, expected_status='ERROR')
+        self._wait_for_state_change(server, expected_status='ERROR')
         self._wait_for_notification('compute.exception')
         # There should be the following notifications after scheduler's
         # select_destination notifications:
@@ -1248,10 +1241,8 @@ class TestInstanceNotificationSample(
         self.api.post_server_action(server['id'], post)
         # Before going back to ACTIVE state
         # server state need to be changed to REBUILD state
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='REBUILD')
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='REBUILD')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         self._pop_and_verify_dest_select_notification(server['id'],
             replacements={
@@ -1345,10 +1336,8 @@ class TestInstanceNotificationSample(
         self.api.post_server_action(server['id'], post)
         # Before going back to ACTIVE state
         # server state need to be changed to REBUILD state
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='REBUILD')
-        self._wait_for_state_change(self.api, server,
-                                    expected_status='ACTIVE')
+        self._wait_for_state_change(server, expected_status='REBUILD')
+        self._wait_for_state_change(server, expected_status='ACTIVE')

         self._pop_and_verify_dest_select_notification(server['id'],
             replacements={
@@ -1434,7 +1423,7 @@ class TestInstanceNotificationSample(
         }
         self.api.post_server_action(server['id'], post)
         mock_rebuild.side_effect = _virtual_interface_create_failed
-        self._wait_for_state_change(self.api, server, expected_status='ERROR')
+        self._wait_for_state_change(server, expected_status='ERROR')
         notification = self._get_notifications('instance.rebuild.error')
         self.assertEqual(1, len(notification),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1455,11 +1444,11 @@ class TestInstanceNotificationSample(
     def _test_restore_server(self, server):
         self.flags(reclaim_instance_interval=30)
         self.api.delete_server(server['id'])
-        self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
+        self._wait_for_state_change(server, 'SOFT_DELETED')
         # we don't want to test soft_delete here
         fake_notifier.reset()
         self.api.post_server_action(server['id'], {'restore': {}})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1641,12 +1630,12 @@ class TestInstanceNotificationSample(
         self.flags(allow_resize_to_same_host=True)
         post = {'resize': {'flavorRef': '2'}}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
         fake_notifier.reset()

         post = {'confirmResize': None}
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1733,7 +1722,7 @@ class TestInstanceNotificationSample(
             }
         }
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'RESCUE')
+        self._wait_for_state_change(server, 'RESCUE')

         # 0. instance.rescue.start
         # 1. instance.exists
@@ -1759,7 +1748,7 @@ class TestInstanceNotificationSample(
             'unrescue': None
         }
         self.api.post_server_action(server['id'], post)
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1779,7 +1768,7 @@ class TestInstanceNotificationSample(
     def _test_soft_delete_server(self, server):
         self.flags(reclaim_instance_interval=30)
         self.api.delete_server(server['id'])
-        self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
+        self._wait_for_state_change(server, 'SOFT_DELETED')

         self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                          fake_notifier.VERSIONED_NOTIFICATIONS)
@@ -1948,9 +1937,9 @@ class TestInstanceNotificationSample(
     def _test_lock_unlock_instance(self, server):
         self.api.post_server_action(server['id'], {'lock': {}})
-        self._wait_for_server_parameter(self.api, server, {'locked': True})
+        self._wait_for_server_parameter(server, {'locked': True})
         self.api.post_server_action(server['id'], {'unlock': {}})
-        self._wait_for_server_parameter(self.api, server, {'locked': False})
+        self._wait_for_server_parameter(server, {'locked': False})
         # Two versioned notifications are generated
         # 0. instance-lock
         # 1. instance-unlock
@@ -1973,9 +1962,9 @@ class TestInstanceNotificationSample(
     def _test_lock_unlock_instance_with_reason(self, server):
         self.api.post_server_action(
             server['id'], {'lock': {"locked_reason": "global warming"}})
-        self._wait_for_server_parameter(self.api, server, {'locked': True})
+        self._wait_for_server_parameter(server, {'locked': True})
         self.api.post_server_action(server['id'], {'unlock': {}})
-        self._wait_for_server_parameter(self.api, server, {'locked': False})
+        self._wait_for_server_parameter(server, {'locked': False})
         # Two versioned notifications are generated
         # 0. instance-lock
         # 1. instance-unlock

@@ -55,7 +55,7 @@ class DeleteWithReservedVolumes(integrated_helpers._IntegratedTestBase,
                 ]
             }
         })
-        return self._wait_for_state_change(self.api, server, 'ERROR')
+        return self._wait_for_state_change(server, 'ERROR')

     def test_delete_with_reserved_volumes_new(self):
         self.cinder = self.useFixture(

@@ -42,7 +42,7 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
         flavor1 = flavors[0]['id']
         server = self._build_server(flavor1)
         server = self.api.post_server({'server': server})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Start up another compute service so we can resize.
         host2 = self.start_service('compute', host='host2')
@@ -51,10 +51,10 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
         flavor2 = flavors[1]['id']
         req = {'resize': {'flavorRef': flavor2}}
         self.api.post_server_action(server['id'], req)
-        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
         self.api.post_server_action(server['id'], {'confirmResize': None})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Disable the host on which the server is now running (host2).
         host2.stop()
@@ -62,7 +62,7 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
         # Now try to evacuate the server back to the original source compute.
         req = {'evacuate': {'onSharedStorage': False}}
         self.api.post_server_action(server['id'], req)
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # The evacuate flow in the compute manager is annoying in that it
         # sets the instance status to ACTIVE before updating the host, so we
         # have to wait for the migration record to be 'done' to avoid a race.

@@ -81,10 +81,10 @@ class TestLocalDeleteAllocations(test.TestCase,
         self.assertEqual(0, usage)

         # Create a server.
-        server = self._build_minimal_create_server_request(self.api,
+        server = self._build_minimal_create_server_request(
             'local-delete-test', self.image_id, self.flavor_id, 'none')
         server = self.admin_api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Assert usages are non zero now.
         usages_during = self._get_usages(placement_api, rp_uuid)
@@ -136,10 +136,10 @@ class TestLocalDeleteAllocations(test.TestCase,
         self.assertEqual(0, usage)

         # Create a server.
-        server = self._build_minimal_create_server_request(self.api,
+        server = self._build_minimal_create_server_request(
             'local-delete-test', self.image_id, self.flavor_id, 'none')
         server = self.admin_api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Assert usages are non zero now.
         usages_during = self._get_usages(placement_api, rp_uuid)

@@ -59,10 +59,10 @@ class ServerTagsFilteringTest(test.TestCase,
         for x in range(2):
             server = self.api.post_server(
                 dict(server=self._build_minimal_create_server_request(
-                    self.api, 'test-list-server-tag-filters%i' % x, image_id,
+                    'test-list-server-tag-filters%i' % x, image_id,
                     networks='none')))
             self.addCleanup(self.api.delete_server, server['id'])
-            server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+            server = self._wait_for_state_change(server, 'ACTIVE')
             self.servers.append(server)

         # now apply two tags to the first server

@@ -66,10 +66,10 @@ class ServerListLimitMarkerCell0Test(test.TestCase,
         for x in range(3):
             server = self.api.post_server(
                 dict(server=self._build_minimal_create_server_request(
-                    self.api, 'test-list-server-limit%i' % x, self.image_id,
+                    'test-list-server-limit%i' % x, self.image_id,
                     networks='none')))
             self.addCleanup(self.api.delete_server, server['id'])
-            self._wait_for_state_change(self.api, server, 'ERROR')
+            self._wait_for_state_change(server, 'ERROR')

         servers = self.api.get_servers()
         self.assertEqual(3, len(servers))

@@ -95,11 +95,11 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
         # We first create the instance
         server = self.admin_api.post_server(
             dict(server=self._build_minimal_create_server_request(
-                self.api, 'my-pretty-instance-to-evacuate', self.image_id,
+                'my-pretty-instance-to-evacuate', self.image_id,
                 networks='none')))
         server_id = server['id']
         self.addCleanup(self.api.delete_server, server_id)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # We need to get instance details for knowing its host
         server = self.admin_api.get_server(server_id)
@@ -125,7 +125,7 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
         }
         self.admin_api.post_server_action(server['id'], evacuate)
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         server = self.admin_api.get_server(server_id)

         # Yeepee, that works!

@@ -84,13 +84,12 @@ class FailedEvacuateStateTests(test.TestCase,
     def _boot_a_server(self):
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=self.flavor1['id'],
+            'some-server', flavor_id=self.flavor1['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         LOG.info('booting on %s', self.hostname)
         created_server = self.api.post_server({'server': server_req})
-        return self._wait_for_state_change(
-            self.api, created_server, 'ACTIVE')
+        return self._wait_for_state_change(created_server, 'ACTIVE')

     def test_evacuate_no_valid_host(self):
         # Boot a server
@@ -110,7 +109,7 @@ class FailedEvacuateStateTests(test.TestCase,
         self._wait_for_notification_event_type('compute_task.rebuild_server')

-        server = self._wait_for_state_change(self.api, server, 'ERROR')
+        server = self._wait_for_state_change(server, 'ERROR')
         self.assertEqual(self.hostname, server['OS-EXT-SRV-ATTR:host'])

         # Check migrations

@@ -70,7 +70,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
     def _boot_servers(self, num_servers=1):
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=self.flavor1['id'],
+            'some-server', flavor_id=self.flavor1['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         server_req.update({'min_count': str(num_servers),
@@ -81,7 +81,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
         servers = self.api.get_servers(
             detail=True, search_opts={'reservation_id': reservation_id})
         for idx, server in enumerate(servers):
-            servers[idx] = self._wait_for_state_change(self.api, server,
+            servers[idx] = self._wait_for_state_change(server,
                                                        'ACTIVE')
         return servers

@@ -103,9 +103,9 @@ class TestRequestSpecRetryReschedule(test.TestCase,
         # create the instance which should go to host1
         server = self.admin_api.post_server(
             dict(server=self._build_minimal_create_server_request(
-                self.api, 'test_resize_with_reschedule_then_live_migrate',
+                'test_resize_with_reschedule_then_live_migrate',
                 self.image_id, flavor_id=flavor1['id'], networks='none')))
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])

         # Stub out the resize to fail on host2, which will trigger a reschedule
@@ -116,17 +116,17 @@ class TestRequestSpecRetryReschedule(test.TestCase,
         # on host3.
         data = {'resize': {'flavorRef': flavor2['id']}}
         self.api.post_server_action(server['id'], data)
-        server = self._wait_for_state_change(self.admin_api, server,
-                                             'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server,
+                                             'VERIFY_RESIZE')
         self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])
         self.api.post_server_action(server['id'], {'confirmResize': None})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Now live migrate the server to host2 specifically, which previously
         # failed the resize attempt but here it should pass.
         data = {'os-migrateLive': {'host': 'host2', 'block_migration': 'auto'}}
         self.admin_api.post_server_action(server['id'], data)
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
         # NOTE(mriedem): The instance status effectively goes to ACTIVE before
         # the migration status is changed to "completed" since
@@ -104,8 +104,7 @@ class TestRescheduleWithServerGroup(test.TestCase,
         hints = {'group': created_group['id']}
         created_server = self.api.post_server({'server': server,
                                                'os:scheduler_hints': hints})
-        found_server = self._wait_for_state_change(self.admin_api,
-                                                   created_server, 'ACTIVE')
+        found_server = self._wait_for_state_change(created_server, 'ACTIVE')
         # Assert that the host is not the failed host.
         self.assertNotEqual(self.failed_host,
                             found_server['OS-EXT-SRV-ATTR:host'])
@@ -64,7 +64,7 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase,
             }
         }
         server = self.api.post_server(server_req_body)
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # For a volume-backed server, the image ref will be an empty string
         # in the server response.
         self.assertEqual('', server['image'])
@@ -99,13 +99,11 @@ class TestParallelEvacuationWithServerGroup(
         hints = {'group': group['id']}
         created_server1 = self.api.post_server({'server': server,
                                                 'os:scheduler_hints': hints})
-        server1 = self._wait_for_state_change(self.api,
-                                              created_server1, 'ACTIVE')
+        server1 = self._wait_for_state_change(created_server1, 'ACTIVE')
         created_server2 = self.api.post_server({'server': server,
                                                 'os:scheduler_hints': hints})
-        server2 = self._wait_for_state_change(self.api,
-                                              created_server2, 'ACTIVE')
+        server2 = self._wait_for_state_change(created_server2, 'ACTIVE')

         # assert that the anti-affinity policy is enforced during the boot
         self.assertNotEqual(server1['OS-EXT-SRV-ATTR:host'],
@@ -134,9 +132,9 @@ class TestParallelEvacuationWithServerGroup(
         fake_notifier.wait_for_versioned_notifications(
             'instance.rebuild.start', n_events=1)
         server1 = self._wait_for_server_parameter(
-            self.api, server1, {'OS-EXT-STS:task_state': None})
+            server1, {'OS-EXT-STS:task_state': None})
         server2 = self._wait_for_server_parameter(
-            self.api, server2, {'OS-EXT-STS:task_state': None})
+            server2, {'OS-EXT-STS:task_state': None})
         # NOTE(gibi): The instance.host set _after_ the instance state and
         # tast_state is set back to normal so it is not enough to wait for
@@ -48,13 +48,13 @@ class TestServerResizeReschedule(integrated_helpers.ProviderUsageBaseTestCase):
        supplied host_list, and does not call the scheduler.
        """
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=self.flavor1['id'],
+            'some-server', flavor_id=self.flavor1['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         self.first_attempt = True
         created_server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(self.api, created_server,
+        server = self._wait_for_state_change(created_server,
                                              'ACTIVE')

         actual_prep_resize = compute_manager.ComputeManager._prep_resize
@@ -74,7 +74,7 @@ class TestServerResizeReschedule(integrated_helpers.ProviderUsageBaseTestCase):
         data = {"resize": {"flavorRef": self.flavor2['id']}}
         self.api.post_server_action(server_uuid, data)
-        server = self._wait_for_state_change(self.api, created_server,
+        server = self._wait_for_state_change(created_server,
                                              'VERIFY_RESIZE')
         self.assertEqual(self.flavor2['name'],
                         server['flavor']['original_name'])
@@ -77,11 +77,11 @@ class TestResizeWithNoAllocationScheduler(
     def test_resize(self):
         # Create our server without networking just to keep things simple.
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test-resize', flavor_id=self.old_flavor['id'],
+            'test-resize', flavor_id=self.old_flavor['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         original_host = server['OS-EXT-SRV-ATTR:host']
         target_host = 'host1' if original_host == 'host2' else 'host2'
@@ -95,8 +95,7 @@ class TestResizeWithNoAllocationScheduler(
         self.api.post_server_action(server['id'], post)
         # Poll the server until the resize is done.
-        server = self._wait_for_state_change(
-            self.api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         # Assert that the server was migrated to the other host.
         self.assertEqual(target_host, server['OS-EXT-SRV-ATTR:host'])
         # Confirm the resize.
@@ -90,7 +90,7 @@ class TestBootFromVolumeIsolatedHostsFilter(
         # networks='none'.
         with utils.temporary_mutation(self.api, microversion='2.37'):
             server = self.api.post_server(server_req_body)
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # NOTE(mriedem): The instance is successfully scheduled but since
         # the image_id from the volume_image_metadata isn't stored in the
         # RequestSpec.image.id, and restrict_isolated_hosts_to_isolated_images
@@ -62,13 +62,12 @@ class InstanceListWithDeletedServicesTestCase(
     def _migrate_server(self, server, target_host):
         self.admin_api.api_post('/servers/%s/action' % server['id'],
                                 {'migrate': None})
-        server = self._wait_for_state_change(
-            self.admin_api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self.assertEqual(target_host, server['OS-EXT-SRV-ATTR:host'])
         self.admin_api.api_post('/servers/%s/action' % server['id'],
                                 {'confirmResize': None},
                                 check_response_status=[204])
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         return server

     def test_instance_list_deleted_service_with_no_uuid(self):
@@ -87,10 +86,10 @@ class InstanceListWithDeletedServicesTestCase(
         # Create an instance which will be on host1 since it's the only host.
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test_instance_list_deleted_service_with_no_uuid',
+            'test_instance_list_deleted_service_with_no_uuid',
             image_uuid=self.image_id, networks='none')
         server = self.api.post_server({'server': server_req})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Now we start a 2nd compute which is "upgraded" (has a uuid) and
         # we'll migrate the instance to that host.
@@ -94,7 +94,7 @@ class TestEvacuationWithSourceReturningDuringRebuild(
                           'imageRef': self.image_id,
                           'flavorRef': self.flavor_id}
         server_response = self.api.post_server({'server': server_request})
-        server = self._wait_for_state_change(self.api, server_response,
+        server = self._wait_for_state_change(server_response,
                                              'ACTIVE')

         # Record where the instance is running before forcing the service down
@@ -106,7 +106,7 @@ class TestEvacuationWithSourceReturningDuringRebuild(
         self.api.post_server_action(server['id'], {'evacuate': {}})

         # Wait for the instance to go into an ACTIVE state
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         server = self.api.get_server(server['id'])
         host = server['OS-EXT-SRV-ATTR:host']
         migrations = self.api.get_migrations()
@@ -60,7 +60,7 @@ class TestMultiCreateServerGroupMemberOverQuota(
        multi-create POST /servers request.
        """
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test_multi_create_server_group_members_over_quota',
+            'test_multi_create_server_group_members_over_quota',
             image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
             networks='none')
         server_req['min_count'] = 3
@@ -88,7 +88,7 @@ class TestMultiCreateServerGroupMemberOverQuota(
         self.useFixture(nova_fixtures.NoopConductorFixture())
         for x in range(3):
             server_req = self._build_minimal_create_server_request(
-                self.api, 'test_concurrent_request_%s' % x,
+                'test_concurrent_request_%s' % x,
                 image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
                 networks='none')
             hints = {'group': self.created_group['id']}
@@ -78,7 +78,7 @@ class RescheduleBuildAvailabilityZoneUpCall(
         self.stub_out('nova.compute.manager.ComputeManager.'
                       'build_and_run_instance', wrap_bari)
         server = self._build_minimal_create_server_request(
-            self.api, 'test_server_create_reschedule_blocked_az_up_call')
+            'test_server_create_reschedule_blocked_az_up_call')
         server = self.api.post_server({'server': server})
         # Because we poisoned AggregateList.get_by_host after hitting the
         # compute service we have to wait for the notification that the build
@@ -88,7 +88,7 @@ class RescheduleBuildAvailabilityZoneUpCall(
         # build_and_run_instance twice so we have more than one instance of
         # the mock that needs to be stopped.
         mock.patch.stopall()
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # We should have rescheduled and the instance AZ should be set from the
         # Selection object. Since neither compute host is in an AZ, the server
         # is in the default AZ from config.
@@ -148,9 +148,9 @@ class RescheduleMigrateAvailabilityZoneUpCall(
         self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
                       wrap_prep_resize)
         server = self._build_minimal_create_server_request(
-            self.api, 'test_migrate_reschedule_blocked_az_up_call')
+            'test_migrate_reschedule_blocked_az_up_call')
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         original_host = server['OS-EXT-SRV-ATTR:host']

         # Now cold migrate the server to the other host.
@@ -164,7 +164,7 @@ class RescheduleMigrateAvailabilityZoneUpCall(
         # twice so we have more than one instance of the mock that needs to be
         # stopped.
         mock.patch.stopall()
-        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         final_host = server['OS-EXT-SRV-ATTR:host']
         self.assertNotIn(final_host, [original_host, self.rescheduled])
         # We should have rescheduled and the instance AZ should be set from the
@@ -103,7 +103,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
         # Now create two servers in that group.
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test_anti_affinity_multi_create',
+            'test_anti_affinity_multi_create',
             image_uuid=image_fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
             networks='none')
         server_req['min_count'] = 2
@@ -115,8 +115,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
         # Now wait for both servers to be ACTIVE and get the host on which
         # each server was built.
         for server in self.api.get_servers(detail=False):
-            server = self._wait_for_state_change(
-                self.admin_api, server, 'ACTIVE')
+            server = self._wait_for_state_change(server, 'ACTIVE')
             selected_hosts.add(server['OS-EXT-SRV-ATTR:host'])

         # Assert that each server is on a separate host.
@@ -76,7 +76,7 @@ class TestRescheduleWithVolumesAttached(
         server_response = self.api.post_server({'server': server_request})
         server_id = server_response['id']
-        self._wait_for_state_change(self.api, server_response, 'ACTIVE')
+        self._wait_for_state_change(server_response, 'ACTIVE')
         attached_volume_ids = self.cinder.volume_ids_for_instance(server_id)
         self.assertIn(volume_id, attached_volume_ids)
         self.assertEqual(1, len(self.cinder.volume_to_attachment))
@@ -57,8 +57,7 @@ class TestEvacuateDeleteServerRestartOriginalCompute(
             server['id'], post)
         expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname,
                            'status': 'ACTIVE'}
-        server = self._wait_for_server_parameter(self.api, server,
-                                                 expected_params)
+        server = self._wait_for_server_parameter(server, expected_params)

         # Expect to have allocation and usages on both computes as the
         # source compute is still down
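_wait_for_server_parameter follows the same pattern: with the client argument gone, the dict of expected fields moves up to second position, which is what lets the multi-line call sites in this diff collapse. A sketch, again with assumed internals; _wait_for_state_change is then presumably just the special case of waiting on {'status': ...}:

    import time

    def _wait_for_server_parameter(self, server, expected_params,
                                   max_retries=10):
        # Assumed client resolution, as in the earlier sketches.
        api = getattr(self, 'admin_api', None) or self.api
        for _ in range(max_retries + 1):
            server = api.get_server(server['id'])
            # Done once every expected field matches the latest server view.
            if all(server[key] == value
                   for key, value in expected_params.items()):
                return server
            time.sleep(0.5)
        self.fail('Server %s never matched %r'
                  % (server['id'], expected_params))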
@@ -65,31 +65,30 @@ class ColdMigrateTargetHostThenLiveMigrateTest(
     def test_cold_migrate_target_host_then_live_migrate(self):
         # Create a server, it doesn't matter on which host it builds.
         server = self._build_minimal_create_server_request(
-            self.api, 'test_cold_migrate_target_host_then_live_migrate',
+            'test_cold_migrate_target_host_then_live_migrate',
             image_uuid=image_fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
             networks='none')
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         original_host = server['OS-EXT-SRV-ATTR:host']
         target_host = 'host1' if original_host == 'host2' else 'host2'

         # Cold migrate the server to the specific target host.
         migrate_req = {'migrate': {'host': target_host}}
         self.admin_api.post_server_action(server['id'], migrate_req)
-        server = self._wait_for_state_change(
-            self.admin_api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')

         # Confirm the resize so the server stays on the target host.
         confim_req = {'confirmResize': None}
         self.admin_api.post_server_action(server['id'], confim_req)
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Attempt to live migrate the server but don't specify a host so the
         # scheduler has to pick one.
         live_migrate_req = {
             'os-migrateLive': {'host': None, 'block_migration': 'auto'}}
         self.admin_api.post_server_action(server['id'], live_migrate_req)
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # The live migration should have been successful and the server is now
         # back on the original host.
         self.assertEqual(original_host, server['OS-EXT-SRV-ATTR:host'])
@@ -103,7 +103,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
                       stub_check_num_instances_quota)
         server = self.api.post_server(server)
-        server = self._wait_for_state_change(self.api, server, 'ERROR')
+        server = self._wait_for_state_change(server, 'ERROR')
         # At this point, the build request should be gone and the instance
         # should have been created in cell1.
         context = nova_context.get_admin_context()
@@ -57,7 +57,7 @@ class ShowErrorServerWithTags(test.TestCase,
                 'imageRef': self.image_id
             }
         })
-        return self._wait_for_state_change(self.api, server, 'ERROR')
+        return self._wait_for_state_change(server, 'ERROR')

     def test_show_server_tag_in_error(self):
         # Create a server which should go to ERROR state because we don't
@@ -82,11 +82,11 @@ class NonPersistentFieldNotResetTest(
     def _create_server(self):
         # Create a server, it doesn't matter on which host it builds.
         server = self._build_minimal_create_server_request(
-            self.api, 'sample-server',
+            'sample-server',
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         return server
@@ -156,8 +156,7 @@ class NonPersistentFieldNotResetTest(
             server['id'], {'evacuate': {'host': target_host}})
         expected_params = {'OS-EXT-SRV-ATTR:host': original_host,
                            'status': 'ERROR'}
-        server = self._wait_for_server_parameter(self.api, server,
-                                                 expected_params)
+        server = self._wait_for_server_parameter(server, expected_params)

         # Make sure 'is_bfv' is set.
         reqspec = objects.RequestSpec.get_by_instance_uuid(self.ctxt,
@@ -59,7 +59,7 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase,
         # weight.
         server = self._build_server(self.api.get_flavors()[0]['id'])
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])

         # Disable the host on which the server is now running.
@@ -72,5 +72,5 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase,
         req = {'evacuate': {'onSharedStorage': False}}
         self.api.post_server_action(server['id'], req)
         self._wait_for_migration_status(server, ['done'])
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])
@@ -67,14 +67,14 @@ class VolumeBackedResizeDiskDown(test.TestCase,
             }]
         }
         server = self.api.post_server({'server': server})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Now try to resize the server with the flavor that has smaller disk.
         # This should be allowed since the server is volume-backed and the
         # disk size in the flavor shouldn't matter.
         data = {'resize': {'flavorRef': flavor1['id']}}
         self.api.post_server_action(server['id'], data)
-        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')

         # Now confirm the resize just to complete the operation.
         self.api.post_server_action(server['id'], {'confirmResize': None})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
@@ -57,7 +57,7 @@ class FillVirtualInterfaceListMigration(
                 'imageRef': fake_image.get_valid_image_id()
             }
         })
-        return self._wait_for_state_change(self.api, server, 'ACTIVE')
+        return self._wait_for_state_change(server, 'ACTIVE')

     def test_fill_vifs_migration(self):
         # Create a test server.
@@ -40,7 +40,7 @@ class FinishResizeErrorAllocationCleanupTestCase(
         # to avoid a race we need to wait for the migration status to change
         # to 'error' which happens after the fault is recorded.
         self._wait_for_migration_status(server, ['error'])
-        server = self._wait_for_state_change(self.admin_api, server, 'ERROR')
+        server = self._wait_for_state_change(server, 'ERROR')
         # The server should be pointing at $dest_host because resize_instance
         # will have updated the host/node value on the instance before casting
         # to the finish_resize method on the dest compute.
@@ -87,11 +87,11 @@ class MissingReqSpecInstanceGroupUUIDTestCase(
         # Create a server in the group which should land on host1 due to our
         # custom weigher.
         server = self._build_minimal_create_server_request(
-            self.api, 'test_cold_migrate_reschedule')
+            'test_cold_migrate_reschedule')
         body = dict(server=server)
         body['os:scheduler_hints'] = {'group': group_id}
         server = self.api.post_server(body)
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])

         # Verify the group uuid is set in the request spec.
@@ -129,8 +129,7 @@ class MissingReqSpecInstanceGroupUUIDTestCase(
         with mock.patch.dict(host1_driver.capabilities,
                              supports_migrate_to_same_host=False):
             self.api.post_server_action(server['id'], {'migrate': None})
-            server = self._wait_for_state_change(
-                self.api, server, 'VERIFY_RESIZE')
+            server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
             self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])

         # The RequestSpec.instance_group.uuid should still be set.
@@ -57,30 +57,29 @@ class RegressionTest1835822(
         if server_args:
             basic_server.update(server_args)
         server = self.api.post_server({'server': basic_server})
-        return self._wait_for_state_change(self.api, server, 'ACTIVE')
+        return self._wait_for_state_change(server, 'ACTIVE')

     def _hard_reboot_server(self, active_server):
         args = {"reboot": {"type": "HARD"}}
         self.api.api_post('servers/%s/action' %
                           active_server['id'], args)
         fake_notifier.wait_for_versioned_notifications('instance.reboot.end')
-        return self._wait_for_state_change(self.api, active_server, 'ACTIVE')
+        return self._wait_for_state_change(active_server, 'ACTIVE')

     def _rebuild_server(self, active_server):
         args = {"rebuild": {"imageRef": self.image_ref_1}}
         self.api.api_post('servers/%s/action' %
                           active_server['id'], args)
         fake_notifier.wait_for_versioned_notifications('instance.rebuild.end')
-        return self._wait_for_state_change(self.api, active_server, 'ACTIVE')
+        return self._wait_for_state_change(active_server, 'ACTIVE')

     def _shelve_server(self, active_server):
         self.api.post_server_action(active_server['id'], {'shelve': {}})
-        return self._wait_for_state_change(
-            self.api, active_server, 'SHELVED_OFFLOADED')
+        return self._wait_for_state_change(active_server, 'SHELVED_OFFLOADED')

     def _unshelve_server(self, shelved_server):
         self.api.post_server_action(shelved_server['id'], {'unshelve': {}})
-        return self._wait_for_state_change(self.api, shelved_server, 'ACTIVE')
+        return self._wait_for_state_change(shelved_server, 'ACTIVE')

     # ---------------------------- tests ----------------------------

     def test_create_server_with_config_drive(self):
@@ -80,11 +80,11 @@ class BuildRescheduleClaimFailsTestCase(
         # Now that our stub is in place, try to create a server and wait for it
         # to go to ERROR status.
         server = self._build_minimal_create_server_request(
-            self.api, 'test_build_reschedule_alt_host_alloc_fails',
+            'test_build_reschedule_alt_host_alloc_fails',
             image_uuid=fake_image.get_valid_image_id(),
             networks=[{'port': self.neutron.port_1['id']}])
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ERROR')
+        server = self._wait_for_state_change(server, 'ERROR')

         # Wait for the MaxRetriesExceeded fault to be recorded.
         # set_vm_state_and_notify sets the vm_state to ERROR before the fault
@@ -43,12 +43,12 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
         self.flags(compute=version_cap, group='upgrade_levels')

         server_req = self._build_minimal_create_server_request(
-            self.api, 'server1',
+            'server1',
             networks=[],
             image_uuid=fake_image.get_valid_image_id(),
             flavor_id=self.flavor1['id'])
         server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         orig_claim = nova.compute.resource_tracker.ResourceTracker.resize_claim
         claim_calls = []
@@ -76,12 +76,10 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
         # We expect that the instance is on host3 as the scheduler
         # selected host2 due to our weigher and the cold migrate failed
         # there and re-scheduled to host3 were it succeeded.
-        self._wait_for_server_parameter(
-            self.api, server,
-            {
-                'OS-EXT-SRV-ATTR:host': 'host3',
-                'OS-EXT-STS:task_state': None,
-                'status': 'VERIFY_RESIZE'})
+        self._wait_for_server_parameter(server, {
+            'OS-EXT-SRV-ATTR:host': 'host3',
+            'OS-EXT-STS:task_state': None,
+            'status': 'VERIFY_RESIZE'})

         # we ensure that there was a failed and then a successful claim call
         self.assertEqual(['host2', 'host3'], claim_calls)
@@ -68,11 +68,9 @@ class ForcedHostMissingReScheduleTestCase(
         # We expect that the instance re-scheduled but successfully ended
         # up on the second destination host.
-        self._wait_for_server_parameter(
-            self.api, server,
-            {
-                'OS-EXT-STS:task_state': None,
-                'status': 'VERIFY_RESIZE'})
+        self._wait_for_server_parameter(server, {
+            'OS-EXT-STS:task_state': None,
+            'status': 'VERIFY_RESIZE'})

         # we ensure that there was a failed and then a successful claim call
         self.assertEqual(2, len(claim_calls))
@@ -37,10 +37,10 @@ class DeletedServerAllocationRevertTest(
        source host and target host.
        """
         server = self._build_minimal_create_server_request(
-            self.api, name, image_uuid=fake_image.get_valid_image_id(),
+            name, image_uuid=fake_image.get_valid_image_id(),
             networks='none')
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         source_host = server['OS-EXT-SRV-ATTR:host']
         target_host = 'host2' if source_host == 'host1' else 'host1'
         return server, source_host, target_host
@@ -127,7 +127,7 @@ class DeletedServerAllocationRevertTest(
         # action event after the task rollback happens.
         self._wait_for_action_fail_completion(
             server, instance_actions.LIVE_MIGRATION,
-            'conductor_live_migrate_instance', api=self.api)
+            'conductor_live_migrate_instance')
         self._assert_no_allocations(server)

     def test_migrate_on_compute_fail(self):
@@ -155,6 +155,5 @@ class DeletedServerAllocationRevertTest(
         # when the instance is deleted so just wait for the failed instance
         # action event after the allocation revert happens.
         self._wait_for_action_fail_completion(
-            server, instance_actions.MIGRATE, 'compute_prep_resize',
-            api=self.api)
+            server, instance_actions.MIGRATE, 'compute_prep_resize')
         self._assert_no_allocations(server)
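The api= keyword dropped from _wait_for_action_fail_completion in the hunks above is the same cleanup applied to a keyword argument instead of a positional one. Sketched under the assumption that an internal helper does the actual polling of instance-action events (the helper name and its event_result flag are illustrative, not confirmed by this diff):

    def _wait_for_action_fail_completion(self, server, expected_action,
                                         event_name):
        # Wait for the given instance action event to report an error
        # result, resolving the API client internally as elsewhere.
        api = getattr(self, 'admin_api', None) or self.api
        return self._wait_for_instance_action_event(
            api, server, expected_action, event_name, event_result='error')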
@@ -52,11 +52,9 @@ class UpdateResourceMigrationRaceTest(
             server['id'],
             {'os-migrateLive': {'host': None, 'block_migration': 'auto'}})
-        self._wait_for_server_parameter(
-            self.api, server,
-            {
-                'OS-EXT-STS:task_state': None,
-                'status': 'ACTIVE'})
+        self._wait_for_server_parameter(server, {
+            'OS-EXT-STS:task_state': None,
+            'status': 'ACTIVE'})

         # NOTE(efried): This was bug 1849165 where
         # _populate_assigned_resources raised a TypeError because it tried
@@ -42,10 +42,10 @@ class ListDeletedServersWithMarker(test.TestCase,
     def test_list_deleted_servers_with_marker(self):
         # Create a server.
         server = self._build_minimal_create_server_request(
-            self.api, 'test_list_deleted_servers_with_marker',
+            'test_list_deleted_servers_with_marker',
             image_uuid=fake_image.get_valid_image_id())
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # Now delete the server and wait for it to be gone.
         self.api.delete_server(server['id'])
         self._wait_until_deleted(server)
@@ -215,13 +215,12 @@ class AggregateRequestFiltersTest(
         flavor_id = flavor_id or self.flavors[0]['id']
         image_uuid = image_id or '155d900f-4e14-4e4c-a73d-069cbf4541e6'
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test-instance', flavor_id=flavor_id,
+            'test-instance', flavor_id=flavor_id,
             image_uuid=image_uuid,
             networks='none', az=az)

         created_server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(
-            self.admin_api, created_server, end_status)
+        server = self._wait_for_state_change(created_server, end_status)

         return server
@@ -330,8 +329,7 @@ class AggregatePostTest(AggregateRequestFiltersTest):
         # Configure for the SOFT_DELETED scenario.
         self.flags(reclaim_instance_interval=300)
         self.api.delete_server(server['id'])
-        server = self._wait_for_state_change(
-            self.admin_api, server, 'SOFT_DELETED')
+        server = self._wait_for_state_change(server, 'SOFT_DELETED')
         self.assertRaisesRegex(
             client.OpenStackApiException,
             'One or more hosts contain instances in this zone.',
@@ -876,7 +874,7 @@ class TestAggregateMultiTenancyIsolationFilter(
        aggregate
        """
         # Create a tenant-isolated aggregate for the non-admin user.
-        user_api = self.useFixture(
+        self.api = self.useFixture(
             nova_fixtures.OSAPIFixture(api_version='v2.1',
                                        project_id=uuids.non_admin)).api
         agg_id = self.admin_api.post_aggregate(
@@ -901,13 +899,12 @@
             spy_get_filtered_hosts)
         # Create a server for the admin - should only have one host candidate.
         server_req = self._build_minimal_create_server_request(
-            self.admin_api,
             'test_aggregate_multitenancy_isolation_filter-admin',
             networks='none')  # requires microversion 2.37
         server_req = {'server': server_req}
         with utils.temporary_mutation(self.admin_api, microversion='2.37'):
             server = self.admin_api.post_server(server_req)
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # Assert it's not on host2 which is isolated to the non-admin tenant.
         self.assertNotEqual('host2', server['OS-EXT-SRV-ATTR:host'])
         self.assertEqual(1, len(self.filtered_hosts))
@@ -917,13 +914,12 @@
         # up on host2 because the other host, which is not isolated to the
         # aggregate, is still a candidate.
         server_req = self._build_minimal_create_server_request(
-            user_api,
             'test_aggregate_multitenancy_isolation_filter-user',
             networks='none')  # requires microversion 2.37
         server_req = {'server': server_req}
-        with utils.temporary_mutation(user_api, microversion='2.37'):
-            server = user_api.post_server(server_req)
-        self._wait_for_state_change(user_api, server, 'ACTIVE')
+        with utils.temporary_mutation(self.api, microversion='2.37'):
+            server = self.api.post_server(server_req)
+        self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual(2, len(self.filtered_hosts))
@@ -1028,10 +1024,10 @@ class AggregateMultiTenancyIsolationColdMigrateTest(
        """
         img = nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID
         server_req_body = self._build_minimal_create_server_request(
-            self.api, 'test_cold_migrate_server', image_uuid=img,
+            'test_cold_migrate_server', image_uuid=img,
             networks='none')
         server = self.api.post_server({'server': server_req_body})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # Ensure the server ended up in host2 or host3
         original_host = server['OS-EXT-SRV-ATTR:host']
         self.assertNotEqual('host1', original_host)
@@ -1039,8 +1035,7 @@
         # in the same tenant-isolated aggregate.
         self.admin_api.api_post(
             '/servers/%s/action' % server['id'], {'migrate': None})
-        server = self._wait_for_state_change(
-            self.admin_api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         # Ensure the server is on the other host in the same aggregate.
         expected_host = 'host3' if original_host == 'host2' else 'host2'
         self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
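One hunk above is not purely mechanical: TestAggregateMultiTenancyIsolationFilter previously kept the non-admin client in a local user_api variable, which an attribute-resolving helper cannot see. Storing the fixture's client as self.api instead is what makes the shortened calls work for the non-admin server too, assuming a lookup along the lines of:

    # Assumed lookup inside the helpers: a local variable in the test method
    # is invisible here, an instance attribute is not.
    api = getattr(self, 'admin_api', None) or self.api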
@@ -74,10 +74,10 @@ class TestAvailabilityZoneScheduling(
     def _create_server(self, name):
         # Create a server, it doesn't matter which host it ends up in.
         server_body = self._build_minimal_create_server_request(
-            self.api, name, image_uuid=fake_image.get_valid_image_id(),
+            name, image_uuid=fake_image.get_valid_image_id(),
             flavor_id=self.flavor1, networks='none')
         server = self.api.post_server({'server': server_body})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         original_host = server['OS-EXT-SRV-ATTR:host']
         # Assert the server has the AZ set (not None or 'nova').
         expected_zone = 'zone1' if original_host == 'host1' else 'zone2'
@@ -153,7 +153,7 @@ class TestAvailabilityZoneScheduling(
         # Resize the server which should move it to the other zone.
         self.api.post_server_action(
             server['id'], {'resize': {'flavorRef': self.flavor2}})
-        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')

         # Now the server should be in the other AZ.
         new_zone = 'zone2' if original_host == 'host1' else 'zone1'
@@ -161,5 +161,5 @@
         # Revert the resize and the server should be back in the original AZ.
         self.api.post_server_action(server['id'], {'revertResize': None})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self._assert_instance_az(server, original_az)
@@ -79,7 +79,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
         server['block_device_mapping_v2'] = [bdm]
         created_server = self.api.post_server({"server": server})
         server_id = created_server['id']
-        self._wait_for_state_change(self.api, created_server, 'ACTIVE')
+        self._wait_for_state_change(created_server, 'ACTIVE')

         # Check that hypervisor local disk reporting is still 0
         self._verify_zero_local_gb_used()
@@ -94,7 +94,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
         # Resize
         post_data = {'resize': {'flavorRef': flavor_id_alt}}
         self.api.post_server_action(server_id, post_data)
-        self._wait_for_state_change(self.api, created_server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(created_server, 'VERIFY_RESIZE')

         # Check that hypervisor local disk reporting is still 0
         self._verify_zero_local_gb_used()
@@ -106,7 +106,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
         # Confirm the resize
         post_data = {'confirmResize': None}
         self.api.post_server_action(server_id, post_data)
-        self._wait_for_state_change(self.api, created_server, 'ACTIVE')
+        self._wait_for_state_change(created_server, 'ACTIVE')

         # Check that hypervisor local disk reporting is still 0
         self._verify_zero_local_gb_used()
@@ -118,7 +118,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
         # Shelve
         post_data = {'shelve': None}
         self.api.post_server_action(server_id, post_data)
-        self._wait_for_state_change(self.api, created_server,
+        self._wait_for_state_change(created_server,
                                     'SHELVED_OFFLOADED')

         # Check that hypervisor local disk reporting is still 0
@@ -131,7 +131,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
         # Unshelve
         post_data = {'unshelve': None}
         self.api.post_server_action(server_id, post_data)
-        self._wait_for_state_change(self.api, created_server, 'ACTIVE')
+        self._wait_for_state_change(created_server, 'ACTIVE')

         # Check that hypervisor local disk reporting is still 0
         self._verify_zero_local_gb_used()
@@ -146,7 +146,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
         image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
         post_data = {'rebuild': {'imageRef': image_uuid}}
         self.api.post_server_action(server_id, post_data)
-        self._wait_for_state_change(self.api, created_server, 'ACTIVE')
+        self._wait_for_state_change(created_server, 'ACTIVE')

         # Check that hypervisor local disk reporting is still 0
         self._verify_zero_local_gb_used()
@@ -161,7 +161,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
        """
         self.flags(max_local_block_devices=0)
         server = self._build_minimal_create_server_request(
-            self.admin_api, 'test_max_local_block_devices_0_force_bfv')
+            'test_max_local_block_devices_0_force_bfv')
         ex = self.assertRaises(api_client.OpenStackApiException,
                                self.admin_api.post_server,
                                {'server': server})
@@ -203,7 +203,7 @@ class BootFromVolumeLargeRequestTest(test.TestCase,
         image1 = 'a2459075-d96c-40d5-893e-577ff92e721c'
         image2 = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
         server = self._build_minimal_create_server_request(
-            self.api, 'test_boot_from_volume_10_servers_255_volumes_2_images')
+            'test_boot_from_volume_10_servers_255_volumes_2_images')
         server.pop('imageRef')
         server['min_count'] = 10
         bdms = []
@@ -51,7 +51,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin,
                'destination_type': 'volume'}
         server['block_device_mapping_v2'] = [bdm]
         created_server = self.api.post_server({"server": server})
-        self._wait_for_state_change(self.api, created_server, 'ACTIVE')
+        self._wait_for_state_change(created_server, 'ACTIVE')

     def test_boot_from_volume_plus_attach_max_exceeded(self):
         # Set the maximum to 1, boot from 1 volume, and attach one volume.
@@ -72,7 +72,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin,
         created_server = self.api.post_server({"server": server})
         server_id = created_server['id']
         # Server should go into ERROR state
-        self._wait_for_state_change(self.api, created_server, 'ERROR')
+        self._wait_for_state_change(created_server, 'ERROR')
         # Verify the instance fault
         server = self.api.get_server(server_id)
         # If anything fails during _prep_block_device, a 500 internal server
@@ -95,7 +95,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin,
         server = self._build_server(flavor_id='1')
         created_server = self.api.post_server({"server": server})
         server_id = created_server['id']
-        self._wait_for_state_change(self.api, created_server, 'ACTIVE')
+        self._wait_for_state_change(created_server, 'ACTIVE')
         # Attach one volume, should pass.
         vol_id = '9a695496-44aa-4404-b2cc-ccab2501f87e'
         self.api.post_server_volume(
@@ -60,7 +60,6 @@ class CrossAZAttachTestCase(test.TestCase,
         """
         self.flags(cross_az_attach=False, group='cinder')
         server = self._build_minimal_create_server_request(
-            self.api,
             'test_cross_az_attach_false_boot_from_volume_no_az_specified')
         del server['imageRef']  # Do not need imageRef for boot from volume.
         server['block_device_mapping_v2'] = [{
@@ -70,7 +69,7 @@ class CrossAZAttachTestCase(test.TestCase,
             'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
         }]
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])

     def test_cross_az_attach_false_data_volume_no_az_specified(self):
@@ -82,7 +81,6 @@ class CrossAZAttachTestCase(test.TestCase,
         """
         self.flags(cross_az_attach=False, group='cinder')
         server = self._build_minimal_create_server_request(
-            self.api,
             'test_cross_az_attach_false_data_volume_no_az_specified')
         # Note that we use the legacy block_device_mapping parameter rather
         # than block_device_mapping_v2 because that will create an implicit
@@ -95,7 +93,7 @@ class CrossAZAttachTestCase(test.TestCase,
             'volume_id': nova_fixtures.CinderFixture.SWAP_OLD_VOL
         }]
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])

     def test_cross_az_attach_false_boot_from_volume_default_zone_match(self):
@@ -106,7 +104,6 @@ class CrossAZAttachTestCase(test.TestCase,
         self.flags(cross_az_attach=False, group='cinder')
         self.flags(default_schedule_zone=self.az)
         server = self._build_minimal_create_server_request(
-            self.api,
             'test_cross_az_attach_false_boot_from_volume_default_zone_match')
         del server['imageRef']  # Do not need imageRef for boot from volume.
         server['block_device_mapping_v2'] = [{
@@ -116,7 +113,7 @@ class CrossAZAttachTestCase(test.TestCase,
             'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
         }]
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])

     def test_cross_az_attach_false_bfv_az_specified_mismatch(self):
@@ -126,7 +123,7 @@ class CrossAZAttachTestCase(test.TestCase,
         """
         self.flags(cross_az_attach=False, group='cinder')
         server = self._build_minimal_create_server_request(
-            self.api, 'test_cross_az_attach_false_bfv_az_specified_mismatch',
+            'test_cross_az_attach_false_bfv_az_specified_mismatch',
             az='london')
         del server['imageRef']  # Do not need imageRef for boot from volume.
         server['block_device_mapping_v2'] = [{
@@ -150,7 +147,7 @@ class CrossAZAttachTestCase(test.TestCase,
         """
         self.flags(cross_az_attach=False, group='cinder')
         server = self._build_minimal_create_server_request(
-            self.api, 'test_cross_az_attach_false_no_volumes', az=self.az)
+            'test_cross_az_attach_false_no_volumes', az=self.az)
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])


@@ -127,7 +127,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
         }]
         image_uuid = fake_image.get_valid_image_id()
         server = self._build_minimal_create_server_request(
-            self.api, 'test_cross_cell_resize',
+            'test_cross_cell_resize',
             image_uuid=image_uuid,
             flavor_id=flavor['id'],
             networks=networks)
@@ -146,7 +146,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
             server.pop('imageRef', None)
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # For volume-backed make sure there is one attachment to start.
         if volume_backed:
             self.assertEqual(1, self._count_volume_attachments(server['id']),
@@ -200,7 +200,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
         if stopped:
             # Stop the server before resizing it.
             self.api.post_server_action(server['id'], {'os-stop': None})
-            self._wait_for_state_change(self.api, server, 'SHUTOFF')
+            self._wait_for_state_change(server, 'SHUTOFF')

         # Before resizing make sure quota usage is only 1 for total instances.
         self.assert_quota_usage(expected_num_instances=1)
@@ -222,8 +222,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
         self.api.post_server_action(server['id'], body)
         # Wait for the server to be resized and then verify the host has
         # changed to be the host in the other cell.
-        server = self._wait_for_state_change(self.api, server,
-                                             'VERIFY_RESIZE')
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
         self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
         # Assert that the instance is only listed one time from the API (to
         # make sure it's not listed out of both cells).
@@ -487,8 +487,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
         # The server should go to ERROR state with a fault record and
         # the API should still be showing the server from the source cell
         # because the instance mapping was not updated.
-        server = self._wait_for_server_parameter(
-            self.admin_api, server,
+        server = self._wait_for_server_parameter(server,
             {'status': 'ERROR', 'OS-EXT-STS:task_state': None})

         # The migration should be in 'error' status.
@@ -511,12 +510,12 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
         # Now hard reboot the server in the source cell and it should go back
         # to ACTIVE.
         self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}})
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Now retry the resize without the fault in the target host to make
         # sure things are OK (no duplicate entry errors in the target DB).
         self.api.post_server_action(server['id'], body)
-        self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')

     def _assert_instance_not_in_cell(self, cell_name, server_id):
         cell = self.cell_mappings[cell_name]
@@ -567,8 +566,7 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
         # The server should go to ERROR state with a fault record and
         # the API should still be showing the server from the source cell
         # because the instance mapping was not updated.
-        server = self._wait_for_server_parameter(
-            self.admin_api, server,
+        server = self._wait_for_server_parameter(server,
             {'status': 'ERROR', 'OS-EXT-STS:task_state': None})

         # The migration should be in 'error' status.
@@ -585,9 +583,9 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
         # Now hard reboot the server in the source cell and it should go back
         # to ACTIVE.
         self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}})
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Now retry the resize without the fault in the target host to make
         # sure things are OK (no duplicate entry errors in the target DB).
         self.api.post_server_action(server['id'], body)
-        self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE')
+        self._wait_for_state_change(server, 'VERIFY_RESIZE')
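
Taken together, the rewrite in each of these hunks is mechanical. Using the
resize wait above as the pattern:

    # Old style: the API client to poll with was passed explicitly.
    server = self._wait_for_state_change(self.admin_api, server,
                                         'VERIFY_RESIZE')

    # New style: the helper resolves the client itself; only the server and
    # the expected status remain.
    server = self._wait_for_state_change(server, 'VERIFY_RESIZE')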


@@ -60,10 +60,10 @@ class JsonFilterTestCase(integrated_helpers.ProviderUsageBaseTestCase):
         # custom HostNameWeigher, host1 would be chosen.
         query = jsonutils.dumps(['=', '$hypervisor_hostname', 'host2'])
         server = self._build_minimal_create_server_request(
-            self.api, 'test_filter_on_hypervisor_hostname')
+            'test_filter_on_hypervisor_hostname')
         request = {'server': server, 'os:scheduler_hints': {'query': query}}
         server = self.api.post_server(request)
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # Since we request host2 the server should be there despite host1 being
         # weighed higher.
         self.assertEqual(


@@ -51,7 +51,7 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase,
             'boot_index': 0
         }]
         server = self.api.post_server({'server': create_req})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         # Make sure the volume is attached to the first server.
         attachments = self.api.api_get(
             '/servers/%s/os-volume_attachments' % server['id']).body[
@@ -65,7 +65,7 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase,
             flavor_id='1', image='155d900f-4e14-4e4c-a73d-069cbf4541e6')
         create_req['networks'] = 'none'
         server2 = self.api.post_server({'server': create_req})
-        self._wait_for_state_change(self.api, server2, 'ACTIVE')
+        self._wait_for_state_change(server2, 'ACTIVE')
         # Attach the volume to the second server.
         self.api.api_post('/servers/%s/os-volume_attachments' % server2['id'],
                           {'volumeAttachment': {'volumeId': volume_id}})


@@ -414,7 +414,7 @@ class TestNovaManagePlacementHealAllocations(
         provider uuid
         """
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=flavor['id'],
+            'some-server', flavor_id=flavor['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none')
         server_req['availability_zone'] = 'nova:%s' % hostname
@@ -428,8 +428,7 @@ class TestNovaManagePlacementHealAllocations(
             }]
             server_req['imageRef'] = ''
         created_server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(
-            self.admin_api, created_server, 'ACTIVE')
+        server = self._wait_for_state_change(created_server, 'ACTIVE')

         # Verify that our source host is what the server ended up on
         self.assertEqual(hostname, server['OS-EXT-SRV-ATTR:host'])
@@ -564,8 +563,7 @@ class TestNovaManagePlacementHealAllocations(
         # The server status goes to SHELVED_OFFLOADED before the host/node
         # is nulled out in the compute service, so we also have to wait for
         # that so we don't race when we run heal_allocations.
-        server = self._wait_for_server_parameter(
-            self.admin_api, server,
+        server = self._wait_for_server_parameter(server,
             {'OS-EXT-SRV-ATTR:host': None, 'status': 'SHELVED_OFFLOADED'})
         result = self.cli.heal_allocations(verbose=True)
         self.assertEqual(4, result, self.output.getvalue())
@@ -788,7 +786,7 @@ class TestNovaManagePlacementHealPortAllocations(
         server = self._create_server(
             flavor=self.flavor,
             networks=[{'port': port['id']} for port in ports])
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # This is a hack to simulate that we have a server that is missing
         # allocation for its port
@@ -1482,23 +1480,23 @@ class TestDBArchiveDeletedRowsMultiCell(integrated_helpers.InstanceHelperMixin,
         # Boot a server to cell1
         server_ids = {}
         server = self._build_minimal_create_server_request(
-            self.api, 'cell1-server', az='nova:host1')
+            'cell1-server', az='nova:host1')
         created_server = self.api.post_server({'server': server})
-        self._wait_for_state_change(self.api, created_server, 'ACTIVE')
+        self._wait_for_state_change(created_server, 'ACTIVE')
         server_ids['cell1'] = created_server['id']
         # Boot a server to cell2
         server = self._build_minimal_create_server_request(
-            self.api, 'cell2-server', az='nova:host2')
+            'cell2-server', az='nova:host2')
         created_server = self.api.post_server({'server': server})
-        self._wait_for_state_change(self.api, created_server, 'ACTIVE')
+        self._wait_for_state_change(created_server, 'ACTIVE')
         server_ids['cell2'] = created_server['id']
         # Boot a server to cell0 (cause ERROR state prior to schedule)
         server = self._build_minimal_create_server_request(
-            self.api, 'cell0-server')
+            'cell0-server')
         # Flavor m1.xlarge cannot be fulfilled
         server['flavorRef'] = 'http://fake.server/5'
         created_server = self.api.post_server({'server': server})
-        self._wait_for_state_change(self.api, created_server, 'ERROR')
+        self._wait_for_state_change(created_server, 'ERROR')
         server_ids['cell0'] = created_server['id']
         # Verify all the servers are in the databases
         for cell_name, server_id in server_ids.items():
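
The az='nova:host1' arguments above rely on nova's zone:host syntax: an
availability_zone of '<zone>:<host>' pins the instance to that host at boot
time. With the hypothetical helper sketched earlier, the resulting request
body would look roughly like this:

    req = build_minimal_create_server_request('cell1-server',
                                              az='nova:host1')
    # req == {'name': 'cell1-server',
    #         'imageRef': DEFAULT_IMAGE_UUID,
    #         'availability_zone': 'nova:host1'}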


@@ -69,9 +69,9 @@ class HostStatusPolicyTestCase(test.TestCase,
             # Starting with microversion 2.37 the networks field is required.
             kwargs['networks'] = networks
         server = self._build_minimal_create_server_request(
-            self.api, 'test_host_status_unknown_only', **kwargs)
+            'test_host_status_unknown_only', **kwargs)
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         return server

     @staticmethod
@@ -94,12 +94,12 @@ class HostStatusPolicyTestCase(test.TestCase,
         server = self._get_server(admin_func())
         # We need to wait for ACTIVE if this was a post rebuild server action,
         # else a subsequent rebuild request will fail with a 409 in the API.
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         # Verify admin can see the host status UP.
         self.assertEqual('UP', server['host_status'])
         # Get server as normal non-admin user.
         server = self._get_server(func())
-        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
         # Verify non-admin do not receive the host_status field because it is
         # not UNKNOWN.
         self.assertNotIn('host_status', server)


@@ -50,8 +50,7 @@ class MultiCellSchedulerTestCase(test.TestCase,
         self.addCleanup(fake_image.FakeImageService_reset)

     def _test_create_and_migrate(self, expected_status, az=None):
-        server = self._build_minimal_create_server_request(self.api,
-                                                           'some-server',
+        server = self._build_minimal_create_server_request('some-server',
                                                            az=az)
         post = {'server': server}
         # If forcing the server onto a host we have to use the admin API.
@@ -59,8 +58,7 @@ class MultiCellSchedulerTestCase(test.TestCase,
         created_server = api.post_server(post)
         # Wait for it to finish being created
-        found_server = self._wait_for_state_change(
-            self.admin_api, created_server, 'ACTIVE')
+        found_server = self._wait_for_state_change(created_server, 'ACTIVE')

         return self.admin_api.api_post(
             '/servers/%s/action' % found_server['id'],
             {'migrate': None},


@@ -28,12 +28,11 @@ class ServerExternalEventsTestV276(
         flavors = self.api.get_flavors()
         server_req = self._build_minimal_create_server_request(
-            self.api, "some-server", flavor_id=flavors[0]["id"],
+            "some-server", flavor_id=flavors[0]["id"],
             image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6",
             networks='none')
         created_server = self.api.post_server({'server': server_req})
-        self.server = self._wait_for_state_change(
-            self.api, created_server, 'ACTIVE')
+        self.server = self._wait_for_state_change(created_server, 'ACTIVE')

         self.power_off = {'name': 'power-update',
                          'tag': 'POWER_OFF',
                          'server_uuid': self.server["id"]}
@@ -50,7 +49,7 @@ class ServerExternalEventsTestV276(
         expected_params = {'OS-EXT-STS:task_state': None,
                            'OS-EXT-STS:vm_state': vm_states.STOPPED,
                            'OS-EXT-STS:power_state': power_state.SHUTDOWN}
-        server = self._wait_for_server_parameter(self.api, self.server,
+        server = self._wait_for_server_parameter(self.server,
                                                  expected_params)
         msg = ' with target power state POWER_OFF.'
         self.assertIn(msg, self.stdlog.logger.output)
@@ -79,8 +78,7 @@ class ServerExternalEventsTestV276(
         expected_params = {'OS-EXT-STS:task_state': None,
                            'OS-EXT-STS:vm_state': vm_states.ACTIVE,
                            'OS-EXT-STS:power_state': power_state.RUNNING}
-        server = self._wait_for_server_parameter(self.api, self.server,
-                                                 expected_params)
+        server = self._wait_for_server_parameter(self.server, expected_params)
         msg = ' with target power state POWER_ON.'
         self.assertIn(msg, self.stdlog.logger.output)
         # Test if this is logged in the instance action list.


@@ -55,18 +55,18 @@ class ServerFaultTestCase(test.TestCase,
         """
         # Create the server with the non-admin user.
         server = self._build_minimal_create_server_request(
-            self.api, 'test_server_fault_non_nova_exception',
+            'test_server_fault_non_nova_exception',
             image_uuid=fake_image.get_valid_image_id(),
             networks=[{'port': nova_fixtures.NeutronFixture.port_1['id']}])
         server = self.api.post_server({'server': server})
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')

         # Stop the server before rebooting it so that after the driver.reboot
         # method raises an exception, the fake driver does not report the
         # instance power state as running - that will make the compute manager
         # set the instance vm_state to error.
         self.api.post_server_action(server['id'], {'os-stop': None})
-        server = self._wait_for_state_change(self.admin_api, server, 'SHUTOFF')
+        server = self._wait_for_state_change(server, 'SHUTOFF')

         # Stub out the compute driver reboot method to raise a non-nova
         # exception to simulate some error from the underlying hypervisor
@@ -83,8 +83,8 @@ class ServerFaultTestCase(test.TestCase,
         # decorator runs before the reverts_task_state decorator so we will
         # be sure the fault is set on the server.
         server = self._wait_for_server_parameter(
-            self.api, server, {'status': 'ERROR',
-                               'OS-EXT-STS:task_state': None})
+            server, {'status': 'ERROR', 'OS-EXT-STS:task_state': None},
+            api=self.api)
         mock_reboot.assert_called_once()
         # The server fault from the non-admin user API response should not
         # have details in it.
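
Note that the rewritten call above still passes api=self.api explicitly: the
test asserts the non-admin view of the fault, so it pins the non-admin
client instead of letting the helper pick its default. A sketch of a
signature compatible with both call styles seen in this diff; the body,
defaults and polling constants are assumptions:

    import time

    def _wait_for_server_parameter(self, server, expected_params,
                                   max_retries=50, api=None):
        # Fall back to the test case's default client unless the caller
        # overrides it, as ServerFaultTestCase does here.
        api = api or getattr(self, 'admin_api', None) or self.api
        for _ in range(max_retries):
            server = api.get_server(server['id'])
            if all(server.get(key) == value
                   for key, value in expected_params.items()):
                return server
            time.sleep(0.1)
        raise AssertionError(
            'server %s never matched %s' % (server['id'], expected_params))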


@@ -90,7 +90,7 @@ class ServerGroupTestBase(test.TestCase,
                        expected_status='ACTIVE', flavor=None,
                        az=None):
         server = self._build_minimal_create_server_request(
-            self.api, 'some-server',
+            'some-server',
             image_uuid='a2459075-d96c-40d5-893e-577ff92e721c', networks=[],
             az=az)
         if flavor:
@@ -103,7 +103,7 @@ class ServerGroupTestBase(test.TestCase,

         # Wait for it to finish being created
         found_server = self._wait_for_state_change(
-            self.admin_api, created_server, expected_status)
+            created_server, expected_status)

         return found_server

@@ -323,8 +323,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
                             '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}}
         self.api.post_server_action(servers[1]['id'], post)

-        rebuilt_server = self._wait_for_state_change(
-            self.admin_api, servers[1], 'ACTIVE')
+        rebuilt_server = self._wait_for_state_change(servers[1], 'ACTIVE')

         self.assertEqual(post['rebuild']['imageRef'],
                          rebuilt_server.get('image')['id'])
@@ -369,7 +368,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
         post = {'migrate': {}}
         self.admin_api.post_server_action(servers[1]['id'], post)
         migrated_server = self._wait_for_state_change(
-            self.admin_api, servers[1], 'VERIFY_RESIZE')
+            servers[1], 'VERIFY_RESIZE')

         self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
                             migrated_server['OS-EXT-SRV-ATTR:host'])
@@ -384,7 +383,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
         server1_old_host = servers[1]['OS-EXT-SRV-ATTR:host']
         self.admin_api.post_server_action(servers[1]['id'], post)
         migrated_server = self._wait_for_state_change(
-            self.admin_api, servers[1], 'VERIFY_RESIZE')
+            servers[1], 'VERIFY_RESIZE')

         self.assertEqual(server1_old_host,
                          migrated_server['OS-EXT-SRV-ATTR:host'])
@@ -424,8 +423,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
         post = {'evacuate': {'onSharedStorage': False}}
         self.admin_api.post_server_action(servers[1]['id'], post)
         self._wait_for_migration_status(servers[1], ['done'])
-        evacuated_server = self._wait_for_state_change(
-            self.admin_api, servers[1], 'ACTIVE')
+        evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE')

         # check that the server is evacuated to another host
         self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -447,7 +445,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
         self.admin_api.post_server_action(servers[1]['id'], post)
         self._wait_for_migration_status(servers[1], ['error'])
         server_after_failed_evac = self._wait_for_state_change(
-            self.admin_api, servers[1], 'ERROR')
+            servers[1], 'ERROR')

         # assert that after a failed evac the server active on the same host
         # as before
@@ -467,7 +465,7 @@ class ServerGroupTestV21(ServerGroupTestBase):
         self.admin_api.post_server_action(servers[1]['id'], post)
         self._wait_for_migration_status(servers[1], ['error'])
         server_after_failed_evac = self._wait_for_state_change(
-            self.admin_api, servers[1], 'ERROR')
+            servers[1], 'ERROR')

         # assert that after a failed evac the server active on the same host
         # as before
@@ -608,8 +606,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
         post = {'evacuate': {}}
         self.admin_api.post_server_action(servers[1]['id'], post)
         self._wait_for_migration_status(servers[1], ['done'])
-        evacuated_server = self._wait_for_state_change(
-            self.admin_api, servers[1], 'ACTIVE')
+        evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE')

         # check that the server is evacuated
         self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -633,7 +630,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
         self.admin_api.post_server_action(servers[1]['id'], post)
         self._wait_for_migration_status(servers[1], ['error'])
         server_after_failed_evac = self._wait_for_state_change(
-            self.admin_api, servers[1], 'ERROR')
+            servers[1], 'ERROR')

         # assert that after a failed evac the server active on the same host
         # as before
@@ -653,7 +650,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
         self.admin_api.post_server_action(servers[1]['id'], post)
         self._wait_for_migration_status(servers[1], ['error'])
         server_after_failed_evac = self._wait_for_state_change(
-            self.admin_api, servers[1], 'ERROR')
+            servers[1], 'ERROR')

         # assert that after a failed evac the server active on the same host
         # as before
@@ -767,7 +764,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
         post = {'migrate': {}}
         self.admin_api.post_server_action(servers[1]['id'], post)
         migrated_server = self._wait_for_state_change(
-            self.admin_api, servers[1], 'VERIFY_RESIZE')
+            servers[1], 'VERIFY_RESIZE')

         return [migrated_server['OS-EXT-SRV-ATTR:host'],
                 servers[0]['OS-EXT-SRV-ATTR:host']]
@@ -794,8 +791,7 @@ class ServerGroupTestV215(ServerGroupTestV21):
         post = {'evacuate': {}}
         self.admin_api.post_server_action(servers[1]['id'], post)
         self._wait_for_migration_status(servers[1], ['done'])
-        evacuated_server = self._wait_for_state_change(
-            self.admin_api, servers[1], 'ACTIVE')
+        evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE')

         # Note(gibi): need to get the server again as the state of the instance
         # goes to ACTIVE first then the host of the instance changes to the
@@ -974,7 +970,6 @@ class TestAntiAffinityLiveMigration(test.TestCase,
         servers = []
         for x in range(2):
             server = self._build_minimal_create_server_request(
-                self.api,
                 'test_serial_no_valid_host_then_pass_with_third_host-%d' % x,
                 networks='none')
             # Add the group hint so the server is created in our group.
@@ -986,8 +981,7 @@ class TestAntiAffinityLiveMigration(test.TestCase,
             with utils.temporary_mutation(self.api, microversion='2.37'):
                 server = self.api.post_server(server_req)
             servers.append(
-                self._wait_for_state_change(
-                    self.admin_api, server, 'ACTIVE'))
+                self._wait_for_state_change(server, 'ACTIVE'))

         # Make sure each server is on a unique host.
         hosts = set([svr['OS-EXT-SRV-ATTR:host'] for svr in servers])
@@ -1021,7 +1015,7 @@ class TestAntiAffinityLiveMigration(test.TestCase,
         # should work this time.
         self.start_service('compute', host='host3')
         self.admin_api.post_server_action(server['id'], body)
-        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+        server = self._wait_for_state_change(server, 'ACTIVE')
         # Now the server should be on host3 since that was the only available
         # host for the live migration.
         self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])

File diff suppressed because it is too large


@@ -419,11 +419,11 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):

     def _create_instance(self, flavor):
         server_req = self._build_minimal_create_server_request(
-            self.api, 'some-server', flavor_id=flavor['id'],
+            'some-server', flavor_id=flavor['id'],
             image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
             networks='none', az='nova:host1')
         inst = self.api.post_server({'server': server_req})
-        return self._wait_for_state_change(self.admin_api, inst, 'ACTIVE')
+        return self._wait_for_state_change(inst, 'ACTIVE')

     def test_reshape(self):
         """On startup, virt driver signals it needs to reshape, then does so.


@@ -354,7 +354,6 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
              servers_policies.ZERO_DISK_FLAVOR: base_policies.RULE_ADMIN_API},
             overwrite=False)
         server_req = self._build_minimal_create_server_request(
-            self.api,
             'test_create_image_backed_server_with_zero_disk_fails',
             fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
             self.zero_disk_flavor['id'])
@@ -376,7 +375,6 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
         self.start_service('conductor')
         self.start_service('scheduler')
         server_req = self._build_minimal_create_server_request(
-            self.api,
             'test_create_volume_backed_server_with_zero_disk_allowed',
             flavor_id=self.zero_disk_flavor['id'])
         server_req.pop('imageRef', None)
@@ -387,5 +385,5 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
             'boot_index': 0
         }]
         server = self.admin_api.post_server({'server': server_req})
-        server = self._wait_for_state_change(self.api, server, 'ERROR')
+        server = self._wait_for_state_change(server, 'ERROR')
         self.assertIn('No valid host', server['fault']['message'])


@@ -147,9 +147,9 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
         self.admin_api.post_server_action(server['id'], {'evacuate': {}})
         # The host does not change until after the status is changed to ACTIVE
         # so wait for both parameters.
-        self._wait_for_server_parameter(
-            self.admin_api, server, {'status': 'ACTIVE',
-                                     'OS-EXT-SRV-ATTR:host': 'host2'})
+        self._wait_for_server_parameter(server, {
+            'status': 'ACTIVE',
+            'OS-EXT-SRV-ATTR:host': 'host2'})
         # Delete the compute service for host1 and check the related
         # placement resources for that host.
         self.admin_api.api_delete('/os-services/%s' % service['id'])
@@ -324,10 +324,10 @@ class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase):
         # Try creating a server which should fail because nothing is available.
         networks = [{'port': self.neutron.port_1['id']}]
         server_req = self._build_minimal_create_server_request(
-            self.api, 'test_compute_status_filter',
+            'test_compute_status_filter',
             image_uuid=fake_image.get_valid_image_id(), networks=networks)
         server = self.api.post_server({'server': server_req})
-        server = self._wait_for_state_change(self.api, server, 'ERROR')
+        server = self._wait_for_state_change(server, 'ERROR')
         # There should be a NoValidHost fault recorded.
         self.assertIn('fault', server)
         self.assertIn('No valid host', server['fault']['message'])
@@ -339,7 +339,7 @@ class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase):

         # Try creating another server and it should be OK.
         server = self.api.post_server({'server': server_req})
-        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')

         # Stop, force-down and disable the service so the API cannot call
         # the compute service to sync the trait.