functional: Add unified '_build_server' helper function

'_IntegratedTestBase' has subclassed 'InstanceHelperMixin' since change
I0d21cb94c932e6e556eca964c57868c705b2d120, which means both now provide
a '_build_minimal_create_server_request' function. However, only
'_IntegratedTestBase' provides a '_build_server' function. The
'_build_minimal_create_server_request' and '_build_server' functions do
largely the same thing, differing only in minor details. Unify them
into a single helper under the '_build_server' name.

Change-Id: I91fa2f73185fef48e9aae9b7f61389c374e06676
Signed-off-by: Stephen Finucane <sfinucan@redhat.com>
This commit is contained in:
Stephen Finucane 2019-12-06 15:31:17 +00:00
parent 093e65c2ca
commit 458d37fceb
56 changed files with 245 additions and 381 deletions

View File

@ -29,8 +29,7 @@ class MultinicSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
self.uuid = server['id']
def _boot_a_server(self, expected_status='ACTIVE', extra_params=None):
server = self._build_minimal_create_server_request(
'MultinicSampleJsonTestServer')
server = self._build_server()
if extra_params:
server.update(extra_params)

View File

@ -13,12 +13,10 @@
import mock
import time
from nova import context as nova_context
from nova import objects
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.tests.unit.image import fake as fake_image
class ComputeManagerInitHostTestCase(
@ -38,10 +36,7 @@ class ComputeManagerInitHostTestCase(
for x in range(2):
self._start_compute('host%d' % x)
# Create a server, it does not matter on which host it lands.
name = 'test_migrate_disk_and_power_off_crash_finish_revert_migration'
server = self._build_minimal_create_server_request(
name, image_uuid=fake_image.get_valid_image_id(),
networks='auto')
server = self._build_server(networks='auto')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
# Save the source hostname for assertions later.
@ -132,9 +127,6 @@ class TestComputeRestartInstanceStuckInBuild(
super(TestComputeRestartInstanceStuckInBuild, self).setUp()
self.compute1 = self._start_compute(host='host1')
flavors = self.api.get_flavors()
self.flavor1 = flavors[0]
def test_restart_compute_while_instance_waiting_for_resource_claim(self):
"""Test for bug 1833581 where an instance is stuck in
BUILD state forever due to compute service is restarted before the
@ -156,8 +148,7 @@ class TestComputeRestartInstanceStuckInBuild(
# There is another way to trigger the issue. We can inject a sleep into
# instance_claim() to stop it. This is less realistic but it works in
# the test env.
server_req = self._build_minimal_create_server_request(
'interrupted-server', flavor_id=self.flavor1['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')

View File

@ -59,7 +59,7 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
:returns: created server (dict)
"""
# Create a server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({'server': server})
self.assertTrue(created_server['id'])
created_server_id = created_server['id']

View File

@ -97,38 +97,6 @@ class InstanceHelperMixin(object):
return self._wait_for_server_parameter(
server, {'status': expected_status}, max_retries)
def _build_minimal_create_server_request(
self, name=None, image_uuid=None, flavor_id=None, networks=None,
az=None, host=None):
server = {}
if not image_uuid:
# NOTE(takashin): In API version 2.36, image APIs were deprecated.
# In API version 2.36 or greater, self.api.get_images() returns
# a 404 error. In that case, 'image_uuid' should be specified.
image_uuid = self.api.get_images()[0]['id']
server['imageRef'] = image_uuid
if not name:
name = ''.join(
random.choice(string.ascii_lowercase) for i in range(10))
server['name'] = name
if not flavor_id:
# Set a valid flavorId
flavor_id = self.api.get_flavors()[0]['id']
server['flavorRef'] = 'http://fake.server/%s' % flavor_id
if networks is not None:
server['networks'] = networks
if az is not None:
server['availability_zone'] = az
# This requires at least microversion 2.74 to work
if host is not None:
server['host'] = host
return server
def _wait_until_deleted(self, server):
initially_in_error = server.get('status') == 'ERROR'
try:
@ -241,6 +209,54 @@ class InstanceHelperMixin(object):
}
return api.post_aggregate(body)['id']
def _build_server(self, name=None, image_uuid=None, flavor_id=None,
networks=None, az=None, host=None):
"""Build a request for the server create API.
:param name: A name for the server.
:param image_uuid: The ID of an existing image.
:param flavor_id: The ID of an existing flavor.
:param networks: A dict of networks to attach or a string of 'none' or
'auto'.
:param az: The name of the availability zone the instance should
request.
:param host: The host to boot the instance on. Requires API
microversion 2.74 or greater.
:returns: The generated request body.
"""
if not name:
name = ''.join(
random.choice(string.ascii_lowercase) for i in range(20))
if image_uuid is None: # we need to handle ''
# NOTE(takashin): In API version 2.36, image APIs were deprecated.
# In API version 2.36 or greater, self.api.get_images() returns
# a 404 error. In that case, 'image_uuid' should be specified.
with utils.temporary_mutation(self.api, microversion='2.35'):
image_uuid = self.api.get_images()[0]['id']
if not flavor_id:
# Set a valid flavorId
flavor_id = self.api.get_flavors()[0]['id']
server = {
'name': name,
'imageRef': image_uuid,
'flavorRef': 'http://fake.server/%s' % flavor_id,
}
if networks is not None:
server['networks'] = networks
if az is not None:
server['availability_zone'] = az
# This requires at least microversion 2.74 to work
if host is not None:
server['host'] = host
return server
class _IntegratedTestBase(test.TestCase, InstanceHelperMixin):
REQUIRES_LOCKING = True
@ -300,11 +316,6 @@ class _IntegratedTestBase(test.TestCase, InstanceHelperMixin):
if not self.ADMIN_API:
self.admin_api.microversion = self.microversion
def get_unused_server_name(self):
    """Return a server name not used by any server known to the API."""
    taken = [existing['name'] for existing in self.api.get_servers()]
    return generate_new_element(taken, 'server')
def get_unused_flavor_name_id(self):
flavors = self.api.get_flavors()
flavor_names = list()
@ -344,30 +355,6 @@ class _IntegratedTestBase(test.TestCase, InstanceHelperMixin):
self.api_fixture.admin_api.post_extra_spec(flv_id, spec)
return flv_id
def _build_server(self, flavor_id, image=None):
    """Build a server create request body for the given flavor.

    :param flavor_id: The ID of an existing flavor.
    :param image: An image ID to use, or None to pick the first image
        returned by the images API.
    :returns: The generated request body dict.
    """
    if image is None:
        # TODO(stephenfin): We need to stop relying on this API
        with utils.temporary_mutation(self.api, microversion='2.35'):
            image = self.api.get_images()[0]
        LOG.debug("Image: %s", image)
        # We now have a valid imageId
        image_ref = image['id']
    else:
        image_ref = image
    # Set a valid flavorId
    flavor = self.api.get_flavor(flavor_id)
    LOG.debug("Using flavor: %s", flavor)
    return {
        self._image_ref_parameter: image_ref,
        self._flavor_ref_parameter: 'http://fake.server/%s' % flavor['id'],
        # Set a valid server name
        'name': self.get_unused_server_name(),
    }
def _check_api_endpoint(self, endpoint, expected_middleware):
app = self.api_fixture.app().get((None, '/v2'))
@ -695,9 +682,9 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
or "none" or "auto"
:return: the API representation of the booted instance
"""
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=flavor['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=flavor['id'],
networks=networks)
server_req['availability_zone'] = 'nova:%s' % source_hostname
LOG.info('booting on %s', source_hostname)

View File

@ -70,7 +70,7 @@ class NUMAServersTest(NUMAServersTestBase):
'resource_providers'][0]['uuid']
# Create server
good_server = self._build_server(flavor_id)
good_server = self._build_server(flavor_id=flavor_id)
post = {'server': good_server}
@ -270,7 +270,7 @@ class NUMAServersTest(NUMAServersTestBase):
# 'start_service' to make sure there isn't a mismatch
self.compute = self.start_service('compute', host='compute1')
post = {'server': self._build_server(flavor_id)}
post = {'server': self._build_server(flavor_id=flavor_id)}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
@ -355,7 +355,7 @@ class NUMAServersTest(NUMAServersTestBase):
# 'start_service' to make sure there isn't a mismatch
self.compute = self.start_service('compute', host='compute1')
post = {'server': self._build_server(flavor_id)}
post = {'server': self._build_server(flavor_id=flavor_id)}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
@ -376,7 +376,7 @@ class NUMAServersTest(NUMAServersTestBase):
# Create server
flavor_a_id = self._create_flavor(extra_spec={})
good_server = self._build_server(flavor_a_id)
good_server = self._build_server(flavor_id=flavor_a_id)
post = {'server': good_server}
@ -532,7 +532,7 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
extra_spec = {'hw:cpu_policy': 'dedicated'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
server_req = self._build_server(flavor_id)
server_req = self._build_server(flavor_id=flavor_id)
server_req['host'] = 'test_compute0'
server_req['networks'] = 'auto'
@ -760,7 +760,7 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
self.compute = self.start_service('compute', host='test_compute0')
# Create server
good_server = self._build_server(flavor_id)
good_server = self._build_server(flavor_id=flavor_id)
good_server['networks'] = networks
post = {'server': good_server}
@ -908,7 +908,7 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
{'uuid': base.LibvirtNeutronFixture.network_1['id']},
]
good_server = self._build_server(flavor_id)
good_server = self._build_server(flavor_id=flavor_id)
good_server['networks'] = networks
post = {'server': good_server}

View File

@ -58,7 +58,7 @@ class _PCIServersTestBase(base.ServersTestBase):
self.compute_started = True
# Create server
good_server = self._build_server(flavor_id)
good_server = self._build_server(flavor_id=flavor_id)
post = {'server': good_server}
@ -238,8 +238,9 @@ class GetServerDiagnosticsServerWithVfTestV21(_PCIServersTestBase):
self.compute_started = True
# Create server
good_server = self._build_server(flavor_id,
'155d900f-4e14-4e4c-a73d-069cbf4541e6')
good_server = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=flavor_id)
good_server['networks'] = [
{'uuid': base.LibvirtNeutronFixture.network_1['id']},
{'uuid': base.LibvirtNeutronFixture.network_4['id']},

View File

@ -91,7 +91,7 @@ class VGPUReshapeTests(base.ServersTestBase):
extra_spec = {"resources:VGPU": 1}
flavor_id = self._create_flavor(extra_spec=extra_spec)
server_req = self._build_server(flavor_id)
server_req = self._build_server(flavor_id=flavor_id)
# NOTE(gibi): during instance_claim() there is a
# driver.update_provider_tree() call that would detect the old tree and

View File

@ -27,8 +27,8 @@ class RealTimeServersTest(base.ServersTestBase):
self.flags(sysinfo_serial='none', group='libvirt')
def test_no_dedicated_cpu(self):
flavor = self._create_flavor(extra_spec={'hw:cpu_realtime': 'yes'})
server = self._build_server(flavor)
flavor_id = self._create_flavor(extra_spec={'hw:cpu_realtime': 'yes'})
server = self._build_server(flavor_id=flavor_id)
# Cannot set realtime policy in a non dedicated cpu pinning policy
self.assertRaises(
@ -36,9 +36,9 @@ class RealTimeServersTest(base.ServersTestBase):
self.api.post_server, {'server': server})
def test_no_realtime_mask(self):
flavor = self._create_flavor(extra_spec={
flavor_id = self._create_flavor(extra_spec={
'hw:cpu_realtime': 'yes', 'hw:cpu_policy': 'dedicated'})
server = self._build_server(flavor)
server = self._build_server(flavor_id=flavor_id)
# Cannot set realtime policy if not vcpus mask defined
self.assertRaises(
@ -56,11 +56,11 @@ class RealTimeServersTest(base.ServersTestBase):
self.compute = self.start_service('compute', host='test_compute0')
flavor = self._create_flavor(extra_spec={
flavor_id = self._create_flavor(extra_spec={
'hw:cpu_realtime': 'yes',
'hw:cpu_policy': 'dedicated',
'hw:cpu_realtime_mask': '^1'})
server = self._build_server(flavor)
server = self._build_server(flavor_id=flavor_id)
created = self.api.post_server({'server': server})
instance = self._wait_for_state_change(created, 'ACTIVE')

View File

@ -125,9 +125,9 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
return compute
def _create_server(self, flavor_id, hostname):
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=flavor_id,
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=flavor_id,
networks='none')
server_req['availability_zone'] = 'nova:%s' % hostname
LOG.info('booting on %s', hostname)

View File

@ -214,8 +214,8 @@ class NotificationSampleTestBase(test.TestCase,
notification,
actual=fake_notifier.VERSIONED_NOTIFICATIONS.pop(0))
server = self._build_minimal_create_server_request(
'some-server',
server = self._build_server(
name='some-server',
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=flavor_id)

View File

@ -39,7 +39,7 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# Create a server. At this point there is only one compute service.
flavors = self.api.get_flavors()
flavor1 = flavors[0]['id']
server = self._build_server(flavor1)
server = self._build_server(flavor_id=flavor1)
server = self.api.post_server({'server': server})
self._wait_for_state_change(server, 'ACTIVE')

View File

@ -43,9 +43,6 @@ class TestLocalDeleteAllocations(test.TestCase,
self.start_service('scheduler')
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
@staticmethod
def _get_usages(placement_api, rp_uuid):
fmt = '/resource_providers/%(uuid)s/usages'
@ -81,8 +78,7 @@ class TestLocalDeleteAllocations(test.TestCase,
self.assertEqual(0, usage)
# Create a server.
server = self._build_minimal_create_server_request(
'local-delete-test', self.image_id, self.flavor_id, 'none')
server = self._build_server(networks='none')
server = self.admin_api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
@ -136,8 +132,7 @@ class TestLocalDeleteAllocations(test.TestCase,
self.assertEqual(0, usage)
# Create a server.
server = self._build_minimal_create_server_request(
'local-delete-test', self.image_id, self.flavor_id, 'none')
server = self._build_server(networks='none')
server = self.admin_api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')

View File

@ -43,9 +43,7 @@ class ServerTagsFilteringTest(test.TestCase,
# the image fake backend needed for image discovery
image_fake.stub_out_image_service(self)
self.addCleanup(image_fake.FakeImageService_reset)
# We have to get the image before we use 2.latest otherwise we'll get
# a 404 on the /images proxy API because of 2.36.
image_id = self.api.get_images()[0]['id']
# Use the latest microversion available to make sure something does
# not regress in new microversions; cap as necessary.
self.api.microversion = 'latest'
@ -57,10 +55,8 @@ class ServerTagsFilteringTest(test.TestCase,
# create two test servers
self.servers = []
for x in range(2):
server = self.api.post_server(
dict(server=self._build_minimal_create_server_request(
'test-list-server-tag-filters%i' % x, image_id,
networks='none')))
server = self._build_server(networks='none')
server = self.api.post_server({'server': server})
self.addCleanup(self.api.delete_server, server['id'])
server = self._wait_for_state_change(server, 'ACTIVE')
self.servers.append(server)

View File

@ -44,9 +44,7 @@ class ServerListLimitMarkerCell0Test(test.TestCase,
# the image fake backend needed for image discovery
image_fake.stub_out_image_service(self)
self.addCleanup(image_fake.FakeImageService_reset)
# We have to get the image before we use 2.latest otherwise we'll get
# a 404 on the /images proxy API because of 2.36.
self.image_id = self.api.get_images()[0]['id']
# Use the latest microversion available to make sure something does
# not regress in new microversions; cap as necessary.
self.api.microversion = 'latest'
@ -64,10 +62,8 @@ class ServerListLimitMarkerCell0Test(test.TestCase,
"""
# create three test servers
for x in range(3):
server = self.api.post_server(
dict(server=self._build_minimal_create_server_request(
'test-list-server-limit%i' % x, self.image_id,
networks='none')))
server_req = self._build_server(networks='none')
server = self.api.post_server({'server': server_req})
self.addCleanup(self.api.delete_server, server['id'])
self._wait_for_state_change(server, 'ERROR')

View File

@ -64,10 +64,6 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
self.start_service('conductor')
# We have to get the image before we use 2.latest otherwise we'll get
# a 404 on the /images proxy API because of 2.36.
self.image_id = self.api.get_images()[0]['id']
# Use the latest microversion available to make sure something does
# not regress in new microversions; cap as necessary.
self.admin_api.microversion = 'latest'
@ -93,10 +89,8 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
def test_evacuate_server(self):
# We first create the instance
server = self.admin_api.post_server(
dict(server=self._build_minimal_create_server_request(
'my-pretty-instance-to-evacuate', self.image_id,
networks='none')))
server = self._build_server(networks='none')
server = self.admin_api.post_server({'server': server})
server_id = server['id']
self.addCleanup(self.api.delete_server, server_id)
self._wait_for_state_change(server, 'ACTIVE')

View File

@ -66,9 +66,6 @@ class FailedEvacuateStateTests(test.TestCase,
self.compute1 = self.start_service('compute', host=self.hostname)
fake_network.set_stub_network_methods(self)
flavors = self.api.get_flavors()
self.flavor1 = flavors[0]
def _wait_for_notification_event_type(self, event_type, max_retries=10):
retry_counter = 0
while True:
@ -83,8 +80,7 @@ class FailedEvacuateStateTests(test.TestCase,
time.sleep(0.5)
def _boot_a_server(self):
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=self.flavor1['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
LOG.info('booting on %s', self.hostname)

View File

@ -65,12 +65,8 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
fake_network.set_stub_network_methods(self)
flavors = self.api.get_flavors()
self.flavor1 = flavors[0]
def _boot_servers(self, num_servers=1):
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=self.flavor1['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server_req.update({'min_count': str(num_servers),

View File

@ -101,10 +101,11 @@ class TestRequestSpecRetryReschedule(test.TestCase,
flavor1, flavor2 = flavor2, flavor1
# create the instance which should go to host1
server = self.admin_api.post_server(
dict(server=self._build_minimal_create_server_request(
'test_resize_with_reschedule_then_live_migrate',
self.image_id, flavor_id=flavor1['id'], networks='none')))
server = self._build_server(
image_uuid=self.image_id,
flavor_id=flavor1['id'],
networks='none')
server = self.admin_api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])

View File

@ -47,10 +47,10 @@ class TestServerResizeReschedule(integrated_helpers.ProviderUsageBaseTestCase):
"""Test that when a resize attempt fails, the retry comes from the
supplied host_list, and does not call the scheduler.
"""
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=self.flavor1['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor1['id'],
networks='none')
self.first_attempt = True
created_server = self.api.post_server({'server': server_req})

View File

@ -76,9 +76,9 @@ class TestResizeWithNoAllocationScheduler(
def test_resize(self):
# Create our server without networking just to keep things simple.
server_req = self._build_minimal_create_server_request(
'test-resize', flavor_id=self.old_flavor['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.old_flavor['id'],
networks='none')
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(server, 'ACTIVE')

View File

@ -51,9 +51,7 @@ class InstanceListWithDeletedServicesTestCase(
# the image fake backend needed for image discovery
fake_image.stub_out_image_service(self)
self.addCleanup(fake_image.FakeImageService_reset)
# Get the image before we set the microversion to latest to avoid
# the proxy issues with GET /images in 2.36.
self.image_id = self.api.get_images()[0]['id']
self.api.microversion = 'latest'
self.start_service('conductor')
@ -85,9 +83,7 @@ class InstanceListWithDeletedServicesTestCase(
host1 = self.start_service('compute', host='host1')
# Create an instance which will be on host1 since it's the only host.
server_req = self._build_minimal_create_server_request(
'test_instance_list_deleted_service_with_no_uuid',
image_uuid=self.image_id, networks='none')
server_req = self._build_server(networks='none')
server = self.api.post_server({'server': server_req})
self._wait_for_state_change(server, 'ACTIVE')

View File

@ -59,8 +59,7 @@ class TestMultiCreateServerGroupMemberOverQuota(
server group and then create 3 servers in the group using a
multi-create POST /servers request.
"""
server_req = self._build_minimal_create_server_request(
'test_multi_create_server_group_members_over_quota',
server_req = self._build_server(
image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
server_req['min_count'] = 3
@ -87,8 +86,7 @@ class TestMultiCreateServerGroupMemberOverQuota(
# by using NoopConductorFixture.
self.useFixture(nova_fixtures.NoopConductorFixture())
for x in range(3):
server_req = self._build_minimal_create_server_request(
'test_concurrent_request_%s' % x,
server_req = self._build_server(
image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
hints = {'group': self.created_group['id']}

View File

@ -77,8 +77,7 @@ class RescheduleBuildAvailabilityZoneUpCall(
self.stub_out('nova.compute.manager.ComputeManager.'
'build_and_run_instance', wrap_bari)
server = self._build_minimal_create_server_request(
'test_server_create_reschedule_blocked_az_up_call')
server = self._build_server()
server = self.api.post_server({'server': server})
# Because we poisoned AggregateList.get_by_host after hitting the
# compute service we have to wait for the notification that the build
@ -147,8 +146,7 @@ class RescheduleMigrateAvailabilityZoneUpCall(
self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
wrap_prep_resize)
server = self._build_minimal_create_server_request(
'test_migrate_reschedule_blocked_az_up_call')
server = self._build_server()
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
original_host = server['OS-EXT-SRV-ATTR:host']

View File

@ -102,8 +102,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
'_get_alternate_hosts', stub_get_alternate_hosts)
# Now create two servers in that group.
server_req = self._build_minimal_create_server_request(
'test_anti_affinity_multi_create',
server_req = self._build_server(
image_uuid=image_fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
server_req['min_count'] = 2

View File

@ -64,8 +64,7 @@ class ColdMigrateTargetHostThenLiveMigrateTest(
def test_cold_migrate_target_host_then_live_migrate(self):
# Create a server, it doesn't matter on which host it builds.
server = self._build_minimal_create_server_request(
'test_cold_migrate_target_host_then_live_migrate',
server = self._build_server(
image_uuid=image_fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
server = self.api.post_server({'server': server})

View File

@ -81,8 +81,7 @@ class NonPersistentFieldNotResetTest(
def _create_server(self):
# Create a server, it doesn't matter on which host it builds.
server = self._build_minimal_create_server_request(
'sample-server',
server = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server = self.api.post_server({'server': server})

View File

@ -56,7 +56,7 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase):
def test_evacuate_multi_cell(self):
# Create a server which should land on host1 since it has the highest
# weight.
server = self._build_server(self.api.get_flavors()[0]['id'])
server = self._build_server()
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])

View File

@ -86,9 +86,7 @@ class MissingReqSpecInstanceGroupUUIDTestCase(
# Create a server in the group which should land on host1 due to our
# custom weigher.
server = self._build_minimal_create_server_request(
'test_cold_migrate_reschedule')
body = dict(server=server)
body = {'server': self._build_server()}
body['os:scheduler_hints'] = {'group': group_id}
server = self.api.post_server(body)
server = self._wait_for_state_change(server, 'ACTIVE')

View File

@ -15,7 +15,6 @@ import time
from nova import exception
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.tests.unit.image import fake as fake_image
class BuildRescheduleClaimFailsTestCase(
@ -79,11 +78,9 @@ class BuildRescheduleClaimFailsTestCase(
# Now that our stub is in place, try to create a server and wait for it
# to go to ERROR status.
server = self._build_minimal_create_server_request(
'test_build_reschedule_alt_host_alloc_fails',
image_uuid=fake_image.get_valid_image_id(),
server_req = self._build_server(
networks=[{'port': self.neutron.port_1['id']}])
server = self.api.post_server({'server': server})
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(server, 'ERROR')
# Wait for the MaxRetriesExceeded fault to be recorded.

View File

@ -16,7 +16,6 @@ from nova import exception
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.tests.unit.image import fake as fake_image
class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
@ -36,17 +35,10 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
self.compute2 = self._start_compute(host='host2')
self.compute3 = self._start_compute(host='host3')
flavors = self.api.get_flavors()
self.flavor1 = flavors[0]
def _test_reschedule_migration_with_compute_rpc_pin(self, version_cap):
self.flags(compute=version_cap, group='upgrade_levels')
server_req = self._build_minimal_create_server_request(
'server1',
networks=[],
image_uuid=fake_image.get_valid_image_id(),
flavor_id=self.flavor1['id'])
server_req = self._build_server(networks='none')
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(server, 'ACTIVE')

View File

@ -14,7 +14,6 @@ from nova.compute import instance_actions
from nova.compute import manager as compute_manager
from nova.scheduler.client import query as query_client
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
class DeletedServerAllocationRevertTest(
@ -32,13 +31,11 @@ class DeletedServerAllocationRevertTest(
self._start_compute('host1')
self._start_compute('host2')
def _create_server(self, name):
"""Creates a server with the given name and returns the server,
source host and target host.
def _create_server(self):
"""Creates and return a server along with a source host and target
host.
"""
server = self._build_minimal_create_server_request(
name, image_uuid=fake_image.get_valid_image_id(),
networks='none')
server = self._build_server(networks='none')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
source_host = server['OS-EXT-SRV-ATTR:host']
@ -90,8 +87,7 @@ class DeletedServerAllocationRevertTest(
rolls back allocations before RPC casting to prep_resize on the dest
host.
"""
server, source_host, target_host = self._create_server(
'test_migration_task_rollback')
server, source_host, target_host = self._create_server()
self._disable_target_host(target_host)
self._stub_delete_server_during_scheduling(server)
@ -114,8 +110,7 @@ class DeletedServerAllocationRevertTest(
for a live migration and then fails and rolls back allocations before
RPC casting to live_migration on the source host.
"""
server, source_host, target_host = self._create_server(
'test_live_migration_task_rollback')
server, source_host, target_host = self._create_server()
self._disable_target_host(target_host)
self._stub_delete_server_during_scheduling(server)
@ -135,8 +130,7 @@ class DeletedServerAllocationRevertTest(
the instance is gone which triggers a failure and revert of the
migration-based allocations created in conductor.
"""
server, source_host, target_host = self._create_server(
'test_resize_on_compute_fail')
server, source_host, target_host = self._create_server()
# Wrap _prep_resize so we can concurrently delete the server.
original_prep_resize = compute_manager.ComputeManager._prep_resize

View File

@ -41,9 +41,7 @@ class ListDeletedServersWithMarker(test.TestCase,
def test_list_deleted_servers_with_marker(self):
# Create a server.
server = self._build_minimal_create_server_request(
'test_list_deleted_servers_with_marker',
image_uuid=fake_image.get_valid_image_id())
server = self._build_server()
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
# Now delete the server and wait for it to be gone.

View File

@ -49,10 +49,7 @@ class TestInstanceActionBuryInCell0(test.TestCase,
no compute service and result in the instance being created (buried)
in cell0.
"""
server = self._build_minimal_create_server_request(
'test_bury_in_cell0_instance_create_action',
image_uuid=fake_image.get_valid_image_id(),
networks='none')
server = self._build_server(networks='none')
# Use microversion 2.37 to create a server without any networking.
with utils.temporary_mutation(self.api, microversion='2.37'):
server = self.api.post_server({'server': server})

View File

@ -214,9 +214,9 @@ class AggregateRequestFiltersTest(
end_status='ACTIVE'):
flavor_id = flavor_id or self.flavors[0]['id']
image_uuid = image_id or '155d900f-4e14-4e4c-a73d-069cbf4541e6'
server_req = self._build_minimal_create_server_request(
'test-instance', flavor_id=flavor_id,
server_req = self._build_server(
image_uuid=image_uuid,
flavor_id=flavor_id,
networks='none', az=az)
created_server = self.api.post_server({'server': server_req})
@ -854,6 +854,9 @@ class TestAggregateMultiTenancyIsolationFilter(
self.start_service('conductor')
self.admin_api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
self.api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1',
project_id=uuids.non_admin)).api
# Add the AggregateMultiTenancyIsolation to the list of enabled
# filters since it is not enabled by default.
enabled_filters = CONF.filter_scheduler.enabled_filters
@ -874,9 +877,6 @@ class TestAggregateMultiTenancyIsolationFilter(
aggregate
"""
# Create a tenant-isolated aggregate for the non-admin user.
self.api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1',
project_id=uuids.non_admin)).api
agg_id = self.admin_api.post_aggregate(
{'aggregate': {'name': 'non_admin_agg'}})['id']
meta_req = {'set_metadata': {
@ -897,11 +897,9 @@ class TestAggregateMultiTenancyIsolationFilter(
self.stub_out(
'nova.scheduler.host_manager.HostManager.get_filtered_hosts',
spy_get_filtered_hosts)
# Create a server for the admin - should only have one host candidate.
server_req = self._build_minimal_create_server_request(
'test_aggregate_multitenancy_isolation_filter-admin',
networks='none') # requires microversion 2.37
server_req = {'server': server_req}
server_req = {'server': self._build_server(networks='none')}
with utils.temporary_mutation(self.admin_api, microversion='2.37'):
server = self.admin_api.post_server(server_req)
server = self._wait_for_state_change(server, 'ACTIVE')
@ -913,10 +911,7 @@ class TestAggregateMultiTenancyIsolationFilter(
# candidate. We don't assert that the non-admin tenant server shows
# up on host2 because the other host, which is not isolated to the
# aggregate, is still a candidate.
server_req = self._build_minimal_create_server_request(
'test_aggregate_multitenancy_isolation_filter-user',
networks='none') # requires microversion 2.37
server_req = {'server': server_req}
server_req = {'server': self._build_server(networks='none')}
with utils.temporary_mutation(self.api, microversion='2.37'):
server = self.api.post_server(server_req)
self._wait_for_state_change(server, 'ACTIVE')
@ -1023,8 +1018,8 @@ class AggregateMultiTenancyIsolationColdMigrateTest(
isolated host aggregate via the AggregateMultiTenancyIsolation filter.
"""
img = nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID
server_req_body = self._build_minimal_create_server_request(
'test_cold_migrate_server', image_uuid=img,
server_req_body = self._build_server(
image_uuid=img,
networks='none')
server = self.api.post_server({'server': server_req_body})
server = self._wait_for_state_change(server, 'ACTIVE')

View File

@ -73,9 +73,9 @@ class TestAvailabilityZoneScheduling(
def _create_server(self, name):
# Create a server, it doesn't matter which host it ends up in.
server_body = self._build_minimal_create_server_request(
name, image_uuid=fake_image.get_valid_image_id(),
flavor_id=self.flavor1, networks='none')
server_body = self._build_server(
flavor_id=self.flavor1,
networks='none')
server = self.api.post_server({'server': server_body})
server = self._wait_for_state_change(server, 'ACTIVE')
original_host = server['OS-EXT-SRV-ATTR:host']

View File

@ -68,8 +68,7 @@ class BootFromVolumeTest(test_servers.ServersTestBase):
# Boot a server with a flavor disk larger than the available local
# disk. It should succeed for boot from volume.
server = self._build_server(flavor_id)
server['imageRef'] = ''
server = self._build_server(image_uuid='', flavor_id=flavor_id)
volume_uuid = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
bdm = {'boot_index': 0,
'uuid': volume_uuid,
@ -158,8 +157,7 @@ class BootFromVolumeTest(test_servers.ServersTestBase):
a user cannot boot from image, they must boot from volume.
"""
self.flags(max_local_block_devices=0)
server = self._build_minimal_create_server_request(
'test_max_local_block_devices_0_force_bfv')
server = self._build_server()
ex = self.assertRaises(api_client.OpenStackApiException,
self.admin_api.post_server,
{'server': server})
@ -200,8 +198,7 @@ class BootFromVolumeLargeRequestTest(test.TestCase,
# _handle_kernel_and_ramdisk()
image1 = 'a2459075-d96c-40d5-893e-577ff92e721c'
image2 = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
server = self._build_minimal_create_server_request(
'test_boot_from_volume_10_servers_255_volumes_2_images')
server = self._build_server()
server.pop('imageRef')
server['min_count'] = 10
bdms = []

View File

@ -59,9 +59,8 @@ class CrossAZAttachTestCase(test.TestCase,
volume.
"""
self.flags(cross_az_attach=False, group='cinder')
server = self._build_minimal_create_server_request(
'test_cross_az_attach_false_boot_from_volume_no_az_specified')
del server['imageRef'] # Do not need imageRef for boot from volume.
# Do not need imageRef for boot from volume.
server = self._build_server(image_uuid='')
server['block_device_mapping_v2'] = [{
'source_type': 'volume',
'destination_type': 'volume',
@ -80,8 +79,7 @@ class CrossAZAttachTestCase(test.TestCase,
in the zone specified by the non-root data volume.
"""
self.flags(cross_az_attach=False, group='cinder')
server = self._build_minimal_create_server_request(
'test_cross_az_attach_false_data_volume_no_az_specified')
server = self._build_server()
# Note that we use the legacy block_device_mapping parameter rather
# than block_device_mapping_v2 because that will create an implicit
# source_type=image, destination_type=local, boot_index=0,
@ -103,9 +101,8 @@ class CrossAZAttachTestCase(test.TestCase,
"""
self.flags(cross_az_attach=False, group='cinder')
self.flags(default_schedule_zone=self.az)
server = self._build_minimal_create_server_request(
'test_cross_az_attach_false_boot_from_volume_default_zone_match')
del server['imageRef'] # Do not need imageRef for boot from volume.
# Do not need imageRef for boot from volume.
server = self._build_server(image_uuid='')
server['block_device_mapping_v2'] = [{
'source_type': 'volume',
'destination_type': 'volume',
@ -122,10 +119,8 @@ class CrossAZAttachTestCase(test.TestCase,
error response.
"""
self.flags(cross_az_attach=False, group='cinder')
server = self._build_minimal_create_server_request(
'test_cross_az_attach_false_bfv_az_specified_mismatch',
az='london')
del server['imageRef'] # Do not need imageRef for boot from volume.
# Do not need imageRef for boot from volume.
server = self._build_server(image_uuid='', az='london')
server['block_device_mapping_v2'] = [{
'source_type': 'volume',
'destination_type': 'volume',
@ -146,8 +141,7 @@ class CrossAZAttachTestCase(test.TestCase,
a noop if there are no volumes in the server create request.
"""
self.flags(cross_az_attach=False, group='cinder')
server = self._build_minimal_create_server_request(
'test_cross_az_attach_false_no_volumes', az=self.az)
server = self._build_server(az=self.az)
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])

View File

@ -33,7 +33,6 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_notifier
from nova.tests.unit.image import fake as fake_image
from nova import utils
CONF = conf.CONF
@ -150,12 +149,8 @@ class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
'port': self.neutron.port_1['id'],
'tag': 'private'
}]
image_uuid = fake_image.get_valid_image_id()
server = self._build_minimal_create_server_request(
'test_cross_cell_resize',
image_uuid=image_uuid,
flavor_id=flavor['id'],
networks=networks)
server = self._build_server(
flavor_id=flavor['id'], networks=networks)
# Put a tag on the server to make sure that survives the resize.
server['tags'] = ['test']
if volume_backed:

View File

@ -66,9 +66,7 @@ class TestNeutronExternalNetworks(test.TestCase,
ComputeManager._build_resources method and abort the build which is
not how ExternalNetworkAttachForbidden is really handled in reality.
"""
server = self._build_minimal_create_server_request(
'test_non_admin_create_server_on_external_network',
image_uuid=fake_image.get_valid_image_id(),
server = self._build_server(
networks=[{'uuid': uuids.external_network}])
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ERROR')

View File

@ -21,7 +21,7 @@ class ImagesTest(test_servers.ServersTestBase):
def test_create_images_negative_invalid_state(self):
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({"server": server})
server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
@ -66,7 +66,7 @@ class ImagesTest(test_servers.ServersTestBase):
project B is the owner of the image.
"""
# Create a server using the tenant user project.
server = self._build_minimal_create_server_request()
server = self._build_server()
server = self.api.post_server({"server": server})
server = self._wait_for_state_change(server, 'ACTIVE')

View File

@ -29,7 +29,7 @@ class InstanceActionsTestV2(test_servers.ServersTestBase):
"""
# Create a server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({'server': server})
self.assertTrue(created_server['id'])
created_server_id = created_server['id']

View File

@ -59,8 +59,7 @@ class JsonFilterTestCase(integrated_helpers.ProviderUsageBaseTestCase):
# for host2 to make sure the filter works. If not, because of the
# custom HostNameWeigher, host1 would be chosen.
query = jsonutils.dumps(['=', '$hypervisor_hostname', 'host2'])
server = self._build_minimal_create_server_request(
'test_filter_on_hypervisor_hostname')
server = self._build_server()
request = {'server': server, 'os:scheduler_hints': {'query': query}}
server = self.api.post_server(request)
server = self._wait_for_state_change(server, 'ACTIVE')

View File

@ -49,7 +49,7 @@ class LegacyV2CompatibleTestBase(test_servers.ServersTestBase):
self.assertNotIn('type', response.body["keypair"])
def test_request_with_pattern_properties_check(self):
server = self._build_minimal_create_server_request()
server = self._build_server()
post = {'server': server}
created_server = self.api.post_server(post)
self._wait_for_state_change(created_server, 'ACTIVE')
@ -58,7 +58,7 @@ class LegacyV2CompatibleTestBase(test_servers.ServersTestBase):
self.assertEqual(response, {'a': 'b'})
def test_request_with_pattern_properties_with_avoid_metadata(self):
server = self._build_minimal_create_server_request()
server = self._build_server()
post = {'server': server}
created_server = self.api.post_server(post)
exc = self.assertRaises(client.OpenStackApiException,

View File

@ -40,8 +40,8 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase):
for it to be ACTIVE, and then attaches the volume to another server.
"""
volume_id = nova_fixtures.CinderFixture.MULTIATTACH_VOL
create_req = self._build_server(flavor_id='1', image='')
create_req['networks'] = 'none'
create_req = self._build_server(
image_uuid='', flavor_id='1', networks='none')
create_req['block_device_mapping_v2'] = [{
'uuid': volume_id,
'source_type': 'volume',
@ -61,8 +61,8 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase):
# Now create a second server and attach the same volume to that.
create_req = self._build_server(
flavor_id='1', image='155d900f-4e14-4e4c-a73d-069cbf4541e6')
create_req['networks'] = 'none'
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', flavor_id='1',
networks='none')
server2 = self.api.post_server({'server': create_req})
self._wait_for_state_change(server2, 'ACTIVE')
# Attach the volume to the second server.

View File

@ -413,9 +413,9 @@ class TestNovaManagePlacementHealAllocations(
:returns: two-item tuple of the server and the compute node resource
provider uuid
"""
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=flavor['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=flavor['id'],
networks='none')
server_req['availability_zone'] = 'nova:%s' % hostname
if volume_backed:
@ -1415,7 +1415,7 @@ class TestDBArchiveDeletedRows(integrated_helpers._IntegratedTestBase):
{'name': 'test_archive_instance_group_members',
'policies': ['affinity']})
# Create two servers in the group.
server = self._build_minimal_create_server_request()
server = self._build_server()
server['min_count'] = 2
server_req = {
'server': server, 'os:scheduler_hints': {'group': group['id']}}
@ -1477,20 +1477,17 @@ class TestDBArchiveDeletedRowsMultiCell(integrated_helpers.InstanceHelperMixin,
admin_context = context.get_admin_context(read_deleted='yes')
# Boot a server to cell1
server_ids = {}
server = self._build_minimal_create_server_request(
'cell1-server', az='nova:host1')
server = self._build_server(az='nova:host1')
created_server = self.api.post_server({'server': server})
self._wait_for_state_change(created_server, 'ACTIVE')
server_ids['cell1'] = created_server['id']
# Boot a server to cell2
server = self._build_minimal_create_server_request(
'cell2-server', az='nova:host2')
server = self._build_server(az='nova:host2')
created_server = self.api.post_server({'server': server})
self._wait_for_state_change(created_server, 'ACTIVE')
server_ids['cell2'] = created_server['id']
# Boot a server to cell0 (cause ERROR state prior to schedule)
server = self._build_minimal_create_server_request(
'cell0-server')
server = self._build_server()
# Flavor m1.xlarge cannot be fulfilled
server['flavorRef'] = 'http://fake.server/5'
created_server = self.api.post_server({'server': server})

View File

@ -68,8 +68,7 @@ class HostStatusPolicyTestCase(test.TestCase,
if networks:
# Starting with microversion 2.37 the networks field is required.
kwargs['networks'] = networks
server = self._build_minimal_create_server_request(
'test_host_status_unknown_only', **kwargs)
server = self._build_server(**kwargs)
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
return server

View File

@ -50,8 +50,7 @@ class MultiCellSchedulerTestCase(test.TestCase,
self.addCleanup(fake_image.FakeImageService_reset)
def _test_create_and_migrate(self, expected_status, az=None):
server = self._build_minimal_create_server_request('some-server',
az=az)
server = self._build_server(az=az)
post = {'server': server}
# If forcing the server onto a host we have to use the admin API.
api = self.admin_api if az else self.api

View File

@ -26,10 +26,8 @@ class ServerExternalEventsTestV276(
super(ServerExternalEventsTestV276, self).setUp()
self.compute = self.start_service('compute', host='compute')
flavors = self.api.get_flavors()
server_req = self._build_minimal_create_server_request(
"some-server", flavor_id=flavors[0]["id"],
image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6",
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
created_server = self.api.post_server({'server': server_req})
self.server = self._wait_for_state_change(created_server, 'ACTIVE')

View File

@ -54,9 +54,7 @@ class ServerFaultTestCase(test.TestCase,
admin user.
"""
# Create the server with the non-admin user.
server = self._build_minimal_create_server_request(
'test_server_fault_non_nova_exception',
image_uuid=fake_image.get_valid_image_id(),
server = self._build_server(
networks=[{'port': nova_fixtures.NeutronFixture.port_1['id']}])
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')

View File

@ -89,13 +89,11 @@ class ServerGroupTestBase(test.TestCase,
def _boot_a_server_to_group(self, group,
expected_status='ACTIVE', flavor=None,
az=None):
server = self._build_minimal_create_server_request(
'some-server',
image_uuid='a2459075-d96c-40d5-893e-577ff92e721c', networks=[],
server = self._build_server(
image_uuid='a2459075-d96c-40d5-893e-577ff92e721c',
flavor_id=flavor['id'] if flavor else None,
networks=[],
az=az)
if flavor:
server['flavorRef'] = ('http://fake.server/%s'
% flavor['id'])
post = {'server': server,
'os:scheduler_hints': {'group': group['id']}}
created_server = self.api.post_server(post)
@ -968,10 +966,8 @@ class TestAntiAffinityLiveMigration(test.TestCase,
{'name': 'test_serial_no_valid_host_then_pass_with_third_host',
'policies': ['anti-affinity']})
servers = []
for x in range(2):
server = self._build_minimal_create_server_request(
'test_serial_no_valid_host_then_pass_with_third_host-%d' % x,
networks='none')
for _ in range(2):
server = self._build_server(networks='none')
# Add the group hint so the server is created in our group.
server_req = {
'server': server,

View File

@ -131,7 +131,7 @@ class ServersTest(ServersTestBase):
self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error)
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
@ -156,15 +156,13 @@ class ServersTest(ServersTestBase):
raw_image = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
vhd_image = 'a440c04b-79fa-479c-bed1-0b816eaec379'
server = self._build_minimal_create_server_request(
image_uuid=vhd_image)
server = self._build_server(image_uuid=vhd_image)
server = self.api.post_server({'server': server})
server = self.api.get_server(server['id'])
errored_server = self._wait_for_state_change(server, 'ERROR')
self.assertIn('No valid host', errored_server['fault']['message'])
server = self._build_minimal_create_server_request(
image_uuid=raw_image)
server = self._build_server(image_uuid=raw_image)
server = self.api.post_server({'server': server})
server = self.api.get_server(server['id'])
created_server = self._wait_for_state_change(server, 'ACTIVE')
@ -186,7 +184,7 @@ class ServersTest(ServersTestBase):
self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error)
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
@ -222,7 +220,7 @@ class ServersTest(ServersTestBase):
# Create server
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
good_server = self._build_server()
post = {'server': server}
@ -295,7 +293,7 @@ class ServersTest(ServersTestBase):
self.flags(reclaim_instance_interval=1)
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server)
@ -327,7 +325,7 @@ class ServersTest(ServersTestBase):
self.flags(reclaim_instance_interval=3600)
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server)
@ -357,7 +355,7 @@ class ServersTest(ServersTestBase):
self.flags(reclaim_instance_interval=3600)
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server1 = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server1)
@ -375,7 +373,7 @@ class ServersTest(ServersTestBase):
'SOFT_DELETED')
# Create a second server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server2 = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server2)
@ -396,7 +394,7 @@ class ServersTest(ServersTestBase):
self.flags(reclaim_instance_interval=3600)
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server)
@ -424,7 +422,7 @@ class ServersTest(ServersTestBase):
# Creates a server with metadata.
# Build the server data gradually, checking errors along the way
server = self._build_minimal_create_server_request()
server = self._build_server()
metadata = {}
for i in range(30):
@ -463,7 +461,7 @@ class ServersTest(ServersTestBase):
def test_server_metadata_actions_negative_invalid_state(self):
# Create server with metadata
server = self._build_minimal_create_server_request()
server = self._build_server()
metadata = {'key_1': 'value_1'}
@ -504,7 +502,7 @@ class ServersTest(ServersTestBase):
# Rebuild a server with metadata.
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
server = self._build_server()
server_post = {'server': server}
metadata = {}
@ -569,7 +567,7 @@ class ServersTest(ServersTestBase):
# Test building and renaming a server.
# Create a server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server)
server_id = created_server['id']
@ -590,7 +588,7 @@ class ServersTest(ServersTestBase):
# Create 2 servers, setting 'return_reservation_id, which should
# return a reservation_id
server = self._build_minimal_create_server_request()
server = self._build_server()
server[self._min_count_parameter] = 2
server[self._return_resv_id_parameter] = True
post = {'server': server}
@ -602,7 +600,7 @@ class ServersTest(ServersTestBase):
self.assertRegex(reservation_id, 'r-[0-9a-zA-Z]{8}')
# Create 1 more server, which should not return a reservation_id
server = self._build_minimal_create_server_request()
server = self._build_server()
post = {'server': server}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
@ -642,7 +640,7 @@ class ServersTest(ServersTestBase):
})
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
server['personality'] = personality
post = {'server': server}
@ -663,7 +661,7 @@ class ServersTest(ServersTestBase):
def test_stop_start_servers_negative_invalid_state(self):
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
@ -703,7 +701,7 @@ class ServersTest(ServersTestBase):
def test_revert_resized_server_negative_invalid_state(self):
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
@ -728,7 +726,7 @@ class ServersTest(ServersTestBase):
self.flags(allow_resize_to_same_host=True)
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
@ -756,7 +754,7 @@ class ServersTest(ServersTestBase):
def test_confirm_resized_server_negative_invalid_state(self):
# Create server
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
@ -780,7 +778,7 @@ class ServersTest(ServersTestBase):
self.flags(cores=1, group='quota')
self.flags(ram=512, group='quota')
# Create server with default flavor, 1 core, 512 ram
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
@ -796,7 +794,7 @@ class ServersTest(ServersTestBase):
def test_attach_vol_maximum_disk_devices_exceeded(self):
self.useFixture(nova_fixtures.CinderFixture(self))
server = self._build_minimal_create_server_request()
server = self._build_server()
created_server = self.api.post_server({"server": server})
server_id = created_server['id']
self._wait_for_state_change(created_server, 'ACTIVE')
@ -827,7 +825,7 @@ class ServersTestV219(ServersTestBase):
api_major_version = 'v2.1'
def _create_server(self, set_desc = True, desc = None):
server = self._build_minimal_create_server_request()
server = self._build_server()
if set_desc:
server['description'] = desc
post = {'server': server}
@ -1037,7 +1035,7 @@ class ServerTestV220(ServersTestBase):
self.ctxt = context.get_admin_context()
def _create_server(self):
server = self._build_minimal_create_server_request()
server = self._build_server()
post = {'server': server}
response = self.api.api_post('/servers', post).body
return (server, response['server'])
@ -3048,9 +3046,9 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
def _server_created_with_host(self):
hostname = self.compute1.host
server_req = self._build_minimal_create_server_request(
"some-server", flavor_id=self.flavor1["id"],
server_req = self._build_server(
image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6",
flavor_id=self.flavor1["id"],
networks='none')
server_req['host'] = hostname
@ -3174,10 +3172,10 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
"""Test that when a resize attempt fails, the retry comes from the
supplied host_list, and does not call the scheduler.
"""
server_req = self._build_minimal_create_server_request(
"some-server", flavor_id=self.flavor1["id"],
image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6",
networks='none')
server_req = self._build_server(
image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6",
flavor_id=self.flavor1["id"],
networks='none')
created_server = self.api.post_server({"server": server_req})
server = self._wait_for_state_change(created_server,
@ -3374,10 +3372,7 @@ class PollUnconfirmedResizesTest(integrated_helpers.ProviderUsageBaseTestCase):
VERIFY_RESIZE status and the _poll_unconfirmed_resizes periodic task
runs the source compute service goes down so the confirm task fails.
"""
server = self._build_minimal_create_server_request(
name='test_source_host_down_during_confirm',
image_uuid=nova.tests.unit.image.fake.get_valid_image_id(),
networks='none', host='host1')
server = self._build_server(networks='none', host='host1')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
# Cold migrate the server to the other host.
@ -3567,10 +3562,10 @@ class ServerRescheduleTests(integrated_helpers.ProviderUsageBaseTestCase):
from the source node when the build fails on that node and is
rescheduled to another node.
"""
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=self.flavor1['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor1['id'],
networks='none')
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(created_server, 'ACTIVE')
@ -3597,9 +3592,9 @@ class ServerRescheduleTests(integrated_helpers.ProviderUsageBaseTestCase):
then the server is put into ERROR state properly.
"""
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=self.flavor1['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor1['id'],
networks='none')
orig_claim = utils.claim_resources
@ -3655,10 +3650,10 @@ class ServerBuildAbortTests(integrated_helpers.ProviderUsageBaseTestCase):
"""Tests that allocations, created by the scheduler, are cleaned
from the source node when the build is aborted on that node.
"""
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=self.flavor1['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor1['id'],
networks='none')
created_server = self.api.post_server({'server': server_req})
self._wait_for_state_change(created_server, 'ERROR')
@ -3716,9 +3711,9 @@ class ServerUnshelveSpawnFailTests(
'MEMORY_MB': 0,
'DISK_GB': 0}, rp_uuid)
server_req = self._build_minimal_create_server_request(
'unshelve-spawn-fail', flavor_id=self.flavor1['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor1['id'],
networks='none')
server = self.api.post_server({'server': server_req})
@ -3917,12 +3912,9 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase):
return body['flavor']['id']
def _create_server(self):
with nova.utils.temporary_mutation(self.api, microversion='2.35'):
image_id = self.api.get_images()[0]['id']
server_req = self._build_minimal_create_server_request(
'trait-based-server',
image_uuid=image_id,
flavor_id=self.flavor_id, networks='none')
server_req = self._build_server(
flavor_id=self.flavor_id,
networks='none')
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(server, 'ACTIVE')
return server
@ -4075,10 +4067,10 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase):
:return: create server response
"""
server_req = self._build_minimal_create_server_request(
'trait-based-server',
server_req = self._build_server(
image_uuid=image_id,
flavor_id=flavor_id, networks='none')
flavor_id=flavor_id,
networks='none')
return self.api.post_server({'server': server_req})
def _create_volume_backed_server_with_traits(self, flavor_id, volume_id):
@ -4530,7 +4522,7 @@ class ServerTestV256Common(ServersTestBase):
self.start_service('compute', host=host)
def _create_server(self, target_host=None):
server = self._build_minimal_create_server_request(
server = self._build_server(
image_uuid='a2459075-d96c-40d5-893e-577ff92e721c')
server.update({'networks': 'auto'})
if target_host is not None:
@ -4636,9 +4628,9 @@ class ConsumerGenerationConflictTest(
self.compute2 = self._start_compute('compute2')
def test_create_server_fails_as_placement_reports_consumer_conflict(self):
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=self.flavor['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor['id'],
networks='none')
# We cannot pre-create a consumer with the uuid of the instance created
@ -5630,10 +5622,10 @@ class PortResourceRequestBasedSchedulingTestBase(
compute_allocations)
def _create_server(self, flavor, networks, host=None):
server_req = self._build_minimal_create_server_request(
'bandwidth-aware-server',
server_req = self._build_server(
image_uuid='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
flavor_id=flavor['id'], networks=networks,
flavor_id=flavor['id'],
networks=networks,
host=host)
return self.api.post_server({'server': server_req})

View File

@ -418,9 +418,9 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
self.assertNotIn('FOO', traits)
def _create_instance(self, flavor):
server_req = self._build_minimal_create_server_request(
'some-server', flavor_id=flavor['id'],
server_req = self._build_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=flavor['id'],
networks='none', az='nova:host1')
inst = self.api.post_server({'server': server_req})
return self._wait_for_state_change(inst, 'ACTIVE')

View File

@ -29,11 +29,8 @@ class InterfaceFullstack(test_servers.ServersTestBase):
def test_detach_interface_negative_invalid_state(self):
# Create server with network
image = self.api.get_images()[0]['id']
post = {"server": {"name": "test", "flavorRef": "1",
"imageRef": image,
"networks": [{"uuid": "3cb9bc59-5699-4588-a4b1-b87f96708bc6"}]}}
created_server = self.api.post_server(post)
created_server = self.api.post_server({'server': self._build_server(
networks=[{'uuid': '3cb9bc59-5699-4588-a4b1-b87f96708bc6'}])})
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'ACTIVE')

View File

@ -23,6 +23,7 @@ from nova.tests.unit import policy_fixture
LOG = logging.getLogger(__name__)
# TODO(stephenfin): Add InstanceHelperMixin
class SecgroupsFullstack(testscenarios.WithScenarios, test.TestCase):
"""Tests for security groups

View File

@ -353,10 +353,9 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
self.policy_fixture.set_rules({
servers_policies.ZERO_DISK_FLAVOR: base_policies.RULE_ADMIN_API},
overwrite=False)
server_req = self._build_minimal_create_server_request(
'test_create_image_backed_server_with_zero_disk_fails',
fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
self.zero_disk_flavor['id'])
server_req = self._build_server(
image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
flavor_id=self.zero_disk_flavor['id'])
ex = self.assertRaises(api_client.OpenStackApiException,
self.api.post_server, {'server': server_req})
self.assertIn('Only volume-backed servers are allowed for flavors '
@ -374,8 +373,7 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
self.useFixture(nova_fixtures.CinderFixture(self))
self.start_service('conductor')
self.start_service('scheduler')
server_req = self._build_minimal_create_server_request(
'test_create_volume_backed_server_with_zero_disk_allowed',
server_req = self._build_server(
flavor_id=self.zero_disk_flavor['id'])
server_req.pop('imageRef', None)
server_req['block_device_mapping_v2'] = [{
@ -400,11 +398,8 @@ class ResizeCheckInstanceHostTestCase(
# Start up a compute on which to create a server.
self._start_compute('host1')
# Create a server on host1.
server = self._build_minimal_create_server_request(
name='test_resize_source_compute_validation',
image_uuid=fake_image.get_valid_image_id(),
flavor_id=flavors[0]['id'],
networks='none')
server = self._build_server(
flavor_id=flavors[0]['id'], networks='none')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
# Check if we're cold migrating.

View File

@ -19,7 +19,6 @@ from nova import exception
from nova import objects
from nova.tests.functional.api import client as api_client
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
from nova import utils
@ -323,9 +322,7 @@ class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase):
# Try creating a server which should fail because nothing is available.
networks = [{'port': self.neutron.port_1['id']}]
server_req = self._build_minimal_create_server_request(
'test_compute_status_filter',
image_uuid=fake_image.get_valid_image_id(), networks=networks)
server_req = self._build_server(networks=networks)
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(server, 'ERROR')
# There should be a NoValidHost fault recorded.