tempest/tempest/api/compute/admin/test_create_server.py
zhufl c88e0a1fb8 Add same extra specs as self.flavor_ref for created new flavor
In some environments, certain extra specs must be set on the flavors used
to create servers. For example, the flavor extra spec
{"hw:mem_page_size": "large"} must be set if all compute nodes are backed
by huge pages and the deployment demands explicit huge-page filtering. If
that extra spec is not set on the flavor, creating a server fails with:
"BuildErrorException: Server 921aa25c-d182-4b53-88d1-90e003b17212 failed
                     to build and is in ERROR status
 Details: {'message': 'No valid host was found. There are not enough hosts
          available.', 'code': 500, 'created': '2017-12-14T05:33:52Z'}"
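
For illustration only (a minimal sketch, not part of this change), such an
extra spec could be attached to a flavor with tempest's admin flavors client,
assuming a test-class context like the file below; flavor_id is a placeholder:

  # Hypothetical example: mark a flavor as huge-page backed so the scheduler
  # only places its servers on compute nodes that expose huge pages.
  self.admin_flavors_client.set_flavor_extra_spec(
      flavor_id, **{"hw:mem_page_size": "large"})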

Another use case: when there are many compute nodes (say, more than 20),
we may want to limit tempest execution to a subset of them (perhaps not
all nodes are suitable for the test). We can then add
{"aggregate_instance_extra_specs:availability_zone": "tempest-zone"}
to the predefined flavor so that only the specified compute nodes are chosen.
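
A similarly hedged sketch of that second use case, with "tempest-zone" and
flavor_id as placeholders:

  # Hypothetical example: pin a flavor to hosts in an aggregate whose
  # "availability_zone" metadata is "tempest-zone" (matched by nova's
  # AggregateInstanceExtraSpecsFilter).
  self.admin_flavors_client.set_flavor_extra_spec(
      flavor_id,
      **{"aggregate_instance_extra_specs:availability_zone": "tempest-zone"})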

Usually the correct extra specs are already set on the flavors configured
as CONF.compute.flavor_ref and CONF.compute.flavor_ref_alt, so it is safe
to create servers from those flavors. But when a test case creates a new
flavor to boot a server from, and no extra specs are set on it, server
creation may fail.

So this change adds the same extra specs as self.flavor_ref to the created
flavor that will be used to create a server; flavors created in tests that
never boot servers do not need this treatment.
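
The core of the change, mirroring the code in the test below (new_flavor
stands in for the flavor the test creates):

  # Copy whatever extra specs CONF.compute.flavor_ref carries onto the
  # newly created flavor before booting a server from it.
  extra_specs = self.admin_flavors_client.list_flavor_extra_specs(
      self.flavor_ref)['extra_specs']
  if extra_specs:
      self.admin_flavors_client.set_flavor_extra_spec(
          new_flavor['id'], **extra_specs)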

Change-Id: I7dea2ef41bd686b544ead7f1ba0587720c3a3c44
Closes-bug: #1742132
2018-01-09 12:22:03 +00:00


# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools

from tempest.api.compute import base
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators

CONF = config.CONF


class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):

    @classmethod
    def setup_credentials(cls):
        cls.prepare_instance_network()
        super(ServersWithSpecificFlavorTestJSON, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
        cls.client = cls.servers_client

    @decorators.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_verify_created_server_ephemeral_disk(self):
        # Verify that the ephemeral disk is created when creating server
        flavor_base = self.flavors_client.show_flavor(
            self.flavor_ref)['flavor']

        def create_flavor_with_ephemeral(ephem_disk):
            name = 'flavor_with_ephemeral_%s' % ephem_disk
            flavor_name = data_utils.rand_name(name)

            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']

            # Create a flavor with ephemeral disk
            flavor = self.create_flavor(name=flavor_name, ram=ram,
                                        vcpus=vcpus, disk=disk,
                                        ephemeral=ephem_disk)
            # Set extra specs same as self.flavor_ref for the created flavor,
            # because the environment may need some special extra specs to
            # create server which should have been contained in
            # self.flavor_ref.
            extra_spec_keys = \
                self.admin_flavors_client.list_flavor_extra_specs(
                    self.flavor_ref)['extra_specs']
            if extra_spec_keys:
                self.admin_flavors_client.set_flavor_extra_spec(
                    flavor['id'], **extra_spec_keys)
            return flavor['id']

        flavor_with_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=1)
        flavor_no_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=0)

        admin_pass = self.image_ssh_password

        validation_resources = self.get_test_validation_resources(
            self.os_primary)
        server_no_eph_disk = self.create_test_server(
            validatable=True,
            validation_resources=validation_resources,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_no_eph_disk_id)

        self.addCleanup(waiters.wait_for_server_termination,
                        self.servers_client, server_no_eph_disk['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.servers_client.delete_server,
                        server_no_eph_disk['id'])

        # Get partition number of server without ephemeral disk.
        server_no_eph_disk = self.client.show_server(
            server_no_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_no_eph_disk,
                               validation_resources),
            self.ssh_user,
            admin_pass,
            validation_resources['keypair']['private_key'],
            server=server_no_eph_disk,
            servers_client=self.client)
        disks_num = len(linux_client.get_disks().split('\n'))

        # Explicit server deletion necessary for Juno compatibility
        self.client.delete_server(server_no_eph_disk['id'])

        server_with_eph_disk = self.create_test_server(
            validatable=True,
            validation_resources=validation_resources,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id)

        self.addCleanup(waiters.wait_for_server_termination,
                        self.servers_client, server_with_eph_disk['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.servers_client.delete_server,
                        server_with_eph_disk['id'])

        server_with_eph_disk = self.client.show_server(
            server_with_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_with_eph_disk,
                               validation_resources),
            self.ssh_user,
            admin_pass,
            validation_resources['keypair']['private_key'],
            server=server_with_eph_disk,
            servers_client=self.client)
        disks_num_eph = len(linux_client.get_disks().split('\n'))
        self.assertEqual(disks_num + 1, disks_num_eph)