# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import time
import zlib
from keystoneauth1 import adapter
import mock
from neutronclient.common import exceptions as neutron_exception
import os_resource_classes as orc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_serialization import jsonutils
from oslo_utils import fixture as osloutils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.compute import api as compute_api
from nova.compute import instance_actions
from nova.compute import manager as compute_manager
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova import exception
from nova.network import constants
from nova.network import neutron as neutronapi
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.policies import base as base_policies
from nova.policies import servers as servers_policies
from nova.scheduler import utils
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_requests
from nova.tests.unit.objects import test_instance_info_cache
from nova import utils as nova_utils
from nova.virt import fake
from nova import volume
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ServersTest(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2'
def test_get_servers(self):
# Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s", server)
def _get_node_build_failures(self):
ctxt = context.get_admin_context()
computes = objects.ComputeNodeList.get_all(ctxt)
return {
node.hypervisor_hostname: int(node.stats.get('failed_builds', 0))
for node in computes}
def test_create_server_with_error(self):
# Create a server which will enter error state.
def throw_error(*args, **kwargs):
raise exception.BuildAbortException(reason='',
instance_uuid='fake')
self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error)
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'ERROR')
# Delete the server
self._delete_server(created_server)
# We should have no (persisted) build failures until we update
# resources, after which we should have one
self.assertEqual([0], list(self._get_node_build_failures().values()))
# BuildAbortException will not trigger a reschedule and the build
# failure update is the last step in the compute manager after
# instance state setting, fault recording and notification sending. So
# we have no other way than to simply wait for the node build failure
# counter to be updated before we assert it.
def failed_counter_updated():
self._run_periodics()
self.assertEqual(
[1], list(self._get_node_build_failures().values()))
self._wait_for_assert(failed_counter_updated)
def test_create_server_with_image_type_filter(self):
self.flags(query_placement_for_image_type_support=True,
group='scheduler')
raw_image = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
vhd_image = 'a440c04b-79fa-479c-bed1-0b816eaec379'
server = self._build_server(image_uuid=vhd_image)
server = self.api.post_server({'server': server})
server = self.api.get_server(server['id'])
errored_server = self._wait_for_state_change(server, 'ERROR')
self.assertIn('No valid host', errored_server['fault']['message'])
server = self._build_server(image_uuid=raw_image)
server = self.api.post_server({'server': server})
server = self.api.get_server(server['id'])
created_server = self._wait_for_state_change(server, 'ACTIVE')
# Delete the server
self._delete_server(created_server)
def _test_create_server_with_error_with_retries(self):
# Create a server which will enter error state.
self._start_compute('host2')
fails = []
def throw_error(*args, **kwargs):
fails.append('one')
raise test.TestingException('Please retry me')
self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error)
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'ERROR')
# Delete the server
self._delete_server(created_server)
return len(fails)
def test_create_server_with_error_with_retries(self):
self.flags(max_attempts=2, group='scheduler')
fails = self._test_create_server_with_error_with_retries()
self.assertEqual(2, fails)
self._run_periodics()
self.assertEqual(
[1, 1], list(self._get_node_build_failures().values()))
def test_create_server_with_error_with_no_retries(self):
self.flags(max_attempts=1, group='scheduler')
fails = self._test_create_server_with_error_with_retries()
self.assertEqual(1, fails)
# The build failure update is the last step in build_and_run_instance
# in the compute manager after instance state setting, fault
# recording and notification sending. So we have no other way than to
# simply wait for the node build failure counter to be updated before
# we assert it.
def failed_counter_updated():
self._run_periodics()
self.assertEqual(
[0, 1], list(sorted(self._get_node_build_failures().values())))
self._wait_for_assert(failed_counter_updated)
def test_create_and_delete_server(self):
# Creates and deletes a server.
# Create server
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_server()
post = {'server': server}
# Without an imageRef, this throws 500.
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# With an invalid imageRef, this throws 500.
server['imageRef'] = uuids.fake
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Add a valid imageRef
server['imageRef'] = good_server.get('imageRef')
# Without flavorRef, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
server['flavorRef'] = good_server.get('flavorRef')
# Without a name, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Set a valid server name
server['name'] = good_server['name']
created_server = self.api.post_server(post)
LOG.debug("created_server: %s", created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertIn(created_server_id, server_ids)
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertIn("image", server)
self.assertIn("flavor", server)
# Delete the server
self._delete_server(found_server)
def _force_reclaim(self):
# Make sure that the compute manager thinks the instance is
# old enough to be expired
the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
timeutils.set_time_override(override_time=the_past)
self.addCleanup(timeutils.clear_time_override)
ctxt = context.get_admin_context()
self.compute._reclaim_queued_deletes(ctxt)
def test_deferred_delete(self):
# Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
# Create server
server = self._build_server()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
# Cannot restore unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'restore': {}})
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server,
'SOFT_DELETED')
self._force_reclaim()
# Wait for real deletion
self._wait_until_deleted(found_server)
def test_deferred_delete_restore(self):
# Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=3600)
# Create server
server = self._build_server()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server,
'SOFT_DELETED')
# Restore server
self.api.post_server_action(created_server_id, {'restore': {}})
# Wait for server to become active again
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
def test_deferred_delete_restore_overquota(self):
# Test that a restore that would put the user over quota fails
self.flags(instances=1, group='quota')
# Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=3600)
# Create server
server = self._build_server()
created_server1 = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server1)
self.assertTrue(created_server1['id'])
created_server_id1 = created_server1['id']
# Wait for it to finish being created
found_server1 = self._wait_for_state_change(created_server1, 'ACTIVE')
# Delete the server
self.api.delete_server(created_server_id1)
# Wait for queued deletion
found_server1 = self._wait_for_state_change(found_server1,
'SOFT_DELETED')
# Create a second server
self._create_server()
# Try to restore the first server, it should fail
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
created_server_id1, {'restore': {}})
self.assertEqual(403, ex.response.status_code)
self.assertEqual('SOFT_DELETED', found_server1['status'])
def test_deferred_delete_force(self):
# Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=3600)
# Create server
server = self._build_server()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server,
'SOFT_DELETED')
# Force delete server
self.api.post_server_action(created_server_id, {'forceDelete': {}})
# Wait for real deletion
self._wait_until_deleted(found_server)
def test_create_server_with_metadata(self):
# Creates a server with metadata.
# Build the server data gradually, checking errors along the way
server = self._build_server()
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server['metadata'] = metadata
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s", created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers details list
servers = self.api.get_servers(detail=True)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Details do include metadata
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers summary list
servers = self.api.get_servers(detail=False)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Summary should not include metadata
self.assertFalse(found_server.get('metadata'))
# Cleanup
self._delete_server(found_server)
def test_server_metadata_actions_negative_invalid_state(self):
# Create server with metadata
server = self._build_server()
metadata = {'key_1': 'value_1'}
server['metadata'] = metadata
post = {'server': server}
created_server = self.api.post_server(post)
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
self.assertEqual(metadata, found_server.get('metadata'))
server_id = found_server['id']
# Change status from ACTIVE to SHELVED for negative test
self.flags(shelved_offload_time = -1)
self.api.post_server_action(server_id, {'shelve': {}})
found_server = self._wait_for_state_change(found_server, 'SHELVED')
metadata = {'key_2': 'value_2'}
# Update Metadata item in SHELVED (not ACTIVE, etc.)
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_metadata,
server_id, metadata)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('SHELVED', found_server['status'])
# Delete Metadata item in SHELVED (not ACTIVE, etc.)
ex = self.assertRaises(client.OpenStackApiException,
self.api.delete_server_metadata,
server_id, 'key_1')
self.assertEqual(409, ex.response.status_code)
self.assertEqual('SHELVED', found_server['status'])
# Cleanup
self._delete_server(found_server)
def test_create_and_rebuild_server(self):
# Rebuild a server with metadata.
# create a server that initially has no metadata
server = self._build_server()
server_post = {'server': server}
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server_post['server']['metadata'] = metadata
created_server = self.api.post_server(server_post)
LOG.debug("created_server: %s", created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
created_server = self._wait_for_state_change(created_server, 'ACTIVE')
# rebuild the server with metadata and other server attributes
post = {}
post['rebuild'] = {
'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'blah',
'accessIPv4': '172.19.0.2',
'accessIPv6': 'fe80::2',
'metadata': {'some': 'thing'},
}
post['rebuild'].update({
'accessIPv4': '172.19.0.2',
'accessIPv6': 'fe80::2',
})
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s", created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild']['imageRef'],
found_server.get('image')['id'])
self.assertEqual('172.19.0.2', found_server['accessIPv4'])
self.assertEqual('fe80::2', found_server['accessIPv6'])
# rebuild the server with empty metadata and nothing else
post = {}
post['rebuild'] = {
'imageRef': "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"metadata": {},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s", created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild']['imageRef'],
found_server.get('image')['id'])
self.assertEqual('172.19.0.2', found_server['accessIPv4'])
self.assertEqual('fe80::2', found_server['accessIPv6'])
# Cleanup
self._delete_server(found_server)
def test_rename_server(self):
# Test building and renaming a server.
# Create a server
server = self._build_server()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s", created_server)
server_id = created_server['id']
self.assertTrue(server_id)
# Rename the server to 'new-name'
self.api.put_server(server_id, {'server': {'name': 'new-name'}})
# Check the name of the server
created_server = self.api.get_server(server_id)
self.assertEqual(created_server['name'], 'new-name')
# Cleanup
self._delete_server(created_server)
def test_create_multiple_servers(self):
# Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id', which should
# return a reservation_id
server = self._build_server()
server['min_count'] = 2
server['return_reservation_id'] = True
post = {'server': server}
response = self.api.post_server(post)
self.assertIn('reservation_id', response)
reservation_id = response['reservation_id']
self.assertNotIn(reservation_id, ['', None])
# Assert that the reservation_id itself has the expected format
self.assertRegex(reservation_id, 'r-[0-9a-zA-Z]{8}')
# Create 1 more server, which should not return a reservation_id
server = self._build_server()
post = {'server': server}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# lookup servers created by the first request.
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
# The server from the 2nd request should not be there.
self.assertIsNone(found_server)
# Should have found 2 servers.
self.assertEqual(len(server_map), 2)
# Cleanup
self._delete_server(created_server)
for server in server_map.values():
self._delete_server(server)
def test_create_server_with_injected_files(self):
# Creates a server with injected_files.
personality = []
# Inject a text file
data = 'Hello, World!'
personality.append({
'path': '/helloworld.txt',
'contents': base64.encode_as_bytes(data),
})
# Inject a binary file
data = zlib.compress(b'Hello, World!')
personality.append({
'path': '/helloworld.zip',
'contents': base64.encode_as_bytes(data),
})
# Create server
server = self._build_server()
server['personality'] = personality
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s", created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
# Cleanup
self._delete_server(found_server)
def test_stop_start_servers_negative_invalid_state(self):
# Create server
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
# Start server in ACTIVE
# NOTE(mkoshiya): When os-start API runs, the server status
# must be SHUTOFF.
# By returning 409, I want to confirm that the ACTIVE server does not
# cause unexpected behavior.
post = {'os-start': {}}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
created_server_id, post)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('ACTIVE', found_server['status'])
# Stop server
post = {'os-stop': {}}
self.api.post_server_action(created_server_id, post)
found_server = self._wait_for_state_change(found_server, 'SHUTOFF')
# Stop server in SHUTOFF
# NOTE(mkoshiya): When os-stop API runs, the server status
# must be ACTIVE or ERROR.
# By returning 409, I want to confirm that the SHUTOFF server does not
# cause unexpected behavior.
post = {'os-stop': {}}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
created_server_id, post)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('SHUTOFF', found_server['status'])
# Cleanup
self._delete_server(found_server)
def test_revert_resized_server_negative_invalid_state(self):
# Create server
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
# Revert resized server in ACTIVE
# NOTE(yatsumi): When revert resized server API runs,
# the server status must be VERIFY_RESIZE.
# By returning 409, I want to confirm that the ACTIVE server does not
# cause unexpected behavior.
post = {'revertResize': {}}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
created_server_id, post)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('ACTIVE', found_server['status'])
# Cleanup
self._delete_server(found_server)
def test_resize_server_negative_invalid_state(self):
# Avoid migration
self.flags(allow_resize_to_same_host=True)
# Create server
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
# Resize server(flavorRef: 1 -> 2)
post = {'resize': {"flavorRef": "2", "OS-DCF:diskConfig": "AUTO"}}
self.api.post_server_action(created_server_id, post)
found_server = self._wait_for_state_change(found_server,
'VERIFY_RESIZE')
# Resize server in VERIFY_RESIZE(flavorRef: 2 -> 1)
# NOTE(yatsumi): When resize API runs, the server status
# must be ACTIVE or SHUTOFF.
# By returning 409, I want to confirm that the VERIFY_RESIZE server
# does not cause unexpected behavior.
post = {'resize': {"flavorRef": "1", "OS-DCF:diskConfig": "AUTO"}}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
created_server_id, post)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('VERIFY_RESIZE', found_server['status'])
# Cleanup
self._delete_server(found_server)
def test_confirm_resized_server_negative_invalid_state(self):
# Create server
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'ACTIVE')
# Confirm resized server in ACTIVE
# NOTE(yatsumi): When confirm resized server API runs,
# the server status must be VERIFY_RESIZE.
# By returning 409, I want to confirm that the ACTIVE server does not
# cause unexpected behavior.
post = {'confirmResize': {}}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
created_server_id, post)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('ACTIVE', found_server['status'])
# Cleanup
self._delete_server(found_server)
def test_resize_server_overquota(self):
self.flags(cores=1, group='quota')
self.flags(ram=512, group='quota')
# Create server with default flavor, 1 core, 512 ram
server = self._build_server()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
self._wait_for_state_change(created_server, 'ACTIVE')
# Try to resize to flavorid 2, 1 core, 2048 ram
post = {'resize': {'flavorRef': '2'}}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
created_server_id, post)
self.assertEqual(403, ex.response.status_code)
def test_attach_vol_maximum_disk_devices_exceeded(self):
server = self._build_server()
created_server = self.api.post_server({"server": server})
server_id = created_server['id']
self._wait_for_state_change(created_server, 'ACTIVE')
volume_id = '9a695496-44aa-4404-b2cc-ccab2501f87e'
LOG.info('Attaching volume %s to server %s', volume_id, server_id)
# The fake driver doesn't implement get_device_name_for_instance, so
# we'll just raise the exception directly here, instead of simulating
# an instance with 26 disk devices already attached.
with mock.patch.object(self.compute.driver,
'get_device_name_for_instance') as mock_get:
mock_get.side_effect = exception.TooManyDiskDevices(maximum=26)
ex = self.assertRaises(
client.OpenStackApiException, self.api.post_server_volume,
server_id, dict(volumeAttachment=dict(volumeId=volume_id)))
expected = ('The maximum allowed number of disk devices (26) to '
'attach to a single instance has been exceeded.')
self.assertEqual(403, ex.response.status_code)
self.assertIn(expected, str(ex))
class ServersTestV21(ServersTest):
api_major_version = 'v2.1'
class ServersTestV219(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
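# Helper: build a server request body, optionally adding a 'description'
# field, POST it and return (request body, created server).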
def _create_server(self, set_desc = True, desc = None):
server = self._build_server()
if set_desc:
server['description'] = desc
post = {'server': server}
response = self.api.api_post('/servers', post).body
return (server, response['server'])
def _update_server(self, server_id, set_desc = True, desc = None):
new_name = integrated_helpers.generate_random_alphanumeric(8)
server = {'server': {'name': new_name}}
if set_desc:
server['server']['description'] = desc
self.api.api_put('/servers/%s' % server_id, server)
def _rebuild_server(self, server_id, set_desc = True, desc = None):
new_name = integrated_helpers.generate_random_alphanumeric(8)
post = {}
post['rebuild'] = {
'name': new_name,
'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'accessIPv4': '172.19.0.2',
'accessIPv6': 'fe80::2',
'metadata': {'some': 'thing'},
}
post['rebuild'].update({
'accessIPv4': '172.19.0.2',
'accessIPv6': 'fe80::2',
})
if set_desc:
post['rebuild']['description'] = desc
self.api.api_post('/servers/%s/action' % server_id, post)
def _create_server_and_verify(self, set_desc = True, expected_desc = None):
# Creates a server with a description and verifies it is
# in the GET responses.
created_server = self._create_server(set_desc, expected_desc)[1]
self._verify_server_description(created_server['id'], expected_desc)
self._delete_server(created_server)
def _update_server_and_verify(self, server_id, set_desc = True,
expected_desc = None):
# Updates a server with a description and verifies it is
# in the GET responses.
self._update_server(server_id, set_desc, expected_desc)
self._verify_server_description(server_id, expected_desc)
def _rebuild_server_and_verify(self, server_id, set_desc = True,
expected_desc = None):
# Rebuilds a server with a description and verifies it is
# in the GET responses.
self._rebuild_server(server_id, set_desc, expected_desc)
self._verify_server_description(server_id, expected_desc)
def _verify_server_description(self, server_id, expected_desc = None,
desc_in_resp = True):
# Calls GET on the servers and verifies that the description
# is set as expected in the response, or not set at all.
response = self.api.api_get('/servers/%s' % server_id)
found_server = response.body['server']
self.assertEqual(server_id, found_server['id'])
if desc_in_resp:
# Verify the description is set as expected (can be None)
self.assertEqual(expected_desc, found_server.get('description'))
else:
# Verify the description is not included in the response.
self.assertNotIn('description', found_server)
servers = self.api.api_get('/servers/detail').body['servers']
server_map = {server['id']: server for server in servers}
found_server = server_map.get(server_id)
self.assertTrue(found_server)
if desc_in_resp:
# Verify the description is set as expected (can be None)
self.assertEqual(expected_desc, found_server.get('description'))
else:
# Verify the description is not included in the response.
self.assertNotIn('description', found_server)
def _create_assertRaisesRegex(self, desc):
# Verifies that a 400 error is thrown on create server
with self.assertRaisesRegex(client.OpenStackApiException,
".*Unexpected status code.*") as cm:
self._create_server(True, desc)
self.assertEqual(400, cm.exception.response.status_code)
def _update_assertRaisesRegex(self, server_id, desc):
# Verifies that a 400 error is thrown on update server
with self.assertRaisesRegex(client.OpenStackApiException,
".*Unexpected status code.*") as cm:
self._update_server(server_id, True, desc)
self.assertEqual(400, cm.exception.response.status_code)
def _rebuild_assertRaisesRegex(self, server_id, desc):
# Verifies that a 400 error is thrown on rebuild server
with self.assertRaisesRegex(client.OpenStackApiException,
".*Unexpected status code.*") as cm:
self._rebuild_server(server_id, True, desc)
self.assertEqual(400, cm.exception.response.status_code)
def test_create_server_with_description(self):
self.api.microversion = '2.19'
# Create and get a server with a description
self._create_server_and_verify(True, 'test description')
# Create and get a server with an empty description
self._create_server_and_verify(True, '')
# Create and get a server with description set to None
self._create_server_and_verify()
# Create and get a server without setting the description
self._create_server_and_verify(False)
def test_update_server_with_description(self):
self.api.microversion = '2.19'
# Create a server with an initial description
server = self._create_server(True, 'test desc 1')[1]
server_id = server['id']
# Update and get the server with a description
self._update_server_and_verify(server_id, True, 'updated desc')
# Update and get the server name without changing the description
self._update_server_and_verify(server_id, False, 'updated desc')
# Update and get the server with an empty description
self._update_server_and_verify(server_id, True, '')
# Update and get the server by removing the description (set to None)
self._update_server_and_verify(server_id)
# Update and get the server with a 2nd new description
self._update_server_and_verify(server_id, True, 'updated desc2')
# Cleanup
self._delete_server(server)
def test_rebuild_server_with_description(self):
self.api.microversion = '2.19'
# Create a server with an initial description
server = self._create_server(True, 'test desc 1')[1]
server_id = server['id']
self._wait_for_state_change(server, 'ACTIVE')
# Rebuild and get the server with a description
self._rebuild_server_and_verify(server_id, True, 'updated desc')
# Rebuild and get the server name without changing the description
self._rebuild_server_and_verify(server_id, False, 'updated desc')
# Rebuild and get the server with an empty description
self._rebuild_server_and_verify(server_id, True, '')
# Rebuild and get the server by removing the description (set to None)
self._rebuild_server_and_verify(server_id)
# Rebuild and get the server with a 2nd new description
self._rebuild_server_and_verify(server_id, True, 'updated desc2')
# Cleanup
self._delete_server(server)
def test_version_compatibility(self):
# Create a server with microversion v2.19 and a description.
self.api.microversion = '2.19'
server = self._create_server(True, 'test desc 1')[1]
server_id = server['id']
# Verify that the description is not included on V2.18 GETs
self.api.microversion = '2.18'
self._verify_server_description(server_id, desc_in_resp = False)
# Verify that updating the server with description on V2.18
# results in a 400 error
self._update_assertRaisesRegex(server_id, 'test update 2.18')
# Verify that rebuilding the server with description on V2.18
# results in a 400 error
self._rebuild_assertRaisesRegex(server_id, 'test rebuild 2.18')
# Cleanup
self._delete_server(server)
# Create a server on V2.18 and verify that the description
# defaults to the name on a V2.19 GET
server_req, server = self._create_server(False)
server_id = server['id']
self.api.microversion = '2.19'
self._verify_server_description(server_id, server_req['name'])
# Cleanup
self._delete_server(server)
# Verify that creating a server with description on V2.18
# results in a 400 error
self.api.microversion = '2.18'
self._create_assertRaisesRegex('test create 2.18')
def test_description_errors(self):
self.api.microversion = '2.19'
# Create servers with invalid descriptions. These throw 400.
# Invalid unicode with non-printable control char
self._create_assertRaisesRegex(u'invalid\0dstring')
# Description is longer than 255 chars
self._create_assertRaisesRegex('x' * 256)
# Update and rebuild servers with invalid descriptions.
# These throw 400.
server_id = self._create_server(True, "desc")[1]['id']
# Invalid unicode with non-printable control char
self._update_assertRaisesRegex(server_id, u'invalid\u0604string')
self._rebuild_assertRaisesRegex(server_id, u'invalid\u0604string')
# Description is longer than 255 chars
self._update_assertRaisesRegex(server_id, 'x' * 256)
self._rebuild_assertRaisesRegex(server_id, 'x' * 256)
class ServerTestV220(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
def setUp(self):
super(ServerTestV220, self).setUp()
self.api.microversion = '2.20'
self.ctxt = context.get_admin_context()
def _create_server(self):
server = self._build_server()
post = {'server': server}
response = self.api.api_post('/servers', post).body
return (server, response['server'])
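# Helper: create a server, wait for it to go ACTIVE, shelve it and
# return it once it reaches SHELVED_OFFLOADED.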
def _shelve_server(self):
server = self._create_server()[1]
server_id = server['id']
self._wait_for_state_change(server, 'ACTIVE')
self.api.post_server_action(server_id, {'shelve': None})
return self._wait_for_state_change(server, 'SHELVED_OFFLOADED')
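# Helper: build a single fake volume-backed block device mapping list
# (one /dev/vda volume BDM) for stubbing BlockDeviceMappingList lookups.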
def _get_fake_bdms(self, ctxt):
return block_device_obj.block_device_make_list(self.ctxt,
[fake_block_device.FakeDbBlockDeviceDict(
{'device_name': '/dev/vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': '5d721593-f033-4f6d-ab6f-b5b067e61bc4'})])
def test_attach_detach_vol_to_shelved_offloaded_server_new_flow(self):
self.flags(shelved_offload_time=0)
found_server = self._shelve_server()
server_id = found_server['id']
fake_bdms = self._get_fake_bdms(self.ctxt)
# Test attach volume
self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
with test.nested(mock.patch.object(compute_api.API,
'_check_volume_already_attached_to_instance'),
mock.patch.object(volume.cinder.API,
'check_availability_zone'),
mock.patch.object(volume.cinder.API,
'attachment_create'),
mock.patch.object(volume.cinder.API,
'attachment_complete')
) as (mock_check_vol_attached,
mock_check_av_zone, mock_attach_create,
mock_attachment_complete):
mock_attach_create.return_value = {'id': uuids.volume}
volume_attachment = {"volumeAttachment": {"volumeId":
"5d721593-f033-4f6d-ab6f-b5b067e61bc4"}}
attach_response = self.api.api_post(
'/servers/%s/os-volume_attachments' % (server_id),
volume_attachment).body['volumeAttachment']
self.assertTrue(mock_attach_create.called)
mock_attachment_complete.assert_called_once_with(
mock.ANY, uuids.volume)
self.assertIsNone(attach_response['device'])
# Test detach volume
with test.nested(mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid'),
mock.patch.object(compute_api.API,
'_local_cleanup_bdm_volumes')
) as (mock_get_bdms, mock_clean_vols):
mock_get_bdms.return_value = fake_bdms
attachment_id = mock_get_bdms.return_value[0]['volume_id']
self.api.api_delete('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))
self.assertTrue(mock_clean_vols.called)
self._delete_server(found_server)
class ServerTestV269(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
NUMBER_OF_CELLS = 3
def setUp(self):
super(ServerTestV269, self).setUp()
self.api.microversion = '2.69'
self.ctxt = context.get_admin_context()
self.project_id = self.api.project_id
self.cells = objects.CellMappingList.get_all(self.ctxt)
self.down_cell_insts = []
self.up_cell_insts = []
self.down_cell_mappings = objects.CellMappingList()
flavor = objects.Flavor(id=1, name='flavor1',
memory_mb=256, vcpus=1,
root_gb=1, ephemeral_gb=1,
flavorid='1',
swap=0, rxtx_factor=1.0,
vcpu_weight=1,
disabled=False,
is_public=True,
extra_specs={},
projects=[])
_info_cache = objects.InstanceInfoCache(context)
objects.InstanceInfoCache._from_db_object(context, _info_cache,
test_instance_info_cache.fake_info_cache)
# cell1 and cell2 will be the down cells while
# cell0 and cell3 will be the up cells.
down_cell_names = ['cell1', 'cell2']
for cell in self.cells:
# create 2 instances and their mappings in all the 4 cells
for i in range(2):
with context.target_cell(self.ctxt, cell) as cctxt:
inst = objects.Instance(
context=cctxt,
project_id=self.project_id,
user_id=self.project_id,
instance_type_id=flavor.id,
hostname='%s-inst%i' % (cell.name, i),
flavor=flavor,
info_cache=_info_cache,
display_name='server-test')
inst.create()
im = objects.InstanceMapping(context=self.ctxt,
instance_uuid=inst.uuid,
cell_mapping=cell,
project_id=self.project_id,
queued_for_delete=False)
im.create()
if cell.name in down_cell_names:
self.down_cell_insts.append(inst.uuid)
else:
self.up_cell_insts.append(inst.uuid)
# In cell1 and cell3 add a third instance in a different project
# to show the --all-tenants case.
if cell.name == 'cell1' or cell.name == 'cell3':
with context.target_cell(self.ctxt, cell) as cctxt:
inst = objects.Instance(
context=cctxt,
project_id='faker',
user_id='faker',
instance_type_id=flavor.id,
hostname='%s-inst%i' % (cell.name, 3),
flavor=flavor,
info_cache=_info_cache,
display_name='server-test')
inst.create()
im = objects.InstanceMapping(context=self.ctxt,
instance_uuid=inst.uuid,
cell_mapping=cell,
project_id='faker',
queued_for_delete=False)
im.create()
if cell.name in down_cell_names:
self.down_cell_mappings.objects.append(cell)
self.useFixture(nova_fixtures.DownCellFixture(self.down_cell_mappings))
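# For servers that live in a down cell the 2.69 microversion returns
# only a partial record; roughly (as asserted in the tests below):
#   list view:   {'id': ..., 'status': 'UNKNOWN', 'links': [...]}
#   detail view: {'id': ..., 'status': 'UNKNOWN', 'created': ...,
#                 'tenant_id': ..., 'links': [...]}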
def test_get_servers_with_down_cells(self):
servers = self.api.get_servers(detail=False)
# 4 servers from the up cells and 4 servers from the down cells
self.assertEqual(8, len(servers))
for server in servers:
if 'name' not in server:
# server is in the down cell.
self.assertEqual('UNKNOWN', server['status'])
self.assertIn(server['id'], self.down_cell_insts)
self.assertIn('links', server)
# the partial construct will have only the above 3 keys
self.assertEqual(3, len(server))
else:
# server in up cell
self.assertIn(server['id'], self.up_cell_insts)
# has all the keys
self.assertEqual(server['name'], 'server-test')
self.assertIn('links', server)
def test_get_servers_detail_with_down_cells(self):
servers = self.api.get_servers()
# 4 servers from the up cells and 4 servers from the down cells
self.assertEqual(8, len(servers))
for server in servers:
if 'user_id' not in server:
# server is in the down cell.
self.assertEqual('UNKNOWN', server['status'])
self.assertIn(server['id'], self.down_cell_insts)
# the partial construct will only have 5 keys: created,
# tenant_id, status, id and links. security_groups should be
# present too but isn't since we haven't created a network
# interface
self.assertEqual(5, len(server))
else:
# server in up cell
self.assertIn(server['id'], self.up_cell_insts)
# has all the keys
self.assertEqual(server['user_id'], self.project_id)
self.assertIn('image', server)
def test_get_servers_detail_limits_with_down_cells(self):
servers = self.api.get_servers(search_opts={'limit': 5})
# 4 servers from the up cells since we skip down cell
# results by default for paging.
self.assertEqual(4, len(servers), servers)
for server in servers:
# server in up cell
self.assertIn(server['id'], self.up_cell_insts)
# has all the keys
self.assertEqual(server['user_id'], self.project_id)
self.assertIn('image', server)
def test_get_servers_detail_limits_with_down_cells_the_500_gift(self):
self.flags(list_records_by_skipping_down_cells=False, group='api')
# We get an API error with a 500 response code since the
# list_records_by_skipping_down_cells config option is False.
exp = self.assertRaises(client.OpenStackApiException,
self.api.get_servers,
search_opts={'limit': 5})
self.assertEqual(500, exp.response.status_code)
self.assertIn('NovaException', str(exp))
def test_get_servers_detail_marker_in_down_cells(self):
marker = self.down_cell_insts[2]
# It will fail with a 500 if the marker is in the down cell.
exp = self.assertRaises(client.OpenStackApiException,
self.api.get_servers,
search_opts={'marker': marker})
self.assertEqual(500, exp.response.status_code)
self.assertIn('oslo_db.exception.DBError', str(exp))
def test_get_servers_detail_marker_sorting(self):
marker = self.up_cell_insts[1]
# It will give the results from the up cell if
# list_records_by_skipping_down_cells config option is True.
servers = self.api.get_servers(search_opts={'marker': marker,
'sort_key': "created_at",
'sort_dir': "asc"})
# Since there are 4 servers from the up cells, when the second
# instance is given as the marker and the results are sorted by
# creation time in ascending order, the third and fourth instances
# will be returned.
self.assertEqual(2, len(servers))
for server in servers:
self.assertIn(
server['id'], [self.up_cell_insts[2], self.up_cell_insts[3]])
def test_get_servers_detail_non_admin_with_deleted_flag(self):
# If the list_records_by_skipping_down_cells config option is True,
# the deleted filter should be ignored and the instances from the up
# cells plus the partial results from the down cells should be
# returned.
# Set the policy so we don't have permission to allow
# all filters but are able to get server details.
servers_rule = 'os_compute_api:servers:detail'
extraspec_rule = 'os_compute_api:servers:allow_all_filters'
self.policy.set_rules({
extraspec_rule: 'rule:admin_api',
servers_rule: '@'})
servers = self.api.get_servers(search_opts={'deleted': True})
# gets 4 results from up cells and 4 from down cells.
self.assertEqual(8, len(servers))
for server in servers:
if "image" not in server:
self.assertIn(server['id'], self.down_cell_insts)
else:
self.assertIn(server['id'], self.up_cell_insts)
def test_get_servers_detail_filters(self):
# We get the results only from the up cells, thus ignoring the down
# cells if the list_records_by_skipping_down_cells config option is True.
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.admin_api = api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(
search_opts={'hostname': "cell3-inst0"})
self.assertEqual(1, len(servers))
self.assertEqual(self.up_cell_insts[2], servers[0]['id'])
def test_get_servers_detail_all_tenants_with_down_cells(self):
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.admin_api = api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(search_opts={'all_tenants': True})
# 4 servers from the up cells and 4 servers from the down cells
# plus the 2 instances from cell1 and cell3 which are in a different
# project.
self.assertEqual(10, len(servers))
for server in servers:
if 'user_id' not in server:
# server is in the down cell.
self.assertEqual('UNKNOWN', server['status'])
if server['tenant_id'] != 'faker':
self.assertIn(server['id'], self.down_cell_insts)
# the partial construct will only have 5 keys: created,
# tenant_id, status, id and links. security_groups should be
# present too but isn't since we haven't created a network
# interface
self.assertEqual(5, len(server))
else:
# server in up cell
if server['tenant_id'] != 'faker':
self.assertIn(server['id'], self.up_cell_insts)
self.assertEqual(server['user_id'], self.project_id)
self.assertIn('image', server)
class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
# We have to cap the microversion at 2.38 because that's the max we
# can use to update image metadata via our compute images proxy API.
microversion = '2.38'
def _disable_compute_for(self, server):
# Refresh to get its host
server = self.admin_api.get_server(server['id'])
host = server['OS-EXT-SRV-ATTR:host']
# Disable the service it is on
self.admin_api.put_service(
'disable', {'host': host, 'binary': 'nova-compute'})
def test_rebuild_with_image_novalidhost(self):
"""Creates a server with an image that is valid for the single compute
that we have. Then rebuilds the server, passing in an image with
metadata that does not fit the single compute which should result in
a NoValidHost error. The ImagePropertiesFilter is enabled by default,
so it should filter out the host based on the image metadata.
"""
self.compute2 = self.start_service('compute', host='host2')
# We hard-code from a fake image since we can't get images
# via the compute /images proxy API with microversion > 2.35.
original_image_ref = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
server_req_body = {
'server': {
'imageRef': original_image_ref,
'flavorRef': '1', # m1.tiny from DefaultFlavorsFixture,
'name': 'test_rebuild_with_image_novalidhost',
# We don't care about networking for this test. This requires
# microversion >= 2.37.
'networks': 'none'
}
}
server = self.api.post_server(server_req_body)
self._wait_for_state_change(server, 'ACTIVE')
# Disable the host we're on so ComputeFilter would have ruled it out
# normally
self._disable_compute_for(server)
# Now update the image metadata to be something that won't work with
# the fake compute driver we're using since the fake driver has an
# "x86_64" architecture.
rebuild_image_ref = self.glance.auto_disk_config_enabled_image['id']
self.api.put_image_meta_key(
rebuild_image_ref, 'hw_architecture', 'unicore32')
# Now rebuild the server with that updated image and it should result
# in a NoValidHost failure from the scheduler.
rebuild_req_body = {
'rebuild': {
'imageRef': rebuild_image_ref
}
}
# Since we're using the CastAsCall fixture, the NoValidHost error
# should actually come back to the API and result in a 500 error.
# Normally the user would get a 202 response because nova-api RPC casts
# to nova-conductor which RPC calls the scheduler which raises the
# NoValidHost. We can mimic the way an end user would discover the
# failure by looking for the failed 'rebuild' instance action event.
self.api.api_post('/servers/%s/action' % server['id'],
rebuild_req_body, check_response_status=[500])
# Look for the failed rebuild action.
self._wait_for_action_fail_completion(
server, instance_actions.REBUILD, 'rebuild_server')
# Assert the server image_ref was rolled back on failure.
server = self.api.get_server(server['id'])
self.assertEqual(original_image_ref, server['image']['id'])
# The server should be in ERROR state
self.assertEqual('ERROR', server['status'])
self.assertIn('No valid host', server['fault']['message'])
# Rebuild it again with the same bad image to make sure it's rejected
# again. Since we're using CastAsCall here, there is no 202 from the
# API, and the exception from conductor gets passed back through the
# API.
ex = self.assertRaises(
client.OpenStackApiException, self.api.api_post,
'/servers/%s/action' % server['id'], rebuild_req_body)
self.assertIn('NoValidHost', str(ex))
# A rebuild to the same host should never attempt a rebuild claim.
@mock.patch('nova.compute.resource_tracker.ResourceTracker.rebuild_claim',
new_callable=mock.NonCallableMock)
def test_rebuild_with_new_image(self, mock_rebuild_claim):
"""Rebuilds a server with a different image which will run it through
the scheduler to validate the image is still OK with the compute host
that the instance is running on.
Validates that additional resources are not allocated against the
instance.host in Placement due to the rebuild on the same host.
"""
admin_api = self.api_fixture.admin_api
admin_api.microversion = '2.53'
def _get_provider_uuid_by_host(host):
resp = admin_api.api_get(
'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
return resp['hypervisors'][0]['id']
def _get_provider_usages(provider_uuid):
return self.placement.get(
'/resource_providers/%s/usages' % provider_uuid).body['usages']
def _get_allocations_by_server_uuid(server_uuid):
return self.placement.get(
'/allocations/%s' % server_uuid).body['allocations']
def _set_provider_inventory(rp_uuid, resource_class, inventory):
# Get the resource provider generation for the inventory update.
rp = self.placement.get(
'/resource_providers/%s' % rp_uuid).body
inventory['resource_provider_generation'] = rp['generation']
return self.placement.put(
'/resource_providers/%s/inventories/%s' %
(rp_uuid, resource_class), inventory).body
def assertFlavorMatchesAllocation(flavor, allocation):
self.assertEqual(flavor['vcpus'], allocation['VCPU'])
self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
self.assertEqual(flavor['disk'], allocation['DISK_GB'])
nodename = self.compute.manager._get_nodename(None)
rp_uuid = _get_provider_uuid_by_host(nodename)
# make sure we start with no usage on the compute node
rp_usages = _get_provider_usages(rp_uuid)
self.assertEqual({'VCPU': 0, 'MEMORY_MB': 0, 'DISK_GB': 0}, rp_usages)
server_req_body = {
'server': {
# We hard-code from a fake image since we can't get images
# via the compute /images proxy API with microversion > 2.35.
'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'flavorRef': '1', # m1.tiny from DefaultFlavorsFixture,
'name': 'test_rebuild_with_new_image',
# We don't care about networking for this test. This requires
# microversion >= 2.37.
'networks': 'none'
}
}
server = self.api.post_server(server_req_body)
self._wait_for_state_change(server, 'ACTIVE')
flavor = self.api.api_get('/flavors/1').body['flavor']
# make the compute node full and ensure the rebuild still succeeds
_set_provider_inventory(rp_uuid, "VCPU", {"total": 1})
# There should be usage for the server on the compute node now.
rp_usages = _get_provider_usages(rp_uuid)
assertFlavorMatchesAllocation(flavor, rp_usages)
allocs = _get_allocations_by_server_uuid(server['id'])
self.assertIn(rp_uuid, allocs)
allocs = allocs[rp_uuid]['resources']
assertFlavorMatchesAllocation(flavor, allocs)
rebuild_image_ref = self.glance.auto_disk_config_enabled_image['id']
# Now rebuild the server with a different image.
rebuild_req_body = {
'rebuild': {
'imageRef': rebuild_image_ref
}
}
self.api.api_post('/servers/%s/action' % server['id'],
rebuild_req_body)
self._wait_for_server_parameter(
server, {'OS-EXT-STS:task_state': None})
# The usage and allocations should not have changed.
rp_usages = _get_provider_usages(rp_uuid)
assertFlavorMatchesAllocation(flavor, rp_usages)
allocs = _get_allocations_by_server_uuid(server['id'])
self.assertIn(rp_uuid, allocs)
allocs = allocs[rp_uuid]['resources']
assertFlavorMatchesAllocation(flavor, allocs)
def test_volume_backed_rebuild_different_image(self):
"""Tests that trying to rebuild a volume-backed instance with a
different image than what is in the root disk of the root volume
will result in a 400 BadRequest error.
"""
# First create our server as normal.
server_req_body = {
# There is no imageRef because this is boot from volume.
'server': {
'flavorRef': '1', # m1.tiny from DefaultFlavorsFixture,
'name': 'test_volume_backed_rebuild_different_image',
# We don't care about networking for this test. This requires
# microversion >= 2.37.
'networks': 'none',
'block_device_mapping_v2': [{
'boot_index': 0,
'uuid':
nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
'source_type': 'volume',
'destination_type': 'volume'
}]
}
}
server = self.api.post_server(server_req_body)
server = self._wait_for_state_change(server, 'ACTIVE')
# For a volume-backed server, the image ref will be an empty string
# in the server response.
self.assertEqual('', server['image'])
# Now rebuild the server with a different image than was used to create
# our fake volume.
rebuild_image_ref = self.glance.auto_disk_config_enabled_image['id']
rebuild_req_body = {
'rebuild': {
'imageRef': rebuild_image_ref
}
}
resp = self.api.api_post('/servers/%s/action' % server['id'],
rebuild_req_body, check_response_status=[400])
# Assert that we failed because of the image change and not something
# else.
self.assertIn('Unable to rebuild with a different image for a '
'volume-backed server', str(resp))
class ServersTestV280(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
def setUp(self):
super(ServersTestV280, self).setUp()
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
self.admin_api = api_fixture.admin_api
self.api.microversion = '2.80'
self.admin_api.microversion = '2.80'
def test_get_migrations_after_cold_migrate_server_in_same_project(
self):
# Create a server by non-admin
server = self.api.post_server({
'server': {
'flavorRef': 1,
'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'migrate-server-test',
'networks': 'none'
}})
server_id = server['id']
# Check it's there
found_server = self.api.get_server(server_id)
self.assertEqual(server_id, found_server['id'])
self.start_service('compute', host='host2')
post = {'migrate': {}}
self.admin_api.post_server_action(server_id, post)
# Get the migration records by admin
migrations = self.admin_api.get_migrations(
user_id=self.admin_api.auth_user)
self.assertEqual(1, len(migrations))
self.assertEqual(server_id, migrations[0]['instance_uuid'])
# Get the migration records by non-admin
migrations = self.admin_api.get_migrations(
user_id=self.api.auth_user)
self.assertEqual([], migrations)
def test_get_migrations_after_live_migrate_server_in_different_project(
self):
# Create a server by non-admin
server = self.api.post_server({
'server': {
'flavorRef': 1,
'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'migrate-server-test',
'networks': 'none'
}})
server_id = server['id']
# Check it's there
found_server = self.api.get_server(server_id)
self.assertEqual(server_id, found_server['id'])
server = self._wait_for_state_change(found_server, 'BUILD')
self.start_service('compute', host='host2')
project_id_1 = '4906260553374bf0a5d566543b320516'
project_id_2 = 'c850298c1b6b4796a8f197ac310b2469'
new_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version=self.api_major_version, project_id=project_id_1))
new_admin_api = new_api_fixture.admin_api
new_admin_api.microversion = '2.80'
post = {
'os-migrateLive': {
'host': 'host2',
'block_migration': True
}
}
new_admin_api.post_server_action(server_id, post)
# Get the migration records
migrations = new_admin_api.get_migrations(project_id=project_id_1)
self.assertEqual(1, len(migrations))
self.assertEqual(server_id, migrations[0]['instance_uuid'])
# Get the migration records by not exist project_id
migrations = new_admin_api.get_migrations(project_id=project_id_2)
self.assertEqual([], migrations)
class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
"""Tests moving servers while checking the resource allocations and usages
These tests use two compute hosts. Boot a server on one of them then try to
move the server to the other. At every step resource allocation of the
server and the resource usages of the computes are queried from placement
API and asserted.
"""
REQUIRES_LOCKING = True
# NOTE(danms): The test defaults to using SmallFakeDriver,
# which only has one vcpu, which can't take the doubled allocation
# we're now giving it. So, use the bigger MediumFakeDriver here.
compute_driver = 'fake.MediumFakeDriver'
def setUp(self):
super(ServerMovingTests, self).setUp()
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
self.compute1 = self._start_compute(host='host1')
self.compute2 = self._start_compute(host='host2')
flavors = self.api.get_flavors()
self.flavor1 = flavors[0]
self.flavor2 = flavors[1]
# create flavor3 which has less MEMORY_MB but more DISK_GB than flavor2
flavor_body = {'flavor':
{'name': 'test_flavor3',
'ram': int(self.flavor2['ram'] / 2),
'vcpus': 1,
'disk': self.flavor2['disk'] * 2,
'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e3'
}}
self.flavor3 = self.api.post_flavor(flavor_body)
def _other_hostname(self, host):
other_host = {'host1': 'host2',
'host2': 'host1'}
return other_host[host]
def _run_periodics(self):
# NOTE(jaypipes): We always run periodics in the same order: first on
# compute1, then on compute2. However, we want to test scenarios when
# the periodics run at different times during move operations. This is
# why we have the "reverse" tests which simply switch the source and
# dest host while keeping the order in which we run the
# periodics. This effectively allows us to test the matrix of timing
# scenarios during move operations.
ctx = context.get_admin_context()
LOG.info('Running periodic for compute1 (%s)',
self.compute1.manager.host)
self.compute1.manager.update_available_resource(ctx)
LOG.info('Running periodic for compute2 (%s)',
self.compute2.manager.host)
self.compute2.manager.update_available_resource(ctx)
LOG.info('Finished with periodics')
def test_resize_revert(self):
self._test_resize_revert(dest_hostname='host1')
def test_resize_revert_reverse(self):
self._test_resize_revert(dest_hostname='host2')
def test_resize_confirm(self):
self._test_resize_confirm(dest_hostname='host1')
def test_resize_confirm_reverse(self):
self._test_resize_confirm(dest_hostname='host2')
def test_migration_confirm_resize_error(self):
source_hostname = self.compute1.host
dest_hostname = self.compute2.host
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
server = self._boot_and_check_allocations(self.flavor1,
source_hostname)
self._move_and_check_allocations(
server, request={'migrate': None}, old_flavor=self.flavor1,
new_flavor=self.flavor1, source_rp_uuid=source_rp_uuid,
dest_rp_uuid=dest_rp_uuid)
# Mock the driver's confirm_migration to fail so the confirm errors out
def fake_confirm_migration(context, migration, instance, network_info):
raise exception.MigrationPreCheckError(
reason='test_migration_confirm_resize_error')
with mock.patch('nova.virt.fake.FakeDriver.'
'confirm_migration',
side_effect=fake_confirm_migration):
# Confirm the migration/resize and check the usages
post = {'confirmResize': None}
self.api.post_server_action(
server['id'], post, check_response_status=[204])
server = self._wait_for_state_change(server, 'ERROR')
# After the failed confirm puts the server into ERROR, we should have an
# allocation only on the destination host
self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor1)
self.assertRequestMatchesUsage({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, source_rp_uuid)
self.assertFlavorMatchesAllocation(self.flavor1, server['id'],
dest_rp_uuid)
self._run_periodics()
# Check we're still accurate after running the periodics
self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor1)
self.assertRequestMatchesUsage({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, source_rp_uuid)
self.assertFlavorMatchesAllocation(self.flavor1, server['id'],
dest_rp_uuid)
self._delete_and_check_allocations(server)
def _test_resize_revert(self, dest_hostname):
source_hostname = self._other_hostname(dest_hostname)
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
server = self._boot_and_check_allocations(self.flavor1,
source_hostname)
self._resize_and_check_allocations(server, self.flavor1, self.flavor2,
source_rp_uuid, dest_rp_uuid)
# Revert the resize and check the usages
post = {'revertResize': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'ACTIVE')
# Make sure the RequestSpec.flavor matches the original flavor.
ctxt = context.get_admin_context()
reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
self.assertEqual(self.flavor1['id'], reqspec.flavor.flavorid)
self._run_periodics()
# the original host is expected to have the old resource allocation
self.assertFlavorMatchesUsage(source_rp_uuid, self.flavor1)
self.assertRequestMatchesUsage({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, dest_rp_uuid)
# Check that the server only allocates resources from the original host
self.assertFlavorMatchesAllocation(self.flavor1, server['id'],
source_rp_uuid)
self._delete_and_check_allocations(server)
def _test_resize_confirm(self, dest_hostname):
source_hostname = self._other_hostname(dest_hostname)
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
server = self._boot_and_check_allocations(self.flavor1,
source_hostname)
self._resize_and_check_allocations(server, self.flavor1, self.flavor2,
source_rp_uuid, dest_rp_uuid)
# Confirm the resize and check the usages
self._confirm_resize(server)
# After confirming, we should have an allocation only on the
# destination host
# The target host usage should be according to the new flavor
self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor2)
self.assertRequestMatchesUsage({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, source_rp_uuid)
# and the target host allocation should be according to the new flavor
self.assertFlavorMatchesAllocation(self.flavor2, server['id'],
dest_rp_uuid)
self._run_periodics()
# Check we're still accurate after running the periodics
# and the target host usage should be according to the new flavor
self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor2)
self.assertRequestMatchesUsage({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, source_rp_uuid)
# and the server allocates only from the target host
self.assertFlavorMatchesAllocation(self.flavor2, server['id'],
dest_rp_uuid)
self._delete_and_check_allocations(server)
def test_resize_revert_same_host(self):
# make sure that the test only uses a single host
compute2_service_id = self.admin_api.get_services(
host=self.compute2.host, binary='nova-compute')[0]['id']
self.admin_api.put_service(compute2_service_id, {'status': 'disabled'})
hostname = self.compute1.manager.host
rp_uuid = self._get_provider_uuid_by_host(hostname)
server = self._boot_and_check_allocations(self.flavor2, hostname)
self._resize_to_same_host_and_check_allocations(
server, self.flavor2, self.flavor3, rp_uuid)
# Revert the resize and check the usages
post = {'revertResize': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'ACTIVE')
self._run_periodics()
# after revert only allocations due to the old flavor should remain
self.assertFlavorMatchesUsage(rp_uuid, self.flavor2)
self.assertFlavorMatchesAllocation(self.flavor2, server['id'],
rp_uuid)
self._delete_and_check_allocations(server)
def test_resize_confirm_same_host(self):
# make sure that the test only uses a single host
compute2_service_id = self.admin_api.get_services(
host=self.compute2.host, binary='nova-compute')[0]['id']
self.admin_api.put_service(compute2_service_id, {'status': 'disabled'})
hostname = self.compute1.manager.host
rp_uuid = self._get_provider_uuid_by_host(hostname)
server = self._boot_and_check_allocations(self.flavor2, hostname)
self._resize_to_same_host_and_check_allocations(
server, self.flavor2, self.flavor3, rp_uuid)
# Confirm the resize and check the usages
self._confirm_resize(server)
self._run_periodics()
# after confirm only allocations due to the new flavor should remain
self.assertFlavorMatchesUsage(rp_uuid, self.flavor3)
self.assertFlavorMatchesAllocation(self.flavor3, server['id'],
rp_uuid)
self._delete_and_check_allocations(server)
def test_resize_not_enough_resource(self):
# Try to resize to a flavor that requests more VCPU than the compute
# hosts have available and expect the resize to fail
flavor_body = {'flavor':
{'name': 'test_too_big_flavor',
'ram': 1024,
'vcpus': fake.MediumFakeDriver.vcpus + 1,
'disk': 20,
}}
big_flavor = self.api.post_flavor(flavor_body)
dest_hostname = self.compute2.host
source_hostname = self._other_hostname(dest_hostname)
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
server = self._boot_and_check_allocations(
self.flavor1, source_hostname)
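# Disallow resizing to the same host so the scheduler has to consider the
# other compute, which cannot fit the new flavor either.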
self.flags(allow_resize_to_same_host=False)
resize_req = {
'resize': {
'flavorRef': big_flavor['id']
}
}
self.api.post_server_action(
server['id'], resize_req, check_response_status=[202])
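# The resize request is accepted by the API, but the scheduler cannot find
# a host that fits the new flavor, so the instance action should record
# the NoValidHost failure.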
event = self._assert_resize_migrate_action_fail(
server, instance_actions.RESIZE, 'NoValidHost')
self.assertIn('details', event)
# This test case works in microversion 2.84.
self.assertIn('No valid host was found', event['details'])
server = self.admin_api.get_server(server['id'])
self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])
# The server is still ACTIVE and thus there is no fault message.
self.assertEqual('ACTIVE', server['status'])
self.assertNotIn('fault', server)
# only the source host shall have usages after the failed resize
self.assertFlavorMatchesUsage(source_rp_uuid, self.flavor1)
# Check that the other provider has no usage
self.assertRequestMatchesUsage(
{'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, dest_rp_uuid)
# Check that the server only allocates resources from the host it is
# booted on
self.assertFlavorMatchesAllocation(self.flavor1, server['id'],
source_rp_uuid)
self._delete_and_check_allocations(server)
def test_resize_delete_while_verify(self):
"""Test scenario where the server is deleted while in the
VERIFY_RESIZE state and ensures the allocations are properly
cleaned up from the source and target compute node resource providers.
The _confirm_resize_on_deleting() method in the API is actually
responsible for making sure the migration-based allocations get
cleaned up by confirming the resize on the source host before deleting
the server from the target host.
"""
dest_hostname = 'host2'
source_hostname = self._other_hostname(dest_hostname)
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
server = self._boot_and_check_allocations(self.flavor1,
source_hostname)
self._resize_and_check_allocations(server, self.flavor1, self.flavor2,
source_rp_uuid, dest_rp_uuid)
self._delete_and_check_allocations(server)
def test_resize_confirm_assert_hypervisor_usage_no_periodics(self):
"""Resize confirm test for bug 1818914 to make sure the tracked
resource usage in the os-hypervisors API (not placement) is as
expected during a confirmed resize. This intentionally does not
use _test_resize_confirm in order to avoid running periodics.
"""
# There should be no usage from a server on either hypervisor.
source_rp_uuid = self._get_provider_uuid_by_host('host1')
dest_rp_uuid = self._get_provider_uuid_by_host('host2')
no_usage = {'vcpus': 0, 'disk': 0, 'ram': 0}
for rp_uuid in (source_rp_uuid, dest_rp_uuid):
self.assert_hypervisor_usage(
rp_uuid, no_usage, volume_backed=False)
# Create the server and wait for it to be ACTIVE.
server = self._boot_and_check_allocations(self.flavor1, 'host1')
# There should be resource usage for flavor1 on the source host.
self.assert_hypervisor_usage(
source_rp_uuid, self.flavor1, volume_backed=False)
# And still no usage on the dest host.
self.assert_hypervisor_usage(
dest_rp_uuid, no_usage, volume_backed=False)
# Resize the server to flavor2 and wait for VERIFY_RESIZE.
self.flags(allow_resize_to_same_host=False)
resize_req = {
'resize': {
'flavorRef': self.flavor2['id']
}
}
self.api.post_server_action(server['id'], resize_req)
self._wait_for_state_change(server, 'VERIFY_RESIZE')
# There should be resource usage for flavor1 on the source host.
self.assert_hypervisor_usage(
source_rp_uuid, self.flavor1, volume_backed=False)
# And resource usage for flavor2 on the target host.
self.assert_hypervisor_usage(
dest_rp_uuid, self.flavor2, volume_backed=False)
# Now confirm the resize and check hypervisor usage again.
self._confirm_resize(server)
# There should be no resource usage for flavor1 on the source host.
self.assert_hypervisor_usage(
source_rp_uuid, no_usage, volume_backed=False)
# And resource usage for flavor2 should still be on the target host.
self.assert_hypervisor_usage(
dest_rp_uuid, self.flavor2, volume_backed=False)
# Run periodics and make sure usage is still as expected.
self._run_periodics()
self.assert_hypervisor_usage(
source_rp_uuid, no_usage, volume_backed=False)
self.assert_hypervisor_usage(
dest_rp_uuid, self.flavor2, volume_backed=False)
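# Poll the fake notifier until a notification with the given event_type
# shows up, failing the test after max_retries polls (0.1s apart).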
def _wait_for_notification_event_type(self, event_type, max_retries=50):
retry_counter = 0
while True:
if len(fake_notifier.NOTIFICATIONS) > 0:
for notification in fake_notifier.NOTIFICATIONS:
if notification.event_type == event_type:
return
if retry_counter == max_retries:
self.fail('Wait for notification event type (%s) failed'
% event_type)
retry_counter += 1
time.sleep(0.1)
def test_evacuate_with_no_compute(self):
source_hostname = self.compute1.host
dest_hostname = self.compute2.host
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
# Disable compute service on destination host
compute2_service_id = self.admin_api.get_services(
host=dest_hostname, binary='nova-compute')[0]['id']
self.admin_api.put_service(compute2_service_id, {'status': 'disabled'})
server = self._boot_and_check_allocations(
self.flavor1, source_hostname)
# Force source compute down
source_compute_id = self.admin_api.get_services(
host=source_hostname, binary='nova-compute')[0]['id']
self.compute1.stop()
self.admin_api.put_service(
source_compute_id, {'forced_down': 'true'})
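# Forcing the service down lets the evacuate request proceed without
# waiting for the service group to time out.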
# Initialize fake_notifier
fake_notifier.stub_notifier(self)
fake_notifier.reset()
# Initiate evacuation
# There is no other host to evacuate to, so the rebuild should put the
# VM into ERROR state, but it should remain on the source compute
server = self._evacuate_server(
server, expected_state='ERROR', expected_host=source_hostname,
expected_migration_status='error')
# NOTE(elod.illes): Should be changed to non-polling solution when
# patch https://review.opendev.org/#/c/482629/ gets merged:
# fake_notifier.wait_for_versioned_notifications(
# 'compute_task.rebuild_server')
self._wait_for_notification_event_type('compute_task.rebuild_server')
self._run_periodics()
# Check migrations
migrations = self.api.get_migrations()
self.assertEqual(1, len(migrations))
self.assertEqual('evacuation', migrations[0]['migration_type'])
self.assertEqual(server['id'], migrations[0]['instance_uuid'])
self.assertEqual(source_hostname, migrations[0]['source_compute'])
self.assertEqual('error', migrations[0]['status'])
# Restart source host
self.admin_api.put_service(
source_compute_id, {'forced_down': 'false'})
self.compute1.start()
self._run_periodics()
# Check allocations and usages: resources should only be used on the source host
self.assertFlavorMatchesUsage(source_rp_uuid, self.flavor1)
self.assertFlavorMatchesAllocation(self.flavor1, server['id'],
source_rp_uuid)
zero_usage = {'VCPU': 0, 'DISK_GB': 0, 'MEMORY_MB': 0}
self.assertRequestMatchesUsage(zero_usage, dest_rp_uuid)
self._delete_and_check_allocations(server)
def test_migrate_no_valid_host(self):
source_hostname = self.compute1.host
dest_hostname = self.compute2.host
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
server = self._boot_and_check_allocations(
self.flavor1, source_hostname)
dest_compute_id = self.admin_api.get_services(
host=dest_hostname, binary='nova-compute')[0]['id']
self.compute2.stop()
# force it down to avoid waiting for the service group to time out
self.admin_api.put_service(
dest_compute_id, {'forced_down': 'true'})
# migrate the server
post = {'migrate': None}
self.api.post_server_action(server['id'], post)
self._assert_resize_migrate_action_fail(
server, instance_actions.MIGRATE, 'NoValidHost')
expected_params = {'OS-EXT-SRV-ATTR:host': source_hostname,
'status': 'ACTIVE'}
self._wait_for_server_parameter(server, expected_params)
self._run_periodics()
# Expect to have allocation only on source_host
self.assertFlavorMatchesUsage(source_rp_uuid, self.flavor1)
zero_usage = {'VCPU': 0, 'DISK_GB': 0, 'MEMORY_MB': 0}
self.assertRequestMatchesUsage(zero_usage, dest_rp_uuid)
self.assertFlavorMatchesAllocation(self.flavor1, server['id'],
source_rp_uuid)
self._delete_and_check_allocations(server)
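# keep_hypervisor_state controls whether the restarted source compute still
# sees the evacuated instance on its (fake) hypervisor; both cases are
# exercised by the two test methods below.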
def _test_evacuate(self, keep_hypervisor_state):
source_hostname = self.compute1.host
dest_hostname = self.compute2.host
server = self._boot_and_check_allocations(
self.flavor1, source_hostname)
source_compute_id = self.admin_api.get_services(
host=source_hostname, binary='nova-compute')[0]['id']
self.compute1.stop()
# force it down to avoid waiting for the service group to time out
self.admin_api.put_service(
source_compute_id, {'forced_down': 'true'})
# evacuate the server
server = self._evacuate_server(server, expected_host=dest_hostname)
# Expect to have allocation and usages on both computes as the
# source compute is still down
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
self.assertFlavorMatchesUsage(source_rp_uuid, self.flavor1)
self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor1)
self._check_allocation_during_evacuate(
self.flavor1, server['id'], source_rp_uuid, dest_rp_uuid)
# restart the source compute
self.compute1 = self.restart_compute_service(
self.compute1, keep_hypervisor_state=keep_hypervisor_state)
self.admin_api.put_service(
source_compute_id, {'forced_down': 'false'})
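# Once the source compute is back up, its ResourceTracker should have
# removed the allocations held by the evacuated server.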
source_usages = self._get_provider_usages(source_rp_uuid)
self.assertEqual({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0},
source_usages)
self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor1)
self.assertFlavorMatchesAllocation(self.flavor1, server['id'],
dest_rp_uuid)
self._delete_and_check_allocations(server)
def test_evacuate_instance_kept_on_the_hypervisor(self):
self._test_evacuate(keep_hypervisor_state=True)
def test_evacuate_clean_hypervisor(self):
self._test_evacuate(keep_hypervisor_state=False)
def _test_evacuate_forced_host(self, keep_hypervisor_state):
"""Evacuating a server with a forced host bypasses the scheduler
which means conductor has to create the allocations against the
destination node. This test recreates the scenarios and asserts
the allocations on the source and destination nodes are as expected.
"""
source_hostname = self.compute1.host
dest_hostname = self.compute2.host
# the ability to force evacuate a server is removed entirely in 2.68
self.api.microversion = '2.67'
server = self._boot_and_check_allocations(
self.flavor1, source_hostname)
source_compute_id = self.admin_api.get_services(
host=source_hostname, binary='nova-compute')[0]['id']
self.compute1.stop()
# force it down to avoid waiting for the service group to time out
self.admin_api.put_service(
source_compute_id, {'forced_down': 'true'})
# evacuate the server and force the destination host which bypasses
# the scheduler
post = {
'evacuate': {
'host': dest_hostname,
'force': True
}
}
self.api.post_server_action(server['id'], post)
expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname,
'status': 'ACTIVE'}
server = self._wait_for_server_parameter(server,
expected_params)
# Run the periodics to show those don't modify allocations.
self._run_periodics()
# Expect to have allocation and usages on both computes as the
# source compute is still down
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
self.assertFlavorMatchesUsage(source_rp_uuid, self.flavor1)
self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor1)
self._check_allocation_during_evacuate(
self.flavor1, server['id'], source_rp_uuid, dest_rp_uuid)
# restart the source compute
self.compute1 = self.restart_compute_service(
self.compute1, keep_hypervisor_state=keep_hypervisor_state)
self.admin_api.put_service(
source_compute_id, {'forced_down': 'false'})
# Run the periodics again to show they don't change anything.
self._run_periodics()
# When the source node starts up, the instance has moved so the
# ResourceTracker should clean up allocations for the source node.
source_usages = self._get_provider_usages(source_rp_uuid)
self.assertEqual(
{'VCPU': 0, 'MEMORY_MB': 0, 'DISK_GB': 0}, source_usages)
# The usages/allocations should still exist on the destination node
# after the source node starts back up.
self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor1)
self.assertFlavorMatchesAllocation(self.flavor1, server['id'],
dest_rp_uuid)
self._delete_and_check_allocations(server)
def test_evacuate_forced_host_instance_kept_on_the_hypervisor(self):
self._test_evacuate_forced_host(keep_hypervisor_state=True)
def test_evacuate_forced_host_clean_hypervisor(self):
self._test_evacuate_forced_host(keep_hypervisor_state=False)
def test_evacuate_forced_host_v268(self):
"""Evacuating a server with a forced host was removed in API
microversion 2.68. This test ensures that the request is rejected.
"""
source_hostname = self.compute1.host
dest_hostname = self.compute2.host
server = self._boot_and_check_allocations(
self.flavor1, source_hostname)
# evacuate the server and force the destination host which bypasses
# the scheduler
post = {
'evacuate': {
'host': dest_hostname,
'force': True
}
}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
server['id'], post)
self.assertIn("'force' was unexpected", str(ex))
# NOTE(gibi): there is a similar test in SchedulerOnlyChecksTargetTest but
# we want this test here as well because ServerMovingTest is a parent class
# of multiple test classes that run this test case with different compute
# node setups.
def test_evacuate_host_specified_but_not_forced(self):
"""Evacuating a server with a host but using the scheduler to create
the allocations against the destination node. This test recreates the
scenarios and asserts the allocations on the source and destination
nodes are as expected.
"""
source_hostname = self.compute1.host
dest_hostname = self.compute2.host
server = self._boot_and_check_allocations(
self.flavor1, source_hostname)
source_compute_id = self.admin_api.get_services(
host=source_hostname, binary='nova-compute')[0]['id']
self.compute1.stop()
# force it down to avoid waiting for the service group to time out
self.admin_api.put_service(
source_compute_id, {'forced_down': 'true'})
# evacuate the server, specifying the target host but not forcing it, so
# the scheduler is used to validate the target host
post = {
'evacuate': {
'host': dest_hostname,
}
}
self.api.post_server_action(server[