Heal RequestSpec.is_bfv for legacy instances during moves
Change I9c2111f7377df65c1fc3c72323f85483b3295989 sets the RequestSpec.is_bfv flag for newly created instances so that scheduling (using placement) does not allocate DISK_GB resources for the Flavor's root_gb when booting from volume. RequestSpecs for old instances created before that change will not have the is_bfv field set, so this change adds a check for that in the various move operations (evacuate, unshelve, cold migrate and live migrate) and sets the RequestSpec.is_bfv flag accordingly. The related functional test is updated for the legacy cold migrate and heal scenario. Change-Id: I8e529ad4d707b2ad012328993892db83ce464c4b Closes-Bug: #1469179
This commit is contained in:
parent
f63fd14975
commit
9344c1995a
@ -224,6 +224,28 @@ def is_volume_backed_instance(context, instance, bdms=None):
|
||||
return not instance['image_ref']
|
||||
|
||||
|
||||
def heal_reqspec_is_bfv(ctxt, request_spec, instance):
    """Backfill the is_bfv flag on a RequestSpec created before Rocky.

    Starting in Rocky, new instances have their RequestSpec created with
    the "is_bfv" flag to indicate if they are volume-backed which is used
    by the scheduler when determining root disk resource allocations.

    RequestSpecs created before Rocky will not have the is_bfv flag set
    so we need to calculate it here and update the RequestSpec.

    :param ctxt: nova.context.RequestContext auth context
    :param request_spec: nova.objects.RequestSpec used for scheduling
    :param instance: nova.objects.Instance being scheduled
    """
    if 'is_bfv' not in request_spec:
        # Legacy spec: work out whether the instance is volume-backed
        # and persist the result so the scheduler stops allocating
        # DISK_GB for the flavor's root_gb on volume-backed servers.
        request_spec.is_bfv = is_volume_backed_instance(ctxt, instance)
        request_spec.save()
|
||||
|
||||
|
||||
def convert_mb_to_ceil_gb(mb_value):
|
||||
gb_int = 0
|
||||
if mb_value:
|
||||
|
@ -775,8 +775,12 @@ class ComputeTaskManager(base.Base):
|
||||
# accept it
|
||||
filter_properties = request_spec.\
|
||||
to_legacy_filter_properties_dict()
|
||||
# FIXME(mriedem): This means we'll lose the is_bfv flag
|
||||
# set on an existing RequestSpec that had it set.
|
||||
request_spec = request_spec.\
|
||||
to_legacy_request_spec_dict()
|
||||
# TODO(mriedem): we don't even need to call populate_retry
|
||||
# because unshelve failures aren't rescheduled.
|
||||
scheduler_utils.populate_retry(filter_properties,
|
||||
instance.uuid)
|
||||
request_spec = objects.RequestSpec.from_primitives(
|
||||
@ -799,6 +803,8 @@ class ComputeTaskManager(base.Base):
|
||||
cell=instance_mapping.cell_mapping))
|
||||
|
||||
request_spec.ensure_project_and_user_id(instance)
|
||||
compute_utils.heal_reqspec_is_bfv(
|
||||
context, request_spec, instance)
|
||||
host_lists = self._schedule_instances(context,
|
||||
request_spec, [instance.uuid],
|
||||
return_alternates=False)
|
||||
@ -953,6 +959,8 @@ class ComputeTaskManager(base.Base):
|
||||
instance,
|
||||
image_ref)
|
||||
request_spec.ensure_project_and_user_id(instance)
|
||||
compute_utils.heal_reqspec_is_bfv(
|
||||
context, request_spec, instance)
|
||||
host_lists = self._schedule_instances(context,
|
||||
request_spec, [instance.uuid],
|
||||
return_alternates=False)
|
||||
|
@ -15,6 +15,7 @@ import oslo_messaging as messaging
|
||||
import six
|
||||
|
||||
from nova.compute import power_state
|
||||
from nova.compute import utils as compute_utils
|
||||
from nova.conductor.tasks import base
|
||||
from nova.conductor.tasks import migrate
|
||||
import nova.conf
|
||||
@ -302,6 +303,8 @@ class LiveMigrationTask(base.TaskBase):
|
||||
cell=cell_mapping)
|
||||
|
||||
request_spec.ensure_project_and_user_id(self.instance)
|
||||
compute_utils.heal_reqspec_is_bfv(
|
||||
self.context, request_spec, self.instance)
|
||||
|
||||
return request_spec
|
||||
|
||||
|
@ -14,6 +14,7 @@ from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from nova import availability_zones
|
||||
from nova.compute import utils as compute_utils
|
||||
from nova.conductor.tasks import base
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
@ -220,6 +221,8 @@ class MigrationTask(base.TaskBase):
|
||||
migration = self._preallocate_migration()
|
||||
|
||||
self.request_spec.ensure_project_and_user_id(self.instance)
|
||||
compute_utils.heal_reqspec_is_bfv(
|
||||
self.context, self.request_spec, self.instance)
|
||||
# On an initial call to migrate, 'self.host_list' will be None, so we
|
||||
# have to call the scheduler to get a list of acceptable hosts to
|
||||
# migrate to. That list will consist of a selected host, along with
|
||||
|
@ -3669,6 +3669,37 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase):
|
||||
20,
|
||||
self.admin_api.get_hypervisor_stats()['local_gb_used'])
|
||||
|
||||
# Now let's hack the RequestSpec.is_bfv field to mimic migrating an
|
||||
# old instance created before RequestSpec.is_bfv was set in the API,
|
||||
# move the instance and verify that the RequestSpec.is_bfv is set
|
||||
# and the instance still reports the same DISK_GB allocations as during
|
||||
# the initial create.
|
||||
ctxt = context.get_admin_context()
|
||||
reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
|
||||
# Make sure it's set.
|
||||
self.assertTrue(reqspec.is_bfv)
|
||||
del reqspec.is_bfv
|
||||
reqspec.save()
|
||||
reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
|
||||
# Make sure it's not set.
|
||||
self.assertNotIn('is_bfv', reqspec)
|
||||
# Now migrate the instance to another host and check the request spec
|
||||
# and allocations after the migration.
|
||||
self._start_compute('host2')
|
||||
self.admin_api.post_server_action(server['id'], {'migrate': None})
|
||||
# Wait for the server to complete the cold migration.
|
||||
server = self._wait_for_state_change(
|
||||
self.admin_api, server, 'VERIFY_RESIZE')
|
||||
self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
|
||||
# Confirm the cold migration and check usage and the request spec.
|
||||
self.api.post_server_action(server['id'], {'confirmResize': None})
|
||||
reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
|
||||
# Make sure it's set.
|
||||
self.assertTrue(reqspec.is_bfv)
|
||||
allocs = self._get_allocations_by_server_uuid(server['id'])
|
||||
resources = list(allocs.values())[0]['resources']
|
||||
self.assertEqual(expected_usage, resources['DISK_GB'])
|
||||
|
||||
|
||||
class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase):
|
||||
"""Tests for requesting a server with required traits in Placement"""
|
||||
|
@ -1145,6 +1145,23 @@ class ComputeUtilsTestCase(test.NoDBTestCase):
|
||||
expected_result, compute_utils.may_have_ports_or_volumes(inst),
|
||||
vm_state)
|
||||
|
||||
def test_heal_reqspec_is_bfv_no_update(self):
    """heal_reqspec_is_bfv is a no-op when is_bfv is already set."""
    spec = objects.RequestSpec(is_bfv=False)
    # A NonCallableMock raises if the helper tries to recompute the
    # flag, proving the early-return path is taken.
    patcher = mock.patch.object(compute_utils, 'is_volume_backed_instance',
                                new_callable=mock.NonCallableMock)
    with patcher:
        compute_utils.heal_reqspec_is_bfv(
            self.context, spec, mock.sentinel.instance)
|
||||
|
||||
@mock.patch('nova.objects.RequestSpec.save')
def test_heal_reqspec_is_bfv_with_update(self, mock_save):
    """heal_reqspec_is_bfv sets and saves is_bfv when it is missing."""
    spec = objects.RequestSpec()
    # Pretend the instance is volume-backed so the healed flag is True.
    patcher = mock.patch.object(compute_utils, 'is_volume_backed_instance',
                                return_value=True)
    with patcher:
        compute_utils.heal_reqspec_is_bfv(
            self.context, spec, mock.sentinel.instance)
    self.assertTrue(spec.is_bfv)
    mock_save.assert_called_once_with()
|
||||
|
||||
|
||||
class ServerGroupTestCase(test.TestCase):
|
||||
def setUp(self):
|
||||
|
@ -58,6 +58,9 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
||||
self.migration = objects.Migration()
|
||||
self.fake_spec = objects.RequestSpec()
|
||||
self._generate_task()
|
||||
_p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
|
||||
self.heal_reqspec_is_bfv_mock = _p.start()
|
||||
self.addCleanup(_p.stop)
|
||||
|
||||
def _generate_task(self):
|
||||
self.task = live_migrate.LiveMigrationTask(self.context,
|
||||
@ -119,6 +122,10 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
||||
self.migration)
|
||||
else:
|
||||
m_alloc.assert_not_called()
|
||||
# When the task is executed with a destination it means the host is
|
||||
# being forced and we don't call the scheduler, so we don't need to
|
||||
# heal the request spec.
|
||||
self.heal_reqspec_is_bfv_mock.assert_not_called()
|
||||
|
||||
def test_execute_with_destination_old_school(self):
|
||||
self.test_execute_with_destination(new_mode=False)
|
||||
@ -342,6 +349,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
||||
|
||||
mock_setup.assert_called_once_with(self.context, self.fake_spec)
|
||||
mock_reset.assert_called_once_with()
|
||||
self.heal_reqspec_is_bfv_mock.assert_called_once_with(
|
||||
self.context, self.fake_spec, self.instance)
|
||||
mock_select.assert_called_once_with(self.context, self.fake_spec,
|
||||
[self.instance.uuid], return_objects=True, return_alternates=False)
|
||||
mock_check.assert_called_once_with('host1')
|
||||
@ -374,6 +383,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
||||
|
||||
get_image.assert_called_once_with(self.instance.system_metadata)
|
||||
setup_ig.assert_called_once_with(self.context, another_spec)
|
||||
self.heal_reqspec_is_bfv_mock.assert_called_once_with(
|
||||
self.context, another_spec, self.instance)
|
||||
select_dest.assert_called_once_with(self.context, another_spec,
|
||||
[self.instance.uuid], return_objects=True,
|
||||
return_alternates=False)
|
||||
|
@ -50,6 +50,9 @@ class MigrationTaskTestCase(test.NoDBTestCase):
|
||||
'hosts': [['host1', 'node1']]}}
|
||||
self.reservations = []
|
||||
self.clean_shutdown = True
|
||||
_p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
|
||||
self.heal_reqspec_is_bfv_mock = _p.start()
|
||||
self.addCleanup(_p.stop)
|
||||
|
||||
def _generate_task(self):
|
||||
return migrate.MigrationTask(self.context, self.instance, self.flavor,
|
||||
@ -131,6 +134,8 @@ class MigrationTaskTestCase(test.NoDBTestCase):
|
||||
|
||||
task.execute()
|
||||
|
||||
self.heal_reqspec_is_bfv_mock.assert_called_once_with(
|
||||
self.context, self.request_spec, self.instance)
|
||||
sig_mock.assert_called_once_with(self.context, self.request_spec)
|
||||
task.scheduler_client.select_destinations.assert_called_once_with(
|
||||
self.context, self.request_spec, [self.instance.uuid],
|
||||
|
@ -343,6 +343,9 @@ class _BaseTaskTestCase(object):
|
||||
fake_deserialize_context)
|
||||
|
||||
self.useFixture(fixtures.SpawnIsSynchronousFixture())
|
||||
_p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
|
||||
self.heal_reqspec_is_bfv_mock = _p.start()
|
||||
self.addCleanup(_p.stop)
|
||||
|
||||
def _prepare_rebuild_args(self, update_args=None):
|
||||
# Args that don't get passed in to the method but do get passed to RPC
|
||||
@ -1034,6 +1037,8 @@ class _BaseTaskTestCase(object):
|
||||
reset_forced_destinations.assert_called_once_with()
|
||||
from_primitives.assert_called_once_with(self.context, request_spec,
|
||||
filter_properties)
|
||||
self.heal_reqspec_is_bfv_mock.assert_called_once_with(
|
||||
self.context, fake_spec, test.MatchType(objects.Instance))
|
||||
sched_instances.assert_called_once_with(self.context, fake_spec,
|
||||
[instance.uuid], return_alternates=False)
|
||||
self.assertEqual(cell_mapping,
|
||||
@ -1297,6 +1302,8 @@ class _BaseTaskTestCase(object):
|
||||
obj_base.obj_to_primitive(inst_obj.image_meta), [inst_obj])
|
||||
fp_mock.assert_called_once_with(self.context, request_spec,
|
||||
filter_properties)
|
||||
self.heal_reqspec_is_bfv_mock.assert_called_once_with(
|
||||
self.context, fake_spec, inst_obj)
|
||||
select_dest_mock.assert_called_once_with(self.context, fake_spec,
|
||||
inst_uuids, return_objects=True, return_alternates=False)
|
||||
compute_args['host'] = expected_host
|
||||
|
Loading…
x
Reference in New Issue
Block a user