Add --instance option to heal_allocations
This resolves one of the TODOs in the heal_allocations CLI by adding an --instance option to the command which, when specified, will process just the single instance given.

Change-Id: Icf57f217f03ac52b1443addc34aa5128661a8554
parent ded3e4d900
commit c92b297896

Changed paths:
doc/source/cli
nova
releasenotes/notes
@@ -330,7 +330,7 @@ Nova Cells v2
 Placement
 ~~~~~~~~~

-``nova-manage placement heal_allocations [--max-count <max_count>] [--verbose] [--dry-run]``
+``nova-manage placement heal_allocations [--max-count <max_count>] [--verbose] [--dry-run] [--instance <instance_uuid>]``

 Iterates over non-cell0 cells looking for instances which do not have
 allocations in the Placement service and which are not undergoing a task
 state transition. For each instance found, allocations are created against
@@ -352,6 +352,10 @@ Placement
 Specify ``--dry-run`` to print output but not commit any changes. The
 return code should be 4. *(Since 20.0.0 Train)*

+Specify ``--instance`` to process a specific instance given its UUID. If
+specified the ``--max-count`` option has no effect.
+*(Since 20.0.0 Train)*
+
 This command requires that the ``[api_database]/connection`` and
 ``[placement]`` configuration options are set. Placement API >= 1.28 is
 required.
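As an illustrative usage sketch (not part of the patch itself; <instance_uuid> is a placeholder), the new option is invoked like this:

    # Heal allocations for one instance only; --max-count is ignored here.
    $ nova-manage placement heal_allocations --verbose --instance <instance_uuid>

Based on the documentation and tests in this change, the return code is 0 when allocations were created, 4 when the instance needed no healing, and 127 when the instance is not mapped to a cell.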
@@ -1934,7 +1934,7 @@ class PlacementCommands(object):
                 instance=instance.uuid, provider=node_uuid)

     def _heal_instances_in_cell(self, ctxt, max_count, unlimited, output,
-                                placement, dry_run):
+                                placement, dry_run, instance_uuid):
         """Checks for instances to heal in a given cell.

         :param ctxt: cell-targeted nova.context.RequestContext
@@ -1946,6 +1946,7 @@ class PlacementCommands(object):
             to communicate with the Placement service API.
         :param dry_run: Process instances and print output but do not commit
             any changes.
+        :param instance_uuid: UUID of a specific instance to process.
         :return: Number of instances that had allocations created.
         :raises: nova.exception.ComputeHostNotFound if a compute node for a
             given instance cannot be found
@@ -1971,6 +1972,8 @@ class PlacementCommands(object):
         # automatically pick up where we left off without the user having
         # to pass it in (if unlimited is False).
         filters = {'deleted': False}
+        if instance_uuid:
+            filters['uuid'] = instance_uuid
         instances = objects.InstanceList.get_by_filters(
             ctxt, filters=filters, sort_key='created_at', sort_dir='asc',
             limit=max_count, expected_attrs=['flavor'])
@@ -1989,7 +1992,8 @@ class PlacementCommands(object):
             # don't include instances that already have allocations in the
             # max_count number, only the number of instances that have
             # successfully created allocations.
-            if not unlimited and num_processed == max_count:
+            # If a specific instance was requested we return here as well.
+            if (not unlimited and num_processed == max_count) or instance_uuid:
                 return num_processed

             # Use a marker to get the next page of instances in this cell.
@@ -2023,7 +2027,11 @@ class PlacementCommands(object):
     @args('--dry-run', action='store_true', dest='dry_run', default=False,
           help='Runs the command and prints output but does not commit any '
               'changes. The return code should be 4.')
-    def heal_allocations(self, max_count=None, verbose=False, dry_run=False):
+    @args('--instance', metavar='<instance_uuid>', dest='instance_uuid',
+          help='UUID of a specific instance to process. If specified '
+               '--max-count has no effect.')
+    def heal_allocations(self, max_count=None, verbose=False, dry_run=False,
+                         instance_uuid=None):
         """Heals instance allocations in the Placement service

         Return codes:
@@ -2039,7 +2047,6 @@ class PlacementCommands(object):
         # NOTE(mriedem): Thoughts on ways to expand this:
         # - allow passing a specific cell to heal
         # - allow filtering on enabled/disabled cells
-        # - allow passing a specific instance to heal
         # - add a force option to force allocations for instances which have
         #   task_state is not None (would get complicated during a migration);
         #   for example, this could cleanup ironic instances that have
@@ -2058,7 +2065,10 @@ class PlacementCommands(object):
         # count, should we have separate options to be specific, i.e. --total
         # and --batch-size? Then --batch-size defaults to 50 and --total
         # defaults to None to mean unlimited.
-        if max_count is not None:
+        if instance_uuid:
+            max_count = 1
+            unlimited = False
+        elif max_count is not None:
             try:
                 max_count = int(max_count)
             except ValueError:
@@ -2073,10 +2083,24 @@ class PlacementCommands(object):
             output(_('Running batches of %i until complete') % max_count)

         ctxt = context.get_admin_context()
-        cells = objects.CellMappingList.get_all(ctxt)
-        if not cells:
-            output(_('No cells to process.'))
-            return 4
+        # If we are going to process a specific instance, just get the cell
+        # it is in up front.
+        if instance_uuid:
+            try:
+                im = objects.InstanceMapping.get_by_instance_uuid(
+                    ctxt, instance_uuid)
+                cells = objects.CellMappingList(objects=[im.cell_mapping])
+            except exception.InstanceMappingNotFound:
+                print('Unable to find cell for instance %s, is it mapped? Try '
+                      'running "nova-manage cell_v2 verify_instance" or '
+                      '"nova-manage cell_v2 map_instances".' %
+                      instance_uuid)
+                return 127
+        else:
+            cells = objects.CellMappingList.get_all(ctxt)
+            if not cells:
+                output(_('No cells to process.'))
+                return 4

         placement = report.SchedulerReportClient()
         num_processed = 0
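A sketch of the recovery path suggested by the error message above (assuming the standard nova-manage cell_v2 syntax; the UUID is a placeholder): if heal_allocations exits with 127, check the instance mapping and retry.

    # Check that the instance is mapped to a cell, then retry the heal.
    $ nova-manage cell_v2 verify_instance --uuid <instance_uuid>
    $ nova-manage placement heal_allocations --instance <instance_uuid>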
@@ -2099,7 +2123,7 @@ class PlacementCommands(object):
                 try:
                     num_processed += self._heal_instances_in_cell(
                         cctxt, limit_per_cell, unlimited, output, placement,
-                        dry_run)
+                        dry_run, instance_uuid)
                 except exception.ComputeHostNotFound as e:
                     print(e.format_message())
                     return 2
@@ -2112,7 +2136,9 @@ class PlacementCommands(object):
             # don't include instances that already have allocations in the
             # max_count number, only the number of instances that have
             # successfully created allocations.
-            if num_processed == max_count:
+            # If a specific instance was provided then we'll just exit
+            # the loop and process it below (either return 4 or 0).
+            if num_processed == max_count and not instance_uuid:
                 output(_('Max count reached. Processed %s instances.')
                        % num_processed)
                 return 1
@@ -666,6 +666,59 @@ class TestNovaManagePlacementHealAllocations(
         self.assertIn('[dry-run] Create allocations for instance %s on '
                       'provider %s' % (server['id'], rp_uuid), output)

+    def test_heal_allocations_specific_instance(self):
+        """Tests the case that a specific instance is processed and only that
+        instance even though there are two which require processing.
+        """
+        # Create one that we won't process.
+        self._boot_and_assert_no_allocations(
+            self.flavor, 'cell1')
+        # Create another that we will process specifically.
+        server, _ = self._boot_and_assert_no_allocations(
+            self.flavor, 'cell1')
+        # First do a dry run to make sure two instances need processing.
+        result = self.cli.heal_allocations(
+            max_count=2, verbose=True, dry_run=True)
+        # Nothing changed so the return code should be 4.
+        self.assertEqual(4, result, self.output.getvalue())
+        output = self.output.getvalue()
+        self.assertIn('Found 2 candidate instances', output)
+
+        # Now run with our specific instance and it should be the only one
+        # processed. Also run with max_count specified to show it's ignored.
+        result = self.cli.heal_allocations(
+            max_count=10, verbose=True, instance_uuid=server['id'])
+        output = self.output.getvalue()
+        self.assertEqual(0, result, self.output.getvalue())
+        self.assertIn('Found 1 candidate instances', output)
+        self.assertIn('Processed 1 instances.', output)
+        # There shouldn't be any messages about running in batches.
+        self.assertNotIn('Running batches', output)
+        # There shouldn't be any message about max count reached.
+        self.assertNotIn('Max count reached.', output)
+
+        # Now run it again on the specific instance and it should be done.
+        result = self.cli.heal_allocations(
+            verbose=True, instance_uuid=server['id'])
+        output = self.output.getvalue()
+        self.assertEqual(4, result, self.output.getvalue())
+        self.assertIn('Found 1 candidate instances', output)
+        self.assertIn('Processed 0 instances.', output)
+        # There shouldn't be any message about max count reached.
+        self.assertNotIn('Max count reached.', output)
+
+        # Delete the instance mapping and make sure that results in an error
+        # when we run the command.
+        ctxt = context.get_admin_context()
+        im = objects.InstanceMapping.get_by_instance_uuid(ctxt, server['id'])
+        im.destroy()
+        result = self.cli.heal_allocations(
+            verbose=True, instance_uuid=server['id'])
+        output = self.output.getvalue()
+        self.assertEqual(127, result, self.output.getvalue())
+        self.assertIn('Unable to find cell for instance %s, is it mapped?' %
+                      server['id'], output)
+

 class TestNovaManagePlacementSyncAggregates(
         integrated_helpers.ProviderUsageBaseTestCase):
@@ -0,0 +1,6 @@
+---
+other:
+  - |
+    An ``--instance`` option has been added to the
+    ``nova-manage placement heal_allocations`` CLI which allows running the
+    command on a specific instance given its UUID.