Reset forced_destination before migration at a proper time

The reset was already in place, but it happened after the retry filter
was populated in the MigrationTask by the populate_retry call. This
patch moves the reset before the call to populate_retry so that
retries remain possible.

Change-Id: I8290e890a36cf5a8f409ab8a50e7c72f7ae15025
Closes-Bug: #1845291
Balazs Gibizer 2019-09-24 18:03:33 +02:00 committed by Eric Fried
parent 167e593ed5
commit 64b99bd853
2 changed files with 12 additions and 19 deletions
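
To make the ordering issue concrete, here is a minimal, runnable Python
sketch of the behaviour the commit message describes. It is illustrative
only: populate_retry below merely mimics the idea that the scheduler
utility skips retry bookkeeping while a single forced host/node is still
present, and reset_forced_destinations stands in for
RequestSpec.reset_forced_destinations(); neither is nova's actual
implementation.

def populate_retry(filter_properties, instance_uuid, max_attempts=3):
    # Illustrative: a single forced host/node disables re-scheduling, so
    # no 'retry' entry is added to the filter properties.
    force_hosts = filter_properties.get('force_hosts', [])
    force_nodes = filter_properties.get('force_nodes', [])
    if max_attempts == 1 or len(force_hosts) == 1 or len(force_nodes) == 1:
        return
    filter_properties['retry'] = {'num_attempts': 1, 'hosts': []}


def reset_forced_destinations(filter_properties):
    # Illustrative stand-in: drop the forced destination from the request.
    filter_properties.pop('force_hosts', None)
    filter_properties.pop('force_nodes', None)


# Old order (the bug): populate_retry still sees the forced host, so
# retries are disabled for a server that was created with a forced host.
props = {'force_hosts': ['compute1']}
populate_retry(props, 'fake-instance-uuid')
reset_forced_destinations(props)
assert 'retry' not in props

# New order (this patch): reset first, then populate_retry enables retries.
props = {'force_hosts': ['compute1']}
reset_forced_destinations(props)
populate_retry(props, 'fake-instance-uuid')
assert props['retry'] == {'num_attempts': 1, 'hosts': []}

With the reset applied before populate_retry, the retry information is
populated and the conductor is able to pick another destination after a
failed move.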

@@ -281,6 +281,13 @@ class MigrationTask(base.TaskBase):
             raise exception.MaxRetriesExceeded(reason=reason)

     def _execute(self):
+        # NOTE(sbauza): Force_hosts/nodes needs to be reset if we want to make
+        # sure that the next destination is not forced to be the original host.
+        # This needs to be done before the populate_retry call otherwise
+        # retries will be disabled if the server was created with a forced
+        # host/node.
+        self.request_spec.reset_forced_destinations()
+
         # TODO(sbauza): Remove once all the scheduler.utils methods accept a
         # RequestSpec object in the signature.
         legacy_props = self.request_spec.to_legacy_filter_properties_dict()
@@ -293,11 +300,6 @@ class MigrationTask(base.TaskBase):
         scheduler_utils.populate_retry(legacy_props,
                                        self.instance.uuid)

-        # NOTE(sbauza): Force_hosts/nodes needs to be reset
-        # if we want to make sure that the next destination
-        # is not forced to be the original host
-        self.request_spec.reset_forced_destinations()
-
         port_res_req = self.network_api.get_requested_resource_for_instance(
             self.context, self.instance.uuid)
         # NOTE(gibi): When cyborg or other module wants to handle similar

@@ -68,20 +68,11 @@ class ForcedHostMissingReScheduleTestCase(

         # We expect that the instance re-scheduled but successfully ended
         # up on the second destination host.
-        # FIXME(gibi): this is bug 1845291 as nova does not try to
-        # re-schedule. The forced host was not cleared from the
-        # RequestSpec before the MigrationTask populated the retry
-        # information for the re-scheduling.
-        # self._wait_for_server_parameter(
-        #     self.api, server,
-        #     {
-        #         'OS-EXT-STS:task_state': None,
-        #         'status': 'VERIFY_RESIZE'})
-        # Instead the server goes to ERROR state
         self._wait_for_server_parameter(
-            self.api, server, {'status': 'ERROR'})
+            self.api, server,
+            {
+                'OS-EXT-STS:task_state': None,
+                'status': 'VERIFY_RESIZE'})

         # we ensure that there was a failed and then a successful claim call
-        # FIXME(gibi): this is bug 1845291 as there was no second claim call
-        # self.assertEqual(2, len(claim_calls))
-        self.assertEqual(1, len(claim_calls))
+        self.assertEqual(2, len(claim_calls))
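
As a rough illustration of why the test's expectations change from one
claim call and ERROR to two claim calls and VERIFY_RESIZE, here is a toy
re-schedule loop. It is purely illustrative and uses none of nova's
scheduler, compute, or test fixtures; migrate_with_retries and its
parameters are hypothetical names.

def migrate_with_retries(hosts, claim, retries_enabled):
    # Try each candidate host; stop after the first claim unless retries
    # are enabled.
    claim_calls = []
    for host in hosts:
        claim_calls.append(host)
        if claim(host):
            return 'VERIFY_RESIZE', claim_calls
        if not retries_enabled:
            break
    return 'ERROR', claim_calls


def claim(host):
    # The first destination rejects the claim, the second accepts it.
    return host == 'host2'


# Before the fix: the forced host left retries disabled, so there is a
# single claim call and the server ends up in ERROR.
status, calls = migrate_with_retries(['host1', 'host2'], claim,
                                     retries_enabled=False)
assert (status, len(calls)) == ('ERROR', 1)

# After the fix: retries stay enabled, the second host is claimed and the
# resize reaches VERIFY_RESIZE.
status, calls = migrate_with_retries(['host1', 'host2'], claim,
                                     retries_enabled=True)
assert (status, len(calls)) == ('VERIFY_RESIZE', 2)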