Merge "Reset forced_destination before migration at a proper time"
commit 09077c20fd
@@ -286,6 +286,13 @@ class MigrationTask(base.TaskBase):
         raise exception.MaxRetriesExceeded(reason=reason)

     def _execute(self):
+        # NOTE(sbauza): Force_hosts/nodes needs to be reset if we want to make
+        # sure that the next destination is not forced to be the original host.
+        # This needs to be done before the populate_retry call otherwise
+        # retries will be disabled if the server was created with a forced
+        # host/node.
+        self.request_spec.reset_forced_destinations()
+
         # TODO(sbauza): Remove once all the scheduler.utils methods accept a
         # RequestSpec object in the signature.
         legacy_props = self.request_spec.to_legacy_filter_properties_dict()
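
The ordering matters because populate_retry() skips setting up retry bookkeeping when the legacy filter properties still carry a forced host or node, which is exactly the state of a server that was created with a forced host. Below is a minimal sketch of that interaction, using a simplified stand-in for scheduler_utils.populate_retry (the real helper reads its retry limit from configuration and records more scheduling state):

    # Simplified stand-in for scheduler_utils.populate_retry; not the real
    # nova code, just the part of its behaviour relevant to this change.
    def populate_retry_sketch(filter_properties, instance_uuid, max_attempts=3):
        force_hosts = filter_properties.get('force_hosts', [])
        force_nodes = filter_properties.get('force_nodes', [])
        if max_attempts == 1 or force_hosts or force_nodes:
            # A forced destination disables re-scheduling: without a 'retry'
            # entry, a failed claim on the chosen host is fatal.
            return
        filter_properties['retry'] = {'num_attempts': 1, 'hosts': []}

    # Old order: the forced host is still in the legacy properties when
    # populate_retry runs, so re-scheduling stays disabled.
    legacy_props = {'force_hosts': ['original-host']}
    populate_retry_sketch(legacy_props, 'fake-instance-uuid')
    assert 'retry' not in legacy_props

    # New order: reset_forced_destinations() has already cleared the forced
    # host before the legacy properties are built, so retries are enabled.
    legacy_props = {}
    populate_retry_sketch(legacy_props, 'fake-instance-uuid')
    assert 'retry' in legacy_props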
@@ -298,11 +305,6 @@ class MigrationTask(base.TaskBase):
         scheduler_utils.populate_retry(legacy_props,
                                        self.instance.uuid)

-        # NOTE(sbauza): Force_hosts/nodes needs to be reset
-        # if we want to make sure that the next destination
-        # is not forced to be the original host
-        self.request_spec.reset_forced_destinations()
-
         port_res_req = self.network_api.get_requested_resource_for_instance(
             self.context, self.instance.uuid)
         # NOTE(gibi): When cyborg or other module wants to handle similar
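
For context, reset_forced_destinations() clears the forced destination fields on the RequestSpec so that to_legacy_filter_properties_dict() no longer emits force_hosts/force_nodes. A rough, simplified sketch of that behaviour follows; the class here is an illustration only, not the real nova.objects.RequestSpec:

    # Simplified stand-in for the RequestSpec object, showing only the part
    # relevant here: clearing force_hosts/force_nodes before the spec is
    # turned into legacy filter properties.
    class RequestSpecSketch(object):
        def __init__(self, force_hosts=None, force_nodes=None):
            self.force_hosts = force_hosts
            self.force_nodes = force_nodes

        def reset_forced_destinations(self):
            self.force_hosts = None
            self.force_nodes = None

        def to_legacy_filter_properties_dict(self):
            props = {}
            if self.force_hosts:
                props['force_hosts'] = self.force_hosts
            if self.force_nodes:
                props['force_nodes'] = self.force_nodes
            return props

    spec = RequestSpecSketch(force_hosts=['original-host'])
    spec.reset_forced_destinations()
    # After the reset the legacy properties carry no forced destination, so
    # populate_retry() can set up re-scheduling normally.
    assert spec.to_legacy_filter_properties_dict() == {}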
@@ -68,20 +68,11 @@ class ForcedHostMissingReScheduleTestCase(

         # We expect that the instance re-scheduled but successfully ended
         # up on the second destination host.
-        # FIXME(gibi): this is bug 1845291 as nova does not try to
-        # re-schedule. The forced host was not cleared from the
-        # RequestSpec before the MigrationTask populated the retry
-        # information for the re-scheduling.
-        # self._wait_for_server_parameter(
-        #     self.api, server,
-        #     {
-        #         'OS-EXT-STS:task_state': None,
-        #         'status': 'VERIFY_RESIZE'})
-        # Instead the server goes to ERROR state
-        self._wait_for_server_parameter(
-            self.api, server, {'status': 'ERROR'})
+        self._wait_for_server_parameter(
+            self.api, server,
+            {
+                'OS-EXT-STS:task_state': None,
+                'status': 'VERIFY_RESIZE'})

         # we ensure that there was a failed and then a successful claim call
-        # FIXME(gibi): this is bug 1845291 as there was no second claim call
-        # self.assertEqual(2, len(claim_calls))
-        self.assertEqual(1, len(claim_calls))
+        self.assertEqual(2, len(claim_calls))
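
The claim_calls assertion depends on the test recording every resource claim attempt and arranging for the first one to fail; with re-scheduling restored, the failed claim on the formerly forced host is followed by a successful claim on the alternate host. A hypothetical, self-contained illustration of that pattern (the names and the retry loop are invented for the example, not taken from the test):

    # Hypothetical illustration of the failing-then-succeeding claim pattern
    # the test asserts on; claim_calls mirrors the list the test keeps.
    claim_calls = []

    def fake_claim(host):
        claim_calls.append(host)
        if len(claim_calls) == 1:
            # The first (previously forced) host rejects the claim.
            raise RuntimeError('claim failed on %s' % host)
        return 'claimed on %s' % host

    # A re-schedule tries the next candidate host after the failure, so two
    # claim calls are recorded, matching assertEqual(2, len(claim_calls)).
    for host in ('host1', 'host2'):
        try:
            fake_claim(host)
            break
        except RuntimeError:
            continue

    assert len(claim_calls) == 2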