Prioritise resource deletion over creation

Because of quotas, there are times when creating a resource and then
deleting another resource may fail where doing it in the reverse order
would work, even though the resources are independent of one another.

When enqueueing 'check_resource' messages, send those for cleanup nodes
prior to those for update nodes. This means that all things being equal
(i.e. no dependency relationship), deletions will be started first. It
doesn't guarantee success in every case where quotas would permit it,
since only a dependency relationship will cause Heat to wait for the
deletion to complete before starting creation, but it is a risk-free way
to give us a better chance of succeeding.

Change-Id: I9727d906cd0ad8c4bf9c5e632a47af6d7aad0c72
Partial-Bug: #1713900
This commit is contained in:
Zane Bitter 2017-09-20 14:24:46 -04:00 committed by Thomas Herve
parent 6ddc050e2f
commit 09d74ffa3c
3 changed files with 12 additions and 10 deletions

View File

@ -249,7 +249,8 @@ class CheckResource(object):
try:
input_forward_data = None
for req_node in deps.required_by(graph_key):
for req_node in sorted(deps.required_by(graph_key),
key=lambda n: n.is_update):
input_data = _get_input_data(req_node, input_forward_data)
if req_node.is_update:
input_forward_data = input_data

View File

@ -1346,10 +1346,11 @@ class Stack(collections.Mapping):
self.context, self.id, self.current_traversal, True, self.id)
leaves = set(self.convergence_dependencies.leaves())
if not any(leaves):
if not leaves:
self.mark_complete()
else:
for rsrc_id, is_update in self.convergence_dependencies.leaves():
for rsrc_id, is_update in sorted(leaves,
key=lambda n: n.is_update):
if is_update:
LOG.info("Triggering resource %s for update", rsrc_id)
else:

View File

@ -67,7 +67,7 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
self.assertEqual({'edges': [[[1, True], None]]}, stack_db.current_deps)
leaves = stack.convergence_dependencies.leaves()
expected_calls = []
for rsrc_id, is_update in leaves:
for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
@ -123,7 +123,7 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
leaves = stack.convergence_dependencies.leaves()
expected_calls = []
for rsrc_id, is_update in leaves:
for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
@ -263,15 +263,15 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
leaves = stack.convergence_dependencies.leaves()
expected_calls = []
for rsrc_id, is_update in leaves:
for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
{'input_data': {}},
is_update, None, False))
leaves = curr_stack.convergence_dependencies.leaves()
for rsrc_id, is_update in leaves:
leaves = set(curr_stack.convergence_dependencies.leaves())
for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
curr_stack.context, rsrc_id, curr_stack.current_traversal,
@ -346,7 +346,7 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
leaves = stack.convergence_dependencies.leaves()
expected_calls = []
for rsrc_id, is_update in leaves:
for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
@ -354,7 +354,7 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
is_update, None, False))
leaves = curr_stack.convergence_dependencies.leaves()
for rsrc_id, is_update in leaves:
for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
curr_stack.context, rsrc_id, curr_stack.current_traversal,