Use the member_definitions library func in ResourceGroup rolling updates

Use the scaling library function for generating the intermediate template
stages during a rolling update, instead of maintaining a separate
implementation for ResourceGroup.

One improvement here: if we are scaling down at the same time as doing
a rolling update, and we already have more members than we need to
maintain the minimum in service, we now scale down to the capacity we
actually need immediately (see the sketch below).

Change-Id: Ied7abb0a8d9ccfbda66d2dbee75057643ba7a169
Partially-Implements: blueprint scaling-group-common
Author:    Zane Bitter (2015-08-27 15:10:46 -04:00)
Committer: Steve Baker
Parent:    091de16ae1
Commit:    09bc1d2a95

2 changed files with 48 additions and 22 deletions
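
The scale-down behaviour described in the commit message can be pictured with a
small standalone sketch. This is plain Python, not Heat code; the helper name,
its signature and the numbers are made up purely for illustration.

# Standalone sketch (hypothetical helper): batch capacities when a rolling
# update coincides with a scale-down.
def batch_plan(current, target, min_in_service, batch_size):
    """Yield (capacity, members_updated_in_batch) for each stage."""
    # If we already hold more members than we need to keep the minimum in
    # service, drop straight to the capacity we actually need.
    capacity = max(target, min_in_service) if current > target else current
    updated = 0
    while updated < target:
        updates = min(batch_size, target - updated)
        yield capacity, updates
        updated += updates

print(list(batch_plan(current=6, target=3, min_in_service=2, batch_size=2)))
# [(3, 2), (3, 1)] -- capacity falls to 3 in the first stage instead of
# lingering at 6 for the whole update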


@@ -30,6 +30,7 @@ from heat.engine import scheduler
from heat.engine import support
from heat.engine import template
from heat.scaling import rolling_update
from heat.scaling import template as scale_template

template_template = {
    "heat_template_version": "2015-04-30",
@@ -360,6 +361,12 @@ class ResourceGroup(stack_resource.StackResource):
                        self).check_update_complete(updater):
            yield

    def _run_update(self, total_capacity, max_updates, names, timeout):
        template = self._assemble_for_rolling_update(total_capacity,
                                                     max_updates,
                                                     names)
        return self._run_to_completion(template, timeout)

    def check_update_complete(self, checkers):
        for checker in checkers:
            if not checker.started():
@@ -455,16 +462,35 @@ class ResourceGroup(stack_resource.StackResource):
        child_template['resources'] = resources
        return child_template

    def _assemble_for_rolling_update(self, names, name_blacklist,
                                     include_all=False):
        old_resources = self._get_resources()
    def _assemble_for_rolling_update(self, total_capacity, max_updates,
                                     updated_names, include_all=False):
        name_blacklist = self._name_blacklist()

        valid_resources = [(n, d) for n, d in self._get_resources()
                           if n not in name_blacklist][:total_capacity]

        num_creating = max(total_capacity - len(valid_resources), 0)
        new_names = iter(updated_names[:num_creating])
        upd_names = updated_names[num_creating:]

        def replace_priority(res_item):
            name, defn = res_item
            try:
                return upd_names.index(name)
            except ValueError:
                return len(upd_names)

        old_resources = sorted(valid_resources, key=replace_priority)

        res_def = self._build_resource_definition(include_all)
        resources = scale_template.member_definitions(old_resources, res_def,
                                                      total_capacity,
                                                      max_updates,
                                                      lambda: next(new_names),
                                                      self._do_prop_replace)

        child_template = copy.deepcopy(template_template)
        resources = dict((k, v)
                         for k, v in old_resources if k not in name_blacklist)
        resources.update(dict((k, self._do_prop_replace(k, res_def))
                              for k in names))
        child_template['resources'] = resources
        child_template['resources'] = dict(resources)
        return child_template

    def _try_rolling_update(self):
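
In isolation, the ordering step in the new _assemble_for_rolling_update above
works like this. The sketch below is standalone Python with made-up member
names and definitions (not Heat objects); it only mirrors the filtering,
capping and sorting shown in the hunk, where members named in the current
batch sort to the front so they become the first candidates for replacement.

# Sketch of the replacement-ordering idea from the diff above, with made-up
# data: drop blacklisted members, cap at total_capacity, then sort the
# members being updated in this batch to the front.
existing = [('0', 'old'), ('1', 'old'), ('2', 'old'), ('3', 'old')]
name_blacklist = {'3'}
total_capacity = 3
updated_names = ['1', '2']          # members to be replaced in this batch

valid = [(n, d) for n, d in existing
         if n not in name_blacklist][:total_capacity]
num_creating = max(total_capacity - len(valid), 0)   # 0 here: nothing new
upd_names = updated_names[num_creating:]

def replace_priority(res_item):
    name, _defn = res_item
    try:
        return upd_names.index(name)    # batch members come first
    except ValueError:
        return len(upd_names)           # everyone else keeps relative order

print(sorted(valid, key=replace_priority))
# [('1', 'old'), ('2', 'old'), ('0', 'old')]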
@@ -527,9 +553,6 @@ class ResourceGroup(stack_resource.StackResource):
            while not duration.expired():
                yield

        # blacklisted names exiting and new
        name_blacklist = self._name_blacklist()

        # blacklist count existing
        num_blacklist = self._count_black_listed()
@@ -545,10 +568,9 @@ class ResourceGroup(stack_resource.StackResource):
        def tasks():
            for index, (curr_cap, max_upd, update_rsrcs) in enumerate(batches):
                yield scheduler.TaskRunner(
                    self._run_to_completion,
                    self._assemble_for_rolling_update(update_rsrcs,
                                                      name_blacklist),
                yield scheduler.TaskRunner(self._run_update,
                                           curr_cap, max_upd,
                                           update_rsrcs,
                                           update_timeout)

                if index < (len(batches) - 1) and pause_sec > 0:
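
The tasks() generator above runs one nested-stack update per batch and pauses
between consecutive batches. Stripped of Heat's cooperative scheduler, the
control flow is roughly the following sketch; the names are placeholders and
time.sleep stands in for the yielding pause task.

import time

def run_batches(batches, run_update, pause_sec):
    """Run one update per batch, sleeping between consecutive batches."""
    for index, (curr_cap, max_upd, update_rsrcs) in enumerate(batches):
        run_update(curr_cap, max_upd, update_rsrcs)
        if index < (len(batches) - 1) and pause_sec > 0:
            time.sleep(pause_sec)

# Example: three batches with a one-second pause between them.
run_batches([(3, 2, ['a', 'b']), (3, 2, ['c', 'd']), (3, 1, ['e'])],
            lambda cap, upd, names: print(cap, upd, names),
            pause_sec=1)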


@@ -245,7 +245,8 @@ class ResourceGroupTest(common.HeatTestCase):
        resg = resource_group.ResourceGroup('test', snip, stack)
        resg._nested = get_fake_nested_stack(['0', '1'])
        resg._build_resource_definition = mock.Mock(return_value=resource_def)
        self.assertEqual(expect, resg._assemble_for_rolling_update(['0'], []))
        self.assertEqual(expect, resg._assemble_for_rolling_update(2, 1,
                                                                   ['0']))

    def test_assemble_nested_rolling_update_none(self):
        expect = {
@@ -279,7 +280,7 @@ class ResourceGroupTest(common.HeatTestCase):
        resg = resource_group.ResourceGroup('test', snip, stack)
        resg._nested = get_fake_nested_stack(['0', '1'])
        resg._build_resource_definition = mock.Mock(return_value=resource_def)
        self.assertEqual(expect, resg._assemble_for_rolling_update([], []))
        self.assertEqual(expect, resg._assemble_for_rolling_update(2, 0, []))

    def test_index_var(self):
        stack = utils.parse_stack(template_repl)
@@ -1229,7 +1230,7 @@ class TestGetBatches(common.HeatTestCase):
        self.assertEqual(self.batches, batches)

    def test_assemble(self):
        resources = tuple((str(i), False) for i in range(self.init_cap + 1))
        resources = [(str(i), False) for i in range(self.init_cap + 1)]
        self.grp._build_resource_definition = mock.Mock(return_value=True)
        self.grp._get_resources = mock.Mock(return_value=resources)
@@ -1237,12 +1238,15 @@ class TestGetBatches(common.HeatTestCase):
        for size, max_upd, names in self.batches:
            template = self.grp._assemble_for_rolling_update(names, {'0'})
            template = self.grp._assemble_for_rolling_update(size,
                                                             max_upd,
                                                             names)
            res_dict = template['resources']

            expected_names = set(map(str,
                                     range(1, max(self.init_cap, size) + 1)))
            expected_names = set(map(str, range(1, size + 1)))
            self.assertEqual(expected_names, set(res_dict))

            updated = set(n for n, v in res_dict.items() if v is True)
            self.assertEqual(set(names), updated)

            resources[:] = sorted(res_dict.items(), key=lambda i: int(i[0]))