ResourceGroup: add removal_policies property

To cater for the requirement of TripleO (and others) to specify a
victim for removal from a ResourceGroup ("eliding" in merge.py speak),
add a new property which enables specific indexes to be passed on
update, forcing removal of those specific resources.

In the future we may add additional ways to specify the removal
policy, so this interface is a list of policies, with each policy being
a map taking an optional list of arguments.

A 'resource_list' policy is added now, which takes one of the following:

1. The resource name (as specified in the nested stack, obtainable e.g. via
heat resource-list --nested-depth $n <parent stack>, or by doing
resource-list directly using the nested stack ID obtainable via the
physical_resource_id of the ResourceGroup resource).

2. The nested resource refid - this is typically (but not always,
particularly for AWS resources) the resource physical_resource_id,
and is obtainable via either get_resource or the "refs" attribute
of the ResourceGroup.

Note that once removed, the resource name is never reused, so the
%index%, where used, will always increment on replacement.
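
For illustration, a minimal sketch of how the property might be used on
update, in the same dict-style template form as the tests below (the
group name and nested resource type are illustrative only):

    group1 = {
        "type": "OS::Heat::ResourceGroup",
        "properties": {
            "count": 2,
            "resource_def": {"type": "OS::Heat::RandomString"},
            # Force removal of the nested resources named '1' and '2'; a
            # refid (e.g. a physical_resource_id) may be given instead.
            "removal_policies": [{"resource_list": ["1", "2"]}],
        },
    }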

Change-Id: Icbc76b54bae3eb39efdaa704b036b1bbb94db206
blueprint: resourcegroup-force-remove
Steven Hardy 2014-10-14 17:57:45 +01:00
parent a6e9a94c6c
commit c209a9644d
2 changed files with 272 additions and 3 deletions


@@ -13,6 +13,7 @@
import collections
import copy
import six
from heat.common import exception
from heat.common.i18n import _
@@ -69,9 +70,9 @@ class ResourceGroup(stack_resource.StackResource):
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
-COUNT, INDEX_VAR, RESOURCE_DEF,
+COUNT, INDEX_VAR, RESOURCE_DEF, REMOVAL_POLICIES
) = (
-'count', 'index_var', 'resource_def',
+'count', 'index_var', 'resource_def', 'removal_policies'
)
_RESOURCE_DEF_KEYS = (
@@ -80,6 +81,12 @@ class ResourceGroup(stack_resource.StackResource):
'type', 'properties',
)
_REMOVAL_POLICIES_KEYS = (
REMOVAL_RSRC_LIST,
) = (
'resource_list',
)
ATTRIBUTES = (
REFS, ATTR_ATTRIBUTES,
) = (
@@ -126,6 +133,36 @@ class ResourceGroup(stack_resource.StackResource):
},
required=True
),
REMOVAL_POLICIES: properties.Schema(
properties.Schema.LIST,
_('Policies for removal of resources on update'),
schema=properties.Schema(
properties.Schema.MAP,
_('Policy to be processed when doing an update which '
'requires removal of specific resources.'),
schema={
REMOVAL_RSRC_LIST: properties.Schema(
properties.Schema.LIST,
_('List of resources to be removed '
'when doing an update which requires removal of '
'specific resources. '
'The resource may be specified several ways: '
'(1) The resource name, as in the nested stack, '
'(2) The resource reference returned from '
'get_resource in a template, as available via '
'the \'refs\' attribute. '
'Note this is destructive on update when specified, '
'even if the count is not being reduced; once '
'a resource name is removed, its name is never '
'reused in subsequent updates.'
),
default=[]
),
},
),
update_allowed=True,
default=[]
),
}
attributes_schema = {
@@ -150,8 +187,51 @@ class ResourceGroup(stack_resource.StackResource):
self.stack)
res_inst.validate()
def _name_blacklist(self):
"""Resolve the remove_policies to names for removal."""
# To avoid reusing names after removal, we store a comma-separated
# blacklist in the resource data
db_rsrc_names = self.data().get('name_blacklist')
if db_rsrc_names:
current_blacklist = db_rsrc_names.split(',')
else:
current_blacklist = []
# Now we iterate over the removal policies, and update the blacklist
# with any additional names
rsrc_names = list(current_blacklist)
for r in self.properties[self.REMOVAL_POLICIES]:
if self.REMOVAL_RSRC_LIST in r:
# Tolerate string or int list values
for n in r[self.REMOVAL_RSRC_LIST]:
str_n = six.text_type(n)
if str_n in self.nested() and str_n not in rsrc_names:
rsrc_names.append(str_n)
continue
rsrc = self.nested().resource_by_refid(str_n)
if rsrc and str_n not in rsrc_names:
rsrc_names.append(rsrc.name)
# If the blacklist has changed, update the resource data
if rsrc_names != current_blacklist:
self.data_set('name_blacklist', ','.join(rsrc_names))
return rsrc_names
def _resource_names(self):
-    return [str(n) for n in range(self.properties.get(self.COUNT))]
+    name_blacklist = self._name_blacklist()
+    req_count = self.properties.get(self.COUNT)
+
+    def gen_names():
+        count = 0
+        index = 0
+        while count < req_count:
+            if str(index) not in name_blacklist:
+                yield str(index)
+                count += 1
+            index += 1
+
+    return list(gen_names())
def handle_create(self):
names = self._resource_names()
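
As an aside (not part of the diff above), a minimal standalone sketch of
the name-generation behaviour: blacklisted index names are skipped while
the group still yields 'count' names, so indexes only ever move forward
on replacement. The helper name is hypothetical:

    def gen_resource_names(count, blacklist):
        # Return 'count' index-style names, skipping any blacklisted ones.
        names = []
        index = 0
        while len(names) < count:
            if str(index) not in blacklist:
                names.append(str(index))
            index += 1
        return names

    # With names '1', '2' and '3' blacklisted and count reduced to 2, as in
    # the tests below, the surviving names are ['0', '4'].
    assert gen_resource_names(2, {'1', '2', '3'}) == ['0', '4']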


@@ -92,6 +92,24 @@ template_repl = {
}
}
template_repl2 = {
"heat_template_version": "2013-05-23",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "dummy.resource",
"properties": {
"Foo": "Bar%index%"
}
}
}
}
}
}
template_attr = {
"heat_template_version": "2014-10-16",
"resources": {
@@ -379,6 +397,177 @@ class ResourceGroupTest(common.HeatTestCase):
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '1'], sorted(resource_names))
def test_update_remove_resource_list_name(self):
"""Test update specifying victims."""
resg = self._create_dummy_stack()
self.assertEqual(2, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '1'], sorted(resource_names))
new_snip = copy.deepcopy(resg.t)
new_snip['Properties']['count'] = 5
scheduler.TaskRunner(resg.update, new_snip)()
self.stack = resg.nested()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(5, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '1', '2', '3', '4'], sorted(resource_names))
# Reduce by three, specifying the middle resources to be removed
reduce_snip = copy.deepcopy(resg.t)
reduce_snip['Properties']['count'] = 2
reduce_snip['Properties']['removal_policies'] = [{'resource_list':
['1', '2', '3']}]
scheduler.TaskRunner(resg.update, reduce_snip)()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(2, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '4'], sorted(resource_names))
# Increase to 3 again, leaving the force-remove policy in place; the removed indexes are skipped
increase_snip = copy.deepcopy(resg.t)
increase_snip['Properties']['count'] = 3
scheduler.TaskRunner(resg.update, increase_snip)()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(3, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '4', '5'], sorted(resource_names))
# Increase to 5, clearing the removal_policies; the blacklist should be
# maintained so no resource names are reused
increase_snip2 = copy.deepcopy(resg.t)
increase_snip2['Properties']['count'] = 5
del(increase_snip2['Properties']['removal_policies'])
scheduler.TaskRunner(resg.update, increase_snip2)()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(5, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '4', '5', '6', '7'], sorted(resource_names))
# Reduce by 3, only passing two resource_list victims; the remaining
# removal should be the largest-numbered/newest, as normal
reduce_snip = copy.deepcopy(resg.t)
reduce_snip['Properties']['count'] = 2
reduce_snip['Properties']['removal_policies'] = [{'resource_list':
['4', '5']}]
scheduler.TaskRunner(resg.update, reduce_snip)()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(2, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '6'], sorted(resource_names))
def test_update_remove_resource_list_refid(self):
"""Test update specifying victims."""
resg = self._create_dummy_stack()
self.assertEqual(2, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '1'], sorted(resource_names))
# Update to remove a specific resource ref without affecting the size;
# we should remove resource 0 and build a replacement
r_id = resg.nested()['0'].FnGetRefId()
self.assertIsNotNone(r_id)
reduce_snip = copy.deepcopy(resg.t)
reduce_snip['Properties']['count'] = 2
reduce_snip['Properties']['removal_policies'] = [
{'resource_list': [r_id]}]
scheduler.TaskRunner(resg.update, reduce_snip)()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(2, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['1', '2'], sorted(resource_names))
self.assertIsNone(resg.nested().resource_by_refid(r_id))
# We now should not do anything on subsequent updates
reduce_snip = copy.deepcopy(resg.t)
del(reduce_snip['Properties']['removal_policies'])
scheduler.TaskRunner(resg.update, reduce_snip)()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(2, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['1', '2'], sorted(resource_names))
self.assertIsNone(resg.nested().resource_by_refid(r_id))
def test_update_remove_add_index_replacement(self):
"""Test update removal/add indexes are consistent."""
resg = self._create_dummy_stack(template_data=template_repl2)
self.assertEqual(2, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '1'], sorted(resource_names))
new_snip = copy.deepcopy(resg.t)
new_snip['Properties']['count'] = 5
scheduler.TaskRunner(resg.update, new_snip)()
self.stack = resg.nested()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(5, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '1', '2', '3', '4'], sorted(resource_names))
for r in ['0', '1', '2', '3', '4']:
prop_val = 'Bar%s' % r
self.assertEqual(prop_val, resg.nested()[r].properties.get('Foo'))
# Reduce by three, specifying the middle resources to be removed
reduce_snip = copy.deepcopy(resg.t)
reduce_snip['Properties']['count'] = 2
reduce_snip['Properties']['removal_policies'] = [{'resource_list':
['1', '2', '3']}]
scheduler.TaskRunner(resg.update, reduce_snip)()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(2, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '4'], sorted(resource_names))
self.assertEqual('Bar0', resg.nested()['0'].properties.get('Foo'))
self.assertEqual('Bar4', resg.nested()['4'].properties.get('Foo'))
# Increase to 3 again, leaving the force-remove policy in place; the removed indexes are skipped
increase_snip = copy.deepcopy(resg.t)
increase_snip['Properties']['count'] = 3
scheduler.TaskRunner(resg.update, increase_snip)()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(3, len(resg.nested()))
resource_names = [r.name for r in resg.nested().iter_resources()]
self.assertEqual(['0', '4', '5'], sorted(resource_names))
self.assertEqual('Bar0', resg.nested()['0'].properties.get('Foo'))
self.assertEqual('Bar4', resg.nested()['4'].properties.get('Foo'))
self.assertEqual('Bar5', resg.nested()['5'].properties.get('Foo'))
def test_invalid_removal_policies_nolist(self):
"""Test that error raised for malformed removal_policies."""
tmp = copy.deepcopy(template)
grp_props = tmp['resources']['group1']['properties']
grp_props['removal_policies'] = 'notallowed'
stack = utils.parse_stack(tmp)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
errstr = 'removal_policies "\'notallowed\'" is not a list'
self.assertIn(errstr, six.text_type(exc))
def test_invalid_removal_policies_nomap(self):
"""Test that error raised for malformed removal_policies."""
tmp = copy.deepcopy(template)
grp_props = tmp['resources']['group1']['properties']
grp_props['removal_policies'] = ['notallowed']
stack = utils.parse_stack(tmp)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
errstr = '"notallowed" is not a map'
self.assertIn(errstr, six.text_type(exc))
def test_aggregate_attribs(self):
"""
Test attribute aggregation and that we mimic the nested resource's