Rolling update support for Instance/AutoScalingGroup

This is the last part of the series to implement support
for AutoScaling UpdatePolicy.

Modified handle_update of InstanceGroup and AutoScalingGroup to
support rolling update if there is an update policy and when
there is a property difference in LaunchConfigurationName. A new
_replace method handles replacement of instances in
batches. Replacement of instances occurs prior to resize. The
_lb_reload is called appropriately for each batch. The resize
method is refactored to support underlying changes in
_create_template.

Change-Id: I8113a964fb6eb0ec16c2e3766d2771e64e1cf248
Implements: blueprint as-update-policy
This commit is contained in:
Winson Chan 2013-10-08 12:05:23 -07:00
parent f30ca4419c
commit b1dedae9e1
5 changed files with 1116 additions and 344 deletions

View File

@ -18,7 +18,9 @@ import copy
from heat.engine import resource
from heat.engine import signal_responder
from heat.common import short_id
from heat.common import exception
from heat.common import timeutils as iso8601utils
from heat.openstack.common import log as logging
from heat.openstack.common import timeutils
from heat.engine.properties import Properties
@ -120,20 +122,32 @@ class InstanceGroup(stack_resource.StackResource):
Add validation for update_policy
"""
super(InstanceGroup, self).validate()
if self.update_policy:
self.update_policy.validate()
policy_name = self.update_policy_schema.keys()[0]
if self.update_policy[policy_name]:
pause_time = self.update_policy[policy_name]['PauseTime']
if iso8601utils.parse_isoduration(pause_time) > 3600:
raise ValueError('Maximum PauseTime is 1 hour.')
def get_instance_names(self):
"""Get a list of resource names of the instances in this InstanceGroup.
Failed resources will be ignored.
"""
return sorted(x.name for x in self.get_instances())
return [r.name for r in self.get_instances()]
def get_instances(self):
"""Get a set of all the instance resources managed by this group."""
return [resource for resource in self.nested().itervalues()
if resource.state[1] != resource.FAILED]
"""Get a list of all the instance resources managed by this group.
Sort the list of instances first by created_time then by name.
"""
resources = []
if self.nested():
resources = [resource for resource in self.nested().itervalues()
if resource.status != resource.FAILED]
return sorted(resources, key=lambda r: (r.created_time, r.name))
def handle_create(self):
"""Create a nested stack and add the initial resources to it."""
@ -171,6 +185,14 @@ class InstanceGroup(stack_resource.StackResource):
self.stack.resolve_runtime_data,
self.name)
# Replace instances first if launch configuration has changed
if (self.update_policy['RollingUpdate'] and
'LaunchConfigurationName' in prop_diff):
policy = self.update_policy['RollingUpdate']
self._replace(int(policy['MinInstancesInService']),
int(policy['MaxBatchSize']),
policy['PauseTime'])
# Get the current capacity, we may need to adjust if
# Size has changed
if 'Size' in prop_diff:
@ -194,11 +216,7 @@ class InstanceGroup(stack_resource.StackResource):
def handle_delete(self):
return self.delete_nested()
def _create_template(self, num_instances):
"""
Create a template with a number of instance definitions based on the
launch configuration.
"""
def _get_instance_definition(self):
conf_name = self.properties['LaunchConfigurationName']
conf = self.stack.resource_by_refid(conf_name)
instance_definition = copy.deepcopy(conf.t)
@ -208,18 +226,91 @@ class InstanceGroup(stack_resource.StackResource):
instance_definition['Properties']['SubnetId'] = \
self.properties['VPCZoneIdentifier'][0]
# resolve references within the context of this stack.
fully_parsed = self.stack.resolve_runtime_data(instance_definition)
return self.stack.resolve_runtime_data(instance_definition)
resources = {}
for i in range(num_instances):
resources["%s-%d" % (self.name, i)] = fully_parsed
return {"Resources": resources}
def _create_template(self, num_instances, num_replace=0):
    """
    Create the template for the nested stack of existing and new instances.

    For rolling update, if launch configuration is different, the
    instance definition should come from the existing instance instead
    of using the new launch configuration.

    :param num_instances: total number of instance definitions the
        returned template must contain.
    :param num_replace: maximum number of existing instances whose
        definition may be swapped for the new launch configuration.
    :returns: dict of the form {"Resources": {name: definition, ...}}
        suitable for the nested stack.
    """
    # Keep only the newest num_instances existing instances (get_instances
    # sorts by created_time then name, so the slice drops the oldest).
    instances = self.get_instances()[-num_instances:]
    instance_definition = self._get_instance_definition()
    # Brand-new instances always get the new definition, so they count
    # against the replacement budget.
    num_create = num_instances - len(instances)
    num_replace -= num_create

    def instance_templates(num_replace):
        # Yield (name, definition) pairs: existing instances keep their
        # current template unless they differ from the new definition and
        # the replacement budget allows swapping them.
        for i in range(num_instances):
            if i < len(instances):
                inst = instances[i]
                if inst.t != instance_definition and num_replace > 0:
                    num_replace -= 1
                    yield inst.name, instance_definition
                else:
                    yield inst.name, inst.t
            else:
                # Grow the group: new resource with a generated short id.
                yield short_id.generate_id(), instance_definition

    return {"Resources": dict(instance_templates(num_replace))}
def _replace(self, min_in_service, batch_size, pause_time):
    """
    Replace the instances in the group using updated launch configuration.

    Instances are replaced in batches of at most batch_size, keeping at
    least min_in_service instances in the load balancer between batches,
    and pausing pause_time (an ISO 8601 duration string) between batches.

    :raises ValueError: if the cumulative pause time would exceed the
        stack update timeout.
    """
    def changing_instances(tmpl):
        # Compute the refids of instances that differ between the current
        # nested stack and the proposed template (symmetric difference on
        # (name, serialized-definition) pairs).
        instances = self.get_instances()
        current = set((i.name, str(i.t)) for i in instances)
        updated = set((k, str(v)) for k, v in tmpl['Resources'].items())
        # includes instances to be updated and deleted
        affected = set(k for k, v in current ^ updated)
        return set(i.FnGetRefId() for i in instances if i.name in affected)

    def pause_between_batch():
        # Cooperative task that yields until the scheduler times it out;
        # the Timeout is the expected way this task ends.
        while True:
            try:
                yield
            except scheduler.Timeout:
                return

    capacity = len(self.nested()) if self.nested() else 0
    efft_bat_sz = min(batch_size, capacity)
    efft_min_sz = min(min_in_service, capacity)
    pause_sec = iso8601utils.parse_isoduration(pause_time)

    # NOTE(review): if capacity or batch_size is 0, efft_bat_sz is 0 and
    # the division below raises ZeroDivisionError — verify callers only
    # invoke _replace on a non-empty group with a positive MaxBatchSize.
    batch_cnt = (capacity + efft_bat_sz - 1) // efft_bat_sz
    # Total pause across (batch_cnt - 1) gaps must fit in the stack timeout.
    if pause_sec * (batch_cnt - 1) >= self.stack.timeout_mins * 60:
        raise ValueError('The current UpdatePolicy will result '
                         'in stack update timeout.')

    # effective capacity includes temporary capacity added to accommodate
    # the minimum number of instances in service during update
    efft_capacity = max(capacity - efft_bat_sz, efft_min_sz) + efft_bat_sz

    try:
        remainder = capacity
        # Loop until every original instance has been replaced and any
        # temporary over-capacity has been shrunk back down.
        while remainder > 0 or efft_capacity > capacity:
            if capacity - remainder >= efft_min_sz:
                # Enough replaced instances are in service: drop back to
                # the nominal capacity for the remaining batches.
                efft_capacity = capacity
            template = self._create_template(efft_capacity, efft_bat_sz)
            # Remove the about-to-change instances from the LB before
            # updating them.
            self._lb_reload(exclude=changing_instances(template))
            updater = self.update_with_template(template, {})
            updater.run_to_completion()
            self.check_update_complete(updater)
            remainder -= efft_bat_sz
            if remainder > 0 and pause_sec > 0:
                # Re-register everything, then wait out the pause between
                # batches.
                self._lb_reload()
                waiter = scheduler.TaskRunner(pause_between_batch)
                waiter(timeout=pause_sec)
    finally:
        # Always leave the LB config consistent with the final instances.
        self._lb_reload()
def resize(self, new_capacity):
"""
Resize the instance group to the new capacity.
When shrinking, the newest instances will be removed.
When shrinking, the oldest instances will be removed.
"""
new_template = self._create_template(new_capacity)
try:
@ -231,7 +322,7 @@ class InstanceGroup(stack_resource.StackResource):
# nodes.
self._lb_reload()
def _lb_reload(self):
def _lb_reload(self, exclude=[]):
'''
Notify the LoadBalancer to reload its config to include
the changes in instances we have just made.
@ -240,7 +331,8 @@ class InstanceGroup(stack_resource.StackResource):
otherwise the instances' IP addresses may not be available.
'''
if self.properties['LoadBalancerNames']:
id_list = [inst.FnGetRefId() for inst in self.get_instances()]
id_list = [inst.FnGetRefId() for inst in self.get_instances()
if inst.FnGetRefId() not in exclude]
for lb in self.properties['LoadBalancerNames']:
lb_resource = self.stack[lb]
if 'Instances' in lb_resource.properties_schema:
@ -373,6 +465,14 @@ class AutoScalingGroup(InstanceGroup, CooldownMixin):
self.stack.resolve_runtime_data,
self.name)
# Replace instances first if launch configuration has changed
if (self.update_policy['AutoScalingRollingUpdate'] and
'LaunchConfigurationName' in prop_diff):
policy = self.update_policy['AutoScalingRollingUpdate']
self._replace(int(policy['MinInstancesInService']),
int(policy['MaxBatchSize']),
policy['PauseTime'])
# Get the current capacity, we may need to adjust if
# MinSize or MaxSize has changed
capacity = len(self.get_instances())

View File

@ -21,6 +21,7 @@ from testtools import skipIf
from oslo.config import cfg
from heat.common import short_id
from heat.common import template_format
from heat.common import exception
from heat.engine.resources import autoscaling as asc
@ -208,13 +209,14 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
instance_names = rsrc.get_instance_names()
self.assertEqual(len(instance_names), 1)
# Reduce the min size to 0, should complete without adjusting
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['MinSize'] = '0'
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(instance_names, rsrc.get_instance_names())
# trigger adjustment to reduce to 0, there should be no more instances
self._stub_lb_reload(0)
@ -236,10 +238,9 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['AvailabilityZones'] = ['foo']
updater = scheduler.TaskRunner(rsrc.update, update_snippet)
@ -260,7 +261,7 @@ class AutoScalingTest(HeatTestCase):
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
@ -292,7 +293,7 @@ class AutoScalingTest(HeatTestCase):
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
@ -330,8 +331,7 @@ class AutoScalingTest(HeatTestCase):
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
@ -369,8 +369,7 @@ class AutoScalingTest(HeatTestCase):
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
@ -410,7 +409,7 @@ class AutoScalingTest(HeatTestCase):
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
@ -442,7 +441,7 @@ class AutoScalingTest(HeatTestCase):
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
@ -505,14 +504,14 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
instance_names = rsrc.get_instance_names()
# Reduce the max size to 2, should complete without adjusting
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['MaxSize'] = '2'
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(instance_names, rsrc.get_instance_names())
self.assertEqual('2', rsrc.properties['MaxSize'])
rsrc.delete()
@ -531,7 +530,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Increase min size to 2, should trigger an ExactCapacity adjust
self._stub_lb_reload(2)
@ -542,8 +541,7 @@ class AutoScalingTest(HeatTestCase):
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['MinSize'] = '2'
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
self.assertEqual('2', rsrc.properties['MinSize'])
rsrc.delete()
@ -562,7 +560,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Increase min size to 2 via DesiredCapacity, should adjust
self._stub_lb_reload(2)
@ -573,9 +571,7 @@ class AutoScalingTest(HeatTestCase):
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['DesiredCapacity'] = '2'
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
self.assertEqual('2', rsrc.properties['DesiredCapacity'])
rsrc.delete()
@ -593,17 +589,15 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
instance_names = rsrc.get_instance_names()
# Remove DesiredCapacity from the updated template, which should
# have no effect, it's an optional parameter
update_snippet = copy.deepcopy(rsrc.parsed_template())
del(update_snippet['Properties']['DesiredCapacity'])
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(instance_names, rsrc.get_instance_names())
self.assertEqual(None, rsrc.properties['DesiredCapacity'])
rsrc.delete()
@ -624,7 +618,7 @@ class AutoScalingTest(HeatTestCase):
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['Cooldown'] = '61'
scheduler.TaskRunner(rsrc.update, update_snippet)()
@ -644,12 +638,15 @@ class AutoScalingTest(HeatTestCase):
# Check that the Fn::GetAZs is correctly resolved
expected = {u'Type': u'AWS::ElasticLoadBalancing::LoadBalancer',
u'Properties': {'Instances': ['WebServerGroup-0'],
u'Properties': {'Instances': ['aaaabbbbcccc'],
u'Listeners': [{u'InstancePort': u'80',
u'LoadBalancerPort': u'80',
u'Protocol': u'HTTP'}],
u'AvailabilityZones': ['abc', 'xyz']}}
self.m.StubOutWithMock(short_id, 'generate_id')
short_id.generate_id().AndReturn('aaaabbbbcccc')
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
@ -666,7 +663,7 @@ class AutoScalingTest(HeatTestCase):
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['Cooldown'] = '61'
scheduler.TaskRunner(rsrc.update, update_snippet)()
@ -690,8 +687,12 @@ class AutoScalingTest(HeatTestCase):
'Properties': {
'protocol_port': 8080,
'pool_id': 'pool123',
'members': [u'WebServerGroup-0']}
'members': [u'aaaabbbbcccc']}
}
self.m.StubOutWithMock(short_id, 'generate_id')
short_id.generate_id().AndReturn('aaaabbbbcccc')
self.m.StubOutWithMock(neutron_lb.LoadBalancer, 'handle_update')
neutron_lb.LoadBalancer.handle_update(expected,
mox.IgnoreArg(),
@ -742,9 +743,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(3)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 3)
# reduce to 1
self._stub_lb_reload(1)
@ -752,7 +751,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'ChangeInCapacity : -2')
self.m.ReplayAll()
rsrc.adjust(-2)
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# raise to 3
self._stub_lb_reload(3)
@ -760,9 +759,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(2)
self.m.ReplayAll()
rsrc.adjust(2)
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 3)
# set to 2
self._stub_lb_reload(2)
@ -770,8 +767,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'ExactCapacity : 2')
self.m.ReplayAll()
rsrc.adjust(2, 'ExactCapacity')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
self.m.VerifyAll()
def test_scaling_group_scale_up_failure(self):
@ -785,7 +781,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
self.m.VerifyAll()
self.m.UnsetStubs()
@ -797,7 +793,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
self.assertRaises(exception.Error, rsrc.adjust, 1)
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
self.m.VerifyAll()
@ -815,23 +811,20 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# raise above the max
rsrc.adjust(4)
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# lower below the min
rsrc.adjust(-2)
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# no change
rsrc.adjust(0)
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
rsrc.delete()
self.m.VerifyAll()
@ -849,8 +842,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# reduce by 50%
self._stub_lb_reload(1)
@ -858,8 +850,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_validate()
self.m.ReplayAll()
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# raise by 200%
self._stub_lb_reload(3)
@ -867,9 +858,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(2)
self.m.ReplayAll()
rsrc.adjust(200, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 3)
rsrc.delete()
@ -888,8 +877,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# reduce by 50%
self._stub_lb_reload(1)
@ -897,8 +885,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
self.m.ReplayAll()
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Now move time on 10 seconds - Cooldown in template is 60
# so this should not update the policy metadata, and the
@ -923,7 +910,7 @@ class AutoScalingTest(HeatTestCase):
# raise by 200%, too soon for Cooldown so there should be no change
rsrc.adjust(200, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
rsrc.delete()
@ -942,8 +929,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# reduce by 50%
self._stub_lb_reload(1)
@ -951,8 +937,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
self.m.ReplayAll()
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Now move time on 61 seconds - Cooldown in template is 60
# so this should update the policy metadata, and the
@ -979,9 +964,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'PercentChangeInCapacity : 200')
self.m.ReplayAll()
rsrc.adjust(200, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 3)
rsrc.delete()
@ -1000,8 +983,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# reduce by 50%
self._stub_lb_reload(1)
@ -1009,8 +991,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_validate()
self.m.ReplayAll()
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Don't move time, since cooldown is zero, it should work
previous_meta = {timeutils.strtime(now):
@ -1033,9 +1014,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_create(2)
self.m.ReplayAll()
rsrc.adjust(200, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 3)
rsrc.delete()
self.m.VerifyAll()
@ -1053,7 +1032,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Scale up one
self._stub_lb_reload(2)
@ -1071,8 +1050,7 @@ class AutoScalingTest(HeatTestCase):
alarm_url = up_policy.FnGetAtt('AlarmUrl')
self.assertNotEqual(None, alarm_url)
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
rsrc.delete()
self.m.VerifyAll()
@ -1091,8 +1069,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# Scale down one
self._stub_lb_reload(1)
@ -1107,7 +1084,7 @@ class AutoScalingTest(HeatTestCase):
down_policy = self.create_scaling_policy(t, stack,
'WebServerScaleDownPolicy')
down_policy.signal()
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
rsrc.delete()
self.m.VerifyAll()
@ -1124,7 +1101,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Scale up one
self._stub_lb_reload(2)
@ -1139,8 +1116,7 @@ class AutoScalingTest(HeatTestCase):
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# Now move time on 10 seconds - Cooldown in template is 60
# so this should not update the policy metadata, and the
@ -1162,8 +1138,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
rsrc.delete()
self.m.VerifyAll()
@ -1180,7 +1155,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Scale up one
self._stub_lb_reload(2)
@ -1195,8 +1170,7 @@ class AutoScalingTest(HeatTestCase):
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# Now move time on 61 seconds - Cooldown in template is 60
# so this should trigger a scale-up
@ -1220,9 +1194,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 3)
rsrc.delete()
self.m.VerifyAll()
@ -1239,7 +1211,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Create the scaling policy (with Cooldown=0) and scale up one
properties = t['Resources']['WebServerScaleUpPolicy']['Properties']
@ -1256,8 +1228,7 @@ class AutoScalingTest(HeatTestCase):
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# Now trigger another scale-up without changing time, should work
previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
@ -1279,9 +1250,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 3)
rsrc.delete()
self.m.VerifyAll()
@ -1298,7 +1267,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Create the scaling policy no Cooldown property, should behave the
# same as when Cooldown==0
@ -1317,8 +1286,7 @@ class AutoScalingTest(HeatTestCase):
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# Now trigger another scale-up without changing time, should work
previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
@ -1340,9 +1308,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 3)
rsrc.delete()
self.m.VerifyAll()
@ -1364,7 +1330,7 @@ class AutoScalingTest(HeatTestCase):
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 1)
# Create initial scaling policy
up_policy = self.create_scaling_policy(t, stack,
@ -1383,8 +1349,7 @@ class AutoScalingTest(HeatTestCase):
# Trigger alarm
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 2)
# Update scaling policy
update_snippet = copy.deepcopy(up_policy.parsed_template())
@ -1419,9 +1384,7 @@ class AutoScalingTest(HeatTestCase):
# Trigger alarm
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2', 'WebServerGroup-3'],
rsrc.get_instance_names())
self.assertEqual(len(rsrc.get_instance_names()), 4)
rsrc.delete()
self.m.VerifyAll()

View File

@ -12,15 +12,24 @@
# License for the specific language governing permissions and limitations
# under the License.
import re
import mox
import json
import copy
from oslo.config import cfg
from heat.common import exception
from heat.common import template_format
from heat.engine.resources import instance
from heat.engine import parser
from heat.engine.resources import user
from heat.engine.resources import instance
from heat.engine.resources import loadbalancer as lb
from heat.engine.resources import wait_condition as wc
from heat.tests.common import HeatTestCase
from heat.tests.utils import setup_dummy_db
from heat.tests import utils
from heat.tests import fakes
from heat.tests.v1_1 import fakes as fakes11
from testtools.matchers import MatchesRegex
asg_tmpl_without_updt_policy = '''
@ -34,10 +43,22 @@ asg_tmpl_without_updt_policy = '''
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "1",
"MaxSize" : "10"
"MinSize" : "10",
"MaxSize" : "20",
"LoadBalancerNames" : [ { "Ref" : "ElasticLoadBalancer" } ]
}
},
"ElasticLoadBalancer" : {
"Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
"Properties" : {
"AvailabilityZones" : ["nova"],
"Listeners" : [ {
"LoadBalancerPort" : "80",
"InstancePort" : "80",
"Protocol" : "HTTP"
}]
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
@ -67,8 +88,8 @@ asg_tmpl_with_bad_updt_policy = '''
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "1",
"MaxSize" : "10"
"MinSize" : "10",
"MaxSize" : "20"
}
},
"LaunchConfig" : {
@ -100,10 +121,22 @@ asg_tmpl_with_default_updt_policy = '''
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "1",
"MaxSize" : "10"
"MinSize" : "10",
"MaxSize" : "20",
"LoadBalancerNames" : [ { "Ref" : "ElasticLoadBalancer" } ]
}
},
"ElasticLoadBalancer" : {
"Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
"Properties" : {
"AvailabilityZones" : ["nova"],
"Listeners" : [ {
"LoadBalancerPort" : "80",
"InstancePort" : "80",
"Protocol" : "HTTP"
}]
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
@ -118,7 +151,7 @@ asg_tmpl_with_default_updt_policy = '''
}
'''
asg_tmpl_with_updt_policy_1 = '''
asg_tmpl_with_updt_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create autoscaling group.",
@ -128,18 +161,30 @@ asg_tmpl_with_updt_policy_1 = '''
"UpdatePolicy" : {
"AutoScalingRollingUpdate" : {
"MinInstancesInService" : "1",
"MaxBatchSize" : "3",
"PauseTime" : "PT30S"
"MaxBatchSize" : "2",
"PauseTime" : "PT1S"
}
},
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "1",
"MaxSize" : "10"
"MinSize" : "10",
"MaxSize" : "20",
"LoadBalancerNames" : [ { "Ref" : "ElasticLoadBalancer" } ]
}
},
"ElasticLoadBalancer" : {
"Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
"Properties" : {
"AvailabilityZones" : ["nova"],
"Listeners" : [ {
"LoadBalancerPort" : "80",
"InstancePort" : "80",
"Protocol" : "HTTP"
}]
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
@ -154,67 +199,117 @@ asg_tmpl_with_updt_policy_1 = '''
}
'''
asg_tmpl_with_updt_policy_2 = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create autoscaling group.",
"Parameters" : {},
"Resources" : {
"WebServerGroup" : {
"UpdatePolicy" : {
"AutoScalingRollingUpdate" : {
"MinInstancesInService" : "1",
"MaxBatchSize" : "5",
"PauseTime" : "PT30S"
}
},
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "1",
"MaxSize" : "10"
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
"ImageId" : "foo",
"InstanceType" : "m1.large",
"KeyName" : "test",
"SecurityGroups" : [ "sg-1" ],
"UserData" : "jsconfig data"
}
}
}
}
'''
class AutoScalingGroupTest(HeatTestCase):
class InstanceGroupTest(HeatTestCase):
def setUp(self):
super(InstanceGroupTest, self).setUp()
setup_dummy_db()
def _stub_create(self, num, instance_class=instance.Instance):
"""
Expect creation of C{num} number of Instances.
:param instance_class: The resource class to expect to be created
instead of instance.Instance.
"""
super(AutoScalingGroupTest, self).setUp()
self.fc = fakes11.FakeClient()
self.fkc = fakes.FakeKeystoneClient(username='test_stack.CfnLBUser')
cfg.CONF.set_default('heat_waitcondition_server_url',
'http://127.0.0.1:8000/v1/waitcondition')
utils.setup_dummy_db()
def _stub_validate(self):
self.m.StubOutWithMock(parser.Stack, 'validate')
parser.Stack.validate()
parser.Stack.validate().MultipleTimes()
def _stub_lb_create(self):
self.m.StubOutWithMock(user.User, 'keystone')
user.User.keystone().AndReturn(self.fkc)
self.m.StubOutWithMock(user.AccessKey, 'keystone')
user.AccessKey.keystone().AndReturn(self.fkc)
self.m.StubOutWithMock(wc.WaitConditionHandle, 'keystone')
wc.WaitConditionHandle.keystone().MultipleTimes().AndReturn(self.fkc)
self.m.StubOutWithMock(wc.WaitConditionHandle, 'get_status')
wc.WaitConditionHandle.get_status().AndReturn(['SUCCESS'])
def _stub_lb_reload(self, num=1, setup=True):
if setup:
self.m.StubOutWithMock(lb.LoadBalancer, 'handle_update')
for i in range(num):
lb.LoadBalancer.handle_update(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
def _stub_grp_create(self, capacity=0, setup_lb=True):
"""
Expect creation of instances to capacity. By default, expect creation
of load balancer unless specified.
"""
self._stub_validate()
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
self.m.StubOutWithMock(instance_class, 'handle_create')
self.m.StubOutWithMock(instance_class, 'check_create_complete')
cookie = object()
for x in range(num):
instance_class.handle_create().AndReturn(cookie)
instance_class.check_create_complete(cookie).AndReturn(False)
instance_class.check_create_complete(
cookie).MultipleTimes().AndReturn(True)
# for load balancer setup
if setup_lb:
self._stub_lb_create()
self._stub_lb_reload()
instance.Instance.handle_create().AndReturn(cookie)
instance.Instance.check_create_complete(cookie).AndReturn(True)
# for each instance in group
for i in range(capacity):
instance.Instance.handle_create().AndReturn(cookie)
instance.Instance.check_create_complete(cookie).AndReturn(True)
def _stub_grp_replace(self,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
num_reloads_expected_on_updt=0):
"""
Expect replacement of the capacity by batch size
"""
# for load balancer setup
self._stub_lb_reload(num_reloads_expected_on_updt)
# for instances in the group
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
self.m.StubOutWithMock(instance.Instance, 'destroy')
cookie = object()
for i in range(num_creates_expected_on_updt):
instance.Instance.handle_create().AndReturn(cookie)
instance.Instance.check_create_complete(cookie).AndReturn(True)
for i in range(num_deletes_expected_on_updt):
instance.Instance.destroy().AndReturn(None)
def _stub_grp_update(self,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
num_reloads_expected_on_updt=0):
"""
Expect update of the instances
"""
self.m.StubOutWithMock(instance.Instance, 'nova')
instance.Instance.nova().MultipleTimes().AndReturn(self.fc)
def activate_status(server):
server.status = 'VERIFY_RESIZE'
return_server = self.fc.servers.list()[1]
return_server.id = 1234
return_server.get = activate_status.__get__(return_server)
self.m.StubOutWithMock(self.fc.servers, 'get')
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.servers.get(mox.IgnoreArg()).\
MultipleTimes().AndReturn(return_server)
self.fc.client.post_servers_1234_action(
body={'resize': {'flavorRef': 3}}).\
MultipleTimes().AndReturn((202, None))
self.fc.client.post_servers_1234_action(
body={'confirmResize': None}).\
MultipleTimes().AndReturn((202, None))
self._stub_grp_replace(num_creates_expected_on_updt,
num_deletes_expected_on_updt,
num_reloads_expected_on_updt)
def get_launch_conf_name(self, stack, ig_name):
return stack[ig_name].properties['LaunchConfigurationName']
@ -222,12 +317,17 @@ class InstanceGroupTest(HeatTestCase):
def test_parse_without_update_policy(self):
tmpl = template_format.parse(asg_tmpl_without_updt_policy)
stack = utils.parse_stack(tmpl)
stack.validate()
grp = stack['WebServerGroup']
self.assertFalse(grp.update_policy['AutoScalingRollingUpdate'])
def test_parse_with_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_updt_policy_1)
tmpl = template_format.parse(asg_tmpl_with_updt_policy)
stack = utils.parse_stack(tmpl)
stack.validate()
tmpl_grp = tmpl['Resources']['WebServerGroup']
tmpl_policy = tmpl_grp['UpdatePolicy']['AutoScalingRollingUpdate']
tmpl_batch_sz = int(tmpl_policy['MaxBatchSize'])
grp = stack['WebServerGroup']
self.assertTrue(grp.update_policy)
self.assertTrue(len(grp.update_policy) == 1)
@ -235,12 +335,13 @@ class InstanceGroupTest(HeatTestCase):
policy = grp.update_policy['AutoScalingRollingUpdate']
self.assertTrue(policy and len(policy) > 0)
self.assertEqual(int(policy['MinInstancesInService']), 1)
self.assertEqual(int(policy['MaxBatchSize']), 3)
self.assertEqual(policy['PauseTime'], 'PT30S')
self.assertEqual(int(policy['MaxBatchSize']), tmpl_batch_sz)
self.assertEqual(policy['PauseTime'], 'PT1S')
def test_parse_with_default_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_default_updt_policy)
stack = utils.parse_stack(tmpl)
stack.validate()
grp = stack['WebServerGroup']
self.assertTrue(grp.update_policy)
self.assertTrue(len(grp.update_policy) == 1)
@ -256,6 +357,14 @@ class InstanceGroupTest(HeatTestCase):
stack = utils.parse_stack(tmpl)
self.assertRaises(exception.StackValidationFailed, stack.validate)
def test_parse_with_bad_pausetime_in_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_default_updt_policy)
group = tmpl['Resources']['WebServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['PauseTime'] = 'P1YT1H'
stack = utils.parse_stack(tmpl)
self.assertRaises(exception.StackValidationFailed, stack.validate)
def validate_update_policy_diff(self, current, updated):
# load current stack
@ -287,96 +396,389 @@ class InstanceGroupTest(HeatTestCase):
def test_update_policy_added(self):
self.validate_update_policy_diff(asg_tmpl_without_updt_policy,
asg_tmpl_with_updt_policy_1)
asg_tmpl_with_updt_policy)
def test_update_policy_updated(self):
self.validate_update_policy_diff(asg_tmpl_with_updt_policy_1,
asg_tmpl_with_updt_policy_2)
updt_template = json.loads(asg_tmpl_with_updt_policy)
grp = updt_template['Resources']['WebServerGroup']
policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '2'
policy['MaxBatchSize'] = '4'
policy['PauseTime'] = 'PT1M30S'
self.validate_update_policy_diff(asg_tmpl_with_updt_policy,
json.dumps(updt_template))
def test_update_policy_removed(self):
self.validate_update_policy_diff(asg_tmpl_with_updt_policy_1,
self.validate_update_policy_diff(asg_tmpl_with_updt_policy,
asg_tmpl_without_updt_policy)
def test_autoscaling_group_update(self):
def update_autoscaling_group(self, init_template, updt_template,
num_updates_expected_on_updt,
num_creates_expected_on_updt,
num_deletes_expected_on_updt,
num_reloads_expected_on_updt,
update_replace):
# setup stack from the initial template
tmpl = template_format.parse(asg_tmpl_with_updt_policy_1)
tmpl = template_format.parse(init_template)
stack = utils.parse_stack(tmpl)
nested = stack['WebServerGroup'].nested()
stack.validate()
# test stack create
# test the number of instance creation
# test that physical resource name of launch configuration is used
size = int(stack['WebServerGroup'].properties['MinSize'])
self._stub_create(size)
self._stub_grp_create(size)
self.m.ReplayAll()
stack.create()
self.m.VerifyAll()
self.assertEqual(stack.state, ('CREATE', 'COMPLETE'))
conf = stack['LaunchConfig']
conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
regex_pattern = re.compile(conf_name_pattern)
self.assertTrue(regex_pattern.match(conf.FnGetRefId()))
nested = stack['WebServerGroup'].nested()
self.assertTrue(len(nested), size)
# test stack update
# test that update policy is updated
# test that launch configuration is replaced
# test that update policy is loaded
current_grp = stack['WebServerGroup']
self.assertTrue('AutoScalingRollingUpdate'
in current_grp.update_policy)
current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
self.assertTrue(current_policy and len(current_policy) > 0)
self.assertEqual(int(current_policy['MaxBatchSize']), 3)
self.assertTrue(current_policy)
self.assertTrue(len(current_policy) > 0)
init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
init_batch_sz = int(init_roll_updt['MaxBatchSize'])
self.assertEqual(int(current_policy['MaxBatchSize']), init_batch_sz)
# test that physical resource name of launch configuration is used
conf = stack['LaunchConfig']
conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))
# get launch conf name here to compare result after update
conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')
updated_tmpl = template_format.parse(asg_tmpl_with_updt_policy_2)
# test the number of instances created
nested = stack['WebServerGroup'].nested()
self.assertEqual(len(nested.resources), size)
# clean up for next test
self.m.UnsetStubs()
# saves info from initial list of instances for comparison later
init_instances = current_grp.get_instances()
init_names = current_grp.get_instance_names()
init_images = [(i.name, i.t['Properties']['ImageId'])
for i in init_instances]
init_flavors = [(i.name, i.t['Properties']['InstanceType'])
for i in init_instances]
# test stack update
updated_tmpl = template_format.parse(updt_template)
updated_stack = utils.parse_stack(updated_tmpl)
new_grp_tmpl = updated_tmpl['Resources']['WebServerGroup']
new_updt_pol = new_grp_tmpl['UpdatePolicy']['AutoScalingRollingUpdate']
new_batch_sz = int(new_updt_pol['MaxBatchSize'])
new_min_in_svc = int(new_updt_pol['MinInstancesInService'])
self.assertNotEqual(new_batch_sz, init_batch_sz)
self._stub_validate()
if update_replace:
self._stub_grp_replace(size, size, num_reloads_expected_on_updt)
else:
self._stub_grp_update(num_creates_expected_on_updt,
num_deletes_expected_on_updt,
num_reloads_expected_on_updt)
self.m.ReplayAll()
stack.update(updated_stack)
self.m.VerifyAll()
self.assertEqual(stack.state, ('UPDATE', 'COMPLETE'))
# test that the update policy is updated
updated_grp = stack['WebServerGroup']
updt_instances = updated_grp.get_instances()
self.assertTrue('AutoScalingRollingUpdate'
in updated_grp.update_policy)
updated_policy = updated_grp.update_policy['AutoScalingRollingUpdate']
self.assertTrue(updated_policy and len(updated_policy) > 0)
self.assertEqual(int(updated_policy['MaxBatchSize']), 5)
self.assertTrue(updated_policy)
self.assertTrue(len(updated_policy) > 0)
self.assertEqual(int(updated_policy['MaxBatchSize']), new_batch_sz)
# test that the launch configuration is replaced
updated_conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')
self.assertNotEqual(conf_name, updated_conf_name)
# test that the group size are the same
updt_instances = updated_grp.get_instances()
updt_names = updated_grp.get_instance_names()
self.assertEqual(len(updt_names), len(init_names))
# test that appropriate number of instance names are the same
matched_names = set(updt_names) & set(init_names)
self.assertEqual(len(matched_names), num_updates_expected_on_updt)
# test that the appropriate number of new instances are created
self.assertEqual(len(set(updt_names) - set(init_names)),
num_creates_expected_on_updt)
# test that the appropriate number of instances are deleted
self.assertEqual(len(set(init_names) - set(updt_names)),
num_deletes_expected_on_updt)
# test that the older instances are the ones being deleted
if num_deletes_expected_on_updt > 0:
deletes_expected = init_names[:num_deletes_expected_on_updt]
self.assertNotIn(deletes_expected, updt_names)
# test if instances are updated
if update_replace:
# test that the image id is changed for all instances
updt_images = [(i.name, i.t['Properties']['ImageId'])
for i in updt_instances]
self.assertEqual(len(set(updt_images) & set(init_images)), 0)
else:
# test that instance type is changed for all instances
updt_flavors = [(i.name, i.t['Properties']['InstanceType'])
for i in updt_instances]
self.assertEqual(len(set(updt_flavors) & set(init_flavors)), 0)
def test_autoscaling_group_update_replace(self):
"""
Test simple update replace with no conflict in batch size and
minimum instances in service.
"""
updt_template = json.loads(asg_tmpl_with_updt_policy)
grp = updt_template['Resources']['WebServerGroup']
policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '1'
policy['MaxBatchSize'] = '3'
config = updt_template['Resources']['LaunchConfig']
config['Properties']['ImageId'] = 'bar'
self.update_autoscaling_group(asg_tmpl_with_updt_policy,
json.dumps(updt_template),
num_updates_expected_on_updt=10,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
num_reloads_expected_on_updt=9,
update_replace=True)
def test_autoscaling_group_update_replace_with_adjusted_capacity(self):
"""
Test update replace with capacity adjustment due to conflict in
batch size and minimum instances in service.
"""
updt_template = json.loads(asg_tmpl_with_updt_policy)
grp = updt_template['Resources']['WebServerGroup']
policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '8'
policy['MaxBatchSize'] = '4'
config = updt_template['Resources']['LaunchConfig']
config['Properties']['ImageId'] = 'bar'
self.update_autoscaling_group(asg_tmpl_with_updt_policy,
json.dumps(updt_template),
num_updates_expected_on_updt=8,
num_creates_expected_on_updt=2,
num_deletes_expected_on_updt=2,
num_reloads_expected_on_updt=7,
update_replace=True)
def test_autoscaling_group_update_replace_huge_batch_size(self):
"""
Test update replace with a huge batch size.
"""
updt_template = json.loads(asg_tmpl_with_updt_policy)
group = updt_template['Resources']['WebServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '0'
policy['MaxBatchSize'] = '20'
config = updt_template['Resources']['LaunchConfig']
config['Properties']['ImageId'] = 'bar'
self.update_autoscaling_group(asg_tmpl_with_updt_policy,
json.dumps(updt_template),
num_updates_expected_on_updt=10,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
num_reloads_expected_on_updt=3,
update_replace=True)
def test_autoscaling_group_update_replace_huge_min_in_service(self):
"""
Test update replace with a huge number of minimum instances in service.
"""
updt_template = json.loads(asg_tmpl_with_updt_policy)
group = updt_template['Resources']['WebServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '20'
policy['MaxBatchSize'] = '1'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['LaunchConfig']
config['Properties']['ImageId'] = 'bar'
self.update_autoscaling_group(asg_tmpl_with_updt_policy,
json.dumps(updt_template),
num_updates_expected_on_updt=9,
num_creates_expected_on_updt=1,
num_deletes_expected_on_updt=1,
num_reloads_expected_on_updt=13,
update_replace=True)
def test_autoscaling_group_update_no_replace(self):
"""
Test simple update only and no replace (i.e. updated instance flavor
in Launch Configuration) with no conflict in batch size and
minimum instances in service.
"""
updt_template = json.loads(copy.deepcopy(asg_tmpl_with_updt_policy))
group = updt_template['Resources']['WebServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '1'
policy['MaxBatchSize'] = '3'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['LaunchConfig']
config['Properties']['InstanceType'] = 'm1.large'
self.update_autoscaling_group(asg_tmpl_with_updt_policy,
json.dumps(updt_template),
num_updates_expected_on_updt=10,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
num_reloads_expected_on_updt=6,
update_replace=False)
def test_instance_group_update_no_replace_with_adjusted_capacity(self):
"""
Test update only and no replace (i.e. updated instance flavor in
Launch Configuration) with capacity adjustment due to conflict in
batch size and minimum instances in service.
"""
updt_template = json.loads(copy.deepcopy(asg_tmpl_with_updt_policy))
group = updt_template['Resources']['WebServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['MinInstancesInService'] = '8'
policy['MaxBatchSize'] = '4'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['LaunchConfig']
config['Properties']['InstanceType'] = 'm1.large'
self.update_autoscaling_group(asg_tmpl_with_updt_policy,
json.dumps(updt_template),
num_updates_expected_on_updt=8,
num_creates_expected_on_updt=2,
num_deletes_expected_on_updt=2,
num_reloads_expected_on_updt=5,
update_replace=False)
def test_autoscaling_group_update_policy_removed(self):
# setup stack from the initial template
tmpl = template_format.parse(asg_tmpl_with_updt_policy_1)
tmpl = template_format.parse(asg_tmpl_with_updt_policy)
stack = utils.parse_stack(tmpl)
nested = stack['WebServerGroup'].nested()
stack.validate()
# test stack create
# test the number of instance creation
# test that physical resource name of launch configuration is used
size = int(stack['WebServerGroup'].properties['MinSize'])
self._stub_create(size)
self._stub_grp_create(size)
self.m.ReplayAll()
stack.create()
self.m.VerifyAll()
self.assertEqual(stack.state, ('CREATE', 'COMPLETE'))
conf = stack['LaunchConfig']
conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
regex_pattern = re.compile(conf_name_pattern)
self.assertTrue(regex_pattern.match(conf.FnGetRefId()))
nested = stack['WebServerGroup'].nested()
self.assertTrue(len(nested), size)
# test stack update
# test that update policy is removed
# test that update policy is loaded
current_grp = stack['WebServerGroup']
self.assertTrue('AutoScalingRollingUpdate'
in current_grp.update_policy)
current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
self.assertTrue(current_policy and len(current_policy) > 0)
self.assertEqual(int(current_policy['MaxBatchSize']), 3)
self.assertTrue(current_policy)
self.assertTrue(len(current_policy) > 0)
init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
init_batch_sz = int(init_roll_updt['MaxBatchSize'])
self.assertEqual(int(current_policy['MaxBatchSize']), init_batch_sz)
# test that physical resource name of launch configuration is used
conf = stack['LaunchConfig']
conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))
# test the number of instances created
nested = stack['WebServerGroup'].nested()
self.assertEqual(len(nested.resources), size)
# clean up for next test
self.m.UnsetStubs()
# test stack update
updated_tmpl = template_format.parse(asg_tmpl_without_updt_policy)
updated_stack = utils.parse_stack(updated_tmpl)
self._stub_grp_replace(num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
num_reloads_expected_on_updt=1)
self.m.ReplayAll()
stack.update(updated_stack)
self.m.VerifyAll()
self.assertEqual(stack.state, ('UPDATE', 'COMPLETE'))
# test that update policy is removed
updated_grp = stack['WebServerGroup']
self.assertFalse(updated_grp.update_policy['AutoScalingRollingUpdate'])
def test_autoscaling_group_update_policy_check_timeout(self):
# setup stack from the initial template
tmpl = template_format.parse(asg_tmpl_with_updt_policy)
stack = utils.parse_stack(tmpl)
# test stack create
size = int(stack['WebServerGroup'].properties['MinSize'])
self._stub_grp_create(size)
self.m.ReplayAll()
stack.create()
self.m.VerifyAll()
self.assertEqual(stack.state, ('CREATE', 'COMPLETE'))
# test that update policy is loaded
current_grp = stack['WebServerGroup']
self.assertTrue('AutoScalingRollingUpdate'
in current_grp.update_policy)
current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
self.assertTrue(current_policy)
self.assertTrue(len(current_policy) > 0)
init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
init_batch_sz = int(init_roll_updt['MaxBatchSize'])
self.assertEqual(int(current_policy['MaxBatchSize']), init_batch_sz)
# test the number of instances created
nested = stack['WebServerGroup'].nested()
self.assertEqual(len(nested.resources), size)
# clean up for next test
self.m.UnsetStubs()
# modify the pause time and test for error
new_pause_time = 'PT30M'
updt_template = json.loads(copy.deepcopy(asg_tmpl_with_updt_policy))
group = updt_template['Resources']['WebServerGroup']
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
policy['PauseTime'] = new_pause_time
config = updt_template['Resources']['LaunchConfig']
config['Properties']['ImageId'] = 'bar'
updated_tmpl = template_format.parse(json.dumps(updt_template))
updated_stack = utils.parse_stack(updated_tmpl)
self._stub_grp_replace(num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
num_reloads_expected_on_updt=1)
self.m.ReplayAll()
stack.update(updated_stack)
self.m.VerifyAll()
self.assertEqual(stack.state, ('UPDATE', 'FAILED'))
# test that the update policy is updated
updated_grp = stack['WebServerGroup']
self.assertTrue('AutoScalingRollingUpdate'
in updated_grp.update_policy)
updated_policy = updated_grp.update_policy['AutoScalingRollingUpdate']
self.assertTrue(updated_policy)
self.assertTrue(len(updated_policy) > 0)
self.assertEqual(updated_policy['PauseTime'], new_pause_time)
# test that error message match
expected_error_message = ('The current UpdatePolicy will result '
'in stack update timeout.')
self.assertIn(expected_error_message, stack.status_reason)

View File

@ -222,7 +222,8 @@ class InstanceGroupTest(HeatTestCase):
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
# The failed inner resource remains
child_resource = rsrc.nested()['JobServerGroup-0']
self.assertEqual(len(rsrc.nested().resources), 1)
child_resource = rsrc.nested().resources.values()[0]
self.assertEqual((child_resource.CREATE, child_resource.FAILED),
child_resource.state)
@ -241,6 +242,8 @@ class InstanceGroupTest(HeatTestCase):
self.m.ReplayAll()
conf = self.create_resource(t, stack, 'JobServerConfig')
rsrc = self.create_resource(t, stack, 'JobServerGroup')
self.assertEqual(len(rsrc.nested().resources), 1)
succeeded_instance = rsrc.nested().resources.values()[0]
self.m.VerifyAll()
self.m.UnsetStubs()
@ -262,7 +265,9 @@ class InstanceGroupTest(HeatTestCase):
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
# The failed inner resource remains
child_resource = rsrc.nested()['JobServerGroup-1']
self.assertEqual(len(rsrc.nested().resources), 2)
child_resource = [r for r in rsrc.nested().resources.values()
if r.name != succeeded_instance.name][0]
self.assertEqual((child_resource.CREATE, child_resource.FAILED),
child_resource.state)

View File

@ -12,15 +12,18 @@
# License for the specific language governing permissions and limitations
# under the License.
import re
import mox
import json
import copy
from heat.common import exception
from heat.common import template_format
from heat.engine.resources import instance
from heat.engine import parser
from heat.engine.resources import instance
from heat.tests.common import HeatTestCase
from heat.tests.utils import setup_dummy_db
from heat.tests import utils
from heat.tests.v1_1 import fakes
from testtools.matchers import MatchesRegex
ig_tmpl_without_updt_policy = '''
@ -33,7 +36,7 @@ ig_tmpl_without_updt_policy = '''
"Type" : "OS::Heat::InstanceGroup",
"Properties" : {
"LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
"Size" : "8",
"Size" : "10",
"AvailabilityZones" : ["nova"]
}
},
@ -64,7 +67,7 @@ ig_tmpl_with_bad_updt_policy = '''
"Type" : "OS::Heat::InstanceGroup",
"Properties" : {
"LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
"Size" : "8",
"Size" : "10",
"AvailabilityZones" : ["nova"]
}
},
@ -96,7 +99,7 @@ ig_tmpl_with_default_updt_policy = '''
"Type" : "OS::Heat::InstanceGroup",
"Properties" : {
"LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
"Size" : "8",
"Size" : "10",
"AvailabilityZones" : ["nova"]
}
},
@ -114,7 +117,7 @@ ig_tmpl_with_default_updt_policy = '''
}
'''
ig_tmpl_with_updt_policy_1 = '''
ig_tmpl_with_updt_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create multiple instances.",
@ -124,14 +127,14 @@ ig_tmpl_with_updt_policy_1 = '''
"UpdatePolicy" : {
"RollingUpdate" : {
"MinInstancesInService" : "1",
"MaxBatchSize" : "3",
"PauseTime" : "PT30S"
"MaxBatchSize" : "2",
"PauseTime" : "PT1S"
}
},
"Type" : "OS::Heat::InstanceGroup",
"Properties" : {
"LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
"Size" : "8",
"Size" : "10",
"AvailabilityZones" : ["nova"]
}
},
@ -149,66 +152,81 @@ ig_tmpl_with_updt_policy_1 = '''
}
'''
ig_tmpl_with_updt_policy_2 = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create multiple instances.",
"Parameters" : {},
"Resources" : {
"JobServerGroup" : {
"UpdatePolicy" : {
"RollingUpdate" : {
"MinInstancesInService" : "1",
"MaxBatchSize" : "5",
"PauseTime" : "PT30S"
}
},
"Type" : "OS::Heat::InstanceGroup",
"Properties" : {
"LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
"Size" : "8",
"AvailabilityZones" : ["nova"]
}
},
"JobServerConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
"ImageId" : "foo",
"InstanceType" : "m1.large",
"KeyName" : "test",
"SecurityGroups" : [ "sg-1" ],
"UserData" : "jsconfig data"
}
}
}
}
'''
class InstanceGroupTest(HeatTestCase):
def setUp(self):
super(InstanceGroupTest, self).setUp()
setup_dummy_db()
def _stub_create(self, num, instance_class=instance.Instance):
"""
Expect creation of C{num} number of Instances.
:param instance_class: The resource class to expect to be created
instead of instance.Instance.
"""
self.fc = fakes.FakeClient()
utils.setup_dummy_db()
def _stub_validate(self):
self.m.StubOutWithMock(parser.Stack, 'validate')
parser.Stack.validate()
parser.Stack.validate().MultipleTimes()
def _stub_grp_create(self, capacity):
"""
Expect creation of instances to capacity
"""
self._stub_validate()
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
self.m.StubOutWithMock(instance_class, 'handle_create')
self.m.StubOutWithMock(instance_class, 'check_create_complete')
cookie = object()
for x in range(num):
instance_class.handle_create().AndReturn(cookie)
instance_class.check_create_complete(cookie).AndReturn(False)
instance_class.check_create_complete(
cookie).MultipleTimes().AndReturn(True)
for x in range(capacity):
instance.Instance.handle_create().AndReturn(cookie)
instance.Instance.check_create_complete(cookie).AndReturn(True)
def _stub_grp_replace(self,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0):
"""
Expect update replacement of the instances
"""
self._stub_validate()
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
self.m.StubOutWithMock(instance.Instance, 'destroy')
cookie = object()
for i in range(num_creates_expected_on_updt):
instance.Instance.handle_create().AndReturn(cookie)
instance.Instance.check_create_complete(cookie).AndReturn(True)
for i in range(num_deletes_expected_on_updt):
instance.Instance.destroy().AndReturn(None)
def _stub_grp_update(self,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0):
"""
Expect update of the instances
"""
self.m.StubOutWithMock(instance.Instance, 'nova')
instance.Instance.nova().MultipleTimes().AndReturn(self.fc)
def activate_status(server):
server.status = 'VERIFY_RESIZE'
return_server = self.fc.servers.list()[1]
return_server.id = 1234
return_server.get = activate_status.__get__(return_server)
self.m.StubOutWithMock(self.fc.servers, 'get')
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.servers.get(mox.IgnoreArg()).\
MultipleTimes().AndReturn(return_server)
self.fc.client.post_servers_1234_action(
body={'resize': {'flavorRef': 3}}).\
MultipleTimes().AndReturn((202, None))
self.fc.client.post_servers_1234_action(
body={'confirmResize': None}).\
MultipleTimes().AndReturn((202, None))
self._stub_grp_replace(num_creates_expected_on_updt,
num_deletes_expected_on_updt)
def get_launch_conf_name(self, stack, ig_name):
return stack[ig_name].properties['LaunchConfigurationName']
@ -216,12 +234,14 @@ class InstanceGroupTest(HeatTestCase):
def test_parse_without_update_policy(self):
tmpl = template_format.parse(ig_tmpl_without_updt_policy)
stack = utils.parse_stack(tmpl)
stack.validate()
grp = stack['JobServerGroup']
self.assertFalse(grp.update_policy['RollingUpdate'])
def test_parse_with_update_policy(self):
# NOTE(review): this chunk is a rendered unified diff; the removed and
# added versions of several lines both appear below — TODO confirm
# against the committed file before editing.
tmpl = template_format.parse(ig_tmpl_with_updt_policy_1)
tmpl = template_format.parse(ig_tmpl_with_updt_policy)
stack = utils.parse_stack(tmpl)
stack.validate()
grp = stack['JobServerGroup']
# Exactly one policy type (RollingUpdate) should be loaded.
self.assertTrue(grp.update_policy)
self.assertTrue(len(grp.update_policy) == 1)
@ -229,12 +249,13 @@ class InstanceGroupTest(HeatTestCase):
policy = grp.update_policy['RollingUpdate']
self.assertTrue(policy and len(policy) > 0)
self.assertEqual(int(policy['MinInstancesInService']), 1)
self.assertEqual(int(policy['MaxBatchSize']), 3)
self.assertEqual(policy['PauseTime'], 'PT30S')
self.assertEqual(int(policy['MaxBatchSize']), 2)
self.assertEqual(policy['PauseTime'], 'PT1S')
def test_parse_with_default_update_policy(self):
# NOTE(review): the tail of this test is hidden by a diff hunk cut;
# the visible part only checks that a single policy entry is loaded.
tmpl = template_format.parse(ig_tmpl_with_default_updt_policy)
stack = utils.parse_stack(tmpl)
stack.validate()
grp = stack['JobServerGroup']
self.assertTrue(grp.update_policy)
self.assertTrue(len(grp.update_policy) == 1)
@ -250,6 +271,21 @@ class InstanceGroupTest(HeatTestCase):
stack = utils.parse_stack(tmpl)
self.assertRaises(exception.StackValidationFailed, stack.validate)
def test_parse_with_bad_pausetime_in_update_policy(self):
    """Invalid ISO 8601 durations for PauseTime must fail validation."""
    parsed = template_format.parse(ig_tmpl_with_updt_policy)
    rolling = (parsed['Resources']['JobServerGroup']
               ['UpdatePolicy']['RollingUpdate'])
    for bad_duration in ('ABCD1234',    # some random string
                         'P1YT1H'):     # unsupported designator (years)
        rolling['PauseTime'] = bad_duration
        bad_stack = utils.parse_stack(parsed)
        self.assertRaises(exception.StackValidationFailed,
                          bad_stack.validate)
def validate_update_policy_diff(self, current, updated):
# load current stack
@ -281,93 +317,359 @@ class InstanceGroupTest(HeatTestCase):
def test_update_policy_added(self):
# NOTE(review): diff residue — the second argument appears in both its
# removed and added form below; only the added one is live code.
self.validate_update_policy_diff(ig_tmpl_without_updt_policy,
ig_tmpl_with_updt_policy_1)
ig_tmpl_with_updt_policy)
def test_update_policy_updated(self):
# NOTE(review): diff residue — the old two-template call is the removed
# version; the json-mutation body below is the added version.
self.validate_update_policy_diff(ig_tmpl_with_updt_policy_1,
ig_tmpl_with_updt_policy_2)
# Mutate every RollingUpdate field so the diff contains all three keys.
updt_template = json.loads(ig_tmpl_with_updt_policy)
grp = updt_template['Resources']['JobServerGroup']
policy = grp['UpdatePolicy']['RollingUpdate']
policy['MinInstancesInService'] = '2'
policy['MaxBatchSize'] = '4'
policy['PauseTime'] = 'PT1M30S'
self.validate_update_policy_diff(ig_tmpl_with_updt_policy,
json.dumps(updt_template))
def test_update_policy_removed(self):
# NOTE(review): diff residue — the first call line is the removed
# version, the second is the added one.
self.validate_update_policy_diff(ig_tmpl_with_updt_policy_1,
self.validate_update_policy_diff(ig_tmpl_with_updt_policy,
ig_tmpl_without_updt_policy)
def test_instance_group_update(self):
def update_instance_group(self, init_template, updt_template,
num_updates_expected_on_updt,
num_creates_expected_on_updt,
num_deletes_expected_on_updt,
update_replace):
# NOTE(review): this region is a rendered unified diff — the removed
# `test_instance_group_update` def line and several removed statements
# are interleaved with the added `update_instance_group` helper; only
# the added lines are live code. TODO confirm against committed file.
"""
Create a stack from init_template, update it with updt_template, and
assert the expected counts of in-place updates, creates and deletes.
"""
# setup stack from the initial template
tmpl = template_format.parse(ig_tmpl_with_updt_policy_1)
tmpl = template_format.parse(init_template)
stack = utils.parse_stack(tmpl)
nested = stack['JobServerGroup'].nested()
stack.validate()
# test stack create
# test the number of instance creation
# test that physical resource name of launch configuration is used
size = int(stack['JobServerGroup'].properties['Size'])
self._stub_create(size)
self._stub_grp_create(size)
self.m.ReplayAll()
stack.create()
self.m.VerifyAll()
self.assertEqual(stack.state, ('CREATE', 'COMPLETE'))
conf = stack['JobServerConfig']
conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
regex_pattern = re.compile(conf_name_pattern)
self.assertTrue(regex_pattern.match(conf.FnGetRefId()))
nested = stack['JobServerGroup'].nested()
self.assertTrue(len(nested), size)
# test stack update
# test that update policy is updated
# test that launch configuration is replaced
# test that update policy is loaded
current_grp = stack['JobServerGroup']
self.assertTrue('RollingUpdate' in current_grp.update_policy)
current_policy = current_grp.update_policy['RollingUpdate']
self.assertTrue(current_policy and len(current_policy) > 0)
self.assertEqual(int(current_policy['MaxBatchSize']), 3)
self.assertTrue(current_policy)
self.assertTrue(len(current_policy) > 0)
init_grp_tmpl = tmpl['Resources']['JobServerGroup']
init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
init_batch_sz = int(init_roll_updt['MaxBatchSize'])
self.assertEqual(int(current_policy['MaxBatchSize']), init_batch_sz)
# test that physical resource name of launch configuration is used
conf = stack['JobServerConfig']
conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))
# get launch conf name here to compare result after update
conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')
updated_tmpl = template_format.parse(ig_tmpl_with_updt_policy_2)
# test the number of instances created
nested = stack['JobServerGroup'].nested()
self.assertEqual(len(nested.resources), size)
# clean up for next test
self.m.UnsetStubs()
# saves info from initial list of instances for comparison later
init_instances = current_grp.get_instances()
init_names = current_grp.get_instance_names()
init_images = [(i.name, i.t['Properties']['ImageId'])
for i in init_instances]
init_flavors = [(i.name, i.t['Properties']['InstanceType'])
for i in init_instances]
# test stack update
updated_tmpl = template_format.parse(updt_template)
updated_stack = utils.parse_stack(updated_tmpl)
new_grp_tmpl = updated_tmpl['Resources']['JobServerGroup']
new_roll_updt = new_grp_tmpl['UpdatePolicy']['RollingUpdate']
new_batch_sz = int(new_roll_updt['MaxBatchSize'])
self.assertNotEqual(new_batch_sz, init_batch_sz)
# Replacement goes through create/destroy stubs; in-place update goes
# through the nova resize stubs.
if update_replace:
self._stub_grp_replace(size, size)
else:
self._stub_grp_update(num_creates_expected_on_updt,
num_deletes_expected_on_updt)
self.m.ReplayAll()
stack.update(updated_stack)
self.m.VerifyAll()
self.assertEqual(stack.state, ('UPDATE', 'COMPLETE'))
# test that the update policy is updated
updated_grp = stack['JobServerGroup']
self.assertTrue('RollingUpdate' in updated_grp.update_policy)
updated_policy = updated_grp.update_policy['RollingUpdate']
self.assertTrue(updated_policy and len(updated_policy) > 0)
self.assertEqual(int(updated_policy['MaxBatchSize']), 5)
self.assertTrue(updated_policy)
self.assertTrue(len(updated_policy) > 0)
self.assertEqual(int(updated_policy['MaxBatchSize']), new_batch_sz)
# test that the launch configuration is replaced
updated_conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')
self.assertNotEqual(conf_name, updated_conf_name)
# test that the group size are the same
updt_instances = updated_grp.get_instances()
updt_names = updated_grp.get_instance_names()
self.assertEqual(len(updt_names), len(init_names))
# test that the appropriate number of instance names are the same
matched_names = set(updt_names) & set(init_names)
self.assertEqual(len(matched_names), num_updates_expected_on_updt)
# test that the appropriate number of new instances are created
self.assertEqual(len(set(updt_names) - set(init_names)),
num_creates_expected_on_updt)
# test that the appropriate number of instances are deleted
self.assertEqual(len(set(init_names) - set(updt_names)),
num_deletes_expected_on_updt)
# test that the older instances are the ones being deleted
if num_deletes_expected_on_updt > 0:
deletes_expected = init_names[:num_deletes_expected_on_updt]
self.assertNotIn(deletes_expected, updt_names)
# test if instances are updated
if update_replace:
# test that the image id is changed for all instances
updt_images = [(i.name, i.t['Properties']['ImageId'])
for i in updt_instances]
self.assertEqual(len(set(updt_images) & set(init_images)), 0)
else:
# test that instance type is changed for all instances
updt_flavors = [(i.name, i.t['Properties']['InstanceType'])
for i in updt_instances]
self.assertEqual(len(set(updt_flavors) & set(init_flavors)), 0)
def test_instance_group_update_replace(self):
    """
    Test simple update replace with no conflict in batch size and
    minimum instances in service.
    """
    # Changing the ImageId forces replacement of every group member.
    parsed = json.loads(ig_tmpl_with_updt_policy)
    resources = parsed['Resources']
    rolling = resources['JobServerGroup']['UpdatePolicy']['RollingUpdate']
    rolling['MinInstancesInService'] = '1'
    rolling['MaxBatchSize'] = '3'
    resources['JobServerConfig']['Properties']['ImageId'] = 'bar'
    self.update_instance_group(ig_tmpl_with_updt_policy,
                               json.dumps(parsed),
                               num_updates_expected_on_updt=10,
                               num_creates_expected_on_updt=0,
                               num_deletes_expected_on_updt=0,
                               update_replace=True)
def test_instance_group_update_replace_with_adjusted_capacity(self):
    """
    Test update replace with capacity adjustment due to conflict in
    batch size and minimum instances in service.
    """
    # MinInstancesInService (8) + MaxBatchSize (4) exceeds the group
    # size, so capacity is temporarily adjusted during the update.
    parsed = json.loads(ig_tmpl_with_updt_policy)
    resources = parsed['Resources']
    rolling = resources['JobServerGroup']['UpdatePolicy']['RollingUpdate']
    rolling['MinInstancesInService'] = '8'
    rolling['MaxBatchSize'] = '4'
    resources['JobServerConfig']['Properties']['ImageId'] = 'bar'
    self.update_instance_group(ig_tmpl_with_updt_policy,
                               json.dumps(parsed),
                               num_updates_expected_on_updt=8,
                               num_creates_expected_on_updt=2,
                               num_deletes_expected_on_updt=2,
                               update_replace=True)
def test_instance_group_update_replace_huge_batch_size(self):
    """
    Test update replace with a huge batch size.
    """
    # A batch size larger than the group should be clamped to the
    # group size, replacing everything in one pass.
    parsed = json.loads(ig_tmpl_with_updt_policy)
    resources = parsed['Resources']
    rolling = resources['JobServerGroup']['UpdatePolicy']['RollingUpdate']
    rolling['MinInstancesInService'] = '0'
    rolling['MaxBatchSize'] = '20'
    resources['JobServerConfig']['Properties']['ImageId'] = 'bar'
    self.update_instance_group(ig_tmpl_with_updt_policy,
                               json.dumps(parsed),
                               num_updates_expected_on_updt=10,
                               num_creates_expected_on_updt=0,
                               num_deletes_expected_on_updt=0,
                               update_replace=True)
def test_instance_group_update_replace_huge_min_in_service(self):
    """
    Test update replace with a huge number of minimum instances in service.
    """
    # MinInstancesInService far above the group size forces extra
    # temporary instances (one create + one delete) during the update.
    parsed = json.loads(ig_tmpl_with_updt_policy)
    resources = parsed['Resources']
    rolling = resources['JobServerGroup']['UpdatePolicy']['RollingUpdate']
    rolling['MinInstancesInService'] = '20'
    rolling['MaxBatchSize'] = '1'
    rolling['PauseTime'] = 'PT0S'
    resources['JobServerConfig']['Properties']['ImageId'] = 'bar'
    self.update_instance_group(ig_tmpl_with_updt_policy,
                               json.dumps(parsed),
                               num_updates_expected_on_updt=9,
                               num_creates_expected_on_updt=1,
                               num_deletes_expected_on_updt=1,
                               update_replace=True)
def test_instance_group_update_no_replace(self):
    """
    Test simple update only and no replace (i.e. updated instance flavor
    in Launch Configuration) with no conflict in batch size and
    minimum instances in service.
    """
    # json.loads builds a brand-new dict from the template string, and
    # str is immutable, so the former copy.deepcopy() wrapper was a
    # no-op; dropped for consistency with the update-replace tests.
    updt_template = json.loads(ig_tmpl_with_updt_policy)
    group = updt_template['Resources']['JobServerGroup']
    policy = group['UpdatePolicy']['RollingUpdate']
    policy['MinInstancesInService'] = '1'
    policy['MaxBatchSize'] = '3'
    policy['PauseTime'] = 'PT0S'
    config = updt_template['Resources']['JobServerConfig']
    # Flavor change only: instances are resized in place, not replaced.
    config['Properties']['InstanceType'] = 'm1.large'
    self.update_instance_group(ig_tmpl_with_updt_policy,
                               json.dumps(updt_template),
                               num_updates_expected_on_updt=10,
                               num_creates_expected_on_updt=0,
                               num_deletes_expected_on_updt=0,
                               update_replace=False)
def test_instance_group_update_no_replace_with_adjusted_capacity(self):
    """
    Test update only and no replace (i.e. updated instance flavor in
    Launch Configuration) with capacity adjustment due to conflict in
    batch size and minimum instances in service.
    """
    # json.loads builds a brand-new dict from the template string, and
    # str is immutable, so the former copy.deepcopy() wrapper was a
    # no-op; dropped for consistency with the update-replace tests.
    updt_template = json.loads(ig_tmpl_with_updt_policy)
    group = updt_template['Resources']['JobServerGroup']
    policy = group['UpdatePolicy']['RollingUpdate']
    policy['MinInstancesInService'] = '8'
    policy['MaxBatchSize'] = '4'
    policy['PauseTime'] = 'PT0S'
    config = updt_template['Resources']['JobServerConfig']
    # Flavor change only: instances are resized in place, not replaced.
    config['Properties']['InstanceType'] = 'm1.large'
    self.update_instance_group(ig_tmpl_with_updt_policy,
                               json.dumps(updt_template),
                               num_updates_expected_on_updt=8,
                               num_creates_expected_on_updt=2,
                               num_deletes_expected_on_updt=2,
                               update_replace=False)
def test_instance_group_update_policy_removed(self):
# NOTE(review): this region is a rendered unified diff; removed and
# added versions of several lines both appear below — only the added
# lines are live code. TODO confirm against the committed file.
# setup stack from the initial template
tmpl = template_format.parse(ig_tmpl_with_updt_policy_1)
tmpl = template_format.parse(ig_tmpl_with_updt_policy)
stack = utils.parse_stack(tmpl)
nested = stack['JobServerGroup'].nested()
# test stack create
# test the number of instance creation
# test that physical resource name of launch configuration is used
size = int(stack['JobServerGroup'].properties['Size'])
self._stub_create(size)
self._stub_grp_create(size)
self.m.ReplayAll()
stack.create()
self.m.VerifyAll()
self.assertEqual(stack.state, ('CREATE', 'COMPLETE'))
conf = stack['JobServerConfig']
conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
regex_pattern = re.compile(conf_name_pattern)
self.assertTrue(regex_pattern.match(conf.FnGetRefId()))
nested = stack['JobServerGroup'].nested()
self.assertTrue(len(nested), size)
# test stack update
# test that update policy is removed
# test that update policy is loaded
current_grp = stack['JobServerGroup']
self.assertTrue('RollingUpdate' in current_grp.update_policy)
current_policy = current_grp.update_policy['RollingUpdate']
self.assertTrue(current_policy and len(current_policy) > 0)
self.assertEqual(int(current_policy['MaxBatchSize']), 3)
self.assertTrue(current_policy)
self.assertTrue(len(current_policy) > 0)
init_grp_tmpl = tmpl['Resources']['JobServerGroup']
init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
init_batch_sz = int(init_roll_updt['MaxBatchSize'])
self.assertEqual(int(current_policy['MaxBatchSize']), init_batch_sz)
# test that physical resource name of launch configuration is used
conf = stack['JobServerConfig']
conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))
# test the number of instances created
nested = stack['JobServerGroup'].nested()
self.assertEqual(len(nested.resources), size)
# test stack update
updated_tmpl = template_format.parse(ig_tmpl_without_updt_policy)
updated_stack = utils.parse_stack(updated_tmpl)
stack.update(updated_stack)
self.assertEqual(stack.state, ('UPDATE', 'COMPLETE'))
# test that update policy is removed
updated_grp = stack['JobServerGroup']
self.assertFalse(updated_grp.update_policy['RollingUpdate'])
def test_instance_group_update_policy_check_timeout(self):
    """
    Test that a stack update fails when the rolling-update PauseTime
    per batch cannot fit within the remaining stack update timeout,
    and that the expected error message is reported.
    """
    # setup stack from the initial template
    tmpl = template_format.parse(ig_tmpl_with_updt_policy)
    stack = utils.parse_stack(tmpl)

    # test stack create
    size = int(stack['JobServerGroup'].properties['Size'])
    self._stub_grp_create(size)
    self.m.ReplayAll()
    stack.create()
    self.m.VerifyAll()
    self.assertEqual(stack.state, ('CREATE', 'COMPLETE'))

    # test that update policy is loaded
    current_grp = stack['JobServerGroup']
    self.assertTrue('RollingUpdate' in current_grp.update_policy)
    current_policy = current_grp.update_policy['RollingUpdate']
    self.assertTrue(current_policy)
    self.assertTrue(len(current_policy) > 0)
    init_grp_tmpl = tmpl['Resources']['JobServerGroup']
    init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
    init_batch_sz = int(init_roll_updt['MaxBatchSize'])
    self.assertEqual(int(current_policy['MaxBatchSize']), init_batch_sz)

    # test the number of instances created
    nested = stack['JobServerGroup'].nested()
    self.assertEqual(len(nested.resources), size)

    # clean up for next test
    self.m.UnsetStubs()

    # modify the pause time and test for error
    new_pause_time = 'PT30M'
    # json.loads builds a brand-new dict from the template string, and
    # str is immutable, so the former copy.deepcopy() wrapper was a
    # no-op and has been dropped.
    updt_template = json.loads(ig_tmpl_with_updt_policy)
    group = updt_template['Resources']['JobServerGroup']
    policy = group['UpdatePolicy']['RollingUpdate']
    policy['PauseTime'] = new_pause_time
    config = updt_template['Resources']['JobServerConfig']
    config['Properties']['ImageId'] = 'bar'
    updated_tmpl = template_format.parse(json.dumps(updt_template))
    updated_stack = utils.parse_stack(updated_tmpl)
    stack.update(updated_stack)
    self.assertEqual(stack.state, ('UPDATE', 'FAILED'))
    # test that the update policy is updated
    updated_grp = stack['JobServerGroup']
    self.assertTrue('RollingUpdate' in updated_grp.update_policy)
    updated_policy = updated_grp.update_policy['RollingUpdate']
    self.assertTrue(updated_policy)
    self.assertTrue(len(updated_policy) > 0)
    self.assertEqual(updated_policy['PauseTime'], new_pause_time)
    # test that error message match
    expected_error_message = ('The current UpdatePolicy will result '
                              'in stack update timeout.')
    self.assertIn(expected_error_message, stack.status_reason)