Make the update policy timeout check into a unit test
There are a lot of repeated tests here; the one that looks unique is the one that tests a timed-out replacement. Since the timeout check is an instance group method, the test is moved to test_instance_group and reworked so that it no longer needs to create a stack. Part of blueprint decouple-nested Change-Id: I4406ee772568ea24edfb34e43891f600e9de4573
This commit is contained in:
parent
f2450abda0
commit
33f26722b9
@ -11,23 +11,16 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
import copy
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
import mox
|
import mock
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
import six
|
import six
|
||||||
from testtools import matchers
|
|
||||||
|
|
||||||
from heat.common import exception
|
from heat.common import exception
|
||||||
from heat.common import template_format
|
from heat.common import template_format
|
||||||
from heat.engine.clients.os import nova
|
|
||||||
from heat.engine import function
|
from heat.engine import function
|
||||||
from heat.engine.notification import stack as notification
|
from heat.engine import rsrc_defn
|
||||||
from heat.engine import parser
|
|
||||||
from heat.engine.resources.aws import wait_condition_handle as aws_wch
|
|
||||||
from heat.engine.resources import instance
|
|
||||||
from heat.engine.resources import loadbalancer as lb
|
|
||||||
from heat.tests import common
|
from heat.tests import common
|
||||||
from heat.tests import utils
|
from heat.tests import utils
|
||||||
from heat.tests.v1_1 import fakes as fakes_v1_1
|
from heat.tests.v1_1 import fakes as fakes_v1_1
|
||||||
@ -210,107 +203,6 @@ class AutoScalingGroupTest(common.HeatTestCase):
|
|||||||
cfg.CONF.set_default('heat_waitcondition_server_url',
|
cfg.CONF.set_default('heat_waitcondition_server_url',
|
||||||
'http://127.0.0.1:8000/v1/waitcondition')
|
'http://127.0.0.1:8000/v1/waitcondition')
|
||||||
|
|
||||||
def _stub_validate(self):
|
|
||||||
self.m.StubOutWithMock(parser.Stack, 'validate')
|
|
||||||
parser.Stack.validate().MultipleTimes()
|
|
||||||
|
|
||||||
def _stub_lb_create(self):
|
|
||||||
self.m.StubOutWithMock(aws_wch.WaitConditionHandle, 'get_status')
|
|
||||||
aws_wch.WaitConditionHandle.get_status().AndReturn(['SUCCESS'])
|
|
||||||
|
|
||||||
def _stub_lb_reload(self, num=1, setup=True):
|
|
||||||
if setup:
|
|
||||||
self.m.StubOutWithMock(lb.LoadBalancer, 'handle_update')
|
|
||||||
for i in range(num):
|
|
||||||
lb.LoadBalancer.handle_update(
|
|
||||||
mox.IgnoreArg(), mox.IgnoreArg(),
|
|
||||||
mox.IgnoreArg()).AndReturn(None)
|
|
||||||
|
|
||||||
def _stub_grp_create(self, capacity=0, setup_lb=True):
|
|
||||||
"""
|
|
||||||
Expect creation of instances to capacity. By default, expect creation
|
|
||||||
of load balancer unless specified.
|
|
||||||
"""
|
|
||||||
self._stub_validate()
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'handle_create')
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(notification, 'send')
|
|
||||||
notification.send(mox.IgnoreArg()).MultipleTimes().AndReturn(None)
|
|
||||||
|
|
||||||
cookie = object()
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
|
|
||||||
nova.NovaClientPlugin._create().AndReturn(self.fc)
|
|
||||||
# for load balancer setup
|
|
||||||
if setup_lb:
|
|
||||||
self._stub_lb_create()
|
|
||||||
self._stub_lb_reload()
|
|
||||||
instance.Instance.handle_create().AndReturn(cookie)
|
|
||||||
instance.Instance.check_create_complete(cookie).AndReturn(True)
|
|
||||||
|
|
||||||
# for each instance in group
|
|
||||||
for i in range(capacity):
|
|
||||||
instance.Instance.handle_create().AndReturn(cookie)
|
|
||||||
instance.Instance.check_create_complete(cookie).AndReturn(True)
|
|
||||||
|
|
||||||
def _stub_grp_replace(self,
|
|
||||||
num_creates_expected_on_updt=0,
|
|
||||||
num_deletes_expected_on_updt=0,
|
|
||||||
num_reloads_expected_on_updt=0):
|
|
||||||
"""
|
|
||||||
Expect replacement of the capacity by batch size
|
|
||||||
"""
|
|
||||||
# for load balancer setup
|
|
||||||
self._stub_lb_reload(num_reloads_expected_on_updt)
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(notification, 'send')
|
|
||||||
notification.send(mox.IgnoreArg()).MultipleTimes().AndReturn(None)
|
|
||||||
|
|
||||||
# for instances in the group
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'handle_create')
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'destroy')
|
|
||||||
|
|
||||||
cookie = object()
|
|
||||||
for i in range(num_creates_expected_on_updt):
|
|
||||||
instance.Instance.handle_create().AndReturn(cookie)
|
|
||||||
instance.Instance.check_create_complete(cookie).AndReturn(True)
|
|
||||||
for i in range(num_deletes_expected_on_updt):
|
|
||||||
instance.Instance.destroy().AndReturn(None)
|
|
||||||
|
|
||||||
def _stub_grp_update(self,
|
|
||||||
num_creates_expected_on_updt=0,
|
|
||||||
num_deletes_expected_on_updt=0,
|
|
||||||
num_reloads_expected_on_updt=0):
|
|
||||||
"""
|
|
||||||
Expect update of the instances
|
|
||||||
"""
|
|
||||||
|
|
||||||
def activate_status(server):
|
|
||||||
server.status = 'VERIFY_RESIZE'
|
|
||||||
|
|
||||||
return_server = self.fc.servers.list()[1]
|
|
||||||
return_server.id = '1234'
|
|
||||||
return_server.get = activate_status.__get__(return_server)
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(self.fc.servers, 'get')
|
|
||||||
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
|
|
||||||
|
|
||||||
self.fc.servers.get(
|
|
||||||
mox.IgnoreArg()).MultipleTimes().AndReturn(return_server)
|
|
||||||
self.fc.client.post_servers_1234_action(
|
|
||||||
body={'resize': {'flavorRef': 3}}
|
|
||||||
).MultipleTimes().AndReturn((202, None))
|
|
||||||
self.fc.client.post_servers_1234_action(
|
|
||||||
body={'confirmResize': None}
|
|
||||||
).MultipleTimes().AndReturn((202, None))
|
|
||||||
|
|
||||||
self._stub_grp_replace(num_creates_expected_on_updt,
|
|
||||||
num_deletes_expected_on_updt,
|
|
||||||
num_reloads_expected_on_updt)
|
|
||||||
|
|
||||||
def get_launch_conf_name(self, stack, ig_name):
|
def get_launch_conf_name(self, stack, ig_name):
|
||||||
return stack[ig_name].properties['LaunchConfigurationName']
|
return stack[ig_name].properties['LaunchConfigurationName']
|
||||||
|
|
||||||
@ -424,6 +316,20 @@ class AutoScalingGroupTest(common.HeatTestCase):
|
|||||||
expected = {u'UpdatePolicy': updated_policy}
|
expected = {u'UpdatePolicy': updated_policy}
|
||||||
self.assertEqual(expected, tmpl_diff)
|
self.assertEqual(expected, tmpl_diff)
|
||||||
|
|
||||||
|
# test application of the new update policy in handle_update
|
||||||
|
update_snippet = rsrc_defn.ResourceDefinition(
|
||||||
|
current_grp.name,
|
||||||
|
current_grp.type(),
|
||||||
|
properties=updated_grp.t['Properties'],
|
||||||
|
update_policy=updated_policy)
|
||||||
|
current_grp._try_rolling_update = mock.MagicMock()
|
||||||
|
current_grp.adjust = mock.MagicMock()
|
||||||
|
current_grp.handle_update(update_snippet, tmpl_diff, None)
|
||||||
|
if updated_policy is None:
|
||||||
|
self.assertEqual({}, current_grp.update_policy.data)
|
||||||
|
else:
|
||||||
|
self.assertEqual(updated_policy, current_grp.update_policy.data)
|
||||||
|
|
||||||
def test_update_policy_added(self):
|
def test_update_policy_added(self):
|
||||||
self.validate_update_policy_diff(asg_tmpl_without_updt_policy,
|
self.validate_update_policy_diff(asg_tmpl_without_updt_policy,
|
||||||
asg_tmpl_with_updt_policy)
|
asg_tmpl_with_updt_policy)
|
||||||
@ -441,139 +347,3 @@ class AutoScalingGroupTest(common.HeatTestCase):
|
|||||||
def test_update_policy_removed(self):
|
def test_update_policy_removed(self):
|
||||||
self.validate_update_policy_diff(asg_tmpl_with_updt_policy,
|
self.validate_update_policy_diff(asg_tmpl_with_updt_policy,
|
||||||
asg_tmpl_without_updt_policy)
|
asg_tmpl_without_updt_policy)
|
||||||
|
|
||||||
def test_autoscaling_group_update_policy_removed(self):
|
|
||||||
|
|
||||||
# setup stack from the initial template
|
|
||||||
tmpl = template_format.parse(asg_tmpl_with_updt_policy)
|
|
||||||
stack = utils.parse_stack(tmpl)
|
|
||||||
self.stub_ImageConstraint_validate()
|
|
||||||
self.stub_KeypairConstraint_validate()
|
|
||||||
self.stub_FlavorConstraint_validate()
|
|
||||||
self.m.ReplayAll()
|
|
||||||
|
|
||||||
stack.validate()
|
|
||||||
self.m.VerifyAll()
|
|
||||||
self.m.UnsetStubs()
|
|
||||||
|
|
||||||
# test stack create
|
|
||||||
size = int(stack['WebServerGroup'].properties['MinSize'])
|
|
||||||
self._stub_grp_create(size)
|
|
||||||
self.stub_ImageConstraint_validate()
|
|
||||||
self.stub_FlavorConstraint_validate()
|
|
||||||
self.m.ReplayAll()
|
|
||||||
stack.create()
|
|
||||||
self.m.VerifyAll()
|
|
||||||
self.assertEqual(('CREATE', 'COMPLETE'), stack.state)
|
|
||||||
|
|
||||||
# test that update policy is loaded
|
|
||||||
current_grp = stack['WebServerGroup']
|
|
||||||
self.assertIn('AutoScalingRollingUpdate', current_grp.update_policy)
|
|
||||||
current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
|
|
||||||
self.assertTrue(current_policy)
|
|
||||||
self.assertTrue(len(current_policy) > 0)
|
|
||||||
init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
|
|
||||||
init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
|
|
||||||
init_batch_sz = int(init_roll_updt['MaxBatchSize'])
|
|
||||||
self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))
|
|
||||||
|
|
||||||
# test that physical resource name of launch configuration is used
|
|
||||||
conf = stack['LaunchConfig']
|
|
||||||
conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
|
|
||||||
self.assertThat(conf.FnGetRefId(),
|
|
||||||
matchers.MatchesRegex(conf_name_pattern))
|
|
||||||
|
|
||||||
# test the number of instances created
|
|
||||||
nested = stack['WebServerGroup'].nested()
|
|
||||||
self.assertEqual(size, len(nested.resources))
|
|
||||||
|
|
||||||
# clean up for next test
|
|
||||||
self.m.UnsetStubs()
|
|
||||||
|
|
||||||
# test stack update
|
|
||||||
updated_tmpl = template_format.parse(asg_tmpl_without_updt_policy)
|
|
||||||
updated_stack = utils.parse_stack(updated_tmpl)
|
|
||||||
self._stub_grp_replace(num_creates_expected_on_updt=0,
|
|
||||||
num_deletes_expected_on_updt=0,
|
|
||||||
num_reloads_expected_on_updt=1)
|
|
||||||
self.m.ReplayAll()
|
|
||||||
stack.update(updated_stack)
|
|
||||||
self.m.VerifyAll()
|
|
||||||
self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)
|
|
||||||
|
|
||||||
# test that update policy is removed
|
|
||||||
updated_grp = stack['WebServerGroup']
|
|
||||||
self.assertFalse(updated_grp.update_policy['AutoScalingRollingUpdate'])
|
|
||||||
|
|
||||||
def test_autoscaling_group_update_policy_check_timeout(self):
|
|
||||||
|
|
||||||
# setup stack from the initial template
|
|
||||||
tmpl = template_format.parse(asg_tmpl_with_updt_policy)
|
|
||||||
stack = utils.parse_stack(tmpl)
|
|
||||||
|
|
||||||
# test stack create
|
|
||||||
size = int(stack['WebServerGroup'].properties['MinSize'])
|
|
||||||
self._stub_grp_create(size)
|
|
||||||
self.stub_ImageConstraint_validate()
|
|
||||||
self.stub_FlavorConstraint_validate()
|
|
||||||
self.m.ReplayAll()
|
|
||||||
stack.create()
|
|
||||||
self.m.VerifyAll()
|
|
||||||
self.assertEqual(('CREATE', 'COMPLETE'), stack.state)
|
|
||||||
|
|
||||||
# test that update policy is loaded
|
|
||||||
current_grp = stack['WebServerGroup']
|
|
||||||
self.assertIn('AutoScalingRollingUpdate', current_grp.update_policy)
|
|
||||||
current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
|
|
||||||
self.assertTrue(current_policy)
|
|
||||||
self.assertTrue(len(current_policy) > 0)
|
|
||||||
init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
|
|
||||||
init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
|
|
||||||
init_batch_sz = int(init_roll_updt['MaxBatchSize'])
|
|
||||||
self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))
|
|
||||||
|
|
||||||
# test the number of instances created
|
|
||||||
nested = stack['WebServerGroup'].nested()
|
|
||||||
self.assertEqual(size, len(nested.resources))
|
|
||||||
|
|
||||||
# clean up for next test
|
|
||||||
self.m.UnsetStubs()
|
|
||||||
|
|
||||||
# modify the pause time and test for error
|
|
||||||
# the following test, effective_capacity is 12
|
|
||||||
# batch_count = (effective_capacity + batch_size -1)//batch_size
|
|
||||||
# = (12 + 2 - 1)//2 = 6
|
|
||||||
# if (batch_count - 1)* pause_time > stack.time_out, to raise error
|
|
||||||
# (6 - 1)*14*60 > 3600, so to raise error
|
|
||||||
new_pause_time = 'PT14M'
|
|
||||||
min_in_service = 10
|
|
||||||
updt_template = json.loads(copy.deepcopy(asg_tmpl_with_updt_policy))
|
|
||||||
group = updt_template['Resources']['WebServerGroup']
|
|
||||||
policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
|
|
||||||
policy['PauseTime'] = new_pause_time
|
|
||||||
policy['MinInstancesInService'] = min_in_service
|
|
||||||
config = updt_template['Resources']['LaunchConfig']
|
|
||||||
config['Properties']['ImageId'] = 'F17-x86_64-cfntools'
|
|
||||||
updated_tmpl = template_format.parse(json.dumps(updt_template))
|
|
||||||
updated_stack = utils.parse_stack(updated_tmpl)
|
|
||||||
|
|
||||||
self.stub_KeypairConstraint_validate()
|
|
||||||
self.stub_ImageConstraint_validate()
|
|
||||||
self.stub_FlavorConstraint_validate()
|
|
||||||
self.m.ReplayAll()
|
|
||||||
stack.update(updated_stack)
|
|
||||||
self.m.VerifyAll()
|
|
||||||
self.assertEqual(('UPDATE', 'FAILED'), stack.state)
|
|
||||||
|
|
||||||
# test that the update policy is updated
|
|
||||||
updated_grp = stack['WebServerGroup']
|
|
||||||
self.assertIn('AutoScalingRollingUpdate', updated_grp.update_policy)
|
|
||||||
updated_policy = updated_grp.update_policy['AutoScalingRollingUpdate']
|
|
||||||
self.assertTrue(updated_policy)
|
|
||||||
self.assertTrue(len(updated_policy) > 0)
|
|
||||||
self.assertEqual(new_pause_time, updated_policy['PauseTime'])
|
|
||||||
|
|
||||||
# test that error message match
|
|
||||||
expected_error_message = ('The current UpdatePolicy will result '
|
|
||||||
'in stack update timeout.')
|
|
||||||
self.assertIn(expected_error_message, stack.status_reason)
|
|
||||||
|
@ -11,18 +11,15 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
import copy
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
import mox
|
import mock
|
||||||
from testtools import matchers
|
|
||||||
|
|
||||||
from heat.common import exception
|
from heat.common import exception
|
||||||
from heat.common import template_format
|
from heat.common import template_format
|
||||||
from heat.engine.clients.os import nova
|
|
||||||
from heat.engine import function
|
from heat.engine import function
|
||||||
from heat.engine import parser
|
from heat.engine.resources import instance_group as instgrp
|
||||||
from heat.engine.resources import instance
|
from heat.engine import rsrc_defn
|
||||||
from heat.tests import common
|
from heat.tests import common
|
||||||
from heat.tests import utils
|
from heat.tests import utils
|
||||||
from heat.tests.v1_1 import fakes as fakes_v1_1
|
from heat.tests.v1_1 import fakes as fakes_v1_1
|
||||||
@ -161,77 +158,6 @@ class InstanceGroupTest(common.HeatTestCase):
|
|||||||
super(InstanceGroupTest, self).setUp()
|
super(InstanceGroupTest, self).setUp()
|
||||||
self.fc = fakes_v1_1.FakeClient()
|
self.fc = fakes_v1_1.FakeClient()
|
||||||
|
|
||||||
def _stub_validate(self):
|
|
||||||
self.m.StubOutWithMock(parser.Stack, 'validate')
|
|
||||||
parser.Stack.validate().MultipleTimes()
|
|
||||||
self.stub_ImageConstraint_validate()
|
|
||||||
self.stub_KeypairConstraint_validate()
|
|
||||||
self.stub_FlavorConstraint_validate()
|
|
||||||
|
|
||||||
def _stub_grp_create(self, capacity):
|
|
||||||
"""
|
|
||||||
Expect creation of instances to capacity
|
|
||||||
"""
|
|
||||||
self._stub_validate()
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'handle_create')
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
|
|
||||||
|
|
||||||
cookie = object()
|
|
||||||
for x in range(capacity):
|
|
||||||
instance.Instance.handle_create().AndReturn(cookie)
|
|
||||||
instance.Instance.check_create_complete(cookie).AndReturn(True)
|
|
||||||
|
|
||||||
def _stub_grp_replace(self,
|
|
||||||
num_creates_expected_on_updt=0,
|
|
||||||
num_deletes_expected_on_updt=0):
|
|
||||||
"""
|
|
||||||
Expect update replacement of the instances
|
|
||||||
"""
|
|
||||||
self._stub_validate()
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'handle_create')
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
|
|
||||||
self.m.StubOutWithMock(instance.Instance, 'destroy')
|
|
||||||
|
|
||||||
cookie = object()
|
|
||||||
for i in range(num_creates_expected_on_updt):
|
|
||||||
instance.Instance.handle_create().AndReturn(cookie)
|
|
||||||
instance.Instance.check_create_complete(cookie).AndReturn(True)
|
|
||||||
for i in range(num_deletes_expected_on_updt):
|
|
||||||
instance.Instance.destroy().AndReturn(None)
|
|
||||||
|
|
||||||
def _stub_grp_update(self,
|
|
||||||
num_creates_expected_on_updt=0,
|
|
||||||
num_deletes_expected_on_updt=0):
|
|
||||||
"""
|
|
||||||
Expect update of the instances
|
|
||||||
"""
|
|
||||||
self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
|
|
||||||
nova.NovaClientPlugin._create().AndReturn(self.fc)
|
|
||||||
|
|
||||||
def activate_status(server):
|
|
||||||
server.status = 'VERIFY_RESIZE'
|
|
||||||
|
|
||||||
return_server = self.fc.servers.list()[1]
|
|
||||||
return_server.id = '1234'
|
|
||||||
return_server.get = activate_status.__get__(return_server)
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(self.fc.servers, 'get')
|
|
||||||
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
|
|
||||||
|
|
||||||
self.fc.servers.get(
|
|
||||||
mox.IgnoreArg()).MultipleTimes().AndReturn(return_server)
|
|
||||||
self.fc.client.post_servers_1234_action(
|
|
||||||
body={'resize': {'flavorRef': 3}}
|
|
||||||
).MultipleTimes().AndReturn((202, None))
|
|
||||||
self.fc.client.post_servers_1234_action(
|
|
||||||
body={'confirmResize': None}
|
|
||||||
).MultipleTimes().AndReturn((202, None))
|
|
||||||
|
|
||||||
self._stub_grp_replace(num_creates_expected_on_updt,
|
|
||||||
num_deletes_expected_on_updt)
|
|
||||||
|
|
||||||
def get_launch_conf_name(self, stack, ig_name):
|
def get_launch_conf_name(self, stack, ig_name):
|
||||||
return stack[ig_name].properties['LaunchConfigurationName']
|
return stack[ig_name].properties['LaunchConfigurationName']
|
||||||
|
|
||||||
@ -339,6 +265,20 @@ class InstanceGroupTest(common.HeatTestCase):
|
|||||||
expected = {u'UpdatePolicy': updated_policy}
|
expected = {u'UpdatePolicy': updated_policy}
|
||||||
self.assertEqual(expected, tmpl_diff)
|
self.assertEqual(expected, tmpl_diff)
|
||||||
|
|
||||||
|
# test application of the new update policy in handle_update
|
||||||
|
update_snippet = rsrc_defn.ResourceDefinition(
|
||||||
|
current_grp.name,
|
||||||
|
current_grp.type(),
|
||||||
|
properties=updated_grp.t['Properties'],
|
||||||
|
update_policy=updated_policy)
|
||||||
|
current_grp._try_rolling_update = mock.MagicMock()
|
||||||
|
current_grp.resize = mock.MagicMock()
|
||||||
|
current_grp.handle_update(update_snippet, tmpl_diff, None)
|
||||||
|
if updated_policy is None:
|
||||||
|
self.assertEqual({}, current_grp.update_policy.data)
|
||||||
|
else:
|
||||||
|
self.assertEqual(updated_policy, current_grp.update_policy.data)
|
||||||
|
|
||||||
def test_update_policy_added(self):
|
def test_update_policy_added(self):
|
||||||
self.validate_update_policy_diff(ig_tmpl_without_updt_policy,
|
self.validate_update_policy_diff(ig_tmpl_without_updt_policy,
|
||||||
ig_tmpl_with_updt_policy)
|
ig_tmpl_with_updt_policy)
|
||||||
@ -357,112 +297,26 @@ class InstanceGroupTest(common.HeatTestCase):
|
|||||||
self.validate_update_policy_diff(ig_tmpl_with_updt_policy,
|
self.validate_update_policy_diff(ig_tmpl_with_updt_policy,
|
||||||
ig_tmpl_without_updt_policy)
|
ig_tmpl_without_updt_policy)
|
||||||
|
|
||||||
def test_instance_group_update_policy_removed(self):
|
|
||||||
|
|
||||||
# setup stack from the initial template
|
class InstanceGroupReplaceTest(common.HeatTestCase):
|
||||||
tmpl = template_format.parse(ig_tmpl_with_updt_policy)
|
def test_timeout_exception(self):
|
||||||
stack = utils.parse_stack(tmpl)
|
t = template_format.parse(ig_tmpl_with_updt_policy)
|
||||||
|
stack = utils.parse_stack(t)
|
||||||
|
|
||||||
# test stack create
|
defn = rsrc_defn.ResourceDefinition(
|
||||||
size = int(stack['JobServerGroup'].properties['Size'])
|
'asg', 'OS::Heat::InstanceGroup',
|
||||||
self._stub_grp_create(size)
|
{'Size': 2,
|
||||||
self.m.ReplayAll()
|
'AvailabilityZones': ['zoneb'],
|
||||||
stack.create()
|
"LaunchConfigurationName": "LaunchConfig",
|
||||||
self.m.VerifyAll()
|
"LoadBalancerNames": ["ElasticLoadBalancer"]})
|
||||||
self.assertEqual(('CREATE', 'COMPLETE'), stack.state)
|
|
||||||
|
|
||||||
# test that update policy is loaded
|
# the following test, effective_capacity is 12
|
||||||
current_grp = stack['JobServerGroup']
|
# batch_count = (effective_capacity + batch_size -1)//batch_size
|
||||||
self.assertIn('RollingUpdate', current_grp.update_policy)
|
# = (12 + 2 - 1)//2 = 6
|
||||||
current_policy = current_grp.update_policy['RollingUpdate']
|
# if (batch_count - 1)* pause_time > stack.time_out, to raise error
|
||||||
self.assertTrue(current_policy)
|
# (6 - 1)*14*60 > 3600, so to raise error
|
||||||
self.assertTrue(len(current_policy) > 0)
|
|
||||||
init_grp_tmpl = tmpl['Resources']['JobServerGroup']
|
|
||||||
init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
|
|
||||||
init_batch_sz = int(init_roll_updt['MaxBatchSize'])
|
|
||||||
self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))
|
|
||||||
|
|
||||||
# test that physical resource name of launch configuration is used
|
group = instgrp.InstanceGroup('asg', defn, stack)
|
||||||
conf = stack['JobServerConfig']
|
group.nested = mock.MagicMock(return_value=range(12))
|
||||||
conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
|
self.assertRaises(ValueError,
|
||||||
self.assertThat(conf.FnGetRefId(),
|
group._replace, 10, 1, 14 * 60)
|
||||||
matchers.MatchesRegex(conf_name_pattern))
|
|
||||||
|
|
||||||
# test the number of instances created
|
|
||||||
nested = stack['JobServerGroup'].nested()
|
|
||||||
self.assertEqual(size, len(nested.resources))
|
|
||||||
|
|
||||||
# test stack update
|
|
||||||
updated_tmpl = template_format.parse(ig_tmpl_without_updt_policy)
|
|
||||||
updated_stack = utils.parse_stack(updated_tmpl)
|
|
||||||
stack.update(updated_stack)
|
|
||||||
self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)
|
|
||||||
|
|
||||||
# test that update policy is removed
|
|
||||||
updated_grp = stack['JobServerGroup']
|
|
||||||
self.assertFalse(updated_grp.update_policy['RollingUpdate'])
|
|
||||||
|
|
||||||
def test_instance_group_update_policy_check_timeout(self):
|
|
||||||
|
|
||||||
# setup stack from the initial template
|
|
||||||
tmpl = template_format.parse(ig_tmpl_with_updt_policy)
|
|
||||||
stack = utils.parse_stack(tmpl)
|
|
||||||
|
|
||||||
# test stack create
|
|
||||||
size = int(stack['JobServerGroup'].properties['Size'])
|
|
||||||
self._stub_grp_create(size)
|
|
||||||
self.m.ReplayAll()
|
|
||||||
stack.create()
|
|
||||||
self.m.VerifyAll()
|
|
||||||
self.assertEqual(('CREATE', 'COMPLETE'), stack.state)
|
|
||||||
|
|
||||||
# test that update policy is loaded
|
|
||||||
current_grp = stack['JobServerGroup']
|
|
||||||
self.assertIn('RollingUpdate', current_grp.update_policy)
|
|
||||||
current_policy = current_grp.update_policy['RollingUpdate']
|
|
||||||
self.assertTrue(current_policy)
|
|
||||||
self.assertTrue(len(current_policy) > 0)
|
|
||||||
init_grp_tmpl = tmpl['Resources']['JobServerGroup']
|
|
||||||
init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
|
|
||||||
init_batch_sz = int(init_roll_updt['MaxBatchSize'])
|
|
||||||
self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))
|
|
||||||
|
|
||||||
# test the number of instances created
|
|
||||||
nested = stack['JobServerGroup'].nested()
|
|
||||||
self.assertEqual(size, len(nested.resources))
|
|
||||||
|
|
||||||
# clean up for next test
|
|
||||||
self.m.UnsetStubs()
|
|
||||||
|
|
||||||
# modify the pause time and test for error
|
|
||||||
new_pause_time = 'PT30M'
|
|
||||||
updt_template = json.loads(copy.deepcopy(ig_tmpl_with_updt_policy))
|
|
||||||
group = updt_template['Resources']['JobServerGroup']
|
|
||||||
policy = group['UpdatePolicy']['RollingUpdate']
|
|
||||||
policy['PauseTime'] = new_pause_time
|
|
||||||
config = updt_template['Resources']['JobServerConfig']
|
|
||||||
config['Properties']['ImageId'] = 'bar'
|
|
||||||
updated_tmpl = template_format.parse(json.dumps(updt_template))
|
|
||||||
updated_stack = utils.parse_stack(updated_tmpl)
|
|
||||||
|
|
||||||
self.stub_KeypairConstraint_validate()
|
|
||||||
self.stub_ImageConstraint_validate()
|
|
||||||
self.stub_FlavorConstraint_validate()
|
|
||||||
self.m.ReplayAll()
|
|
||||||
stack.update(updated_stack)
|
|
||||||
self.assertEqual(('UPDATE', 'FAILED'), stack.state)
|
|
||||||
|
|
||||||
# test that the update policy is updated
|
|
||||||
updated_grp = stack['JobServerGroup']
|
|
||||||
self.assertIn('RollingUpdate', updated_grp.update_policy)
|
|
||||||
updated_policy = updated_grp.update_policy['RollingUpdate']
|
|
||||||
self.assertTrue(updated_policy)
|
|
||||||
self.assertTrue(len(updated_policy) > 0)
|
|
||||||
self.assertEqual(new_pause_time, updated_policy['PauseTime'])
|
|
||||||
|
|
||||||
# test that error message match
|
|
||||||
expected_error_message = ('The current UpdatePolicy will result '
|
|
||||||
'in stack update timeout.')
|
|
||||||
self.assertIn(expected_error_message, stack.status_reason)
|
|
||||||
|
|
||||||
self.m.VerifyAll()
|
|
||||||
|
Loading…
Reference in New Issue
Block a user