Add Heat scenario to test scaling policies

The scenario triggers scaling policy webhooks and tracks their
performance.

Change-Id: I82270c1ceb2bf99254581b0b57f152a1361298d2
Chris St. Pierre
2015-06-16 09:07:56 -05:00
parent 434e8173fa
commit 65b3c5fff2
9 changed files with 309 additions and 1 deletion
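At its core, the scenario creates a stack from an autoscaling template, POSTs to the scaling webhook published in the stack's outputs, and then waits for the group to change size by the expected delta. A minimal standalone sketch of the webhook step follows (a sketch only; the output list and URL are illustrative placeholders, not part of this change):

import requests

def trigger_scaling_webhook(outputs, output_key):
    # Find the alarm URL published under output_key and POST to it;
    # Heat then applies the ScalingPolicy bound to that URL.
    for output in outputs:
        if output["output_key"] == output_key:
            requests.post(output["output_value"]).raise_for_status()
            return
    raise KeyError("no stack output named %s" % output_key)

# Illustrative usage (placeholder URL):
# trigger_scaling_webhook(
#     [{"output_key": "scaling_url",
#       "output_value": "http://heat.example:8000/v1/signal"}],
#     "scaling_url")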

View File

@@ -220,6 +220,13 @@
# Time interval(in sec) between checks when waiting for stack to be restored.
#heat_stack_restore_poll_interval = 1.0
# Time (in sec) to wait for stack to scale up or down. (floating point value)
#heat_stack_scale_timeout = 3600.0
# Time interval (in sec) between checks when waiting for a stack to
# scale up or down. (floating point value)
#heat_stack_scale_poll_interval = 1.0
# Delay between creating Manila share and polling for its status.
# (floating point value)
#manila_share_create_prepoll_delay = 2.0

View File

@@ -0,0 +1,46 @@
heat_template_version: 2013-05-23

parameters:
  flavor:
    type: string
    default: m1.tiny
    constraints:
      - custom_constraint: nova.flavor
  image:
    type: string
    default: cirros-0.3.4-x86_64-uec
    constraints:
      - custom_constraint: glance.image
  scaling_adjustment:
    type: number
    default: 1
  max_size:
    type: number
    default: 5
    constraints:
      - range: {min: 1}

resources:
  asg:
    type: OS::Heat::AutoScalingGroup
    properties:
      resource:
        type: OS::Nova::Server
        properties:
          image: { get_param: image }
          flavor: { get_param: flavor }
      min_size: 1
      desired_capacity: 3
      max_size: { get_param: max_size }

  scaling_policy:
    type: OS::Heat::ScalingPolicy
    properties:
      adjustment_type: change_in_capacity
      auto_scaling_group_id: {get_resource: asg}
      scaling_adjustment: { get_param: scaling_adjustment }

outputs:
  scaling_url:
    value: {get_attr: [scaling_policy, alarm_url]}
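The scaling_url output above is the webhook URL the scenario POSTs to. Once the stack is created it appears in the stack's outputs list on the client side; a rough sketch of extracting it, assuming heat_client is an already-authenticated python-heatclient Client and stack_id is known (both hypothetical here):

def get_scaling_url(heat_client, stack_id, output_key="scaling_url"):
    # Look up the stack and pull the scaling webhook out of its outputs.
    stack = heat_client.stacks.get(stack_id)
    return next(o["output_value"] for o in stack.outputs
                if o["output_key"] == output_key)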

View File

@@ -242,3 +242,41 @@
    sla:
      failure_rate:
        max: 0

  HeatStacks.create_stack_and_scale:
    -
      args:
        template_path: "/home/jenkins/.rally/extra/autoscaling_group.yaml.template"
        output_key: "scaling_url"
        delta: 1
        parameters:
          scaling_adjustment: 1
      runner:
        type: "constant"
        times: 2
        concurrency: 1
      context:
        users:
          tenants: 2
          users_per_tenant: 1
      sla:
        failure_rate:
          max: 0

    -
      args:
        template_path: "/home/jenkins/.rally/extra/autoscaling_group.yaml.template"
        output_key: "scaling_url"
        delta: -1
        parameters:
          scaling_adjustment: -1
      runner:
        type: "constant"
        times: 2
        concurrency: 2
      context:
        users:
          tenants: 2
          users_per_tenant: 1
      sla:
        failure_rate:
          max: 0

View File

@@ -140,6 +140,43 @@ class HeatStacks(utils.HeatScenario):
                                   updated_environment or environment)
        self._delete_stack(stack)

    @types.set(template_path=types.FileType, files=types.FileTypeDict)
    @validation.required_services(consts.Service.HEAT)
    @validation.required_openstack(users=True)
    @base.scenario(context={"cleanup": ["heat"]})
    def create_stack_and_scale(self, template_path, output_key, delta,
                               parameters=None, files=None,
                               environment=None):
        """Create an autoscaling stack and invoke a scaling policy.

        Measure the performance of autoscaling webhooks.

        :param template_path: path to template file that includes an
               OS::Heat::AutoScalingGroup resource
        :param output_key: the stack output key that corresponds to
               the scaling webhook
        :param delta: the number of instances the stack is expected to
               change by.
        :param parameters: parameters to use in heat template
        :param files: files used in template (dict of file name to
               file path)
        :param environment: stack environment definition (dict)
        """
        # TODO(stpierre): Kilo Heat is *much* better than Juno for the
        # requirements of this scenario, so once Juno goes out of
        # support we should update this scenario to suck less. Namely:
        #
        # * Kilo Heat can supply alarm_url attributes without needing
        #   an output key, so instead of getting the output key from
        #   the user, just get the name of the ScalingPolicy to apply.
        # * Kilo Heat changes the status of a stack while scaling it,
        #   so _scale_stack() can check for the stack to have changed
        #   size and for it to be in UPDATE_COMPLETE state, so the
        #   user no longer needs to specify the expected delta.
        stack = self._create_stack(template_path, parameters, files,
                                   environment)
        self._scale_stack(stack, output_key, delta)

    @types.set(template_path=types.FileType, files=types.FileTypeDict)
    @validation.required_services(consts.Service.HEAT)
    @validation.required_openstack(users=True)

View File

@@ -16,10 +16,14 @@
import time

from oslo_config import cfg
import requests

from rally.common import log as logging
from rally import exceptions
from rally.task.scenarios import base
from rally.task import utils

LOG = logging.getLogger(__name__)

HEAT_BENCHMARK_OPTS = [
    cfg.FloatOpt("heat_stack_create_prepoll_delay",
@@ -87,7 +91,14 @@ HEAT_BENCHMARK_OPTS = [
cfg.FloatOpt("heat_stack_restore_poll_interval", cfg.FloatOpt("heat_stack_restore_poll_interval",
default=1.0, default=1.0,
help="Time interval(in sec) between checks when waiting for " help="Time interval(in sec) between checks when waiting for "
"stack to be restored.") "stack to be restored."),
cfg.FloatOpt("heat_stack_scale_timeout",
default=3600.0,
help="Time (in sec) to wait for stack to scale up or down."),
cfg.FloatOpt("heat_stack_scale_poll_interval",
default=1.0,
help="Time interval (in sec) between checks when waiting for "
"a stack to scale up or down."),
] ]
CONF = cfg.CONF CONF = cfg.CONF
@@ -272,3 +283,62 @@ class HeatScenario(base.Scenario):
            timeout=CONF.benchmark.heat_stack_restore_timeout,
            check_interval=CONF.benchmark.heat_stack_restore_poll_interval
        )

    def _count_instances(self, stack):
        """Count instances in a Heat stack.

        :param stack: stack to count instances in.
        """
        return len([
            r for r in self.clients("heat").resources.list(stack.id,
                                                           nested_depth=1)
            if r.resource_type == "OS::Nova::Server"])

    def _scale_stack(self, stack, output_key, delta):
        """Scale a stack up or down.

        Calls the webhook given in the output value identified by
        'output_key', and waits for the stack size to change by
        'delta'.

        :param stack: stack to scale up or down
        :param output_key: The name of the output to get the URL from
        :param delta: The expected change in number of instances in
                      the stack (signed int)
        """
        num_instances = self._count_instances(stack)
        expected_instances = num_instances + delta
        LOG.debug("Scaling stack %s from %s to %s instances with %s" %
                  (stack.id, num_instances, expected_instances, output_key))
        with base.AtomicAction(self, "heat.scale_with_%s" % output_key):
            self._stack_webhook(stack, output_key)
            utils.wait_for(
                stack,
                is_ready=lambda s: (
                    self._count_instances(s) == expected_instances),
                update_resource=utils.get_from_manager(
                    ["UPDATE_FAILED"]),
                timeout=CONF.benchmark.heat_stack_scale_timeout,
                check_interval=CONF.benchmark.heat_stack_scale_poll_interval)

    def _stack_webhook(self, stack, output_key):
        """POST to the URL given in the output value identified by output_key.

        This can be used to scale stacks up and down, for instance.

        :param stack: stack to call a webhook on
        :param output_key: The name of the output to get the URL from
        :raises: InvalidConfigException if the output key is not found
        """
        url = None
        for output in stack.outputs:
            if output["output_key"] == output_key:
                url = output["output_value"]
                break
        else:
            raise exceptions.InvalidConfigException(
                "No output key %(key)s found in stack %(id)s" %
                {"key": output_key, "id": stack.id})

        with base.AtomicAction(self, "heat.%s_webhook" % output_key):
            requests.post(url).raise_for_status()

View File

@@ -0,0 +1,22 @@
{
    "HeatStacks.create_stack_and_scale": [
        {
            "args": {
                "template_path": "templates/autoscaling_group.yaml.template",
                "output_key": "scaling_url",
                "delta": 1
            },
            "runner": {
                "type": "constant",
                "concurrency": 2,
                "times": 3
            },
            "context": {
                "users": {
                    "users_per_tenant": 1,
                    "tenants": 2
                }
            }
        }
    ]
}

View File

@@ -0,0 +1,15 @@
---
  HeatStacks.create_stack_and_scale:
    -
      args:
        template_path: "templates/autoscaling_group.yaml.template"
        output_key: "scaling_url"
        delta: 1
      runner:
        type: "constant"
        times: 3
        concurrency: 2
      context:
        users:
          tenants: 2
          users_per_tenant: 1
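Either sample task above can be fed straight to the Rally CLI, e.g. rally task start <path-to-sample-file> (the path is a placeholder for wherever the sample file is saved; it is not fixed by this change).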

View File

@@ -143,6 +143,25 @@ class HeatStacksTestCase(test.ScenarioTestCase):
            self.default_environment)
        mock__delete_stack.assert_called_once_with(fake_stack)

    def test_create_stack_and_scale(self):
        heat_scenario = stacks.HeatStacks()
        stack = mock.Mock()
        heat_scenario._create_stack = mock.Mock(return_value=stack)
        heat_scenario._scale_stack = mock.Mock()

        heat_scenario.create_stack_and_scale(
            self.default_template, "key", -1,
            parameters=self.default_parameters,
            files=self.default_files,
            environment=self.default_environment)

        heat_scenario._create_stack.assert_called_once_with(
            self.default_template,
            self.default_parameters,
            self.default_files,
            self.default_environment)
        heat_scenario._scale_stack.assert_called_once_with(
            stack, "key", -1)

    @mock.patch(HEAT_STACKS + "._delete_stack")
    @mock.patch(HEAT_STACKS + "._resume_stack")
    @mock.patch(HEAT_STACKS + "._suspend_stack")

View File

@@ -191,6 +191,60 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "heat.restore_stack")

    def test__count_instances(self):
        self.clients("heat").resources.list.return_value = [
            mock.Mock(resource_type="OS::Nova::Server"),
            mock.Mock(resource_type="OS::Nova::Server"),
            mock.Mock(resource_type="OS::Heat::AutoScalingGroup")]
        scenario = utils.HeatScenario()
        self.assertEqual(scenario._count_instances(self.stack), 2)
        self.clients("heat").resources.list.assert_called_once_with(
            self.stack.id,
            nested_depth=1)

    def test__scale_stack(self):
        scenario = utils.HeatScenario()
        scenario._count_instances = mock.Mock(side_effect=[3, 3, 2])
        scenario._stack_webhook = mock.Mock()

        scenario._scale_stack(self.stack, "test_output_key", -1)

        scenario._stack_webhook.assert_called_once_with(self.stack,
                                                        "test_output_key")
        self.mock_wait_for.mock.assert_called_once_with(
            self.stack,
            is_ready=mock.ANY,
            update_resource=self.mock_get_from_manager.mock.return_value,
            timeout=CONF.benchmark.heat_stack_scale_timeout,
            check_interval=CONF.benchmark.heat_stack_scale_poll_interval)
        self.mock_get_from_manager.mock.assert_called_once_with(
            ["UPDATE_FAILED"])
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "heat.scale_with_test_output_key")

    @mock.patch("requests.post")
    def test_stack_webhook(self, mock_post):
        scenario = utils.HeatScenario()
        stack = mock.Mock(outputs=[
            {"output_key": "output1", "output_value": "url1"},
            {"output_key": "output2", "output_value": "url2"}])

        scenario._stack_webhook(stack, "output1")

        mock_post.assert_called_with("url1")
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "heat.output1_webhook")

    @mock.patch("requests.post")
    def test_stack_webhook_invalid_output_key(self, mock_post):
        scenario = utils.HeatScenario()
        stack = mock.Mock()
        stack.outputs = [{"output_key": "output1", "output_value": "url1"},
                         {"output_key": "output2", "output_value": "url2"}]

        self.assertRaises(exceptions.InvalidConfigException,
                          scenario._stack_webhook, stack, "bogus")


class HeatScenarioNegativeTestCase(test.ScenarioTestCase):
    patch_benchmark_utils = False