Update heat templates and openstack-aodh for gnocchi
- Support new gnocchiclient interface in openstack-aodh Ported fix from openstack-aodh 5.0.0. - Update heat templates to create gnocchi resources type of alarm instead of ceilometer - Remove all the heat templates using cfn-push-stats tool to push metrics from instance, as heat CloudWatch API/cfn-push-stats is deprecated for a long time and it was removed in openstack queens. references:http://lists.openstack.org/pipermail/openstack-dev/2017-October/123104.html https://bugs.launchpad.net/heat/+bug/1743707 - updates the scaling interval to 5 minutes in heat templates as the granularity of the archive policy we are using in gnocchi is 5 minutes. Story: 2002825 Task: 22871 Depends-On: https://review.openstack.org/587279 Change-Id: I4872c67d15065c0e7a16632390488305649f8f37 Signed-off-by: Don Penney <don.penney@windriver.com> Signed-off-by: Jack Ding <jack.ding@windriver.com>
This commit is contained in:
parent
71aaf7b58c
commit
da76605405
@ -0,0 +1,32 @@
|
|||||||
|
From fe727c7baf89dab7e67244ba9c84c71d609c0389 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Angie Wang <angie.Wang@windriver.com>
|
||||||
|
Date: Thu, 28 Jun 2018 12:30:19 -0400
|
||||||
|
Subject: [PATCH 1/1] meta support new gnocchiclient interface
|
||||||
|
|
||||||
|
---
|
||||||
|
SPECS/openstack-aodh.spec | 2 ++
|
||||||
|
1 file changed, 2 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/SPECS/openstack-aodh.spec b/SPECS/openstack-aodh.spec
|
||||||
|
index 2fa77d0..a10e9e0 100644
|
||||||
|
--- a/SPECS/openstack-aodh.spec
|
||||||
|
+++ b/SPECS/openstack-aodh.spec
|
||||||
|
@@ -23,6 +23,7 @@ Source20: aodh-expirer-active
|
||||||
|
#WRS: Include patches here:
|
||||||
|
Patch1: 0001-modify-aodh-api.patch
|
||||||
|
Patch2: 0002-Add-drivername-support-for-postgresql-connection-set.patch
|
||||||
|
+Patch3: 0003-support-new-gnocchiclient-interface.patch
|
||||||
|
|
||||||
|
BuildArch: noarch
|
||||||
|
|
||||||
|
@@ -226,6 +227,7 @@ This package contains the Aodh test files.
|
||||||
|
#WRS: Apply patches here
|
||||||
|
%patch1 -p1
|
||||||
|
%patch2 -p1
|
||||||
|
+%patch3 -p1
|
||||||
|
|
||||||
|
find . \( -name .gitignore -o -name .placeholder \) -delete
|
||||||
|
|
||||||
|
--
|
||||||
|
1.8.3.1
|
||||||
|
|
@ -4,3 +4,4 @@ meta-remove-default-logrotate.patch
|
|||||||
0001-meta-modify-aodh-api.patch
|
0001-meta-modify-aodh-api.patch
|
||||||
0001-meta-pass-aodh-api-config.patch
|
0001-meta-pass-aodh-api-config.patch
|
||||||
0006-add-drivername-for-postgresql.patch
|
0006-add-drivername-for-postgresql.patch
|
||||||
|
0007-meta-support-new-gnocchiclient-interface.patch
|
||||||
|
@ -0,0 +1,202 @@
|
|||||||
|
From 25f0c1ab7cc1d9d22df01f1cd2d6c4fb2fe8b0f2 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Angie Wang <angie.Wang@windriver.com>
|
||||||
|
Date: Thu, 28 Jun 2018 12:26:52 -0400
|
||||||
|
Subject: [PATCH 1/1] support new gnocchiclient interface
|
||||||
|
|
||||||
|
---
|
||||||
|
aodh/api/controllers/v2/alarm_rules/gnocchi.py | 37 +++++++---------------
|
||||||
|
aodh/evaluator/gnocchi.py | 22 ++++++++++---
|
||||||
|
.../functional/api/v2/test_alarm_scenarios.py | 5 ---
|
||||||
|
aodh/tests/unit/evaluator/test_gnocchi.py | 3 ++
|
||||||
|
4 files changed, 32 insertions(+), 35 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/aodh/api/controllers/v2/alarm_rules/gnocchi.py b/aodh/api/controllers/v2/alarm_rules/gnocchi.py
|
||||||
|
index 6e2e64d..419b6b0 100644
|
||||||
|
--- a/aodh/api/controllers/v2/alarm_rules/gnocchi.py
|
||||||
|
+++ b/aodh/api/controllers/v2/alarm_rules/gnocchi.py
|
||||||
|
@@ -74,9 +74,9 @@ class AlarmGnocchiThresholdRule(base.AlarmRule):
|
||||||
|
conf = pecan.request.cfg
|
||||||
|
gnocchi_client = client.Client(
|
||||||
|
'1', keystone_client.get_session(conf),
|
||||||
|
- interface=conf.service_credentials.interface,
|
||||||
|
- region_name=conf.service_credentials.region_name)
|
||||||
|
-
|
||||||
|
+ adapter_options={
|
||||||
|
+ 'interface': conf.service_credentials.interface,
|
||||||
|
+ 'region_name': conf.service_credentials.region_name})
|
||||||
|
try:
|
||||||
|
return gnocchi_client.capabilities.list().get(
|
||||||
|
'aggregation_methods', [])
|
||||||
|
@@ -105,26 +105,6 @@ class MetricOfResourceRule(AlarmGnocchiThresholdRule):
|
||||||
|
'resource_type'])
|
||||||
|
return rule
|
||||||
|
|
||||||
|
- @classmethod
|
||||||
|
- def validate_alarm(cls, alarm):
|
||||||
|
- super(MetricOfResourceRule,
|
||||||
|
- cls).validate_alarm(alarm)
|
||||||
|
-
|
||||||
|
- conf = pecan.request.cfg
|
||||||
|
- gnocchi_client = client.Client(
|
||||||
|
- '1', keystone_client.get_session(conf),
|
||||||
|
- interface=conf.service_credentials.interface,
|
||||||
|
- region_name=conf.service_credentials.region_name)
|
||||||
|
-
|
||||||
|
- rule = alarm.gnocchi_resources_threshold_rule
|
||||||
|
- try:
|
||||||
|
- gnocchi_client.resource.get(rule.resource_type,
|
||||||
|
- rule.resource_id)
|
||||||
|
- except exceptions.ClientException as e:
|
||||||
|
- raise base.ClientSideError(e.message, status_code=e.code)
|
||||||
|
- except Exception as e:
|
||||||
|
- raise GnocchiUnavailable(e)
|
||||||
|
-
|
||||||
|
|
||||||
|
class AggregationMetricByResourcesLookupRule(AlarmGnocchiThresholdRule):
|
||||||
|
metric = wsme.wsattr(wtypes.text, mandatory=True)
|
||||||
|
@@ -200,9 +180,9 @@ class AggregationMetricByResourcesLookupRule(AlarmGnocchiThresholdRule):
|
||||||
|
|
||||||
|
gnocchi_client = client.Client(
|
||||||
|
'1', keystone_client.get_session(conf),
|
||||||
|
- interface=conf.service_credentials.interface,
|
||||||
|
- region_name=conf.service_credentials.region_name)
|
||||||
|
-
|
||||||
|
+ adapter_options={
|
||||||
|
+ 'interface': conf.service_credentials.interface,
|
||||||
|
+ 'region_name': conf.service_credentials.region_name})
|
||||||
|
try:
|
||||||
|
gnocchi_client.metric.aggregation(
|
||||||
|
metrics=rule.metric,
|
||||||
|
@@ -211,6 +191,11 @@ class AggregationMetricByResourcesLookupRule(AlarmGnocchiThresholdRule):
|
||||||
|
needed_overlap=0,
|
||||||
|
resource_type=rule.resource_type)
|
||||||
|
except exceptions.ClientException as e:
|
||||||
|
+ if e.code == 404:
|
||||||
|
+ # NOTE(sileht): We are fine here, we just want to ensure the
|
||||||
|
+ # 'query' payload is valid for Gnocchi If the metric
|
||||||
|
+ # doesn't exists yet, it doesn't matter
|
||||||
|
+ return
|
||||||
|
raise base.ClientSideError(e.message, status_code=e.code)
|
||||||
|
except Exception as e:
|
||||||
|
raise GnocchiUnavailable(e)
|
||||||
|
diff --git a/aodh/evaluator/gnocchi.py b/aodh/evaluator/gnocchi.py
|
||||||
|
index 524b662..3632597 100644
|
||||||
|
--- a/aodh/evaluator/gnocchi.py
|
||||||
|
+++ b/aodh/evaluator/gnocchi.py
|
||||||
|
@@ -14,6 +14,7 @@
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from gnocchiclient import client
|
||||||
|
+from gnocchiclient import exceptions
|
||||||
|
from oslo_log import log
|
||||||
|
from oslo_serialization import jsonutils
|
||||||
|
|
||||||
|
@@ -34,8 +35,9 @@ class GnocchiBase(threshold.ThresholdEvaluator):
|
||||||
|
super(GnocchiBase, self).__init__(conf)
|
||||||
|
self._gnocchi_client = client.Client(
|
||||||
|
'1', keystone_client.get_session(conf),
|
||||||
|
- interface=conf.service_credentials.interface,
|
||||||
|
- region_name=conf.service_credentials.region_name)
|
||||||
|
+ adapter_options={
|
||||||
|
+ 'interface': conf.service_credentials.interface,
|
||||||
|
+ 'region_name': conf.service_credentials.region_name})
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _sanitize(rule, statistics):
|
||||||
|
@@ -58,12 +60,16 @@ class GnocchiResourceThresholdEvaluator(GnocchiBase):
|
||||||
|
try:
|
||||||
|
return self._gnocchi_client.metric.get_measures(
|
||||||
|
metric=rule['metric'],
|
||||||
|
+ granularity=rule['granularity'],
|
||||||
|
start=start, stop=end,
|
||||||
|
resource_id=rule['resource_id'],
|
||||||
|
aggregation=rule['aggregation_method'])
|
||||||
|
+ except exceptions.NotFound:
|
||||||
|
+ LOG.debug('metric %s or resource %s does not exists',
|
||||||
|
+ rule['metric'], rule['resource_id'])
|
||||||
|
+ return []
|
||||||
|
except Exception as e:
|
||||||
|
- LOG.warning(_LW('alarm stats retrieval failed: %s'),
|
||||||
|
- e)
|
||||||
|
+ LOG.warning(_LW('alarm stats retrieval failed: %s'), e)
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
@@ -80,9 +86,13 @@ class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase):
|
||||||
|
# https://bugs.launchpad.net/gnocchi/+bug/1479429
|
||||||
|
return self._gnocchi_client.metric.aggregation(
|
||||||
|
metrics=rule['metrics'],
|
||||||
|
+ granularity=rule['granularity'],
|
||||||
|
start=start, stop=end,
|
||||||
|
aggregation=rule['aggregation_method'],
|
||||||
|
needed_overlap=0)
|
||||||
|
+ except exceptions.NotFound:
|
||||||
|
+ LOG.debug('metrics %s does not exists', rule['metrics'])
|
||||||
|
+ return []
|
||||||
|
except Exception as e:
|
||||||
|
LOG.warning(_LW('alarm stats retrieval failed: %s'), e)
|
||||||
|
return []
|
||||||
|
@@ -101,12 +111,16 @@ class GnocchiAggregationResourcesThresholdEvaluator(GnocchiBase):
|
||||||
|
try:
|
||||||
|
return self._gnocchi_client.metric.aggregation(
|
||||||
|
metrics=rule['metric'],
|
||||||
|
+ granularity=rule['granularity'],
|
||||||
|
query=jsonutils.loads(rule['query']),
|
||||||
|
resource_type=rule["resource_type"],
|
||||||
|
start=start, stop=end,
|
||||||
|
aggregation=rule['aggregation_method'],
|
||||||
|
needed_overlap=0,
|
||||||
|
)
|
||||||
|
+ except exceptions.NotFound:
|
||||||
|
+ LOG.debug('metric %s does not exists', rule['metric'])
|
||||||
|
+ return []
|
||||||
|
except Exception as e:
|
||||||
|
LOG.warning(_LW('alarm stats retrieval failed: %s'), e)
|
||||||
|
return []
|
||||||
|
diff --git a/aodh/tests/functional/api/v2/test_alarm_scenarios.py b/aodh/tests/functional/api/v2/test_alarm_scenarios.py
|
||||||
|
index 8e3288a..5d30caa 100644
|
||||||
|
--- a/aodh/tests/functional/api/v2/test_alarm_scenarios.py
|
||||||
|
+++ b/aodh/tests/functional/api/v2/test_alarm_scenarios.py
|
||||||
|
@@ -2966,11 +2966,6 @@ class TestAlarmsRuleGnocchi(TestAlarmsBase):
|
||||||
|
c.capabilities.list.return_value = {
|
||||||
|
'aggregation_methods': ['count']}
|
||||||
|
self.post_json('/alarms', params=json, headers=self.auth_headers)
|
||||||
|
- expected = [mock.call.capabilities.list(),
|
||||||
|
- mock.call.resource.get(
|
||||||
|
- "instance",
|
||||||
|
- "209ef69c-c10c-4efb-90ff-46f4b2d90d2e")]
|
||||||
|
- self.assertEqual(expected, c.mock_calls)
|
||||||
|
|
||||||
|
alarms = list(self.alarm_conn.get_alarms(enabled=False))
|
||||||
|
self.assertEqual(1, len(alarms))
|
||||||
|
diff --git a/aodh/tests/unit/evaluator/test_gnocchi.py b/aodh/tests/unit/evaluator/test_gnocchi.py
|
||||||
|
index 6d00f4f..e4740e4 100644
|
||||||
|
--- a/aodh/tests/unit/evaluator/test_gnocchi.py
|
||||||
|
+++ b/aodh/tests/unit/evaluator/test_gnocchi.py
|
||||||
|
@@ -186,6 +186,7 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
[mock.call.get_measures(aggregation='mean', metric='cpu_util',
|
||||||
|
+ granularity=60,
|
||||||
|
resource_id='my_instance',
|
||||||
|
start=start_alarm, stop=end)],
|
||||||
|
self.client.metric.mock_calls)
|
||||||
|
@@ -326,6 +327,7 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
|
||||||
|
metrics=[
|
||||||
|
'0bb1604d-1193-4c0a-b4b8-74b170e35e83',
|
||||||
|
'9ddc209f-42f8-41e1-b8f1-8804f59c4053'],
|
||||||
|
+ granularity=300,
|
||||||
|
needed_overlap=0,
|
||||||
|
start=start_alarm, stop=end)],
|
||||||
|
self.client.metric.mock_calls)
|
||||||
|
@@ -443,6 +445,7 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
|
||||||
|
end = "2015-01-26T12:57:00"
|
||||||
|
self.assertEqual(
|
||||||
|
[mock.call.aggregation(aggregation='mean', metrics='cpu_util',
|
||||||
|
+ granularity=50,
|
||||||
|
needed_overlap=0,
|
||||||
|
query={"=": {"server_group":
|
||||||
|
"my_autoscaling_group"}},
|
||||||
|
--
|
||||||
|
1.8.3.1
|
||||||
|
|
@ -138,9 +138,6 @@ heat resources can be found in hot/scenarios/ directory.
|
|||||||
illustrate the creation of a Cinder Volume and then
|
illustrate the creation of a Cinder Volume and then
|
||||||
the launching / creation of a VM Instance using this
|
the launching / creation of a VM Instance using this
|
||||||
Cinder Volume as its boot device.
|
Cinder Volume as its boot device.
|
||||||
CFNPushStatsAutoScaling.yaml - An autoscaling stack of VMs that use cfn-push-stats
|
|
||||||
to emit custom meters which are used by autoscaling
|
|
||||||
policies.
|
|
||||||
CombinationAutoScaling.yaml - A single template that creates a simple VM In/Out
|
CombinationAutoScaling.yaml - A single template that creates a simple VM In/Out
|
||||||
auto-scaling use case. A single Load Balancer VM is
|
auto-scaling use case. A single Load Balancer VM is
|
||||||
created and an AutoScalingGroup of server VMs is
|
created and an AutoScalingGroup of server VMs is
|
||||||
@ -171,11 +168,6 @@ heat resources can be found in hot/scenarios/ directory.
|
|||||||
a keypair, 2 flavors, 4 networks, 5 subnets and
|
a keypair, 2 flavors, 4 networks, 5 subnets and
|
||||||
2 routers with gateways setup.
|
2 routers with gateways setup.
|
||||||
|
|
||||||
A demo illustrating a complex environment can be found in the hot/demo/ directory.
|
|
||||||
|
|
||||||
./hot/demo:
|
|
||||||
scaleUpDown.yaml - A demo to illustrate scale up and down
|
|
||||||
|
|
||||||
USAGE
|
USAGE
|
||||||
=====
|
=====
|
||||||
The HEAT template (YAML) files can be used to create a 'Stack' by using either the
|
The HEAT template (YAML) files can be used to create a 'Stack' by using either the
|
||||||
|
@ -1,35 +0,0 @@
|
|||||||
##############################################################################
|
|
||||||
#
|
|
||||||
# CPU Scaling UP / DOWN Demo
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# Creates two VMs:
|
|
||||||
#
|
|
||||||
# 1. traffic-generator
|
|
||||||
# - iperf client sending traffic to network-appliance
|
|
||||||
# - pause / unpause this VM to control load on network-appliance VM
|
|
||||||
# - NOTE use ubuntu-cfntools.img
|
|
||||||
# (ubuntu 16.04 with cloud-init and cfn-tools installed)
|
|
||||||
# - NOTE cloud-init and cfn-init used to create required config files, and
|
|
||||||
# install required tools (i.e. iperf).
|
|
||||||
#
|
|
||||||
# 2. network-appliance
|
|
||||||
# - iperf server receiving and sending back traffic to iperf client
|
|
||||||
# - also starts 'dd ...' when traffic starts, to cause more load on system
|
|
||||||
# - this VM auto-scales cpu up and down based on cpu load cfn-pushed to Titanium
|
|
||||||
# - NOTE use ubuntu-cfntools.img
|
|
||||||
# (ubuntu 16.04 with cloud-init and cfn-tools installed)
|
|
||||||
# - NOTE cloud-init and cfn-init used to create required config files, and
|
|
||||||
# install required tools.
|
|
||||||
# ( i.e. iperf, Titanium Guest Scaling SDK Module, collectd,
|
|
||||||
# influxdb and grafana )
|
|
||||||
#
|
|
||||||
|
|
||||||
openstack stack create -t scaleUpDown.yaml demo
|
|
||||||
|
|
||||||
watch "ceilometer sample-list -m net_appl_cpu_load -l 10; ceilometer alarm-list | fgrep net_appl"
|
|
||||||
|
|
||||||
http://<network-appliance-FLOATING-IP>:3000
|
|
||||||
|
|
||||||
openstack stack delete demo
|
|
||||||
|
|
@ -1 +0,0 @@
|
|||||||
* * * * * root /etc/cfn/send_guest_metrics
|
|
@ -1,105 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
### BEGIN INIT INFO
|
|
||||||
# Provides: gen-add-load.sh
|
|
||||||
# Required-Start: $all
|
|
||||||
# Required-Stop: $remote_fs $syslog
|
|
||||||
# Default-Start: 2 3 4 5
|
|
||||||
# Default-Stop: 0 1 6
|
|
||||||
# Short-Description: Start daemon at boot time
|
|
||||||
# Description: Enable service provided by daemon.
|
|
||||||
### END INIT INFO
|
|
||||||
|
|
||||||
#########################################################
|
|
||||||
#
|
|
||||||
# Systemd file for 'gen-add-load.sh'
|
|
||||||
#
|
|
||||||
#########################################################
|
|
||||||
|
|
||||||
dir="/usr/bin"
|
|
||||||
cmd="./gen-add-load.sh"
|
|
||||||
user="root"
|
|
||||||
|
|
||||||
name=`basename $0`
|
|
||||||
pid_file="/var/run/$name.pid"
|
|
||||||
stdout_log="/var/log/$name.log"
|
|
||||||
stderr_log="/var/log/$name.err"
|
|
||||||
|
|
||||||
get_pid() {
|
|
||||||
cat "$pid_file"
|
|
||||||
}
|
|
||||||
|
|
||||||
is_running() {
|
|
||||||
[ -f "$pid_file" ] && ps -p `get_pid` > /dev/null 2>&1
|
|
||||||
}
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
start)
|
|
||||||
if is_running; then
|
|
||||||
echo "Already started"
|
|
||||||
else
|
|
||||||
echo "Starting $name"
|
|
||||||
cd "$dir"
|
|
||||||
if [ -z "$user" ]; then
|
|
||||||
sudo $cmd >> "$stdout_log" 2>> "$stderr_log" &
|
|
||||||
else
|
|
||||||
sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" &
|
|
||||||
fi
|
|
||||||
echo $! > "$pid_file"
|
|
||||||
if ! is_running; then
|
|
||||||
echo "Unable to start, see $stdout_log and $stderr_log"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
stop)
|
|
||||||
if is_running; then
|
|
||||||
echo -n "Stopping $name.."
|
|
||||||
kill `get_pid`
|
|
||||||
for i in 1 2 3 4 5 6 7 8 9 10
|
|
||||||
# for i in `seq 10`
|
|
||||||
do
|
|
||||||
if ! is_running; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo -n "."
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
echo
|
|
||||||
|
|
||||||
if is_running; then
|
|
||||||
echo "Not stopped; may still be shutting down or shutdown may have failed"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "Stopped"
|
|
||||||
if [ -f "$pid_file" ]; then
|
|
||||||
rm "$pid_file"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "Not running"
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
restart)
|
|
||||||
$0 stop
|
|
||||||
if is_running; then
|
|
||||||
echo "Unable to stop, will not attempt to start"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
$0 start
|
|
||||||
;;
|
|
||||||
status)
|
|
||||||
if is_running; then
|
|
||||||
echo "Running"
|
|
||||||
else
|
|
||||||
echo "Stopped"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "Usage: $0 {start|stop|restart|status}"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
exit 0
|
|
@ -1,48 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
#
|
|
||||||
# 'gen-add-load.sh'
|
|
||||||
# -----------------
|
|
||||||
#
|
|
||||||
# Monitors incoming packets on ens3 interface with 'pkt-capture.sh'
|
|
||||||
#
|
|
||||||
# When incoming traffic goes above threshold of 1000 pkts/2seconds,
|
|
||||||
# starts a DD command to add more load than just handling the traffic.
|
|
||||||
# (i.e. mimicking doing some work on the traffic)
|
|
||||||
#
|
|
||||||
# When incoming traffic goes below threshold of 1000 pkts/2seconds,
|
|
||||||
# stops the DD command.
|
|
||||||
#
|
|
||||||
|
|
||||||
command="dd if=/dev/zero of=/dev/null"
|
|
||||||
pid=0
|
|
||||||
|
|
||||||
addLoadRunning=false
|
|
||||||
|
|
||||||
while true
|
|
||||||
do
|
|
||||||
nbPcks=`/usr/bin/pkt-capture.sh ens3 2`
|
|
||||||
echo $nbPcks
|
|
||||||
|
|
||||||
if test $nbPcks -gt 1000
|
|
||||||
then
|
|
||||||
if ( ! $addLoadRunning )
|
|
||||||
then
|
|
||||||
echo "Starting DD command."
|
|
||||||
$command &
|
|
||||||
pid=$!
|
|
||||||
fi
|
|
||||||
echo "TRAFFIC RUNNING"
|
|
||||||
addLoadRunning=true
|
|
||||||
else
|
|
||||||
if ( $addLoadRunning )
|
|
||||||
then
|
|
||||||
echo "Stopping DD command."
|
|
||||||
kill $pid
|
|
||||||
fi
|
|
||||||
echo "No Traffic"
|
|
||||||
addLoadRunning=false
|
|
||||||
fi
|
|
||||||
echo
|
|
||||||
done
|
|
||||||
|
|
@ -1,105 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
### BEGIN INIT INFO
|
|
||||||
# Provides: gen-add-load.sh
|
|
||||||
# Required-Start: $all
|
|
||||||
# Required-Stop: $remote_fs $syslog
|
|
||||||
# Default-Start: 2 3 4 5
|
|
||||||
# Default-Stop: 0 1 6
|
|
||||||
# Short-Description: Start daemon at boot time
|
|
||||||
# Description: Enable service provided by daemon.
|
|
||||||
### END INIT INFO
|
|
||||||
|
|
||||||
#########################################################
|
|
||||||
#
|
|
||||||
# Systemd file for 'gen-traffic.sh'
|
|
||||||
#
|
|
||||||
#########################################################
|
|
||||||
|
|
||||||
dir="/usr/bin"
|
|
||||||
cmd="./gen-traffic.sh"
|
|
||||||
user="root"
|
|
||||||
|
|
||||||
name=`basename $0`
|
|
||||||
pid_file="/var/run/$name.pid"
|
|
||||||
stdout_log="/var/log/$name.log"
|
|
||||||
stderr_log="/var/log/$name.err"
|
|
||||||
|
|
||||||
get_pid() {
|
|
||||||
cat "$pid_file"
|
|
||||||
}
|
|
||||||
|
|
||||||
is_running() {
|
|
||||||
[ -f "$pid_file" ] && ps -p `get_pid` > /dev/null 2>&1
|
|
||||||
}
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
start)
|
|
||||||
if is_running; then
|
|
||||||
echo "Already started"
|
|
||||||
else
|
|
||||||
echo "Starting $name"
|
|
||||||
cd "$dir"
|
|
||||||
if [ -z "$user" ]; then
|
|
||||||
sudo $cmd >> "$stdout_log" 2>> "$stderr_log" &
|
|
||||||
else
|
|
||||||
sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" &
|
|
||||||
fi
|
|
||||||
echo $! > "$pid_file"
|
|
||||||
if ! is_running; then
|
|
||||||
echo "Unable to start, see $stdout_log and $stderr_log"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
stop)
|
|
||||||
if is_running; then
|
|
||||||
echo -n "Stopping $name.."
|
|
||||||
kill `get_pid`
|
|
||||||
for i in 1 2 3 4 5 6 7 8 9 10
|
|
||||||
# for i in `seq 10`
|
|
||||||
do
|
|
||||||
if ! is_running; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo -n "."
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
echo
|
|
||||||
|
|
||||||
if is_running; then
|
|
||||||
echo "Not stopped; may still be shutting down or shutdown may have failed"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "Stopped"
|
|
||||||
if [ -f "$pid_file" ]; then
|
|
||||||
rm "$pid_file"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "Not running"
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
restart)
|
|
||||||
$0 stop
|
|
||||||
if is_running; then
|
|
||||||
echo "Unable to stop, will not attempt to start"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
$0 start
|
|
||||||
;;
|
|
||||||
status)
|
|
||||||
if is_running; then
|
|
||||||
echo "Running"
|
|
||||||
else
|
|
||||||
echo "Stopped"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "Usage: $0 {start|stop|restart|status}"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
exit 0
|
|
@ -1,21 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
#
|
|
||||||
# 'gen-traffic.sh'
|
|
||||||
# -----------------
|
|
||||||
#
|
|
||||||
# in a forever loop:
|
|
||||||
# call iperf client, sending to 10.10.10.50 (fixed ip of iperf server, network-appliance)
|
|
||||||
# for 600 seconds.
|
|
||||||
# ( iperf -c ... seems to sometimes get hung if using a longer time interval )
|
|
||||||
#
|
|
||||||
|
|
||||||
while true
|
|
||||||
do
|
|
||||||
date
|
|
||||||
echo "Starting traffic ..."
|
|
||||||
/usr/bin/iperf -c 10.10.10.50 -t 600
|
|
||||||
date
|
|
||||||
echo "Traffic stopped."
|
|
||||||
echo
|
|
||||||
done
|
|
@ -1,204 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
###############################################################################
|
|
||||||
#
|
|
||||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
#
|
|
||||||
###############################################################################
|
|
||||||
#
|
|
||||||
# Description:
|
|
||||||
# This displays total CPU occupancy based on hi-resolution timings.
|
|
||||||
#
|
|
||||||
###############################################################################
|
|
||||||
# Define minimal path
|
|
||||||
PATH=/bin:/usr/bin:/usr/local/bin
|
|
||||||
|
|
||||||
# NOTE: Comment out LOG_DEBUG and DEBUG_METHODS in production version.
|
|
||||||
# Uncomment LOG_DEBUG to enable debug print statements
|
|
||||||
##LOG_DEBUG=1
|
|
||||||
|
|
||||||
# Uncomment DEBUG_METHODS to enable test of methods
|
|
||||||
##DEBUG_METHODS=1
|
|
||||||
|
|
||||||
SCHEDSTAT_VERSION=$(cat /proc/schedstat 2>/dev/null | awk '/version/ {print $2;}')
|
|
||||||
NPROCESSORS_ONLN=$(getconf _NPROCESSORS_ONLN)
|
|
||||||
ARCH=$(arch)
|
|
||||||
|
|
||||||
# NOTE: we only support 64-bit math due to long integers of schedstat
|
|
||||||
SUPPORTED_SCHEDSTAT_VERSION=15
|
|
||||||
SUPPORTED_ARCH='x86_64'
|
|
||||||
|
|
||||||
# Customize sleep interval based on how responsive we want scaling to react.
|
|
||||||
# This is set small for demonstration purposes.
|
|
||||||
SLEEP_INTERVAL="1.0s"
|
|
||||||
|
|
||||||
# Log if debug is enabled via LOG_DEBUG
|
|
||||||
function log_debug
|
|
||||||
{
|
|
||||||
if [ ! -z "${LOG_DEBUG}" ]; then
|
|
||||||
logger -p debug -t "$0[${PPID}]" -s "$@" 2>&1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Log unconditionally to STDERR
|
|
||||||
function log_error
|
|
||||||
{
|
|
||||||
logger -p error -t "$0[${PPID}]" -s "$@"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Log unconditionally to STDOUT
|
|
||||||
function log
|
|
||||||
{
|
|
||||||
logger -p info -t "$0[${PPID}]" -s "$@" 2>&1
|
|
||||||
}
|
|
||||||
|
|
||||||
function read_proc_schedstat
|
|
||||||
{
|
|
||||||
local _outvar=$1
|
|
||||||
local _result # Use some naming convention to avoid OUTVARs to clash
|
|
||||||
local _cpu
|
|
||||||
local _cputime
|
|
||||||
_result=0
|
|
||||||
while read -r line
|
|
||||||
do
|
|
||||||
# version 15: cputime is 7th field
|
|
||||||
if [[ $line =~ ^cpu([[:digit:]]+)[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+([[:digit:]]+)[[:space:]]+ ]]
|
|
||||||
then
|
|
||||||
_cpu=${BASH_REMATCH[1]}
|
|
||||||
_cputime=${BASH_REMATCH[2]}
|
|
||||||
((_result += _cputime))
|
|
||||||
fi
|
|
||||||
done < "/proc/schedstat"
|
|
||||||
|
|
||||||
eval $_outvar=\$_result # Instead of just =$_result
|
|
||||||
}
|
|
||||||
|
|
||||||
function occupancy_loadavg()
|
|
||||||
{
|
|
||||||
# NOTE: This method is not recommended, as the feedback is slow and
|
|
||||||
# based on the loadavg 1-minute decay. The loadavg also includes
|
|
||||||
# IoWait which isn't desired. This does not require large integers.
|
|
||||||
|
|
||||||
# Calculate total CPU occupancy based on 1 minute loadavg.
|
|
||||||
ldavg_1m=$(cat /proc/loadavg 2>/dev/null | awk '{print $1}')
|
|
||||||
|
|
||||||
# Calculate total CPU occupancy (%)
|
|
||||||
occ=$(awk -v ldavg=${ldavg_1m} -v N=${NPROCESSORS_ONLN} \
|
|
||||||
'BEGIN {printf "%.1f\n", 100.0 * ldavg / N;}'
|
|
||||||
)
|
|
||||||
log_debug "CPU Occupancy(loadavg): ${occ}"
|
|
||||||
echo ${occ}
|
|
||||||
}
|
|
||||||
|
|
||||||
function occupancy_jiffie()
|
|
||||||
{
|
|
||||||
# NOTE: This method is not recommended, as the per-cpu stats are not
|
|
||||||
# properly updated by the kernel after scaling VM back up.
|
|
||||||
# This routine uses simple small integer math.
|
|
||||||
|
|
||||||
# Calculate total CPU occupancy based on jiffie stats.
|
|
||||||
|
|
||||||
read cpu user nice system idle iowait irq softirq steal guest < /proc/stat
|
|
||||||
j_occ_0=$((user+system+nice+irq+softirq+steal))
|
|
||||||
j_tot_0=$((user+system+nice+irq+softirq+steal+idle+iowait))
|
|
||||||
|
|
||||||
sleep ${SLEEP_INTERVAL}
|
|
||||||
|
|
||||||
read cpu user nice system idle iowait irq softirq steal guest < /proc/stat
|
|
||||||
j_occ_1=$((user+system+nice+irq+softirq+steal))
|
|
||||||
j_tot_1=$((user+system+nice+irq+softirq+steal+idle+iowait))
|
|
||||||
|
|
||||||
# Calculate total CPU occupancy (%)
|
|
||||||
occ=$(( 100 * (j_occ_1 - j_occ_0) / (j_tot_1 - j_tot_0) ))
|
|
||||||
|
|
||||||
log_debug "CPU Occupancy(jiffie): ${occ}"
|
|
||||||
echo ${occ}
|
|
||||||
}
|
|
||||||
|
|
||||||
function occupancy_schedstat()
|
|
||||||
{
|
|
||||||
# NOTE: This method is recommended as timings are high resolution.
|
|
||||||
# However the timings require large integers, so we are assuming
|
|
||||||
# we require 64-bit guest.
|
|
||||||
|
|
||||||
# Calculate total CPU occupancy based on uptime stats
|
|
||||||
local cputime_0=''
|
|
||||||
local cputime_1=''
|
|
||||||
|
|
||||||
read t_elapsed_0 t_idle_0 < /proc/uptime
|
|
||||||
read_proc_schedstat cputime_0
|
|
||||||
|
|
||||||
sleep ${SLEEP_INTERVAL}
|
|
||||||
|
|
||||||
read t_elapsed_1 t_idle_1 < /proc/uptime
|
|
||||||
read_proc_schedstat cputime_1
|
|
||||||
|
|
||||||
# Calculate total CPU occupancy (%)
|
|
||||||
occ=$(awk -v te0=${t_elapsed_0} -v te1=${t_elapsed_1} \
|
|
||||||
-v tc0=${cputime_0} -v tc1=${cputime_1} \
|
|
||||||
-v N=${NPROCESSORS_ONLN} \
|
|
||||||
'BEGIN {dt_ms = N*(te1 - te0)*1E3; cputime_ms = (tc1 - tc0)/1.0E6;
|
|
||||||
occ = 100.0 * cputime_ms / dt_ms;
|
|
||||||
printf "%.1f\n", occ;}'
|
|
||||||
)
|
|
||||||
log_debug "CPU Occupancy(schedstat): ${occ}"
|
|
||||||
echo ${occ}
|
|
||||||
}
|
|
||||||
|
|
||||||
function occupancy_uptime()
|
|
||||||
{
|
|
||||||
# NOTE: This method is is very similar to the loadavg method in that
|
|
||||||
# IoWait is treated as load, but the occupancy is instantaneous.
|
|
||||||
# This does not require large integers.
|
|
||||||
|
|
||||||
# Calculate total CPU occupancy based on uptime/idle stats
|
|
||||||
read t_elapsed_0 t_idle_0 < /proc/uptime
|
|
||||||
|
|
||||||
sleep ${SLEEP_INTERVAL}
|
|
||||||
|
|
||||||
read t_elapsed_1 t_idle_1 < /proc/uptime
|
|
||||||
|
|
||||||
# Calculate total CPU occupancy (%)
|
|
||||||
occ=$(awk -v te0=${t_elapsed_0} -v ti0=${t_idle_0} \
|
|
||||||
-v te1=${t_elapsed_1} -v ti1=${t_idle_1} \
|
|
||||||
-v N=${NPROCESSORS_ONLN} \
|
|
||||||
'BEGIN {dt = N*(te1 - te0); di = ti1 - ti0; cputime = dt - di;
|
|
||||||
occ = 100.0 * cputime / dt;
|
|
||||||
printf "%.1f\n", occ;}'
|
|
||||||
)
|
|
||||||
log_debug "CPU Occupancy(uptime): ${occ}"
|
|
||||||
echo ${occ}
|
|
||||||
}
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
#
|
|
||||||
# MAIN Program
|
|
||||||
#
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
if [ ! -z "${DEBUG_METHODS}" ]
|
|
||||||
then
|
|
||||||
log_debug "Testing occupancy_loadavg"
|
|
||||||
occupancy_loadavg
|
|
||||||
|
|
||||||
log_debug "Testing occupancy_jiffie"
|
|
||||||
occupancy_jiffie
|
|
||||||
|
|
||||||
log_debug "Testing occupancy_uptime"
|
|
||||||
occupancy_uptime
|
|
||||||
|
|
||||||
log_debug "Testing occupancy_schedstat"
|
|
||||||
occupancy_schedstat
|
|
||||||
fi
|
|
||||||
|
|
||||||
log_debug "Discovered arch=${ARCH}, schedstat version=${SCHEDSTAT_VERSION}."
|
|
||||||
if [[ ${ARCH} == ${SUPPORTED_ARCH} ]] && [[ ${SCHEDSTAT_VERSION} -eq ${SUPPORTED_SCHEDSTAT_VERSION} ]]
|
|
||||||
then
|
|
||||||
occupancy_schedstat
|
|
||||||
else
|
|
||||||
occupancy_uptime
|
|
||||||
fi
|
|
||||||
|
|
||||||
exit 0
|
|
@ -1,105 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
### BEGIN INIT INFO
|
|
||||||
# Provides: iperf
|
|
||||||
# Required-Start: $all
|
|
||||||
# Required-Stop: $remote_fs $syslog
|
|
||||||
# Default-Start: 2 3 4 5
|
|
||||||
# Default-Stop: 0 1 6
|
|
||||||
# Short-Description: Start daemon at boot time
|
|
||||||
# Description: Enable service provided by daemon.
|
|
||||||
### END INIT INFO
|
|
||||||
|
|
||||||
#########################################################
|
|
||||||
#
|
|
||||||
# Systemd file for 'iperf -s' on network-appliance VM
|
|
||||||
#
|
|
||||||
#########################################################
|
|
||||||
|
|
||||||
dir="/usr/bin"
|
|
||||||
cmd="iperf -s"
|
|
||||||
user="root"
|
|
||||||
|
|
||||||
name=`basename $0`
|
|
||||||
pid_file="/var/run/$name.pid"
|
|
||||||
stdout_log="/var/log/$name.log"
|
|
||||||
stderr_log="/var/log/$name.err"
|
|
||||||
|
|
||||||
get_pid() {
|
|
||||||
cat "$pid_file"
|
|
||||||
}
|
|
||||||
|
|
||||||
is_running() {
|
|
||||||
[ -f "$pid_file" ] && ps -p `get_pid` > /dev/null 2>&1
|
|
||||||
}
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
start)
|
|
||||||
if is_running; then
|
|
||||||
echo "Already started"
|
|
||||||
else
|
|
||||||
echo "Starting $name"
|
|
||||||
cd "$dir"
|
|
||||||
if [ -z "$user" ]; then
|
|
||||||
sudo $cmd >> "$stdout_log" 2>> "$stderr_log" &
|
|
||||||
else
|
|
||||||
sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" &
|
|
||||||
fi
|
|
||||||
echo $! > "$pid_file"
|
|
||||||
if ! is_running; then
|
|
||||||
echo "Unable to start, see $stdout_log and $stderr_log"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
stop)
|
|
||||||
if is_running; then
|
|
||||||
echo -n "Stopping $name.."
|
|
||||||
kill `get_pid`
|
|
||||||
for i in 1 2 3 4 5 6 7 8 9 10
|
|
||||||
# for i in `seq 10`
|
|
||||||
do
|
|
||||||
if ! is_running; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo -n "."
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
echo
|
|
||||||
|
|
||||||
if is_running; then
|
|
||||||
echo "Not stopped; may still be shutting down or shutdown may have failed"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "Stopped"
|
|
||||||
if [ -f "$pid_file" ]; then
|
|
||||||
rm "$pid_file"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "Not running"
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
restart)
|
|
||||||
$0 stop
|
|
||||||
if is_running; then
|
|
||||||
echo "Unable to stop, will not attempt to start"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
$0 start
|
|
||||||
;;
|
|
||||||
status)
|
|
||||||
if is_running; then
|
|
||||||
echo "Running"
|
|
||||||
else
|
|
||||||
echo "Stopped"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "Usage: $0 {start|stop|restart|status}"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
exit 0
|
|
@ -1,4 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
# Generate maximum CPU load for a core
|
|
||||||
# Launch this X times for X cores to get 100% utilization
|
|
||||||
dd if=/dev/urandom of=/dev/null &
|
|
@ -1,215 +0,0 @@
|
|||||||
#!/bin/bash -v
|
|
||||||
|
|
||||||
#########################################################
|
|
||||||
#
|
|
||||||
# Install script for network-appliance VM
|
|
||||||
# called thru cloud-init
|
|
||||||
#
|
|
||||||
#########################################################
|
|
||||||
|
|
||||||
echo "Starting setup of network appliance ..." >> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Installing iperf ..." >> /var/log/heat_setup.txt
|
|
||||||
apt-get -y install iperf >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Installing python-pip ..." >> /var/log/heat_setup.txt
|
|
||||||
apt-get -y install gcc python-dev python-pip >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Installing psutil ..." >> /var/log/heat_setup.txt
|
|
||||||
pip install psutil >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
|
|
||||||
# Create sym links to standard location for cfn tools in an aws environment
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Setting up symlinks" >> /var/log/heat_setup.txt
|
|
||||||
cfn-create-aws-symlinks --source /usr/bin >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
# invoke cfn-init which will extract the cloudformation metadata from the userdata
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Setting up cfn-init " >> /var/log/heat_setup.txt
|
|
||||||
/usr/bin/cfn-init >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Installing Guest SDK ..." >> /var/log/heat_setup.txt
|
|
||||||
git clone https://github.com/Wind-River/titanium-cloud.git >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
cat > /lib/systemd/system/guest-agent.service << EOF
|
|
||||||
[Unit]
|
|
||||||
Description=Guest Agent
|
|
||||||
After=cloud-init.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
ExecStart=/usr/sbin/guest_agent
|
|
||||||
Type=simple
|
|
||||||
Restart=always
|
|
||||||
RestartSec=0
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=guest-scale-agent.service
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cd titanium-cloud/guest-API-SDK/17.06/
|
|
||||||
apt-get -y install build-essential libjson0 libjson0-dev >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
cd wrs-server-group-2.0.4/
|
|
||||||
mkdir obj bin lib
|
|
||||||
make >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
cp bin/* /usr/sbin
|
|
||||||
cp lib/libguesthostmsg.so.2.0.4 lib/libservergroup.so.2.0.4 /usr/lib/
|
|
||||||
ldconfig
|
|
||||||
cd ../wrs-guest-scale-2.0.4/
|
|
||||||
mkdir obj bin lib
|
|
||||||
make >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
cp bin/guest_scale_agent /usr/sbin
|
|
||||||
cd scripts/
|
|
||||||
cp app_scale_helper offline_cpus /usr/sbin
|
|
||||||
chmod 755 init_offline_cpus offline_cpus
|
|
||||||
cp init_offline_cpus /etc/init.d
|
|
||||||
cp guest-scale-agent.service offline-cpus.service /lib/systemd/system/
|
|
||||||
systemctl enable guest-agent.service
|
|
||||||
systemctl enable guest-scale-agent.service
|
|
||||||
systemctl enable offline-cpus.service
|
|
||||||
systemctl start guest-agent.service
|
|
||||||
systemctl start guest-scale-agent.service
|
|
||||||
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Starting collectd and grafana install ..." >> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
apt-get -y update >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
apt-get -y dist-upgrade >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
apt-get -y install openssh-server >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Setup gpg keys ..." >> /var/log/heat_setup.txt
|
|
||||||
gpg --recv-keys 3994D24FB8543576 >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
gpg --recv-keys 3994D24FB8543576 >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
gpg --recv-keys 3994D24FB8543576 >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
sh -c 'gpg --export -a 3994D24FB8543576 | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
sh -c 'gpg --export -a 3994D24FB8543576 | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
sh -c 'gpg --export -a 3994D24FB8543576 | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Get influxdb key and packagecloud key ..." >> /var/log/heat_setup.txt
|
|
||||||
# don't use latest influxdb yet, it has bugs
|
|
||||||
# sh -c 'curl -sL https://repos.influxdata.com/influxdb.key | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
sh -c 'curl https://packagecloud.io/gpg.key | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Setup collectd, influxdb and grafana .list files ..." >> /var/log/heat_setup.txt
|
|
||||||
echo "deb http://pkg.ci.collectd.org/deb xenial collectd-5.8" > /etc/apt/sources.list.d/collectd.list
|
|
||||||
# don't use latest influxdb yet, it has bugs
|
|
||||||
# echo "deb https://repos.influxdata.com/debian xenial stable" > /etc/apt/sources.list.d/influxdb.list
|
|
||||||
echo "deb https://packagecloud.io/grafana/stable/debian/ jessie main" > /etc/apt/sources.list.d/grafana.list
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "apt-get update ..." >> /var/log/heat_setup.txt
|
|
||||||
apt-get -y update >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "apt-cache ..." >> /var/log/heat_setup.txt
|
|
||||||
apt-cache madison collectd >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
apt-cache madison influxdb >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
apt-cache madison influxdb-client >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
apt-cache madison grafana >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "apt-get install collectd ..." >> /var/log/heat_setup.txt
|
|
||||||
apt-get -y install collectd >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "apt-get install influxdb ..." >> /var/log/heat_setup.txt
|
|
||||||
apt-get -y install influxdb >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "apt-get install influxdb-client ..." >> /var/log/heat_setup.txt
|
|
||||||
apt-get -y install influxdb-client >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "apt-get install grafana ..." >> /var/log/heat_setup.txt
|
|
||||||
apt-get -y install grafana >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "apt-get cleanup ..." >> /var/log/heat_setup.txt
|
|
||||||
apt-get -y update >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
apt-get -y dist-upgrade >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
apt-get -y autoclean >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
apt-get -y autoremove >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
mv /etc/collectd/collectd.conf /etc/collectd/collectd.conf.ORIG
|
|
||||||
cat >> /etc/collectd/collectd.conf << EOF
|
|
||||||
|
|
||||||
LoadPlugin network
|
|
||||||
<Plugin "network">
|
|
||||||
Server "127.0.0.1" "25826"
|
|
||||||
</Plugin>
|
|
||||||
|
|
||||||
<Plugin cpu>
|
|
||||||
ReportByCpu true
|
|
||||||
ReportByState false
|
|
||||||
ValuesPercentage false
|
|
||||||
ReportNumCpu true
|
|
||||||
</Plugin>
|
|
||||||
|
|
||||||
EOF
|
|
||||||
|
|
||||||
|
|
||||||
cp /etc/influxdb/influxdb.conf /etc/influxdb/influxdb.conf.ORIG
|
|
||||||
sed -i -e '/^\[collectd\].*/,/enabled = false/d' /etc/influxdb/influxdb.conf
|
|
||||||
cat >> /etc/influxdb/influxdb.conf << EOF
|
|
||||||
|
|
||||||
[collectd]
|
|
||||||
enabled = true
|
|
||||||
bind-address = ":25826"
|
|
||||||
database = "collectd"
|
|
||||||
typesdb = "/usr/share/collectd/types.db"
|
|
||||||
EOF
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "start grafana-server ..." >> /var/log/heat_setup.txt
|
|
||||||
systemctl start grafana-server
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "start influxdb ..." >> /var/log/heat_setup.txt
|
|
||||||
systemctl start influxdb
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "start collectd ..." >> /var/log/heat_setup.txt
|
|
||||||
systemctl start collectd
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "enable grafana-server ..." >> /var/log/heat_setup.txt
|
|
||||||
systemctl enable grafana-server.service
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "enable influxdb.service ..." >> /var/log/heat_setup.txt
|
|
||||||
systemctl enable influxdb.service
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "enable collectd.service ..." >> /var/log/heat_setup.txt
|
|
||||||
systemctl enable collectd.service
|
|
||||||
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Starting network appliance server service ..." >> /var/log/heat_setup.txt
|
|
||||||
update-rc.d iperf-server-service defaults 97 03 >> /var/log/heat_setup.txt
|
|
||||||
service iperf-server-service start >> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "Starting gen-add-load service ..." >> /var/log/heat_setup.txt
|
|
||||||
update-rc.d gen-add-load-service defaults 97 03 >> /var/log/heat_setup.txt
|
|
||||||
service gen-add-load-service start >> /var/log/heat_setup.txt
|
|
||||||
|
|
||||||
sleep 5
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "restart collectd ..." >> /var/log/heat_setup.txt
|
|
||||||
systemctl restart collectd
|
|
||||||
|
|
||||||
sleep 5
|
|
||||||
echo >> /var/log/heat_setup.txt
|
|
||||||
echo "restart influxdb ..." >> /var/log/heat_setup.txt
|
|
||||||
systemctl restart influxdb
|
|
||||||
|
|
||||||
echo "Finished user data setup" >> /var/log/heat_setup.txt
|
|
@ -1,15 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
#########################################################
|
|
||||||
#
|
|
||||||
# pkt-capture.sh <interface> <interval>
|
|
||||||
#
|
|
||||||
# Measures the received packets on specified interface
|
|
||||||
# for specified interval (in seconds).
|
|
||||||
#
|
|
||||||
#########################################################
|
|
||||||
|
|
||||||
pcksFile="/sys/class/net/$1/statistics/rx_packets"
|
|
||||||
nbPcks=`cat $pcksFile`
|
|
||||||
sleep $2
|
|
||||||
echo $(expr `cat $pcksFile` - $nbPcks)
|
|
@ -1,316 +0,0 @@
|
|||||||
################################################################################
|
|
||||||
## Copyright (c) 2018 Wind River Systems, Inc.
|
|
||||||
##
|
|
||||||
## SPDX-License-Identifier: Apache-2.0
|
|
||||||
##
|
|
||||||
#################################################################################
|
|
||||||
|
|
||||||
heat_template_version: 2013-05-23
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
parameters:
|
|
||||||
|
|
||||||
FLAVOR:
|
|
||||||
description: Nova flavor to use for traffic-generator VM. (nova flavor-list)
|
|
||||||
type: string
|
|
||||||
default: small.2c.2G.20G
|
|
||||||
constraints:
|
|
||||||
- custom_constraint: nova.flavor
|
|
||||||
|
|
||||||
SCALING_FLAVOR:
|
|
||||||
description: Nova flavor to use for network-appliance VM. (nova flavor-list)
|
|
||||||
type: string
|
|
||||||
default: scalingFlavor
|
|
||||||
constraints:
|
|
||||||
- custom_constraint: nova.flavor
|
|
||||||
|
|
||||||
IMAGE:
|
|
||||||
description: Name of the glance image to create a cinder volume for (glance image-list)
|
|
||||||
NOTE - this MUST be an ubuntu 16.04 image with cloud-init and cfn-init
|
|
||||||
type: string
|
|
||||||
default: ubuntu-cfntools.img
|
|
||||||
constraints:
|
|
||||||
- custom_constraint: glance.image
|
|
||||||
|
|
||||||
EXT_NETWORK:
|
|
||||||
description: Name of the external network to use (neutron net-list)
|
|
||||||
type: string
|
|
||||||
default: external-net0
|
|
||||||
constraints:
|
|
||||||
- custom_constraint: neutron.network
|
|
||||||
|
|
||||||
INT_NETWORK:
|
|
||||||
description: Name of the internal network to use (neutron net-list)
|
|
||||||
type: string
|
|
||||||
default: admin-internal-net0
|
|
||||||
constraints:
|
|
||||||
- custom_constraint: neutron.network
|
|
||||||
|
|
||||||
METER_NAME:
|
|
||||||
description: Meter that VM will cfn-push, and
|
|
||||||
Ceilometer meter to query when determining autoscaling
|
|
||||||
type: string
|
|
||||||
default: net_appl_cpu_load
|
|
||||||
|
|
||||||
METER_UNIT:
|
|
||||||
description: Unit for the meter
|
|
||||||
type: string
|
|
||||||
default: '%'
|
|
||||||
|
|
||||||
HIGH_VALUE:
|
|
||||||
description: Metric value that will trigger a scale up if exceeded
|
|
||||||
type: string
|
|
||||||
default: '50'
|
|
||||||
|
|
||||||
LOW_VALUE:
|
|
||||||
description: Metric value that will trigger a scale down if below
|
|
||||||
type: string
|
|
||||||
default: '20'
|
|
||||||
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
resources:
|
|
||||||
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
# trafficGenerator VM
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
TrafficGeneratorVolume:
|
|
||||||
type: OS::Cinder::Volume
|
|
||||||
properties:
|
|
||||||
name: traffic-generator-DISK
|
|
||||||
image: { get_param: IMAGE }
|
|
||||||
size: 20
|
|
||||||
|
|
||||||
TrafficGeneratorFloatingIP:
|
|
||||||
type: OS::Neutron::FloatingIP
|
|
||||||
properties:
|
|
||||||
floating_network: { get_param: EXT_NETWORK }
|
|
||||||
|
|
||||||
TrafficGeneratorVM:
|
|
||||||
type: OS::Nova::Server
|
|
||||||
metadata:
|
|
||||||
wrs-groupindex-mode: true
|
|
||||||
AWS::CloudFormation::Init:
|
|
||||||
config:
|
|
||||||
files:
|
|
||||||
/usr/bin/gen-traffic.sh:
|
|
||||||
content:
|
|
||||||
get_file: ./gen-traffic.sh
|
|
||||||
mode: '000700'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
/etc/init.d/gen-traffic-service:
|
|
||||||
content:
|
|
||||||
get_file: ./gen-traffic-service
|
|
||||||
mode: '000700'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
properties:
|
|
||||||
name: traffic-generator
|
|
||||||
flavor: { get_param: FLAVOR }
|
|
||||||
block_device_mapping:
|
|
||||||
- device_name: vda
|
|
||||||
delete_on_termination: true
|
|
||||||
volume_id: { get_resource: TrafficGeneratorVolume }
|
|
||||||
networks:
|
|
||||||
- { network: { get_param: INT_NETWORK } , fixed_ip: 10.10.10.40, vif-model: virtio}
|
|
||||||
user_data_format: HEAT_CFNTOOLS
|
|
||||||
user_data:
|
|
||||||
get_file: ./traffic-generator-install.sh
|
|
||||||
|
|
||||||
TrafficGeneratorFloatingIPAssoc:
|
|
||||||
type: OS::Neutron::FloatingIPAssociation
|
|
||||||
properties:
|
|
||||||
floatingip_id: { get_resource: TrafficGeneratorFloatingIP }
|
|
||||||
port_id: { get_attr: [TrafficGeneratorVM, addresses, { get_param: INT_NETWORK }, 0, port] }
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
# network-appliance VM
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
CfnUser:
|
|
||||||
type: AWS::IAM::User
|
|
||||||
|
|
||||||
WebServerKeys:
|
|
||||||
type: AWS::IAM::AccessKey
|
|
||||||
properties:
|
|
||||||
UserName: { get_resource: CfnUser }
|
|
||||||
|
|
||||||
NetworkApplianceVolume:
|
|
||||||
type: OS::Cinder::Volume
|
|
||||||
properties:
|
|
||||||
name: network-appliance-DISK
|
|
||||||
image: { get_param: IMAGE }
|
|
||||||
size: 20
|
|
||||||
|
|
||||||
NetworkApplianceFloatingIP:
|
|
||||||
type: OS::Neutron::FloatingIP
|
|
||||||
properties:
|
|
||||||
floating_network: { get_param: EXT_NETWORK }
|
|
||||||
|
|
||||||
NetworkApplianceVM:
|
|
||||||
type: OS::Nova::Server
|
|
||||||
# Special Note: CFN related metadata is located at the resource level (not as a property)
|
|
||||||
metadata:
|
|
||||||
wrs-groupindex-mode: true
|
|
||||||
AWS::CloudFormation::Init:
|
|
||||||
config:
|
|
||||||
files:
|
|
||||||
/etc/cfn/cfn-credentials:
|
|
||||||
content:
|
|
||||||
str_replace:
|
|
||||||
template: |
|
|
||||||
AWSAccessKeyId=_keyid_
|
|
||||||
AWSSecretKey=_secret_
|
|
||||||
params:
|
|
||||||
_keyid_: { get_resource: WebServerKeys }
|
|
||||||
_secret_: { get_attr: [WebServerKeys, SecretAccessKey] }
|
|
||||||
mode: '000400'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
/etc/cfn/make_load:
|
|
||||||
content:
|
|
||||||
get_file: ./make_load
|
|
||||||
mode: '000700'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
/etc/cfn/get_cpu_load:
|
|
||||||
content:
|
|
||||||
get_file: ./get_cpu_load
|
|
||||||
mode: '000700'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
/etc/cfn/send_guest_metrics:
|
|
||||||
content:
|
|
||||||
str_replace:
|
|
||||||
template: |
|
|
||||||
#!/bin/sh
|
|
||||||
METRIC=`/etc/cfn/get_cpu_load`
|
|
||||||
/opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_
|
|
||||||
sleep 9
|
|
||||||
METRIC=`/etc/cfn/get_cpu_load`
|
|
||||||
/opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_
|
|
||||||
sleep 9
|
|
||||||
METRIC=`/etc/cfn/get_cpu_load`
|
|
||||||
/opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_
|
|
||||||
sleep 9
|
|
||||||
METRIC=`/etc/cfn/get_cpu_load`
|
|
||||||
/opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_
|
|
||||||
sleep 9
|
|
||||||
METRIC=`/etc/cfn/get_cpu_load`
|
|
||||||
/opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_
|
|
||||||
sleep 9
|
|
||||||
METRIC=`/etc/cfn/get_cpu_load`
|
|
||||||
/opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_
|
|
||||||
params:
|
|
||||||
_metername_: { get_param: METER_NAME }
|
|
||||||
_unit_: { get_param: METER_UNIT }
|
|
||||||
mode: '000700'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
/etc/cron.d/cfn_cron:
|
|
||||||
content:
|
|
||||||
get_file: ./cfn_cron
|
|
||||||
mode: '000600'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
/usr/bin/pkt-capture.sh:
|
|
||||||
content:
|
|
||||||
get_file: ./pkt-capture.sh
|
|
||||||
mode: '000700'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
/usr/bin/gen-add-load.sh:
|
|
||||||
content:
|
|
||||||
get_file: ./gen-add-load.sh
|
|
||||||
mode: '000700'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
/etc/init.d/gen-add-load-service:
|
|
||||||
content:
|
|
||||||
get_file: ./gen-add-load-service
|
|
||||||
mode: '000700'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
/etc/init.d/iperf-server-service:
|
|
||||||
content:
|
|
||||||
get_file: iperf-server-service
|
|
||||||
mode: '000700'
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
properties:
|
|
||||||
name: network-appliance
|
|
||||||
flavor: { get_param: SCALING_FLAVOR }
|
|
||||||
metadata: {"metering.stack_id": {get_param: "OS::stack_id"} }
|
|
||||||
block_device_mapping:
|
|
||||||
- device_name: vda
|
|
||||||
delete_on_termination: true
|
|
||||||
volume_id: { get_resource: NetworkApplianceVolume }
|
|
||||||
networks:
|
|
||||||
- { network: { get_param: INT_NETWORK } , fixed_ip: 10.10.10.50, vif-model: virtio}
|
|
||||||
user_data_format: HEAT_CFNTOOLS
|
|
||||||
user_data:
|
|
||||||
get_file: ./network-appliance-install.sh
|
|
||||||
|
|
||||||
NetworkApplianceFloatingIPAssoc:
|
|
||||||
type: OS::Neutron::FloatingIPAssociation
|
|
||||||
properties:
|
|
||||||
floatingip_id: { get_resource: NetworkApplianceFloatingIP }
|
|
||||||
port_id: { get_attr: [NetworkApplianceVM, addresses, { get_param: INT_NETWORK }, 0, port] }
|
|
||||||
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
# SETUP SCALING POLICIES
|
|
||||||
|
|
||||||
CPUAlarmHigh:
|
|
||||||
type: OS::Ceilometer::Alarm
|
|
||||||
properties:
|
|
||||||
description: Scale up if the 1 minute avg for the meter is above the threshold
|
|
||||||
meter_name: { get_param: METER_NAME }
|
|
||||||
statistic: avg
|
|
||||||
period: '60'
|
|
||||||
evaluation_periods: '1'
|
|
||||||
threshold: { get_param: HIGH_VALUE }
|
|
||||||
repeat_actions: True
|
|
||||||
alarm_actions:
|
|
||||||
- {get_attr: [NetworkApplianceVmScaleUpPolicy, AlarmUrl]}
|
|
||||||
comparison_operator: gt
|
|
||||||
matching_metadata: {'stack_id': {get_param: "OS::stack_id" }}
|
|
||||||
|
|
||||||
CPUAlarmLow:
|
|
||||||
type: OS::Ceilometer::Alarm
|
|
||||||
properties:
|
|
||||||
description: Scale down if the 1 minute avg for the meter is below the threshold
|
|
||||||
meter_name: { get_param: METER_NAME }
|
|
||||||
statistic: avg
|
|
||||||
period: '60'
|
|
||||||
evaluation_periods: '1'
|
|
||||||
threshold: { get_param: LOW_VALUE }
|
|
||||||
repeat_actions: True
|
|
||||||
alarm_actions:
|
|
||||||
- {get_attr: [NetworkApplianceVmScaleDownPolicy, AlarmUrl]}
|
|
||||||
comparison_operator: lt
|
|
||||||
matching_metadata: {'stack_id': {get_param: "OS::stack_id" }}
|
|
||||||
|
|
||||||
NetworkApplianceVmScaleUpPolicy:
|
|
||||||
type: OS::WR::ScalingPolicy
|
|
||||||
properties:
|
|
||||||
ServerName: {get_resource: NetworkApplianceVM}
|
|
||||||
ScalingResource: 'cpu'
|
|
||||||
ScalingDirection: 'up'
|
|
||||||
Cooldown: '60'
|
|
||||||
|
|
||||||
NetworkApplianceVmScaleDownPolicy:
|
|
||||||
type: OS::WR::ScalingPolicy
|
|
||||||
properties:
|
|
||||||
ServerName: {get_resource: NetworkApplianceVM}
|
|
||||||
ScalingResource: 'cpu'
|
|
||||||
ScalingDirection: 'down'
|
|
||||||
Cooldown: '60'
|
|
||||||
|
|
@@ -1,27 +0,0 @@
#!/bin/bash -v

#########################################################
#
# Install script for traffic-generator VM
# called thru cloud-init
#
#########################################################

echo "Starting setup of traffic generator ..." >> /var/log/heat_setup.txt

echo "Installing iperf ..." >> /var/log/heat_setup.txt
apt-get -y install iperf >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt

# Create sym links to standard location for cfn tools in an aws environment
echo "Setting up symlinks" >> /var/log/heat_setup.txt
cfn-create-aws-symlinks --source /usr/bin

# invoke cfn-init which will extract the cloudformation metadata from the userdata
echo "Setting up cfn-init " >> /var/log/heat_setup.txt
/usr/bin/cfn-init >> /var/log/heat_setup.txt

echo "Starting gen-traffic service ..." >> /var/log/heat_setup.txt
update-rc.d gen-traffic-service defaults 97 03 >> /var/log/heat_setup.txt
service gen-traffic-service start >> /var/log/heat_setup.txt

echo "Finished setup of traffic generator." >> /var/log/heat_setup.txt
@@ -1,305 +0,0 @@
# Copyright (c) 2013 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
################################################################################
#
# Objective:
# Demonstrates creating:
# An autoscaling stack of VMs that use cfn-push-stats to emit samples from
# within the guest.
# The name of the stack will prefix the custom meter
# This template requires OAM network setup properly to allow communication
# between the VMs to the controller
#
# Pre-Reqs:
# The VM must be able to communicate with the controller
# Normal lab setup. Capable of launching 3 VMs
# A keypair called: controller-0. (nova keypair-list)
# A flavor called: small (nova flavor-list)
# A glance image called: tis-centos-guest (glance image-list)
# A network called: internal-net0 (neutron net-list)
# A nested template file CFNPushStats.yaml in the same folder as this yaml.
#
# Optional Template Parameters:
# KEYPAIR: A keypair setup for the current user (nova keypair-list)
# KEYPAIR_ADMIN_USER: Name of user to inject ssh keys from keypair on the VM
# FLAVOR: A nova flavor name or UUID for the VMs (nova flavor-list)
# IMAGE: A glance image name or UUID for launching the VMs
# (glance image-list)
# PUBLIC_NETWORK: Name or UUID of the public network to use for the VMs
# (neutron net-list)
# INTERNAL_NETWORK: Name or UUID of the internal network to use for the VMs
# (neutron net-list)
# METER_NAME: Name of the new ceilometer meter to use to trigger autoscaling
# METER_UNIT: Unit of the new ceilometer meter to use to trigger autoscaling
# HIGH_VALUE: Value for the meter to trigger a scale up.
# LOW_VALUE: Value for the meter to trigger a scale down.
#
# Tenant Considerations:
# This template must be run as Admin
#
# Sample CLI syntax:
# heat stack-create -f CFNPushStatsAutoScale.yaml STACK
#
# Expected Outcome:
# VMs running the guest image (nova list)
# New ceilometer alarm triggers (ceilometer alarm-list)
# New ceilometer meters (ceilometer meter-list) created from within the VM
#
################################################################################

heat_template_version: 2015-04-30

description: >
Demonstrates autoscaling VMs that use cfn-push-stats
to emit ceilometer meters from within the VM

parameters:

KEYPAIR:
description: keypair to use. (nova keypair-list)
type: string
default: controller-0
constraints:
- custom_constraint: nova.keypair

KEYPAIR_ADMIN_USER:
description: Name of user account to inject ssh keys from keypair
type: string
default: 'ec2-user'

FLAVOR:
description: Nova flavor to use. (nova flavor-list)
type: string
default: small
constraints:
- custom_constraint: nova.flavor

IMAGE:
description: Glance image to create a cinder volume (glance image-list)
type: string
default: tis-centos-guest
constraints:
- custom_constraint: glance.image

PUBLIC_NETWORK:
description: Name of public network to use for VMs (neutron net-list)
type: string
default: public-net0
constraints:
- custom_constraint: neutron.network

INTERNAL_NETWORK:
description: Name of internal network to use for VMs (neutron net-list)
type: string
default: internal-net0
constraints:
- custom_constraint: neutron.network

METER_NAME:
description: Ceilometer meter to query when determining autoscaling
type: string
default: vm_stat

METER_UNIT:
description: Name for custom meter to be created using cfn-push-stats
type: string
default: '%'

HIGH_VALUE:
description: Metric value that will trigger a scale up if exceeded
type: string
default: '80'

LOW_VALUE:
description: Metric value that will trigger a scale down if below
type: string
default: '30'

resources:

CfnUser:
type: AWS::IAM::User

WebKeys:
type: AWS::IAM::AccessKey
properties:
UserName: { get_resource: CfnUser }

ScaleUpPolicy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: ScalingGroup }
cooldown: 60
scaling_adjustment: 1

ScaleDownPolicy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: ScalingGroup }
cooldown: 60
scaling_adjustment: -1

# Matching metadata is not compatible with cfn-push-stats
AlarmHigh:
type: OS::Ceilometer::Alarm
properties:
meter_name:
list_join:
- "_"
- - { get_param: 'OS::stack_name'}
- { get_param: METER_NAME }
statistic: avg
period: 60
evaluation_periods: 1
threshold: { get_param: HIGH_VALUE }
alarm_actions:
- {get_attr: [ScaleUpPolicy, alarm_url]}
comparison_operator: gt

AlarmLow:
type: OS::Ceilometer::Alarm
properties:
meter_name:
list_join:
- "_"
- - { get_param: 'OS::stack_name'}
- { get_param: METER_NAME }
statistic: avg
period: 60
evaluation_periods: 1
threshold: { get_param: LOW_VALUE }
alarm_actions:
- {get_attr: [ScaleDownPolicy, alarm_url]}
comparison_operator: lt


ScalingGroup:
type: OS::Heat::AutoScalingGroup
properties:
cooldown: 60
desired_capacity: 1
max_size: 3
min_size: 1
resource:
type: OS::Nova::Server
# Special Note: CFN related metadata is located at the resource
# level (not as a property)
metadata:
wrs-groupindex-mode: true
AWS::CloudFormation::Init:
config:
files:
/etc/cfn/cfn-credentials:
content:
str_replace:
template: |
AWSAccessKeyId=_keyid_
AWSSecretKey=_secret_
params:
_keyid_:
get_resource: WebKeys
_secret_:
get_attr:
- WebKeys
- SecretAccessKey
mode: '000400'
owner: root
group: root
/etc/cfn/make_load:
content: |
#!/bin/sh
# Generate maximum CPU load for a core
# Launch this X times for X cores
# to get 100% utilization
dd if=/dev/urandom of=/dev/null &
mode: '000700'
owner: root
group: root
/etc/cfn/get_cpu_load:
content: |
#!/usr/bin/python
# Get the 1 minute CPU load average and
# divide by num cores
import os
cores = 1
n = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(n, int) and n > 0:
cores = n
l_avg = float(os.getloadavg()[0])
# convert to a percentage
pct = (100 * l_avg) / float(cores)
print pct
mode: '000700'
owner: root
group: root
/etc/cfn/send_guest_metrics:
content:
str_replace:
template: |
#!/bin/sh
METRIC=`/etc/cfn/get_cpu_load`
/opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_
params:
_metername_:
list_join:
- "_"
- - { get_param: 'OS::stack_name' }
- { get_param: METER_NAME }
_unit_:
get_param: METER_UNIT
mode: '000700'
owner: root
group: root
/etc/cron.d/cfn_cron:
content: |
* * * * * root /etc/cfn/send_guest_metrics
mode: '000600'
owner: root
group: root

properties:
name:
list_join:
- "_"
- - { get_param: 'OS::stack_name'}
- "vm"
- ""
key_name: { get_param: KEYPAIR }
admin_user: { get_param: KEYPAIR_ADMIN_USER }
flavor: { get_param: FLAVOR }
image: { get_param: IMAGE }
networks:
- network: { get_param: PUBLIC_NETWORK }
- network: { get_param: INTERNAL_NETWORK }
# HEAT_CFNTOOLS includes Resource Metadata in the user-data
# automatically and expects the format to comply with
# AWS::CloudFormation::Init
user_data_format: HEAT_CFNTOOLS
user_data: |
#!/bin/bash -v
# Create sym links to standard location for cfn tools
# in an aws environment
echo "Setting up symlinks" >> /var/log/heat_setup.txt
cfn-create-aws-symlinks --source /usr/bin
# invoke cfn-init which will extract cloudformation
# metadata from the userdata
echo "Running cfn-init " >> /var/log/heat_setup.txt
/usr/bin/cfn-init >> /var/log/heat_setup.txt
echo "Done cfn-init setup" >> /var/log/heat_setup.txt


outputs:
ceilometer_query:
value:
str_replace:
template: ceilometer statistics -m metername -p 60 -a avg
params:
metername:
list_join:
- "_"
- - { get_param: 'OS::stack_name' }
- { get_param: METER_NAME }
@@ -1,5 +1,5 @@
################################################################################
-# Copyright (c) 2013-2014 Wind River Systems, Inc.
+# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -26,16 +26,16 @@
# IMAGE: A glance image to use in launching the VMs (glance image-list)
# LB_IMAGE: A glance image to use in launching the load balancer
# (glance image-list)
-# LB_NETWORK: name of the network to use for the load balancer VM
+# LB_NETWORK: UUID of the network to use for the load balancer VM
# (neutron net-list)
-# VM_NETWORK: name of the network to use for the scaling VMs
+# VM_NETWORK: UUID of the network to use for the scaling VMs
# (neutron net-list)
-# PUBLIC_NETWORK: name of public network to use for all VMs
+# PUBLIC_NETWORK: UUID of public network to use for all VMs
# (neutron net-list)
-# INTERNAL_NETWORK: name of the internal network to use for all VMs
+# INTERNAL_NETWORK: UUID of the internal network to use for all VMs
# (neutron net-list)
-# METER_NAME: name of the ceilometer meter to trigger autoscaling
+# METER_NAME: name of the gnocchi metric to trigger autoscaling
-# (ceilometer meter-list)
+# (gnocchi metric list)
#
# Tenant Considerations:
# The default meters (vswitch) are not accessible to tenants
@@ -46,7 +46,7 @@
# Expected Outcome:
# A VM running the load balancer (nova list)
# An auto-scaling stack of server VMs (nova list)
-# New ceilometer alarm triggers (ceilometer alarm-list)
+# New aodh alarm triggers (aodh alarm list)
#
# Note: there is no communication between the load balancer and the VMs
#
@@ -94,36 +94,37 @@ parameters:
PUBLIC_NETWORK:
type: string
description: Public network name
-default: public-net0
constraints:
- custom_constraint: neutron.network

INTERNAL_NETWORK:
type: string
description: Internal network name
-default: internal-net0
constraints:
- custom_constraint: neutron.network

VM_NETWORK:
type: string
description: Server network name
-default: private-net0
constraints:
- custom_constraint: neutron.network

LB_NETWORK:
type: string
description: Load Balancer network name
-default: internal-net0
constraints:
- custom_constraint: neutron.network

METER_NAME:
type: string
-description: ceilometer meter to trigger autoscaling
+description: gnocchi metric to trigger autoscaling
default: vswitch.port.receive.util

+RESOURCE_TYPE:
+description: Gnocchi resource type to use to query
+type: string
+default: vswitch_interface_and_port

MinClusterSize:
type: string
default: '1'
@@ -176,30 +177,54 @@ resources:
cooldown: 30

LINKAlarmHigh:
-type: OS::Ceilometer::Alarm
+type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
-description: Scale-out if the max link util > 50% for 30 secs
+description: Scale-out if the avg link util > 50% for 5 minutes
-meter_name: { get_param: METER_NAME }
+metric: { get_param: METER_NAME }
-statistic: max
+aggregation_method: mean
-period: '30'
+granularity: 300
evaluation_periods: '1'
threshold: '50'
+resource_type: { get_param: RESOURCE_TYPE }
+comparison_operator: gt
alarm_actions:
- {get_attr: [SrvScaleOutPolicy, alarm_url]}
-comparison_operator: gt
+query:
+str_replace:
+template: '{"or": [{"=": {"network_uuid": "internal_net_id"}},
+{"=": {"network_uuid": "public_net_id"}},
+{"=": {"network_uuid": "lb_net_id"}},
+{"=": {"network_uuid": "vm_net_id"}}]}'
+params:
+internal_net_id: {get_param: INTERNAL_NETWORK}
+public_net_id: {get_param: PUBLIC_NETWORK}
+lb_net_id: {get_param: LB_NETWORK}
+vm_net_id: {get_param: VM_NETWORK}

LINKAlarmLow:
-type: OS::Ceilometer::Alarm
+type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
-description: Scale-in if the max link util < 20% for 30 secs
+description: Scale-in if the avg link util < 20% for 5 minutes
-meter_name: { get_param: METER_NAME }
+metric: { get_param: METER_NAME }
-statistic: max
+aggregation_method: mean
-period: '30'
+granularity: 300
evaluation_periods: '1'
threshold: '20'
+resource_type: { get_param: RESOURCE_TYPE }
+comparison_operator: lt
alarm_actions:
- {get_attr: [SrvScaleInPolicy, alarm_url]}
-comparison_operator: lt
+query:
+str_replace:
+template: '{"or": [{"=": {"network_uuid": "internal_net_id"}},
+{"=": {"network_uuid": "public_net_id"}},
+{"=": {"network_uuid": "lb_net_id"}},
+{"=": {"network_uuid": "vm_net_id"}}]}'
+params:
+internal_net_id: {get_param: INTERNAL_NETWORK}
+public_net_id: {get_param: PUBLIC_NETWORK}
+lb_net_id: {get_param: LB_NETWORK}
+vm_net_id: {get_param: VM_NETWORK}

ServerGroup:
type: OS::Heat::AutoScalingGroup
@@ -221,7 +246,7 @@ resources:
image: { get_param: IMAGE }
key_name: { get_param: KEYPAIR }
admin_user: {get_param: KEYPAIR_ADMIN_USER }
-metadata: {"metering.stack": {get_param: "OS::stack_id"}}
+metadata: {"metering.server_group": {get_param: "OS::stack_id"}}
networks:
- network: { get_param: PUBLIC_NETWORK }
vif-model: 'virtio'
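
For illustration only (not part of this change): once the template above is stacked, the converted LINKAlarm triggers can be sanity-checked from the CLI. The stack and network names below are made-up examples; the gnocchi command mirrors the same 5-minute mean aggregation of vswitch.port.receive.util that the alarms now evaluate.

#!/bin/bash
# Illustrative check only -- the stack name and network name are examples.
STACK_NAME=demo-lb-scale
NET_ID=$(openstack network show internal-net0 -f value -c id)

# The converted triggers now appear in aodh rather than "ceilometer alarm-list".
aodh alarm list | grep "${STACK_NAME}"

# Same 5-minute mean the LINKAlarm resources evaluate, limited to one network.
gnocchi measures aggregation \
    --resource-type vswitch_interface_and_port \
    --query "network_uuid=\"${NET_ID}\"" \
    --aggregation mean --granularity 300 \
    -m vswitch.port.receive.util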
@@ -1,5 +1,5 @@
################################################################################
-# Copyright (c) 2013 Wind River Systems, Inc.
+# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -26,9 +26,8 @@
# FLAVOR: A nova flavor name or UUID for the VMs (nova flavor-list)
# IMAGE: A glance image name or UUID for launching VMs (glance image-list)
# NETWORK: Name or UUID of the network to use for the VMs (neutron net-list)
-# METER_NAME: Name of the ceilometer meter to use to trigger autoscaling
+# METER_NAME: Name of the gnocchi metric to use to trigger autoscaling
-# (ceilometer meter-list)
+# (gnocchi metric list)
-# METER_PREFIX: user_metadata for a nova meter, metering for all other meters
# HIGH_VALUE: Value for the meter to trigger a scale up.
# LOW_VALUE: Value for the meter to trigger a scale down.
#
@@ -40,7 +39,7 @@
#
# Expected Outcome:
# VMs running the guest image (nova list)
-# New ceilometer alarm triggers (ceilometer alarm-list)
+# New aodh alarm triggers (aodh alarm list)
# New cinder volumes for each VM (cinder list)
# This template produces an output which shows the CLI syntax to help
# troubleshoot autoscaling
@@ -89,18 +88,14 @@ parameters:
- custom_constraint: neutron.network

METER_NAME:
-description: Ceilometer meter to query when determining autoscaling
+description: Gnocchi metric to query when determining autoscaling
type: string
default: cpu_util

-METER_PREFIX:
+RESOURCE_TYPE:
-description: >
+description: Gnocchi resource type to use to query
-Ceilometer alarm query prefix.
-If a nova meter (user_metadata) otherwise (metering)
type: string
-default: user_metadata
+default: instance
-constraints:
-- allowed_values: [ user_metadata, metering ]

HIGH_VALUE:
description: Metric value that will trigger a scale up if exceeded
@@ -130,7 +125,7 @@ resources:
KEYPAIR: { get_param: KEYPAIR }
KEYPAIR_ADMIN_USER: { get_param: KEYPAIR_ADMIN_USER }
NETWORK: { get_param: NETWORK }
-METADATA: {"metering.stack": {get_param: "OS::stack_id"}}
+METADATA: {"metering.server_group": {get_param: "OS::stack_id"}}

ScaleUpPolicy:
type: OS::Heat::ScalingPolicy
@@ -149,51 +144,73 @@ resources:
scaling_adjustment: -1

AlarmHigh:
-type: OS::Ceilometer::Alarm
+type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
-meter_name: { get_param: METER_NAME }
+description: Scale up if the meter above threshold for 5 minutes
-statistic: avg
+metric: { get_param: METER_NAME }
-period: 60
+aggregation_method: mean
+granularity: 300
evaluation_periods: 1
threshold: { get_param: HIGH_VALUE }
+resource_type: { get_param: RESOURCE_TYPE }
+comparison_operator: gt
alarm_actions:
- {get_attr: [ScaleUpPolicy, alarm_url]}
-# ceilometer alarm resource will automatically prepend
+# gnocchi alarm resource will automatically
-# to the matching_metadata based on the meter type
+# prepend to the query based on the meter type
# metadata.metering
# or metadata.user_metadata
-matching_metadata: {'stack': {get_param: "OS::stack_id"}}
+query:
-comparison_operator: gt
+str_replace:
+template: '{"=": {"server_group": "stack_id"}}'
+params:
+stack_id: {get_param: "OS::stack_id"}

AlarmLow:
-type: OS::Ceilometer::Alarm
+type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
-meter_name: { get_param: METER_NAME }
+description: Scale down if the meter below threshold for 5 minutes
-statistic: avg
+metric: { get_param: METER_NAME }
-period: 60
+aggregation_method: mean
+granularity: 300
evaluation_periods: 1
threshold: { get_param: LOW_VALUE }
+resource_type: { get_param: RESOURCE_TYPE }
+comparison_operator: lt
alarm_actions:
- {get_attr: [ScaleDownPolicy, alarm_url]}
-# ceilometer alarm resource will automatically prepend
+# gnocchi alarm resource will automatically
-# to the matching_metadata based on the meter type
+# prepend to the query based on the meter type
# metadata.metering
# or metadata.user_metadata
-matching_metadata: {'stack': {get_param: "OS::stack_id"}}
+query:
-comparison_operator: lt
+str_replace:
+template: '{"=": {"server_group": "stack_id"}}'
+params:
+stack_id: {get_param: "OS::stack_id"}

outputs:
-ceilometer_query:
+gnocchi_query:
value:
str_replace:
template: >
-ceilometer statistics -m metername
+gnocchi measures aggregation --resource-type resourcetype
--q metadata.prefix.stack=stackval
+--query '"server_group"="stackval"'
--p 60 -a avg
+--granularity 300 --aggregation mean -m metric
params:
-metername: { get_param: METER_NAME }
+resourcetype: { get_param: RESOURCE_TYPE }
-prefix: { get_param: METER_PREFIX}
+metric: { get_param: METER_NAME }
stackval: { get_param: "OS::stack_id" }
+description: >
+This is a Gnocchi query for statistics on the cpu_util measurements about
+OS::Nova::Server instances in this stack. The --resource-type select the
+type of Gnocchi resource. The --query parameter filters resources
+according to its attributes. When a VM's metadata includes an item of the
+form metering.server_group=X, the corresponding Gnocchi resource has a
+attribute named server_group that can queried with 'server_group="X"' In
+this case the nested stacks give their VMs metadata that is passed as a
+nested stack parameter, and this stack passes a metadata of the form
+metering.server_group=X, where X is this stack's ID.

manual_scale_up:
value:
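
For illustration only (not part of this change): this is roughly what the new gnocchi_query output expands to once Heat substitutes OS::stack_id. The UUID below is a made-up example; the command itself is the one the template prints.

#!/bin/bash
# Illustrative only -- the stack UUID is an example value.
STACK_ID=6f27eb21-7e07-4d5e-8b8e-0123456789ab

# AlarmHigh/AlarmLow evaluate the equivalent of the query
#   {"=": {"server_group": "<stack id>"}}
# against Gnocchi "instance" resources, so the matching CLI check is:
gnocchi measures aggregation \
    --resource-type instance \
    --query "server_group=\"${STACK_ID}\"" \
    --granularity 300 --aggregation mean \
    -m cpu_util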
@@ -1,5 +1,5 @@
################################################################################
-# Copyright (c) 2014 Wind River Systems, Inc.
+# Copyright (c) 2014-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -33,8 +33,8 @@
# Expected Outcome:
# A new stack (heat stack-list)
# A new nova VM (nova list)
-# Two ceilometer alarms corresponding to high and low watermarks
+# Two aodh alarms corresponding to high and low watermarks
-# (ceilometer alarm-list)
+# (aodh alarm list)
#
################################################################################

@@ -80,18 +80,14 @@ parameters:
- custom_constraint: neutron.network

METER_NAME:
-description: Ceilometer meter to query when determining autoscaling
+description: Gnocchi metric to query when determining autoscaling
type: string
default: vcpu_util

-METER_PREFIX:
+RESOURCE_TYPE:
-description: >
+description: Gnocchi resource type to use to query
-Ceilometer alarm query prefix. If a nova meter (user_metadata)
-otherwise (metering)
type: string
-default: user_metadata
+default: instance
-constraints:
-- allowed_values: [ user_metadata, metering ]

SCALE_UP_VALUE:
description: Metric value that will trigger a scale up if exceeded
@@ -119,45 +115,57 @@ resources:
key_name: { get_param: KEYPAIR }
admin_user: { get_param: KEYPAIR_ADMIN_USER }
metadata:
-metering.stack: { get_param: "OS::stack_id" }
+{"metering.server_group": { get_param: "OS::stack_id"}}
networks:
- { network: { get_param: NETWORK }, vif-model: virtio }

CPUAlarmHigh:
-type: OS::Ceilometer::Alarm
+type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
-description: Scale up if 1 minute avg of meter above threshold
+description: Scale up if 5 minutes avg of meter above threshold
-meter_name: { get_param: METER_NAME }
+metric: { get_param: METER_NAME }
-statistic: avg
+aggregation_method: mean
-period: '60'
+granularity: 300
evaluation_periods: '1'
threshold: { get_param: SCALE_UP_VALUE }
+resource_type: { get_param: RESOURCE_TYPE }
repeat_actions: 'True'
+comparison_operator: gt
alarm_actions:
- { get_attr: [SrvScaleUpPolicy, AlarmUrl] }
-comparison_operator: gt
+# gnocchi alarm resource will automatically
-# ceilometer alarm resource will automatically prepend
+# prepend to the query based on the meter type
# metadata.metering
# or metadata.user_metadata
-matching_metadata: { 'stack': { get_param: "OS::stack_id" }}
+query:
+str_replace:
+template: '{"=": {"server_group": "stack_id"}}'
+params:
+stack_id: {get_param: "OS::stack_id"}

CPUAlarmLow:
-type: OS::Ceilometer::Alarm
+type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
-description: Scale down if 1 minute avg of meter below threshold
+description: Scale down if 5 minutes avg of meter below threshold
-meter_name: { get_param: METER_NAME }
+metric: { get_param: METER_NAME }
-statistic: avg
+aggregation_method: mean
-period: '60'
+granularity: 300
evaluation_periods: '1'
threshold: { get_param: SCALE_DOWN_VALUE }
+resource_type: { get_param: RESOURCE_TYPE }
repeat_actions: 'True'
+comparison_operator: lt
alarm_actions:
- { get_attr: [SrvScaleDownPolicy, AlarmUrl]}
-comparison_operator: lt
+# gnocchi alarm resource will automatically
-# ceilometer alarm resource will automatically prepend
+# prepend to the query based on the meter type
# metadata.metering
# or metadata.user_metadata
-matching_metadata: {'stack': { get_param: "OS::stack_id" }}
+query:
+str_replace:
+template: '{"=": {"server_group": "stack_id"}}'
+params:
+stack_id: {get_param: "OS::stack_id"}

SrvScaleUpPolicy:
type: OS::WR::ScalingPolicy
@@ -177,33 +185,27 @@ resources:

outputs:

-ceilometer_query:
+gnocchi_query:
value:
str_replace:
template: >
-ceilometer statistics -m metername
+gnocchi measures aggregation --resource-type resourcetype
--q metadata.prefix.stack=stackval
+--query '"server_group"="stackval"'
--p 60 -a avg
+--granularity 300 --aggregation mean -m metric
params:
-metername: { get_param: METER_NAME }
+resourcetype: { get_param: RESOURCE_TYPE }
-prefix: { get_param: METER_PREFIX }
+metric: { get_param: METER_NAME }
-stackval: { get_param: "OS::stack_id" }

-ceilometer_sample_create:
-value:
-str_replace:
-template: >
-ceilometer sample-create
--r server -m metername
---meter-type gauge
---meter-unit '%'
---sample-volume 2
---resource-metadata '{"prefix.stack":"stackval"}
-params:
-server: { get_resource: Srv}
-metername: { get_param: METER_NAME }
-prefix: { get_param: METER_PREFIX }
stackval: { get_param: "OS::stack_id" }
+description: >
+This is a Gnocchi query for statistics on the vcpu_util measurements about
+OS::Nova::Server instances in this stack. The --resource-type select the
+type of Gnocchi resource. The --query parameter filters resources
+according to its attributes. When a VM's metadata includes an item of the
+form metering.server_group=X, the corresponding Gnocchi resource has a
+attribute named server_group that can queried with 'server_group="X"' In
+this case the nested stacks give their VMs metadata that is passed as a
+nested stack parameter, and this stack passes a metadata of the form
+metering.server_group=X, where X is this stack's ID.

manual_scale_up:
value:
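
For illustration only (not part of this change): one way to confirm that the VM launched by this stack shows up in Gnocchi as an instance resource carrying the server_group attribute the alarms query on. The server name is a hypothetical example, and the gnocchi resource show invocation is assumed to be available from python-gnocchiclient.

#!/bin/bash
# Illustrative only -- the server name is an example, not from the patch.
SERVER_ID=$(openstack server show demo-vcpu-scale-vm -f value -c id)

# The resource's server_group field should match the stack ID that was passed
# down through the metering.server_group metadata item.
gnocchi resource show --type instance "${SERVER_ID}"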
@@ -1,12 +1,12 @@
################################################################################
-# Copyright (c) 2013-2015 Wind River Systems, Inc.
+# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
################################################################################
#
# Objective:
-# Demonstrate constructing the heat resource OS::Ceilometer::Alarm
+# Demonstrate constructing the heat resource OS::Aodh::GnocchiAggregationByResourcesAlarm
#
# Pre-Reqs:
# Normal Lab Setup
@@ -22,19 +22,19 @@
#
# Expected Outcome:
# A new alarm (trigger).
-# ceilometer alarm-list
+# aodh alarm list
#
################################################################################

heat_template_version: 2015-04-30

description: >
-Demonstrate the OS::Ceilometer::Alarm heat resource
+Demonstrate the OS::Aodh::GnocchiAggregationByResourcesAlarm heat resource

resources:

OS_Ceilometer_Alarm:
-type: OS::Ceilometer::Alarm
+type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
#################################################
# Required properties
@@ -50,21 +50,36 @@ resources:
# , required: true, type: string}
evaluation_periods: '3'

-# meter_name: {description: Meter name watched by the alarm
+# metric: {description: Meter name watched by the alarm
# , required: true, type: string}
-meter_name: 'cpu_util'
+metric: 'cpu_util'

-# period: {description: Period (seconds) to evaluate over
+# aggregation_method:
-# , required: true, type: string}
+# description: The aggregation_method to compare to the threshold
-period: '90'

-# statistic:
-# constraints:
-# - allowed_values: [count, avg, sum, min, max]
-# description: Meter statistic to evaluate
# required: true
# type: string
-statistic: 'avg'
+# default: mean
+# constraints:
+# - allowed_values: mean }
+aggregation_method: 'mean'

+# granularity:
+# description: The time range in seconds over which to query
+# required: true
+# type: string
+granularity: '300'

+# resource_type: {description: The type of resource, required:true, type:string}
+resource_type: 'instance'

+# query:
+# description: A query to filter resource, it's a json string like
+# {"and": [{"=": {"ended_at": null}}, ...]}
+query:
+str_replace:
+template: '{"=": {"server_group": "stack_id"}}'
+params:
+stack_id: {get_param: "OS::stack_id"}

# threshold: {description: Threshold to evaluate against
# , required: true, type: string}
@@ -88,10 +103,6 @@ resources:
# (webhooks) to invoke when state transitions to insufficient-data
# , required: false, type: list}

-# matching_metadata: {description: Meter should match this resource
-# metadata (key=value) additionally to the meter_name
-# , required: false, type: map}

# ok_actions: {description: A list of URLs (webhooks) to invoke
# when state transitions to ok, required: false, type: list}

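For illustration only (not part of this change): a rough aodh CLI equivalent of the converted alarm resource above, sketched with example values. The alarm name, threshold and stack UUID are illustrative assumptions, not taken from the patch.

#!/bin/bash
# Sketch only -- builds an alarm of the same type and shape as the heat resource.
STACK_ID=6f27eb21-7e07-4d5e-8b8e-0123456789ab

aodh alarm create \
    --name demo-gnocchi-aggregation-alarm \
    --type gnocchi_aggregation_by_resources_threshold \
    --metric cpu_util \
    --aggregation-method mean \
    --granularity 300 \
    --evaluation-periods 3 \
    --threshold 80 \
    --comparison-operator gt \
    --resource-type instance \
    --query '{"=": {"server_group": "'"${STACK_ID}"'"}}'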