Fix template variable scoping

Change-Id: I3933a0fbadc4e76bbbc5a3c18ef1c1819ffb383c
Author: bklei
Date:   2015-08-12 11:29:07 -06:00
parent d4c70c2303
commit e4673c8041
8 changed files with 85 additions and 85 deletions
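
The change itself is mechanical: every ERB placeholder in these templates moves from a bare name (<%= region_name %>) to an instance variable (<%= @region_name %>), the form in which Puppet hands manifest variables to a template's binding; the bare-name style is deprecated and unreliable under stricter variable scoping. Below is a minimal plain-Ruby sketch of the ERB behaviour behind the change; the TemplateScope class and the sample value are illustrative only and are not part of this repo.

require 'erb'

# Illustrative stand-in for the object whose binding a template is
# evaluated against; Puppet exposes variables to templates in this
# @-prefixed instance-variable form.
class TemplateScope
  def initialize(vars)
    vars.each { |name, value| instance_variable_set("@#{name}", value) }
  end

  def render(text)
    ERB.new(text).result(binding)
  end
end

scope = TemplateScope.new('region_name' => 'region-1')

# Instance-variable form resolves through the binding's receiver.
puts scope.render('region: <%= @region_name %>')   # => region: region-1

# A bare name has nothing to resolve against in plain ERB and raises
# NameError, the kind of lookup this commit stops relying on.
begin
  scope.render('region: <%= region_name %>')
rescue NameError => e
  puts "NameError: #{e.message}"
end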

View File

@@ -1,4 +1,4 @@
-region: <%= region_name %>
+region: <%= @region_name %>
 accessedViaHttps: false
@@ -10,50 +10,50 @@ maxQueryLimit: 1000000
 kafka:
   brokerUris:
-    - <%= kafka_brokers %>
+    - <%= @kafka_brokers %>
   zookeeperUris:
-    - <%= zookeeper_servers %>
+    - <%= @zookeeper_servers %>
   healthCheckTopic: healthcheck
 mysql:
   driverClass: com.mysql.jdbc.Driver
-  url: jdbc:mysql://<%= sql_host %>:3306/mon?connectTimeout=5000&autoReconnect=true
-  user: <%= sql_user %>
-  password: <%= sql_password %>
+  url: jdbc:mysql://<%= @sql_host %>:3306/mon?connectTimeout=5000&autoReconnect=true
+  user: <%= @sql_user %>
+  password: <%= @sql_password %>
   maxWaitForConnection: 1s
   validationQuery: "/* MyService Health Check */ SELECT 1"
   minSize: 8
   maxSize: 32
-  checkConnectionWhileIdle: <%= check_conn_while_idle %>
+  checkConnectionWhileIdle: <%= @check_conn_while_idle %>
   checkConnectionOnBorrow: true
 databaseConfiguration:
-  databaseType: <%= database_type %>
+  databaseType: <%= @database_type %>
 influxDB:
   name: mon
   version: V9
   maxHttpConnections: 100
-  gzip: <%= gzip_setting %>
+  gzip: <%= @gzip_setting %>
   replicationFactor: 1
-  url: http://<%= database_host %>:8086
-  user: <%= api_db_user %>
-  password: <%= api_db_password %>
+  url: http://<%= @database_host %>:8086
+  user: <%= @api_db_user %>
+  password: <%= @api_db_password %>
 vertica:
   driverClass: com.vertica.jdbc.Driver
-  url: jdbc:vertica://<%= database_host %>:5433/mon
+  url: jdbc:vertica://<%= @database_host %>:5433/mon
   user: dbadmin
-  password: <%= db_admin_password %>
+  password: <%= @db_admin_password %>
   maxWaitForConnection: 5s
   validationQuery: "/* MyService Health Check */ SELECT 1"
   minSize: 4
   maxSize: 32
-  checkConnectionWhileIdle: <%= check_conn_while_idle %>
+  checkConnectionWhileIdle: <%= @check_conn_while_idle %>
 middleware:
   enabled: true
-  serverVIP: <%= keystone_endpoint %>
+  serverVIP: <%= @keystone_endpoint %>
   serverPort: 5000
   connTimeout: 5000
   connSSLClientAuth: true
@@ -65,13 +65,13 @@ middleware:
   connPoolMinIdleTime: 600000
   connRetryTimes: 2
   connRetryInterval: 50
-  defaultAuthorizedRoles: <%= roles_default %>
-  agentAuthorizedRoles: <%= roles_agent %>
-  delegateAuthorizedRole: <%= role_delegate %>
-  adminAuthMethod: <%= auth_method %>
-  adminUser: <%= admin_name %>
-  adminPassword: <%= admin_password %>
-  adminToken: <%= keystone_admin_token %>
+  defaultAuthorizedRoles: <%= @roles_default %>
+  agentAuthorizedRoles: <%= @roles_agent %>
+  delegateAuthorizedRole: <%= @role_delegate %>
+  adminAuthMethod: <%= @auth_method %>
+  adminUser: <%= @admin_name %>
+  adminPassword: <%= @admin_password %>
+  adminToken: <%= @keystone_admin_token %>
   timeToCacheToken: 600
   maxTokenCacheSize: 1048576
@@ -79,7 +79,7 @@ server:
   applicationConnectors:
     - type: http
       maxRequestHeaderSize: 16KiB
-      port: <%= monasca_api_port %>
+      port: <%= @monasca_api_port %>
 logging:

View File

@@ -1,4 +1,4 @@
-#!<%= virtual_env %>/bin/python
+#!<%= @virtual_env %>/bin/python
 import json
 from keystoneclient.v3 import client as keystone_client
@@ -55,7 +55,7 @@ def create_alarm_definition(client, alarm_definition):
 def main():
     client = get_monasca_client()
     names = get_current_names(client)
-    alarm_definitions = json.load(open('<%= alarm_definition_config %>'))
+    alarm_definitions = json.load(open('<%= @alarm_definition_config %>'))
     for alarm_definition in alarm_definitions:
         name = alarm_definition['name']

View File

@@ -19,16 +19,16 @@ then
 fi
 CONFIG_FILE="/etc/opt/influxdb/influxdb.conf"
-INFLUX_HOST="<%= influxdb_host %>:<%= influxdb_port %>"
-INFLUX_ADMIN="<%= influxdb_user %>"
+INFLUX_HOST="<%= @influxdb_host %>:<%= @influxdb_port %>"
+INFLUX_ADMIN="<%= @influxdb_user %>"
 MONASCA_DB="mon"
 MONASCA_USERS="mon_api mon_persister"
 MONASCA_READ_ONLY_USERS="mon_ro"
-DEFAULT_RETENTION_POLICY_NAME="<%= influxdb_def_ret_pol_name %>"
-DEFAULT_RETENTION_POLICY_DURATION="<%= influxdb_def_ret_pol_duration %>"
-TMP_RETENTION_POLICY_NAME="<%= influxdb_tmp_ret_pol_name %>"
-TMP_RETENTION_POLICY_DURATION="<%= influxdb_tmp_ret_pol_duration %>"
-RETENTION_POLICY_REPLICATION_FACTOR="<%= influxdb_retention_replication %>"
+DEFAULT_RETENTION_POLICY_NAME="<%= @influxdb_def_ret_pol_name %>"
+DEFAULT_RETENTION_POLICY_DURATION="<%= @influxdb_def_ret_pol_duration %>"
+TMP_RETENTION_POLICY_NAME="<%= @influxdb_tmp_ret_pol_name %>"
+TMP_RETENTION_POLICY_DURATION="<%= @influxdb_tmp_ret_pol_duration %>"
+RETENTION_POLICY_REPLICATION_FACTOR="<%= @influxdb_retention_replication %>"
 wait_for_influx()
 {

View File

@@ -1,5 +1,5 @@
 kafka:
-  url: <%= kafka_brokers %>
+  url: <%= @kafka_brokers %>
   group: monasca-notification
   alarm_topic: alarm-state-transitions
   notification_topic: alarm-notifications
@@ -7,27 +7,27 @@ kafka:
   max_offset_lag: 600
 mysql:
-  host: <%= sql_host %>
-  user: <%= sql_user %>
-  passwd: <%= sql_password %>
+  host: <%= @sql_host %>
+  user: <%= @sql_user %>
+  passwd: <%= @sql_password %>
   db: mon
 notification_types:
   email:
-    server: <%= smtp_server %>
-    port: <%= smtp_port %>
-    user: <%= smtp_user %>
-    password: <%= smtp_password %>
+    server: <%= @smtp_server %>
+    port: <%= @smtp_port %>
+    user: <%= @smtp_user %>
+    password: <%= @smtp_password %>
     timeout: 60
-    from_addr: <%= from_email_address %>
+    from_addr: <%= @from_email_address %>
   webhook:
     timeout: 5
-    url: <%= webhook_url %>
+    url: <%= @webhook_url %>
  pagerduty:
    timeout: 5
-    url: <%= pagerduty_url %>
+    url: <%= @pagerduty_url %>
 processors:
   alarm:
@@ -47,7 +47,7 @@ queues:
   sent_notifications_size: 50
 zookeeper:
-  url: <%= zookeeper_servers %>
+  url: <%= @zookeeper_servers %>
   notification_path: /notification/alarms
   notification_retry_path: /notification/retry

View File

@@ -1,25 +1,25 @@
-name: <%= persister_service_name %>
+name: <%= @persister_service_name %>
 alarmHistoryConfiguration:
-  batchSize: <%= batch_size %>
-  numThreads: <%= num_threads %>
-  maxBatchTime: <%= batch_seconds %>
+  batchSize: <%= @batch_size %>
+  numThreads: <%= @num_threads %>
+  maxBatchTime: <%= @batch_seconds %>
   topic: alarm-state-transitions
-  groupId: <%= persister_config['consumer_group_id'] %>_alarm-state-transitions
-  consumerId: <%= consumer_id %>
+  groupId: <%= @persister_config['consumer_group_id'] %>_alarm-state-transitions
+  consumerId: <%= @consumer_id %>
   clientId : 1
 metricConfiguration:
-  batchSize: <%= batch_size %>
-  numThreads: <%= num_threads %>
-  maxBatchTime: <%= batch_seconds %>
+  batchSize: <%= @batch_size %>
+  numThreads: <%= @num_threads %>
+  maxBatchTime: <%= @batch_seconds %>
   topic: metrics
-  groupId: <%= persister_config['consumer_group_id'] %>_metrics
-  consumerId: <%= consumer_id %>
+  groupId: <%= @persister_config['consumer_group_id'] %>_metrics
+  consumerId: <%= @consumer_id %>
   clientId : 1
 kafkaConfig:
-  zookeeperConnect: <%= zookeeper_servers %>
+  zookeeperConnect: <%= @zookeeper_servers %>
   socketTimeoutMs: 30000
   socketReceiveBufferBytes : 65536
   fetchMessageMaxBytes: 1048576
@@ -39,24 +39,24 @@ verticaMetricRepoConfig:
   maxCacheSize: 2000000
 databaseConfiguration:
-  databaseType: <%= persister_config['database_type'] %>
+  databaseType: <%= @persister_config['database_type'] %>
 influxDbConfiguration:
   name: mon
   version: V9
   maxHttpConnections: 100
-  gzip: <%= gzip_setting %>
-  replicationFactor: <%= replication_factor %>
-  retentionPolicy: <%= retention_policy %>
-  url: <%= persister_config['database_url'] %>
-  user: <%= pers_db_user %>
-  password: <%= api_db_password %>
+  gzip: <%= @gzip_setting %>
+  replicationFactor: <%= @replication_factor %>
+  retentionPolicy: <%= @retention_policy %>
+  url: <%= @persister_config['database_url'] %>
+  user: <%= @pers_db_user %>
+  password: <%= @api_db_password %>
 dataSourceFactory:
   driverClass: com.vertica.jdbc.Driver
-  url: <%= persister_config['database_url'] %>
+  url: <%= @persister_config['database_url'] %>
   user: dbadmin
-  password: <%= db_admin_password %>
+  password: <%= @db_admin_password %>
   properties:
     ssl: false
   maxWaitForConnection: 5s
@@ -82,15 +82,15 @@ logging:
     - type: file
       threshold: INFO
       archive: true
-      currentLogFilename: /var/log/monasca/<%= persister_service_name %>.log
-      archivedLogFilenamePattern: /var/log/monasca/<%= persister_service_name %>.log-%d.log.gz
+      currentLogFilename: /var/log/monasca/<%= @persister_service_name %>.log
+      archivedLogFilenamePattern: /var/log/monasca/<%= @persister_service_name %>.log-%d.log.gz
       archivedFileCount: 5
       timeZone: UTC
 server:
   applicationConnectors:
     - type: http
-      port: <%= persister_config['application_port'] %>
+      port: <%= @persister_config['application_port'] %>
   adminConnectors:
     - type: http
-      port: <%= persister_config['admin_port'] %>
+      port: <%= @persister_config['admin_port'] %>

View File

@@ -1,6 +1,6 @@
-# Startup script for the <%= persister_service_name %>
+# Startup script for the <%= @persister_service_name %>
-description "<%= persister_service_name %> java app"
+description "<%= @persister_service_name %> java app"
 start on runlevel [2345]
 console log
@@ -8,4 +8,4 @@ respawn
 setgid monasca
 setuid persister
-exec /usr/bin/java -Dfile.encoding=UTF-8 -Xmx8g -cp /opt/monasca/monasca-persister.jar:/opt/vertica/java/lib/vertica_jdbc.jar monasca.persister.PersisterApplication server /etc/monasca/<%= persister_service_name %>.yml
+exec /usr/bin/java -Dfile.encoding=UTF-8 -Xmx8g -cp /opt/monasca/monasca-persister.jar:/opt/vertica/java/lib/vertica_jdbc.jar monasca.persister.PersisterApplication server /etc/monasca/<%= @persister_service_name %>.yml

View File

@@ -1,13 +1,13 @@
 #!/bin/bash
 #
-# /etc/init.d/storm-<%= storm_service %>
+# /etc/init.d/storm-<%= @storm_service %>
 #
-# Startup script for storm-<%= storm_service %>
+# Startup script for storm-<%= @storm_service %>
 #
-# description: Starts and stops storm-<%= storm_service %>
+# description: Starts and stops storm-<%= @storm_service %>
 #
-stormBin=<%= storm_install_dir %>/bin/storm
-stormSvc=<%= storm_service %>
+stormBin=<%= @storm_install_dir %>/bin/storm
+stormSvc=<%= @storm_service %>
 desc="Storm $stormSvc daemon"
 outFile="/var/log/storm/storm-$stormSvc.out"
@@ -18,7 +18,7 @@ fi
 start() {
   echo "Starting $desc (storm-$stormSvc): "
-  su <%= storm_user %> -c "nohup $stormBin <%= storm_service %> >>$outFile 2>&1 &"
+  su <%= @storm_user %> -c "nohup $stormBin <%= @storm_service %> >>$outFile 2>&1 &"
   RETVAL=$?
   sleep 2
   return $RETVAL

View File

@@ -6,7 +6,7 @@ metricSpoutConfig:
   topic: metrics
   numThreads: 1
   groupId: thresh-metric
-  zookeeperConnect: <%= zookeeper_servers %>
+  zookeeperConnect: <%= @zookeeper_servers %>
   consumerId: 1
   socketTimeoutMs: 30000
   socketReceiveBufferBytes : 65536
@@ -31,7 +31,7 @@ eventSpoutConfig:
   topic: events
   numThreads: 1
   groupId: thresh-event
-  zookeeperConnect: <%= zookeeper_servers %>
+  zookeeperConnect: <%= @zookeeper_servers %>
   consumerId: 1
   socketTimeoutMs: 30000
   socketReceiveBufferBytes : 65536
@@ -53,7 +53,7 @@ eventSpoutConfig:
 kafkaProducerConfig:
   topic: alarm-state-transitions
-  metadataBrokerList: <%= kafka_brokers %>
+  metadataBrokerList: <%= @kafka_brokers %>
   serializerClass: kafka.serializer.StringEncoder
   partitionerClass:
   requestRequiredAcks: 1
@@ -77,9 +77,9 @@ sporadicMetricNamespaces:
 database:
   driverClass: com.mysql.jdbc.Driver
-  url: jdbc:mysql://<%= sql_host %>:3306/mon?connectTimeout=5000&autoReconnect=true
-  user: <%= sql_user %>
-  password: <%= sql_password %>
+  url: jdbc:mysql://<%= @sql_host %>:3306/mon?connectTimeout=5000&autoReconnect=true
+  user: <%= @sql_user %>
+  password: <%= @sql_password %>
   properties:
     ssl: false
   maxWaitForConnection: 1s