Derive Mako scenario templates from the current YAMLs

Convert the existing YAML scenarios, which are really templates in disguise,
into real Mako templates.

Update the documentation as well.

Partially Implements: bp scenario-test-config-template
Change-Id: I20cc1f9fa2ae5a41a02f4a46c5ae665e9a7af91a
Luigi Toscano 2015-07-14 11:13:04 +02:00
parent 0227ae833d
commit 29724b6648
12 changed files with 564 additions and 7 deletions
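
What the conversion buys: once the scenarios are real Mako templates, producing a concrete scenario file is a plain render call. A minimal sketch, assuming the mako package is installed; the template fragment and the image name are illustrative, not taken from this commit:

from mako.template import Template

# Substitute a template variable into a fragment of a scenario file.
fragment = Template("image: ${cdh_image}")
print(fragment.render(cdh_image="sahara-cdh-5.3.0-ubuntu"))
# -> image: sahara-cdh-5.3.0-ubuntu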


@@ -9,18 +9,19 @@ tests.
 Details
 -------
-Key values:
+Key values (mako variables):
-* %OS_USERNAME%, %OS_PASSWORD%, %OS_TENANT_NAME%, %OPENSTACK_HOST% - OpenStack credentials;
-* %NETWORK% - network type (neutron or nova-network);
-* %cluster_name% - name of cluster, which generating from $HOST-$ZUUL_CHANGE-$CLUSTER_HASH. Where:
+* ${OS_USERNAME}, ${OS_PASSWORD}, ${OS_TENANT_NAME}, ${OS_AUTH_URL} - OpenStack credentials and access details;
+* ${network_type} - network type (neutron or nova-network);
+* ${network_private_name}, ${network_public_name} - names of the private (tenant) and public networks;
+* ${cluster_name} - name of the cluster, generated from $HOST-$ZUUL_CHANGE-$CLUSTER_HASH, where:
   * $HOST - host id (c1 - with neutron, c2 - with nova-network);
   * $ZUUL_CHANGE - change number;
   * $CLUSTER_HASH - hash generated for each cluster with the "uuid" python module;
-* %{plugin}_image% - name of image for each plugin;
+* ${<plugin>_image} - name of the image for each plugin;
 * flavor ids:
-  * %ci_flavor% - 2GB RAM, 1 VCPU, 40GB Root disk;
-  * %medium_flavor% - 4GB RAM, 2 VCPUs, 40GB Root disk;
+  * ${ci_flavor} - 2GB RAM, 1 VCPU, 40GB Root disk;
+  * ${medium_flavor} - 4GB RAM, 2 VCPUs, 40GB Root disk;
 Main URLs
 ---------
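
The ${cluster_name} convention documented above can be reproduced in a few lines. A hedged sketch (the helper below is illustrative, not code from this change), building the name as described, with the hash coming from the "uuid" module:

import os
import uuid

def make_cluster_name(host_id):
    # $HOST-$ZUUL_CHANGE-$CLUSTER_HASH, per the documentation above.
    zuul_change = os.environ.get("ZUUL_CHANGE", "0")
    cluster_hash = uuid.uuid4().hex[:8]
    return "%s-%s-%s" % (host_id, zuul_change, cluster_hash)

print(make_cluster_name("c1"))  # e.g. c1-203558-1f2a3b4c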


@@ -0,0 +1,71 @@
clusters:
  - plugin_name: cdh
    plugin_version: 5.3.0
    image: ${cdh_image}
    node_group_templates:
      - name: worker-dn
        flavor_id: ${ci_flavor_id}
        node_processes:
          - HDFS_DATANODE
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
        node_configs:
          &ng_configs
          DATANODE:
            dfs_datanode_du_reserved: 0
      - name: worker-nm
        flavor_id: ${ci_flavor_id}
        node_processes:
          - YARN_NODEMANAGER
        auto_security_group: true
      - name: worker-nm-dn
        flavor_id: ${ci_flavor_id}
        node_processes:
          - YARN_NODEMANAGER
          - HDFS_DATANODE
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
        node_configs:
          *ng_configs
      - name: manager
        flavor_id: ${medium_flavor_id}
        node_processes:
          - CLOUDERA_MANAGER
        auto_security_group: true
      - name: master-core
        flavor_id: ${medium_flavor_id}
        node_processes:
          - HDFS_NAMENODE
          - YARN_RESOURCEMANAGER
          - SENTRY_SERVER
          - ZOOKEEPER_SERVER
        auto_security_group: true
      - name: master-additional
        flavor_id: ${medium_flavor_id}
        node_processes:
          - OOZIE_SERVER
          - YARN_JOBHISTORY
          - HDFS_SECONDARYNAMENODE
          - HIVE_METASTORE
          - HIVE_SERVER2
        auto_security_group: true
    cluster_template:
      name: cdh530
      node_group_templates:
        manager: 1
        master-core: 1
        master-additional: 1
        worker-nm-dn: 1
        worker-nm: 1
        worker-dn: 1
      cluster_configs:
        HDFS:
          dfs_replication: 1
    cluster:
      name: ${cluster_name}
    scenario:
      - run_jobs
      - sentry
    edp_jobs_flow: hadoop_2
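
One detail worth calling out in the file above: &ng_configs / *ng_configs is standard YAML anchor/alias syntax, which lets several node groups share a single node_configs mapping instead of repeating it. A minimal standalone sketch, assuming PyYAML; the snippet itself is illustrative, not part of this commit:

import yaml

doc = """
worker-dn:
  node_configs:
    &ng_configs
    DATANODE:
      dfs_datanode_du_reserved: 0
worker-nm-dn:
  node_configs:
    *ng_configs
"""

data = yaml.safe_load(doc)
# The alias resolves to the very mapping defined at the anchor.
assert data["worker-dn"]["node_configs"] == data["worker-nm-dn"]["node_configs"]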


@@ -0,0 +1,69 @@
clusters:
  - plugin_name: cdh
    plugin_version: 5.4.0
    image: ${cdh_5_4_0_image}
    node_group_templates:
      - name: worker-dn
        flavor_id: ${ci_flavor_id}
        node_processes:
          - HDFS_DATANODE
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
        node_configs:
          &ng_configs
          DATANODE:
            dfs_datanode_du_reserved: 0
      - name: worker-nm
        flavor_id: ${ci_flavor_id}
        node_processes:
          - YARN_NODEMANAGER
        auto_security_group: true
      - name: worker-nm-dn
        flavor_id: ${ci_flavor_id}
        node_processes:
          - YARN_NODEMANAGER
          - HDFS_DATANODE
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
        node_configs:
          *ng_configs
      - name: manager
        flavor_id: ${large_flavor_id}
        node_processes:
          - CLOUDERA_MANAGER
          - KMS
        auto_security_group: true
      - name: master-core
        flavor_id: ${medium_flavor_id}
        node_processes:
          - HDFS_NAMENODE
          - YARN_RESOURCEMANAGER
        auto_security_group: true
      - name: master-additional
        flavor_id: ${medium_flavor_id}
        node_processes:
          - OOZIE_SERVER
          - YARN_JOBHISTORY
          - HDFS_SECONDARYNAMENODE
          - HIVE_METASTORE
          - HIVE_SERVER2
        auto_security_group: true
    cluster_template:
      name: cdh540
      node_group_templates:
        manager: 1
        master-core: 1
        master-additional: 1
        worker-nm-dn: 1
        worker-nm: 1
        worker-dn: 1
      cluster_configs:
        HDFS:
          dfs_replication: 1
    cluster:
      name: ${cluster_name}
    scenario:
      - run_jobs
    edp_jobs_flow: hadoop_2


@@ -0,0 +1,10 @@
credentials:
  os_username: ${OS_USERNAME}
  os_password: ${OS_PASSWORD}
  os_tenant: ${OS_TENANT_NAME}
  os_auth_url: ${OS_AUTH_URL}

network:
  type: ${network_type}
  private_network: ${network_private_name}
  public_network: ${network_public_name}
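
Because this template only substitutes scalar values, its rendered output is ordinary YAML. A small render-then-parse sketch, assuming the mako and PyYAML packages; the credential values and the local file name "credentials.yaml.mako" are illustrative assumptions:

import yaml
from mako.template import Template

rendered = Template(filename="credentials.yaml.mako").render(
    OS_USERNAME="admin",
    OS_PASSWORD="secret",
    OS_TENANT_NAME="demo",
    OS_AUTH_URL="http://127.0.0.1:5000/v2.0",
    network_type="neutron",
    network_private_name="private",
    network_public_name="public",
)

config = yaml.safe_load(rendered)
# The parsed result mirrors the structure of the template above.
assert config["credentials"]["os_username"] == "admin"
assert config["network"]["type"] == "neutron"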


@@ -0,0 +1,31 @@
clusters:
  - plugin_name: fake
    plugin_version: "0.1"
    image: ${fake_plugin_image}
    node_group_templates:
      - name: worker
        flavor_id: ${ci_flavor_id}
        node_processes:
          - datanode
          - tasktracker
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
      - name: master
        flavor_id: ${ci_flavor_id}
        node_processes:
          - jobtracker
          - namenode
        auto_security_group: true
    cluster_template:
      name: fake01
      node_group_templates:
        master: 1
        worker: 1
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: add
        node_group: worker
        size: 1
    edp_jobs_flow: transient


@@ -0,0 +1,40 @@
clusters:
  - plugin_name: hdp
    plugin_version: 1.3.2
    image: ${hdp_image}
    node_group_templates:
      - name: master
        flavor_id: ${ci_flavor_id}
        node_processes:
          - JOBTRACKER
          - NAMENODE
          - SECONDARY_NAMENODE
          - GANGLIA_SERVER
          - NAGIOS_SERVER
          - AMBARI_SERVER
          - OOZIE_SERVER
        auto_security_group: false
      - name: worker
        flavor_id: ${ci_flavor_id}
        node_processes:
          - TASKTRACKER
          - DATANODE
          - HDFS_CLIENT
          - MAPREDUCE_CLIENT
          - OOZIE_CLIENT
          - PIG
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: false
    cluster_template:
      name: hdp132
      node_group_templates:
        master: 1
        worker: 3
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: add
        node_group: worker
        size: 1
    edp_jobs_flow: hadoop_1


@@ -0,0 +1,47 @@
clusters:
  - plugin_name: hdp
    plugin_version: 2.0.6
    image: ${hdp_two_image}
    node_group_templates:
      - name: master
        flavor_id: ${ci_flavor_id}
        node_processes:
          - AMBARI_SERVER
          - GANGLIA_SERVER
          - HISTORYSERVER
          - NAGIOS_SERVER
          - NAMENODE
          - OOZIE_SERVER
          - RESOURCEMANAGER
          - SECONDARY_NAMENODE
          - ZOOKEEPER_SERVER
        auto_security_group: true
      - name: worker
        flavor_id: ${ci_flavor_id}
        node_processes:
          - DATANODE
          - HDFS_CLIENT
          - MAPREDUCE2_CLIENT
          - NODEMANAGER
          - OOZIE_CLIENT
          - PIG
          - YARN_CLIENT
          - ZOOKEEPER_CLIENT
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
    cluster_template:
      name: hdp206
      node_group_templates:
        master: 1
        worker: 3
      cluster_configs:
        YARN:
          yarn.log-aggregation-enable: false
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: add
        node_group: worker
        size: 1
    edp_jobs_flow: hadoop_2


@@ -0,0 +1,47 @@
clusters:
  - plugin_name: mapr
    plugin_version: 4.0.2.mrv2
    image: ${mapr_402mrv2_image}
    node_group_templates:
      - name: master
        flavor_id: ${ci_flavor_id}
        node_processes:
          - Metrics
          - Webserver
          - ZooKeeper
          - HTTPFS
          - Oozie
          - FileServer
          - CLDB
          - Flume
          - Hue
          - NodeManager
          - HistoryServer
          - ResourceManager
          - HiveServer2
          - HiveMetastore
          - Sqoop2-Client
          - Sqoop2-Server
        auto_security_group: true
        volumes_per_node: 2
        volumes_size: 20
      - name: worker
        flavor_id: ${ci_flavor_id}
        node_processes:
          - NodeManager
          - FileServer
        auto_security_group: true
        volumes_per_node: 2
        volumes_size: 20
    cluster_template:
      name: mapr402mrv2
      node_group_templates:
        master: 1
        worker: 3
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: add
        node_group: worker
        size: 1
    edp_jobs_flow: mapr


@@ -0,0 +1,32 @@
clusters:
  - plugin_name: spark
    plugin_version: 1.0.0
    image: ${spark_image}
    node_group_templates:
      - name: master
        flavor_id: ${ci_flavor_id}
        node_processes:
          - master
          - namenode
        auto_security_group: true
      - name: worker
        flavor_id: ${ci_flavor_id}
        node_processes:
          - datanode
          - slave
        auto_security_group: true
    cluster_template:
      name: spark100
      node_group_templates:
        master: 1
        worker: 1
      cluster_configs:
        HDFS:
          dfs.replication: 1
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: add
        node_group: worker
        size: 1
    edp_jobs_flow: spark_edp


@@ -0,0 +1,50 @@
clusters:
  - plugin_name: vanilla
    plugin_version: 2.6.0
    image: ${vanilla_two_six_image}
    node_group_templates:
      - name: worker
        flavor_id: ${ci_flavor_id}
        node_processes:
          - datanode
          - nodemanager
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
        node_configs:
          &ng_configs
          MapReduce:
            yarn.app.mapreduce.am.resource.mb: 256
            yarn.app.mapreduce.am.command-opts: -Xmx256m
          YARN:
            yarn.scheduler.minimum-allocation-mb: 256
            yarn.scheduler.maximum-allocation-mb: 1024
            yarn.nodemanager.vmem-check-enabled: false
      - name: master
        flavor_id: ${ci_flavor_id}
        node_processes:
          - oozie
          - historyserver
          - resourcemanager
          - namenode
        auto_security_group: true
    cluster_template:
      name: transient
      node_group_templates:
        master: 1
        worker: 3
      cluster_configs:
        HDFS:
          dfs.replication: 1
        MapReduce:
          mapreduce.tasktracker.map.tasks.maximum: 16
          mapreduce.tasktracker.reduce.tasks.maximum: 16
        YARN:
          yarn.resourcemanager.scheduler.class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler
    cluster:
      name: ${cluster_name}
      is_transient: true
    scenario:
      - run_jobs
      - transient
    edp_jobs_flow: transient


@@ -0,0 +1,73 @@
clusters:
  - plugin_name: vanilla
    plugin_version: 1.2.1
    image: ${vanilla_image}
    node_group_templates:
      - name: worker-tt-dn
        flavor_id: ${ci_flavor_id}
        node_processes:
          - datanode
          - tasktracker
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
      - name: worker-tt
        flavor_id: ${ci_flavor_id}
        node_processes:
          - tasktracker
        auto_security_group: true
      - name: worker-dn
        flavor_id: ${ci_flavor_id}
        node_processes:
          - datanode
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
      - name: master-jt-nn
        flavor_id: ${ci_flavor_id}
        node_processes:
          - namenode
          - jobtracker
        auto_security_group: true
      - name: master-sec-nn-oz
        flavor_id: ${ci_flavor_id}
        node_processes:
          - oozie
          - secondarynamenode
        auto_security_group: true
    cluster_template:
      name: vanilla121
      node_group_templates:
        master-sec-nn-oz: 1
        master-jt-nn: 1
        worker-tt: 1
        worker-tt-dn: 2
        worker-dn: 1
      cluster_configs:
        HDFS:
          dfs.replication: 1
        MapReduce:
          mapred.map.tasks.speculative.execution: False
          mapred.child.java.opts: -Xmx512m
        general:
          'Enable Swift': True
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: resize
        node_group: worker-tt-dn
        size: 1
      - operation: resize
        node_group: worker-dn
        size: 0
      - operation: resize
        node_group: worker-tt
        size: 0
      - operation: add
        node_group: worker-tt
        size: 1
      - operation: add
        node_group: worker-dn
        size: 1
    edp_jobs_flow: hadoop_1


@@ -0,0 +1,86 @@
clusters:
  - plugin_name: vanilla
    plugin_version: 2.6.0
    image: ${vanilla_two_six_image}
    node_group_templates:
      - name: worker-dn-nm
        flavor_id: ${ci_flavor_id}
        node_processes:
          - datanode
          - nodemanager
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
        node_configs:
          &ng_configs
          MapReduce:
            yarn.app.mapreduce.am.resource.mb: 256
            yarn.app.mapreduce.am.command-opts: -Xmx256m
          YARN:
            yarn.scheduler.minimum-allocation-mb: 256
            yarn.scheduler.maximum-allocation-mb: 1024
            yarn.nodemanager.vmem-check-enabled: false
      - name: worker-nm
        flavor_id: ${ci_flavor_id}
        node_processes:
          - nodemanager
        auto_security_group: true
        node_configs:
          *ng_configs
      - name: worker-dn
        flavor_id: ${ci_flavor_id}
        node_processes:
          - datanode
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: true
        node_configs:
          *ng_configs
      - name: master-rm-nn-hvs
        flavor_id: ${ci_flavor_id}
        node_processes:
          - namenode
          - resourcemanager
          - hiveserver
        auto_security_group: true
        node_configs:
          *ng_configs
      - name: master-oo-hs-sn
        flavor_id: ${ci_flavor_id}
        node_processes:
          - oozie
          - historyserver
          - secondarynamenode
        auto_security_group: true
        node_configs:
          *ng_configs
    cluster_template:
      name: vanilla260
      node_group_templates:
        master-rm-nn-hvs: 1
        master-oo-hs-sn: 1
        worker-dn-nm: 2
        worker-dn: 1
        worker-nm: 1
      cluster_configs:
        HDFS:
          dfs.replication: 1
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: resize
        node_group: worker-dn-nm
        size: 1
      - operation: resize
        node_group: worker-dn
        size: 0
      - operation: resize
        node_group: worker-nm
        size: 0
      - operation: add
        node_group: worker-dn
        size: 1
      - operation: add
        node_group: worker-nm
        size: 1
    edp_jobs_flow: hadoop_2