Merge "Scenario tests: add the test templates for Stein"

This commit is contained in:
Zuul 2019-06-27 18:22:10 +00:00 committed by Gerrit Code Review
commit 3eb57b4d4b
21 changed files with 591 additions and 16 deletions

View File

@ -0,0 +1,11 @@
---
# reno release note: indentation restored so the folded scalar and the
# list items parse correctly.
prelude: >
    Stein test templates are now available, while Ocata
    test templates have been removed.
features:
  - A folder with scenario templates for Stein has been added.
    It is a subset of the templates in the main directory.
deprecations:
  - The Ocata-specific job templates have been removed.
    This means that starting from this release Ocata is
    not supported (it is under Extended Maintenance now).

View File

@ -62,7 +62,7 @@ clusters:
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.11 the defaults of following configs are too large,
# In >=5.9 the defaults of following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:

View File

@ -62,7 +62,7 @@ clusters:
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.11 the defaults of following configs are too large,
# In >=5.9 the defaults of following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:

View File

@ -62,7 +62,7 @@ clusters:
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.11 the defaults of following configs are too large,
# In >=5.9 the defaults of following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:

View File

@ -62,7 +62,7 @@ clusters:
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.11 the defaults of following configs are too large,
# In >=5.9 the defaults of following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:

View File

@ -0,0 +1,69 @@
<%page args="use_auto_security_group='true', ci_flavor_id='m1.small', medium_flavor_id='m1.medium', availability_zone='nova', volumes_availability_zone='nova'"/>
# Scenario test template for the Ambari plugin, version 2.3.
# Mako template: ${...} placeholders are substituted by the scenario
# runner before the result is parsed as YAML.
clusters:
  - plugin_name: ambari
    plugin_version: '2.3'
    image: ${ambari_23_image}
    node_group_templates:
      - name: master
        flavor: ${medium_flavor_id}
        node_processes:
          - Ambari
          - MapReduce History Server
          - Spark History Server
          - NameNode
          - ResourceManager
          - SecondaryNameNode
          - YARN Timeline Server
          - ZooKeeper
          - Kafka Broker
        auto_security_group: ${use_auto_security_group}
      - name: master-edp
        flavor: ${ci_flavor_id}
        node_processes:
          - Hive Metastore
          - HiveServer
          - Oozie
        auto_security_group: ${use_auto_security_group}
      - name: worker
        flavor: ${ci_flavor_id}
        node_processes:
          - DataNode
          - NodeManager
        volumes_per_node: 2
        volumes_size: 2
        availability_zone: ${availability_zone}
        volumes_availability_zone: ${volumes_availability_zone}
        auto_security_group: ${use_auto_security_group}
    cluster_template:
      # NOTE(review): template name says "ambari21" while the plugin
      # version is 2.3 — matches the other Ambari templates in this
      # commit; presumably kept deliberately, confirm before renaming.
      name: ambari21
      node_group_templates:
        master: 1
        master-edp: 1
        worker: 3
      cluster_configs:
        HDFS:
          dfs.datanode.du.reserved: 0
    custom_checks:
      check_kafka:
        zookeeper_process: ZooKeeper
        kafka_process: Kafka Broker
        spark_flow:
          - type: Spark
            main_lib:
              type: database
              source: edp-examples/edp-spark/spark-kafka-example.jar
            args:
              - '{zookeeper_list}'
              - '{topic}'
              - '{timeout}'
            timeout: 30
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: add
        node_group: worker
        size: 1
    edp_jobs_flow:
      - java_job
      - spark_pi

View File

@ -3,7 +3,7 @@
clusters:
- plugin_name: ambari
plugin_version: '2.4'
image: ${ambari_22_image}
image: ${ambari_24_image}
node_group_templates:
- name: master
flavor: ${medium_flavor_id}
@ -66,4 +66,7 @@ clusters:
size: 1
edp_jobs_flow:
- java_job
- name: mapreduce_job_s3
features:
- s3
- spark_pi

View File

@ -0,0 +1,72 @@
<%page args="use_auto_security_group='true', ci_flavor_id='m1.small', medium_flavor_id='m1.medium', availability_zone='nova', volumes_availability_zone='nova'"/>
# Scenario test template for the Ambari plugin, version 2.5.
# Mako template: ${...} placeholders are substituted by the scenario
# runner before the result is parsed as YAML.
clusters:
  - plugin_name: ambari
    plugin_version: '2.5'
    image: ${ambari_25_image}
    node_group_templates:
      - name: master
        flavor: ${medium_flavor_id}
        node_processes:
          - Ambari
          - MapReduce History Server
          - Spark History Server
          - NameNode
          - ResourceManager
          - SecondaryNameNode
          - YARN Timeline Server
          - ZooKeeper
          - Kafka Broker
        auto_security_group: ${use_auto_security_group}
      - name: master-edp
        flavor: ${ci_flavor_id}
        node_processes:
          - Hive Metastore
          - HiveServer
          - Oozie
        auto_security_group: ${use_auto_security_group}
      - name: worker
        flavor: ${ci_flavor_id}
        node_processes:
          - DataNode
          - NodeManager
        volumes_per_node: 2
        volumes_size: 2
        availability_zone: ${availability_zone}
        volumes_availability_zone: ${volumes_availability_zone}
        auto_security_group: ${use_auto_security_group}
    cluster_template:
      # NOTE(review): template name says "ambari21" while the plugin
      # version is 2.5 — matches the other Ambari templates in this
      # commit; presumably kept deliberately, confirm before renaming.
      name: ambari21
      node_group_templates:
        master: 1
        master-edp: 1
        worker: 3
      cluster_configs:
        HDFS:
          dfs.datanode.du.reserved: 0
    custom_checks:
      check_kafka:
        zookeeper_process: ZooKeeper
        kafka_process: Kafka Broker
        spark_flow:
          - type: Spark
            main_lib:
              type: database
              source: edp-examples/edp-spark/spark-kafka-example.jar
            args:
              - '{zookeeper_list}'
              - '{topic}'
              - '{timeout}'
            timeout: 30
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: add
        node_group: worker
        size: 1
    edp_jobs_flow:
      - java_job
      - name: mapreduce_job_s3
        features:
          - s3
      - spark_pi

View File

@ -0,0 +1,72 @@
<%page args="use_auto_security_group='true', ci_flavor_id='m1.small', medium_flavor_id='m1.medium', availability_zone='nova', volumes_availability_zone='nova'"/>
clusters:
- plugin_name: ambari
plugin_version: '2.6'
image: ${ambari_26_image}
node_group_templates:
- name: master
flavor: ${medium_flavor_id}
node_processes:
- Ambari
- MapReduce History Server
- Spark History Server
- NameNode
- ResourceManager
- SecondaryNameNode
- YARN Timeline Server
- ZooKeeper
- Kafka Broker
auto_security_group: ${use_auto_security_group}
- name: master-edp
flavor: ${ci_flavor_id}
node_processes:
- Hive Metastore
- HiveServer
- Oozie
auto_security_group: ${use_auto_security_group}
- name: worker
flavor: ${ci_flavor_id}
node_processes:
- DataNode
- NodeManager
volumes_per_node: 2
volumes_size: 2
availability_zone: ${availability_zone}
volumes_availability_zone: ${volumes_availability_zone}
auto_security_group: ${use_auto_security_group}
cluster_template:
name: ambari21
node_group_templates:
master: 1
master-edp: 1
worker: 3
cluster_configs:
HDFS:
dfs.datanode.du.reserved: 0
custom_checks:
check_kafka:
zookeeper_process: ZooKeeper
kafka_process: Kafka Broker
spark_flow:
- type: Spark
main_lib:
type: database
source: edp-examples/edp-spark/spark-kafka-example.jar
args:
- '{zookeeper_list}'
- '{topic}'
- '{timeout}'
timeout: 30
cluster:
name: ${cluster_name}
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow:
- java_job
- name: mapreduce_job_s3
features:
- s3
- spark_pi

View File

@ -2,8 +2,8 @@
clusters:
- plugin_name: cdh
plugin_version: 5.7.0
image: ${cdh_570_image}
plugin_version: 5.11.0
image: ${cdh_5110_image}
node_group_templates:
- name: worker-dn
flavor: ${ci_flavor_id}
@ -62,7 +62,7 @@ clusters:
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.7 the defaults of following configs are too large,
# In >=5.9 the defaults of following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:
@ -70,7 +70,7 @@ clusters:
HIVESERVER:
hiveserver2_java_heapsize: 2147483648
cluster_template:
name: cdh570
name: cdh5110
node_group_templates:
manager: 1
master-core: 1
@ -89,6 +89,9 @@ clusters:
edp_jobs_flow:
- pig_job
- mapreduce_job
- name: mapreduce_job_s3
features:
- s3
- mapreduce_streaming_job
- java_job
- spark_wordcount

View File

@ -0,0 +1,97 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', large_flavor_id='m1.large', availability_zone='nova', volumes_availability_zone='nova'"/>
# Scenario test template for the CDH plugin, version 5.13.0.
# Mako template: ${...} placeholders are substituted by the scenario
# runner before the result is parsed as YAML.
clusters:
  - plugin_name: cdh
    plugin_version: 5.13.0
    image: ${cdh_5130_image}
    node_group_templates:
      - name: worker-dn
        flavor: ${ci_flavor_id}
        node_processes:
          - HDFS_DATANODE
        volumes_per_node: 2
        volumes_size: 2
        availability_zone: ${availability_zone}
        volumes_availability_zone: ${volumes_availability_zone}
        auto_security_group: ${use_auto_security_group}
        # Anchor reused below by worker-nm-dn so both datanode groups
        # share the same DATANODE settings.
        node_configs:
          &ng_configs
          DATANODE:
            dfs_datanode_du_reserved: 0
      - name: worker-nm
        flavor: ${ci_flavor_id}
        node_processes:
          - YARN_NODEMANAGER
        auto_security_group: ${use_auto_security_group}
      - name: worker-nm-dn
        flavor: ${ci_flavor_id}
        node_processes:
          - YARN_NODEMANAGER
          - HDFS_DATANODE
        volumes_per_node: 2
        volumes_size: 2
        availability_zone: ${availability_zone}
        volumes_availability_zone: ${volumes_availability_zone}
        auto_security_group: ${use_auto_security_group}
        node_configs:
          *ng_configs
      - name: manager
        flavor: ${large_flavor_id}
        node_processes:
          - CLOUDERA_MANAGER
          - KMS
        is_proxy_gateway: ${is_proxy_gateway}
        auto_security_group: ${use_auto_security_group}
      - name: master-core
        flavor: ${large_flavor_id}
        node_processes:
          - HDFS_NAMENODE
          - YARN_RESOURCEMANAGER
          - SENTRY_SERVER
          - YARN_NODEMANAGER
          - ZOOKEEPER_SERVER
        auto_security_group: ${use_auto_security_group}
      - name: master-additional
        flavor: ${large_flavor_id}
        node_processes:
          - OOZIE_SERVER
          - YARN_JOBHISTORY
          - YARN_NODEMANAGER
          - HDFS_SECONDARYNAMENODE
          - HIVE_METASTORE
          - HIVE_SERVER2
          - SPARK_YARN_HISTORY_SERVER
        auto_security_group: ${use_auto_security_group}
        # In >=5.9 the defaults of following configs are too large,
        # restrict them to save memory for scenario testing.
        node_configs:
          HIVEMETASTORE:
            hive_metastore_java_heapsize: 2147483648
          HIVESERVER:
            hiveserver2_java_heapsize: 2147483648
    cluster_template:
      name: cdh5130
      node_group_templates:
        manager: 1
        master-core: 1
        master-additional: 1
        worker-nm-dn: 1
        worker-nm: 1
        worker-dn: 1
      cluster_configs:
        HDFS:
          dfs_replication: 1
    cluster:
      name: ${cluster_name}
    scenario:
      - run_jobs
      - sentry
    edp_jobs_flow:
      - pig_job
      - mapreduce_job
      - name: mapreduce_job_s3
        features:
          - s3
      - mapreduce_streaming_job
      - java_job
      - spark_wordcount

View File

@ -89,6 +89,9 @@ clusters:
edp_jobs_flow:
- pig_job
- mapreduce_job
- name: mapreduce_job_s3
features:
- s3
- mapreduce_streaming_job
- java_job
- spark_wordcount

View File

@ -54,3 +54,6 @@ clusters:
size: 1
edp_jobs_flow:
- mapr
- name: mapreduce_job_s3
features:
- s3

View File

@ -2,8 +2,8 @@
clusters:
- plugin_name: spark
plugin_version: 1.6.0
image: ${spark_160_image}
plugin_version: '2.2'
image: ${spark_22_image}
node_group_templates:
- name: master
flavor: ${ci_flavor_id}
@ -19,7 +19,7 @@ clusters:
- slave
auto_security_group: ${use_auto_security_group}
cluster_template:
name: spark160
name: spark220
node_group_templates:
master: 1
worker: 1

View File

@ -0,0 +1,37 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small'"/>
# Scenario test template for the Spark plugin, version 2.3.
# Mako template: ${...} placeholders are substituted by the scenario
# runner before the result is parsed as YAML.
clusters:
  - plugin_name: spark
    plugin_version: '2.3'
    # NOTE(review): reuses the 2.2 image variable and the "spark220"
    # template name — presumably intentional reuse; confirm against the
    # image registry before changing.
    image: ${spark_22_image}
    node_group_templates:
      - name: master
        flavor: ${ci_flavor_id}
        node_processes:
          - master
          - namenode
        auto_security_group: ${use_auto_security_group}
        is_proxy_gateway: ${is_proxy_gateway}
      - name: worker
        flavor: ${ci_flavor_id}
        node_processes:
          - datanode
          - slave
        auto_security_group: ${use_auto_security_group}
    cluster_template:
      name: spark220
      node_group_templates:
        master: 1
        worker: 1
      cluster_configs:
        HDFS:
          dfs.replication: 1
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: add
        node_group: worker
        size: 1
    edp_jobs_flow:
      - spark_pi
      - spark_wordcount

View File

@ -2,8 +2,8 @@
clusters:
- plugin_name: storm
plugin_version: 1.0.1
image: ${storm_101_image}
plugin_version: 1.1.0
image: ${storm_110_image}
node_group_templates:
- name: master
flavor: ${ci_flavor_id}
@ -22,7 +22,7 @@ clusters:
- zookeeper
auto_security_group: ${use_auto_security_group}
cluster_template:
name: storm101
name: storm110
node_group_templates:
master: 1
worker: 1

View File

@ -0,0 +1,37 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', medium_flavor_id='m1.medium'"/>
# Scenario test template for the Storm plugin, version 1.2.
# Mako template: ${...} placeholders are substituted by the scenario
# runner before the result is parsed as YAML.
clusters:
  - plugin_name: storm
    plugin_version: '1.2'
    image: ${storm_120_image}
    node_group_templates:
      - name: master
        flavor: ${ci_flavor_id}
        node_processes:
          - nimbus
        auto_security_group: ${use_auto_security_group}
        is_proxy_gateway: ${is_proxy_gateway}
      - name: worker
        flavor: ${ci_flavor_id}
        node_processes:
          - supervisor
        auto_security_group: ${use_auto_security_group}
      - name: zookeeper
        flavor: ${medium_flavor_id}
        node_processes:
          - zookeeper
        auto_security_group: ${use_auto_security_group}
    cluster_template:
      name: storm120
      node_group_templates:
        master: 1
        worker: 1
        zookeeper: 1
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: add
        node_group: worker
        size: 1
    # Storm has no EDP jobs here; only the scaling scenario is run.
    scenario:
      - scale

View File

@ -0,0 +1,84 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', cluster_name='vanilla-275', availability_zone='nova', volumes_availability_zone='nova'"/>
# Scenario test template for the Vanilla plugin, version 2.7.5.
# Mako template: ${...} placeholders are substituted by the scenario
# runner before the result is parsed as YAML.
clusters:
  - plugin_name: vanilla
    plugin_version: 2.7.5
    image: ${vanilla_275_image}
    node_group_templates:
      - name: worker-dn-nm
        flavor: ${ci_flavor_id}
        node_processes:
          - datanode
          - nodemanager
        volumes_per_node: 2
        volumes_size: 2
        availability_zone: ${availability_zone}
        volumes_availability_zone: ${volumes_availability_zone}
        auto_security_group: ${use_auto_security_group}
      - name: worker-nm
        flavor: ${ci_flavor_id}
        node_processes:
          - nodemanager
        auto_security_group: ${use_auto_security_group}
      - name: worker-dn
        flavor: ${ci_flavor_id}
        node_processes:
          - datanode
        volumes_per_node: 2
        volumes_size: 2
        availability_zone: ${availability_zone}
        volumes_availability_zone: ${volumes_availability_zone}
        auto_security_group: ${use_auto_security_group}
      - name: master-rm-nn-hvs-sp
        flavor: ${ci_flavor_id}
        node_processes:
          - namenode
          - resourcemanager
          - hiveserver
          - nodemanager
          - spark history server
        auto_security_group: ${use_auto_security_group}
      - name: master-oo-hs-sn
        flavor: ${ci_flavor_id}
        node_processes:
          - oozie
          - historyserver
          - secondarynamenode
          - nodemanager
        auto_security_group: ${use_auto_security_group}
        is_proxy_gateway: ${is_proxy_gateway}
    # NOTE(review): no explicit template name here, unlike the other
    # plugin templates — presumably the runner generates one; confirm.
    cluster_template:
      node_group_templates:
        master-rm-nn-hvs-sp: 1
        master-oo-hs-sn: 1
        worker-dn-nm: 2
        worker-dn: 1
        worker-nm: 1
      cluster_configs:
        HDFS:
          dfs.replication: 1
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: resize
        node_group: worker-dn-nm
        size: 1
      - operation: resize
        node_group: worker-dn
        size: 0
      - operation: resize
        node_group: worker-nm
        size: 0
      - operation: add
        node_group: worker-dn
        size: 1
      - operation: add
        node_group: worker-nm
        size: 2
    edp_jobs_flow:
      - pig_job
      - mapreduce_job
      - mapreduce_streaming_job
      - java_job
      - hive_job
      - spark_wordcount

View File

@ -0,0 +1,84 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', cluster_name='vanilla-282', availability_zone='nova', volumes_availability_zone='nova'"/>
# Scenario test template for the Vanilla plugin, version 2.8.2.
# Mako template: ${...} placeholders are substituted by the scenario
# runner before the result is parsed as YAML.
clusters:
  - plugin_name: vanilla
    plugin_version: 2.8.2
    image: ${vanilla_282_image}
    node_group_templates:
      - name: worker-dn-nm
        flavor: ${ci_flavor_id}
        node_processes:
          - datanode
          - nodemanager
        volumes_per_node: 2
        volumes_size: 2
        availability_zone: ${availability_zone}
        volumes_availability_zone: ${volumes_availability_zone}
        auto_security_group: ${use_auto_security_group}
      - name: worker-nm
        flavor: ${ci_flavor_id}
        node_processes:
          - nodemanager
        auto_security_group: ${use_auto_security_group}
      - name: worker-dn
        flavor: ${ci_flavor_id}
        node_processes:
          - datanode
        volumes_per_node: 2
        volumes_size: 2
        availability_zone: ${availability_zone}
        volumes_availability_zone: ${volumes_availability_zone}
        auto_security_group: ${use_auto_security_group}
      - name: master-rm-nn-hvs-sp
        flavor: ${ci_flavor_id}
        node_processes:
          - namenode
          - resourcemanager
          - hiveserver
          - nodemanager
          - spark history server
        auto_security_group: ${use_auto_security_group}
      - name: master-oo-hs-sn
        flavor: ${ci_flavor_id}
        node_processes:
          - oozie
          - historyserver
          - secondarynamenode
          - nodemanager
        auto_security_group: ${use_auto_security_group}
        is_proxy_gateway: ${is_proxy_gateway}
    # NOTE(review): no explicit template name here, unlike the other
    # plugin templates — presumably the runner generates one; confirm.
    cluster_template:
      node_group_templates:
        master-rm-nn-hvs-sp: 1
        master-oo-hs-sn: 1
        worker-dn-nm: 2
        worker-dn: 1
        worker-nm: 1
      cluster_configs:
        HDFS:
          dfs.replication: 1
    cluster:
      name: ${cluster_name}
    scaling:
      - operation: resize
        node_group: worker-dn-nm
        size: 1
      - operation: resize
        node_group: worker-dn
        size: 0
      - operation: resize
        node_group: worker-nm
        size: 0
      - operation: add
        node_group: worker-dn
        size: 1
      - operation: add
        node_group: worker-nm
        size: 2
    edp_jobs_flow:
      - pig_job
      - mapreduce_job
      - mapreduce_streaming_job
      - java_job
      - hive_job
      - spark_wordcount

View File

@ -22,7 +22,7 @@ clusters:
- zookeeper
auto_security_group: ${use_auto_security_group}
cluster_template:
name: storm110
name: storm120
node_group_templates:
master: 1
worker: 1