Drop HDP 1.3.2 plugin

Drop the HDP 1.3.2 plugin along with its related unit tests
and scenario tests.

Implements blueprint: drop-hadoop-1

Change-Id: I25c5fc046e78d347c8bfe059129f4dcd14511c99
Vitaly Gridnev 2015-09-03 17:15:38 +03:00
parent 6b5f0d0b1b
commit 27e428f9bf
24 changed files with 72 additions and 7704 deletions
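
The HDP plugin derives its list of supported versions from the version_X_Y_Z packages under sahara/plugins/hdp/versions/, so removing the version_1_3_2 package (together with its resources, unit tests and scenario definitions) is enough to retire 1.3.2. A simplified sketch of that discovery pattern, assuming a directory-scanning handler factory like the plugin's VersionHandlerFactory (not the actual sahara code):

```python
# Simplified sketch (not the actual sahara code): map version_X_Y_Z
# package names under the plugin's versions/ directory to version strings.
# Dropping sahara/plugins/hdp/versions/version_1_3_2 removes '1.3.2' here.
import os


def get_supported_versions(versions_dir):
    versions = []
    for entry in os.listdir(versions_dir):
        if entry.startswith('version_') and os.path.isdir(
                os.path.join(versions_dir, entry)):
            versions.append(entry[len('version_'):].replace('_', '.'))
    return sorted(versions)


# Before this change the HDP plugin reported ['1.3.2', '2.0.6'];
# afterwards only ['2.0.6'] remains.
```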

View File

@@ -33,9 +33,6 @@ include sahara/plugins/vanilla/hadoop2/resources/*.sql
include sahara/plugins/vanilla/hadoop2/resources/*.template
include sahara/plugins/vanilla/v2_6_0/resources/*.xml
include sahara/plugins/vanilla/v2_7_1/resources/*.xml
include sahara/plugins/hdp/versions/version_1_3_2/resources/*.template
include sahara/plugins/hdp/versions/version_1_3_2/resources/*.json
include sahara/plugins/hdp/versions/version_1_3_2/resources/*.sh
include sahara/plugins/hdp/versions/version_2_0_6/resources/*.template
include sahara/plugins/hdp/versions/version_2_0_6/resources/*.json
include sahara/plugins/hdp/versions/version_2_0_6/resources/*.sh

View File

@@ -61,29 +61,11 @@ to be downloaded from the package repository.
For more information about HDP images, refer to
https://github.com/openstack/sahara-image-elements.
There are three VM images provided for use with the HDP Plugin, that can also
be built using the tools available in sahara-image-elements:
1. `sahara-juno-hdp-1.3.2-centos-6.5.qcow2 <http://sahara-files.mirantis.com/sahara-juno-hdp-1.3.2-centos-6.5.qcow2>`_:
This image contains most of the requisite packages necessary for HDP
deployment. The packages contained herein correspond to the HDP 1.3 release.
The operating system is a minimal CentOS 6.5 cloud-init enabled install.
This image can only be used to provision HDP 1.3 hadoop clusters.
2. `sahara-juno-hdp-2.0.6-centos-6.5.qcow2 <http://sahara-files.mirantis.com/sahara-juno-hdp-2.0.6-centos-6.5.qcow2>`_:
This image contains most of the requisite packages necessary for HDP
deployment. The packages contained herein correspond to the HDP 2.0.6
release. The operating system is a minimal CentOS 6.5 cloud-init enabled
install. This image can only be used to provision HDP 2.0.6 hadoop clusters.
3. `sahara-juno-hdp-plain-centos-6.5.qcow2 <http://sahara-files.mirantis.com/sahara-juno-hdp-plain-centos-6.5.qcow2>`_:
This image provides only a minimal install of CentOS 6.5 and is cloud-init
enabled. This image can be used to provision any versions of HDP supported
by Sahara.
You could download well tested and up-to-date prepared images from
http://sahara-files.mirantis.com/images/upstream/kilo/
HDP plugin requires an image to be tagged in Sahara Image Registry with two
tags: 'hdp' and '<hdp version>' (e.g. '1.3.2').
tags: 'hdp' and '<hdp version>' (e.g. '2.0.6').
Also in the Image Registry you will need to specify username for an image.
The username specified should be 'cloud-user'.
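
As an illustration of the registration step described above, here is a hedged sketch using python-saharaclient to set the username and tags on an image; the auth details and image id are placeholders, and the exact client calls may vary between releases:

```python
# Illustrative sketch only: register an HDP 2.0.6 image in the Sahara
# Image Registry with the required username and tags.  Auth parameters
# and the image id are placeholders.
from keystoneauth1 import session
from keystoneauth1.identity import v3
from saharaclient import client as sahara_client

auth = v3.Password(auth_url='http://controller:5000/v3',     # placeholder
                   username='admin', password='secret',
                   project_name='admin',
                   user_domain_id='default', project_domain_id='default')
sahara = sahara_client.Client('1.1', session=session.Session(auth=auth))

image_id = '11111111-2222-3333-4444-555555555555'            # placeholder
sahara.images.update_image(image_id, user_name='cloud-user')
sahara.images.update_tags(image_id, ['hdp', '2.0.6'])
```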
@@ -130,8 +112,7 @@ The HDP plugin currently has the following limitations:
HDP Version Support
-------------------
The HDP plugin currently supports HDP 1.3.2 and HDP 2.0.6. Support for future
version of HDP will be provided shortly after software is generally available.
The HDP plugin currently supports HDP 2.0.6.
Cluster Validation
------------------
@@ -145,5 +126,5 @@ validation checks to ensure a successful Hadoop deployment:
* Ensure that each defined node group had an associated Ambari Agent configured
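
A minimal sketch of what such a node-group check can look like, written in the style of the plugin's validate() methods that appear later in this change (illustrative only; not the plugin's literal Ambari Agent validation):

```python
# Illustrative sketch only, following the InvalidComponentCountException
# pattern used by the HDP plugin's validate() methods.  Assumes
# cluster_spec.node_groups maps names to objects with a 'components' list.
from sahara.plugins import exceptions as ex


def validate_ambari_agents(cluster_spec):
    for node_group in cluster_spec.node_groups.values():
        if 'AMBARI_AGENT' not in node_group.components:
            raise ex.InvalidComponentCountException(
                'AMBARI_AGENT', '1 per node group', 0)
```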
The HDP Plugin and Sahara Support
----------------------------------
---------------------------------
For more information, please contact Hortonworks.

View File

@@ -45,47 +45,6 @@ edp_jobs_flow:
args:
- 10
- 10
hadoop_1:
- type: Pig
input_datasource:
type: swift
source: etc/edp-examples/edp-pig/trim-spaces/data/input
output_datasource:
type: hdfs
destination: /user/hadoop/edp-output
main_lib:
type: swift
source: etc/edp-examples/edp-pig/trim-spaces/example.pig
additional_libs:
- type: swift
source: etc/edp-examples/edp-pig/trim-spaces/udf.jar
configs:
dfs.replication: 1
- type: MapReduce
input_datasource:
type: swift
source: etc/edp-examples/edp-pig/trim-spaces/data/input
output_datasource:
type: hdfs
destination: /user/hadoop/edp-output
additional_libs:
- type: database
source: etc/edp-examples/edp-mapreduce/edp-mapreduce.jar
configs:
dfs.replication: 1
mapred.mapper.class: org.apache.oozie.example.SampleMapper
mapred.reducer.class: org.apache.oozie.example.SampleReducer
- type: MapReduce.Streaming
input_datasource:
type: swift
source: etc/edp-examples/edp-pig/trim-spaces/data/input
output_datasource:
type: hdfs
destination: /user/hadoop/edp-output
configs:
dfs.replication: 1
edp.streaming.mapper: /bin/cat
edp.streaming.reducer: /usr/bin/wc
spark_edp:
- type: Spark
main_lib:

View File

@@ -1,40 +0,0 @@
clusters:
  - plugin_name: hdp
    plugin_version: 1.3.2
    image: ${hdp_image}
    node_group_templates:
      - name: master
        flavor: ${ci_flavor_id}
        node_processes:
          - JOBTRACKER
          - NAMENODE
          - SECONDARY_NAMENODE
          - GANGLIA_SERVER
          - NAGIOS_SERVER
          - AMBARI_SERVER
          - OOZIE_SERVER
        auto_security_group: false
      - name: worker
        flavor: ${ci_flavor_id}
        node_processes:
          - TASKTRACKER
          - DATANODE
          - HDFS_CLIENT
          - MAPREDUCE_CLIENT
          - OOZIE_CLIENT
          - PIG
        volumes_per_node: 2
        volumes_size: 2
        auto_security_group: false
    cluster_template:
      name: hdp132
      node_group_templates:
        master: 1
        worker: 3
    cluster:
      name: ${cluster_name}
      scaling:
        - operation: add
          node_group: worker
          size: 1
    edp_jobs_flow: hadoop_1

View File

@@ -1,24 +0,0 @@
{
"plugin_name": "hdp",
"hadoop_version": "1.3.2",
"node_groups": [
{
"name": "worker",
"count": 3,
"node_group_template_id": "{hdp-132-default-worker}"
},
{
"name": "secondary-master",
"count": 1,
"node_group_template_id": "{hdp-132-default-secondary-master}"
},
{
"name": "master",
"count": 1,
"node_group_template_id": "{hdp-132-default-master}"
}
],
"name": "hdp-132-default-cluster",
"neutron_management_network": "{neutron_management_network}",
"cluster_configs": {}
}

View File

@@ -1,12 +0,0 @@
{
"plugin_name": "hdp",
"hadoop_version": "1.3.2",
"node_processes": [
"JOBTRACKER",
"NAMENODE",
"AMBARI_SERVER"
],
"name": "hdp-132-default-master",
"floating_ip_pool": "{floating_ip_pool}",
"flavor_id": "{flavor_id}"
}

View File

@@ -1,12 +0,0 @@
{
"plugin_name": "hdp",
"hadoop_version": "1.3.2",
"node_processes": [
"SECONDARY_NAMENODE",
"OOZIE_SERVER",
"OOZIE_CLIENT"
],
"name": "hdp-132-default-secondary-master",
"floating_ip_pool": "{floating_ip_pool}",
"flavor_id": "{flavor_id}"
}

View File

@@ -1,11 +0,0 @@
{
"plugin_name": "hdp",
"hadoop_version": "1.3.2",
"node_processes": [
"TASKTRACKER",
"DATANODE"
],
"name": "hdp-132-default-worker",
"floating_ip_pool": "{floating_ip_pool}",
"flavor_id": "{flavor_id}"
}

View File

@@ -52,7 +52,7 @@ def validate_number_of_datanodes(cluster, scaled_groups, default_configs):
class ClusterSpec(object):
def __init__(self, config, version='1.3.2'):
def __init__(self, config, version='2.0.6'):
self._config_template = config
self.services = []
self.configurations = {}
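
For context, a hedged usage sketch of the new default: constructing a ClusterSpec without an explicit version now resolves to the HDP 2.0.6 template set (the resource path below is a placeholder for the plugin's ambari-config-resource.json text):

```python
# Illustrative only: ClusterSpec now defaults to version='2.0.6'.
# The path below is a placeholder for the Ambari resource template.
from sahara.plugins.hdp import clusterspec

with open('ambari-config-resource.json') as f:    # placeholder path
    config_json = f.read()

spec_default = clusterspec.ClusterSpec(config_json)            # now 2.0.6
spec_explicit = clusterspec.ClusterSpec(config_json, version='2.0.6')
```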

View File

@@ -123,7 +123,7 @@ class HadoopServer(object):
r.write_file_to(
'/etc/hadoop/conf/topology.sh',
f.get_file_text(
'plugins/hdp/versions/version_1_3_2/resources/topology.sh'))
'plugins/hdp/versions/version_2_0_6/resources/topology.sh'))
r.execute_command(
'chmod +x /etc/hadoop/conf/topology.sh', run_as_root=True
)

View File

@@ -1,46 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.hdp import confighints_helper as ch_helper
from sahara.plugins.hdp import edp_engine
from sahara.service.edp import hdfs_helper
from sahara.utils import edp


class EdpOozieEngine(edp_engine.EdpOozieEngine):
    def create_hdfs_dir(self, remote, dir_name):
        hdfs_helper.create_dir_hadoop1(remote, dir_name, self.get_hdfs_user())

    @staticmethod
    def get_possible_job_config(job_type):
        if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
            return {'job_config': ch_helper.get_possible_hive_config_from(
                'plugins/hdp/versions/version_1_3_2/resources/'
                'ambari-config-resource.json')}
        if edp.compare_job_type(job_type,
                                edp.JOB_TYPE_MAPREDUCE,
                                edp.JOB_TYPE_MAPREDUCE_STREAMING):
            return {'job_config': ch_helper.get_possible_mapreduce_config_from(
                'plugins/hdp/versions/version_1_3_2/resources/'
                'ambari-config-resource.json')}
        if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
            return {'job_config': ch_helper.get_possible_pig_config_from(
                'plugins/hdp/versions/version_1_3_2/resources/'
                'ambari-config-resource.json')}
        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)

    def get_resource_manager_uri(self, cluster):
        return cluster['info']['MapReduce']['JobTracker']

View File

@@ -1,718 +0,0 @@
{
"services" : [
{
"name" : "HDFS",
"components" : [
{
"name" : "NAMENODE",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "DATANODE",
"type" : "SLAVE",
"cardinality" : "1+"
},
{
"name" : "SECONDARY_NAMENODE",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "HDFS_CLIENT",
"type" : "CLIENT",
"cardinality" : "1+"
}
],
"configurations" : [
]
},
{
"name" : "MAPREDUCE",
"components" : [
{
"name" : "JOBTRACKER",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "TASKTRACKER",
"type" : "SLAVE",
"cardinality" : "1+"
},
{
"name" : "HISTORYSERVER",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "MAPREDUCE_CLIENT",
"type" : "CLIENT",
"cardinality" : "1+"
}
],
"configurations" : [
]
},
{
"name" : "PIG",
"components" : [
{
"name" : "PIG",
"type" : "CLIENT",
"cardinality" : "1+"
}
],
"configurations" : [
]
},
{
"name" : "HIVE",
"components" : [
{
"name" : "HIVE_SERVER",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "HIVE_METASTORE",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "HIVE_CLIENT",
"type" : "CLIENT",
"cardinality" : "1+"
},
{
"name" : "MYSQL_SERVER",
"type" : "MASTER",
"cardinality" : "1"
}
],
"configurations" : [
]
},
{
"name" : "HCATALOG",
"components" : [
{
"name" : "HCAT",
"type" : "CLIENT",
"cardinality" : "1+"
}
],
"configurations" : [
]
},
{
"name" : "WEBHCAT",
"components" : [
{
"name" : "WEBHCAT_SERVER",
"type" : "MASTER",
"cardinality" : "1"
}
],
"configurations" : [
]
},
{
"name" : "HBASE",
"components" : [
{
"name" : "HBASE_MASTER",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "HBASE_REGIONSERVER",
"type" : "SLAVE",
"cardinality" : "1+"
},
{
"name" : "HBASE_CLIENT",
"type" : "CLIENT",
"cardinality" : "1+"
}
],
"configurations" : [
]
},
{
"name" : "ZOOKEEPER",
"components" : [
{
"name" : "ZOOKEEPER_SERVER",
"type" : "MASTER",
"cardinality" : "1+"
},
{
"name" : "ZOOKEEPER_CLIENT",
"type" : "CLIENT",
"cardinality" : "1+"
}
],
"configurations" : [
]
},
{
"name" : "OOZIE",
"components" : [
{
"name" : "OOZIE_SERVER",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "OOZIE_CLIENT",
"type" : "CLIENT",
"cardinality" : "1+"
}
],
"configurations" : [
]
},
{
"name" : "SQOOP",
"components" : [
{
"name" : "SQOOP",
"type" : "CLIENT",
"cardinality" : "1+"
}
],
"configurations" : [
]
},
{
"name" : "GANGLIA",
"components" : [
{
"name" : "GANGLIA_SERVER",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "GANGLIA_MONITOR",
"type" : "SLAVE",
"cardinality" : "1+"
}
],
"configurations" : [
]
},
{
"name" : "NAGIOS",
"components" : [
{
"name" : "NAGIOS_SERVER",
"type" : "MASTER",
"cardinality" : "1"
}
],
"configurations" : [
]
},
{
"name" : "AMBARI",
"components" : [
{
"name" : "AMBARI_SERVER",
"type" : "MASTER",
"cardinality" : "1"
},
{
"name" : "AMBARI_AGENT",
"type" : "SLAVE",
"cardinality" : "1+"
}
],
"configurations" : [
],
"users" : [
{
"name" : "admin",
"password" : "admin",
"groups" : [
"admin"
]
}
]
}
],
"host_role_mappings" : [
{
"name" : "MASTER",
"components" : [
{ "name" : "NAMENODE" },
{ "name" : "JOBTRACKER" },
{ "name" : "SECONDARY_NAMENODE" },
{ "name" : "GANGLIA_SERVER" },
{ "name" : "NAGIOS_SERVER" },
{ "name" : "AMBARI_SERVER" }
],
"hosts" : [
{
"cardinality" : "1",
"default_count" : 1
}
]
},
{
"name" : "SLAVE",
"components" : [
{ "name" : "DATANODE" },
{ "name" : "TASKTRACKER" },
{ "name" : "HDFS_CLIENT" },
{ "name" : "MAPREDUCE_CLIENT" }
],
"hosts" : [
{
"cardinality" : "1+",
"default_count" : 2
}
]
}
],
"configurations" : [
{
"name" : "global",
"properties" : [
{ "name" : "java64_home", "value" : "/usr/lib/jvm/java-openjdk" },
{ "name" : "dfs_name_dir", "value" : "/mnt/hadoop/hdfs/namenode" },
{ "name" : "fs_checkpoint_dir", "value" : "/hadoop/hdfs/namesecondary" },
{ "name" : "dfs_data_dir", "value" : "/hadoop/hdfs/data" },
{ "name" : "hdfs_log_dir_prefix", "value" : "/var/log/hadoop" },
{ "name" : "hadoop_pid_dir_prefix", "value" : "/var/run/hadoop" },
{ "name" : "dfs_webhdfs_enabled", "value" : false },
{ "name" : "hadoop_heapsize", "value" : "1024" },
{ "name" : "namenode_heapsize", "value" : "1024m" },
{ "name" : "namenode_opt_newsize", "value" : "200m" },
{ "name" : "namenode_opt_maxnewsize", "value" : "200m" },
{ "name" : "datanode_du_reserved", "value" : "1" },
{ "name" : "dtnode_heapsize", "value" : "1024m" },
{ "name" : "dfs_datanode_failed_volume_tolerated", "value" : "0" },
{ "name" : "fs_checkpoint_period", "value" : "21600" },
{ "name" : "fs_checkpoint_size", "value" : "0.5" },
{ "name" : "dfs_exclude", "value" : "dfs.exclude" },
{ "name" : "dfs_include", "value" : "dfs.include" },
{ "name" : "dfs_replication", "value" : "3" },
{ "name" : "dfs_block_local_path_access_user", "value" : "hbase" },
{ "name" : "dfs_datanode_data_dir_perm", "value" : "750" },
{ "name" : "security_enabled", "value" : false },
{ "name" : "kerberos_domain", "value" : "EXAMPLE.COM" },
{ "name" : "kadmin_pw", "value" : "" },
{ "name" : "keytab_path", "value" : "/etc/security/keytabs" },
{ "name" : "namenode_formatted_mark_dir", "value" : "/var/run/hadoop/hdfs/namenode/formatted/" },
{ "name" : "hcat_conf_dir", "value" : "" },
{ "name" : "mapred_local_dir", "value" : "/hadoop/mapred" },
{ "name" : "mapred_system_dir", "value" : "/mapred/system" },
{ "name" : "scheduler_name", "value" : "org.apache.hadoop.mapred.CapacityTaskScheduler" },
{ "name" : "jtnode_opt_newsize", "value" : "200m" },
{ "name" : "jtnode_opt_maxnewsize", "value" : "200m" },
{ "name" : "jtnode_heapsize", "value" : "1024m" },
{ "name" : "mapred_map_tasks_max", "value" : "4" },
{ "name" : "mapred_red_tasks_max", "value" : "2" },
{ "name" : "mapred_cluster_map_mem_mb", "value" : "-1" },
{ "name" : "mapred_cluster_red_mem_mb", "value" : "-1" },
{ "name" : "mapred_cluster_max_map_mem_mb", "value" : "-1" },
{ "name" : "mapred_cluster_max_red_mem_mb", "value" : "-1" },
{ "name" : "mapred_job_map_mem_mb", "value" : "-1" },
{ "name" : "mapred_job_red_mem_mb", "value" : "-1" },
{ "name" : "mapred_child_java_opts_sz", "value" : "768" },
{ "name" : "io_sort_mb", "value" : "200" },
{ "name" : "io_sort_spill_percent", "value" : "0.9" },
{ "name" : "mapreduce_userlog_retainhours", "value" : "24" },
{ "name" : "maxtasks_per_job", "value" : "-1" },
{ "name" : "lzo_enabled", "value" : true },
{ "name" : "snappy_enabled", "value" : true },
{ "name" : "rca_enabled", "value" : true },
{ "name" : "mapred_hosts_exclude", "value" : "mapred.exclude" },
{ "name" : "mapred_hosts_include", "value" : "mapred.include" },
{ "name" : "mapred_jobstatus_dir", "value" : "file:////mapred/jobstatus" },
{ "name" : "task_controller", "value" : "org.apache.hadoop.mapred.DefaultTaskController" },
{ "name" : "nagios_user", "value" : "nagios" },
{ "name" : "nagios_group", "value" : "nagios" },
{ "name" : "nagios_web_login", "value" : "nagiosadmin" },
{ "name" : "nagios_web_password", "value" : "admin" },
{ "name" : "nagios_contact", "value" : "default@REPLACEME.com" },
{ "name" : "hbase_conf_dir", "value" : "/etc/hbase" },
{ "name" : "proxyuser_group", "value" : "users" },
{ "name" : "user_group", "value": "hadoop" },
{ "name" : "smokeuser", "value": "ambari-qa" },
{ "name" : "rrdcached_base_dir", "value": "/var/lib/ganglia/rrds" },
{ "name" : "dfs_datanode_address", "value" : "50010" },
{ "name" : "dfs_datanode_http_address", "value" : "50075" },
{ "name" : "gpl_artifacts_download_url", "value" : "" },
{ "name" : "apache_artifacts_download_url", "value" : "" },
{ "name" : "ganglia_runtime_dir", "value" : "/var/run/ganglia/hdp" },
{ "name" : "gmetad_user", "value" : "nobody" },
{ "name" : "gmond_user", "value" : "nobody" },
{ "name" : "run_dir", "value" : "/var/run/hadoop" },
{ "name" : "hadoop_conf_dir", "value" : "/etc/hadoop" },
{ "name" : "hdfs_user", "value" : "hdfs" },
{ "name" : "mapred_user", "value" : "mapred" },
{ "name" : "hbase_user", "value" : "hbase" },
{ "name" : "hive_user", "value" : "hive" },
{ "name" : "hcat_user", "value" : "hcat" },
{ "name" : "webhcat_user", "value" : "hcat" },
{ "name" : "oozie_user", "value" : "oozie" },
{ "name" : "zk_user", "value" : "zookeeper" },
{ "name" : "hive_ambari_database", "value" : "MySQL" },
{ "name" : "hive_database", "value" : "New MySQL Database" },
{ "name" : "hive_hostname", "value" : "%HIVE_HOST%" },
{ "name" : "hive_database_name", "value" : "hive" },
{ "name" : "hive_metastore_user_name", "value" : "hive" },
{ "name" : "hive_metastore_user_passwd", "value" : "hive" },
{ "name" : "hive_jdbc_connection_url", "value" : "jdbc:mysql://%HIVE_MYSQL_HOST%/hive?createDatabaseIfNotExist=true" },
{ "name" : "hive_metastore_port", "value" : "9083" },
{ "name" : "hive_lib", "value" : "/usr/lib/hive/lib/" },
{ "name" : "hive_conf_dir", "value" : "/etc/hive/conf" },
{ "name" : "hive_dbroot", "value" : "/usr/lib/hive/lib" },
{ "name" : "hive_log_dir", "value" : "/var/log/hive" },
{ "name" : "hive_pid_dir", "value" : "/var/run/hive" },
{ "name" : "mysql_connector_url", "value" : "${download_url}/mysql-connector-java-5.1.18.zip" },
{ "name" : "hive_aux_jars_path", "value" : "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar" },
{ "name" : "hcat_log_dir", "value" : "/var/log/webhcat" },
{ "name" : "hcat_pid_dir", "value" : "/var/run/webhcat" },
{ "name" : "zk_data_dir", "value" : "/hadoop/zookeeper" },
{ "name" : "tickTime", "value" : "2000" },
{ "name" : "initLimit", "value" : "10" },
{ "name" : "syncLimit", "value" : "5" },
{ "name" : "clientPort", "value" : "2181" },
{ "name" : "zk_log_dir", "value" : "/var/log/zookeeper" },
{ "name" : "zk_pid_dir", "value" : "/var/run/zookeeper" },
{ "name" : "zk_pid_file", "value" : "/var/run/zookeeper/zookeeper_server.pid" },
{ "name" : "hive_database_type", "value" : "mysql" },
{ "name" : "hive_jdbc_driver", "value" : "com.mysql.jdbc.Driver" },
{ "name" : "oozie_derby_database", "value" : "Derby" },
{ "name" : "oozie_database", "value" : "New Derby Database" },
{ "name" : "oozie_hostname", "value" : "%OOZIE_HOST%" },
{ "name" : "oozie_database_name", "value" : "oozie" },
{ "name" : "oozie_metastore_user_name", "value" : "oozie" },
{ "name" : "oozie_metastore_user_passwd", "value" : "oozie" },
{ "name" : "oozie_jdbc_connection_url", "value" : "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true" },
{ "name" : "oozie_data_dir", "value" : "/hadoop/oozie/data" },
{ "name" : "oozie_log_dir", "value" : "/var/log/oozie" },
{ "name" : "oozie_pid_dir", "value" : "/var/run/oozie" },
{ "name" : "oozie_database_type", "value" : "derby" },
{ "name" : "oozie_jdbc_driver", "value" : "org.apache.derby.jdbc.EmbeddedDriver" },
{ "name" : "hbase_master_heapsize", "value" : "1024m" },
{ "name" : "hbase_regionserver_heapsize", "value" : "1024m" },
{ "name" : "regionserver_handlers", "value" : "60" },
{ "name" : "hregion_majorcompaction", "value" : "86400000" },
{ "name" : "hregion_blockmultiplier", "value" : "2" },
{ "name" : "hregion_memstoreflushsize", "value" : "134217728" },
{ "name" : "hstore_compactionthreshold", "value" : "3" },
{ "name" : "hfile_blockcache_size", "value" : "0.40" },
{ "name" : "hstorefile_maxsize", "value" : "10737418240" },
{ "name" : "client_scannercaching", "value" : "100" },
{ "name" : "zookeeper_sessiontimeout", "value" : "60000" },
{ "name" : "hfile_max_keyvalue_size", "value" : "10485760" },
{ "name" : "hbase_log_dir", "value" : "/var/log/hbase" },
{ "name" : "hbase_pid_dir", "value" : "/var/run/hbase" },
{ "name" : "hbase_hdfs_root_dir", "value" : "/apps/hbase/data" },
{ "name" : "hbase_tmp_dir", "value" : "/var/log/hbase" },
{ "name" : "hdfs_enable_shortcircuit_read", "value" : "true" },
{ "name" : "hdfs_support_append", "value" : "true" },
{ "name" : "hstore_blockingstorefiles", "value" : "7" },
{ "name" : "regionserver_memstore_lab", "value" : "true" },
{ "name" : "regionserver_memstore_lowerlimit", "value" : "0.38" },
{ "name" : "regionserver_memstore_upperlimit", "value" : "0.4" }
]
},
{
"name" : "core-site",
"properties" : [
{ "name" : "io.file.buffer.size", "value" : "131072" },
{ "name" : "io.serializations", "value" : "org.apache.hadoop.io.serializer.WritableSerialization" },
{ "name" : "io.compression.codec.lzo.class", "value" : "com.hadoop.compression.lzo.LzoCodec" },
{ "name" : "fs.trash.interval", "value" : "360" },
{ "name" : "ipc.client.idlethreshold", "value" : "8000" },
{ "name" : "ipc.client.connection.maxidletime", "value" : "30000" },
{ "name" : "ipc.client.connect.max.retries", "value" : "50" },
{ "name" : "webinterface.private.actions", "value" : "false" },
{ "name" : "fs.default.name", "value" : "hdfs://%NN_HOST%:8020" },
{ "name" : "fs.checkpoint.dir", "value" : "/hadoop/hdfs/namesecondary" },
{ "name" : "fs.checkpoint.period", "value" : "21600" },
{ "name" : "fs.checkpoint.size", "value" : "0.5" },
{ "name" : "fs.checkpoint.edits.dir", "value" : "/hadoop/hdfs/namesecondary" },
{ "name" : "fs.swift.impl", "value" : "org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem" },
{ "name" : "fs.swift.connect.timeout", "value" : "15000" },
{ "name" : "fs.swift.socket.timeout", "value" : "60000" },
{ "name" : "fs.swift.connect.retry.count", "value" : "3" },
{ "name" : "fs.swift.connect.throttle.delay", "value" : "0" },
{ "name" : "fs.swift.blocksize", "value" : "32768" },
{ "name" : "fs.swift.partsize", "value" : "4718592" },
{ "name" : "fs.swift.requestsize", "value" : "64" },
{ "name" : "fs.swift.service.sahara.public", "value" : "true" },
{ "name" : "fs.swift.service.sahara.http.port", "value" : "8080" },
{ "name" : "fs.swift.service.sahara.https.port", "value" : "443" },
{ "name" : "fs.swift.service.sahara.auth.url", "value" : "None" },
{ "name" : "fs.swift.service.sahara.tenant", "value" : "None"},
{ "name" : "hadoop.proxyuser.hive.groups", "value" : "users" },
{ "name" : "hadoop.proxyuser.hive.hosts", "value" : "%HIVE_HOST%" },
{ "name" : "hadoop.proxyuser.hcat.groups", "value" : "users" },
{ "name" : "hadoop.proxyuser.hcat.hosts", "value" : "%WEBHCAT_HOST%" },
{ "name" : "hadoop.proxyuser.oozie.groups", "value" : "hadoop" },
{ "name" : "hadoop.proxyuser.oozie.hosts", "value" : "%OOZIE_HOST%" },
{ "name" : "io.compression.codecs", "value": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec" }
]
},
{
"name" : "mapred-site",
"properties" : [
{ "name" : "io.sort.record.percent", "value" : ".2" },
{ "name" : "io.sort.factor", "value" : "100" },
{ "name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill", "value" : "250" },
{ "name" : "mapred.job.tracker.handler.count", "value" : "50" },
{ "name" : "mapreduce.cluster.administrators", "value" : " hadoop" },
{ "name" : "mapred.reduce.parallel.copies", "value" : "30" },
{ "name" : "tasktracker.http.threads", "value" : "50" },
{ "name" : "mapred.map.tasks.speculative.execution", "value" : "false" },
{ "name" : "mapred.reduce.tasks.speculative.execution", "value" : "false" },
{ "name" : "mapred.reduce.slowstart.completed.maps", "value" : "0.05" },
{ "name" : "mapred.inmem.merge.threshold", "value" : "1000" },
{ "name" : "mapred.job.shuffle.merge.percent", "value" : "0.66" },
{ "name" : "mapred.job.shuffle.input.buffer.percent", "value" : "0.7" },
{ "name" : "mapred.output.compression.type", "value" : "BLOCK" },
{ "name" : "mapred.jobtracker.completeuserjobs.maximum", "value" : "0" },
{ "name" : "mapred.jobtracker.restart.recover", "value" : "false" },
{ "name" : "mapred.job.reduce.input.buffer.percent", "value" : "0.0" },
{ "name" : "mapreduce.reduce.input.limit", "value" : "10737418240" },
{ "name" : "mapred.task.timeout", "value" : "600000" },
{ "name" : "jetty.connector", "value" : "org.mortbay.jetty.nio.SelectChannelConnector" },
{ "name" : "mapred.child.root.logger", "value" : "INFO,TLA" },
{ "name" : "mapred.max.tracker.blacklists", "value" : "16" },
{ "name" : "mapred.healthChecker.interval", "value" : "135000" },
{ "name" : "mapred.healthChecker.script.timeout", "value" : "60000" },
{ "name" : "mapred.job.tracker.persist.jobstatus.active", "value" : "false" },
{ "name" : "mapred.job.tracker.persist.jobstatus.hours", "value" : "1" },
{ "name" : "mapred.jobtracker.retirejob.check", "value" : "10000" },
{ "name" : "mapred.jobtracker.retirejob.interval", "value" : "0" },
{ "name" : "mapred.job.tracker.history.completed.location", "value" : "/mapred/history/done" },
{ "name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs", "value" : "false" },
{ "name" : "mapred.job.reuse.jvm.num.tasks", "value" : "1" },
{ "name" : "hadoop.job.history.user.location", "value" : "none" },
{ "name" : "mapreduce.jobtracker.staging.root.dir", "value" : "/user" },
{ "name" : "mapreduce.tasktracker.group", "value" : "hadoop" },
{ "name" : "mapreduce.jobtracker.split.metainfo.maxsize", "value" : "50000000" },
{ "name" : "mapred.jobtracker.blacklist.fault-timeout-window", "value" : "180" },
{ "name" : "mapred.jobtracker.blacklist.fault-bucket-width", "value" : "15" },
{ "name" : "mapred.queue.names", "value" : "default" },
{ "name" : "mapred.local.dir", "value" : "/mnt/hadoop/mapred" },
{ "name" : "mapred.jobtracker.taskScheduler", "value" : "org.apache.hadoop.mapred.CapacityTaskScheduler" },
{ "name" : "mapred.tasktracker.map.tasks.maximum", "value" : "4" },
{ "name" : "mapred.tasktracker.reduce.tasks.maximum", "value" : "2" },
{ "name" : "mapred.cluster.reduce.memory.mb", "value" : "-1" },
{ "name" : "mapred.job.map.memory.mb", "value" : "-1" },
{ "name" : "mapred.cluster.max.map.memory.mb", "value" : "-1" },
{ "name" : "mapred.cluster.max.reduce.memory.mb", "value" : "-1" },
{ "name" : "mapred.job.reduce.memory.mb", "value" : "-1" },
{ "name" : "mapred.hosts", "value" : "/etc/hadoop/mapred.include" },
{ "name" : "mapred.hosts.exclude", "value" : "/etc/hadoop/mapred.exclude" },
{ "name" : "mapred.healthChecker.script.path", "value" : "file:////mapred/jobstatus" },
{ "name" : "mapred.job.tracker.persist.jobstatus.dir", "value" : "/etc/hadoop/health_check" },
{ "name" : "mapred.child.java.opts", "value" : "-server -Xmx768m -Djava.net.preferIPv4Stack=true" },
{ "name" : "mapred.cluster.map.memory.mb", "value" : "-1" },
{ "name" : "io.sort.mb", "value" : "200" },
{ "name" : "io.sort.spill.percent", "value" : "0.9" },
{ "name" : "mapred.system.dir", "value" : "/mapred/system" },
{ "name" : "mapred.job.tracker", "value" : "%JT_HOST%:50300" },
{ "name" : "mapred.job.tracker.http.address", "value" : "%JT_HOST%:50030" },
{ "name" : "mapred.userlog.retain.hours", "value" : "24" },
{ "name" : "mapred.jobtracker.maxtasks.per.job", "value" : "-1" },
{ "name" : "mapred.task.tracker.task-controller", "value" : "org.apache.hadoop.mapred.DefaultTaskController" },
{ "name" : "mapreduce.jobtracker.kerberos.principal", "value" : "jt/_HOST@EXAMPLE.COM" },
{ "name" : "mapreduce.tasktracker.kerberos.principal", "value" : "tt/_HOST@EXAMPLE.COM" },
{ "name" : "mapreduce.jobtracker.keytab.file", "value" : "/etc/security/keytabs/jt.service.keytab" },
{ "name" : "mapreduce.tasktracker.keytab.file", "value" : "/etc/security/keytabs/tt.service.keytab" },
{ "name" : "mapreduce.history.server.embedded", "value" : "false" },
{ "name" : "mapreduce.history.server.http.address", "value" : "%JT_HOST%:51111" },
{ "name" : "mapreduce.jobhistory.kerberos.principal", "value" : "jt/_HOST@EXAMPLE.COM" },
{ "name" : "mapreduce.jobhistory.keytab.file", "value" : "/etc/security/keytabs/jt.service.keytab" },
{ "name" : "mapreduce.jobhistory.webapp.address", "value": "%HS_HOST%:19888" }
]
},
{
"name" : "hdfs-site",
"properties" : [
{ "name" : "dfs.datanode.socket.write.timeout", "value" : "0" },
{ "name" : "dfs.replication.max", "value" : "50" },
{ "name" : "dfs.heartbeat.interval", "value" : "3" },
{ "name" : "dfs.safemode.threshold.pct", "value" : "1.0f" },
{ "name" : "dfs.balance.bandwidthPerSec", "value" : "6250000" },
{ "name" : "dfs.block.size", "value" : "134217728" },
{ "name" : "dfs.datanode.ipc.address", "value" : "0.0.0.0:8010" },
{ "name" : "dfs.blockreport.initialDelay", "value" : "120" },
{ "name" : "dfs.datanode.du.pct", "value" : "0.85f" },
{ "name" : "dfs.namenode.handler.count", "value" : "40" },
{ "name" : "dfs.datanode.max.xcievers", "value" : "4096" },
{ "name" : "dfs.umaskmode", "value" : "077" },
{ "name" : "dfs.web.ugi", "value" : "gopher,gopher" },
{ "name" : "dfs.permissions", "value" : "true" },
{ "name" : "dfs.permissions.supergroup", "value" : "hdfs" },
{ "name" : "ipc.server.max.response.size", "value" : "5242880" },
{ "name" : "dfs.block.access.token.enable", "value" : "true" },
{ "name" : "dfs.secondary.https.port", "value" : "50490" },
{ "name" : "dfs.https.port", "value" : "50470" },
{ "name" : "dfs.access.time.precision", "value" : "0" },
{ "name" : "dfs.cluster.administrators", "value" : " hdfs" },
{ "name" : "ipc.server.read.threadpool.size", "value" : "5" },
{ "name" : "dfs.name.dir", "value" : "/mnt/hadoop/hdfs/namenode" },
{ "name" : "dfs.webhdfs.enabled", "value" : "false" },
{ "name" : "dfs.datanode.failed.volumes.tolerated", "value" : "0" },
{ "name" : "dfs.block.local-path-access.user", "value" : "hbase" },
{ "name" : "dfs.data.dir", "value" : "/mnt/hadoop/hdfs/data" },
{ "name" : "dfs.hosts.exclude", "value" : "/etc/hadoop/dfs.exclude" },
{ "name" : "dfs.hosts", "value" : "/etc/hadoop/dfs.include" },
{ "name" : "dfs.replication", "value" : "3" },
{ "name" : "dfs.datanode.address", "value" : "0.0.0.0:50010" },
{ "name" : "dfs.datanode.http.address", "value" : "0.0.0.0:50075" },
{ "name" : "dfs.http.address", "value" : "%NN_HOST%:50070" },
{ "name" : "dfs.datanode.du.reserved", "value" : "1" },
{ "name" : "dfs.namenode.kerberos.principal", "value" : "nn/_HOST@EXAMPLE.COM" },
{ "name" : "dfs.secondary.namenode.kerberos.principal", "value" : "nn/_HOST@EXAMPLE.COM" },
{ "name" : "dfs.namenode.kerberos.https.principal", "value" : "host/_HOST@EXAMPLE.COM" },
{ "name" : "dfs.secondary.namenode.kerberos.https.principal", "value" : "host/_HOST@EXAMPLE.COM" },
{ "name" : "dfs.secondary.http.address", "value" : "%SNN_HOST%:50090" },
{ "name" : "dfs.web.authentication.kerberos.keytab", "value" : "/etc/security/keytabs/spnego.service.keytab" },
{ "name" : "dfs.datanode.kerberos.principal", "value" : "dn/_HOST@EXAMPLE.COM" },
{ "name" : "dfs.namenode.keytab.file", "value" : "/etc/security/keytabs/nn.service.keytab" },
{ "name" : "dfs.secondary.namenode.keytab.file", "value" : "/etc/security/keytabs/nn.service.keytab" },
{ "name" : "dfs.datanode.keytab.file", "value" : "/etc/security/keytabs/dn.service.keytab" },
{ "name" : "dfs.https.address", "value" : "%NN_HOST%:50470" },
{ "name" : "dfs.datanode.data.dir.perm", "value" : "750" }
]
},
{
"name" : "hive-site",
"properties" : [
{ "name" : "javax.jdo.option.ConnectionURL", "value" : "jdbc:mysql://%HIVE_HOST%/hive?createDatabaseIfNotExist=true" },
{ "name" : "hive.metastore.uris", "value" : "thrift://%HIVE_METASTORE_HOST%:9083" },
{ "name" : "javax.jdo.option.ConnectionDriverName", "value" : "com.mysql.jdbc.Driver" },
{ "name" : "javax.jdo.option.ConnectionUserName", "value" : "hive" },
{ "name" : "javax.jdo.option.ConnectionPassword", "value" : "hive" },
{ "name" : "fs.file.impl.disable.cache", "value" : "true" },
{ "name" : "hive.optimize.bucketmapjoin.sortedmerge", "value" : "true" },
{ "name" : "hive.auto.convert.join.noconditionaltask", "value" : "true" },
{ "name" : "hadoop.clientside.fs.operations", "value" : "true" },
{ "name" : "hive.mapred.reduce.tasks.speculative.execution", "value" : "false" },
{ "name" : "fs.hdfs.impl.disable.cache", "value" : "true" },
{ "name" : "hive.metastore.warehouse.dir", "value" : "/apps/hive/warehouse" },
{ "name" : "hive.semantic.analyzer.factory.impl", "value" : "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory" },
{ "name" : "hive.server2.enable.doAs", "value" : "true" },
{ "name" : "hive.metastore.local", "value" : "false" },
{ "name" : "hive.auto.convert.join", "value" : "true" },
{ "name" : "hive.auto.convert.sortmerge.join", "value" : "true" },
{ "name" : "hive.auto.convert.sortmerge.join.noconditionaltask", "value" : "true" },
{ "name" : "hive.optimize.reducededuplication.min.reducer", "value" : "1" },
{ "name" : "hive.optimize.bucketmapjoin", "value" : "true" },
{ "name" : "hive.auto.convert.join.noconditionaltask.size", "value" : "1000000000" },
{ "name" : "hive.security.authorization.enabled", "value" : "true" },
{ "name" : "hive.enforce.sorting", "value" : "true" },
{ "name" : "hive.metastore.client.socket.timeout", "value" : "60" },
{ "name" : "hive.mapjoin.bucket.cache.size", "value" : "10000" },
{ "name" : "hive.enforce.bucketing", "value" : "true" },
{ "name" : "hive.security.authorization.manager", "value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider" },
{ "name" : "hive.map.aggr", "value" : "true" },
{ "name" : "hive.metastore.execute.setugi", "value" : "true" },
{ "name" : "hive.optimize.mapjoin.mapreduce", "value" : "true" },
{ "name" : "hive.metastore.cache.pinobjtypes", "value" : "Table,Database,Type,FieldSchema,Order" }
]
},
{
"name" : "webhcat-site",
"properties" : [
{ "name" : "templeton.streaming.jar", "value" : "hdfs:///apps/webhcat/hadoop-streaming.jar" },
{ "name" : "templeton.pig.path", "value" : "pig.tar.gz/pig/bin/pig" },
{ "name" : "templeton.port", "value" : "50111" },
{ "name" : "templeton.jar", "value" : "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar" },
{ "name" : "templeton.hive.archive", "value" : "hdfs:///apps/webhcat/hive.tar.gz" },
{ "name" : "templeton.libjars", "value" : "/usr/lib/zookeeper/zookeeper.jar" },
{ "name" : "templeton.hadoop", "value" : "/usr/bin/hadoop" },
{ "name" : "templeton.hcat", "value" : "/usr/bin/hcat" },
{ "name" : "templeton.pig.archive", "value" : "hdfs:///apps/webhcat/pig.tar.gz" },
{ "name" : "templeton.hive.path", "value" : "hive.tar.gz/hive/bin/hive" },
{ "name" : "templeton.exec.timeout", "value" : "60000" },
{ "name" : "templeton.override.enabled", "value" : "false" },
{ "name" : "templeton.storage.class", "value" : "org.apache.hcatalog.templeton.tool.ZooKeeperStorage" },
{ "name" : "templeton.hadoop.conf.dir", "value" : "/etc/hadoop/conf" },
{ "name" : "templeton.hive.properties", "value" : "hive.metastore.local=false,hive.metastore.uris=thrift://%HIVE_METASTORE_HOST%:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse" },
{ "name" : "templeton.zookeeper.hosts", "value" : "%ZOOKEEPER_HOSTS%" }
]
},
{
"name" : "hbase-site",
"properties" : [
{ "name" : "hbase.rpc.engine", "value" : "org.apache.hadoop.hbase.ipc.WritableRpcEngine" },
{ "name" : "hbase.security.authentication", "value" : "simple" },
{ "name" : "hbase.security.authorization", "value" : "false" },
{ "name" : "hbase.superuser", "value" : "hbase" },
{ "name" : "hbase.zookeeper.useMulti", "value" : "true" },
{ "name" : "zookeeper.znode.parent", "value" : "/hbase-unsecure" },
{ "name" : "hbase.rootdir", "value" : "hdfs://%NN_HOST%:8020/apps/hbase/data" },
{ "name" : "hbase.tmp.dir", "value" : "/var/log/hbase" },
{ "name" : "hbase.regionserver.global.memstore.upperLimit", "value" : "0.4" },
{ "name" : "hbase.hstore.blockingStoreFiles", "value" : "7" },
{ "name" : "hbase.hstore.compactionThreshold", "value" : "3" },
{ "name" : "hfile.block.cache.size", "value" : "0.40" },
{ "name" : "hbase.hregion.max.filesize", "value" : "10737418240" },
{ "name" : "hbase.regionserver.handler.count", "value" : "60" },
{ "name" : "hbase.hregion.majorcompaction", "value" : "86400000" },
{ "name" : "hbase.regionserver.global.memstore.lowerLimit", "value" : "0.38" },
{ "name" : "hbase.hregion.memstore.block.multiplier", "value" : "2" },
{ "name" : "hbase.hregion.memstore.mslab.enabled", "value" : "true" },
{ "name" : "hbase.hregion.memstore.flush.size", "value" : "134217728" },
{ "name" : "hbase.client.scanner.caching", "value" : "100" },
{ "name" : "hbase.cluster.distributed", "value" : "true" },
{ "name" : "hbase.zookeeper.property.clientPort", "value" : "2181" },
{ "name" : "zookeeper.session.timeout", "value" : "60000" },
{ "name" : "hbase.client.keyvalue.maxsize", "value" : "10485760" },
{ "name" : "dfs.support.append", "value" : "true" },
{ "name" : "dfs.client.read.shortcircuit", "value" : "true" },
{ "name" : "hbase.zookeeper.quorum", "value" : "%ZOOKEEPER_HOSTS%" },
{ "name" : "hbase.master.info.port", "value" : "60010" }
]
},
{
"name" : "oozie-site",
"properties" : [
{ "name" : "oozie.authentication.kerberos.name.rules" , "value" : "DEFAULT" },
{ "name" : "oozie.authentication.type" , "value" : "simple" },
{ "name" : "oozie.service.ActionService.executor.ext.classes" , "value" : "org.apache.oozie.action.email.EmailActionExecutor,\norg.apache.oozie.action.hadoop.HiveActionExecutor,\norg.apache.oozie.action.hadoop.ShellActionExecutor,\norg.apache.oozie.action.hadoop.SqoopActionExecutor,\norg.apache.oozie.action.hadoop.DistcpActionExecutor" },
{ "name" : "oozie.service.AuthorizationService.authorization.enabled" , "value" : "false" },
{ "name" : "oozie.service.CallableQueueService.callable.concurrency" , "value" : "3" },
{ "name" : "oozie.service.CallableQueueService.queue.size" , "value" : "1000" },
{ "name" : "oozie.service.CallableQueueService.threads" , "value" : "10" },
{ "name" : "oozie.service.HadoopAccessorService.hadoop.configurations" , "value" : "*=/etc/hadoop/conf" },
{ "name" : "oozie.service.HadoopAccessorService.jobTracker.whitelist" , "value" : " " },
{ "name" : "oozie.service.HadoopAccessorService.nameNode.whitelist" , "value" : " " },
{ "name" : "oozie.service.JPAService.pool.max.active.conn" , "value" : "10" },
{ "name" : "oozie.service.PurgeService.older.than" , "value" : "30" },
{ "name" : "oozie.service.PurgeService.purge.interval" , "value" : "3600" },
{ "name" : "oozie.service.SchemaService.wf.ext.schemas" , "value" : "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd" },
{ "name" : "oozie.service.WorkflowAppService.system.libpath" , "value" : "/user/${user.name}/share/lib" },
{ "name" : "oozie.service.coord.normal.default.timeout" , "value" : "120" },
{ "name" : "oozie.system.id" , "value" : "oozie-${user.name}" },
{ "name" : "oozie.systemmode" , "value" : "NORMAL" },
{ "name" : "use.system.libpath.for.mapreduce.and.pig.jobs" , "value" : "false" },
{ "name" : "oozie.base.url" , "value" : "http://%OOZIE_HOST%:11000/oozie" },
{ "name" : "oozie.service.JPAService.create.db.schema" , "value" : "false" },
{ "name" : "oozie.db.schema.name" , "value" : "oozie" },
{ "name" : "oozie.service.JPAService.jdbc.driver" , "value" : "org.apache.derby.jdbc.EmbeddedDriver" },
{ "name" : "oozie.service.JPAService.jdbc.username" , "value" : "oozie" },
{ "name" : "oozie.service.JPAService.jdbc.password" , "value" : "oozie" },
{ "name" : "oozie.service.JPAService.jdbc.url" , "value" : "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true" }
]
},
{
"name" : "ambari",
"properties" : [
{ "name" : "server.port", "value" : "8080" },
{ "name" : "rpm", "value" : "http://s3.amazonaws.com/public-repo-1.hortonworks.com/ambari/centos6/1.x/updates/1.6.0/ambari.repo" }
]
}
]
}

View File

@@ -1,21 +0,0 @@
#!/bin/bash
HADOOP_CONF=/etc/hadoop/conf

while [ $# -gt 0 ] ; do
  nodeArg=$1
  exec< ${HADOOP_CONF}/topology.data
  result=""
  while read line ; do
    ar=( $line )
    if [ "${ar[0]}" = "$nodeArg" ] ; then
      result="${ar[1]}"
    fi
  done
  shift
  if [ -z "$result" ] ; then
    echo -n "/default/rack "
  else
    echo -n "$result "
  fi
done
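
The script above resolves each host argument against ${HADOOP_CONF}/topology.data, a whitespace-separated host-to-rack mapping, and prints /default/rack for hosts that are not listed. A minimal Python sketch of the same lookup, assuming that two-column file format:

```python
# Minimal sketch of the lookup performed by topology.sh above.  Assumes
# topology.data holds whitespace-separated "host rack" pairs, one per line.
import sys

TOPOLOGY_DATA = '/etc/hadoop/conf/topology.data'


def resolve_racks(hosts, data_path=TOPOLOGY_DATA):
    mapping = {}
    with open(data_path) as f:
        for line in f:
            parts = line.split()
            if len(parts) >= 2:
                mapping[parts[0]] = parts[1]
    return [mapping.get(host, '/default/rack') for host in hosts]


if __name__ == '__main__':
    # Hadoop invokes the topology script with one or more host arguments.
    print(' '.join(resolve_racks(sys.argv[1:])))
```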

View File

@@ -1,739 +0,0 @@
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from oslo_config import cfg
import six
from sahara import exceptions as e
from sahara.i18n import _
from sahara.plugins import exceptions as ex
from sahara.plugins import utils
from sahara.swift import swift_helper as h
from sahara.topology import topology_helper as th
CONF = cfg.CONF
TOPOLOGY_CONFIG = {
"topology.node.switch.mapping.impl":
"org.apache.hadoop.net.ScriptBasedMapping",
"topology.script.file.name":
"/etc/hadoop/conf/topology.sh"
}
def create_service(name):
for cls in Service.__subclasses__():
if cls.get_service_id() == name:
return cls()
# no subclass found, return service base class
return Service(name)
class Service(object):
def __init__(self, name):
self.name = name
self.configurations = set(['global', 'core-site'])
self.components = []
self.users = []
self.deployed = False
def add_component(self, component):
self.components.append(component)
def add_user(self, user):
self.users.append(user)
def validate(self, cluster_spec, cluster):
pass
def finalize_configuration(self, cluster_spec):
pass
def register_user_input_handlers(self, ui_handlers):
pass
def register_service_urls(self, cluster_spec, url_info, cluster):
return url_info
def pre_service_start(self, cluster_spec, ambari_info, started_services):
pass
def finalize_ng_components(self, cluster_spec):
pass
def is_user_template_component(self, component):
return True
def is_mandatory(self):
return False
def _replace_config_token(self, cluster_spec, token, value, props):
for config_name, props in six.iteritems(props):
config = cluster_spec.configurations[config_name]
for prop in props:
config[prop] = config[prop].replace(token, value)
def _update_config_values(self, configurations, value, props):
for absolute_prop_name in props:
tokens = absolute_prop_name.split('/')
config_name = tokens[0]
prop_name = tokens[1]
config = configurations[config_name]
config[prop_name] = value
def _get_common_paths(self, node_groups):
if len(node_groups) == 1:
paths = node_groups[0].storage_paths()
else:
sets = [set(ng.storage_paths()) for ng in node_groups]
paths = list(set.intersection(*sets))
if len(paths) > 1 and '/mnt' in paths:
paths.remove('/mnt')
return paths
def _generate_storage_path(self, storage_paths, path):
return ",".join([p + path for p in storage_paths])
def _get_port_from_cluster_spec(self, cluster_spec, service, prop_name):
address = cluster_spec.configurations[service][prop_name]
return utils.get_port_from_address(address)
class HdfsService(Service):
def __init__(self):
super(HdfsService, self).__init__(HdfsService.get_service_id())
self.configurations.add('hdfs-site')
@classmethod
def get_service_id(cls):
return 'HDFS'
def validate(self, cluster_spec, cluster):
# check for a single NAMENODE
count = cluster_spec.get_deployed_node_group_count('NAMENODE')
if count != 1:
raise ex.InvalidComponentCountException('NAMENODE', 1, count)
def finalize_configuration(self, cluster_spec):
nn_hosts = cluster_spec.determine_component_hosts('NAMENODE')
if nn_hosts:
props = {'core-site': ['fs.default.name'],
'hdfs-site': ['dfs.http.address', 'dfs.https.address']}
self._replace_config_token(
cluster_spec, '%NN_HOST%', nn_hosts.pop().fqdn(), props)
snn_hosts = cluster_spec.determine_component_hosts(
'SECONDARY_NAMENODE')
if snn_hosts:
props = {'hdfs-site': ['dfs.secondary.http.address']}
self._replace_config_token(
cluster_spec, '%SNN_HOST%', snn_hosts.pop().fqdn(), props)
# add swift properties to configuration
core_site_config = cluster_spec.configurations['core-site']
for prop in self._get_swift_properties():
core_site_config[prop['name']] = prop['value']
# add topology properties to configuration, if enabled
if CONF.enable_data_locality:
for prop in th.vm_awareness_core_config():
core_site_config[prop['name']] = prop['value']
core_site_config.update(TOPOLOGY_CONFIG)
# process storage paths to accommodate ephemeral or cinder storage
nn_ng = cluster_spec.get_node_groups_containing_component(
'NAMENODE')[0]
dn_node_groups = cluster_spec.get_node_groups_containing_component(
'DATANODE')
common_paths = []
if dn_node_groups:
common_paths = self._get_common_paths(dn_node_groups)
hdfs_site_config = cluster_spec.configurations['hdfs-site']
global_config = cluster_spec.configurations['global']
hdfs_site_config['dfs.name.dir'] = self._generate_storage_path(
nn_ng.storage_paths(), '/hadoop/hdfs/namenode')
global_config['dfs_name_dir'] = self._generate_storage_path(
nn_ng.storage_paths(), '/hadoop/hdfs/namenode')
if common_paths:
hdfs_site_config['dfs.data.dir'] = self._generate_storage_path(
common_paths, '/hadoop/hdfs/data')
global_config['dfs_data_dir'] = self._generate_storage_path(
common_paths, '/hadoop/hdfs/data')
def register_service_urls(self, cluster_spec, url_info, cluster):
namenode_ip = cluster_spec.determine_component_hosts(
'NAMENODE').pop().management_ip
ui_port = self._get_port_from_cluster_spec(cluster_spec, 'hdfs-site',
'dfs.http.address')
nn_port = self._get_port_from_cluster_spec(cluster_spec, 'core-site',
'fs.default.name')
url_info['HDFS'] = {
'Web UI': 'http://%s:%s' % (namenode_ip, ui_port),
'NameNode': 'hdfs://%s:%s' % (namenode_ip, nn_port)
}
return url_info
def is_mandatory(self):
return True
def _get_swift_properties(self):
return h.get_swift_configs()
class MapReduceService(Service):
def __init__(self):
super(MapReduceService, self).__init__(
MapReduceService.get_service_id())
self.configurations.add('mapred-site')
@classmethod
def get_service_id(cls):
return 'MAPREDUCE'
def validate(self, cluster_spec, cluster):
count = cluster_spec.get_deployed_node_group_count('JOBTRACKER')
if count != 1:
raise ex.InvalidComponentCountException('JOBTRACKER', 1, count)
count = cluster_spec.get_deployed_node_group_count('TASKTRACKER')
if not count:
raise ex.InvalidComponentCountException(
'TASKTRACKER', '> 0', count)
def finalize_configuration(self, cluster_spec):
jt_hosts = cluster_spec.determine_component_hosts('JOBTRACKER')
if jt_hosts:
props = {'mapred-site': ['mapred.job.tracker',
'mapred.job.tracker.http.address',
'mapreduce.history.server.http.address']}
self._replace_config_token(
cluster_spec, '%JT_HOST%', jt_hosts.pop().fqdn(), props)
# HISTORYSERVER component now a part of MapReduce 1 in Ambari 1.6.0
hs_hosts = cluster_spec.determine_component_hosts('HISTORYSERVER')
if hs_hosts:
props = {'mapred-site': ['mapreduce.jobhistory.webapp.address']}
self._replace_config_token(
cluster_spec, '%HS_HOST%', hs_hosts.pop().fqdn(), props)
# data locality/rack awareness prop processing
mapred_site_config = cluster_spec.configurations['mapred-site']
if CONF.enable_data_locality:
for prop in th.vm_awareness_mapred_config():
mapred_site_config[prop['name']] = prop['value']
# process storage paths to accommodate ephemeral or cinder storage
# NOTE: mapred.system.dir is an HDFS namespace path (not a filesystem
# path) so the default path should suffice
tt_node_groups = cluster_spec.get_node_groups_containing_component(
'TASKTRACKER')
if tt_node_groups:
global_config = cluster_spec.configurations['global']
common_paths = self._get_common_paths(tt_node_groups)
mapred_site_config['mapred.local.dir'] = (
self._generate_storage_path(common_paths, '/hadoop/mapred'))
global_config['mapred_local_dir'] = self._generate_storage_path(
common_paths, '/hadoop/mapred')
def finalize_ng_components(self, cluster_spec):
# add HISTORYSERVER, since HDP 1.3.2 stack was
# modified in Ambari 1.5.1/1.6.0 to include this component
# in the MAPREDUCE service
ambari_server_ngs = (
cluster_spec.get_node_groups_containing_component('JOBTRACKER'))
for ng in ambari_server_ngs:
if 'HISTORYSERVER' not in ng.components:
ng.components.append('HISTORYSERVER')
def register_service_urls(self, cluster_spec, url_info, cluster):
jobtracker_ip = cluster_spec.determine_component_hosts(
'JOBTRACKER').pop().management_ip
ui_port = self._get_port_from_cluster_spec(
cluster_spec, 'mapred-site', 'mapreduce.jobhistory.webapp.address')
jt_port = self._get_port_from_cluster_spec(
cluster_spec, 'mapred-site', 'mapred.job.tracker')
url_info['MapReduce'] = {
'Web UI': 'http://%s:%s' % (jobtracker_ip, ui_port),
'JobTracker': '%s:%s' % (jobtracker_ip, jt_port)
}
return url_info
def is_mandatory(self):
return True
class HiveService(Service):
def __init__(self):
super(HiveService, self).__init__(HiveService.get_service_id())
self.configurations.add('hive-site')
@classmethod
def get_service_id(cls):
return 'HIVE'
def validate(self, cluster_spec, cluster):
count = cluster_spec.get_deployed_node_group_count('HIVE_SERVER')
if count != 1:
raise ex.InvalidComponentCountException('HIVE_SERVER', 1, count)
def finalize_configuration(self, cluster_spec):
hive_servers = cluster_spec.determine_component_hosts('HIVE_SERVER')
if hive_servers:
props = {'global': ['hive_hostname'],
'core-site': ['hadoop.proxyuser.hive.hosts'],
'hive-site': ['javax.jdo.option.ConnectionURL']}
self._replace_config_token(
cluster_spec, '%HIVE_HOST%', hive_servers.pop().fqdn(), props)
hive_ms = cluster_spec.determine_component_hosts('HIVE_METASTORE')
if hive_ms:
self._replace_config_token(
cluster_spec, '%HIVE_METASTORE_HOST%', hive_ms.pop().fqdn(),
{'hive-site': ['hive.metastore.uris']})
hive_mysql = cluster_spec.determine_component_hosts('MYSQL_SERVER')
if hive_mysql:
self._replace_config_token(
cluster_spec, '%HIVE_MYSQL_HOST%', hive_mysql.pop().fqdn(),
{'global': ['hive_jdbc_connection_url']})
def register_user_input_handlers(self, ui_handlers):
ui_handlers['hive-site/javax.jdo.option.ConnectionUserName'] = (
self._handle_user_property_metastore_user)
ui_handlers['hive-site/javax.jdo.option.ConnectionPassword'] = (
self._handle_user_property_metastore_pwd)
def _handle_user_property_metastore_user(self, user_input, configurations):
hive_site_config_map = configurations['hive-site']
hive_site_config_map['javax.jdo.option.ConnectionUserName'] = (
user_input.value)
global_config_map = configurations['global']
global_config_map['hive_metastore_user_name'] = user_input.value
def _handle_user_property_metastore_pwd(self, user_input, configurations):
hive_site_config_map = configurations['hive-site']
hive_site_config_map['javax.jdo.option.ConnectionPassword'] = (
user_input.value)
global_config_map = configurations['global']
global_config_map['hive_metastore_user_passwd'] = user_input.value
def finalize_ng_components(self, cluster_spec):
hive_ng = cluster_spec.get_node_groups_containing_component(
'HIVE_SERVER')[0]
components = hive_ng.components
if not cluster_spec.get_deployed_node_group_count('HIVE_METASTORE'):
components.append('HIVE_METASTORE')
if not cluster_spec.get_deployed_node_group_count('MYSQL_SERVER'):
components.append('MYSQL_SERVER')
if not cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER'):
zk_service = next(service for service in cluster_spec.services
if service.name == 'ZOOKEEPER')
zk_service.deployed = True
components.append('ZOOKEEPER_SERVER')
class WebHCatService(Service):
def __init__(self):
super(WebHCatService, self).__init__(WebHCatService.get_service_id())
self.configurations.add('webhcat-site')
@classmethod
def get_service_id(cls):
return 'WEBHCAT'
def validate(self, cluster_spec, cluster):
count = cluster_spec.get_deployed_node_group_count('WEBHCAT_SERVER')
if count != 1:
raise ex.InvalidComponentCountException('WEBHCAT_SERVER', 1, count)
def finalize_configuration(self, cluster_spec):
webhcat_servers = cluster_spec.determine_component_hosts(
'WEBHCAT_SERVER')
if webhcat_servers:
self._replace_config_token(
cluster_spec, '%WEBHCAT_HOST%', webhcat_servers.pop().fqdn(),
{'core-site': ['hadoop.proxyuser.hcat.hosts']})
hive_ms_servers = cluster_spec.determine_component_hosts(
'HIVE_METASTORE')
if hive_ms_servers:
self._replace_config_token(
cluster_spec, '%HIVE_METASTORE_HOST%',
hive_ms_servers.pop().fqdn(),
{'webhcat-site': ['templeton.hive.properties']})
zk_servers = cluster_spec.determine_component_hosts('ZOOKEEPER_SERVER')
if zk_servers:
zk_list = ['{0}:2181'.format(z.fqdn()) for z in zk_servers]
self._replace_config_token(
cluster_spec, '%ZOOKEEPER_HOSTS%', ','.join(zk_list),
{'webhcat-site': ['templeton.zookeeper.hosts']})
def finalize_ng_components(self, cluster_spec):
webhcat_ng = cluster_spec.get_node_groups_containing_component(
'WEBHCAT_SERVER')[0]
components = webhcat_ng.components
if 'HDFS_CLIENT' not in components:
components.append('HDFS_CLIENT')
if 'MAPREDUCE_CLIENT' not in components:
components.append('MAPREDUCE_CLIENT')
if 'ZOOKEEPER_CLIENT' not in components:
# if zk server isn't in cluster, add to ng
if not cluster_spec.get_deployed_node_group_count(
'ZOOKEEPER_SERVER'):
zk_service = next(service for service in cluster_spec.services
if service.name == 'ZOOKEEPER')
zk_service.deployed = True
components.append('ZOOKEEPER_SERVER')
components.append('ZOOKEEPER_CLIENT')
class HBaseService(Service):
property_map = {
'hbase-site/hbase.tmp.dir': [
'hbase-site/hbase.tmp.dir', 'global/hbase_tmp_dir'],
'hbase-site/hbase.regionserver.global.memstore.upperLimit': [
'hbase-site/hbase.regionserver.global.memstore.upperLimit',
'global/regionserver_memstore_upperlimit'],
'hbase-site/hbase.hstore.blockingStoreFiles': [
'hbase-site/hbase.hstore.blockingStoreFiles',
'global/hstore_blockingstorefiles'],
'hbase-site/hbase.hstore.compactionThreshold': [
'hbase-site/hbase.hstore.compactionThreshold',
'global/hstore_compactionthreshold'],
'hbase-site/hfile.block.cache.size': [
'hbase-site/hfile.block.cache.size',
'global/hfile_blockcache_size'],
'hbase-site/hbase.hregion.max.filesize': [
'hbase-site/hbase.hregion.max.filesize',
'global/hstorefile_maxsize'],
'hbase-site/hbase.regionserver.handler.count': [
'hbase-site/hbase.regionserver.handler.count',
'global/regionserver_handlers'],
'hbase-site/hbase.hregion.majorcompaction': [
'hbase-site/hbase.hregion.majorcompaction',
'global/hregion_majorcompaction'],
'hbase-site/hbase.regionserver.global.memstore.lowerLimit': [
'hbase-site/hbase.regionserver.global.memstore.lowerLimit',
'global/regionserver_memstore_lowerlimit'],
'hbase-site/hbase.hregion.memstore.block.multiplier': [
'hbase-site/hbase.hregion.memstore.block.multiplier',
'global/hregion_blockmultiplier'],
'hbase-site/hbase.hregion.memstore.mslab.enabled': [
'hbase-site/hbase.hregion.memstore.mslab.enabled',
'global/regionserver_memstore_lab'],
'hbase-site/hbase.hregion.memstore.flush.size': [
'hbase-site/hbase.hregion.memstore.flush.size',
'global/hregion_memstoreflushsize'],
'hbase-site/hbase.client.scanner.caching': [
'hbase-site/hbase.client.scanner.caching',
'global/client_scannercaching'],
'hbase-site/zookeeper.session.timeout': [
'hbase-site/zookeeper.session.timeout',
'global/zookeeper_sessiontimeout'],
'hbase-site/hbase.client.keyvalue.maxsize': [
'hbase-site/hbase.client.keyvalue.maxsize',
'global/hfile_max_keyvalue_size'],
'hdfs-site/dfs.support.append': [
'hdfs-site/dfs.support.append',
'hbase-site/dfs.support.append',
'global/hdfs_support_append'],
'hbase-site/dfs.client.read.shortcircuit': [
'hbase-site/dfs.client.read.shortcircuit',
'global/hdfs_enable_shortcircuit_read']
}
def __init__(self):
super(HBaseService, self).__init__(
HBaseService.get_service_id())
self.configurations.add('hbase-site')
@classmethod
def get_service_id(cls):
return 'HBASE'
def validate(self, cluster_spec, cluster):
# check for a single HBASE_SERVER
count = cluster_spec.get_deployed_node_group_count('HBASE_MASTER')
if count != 1:
raise ex.InvalidComponentCountException('HBASE_MASTER', 1, count)
def register_service_urls(self, cluster_spec, url_info, cluster):
master_ip = cluster_spec.determine_component_hosts(
'HBASE_MASTER').pop().management_ip
hbase_config = cluster_spec.configurations['hbase-site']
info_port = hbase_config['hbase.master.info.port']
url_info['HBase'] = {
'Web UI': 'http://%s:%s/master-status' % (master_ip, info_port),
'Logs': 'http://%s:%s/logs' % (master_ip, info_port),
'Zookeeper Info': 'http://%s:%s/zk.jsp' % (master_ip, info_port),
'JMX': 'http://%s:%s/jmx' % (master_ip, info_port),
'Debug Dump': 'http://%s:%s/dump' % (master_ip, info_port),
'Thread Stacks': 'http://%s:%s/stacks' % (master_ip, info_port)
}
return url_info
def register_user_input_handlers(self, ui_handlers):
for prop_name in self.property_map:
ui_handlers[prop_name] = (
self._handle_config_property_update)
ui_handlers['hbase-site/hbase.rootdir'] = (
self._handle_user_property_root_dir)
def _handle_config_property_update(self, user_input, configurations):
self._update_config_values(configurations, user_input.value,
self.property_map[user_input.config.name])
def _handle_user_property_root_dir(self, user_input, configurations):
configurations['hbase-site']['hbase.rootdir'] = user_input.value
match = re.search('(^hdfs://)(.*?)(/.*)', user_input.value)
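        # groups are (scheme, authority, path); e.g. for the illustrative
        # value 'hdfs://namenode.example.org:8020/hbase', group(3) is '/hbase'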
if match:
configurations['global']['hbase_hdfs_root_dir'] = match.group(3)
else:
raise e.InvalidDataException(
_("Invalid value for property 'hbase-site/hbase.rootdir' : %s")
% user_input.value)
def finalize_configuration(self, cluster_spec):
nn_servers = cluster_spec.determine_component_hosts('NAMENODE')
if nn_servers:
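            # substitute the NameNode FQDN into hbase.rootdir, e.g. an
            # illustrative template 'hdfs://%NN_HOST%:8020/apps/hbase/data'
            # becomes 'hdfs://nn1.example.org:8020/apps/hbase/data'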
self._replace_config_token(
cluster_spec, '%NN_HOST%', nn_servers.pop().fqdn(),
{'hbase-site': ['hbase.rootdir']})
zk_servers = cluster_spec.determine_component_hosts('ZOOKEEPER_SERVER')
if zk_servers:
zk_list = [z.fqdn() for z in zk_servers]
self._replace_config_token(
cluster_spec, '%ZOOKEEPER_HOSTS%', ','.join(zk_list),
{'hbase-site': ['hbase.zookeeper.quorum']})
def finalize_ng_components(self, cluster_spec):
hbase_ng = cluster_spec.get_node_groups_containing_component(
'HBASE_MASTER')[0]
components = hbase_ng.components
if 'HDFS_CLIENT' not in components:
components.append('HDFS_CLIENT')
if not cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER'):
zk_service = next(service for service in cluster_spec.services
if service.name == 'ZOOKEEPER')
zk_service.deployed = True
components.append('ZOOKEEPER_SERVER')
class ZookeeperService(Service):
def __init__(self):
super(ZookeeperService, self).__init__(
ZookeeperService.get_service_id())
@classmethod
def get_service_id(cls):
return 'ZOOKEEPER'
def validate(self, cluster_spec, cluster):
count = cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER')
if count < 1:
raise ex.InvalidComponentCountException(
'ZOOKEEPER_SERVER', '1+', count)
class OozieService(Service):
def __init__(self):
super(OozieService, self).__init__(OozieService.get_service_id())
self.configurations.add('oozie-site')
@classmethod
def get_service_id(cls):
return 'OOZIE'
def validate(self, cluster_spec, cluster):
count = cluster_spec.get_deployed_node_group_count('OOZIE_SERVER')
if count != 1:
raise ex.InvalidComponentCountException(
'OOZIE_SERVER', 1, count)
count = cluster_spec.get_deployed_node_group_count('OOZIE_CLIENT')
if not count:
raise ex.InvalidComponentCountException(
'OOZIE_CLIENT', '1+', count)
def finalize_configuration(self, cluster_spec):
oozie_servers = cluster_spec.determine_component_hosts('OOZIE_SERVER')
if oozie_servers:
oozie_server = oozie_servers.pop()
name_list = [oozie_server.fqdn(), oozie_server.internal_ip,
oozie_server.management_ip]
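            # hadoop.proxyuser.oozie.hosts should list every name the Oozie
            # host may present (FQDN, internal and management IPs)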
self._replace_config_token(
cluster_spec, '%OOZIE_HOST%', oozie_server.fqdn(),
{'global': ['oozie_hostname'],
'oozie-site': ['oozie.base.url']})
self._replace_config_token(
cluster_spec, '%OOZIE_HOST%', ",".join(name_list),
{'core-site': ['hadoop.proxyuser.oozie.hosts']})
def finalize_ng_components(self, cluster_spec):
oozie_ng = cluster_spec.get_node_groups_containing_component(
'OOZIE_SERVER')[0]
components = oozie_ng.components
if 'HDFS_CLIENT' not in components:
components.append('HDFS_CLIENT')
if 'MAPREDUCE_CLIENT' not in components:
components.append('MAPREDUCE_CLIENT')
# ensure that mr and hdfs clients are colocated with oozie client
client_ngs = cluster_spec.get_node_groups_containing_component(
'OOZIE_CLIENT')
for ng in client_ngs:
components = ng.components
if 'HDFS_CLIENT' not in components:
components.append('HDFS_CLIENT')
if 'MAPREDUCE_CLIENT' not in components:
components.append('MAPREDUCE_CLIENT')
def register_service_urls(self, cluster_spec, url_info, cluster):
oozie_ip = cluster_spec.determine_component_hosts(
'OOZIE_SERVER').pop().management_ip
port = self._get_port_from_cluster_spec(cluster_spec, 'oozie-site',
'oozie.base.url')
url_info['JobFlow'] = {
'Oozie': 'http://%s:%s' % (oozie_ip, port)
}
return url_info
def register_user_input_handlers(self, ui_handlers):
ui_handlers['oozie-site/oozie.service.JPAService.jdbc.username'] = (
self._handle_user_property_db_user)
        ui_handlers['oozie-site/oozie.service.JPAService.jdbc.password'] = (
self._handle_user_property_db_pwd)
def _handle_user_property_db_user(self, user_input, configurations):
oozie_site_config_map = configurations['oozie-site']
oozie_site_config_map['oozie.service.JPAService.jdbc.username'] = (
user_input.value)
global_config_map = configurations['global']
global_config_map['oozie_metastore_user_name'] = user_input.value
def _handle_user_property_db_pwd(self, user_input, configurations):
oozie_site_config_map = configurations['oozie-site']
oozie_site_config_map['oozie.service.JPAService.jdbc.password'] = (
user_input.value)
global_config_map = configurations['global']
global_config_map['oozie_metastore_user_passwd'] = user_input.value
class GangliaService(Service):
def __init__(self):
super(GangliaService, self).__init__(GangliaService.get_service_id())
@classmethod
def get_service_id(cls):
return 'GANGLIA'
def validate(self, cluster_spec, cluster):
count = cluster_spec.get_deployed_node_group_count('GANGLIA_SERVER')
if count != 1:
raise ex.InvalidComponentCountException('GANGLIA_SERVER', 1, count)
def is_user_template_component(self, component):
return component.name != 'GANGLIA_MONITOR'
def finalize_ng_components(self, cluster_spec):
for ng in cluster_spec.node_groups.values():
if 'GANGLIA_MONITOR' not in ng.components:
ng.components.append('GANGLIA_MONITOR')
class AmbariService(Service):
def __init__(self):
super(AmbariService, self).__init__(AmbariService.get_service_id())
self.configurations.add('ambari')
# TODO(jspeidel): don't hard code default admin user
self.admin_user_name = 'admin'
@classmethod
def get_service_id(cls):
return 'AMBARI'
def validate(self, cluster_spec, cluster):
count = cluster_spec.get_deployed_node_group_count('AMBARI_SERVER')
if count != 1:
raise ex.InvalidComponentCountException('AMBARI_SERVER', 1, count)
def register_service_urls(self, cluster_spec, url_info, cluster):
ambari_ip = cluster_spec.determine_component_hosts(
'AMBARI_SERVER').pop().management_ip
port = cluster_spec.configurations['ambari'].get(
'server.port', '8080')
url_info['Ambari Console'] = {
'Web UI': 'http://{0}:{1}'.format(ambari_ip, port)
}
return url_info
def is_user_template_component(self, component):
return component.name != 'AMBARI_AGENT'
def register_user_input_handlers(self, ui_handlers):
ui_handlers['ambari-stack/ambari.admin.user'] = (
self._handle_user_property_admin_user)
ui_handlers['ambari-stack/ambari.admin.password'] = (
self._handle_user_property_admin_password)
def is_mandatory(self):
return True
def _handle_user_property_admin_user(self, user_input, configurations):
admin_user = next(user for user in self.users
if user.name == 'admin')
admin_user.name = user_input.value
self.admin_user_name = user_input.value
def _handle_user_property_admin_password(self, user_input, configurations):
admin_user = next(user for user in self.users
if user.name == self.admin_user_name)
admin_user.password = user_input.value
class SqoopService(Service):
def __init__(self):
super(SqoopService, self).__init__(SqoopService.get_service_id())
@classmethod
def get_service_id(cls):
return 'SQOOP'
def finalize_ng_components(self, cluster_spec):
sqoop_ngs = cluster_spec.get_node_groups_containing_component('SQOOP')
for ng in sqoop_ngs:
if 'HDFS_CLIENT' not in ng.components:
ng.components.append('HDFS_CLIENT')
if 'MAPREDUCE_CLIENT' not in ng.components:
ng.components.append('MAPREDUCE_CLIENT')

View File

@ -1,703 +0,0 @@
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import pkg_resources as pkg
from sahara import context
from sahara import exceptions as exc
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins import exceptions as ex
from sahara.plugins.hdp import clusterspec as cs
from sahara.plugins.hdp import configprovider as cfgprov
from sahara.plugins.hdp.versions import abstractversionhandler as avm
from sahara.plugins.hdp.versions.version_1_3_2 import edp_engine
from sahara.plugins.hdp.versions.version_1_3_2 import services
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import poll_utils
from sahara import version
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VersionHandler(avm.AbstractVersionHandler):
config_provider = None
version = None
client = None
def _set_version(self, version):
self.version = version
def _get_config_provider(self):
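        # lazily build the provider from the ambari-config-resource.json
        # bundled with this plugin version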
if self.config_provider is None:
self.config_provider = cfgprov.ConfigurationProvider(
json.load(pkg.resource_stream(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'ambari-config-resource.json')),
hadoop_version='1.3.2')
return self.config_provider
def get_version(self):
return self.version
def get_ambari_client(self):
if not self.client:
self.client = AmbariClient(self)
return self.client
def get_config_items(self):
return self._get_config_provider().get_config_items()
def get_applicable_target(self, name):
return self._get_config_provider().get_applicable_target(name)
def get_cluster_spec(self, cluster, user_inputs,
scaled_groups=None, cluster_template=None):
if cluster_template:
cluster_spec = cs.ClusterSpec(cluster_template)
else:
if scaled_groups:
for ng in cluster.node_groups:
ng_id = ng['id']
if (ng_id in scaled_groups and
ng['count'] > scaled_groups[ng_id]):
raise ex.ClusterCannotBeScaled(
cluster.name,
_('The HDP plugin does not support '
'the decommissioning of nodes '
'for HDP version 1.3.2'))
cluster_spec = self.get_default_cluster_configuration()
cluster_spec.create_operational_config(
cluster, user_inputs, scaled_groups)
cs.validate_number_of_datanodes(
cluster, scaled_groups, self.get_config_items())
return cluster_spec
def get_default_cluster_configuration(self):
return cs.ClusterSpec(self._get_default_cluster_template())
def _get_default_cluster_template(self):
return pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
def get_node_processes(self):
node_processes = {}
for service in self.get_default_cluster_configuration().services:
components = []
for component in service.components:
components.append(component.name)
node_processes[service.name] = components
return node_processes
def install_swift_integration(self, servers):
if servers:
cpo.add_provisioning_step(
servers[0].cluster_id, _("Install Swift integration"),
len(servers))
for server in servers:
with context.set_current_instance_id(
server.instance['instance_id']):
server.install_swift_integration()
def get_services_processor(self):
return services
def get_edp_engine(self, cluster, job_type):
if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
return edp_engine.EdpOozieEngine(cluster)
return None
def get_edp_job_types(self):
return edp_engine.EdpOozieEngine.get_supported_job_types()
def get_edp_config_hints(self, job_type):
return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
def get_open_ports(self, node_group):
ports = [8660] # for Ganglia
ports_map = {
'AMBARI_SERVER': [8080, 8440, 8441],
'NAMENODE': [50070, 50470, 8020, 9000],
'DATANODE': [50075, 50475, 50010, 50020],
'SECONDARY_NAMENODE': [50090],
'JOBTRACKER': [50030, 8021],
'TASKTRACKER': [50060],
'HISTORYSERVER': [51111],
'HIVE_SERVER': [10000],
'HIVE_METASTORE': [9083],
'HBASE_MASTER': [60000, 60010],
'HBASE_REGIONSERVER': [60020, 60030],
'WEBHCAT_SERVER': [50111],
'GANGLIA_SERVER': [8661, 8662, 8663, 8651],
'MYSQL_SERVER': [3306],
'OOZIE_SERVER': [11000, 11001],
'ZOOKEEPER_SERVER': [2181, 2888, 3888],
'NAGIOS_SERVER': [80]
}
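        # open only the ports of processes actually placed in this node
        # group; 8660 (the Ganglia monitor) is opened on every node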
for process in node_group.node_processes:
if process in ports_map:
ports.extend(ports_map[process])
return ports
class AmbariClient(object):
def __init__(self, handler):
# add an argument for neutron discovery
self.handler = handler
def _get_http_session(self, host, port):
return host.remote().get_http_client(port)
def _get_standard_headers(self):
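        # Ambari's CSRF protection expects an X-Requested-By header on
        # modifying requests, so it is sent with every call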
return {"X-Requested-By": "sahara"}
def _post(self, url, ambari_info, data=None):
if data:
LOG.debug('AmbariClient:_post call, url = {url} data = {data}'
.format(url=url, data=str(data)))
else:
LOG.debug('AmbariClient:_post call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.post(url, data=data,
auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
def _delete(self, url, ambari_info):
LOG.debug('AmbariClient:_delete call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.delete(url,
auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
def _put(self, url, ambari_info, data=None):
if data:
LOG.debug('AmbariClient:_put call, url = {url} data = {data}'
.format(url=url, data=str(data)))
else:
LOG.debug('AmbariClient:_put call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
auth = (ambari_info.user, ambari_info.password)
return session.put(url, data=data, auth=auth,
headers=self._get_standard_headers())
def _get(self, url, ambari_info):
LOG.debug('AmbariClient:_get call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.get(url, auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
def _add_cluster(self, ambari_info, name):
add_cluster_url = 'http://{0}/api/v1/clusters/{1}'.format(
ambari_info.get_address(), name)
result = self._post(add_cluster_url, ambari_info,
data='{"Clusters": {"version" : "HDP-' +
self.handler.get_version() + '"}}')
if result.status_code != 201:
LOG.error(_LE('Create cluster command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add cluster: %s') % result.text)
@cpo.event_wrapper(True, step=_("Add configurations to cluster"),
param=('ambari_info', 2))
def _add_configurations_to_cluster(
self, cluster_spec, ambari_info, name):
existing_config_url = ('http://{0}/api/v1/clusters/{1}?fields='
'Clusters/desired_configs'.format(
ambari_info.get_address(), name))
result = self._get(existing_config_url, ambari_info)
json_result = json.loads(result.text)
existing_configs = json_result['Clusters']['desired_configs']
configs = cluster_spec.get_deployed_configurations()
if 'ambari' in configs:
configs.remove('ambari')
if len(configs) == len(existing_configs):
# nothing to do
return
config_url = 'http://{0}/api/v1/clusters/{1}'.format(
ambari_info.get_address(), name)
body = {}
clusters = {}
version = 1
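        # Ambari stores each configuration under a versioned tag ('v1',
        # 'v2', ...); the tag is bumped when an existing core-site or
        # global config is overwritten, other existing configs are skipped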
body['Clusters'] = clusters
for config_name in configs:
if config_name in existing_configs:
if config_name == 'core-site' or config_name == 'global':
existing_version = (
existing_configs[config_name]['tag'].lstrip('v'))
version = int(existing_version) + 1
else:
continue
config_body = {}
clusters['desired_config'] = config_body
config_body['type'] = config_name
config_body['tag'] = 'v%s' % version
config_body['properties'] = (
cluster_spec.configurations[config_name])
result = self._put(config_url, ambari_info, data=json.dumps(body))
if result.status_code != 200:
LOG.error(
_LE('Set configuration command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to set configurations on cluster: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Add services to cluster"), param=('ambari_info', 2))
def _add_services_to_cluster(self, cluster_spec, ambari_info, name):
services = cluster_spec.services
add_service_url = 'http://{0}/api/v1/clusters/{1}/services/{2}'
for service in services:
if service.deployed and service.name != 'AMBARI':
result = self._post(add_service_url.format(
ambari_info.get_address(), name, service.name),
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create service command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add services to cluster: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Add components to services"), param=('ambari_info', 2))
def _add_components_to_services(self, cluster_spec, ambari_info, name):
add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{'
'2}/components/{3}')
for service in cluster_spec.services:
if service.deployed and service.name != 'AMBARI':
for component in service.components:
result = self._post(add_component_url.format(
ambari_info.get_address(), name, service.name,
component.name),
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create component command failed. {result}')
.format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add components to services: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Add hosts and components"), param=('ambari_info', 3))
def _add_hosts_and_components(
self, cluster_spec, servers, ambari_info, name):
add_host_url = 'http://{0}/api/v1/clusters/{1}/hosts/{2}'
add_host_component_url = ('http://{0}/api/v1/clusters/{1}'
'/hosts/{2}/host_components/{3}')
for host in servers:
with context.set_current_instance_id(host.instance['instance_id']):
hostname = host.instance.fqdn().lower()
result = self._post(
add_host_url.format(ambari_info.get_address(), name,
hostname), ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host: %s') % result.text)
node_group_name = host.node_group.name
# TODO(jspeidel): ensure that node group exists
node_group = cluster_spec.node_groups[node_group_name]
for component in node_group.components:
# don't add any AMBARI components
if component.find('AMBARI') != 0:
result = self._post(add_host_component_url.format(
ambari_info.get_address(), name, hostname,
component), ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host_component command failed. '
'{result}').format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host component: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Install services"), param=('ambari_info', 2))
def _install_services(self, cluster_name, ambari_info):
ambari_address = ambari_info.get_address()
install_url = ('http://{0}/api/v1/clusters/{'
'1}/services?ServiceInfo/state=INIT'.format(
ambari_address, cluster_name))
body = ('{"RequestInfo" : { "context" : "Install all services" },'
'"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')
result = self._put(install_url, ambari_info, data=body)
if result.status_code == 202:
json_result = json.loads(result.text)
request_id = json_result['Requests']['id']
success = self._wait_for_async_request(self._get_async_request_uri(
ambari_info, cluster_name, request_id),
ambari_info)
if success:
LOG.info(_LI("Hadoop stack installed successfully."))
self._finalize_ambari_state(ambari_info)
else:
LOG.error(_LE('Install command failed.'))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Install command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
def _get_async_request_uri(self, ambari_info, cluster_name, request_id):
return ('http://{0}/api/v1/clusters/{1}/requests/{'
'2}/tasks?fields=Tasks/status'.format(
ambari_info.get_address(), cluster_name,
request_id))
def _wait_for_async_request(self, request_url, ambari_info):
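        # poll the request's task list every 5 seconds until no task is in
        # a non-terminal state; FAILED or ABORTED ends the wait immediately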
started = False
while not started:
result = self._get(request_url, ambari_info)
LOG.debug(
'async request {url} response: {response}'.format(
url=request_url, response=result.text))
json_result = json.loads(result.text)
started = True
for items in json_result['items']:
status = items['Tasks']['status']
if status == 'FAILED' or status == 'ABORTED':
return False
else:
if status != 'COMPLETED':
started = False
context.sleep(5)
return started
def _finalize_ambari_state(self, ambari_info):
persist_state_uri = 'http://{0}/api/v1/persist'.format(
ambari_info.get_address())
# this post data has non-standard format because persist
# resource doesn't comply with Ambari API standards
persist_data = ('{ "CLUSTER_CURRENT_STATUS":'
'"{\\"clusterState\\":\\"CLUSTER_STARTED_5\\"}" }')
result = self._post(persist_state_uri, ambari_info, data=persist_data)
if result.status_code != 201 and result.status_code != 202:
            LOG.warning(_LW('Finalizing of Ambari cluster state failed. '
                            '{result}').format(result=result.text))
raise ex.HadoopProvisionError(_('Unable to finalize Ambari '
'state.'))
LOG.info(_LI('Ambari cluster state finalized.'))
@cpo.event_wrapper(
True, step=_("Start services"), param=('ambari_info', 3))
def start_services(self, cluster_name, cluster_spec, ambari_info):
start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
'state=INSTALLED'.format(
ambari_info.get_address(), cluster_name))
body = ('{"RequestInfo" : { "context" : "Start all services" },'
'"Body" : {"ServiceInfo": {"state" : "STARTED"}}}')
self._fire_service_start_notifications(
cluster_name, cluster_spec, ambari_info)
result = self._put(start_url, ambari_info, data=body)
if result.status_code == 202:
json_result = json.loads(result.text)
request_id = json_result['Requests']['id']
success = self._wait_for_async_request(
self._get_async_request_uri(ambari_info, cluster_name,
request_id), ambari_info)
if success:
LOG.info(
_LI("Successfully started Hadoop cluster."))
LOG.info(_LI('Ambari server address: {server_address}')
.format(server_address=ambari_info.get_address()))
else:
LOG.error(_LE('Failed to start Hadoop cluster.'))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Start command failed. Status: {status}, '
'response: {response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
def _exec_ambari_command(self, ambari_info, body, cmd_uri):
LOG.debug('PUT URI: {uri}'.format(uri=cmd_uri))
result = self._put(cmd_uri, ambari_info, data=body)
if result.status_code == 202:
LOG.debug(
'PUT response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
href = json_result['href'] + '/tasks?fields=Tasks/status'
success = self._wait_for_async_request(href, ambari_info)
if success:
LOG.info(
_LI("Successfully changed state of Hadoop components "))
else:
LOG.error(_LE('Failed to change state of Hadoop components'))
raise ex.HadoopProvisionError(
_('Failed to change state of Hadoop components'))
else:
LOG.error(
_LE('Command failed. Status: {status}, response: '
'{response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.'))
def _get_host_list(self, servers):
host_list = [server.instance.fqdn().lower() for server in servers]
return ",".join(host_list)
def _install_and_start_components(self, cluster_name, servers,
ambari_info, cluster_spec):
auth = (ambari_info.user, ambari_info.password)
self._install_components(ambari_info, auth, cluster_name, servers)
self.handler.install_swift_integration(servers)
self._start_components(ambari_info, auth, cluster_name,
servers, cluster_spec)
def _install_components(self, ambari_info, auth, cluster_name, servers):
# query for the host components on the given hosts that are in the
# INIT state
# TODO(jspeidel): provide request context
body = '{"HostRoles": {"state" : "INSTALLED"}}'
install_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INIT&'
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
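        # moving the desired state of the INIT components to INSTALLED asks
        # Ambari to install them on the newly added hosts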
self._exec_ambari_command(ambari_info, body, install_uri)
LOG.info(_LI('Started Hadoop components while scaling up'))
LOG.info(_LI('Ambari server ip {ip}')
.format(ip=ambari_info.get_address()))
def _start_components(self, ambari_info, auth, cluster_name, servers,
cluster_spec):
# query for all the host components in the INSTALLED state,
# then get a list of the client services in the list
installed_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
result = self._get(installed_uri, ambari_info)
if result.status_code == 200:
LOG.debug(
'GET response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
items = json_result['items']
client_set = cluster_spec.get_components_for_type('CLIENT')
inclusion_list = list(set([x['HostRoles']['component_name']
for x in items
if x['HostRoles']['component_name']
not in client_set]))
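            # client-only components cannot be moved to STARTED, so they
            # are excluded from the start request below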
# query and start all non-client components on the given set of
# hosts
# TODO(jspeidel): Provide request context
body = '{"HostRoles": {"state" : "STARTED"}}'
start_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'
'&HostRoles/component_name.in({3})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers),
",".join(inclusion_list)))
self._exec_ambari_command(ambari_info, body, start_uri)
else:
raise ex.HadoopProvisionError(
_('Unable to determine installed service '
'components in scaled instances. status'
                  ' code returned = {0}').format(result.status_code))
def _check_host_registrations(self, num_hosts, ambari_info):
url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
try:
result = self._get(url, ambari_info)
json_result = json.loads(result.text)
LOG.debug('Registered Hosts: {current_number} '
'of {final_number}'.format(
current_number=len(json_result['items']),
final_number=num_hosts))
for hosts in json_result['items']:
LOG.debug('Registered Host: {host}'.format(
host=hosts['Hosts']['host_name']))
return result and len(json_result['items']) >= num_hosts
except Exception:
LOG.debug('Waiting to connect to ambari server')
return False
@cpo.event_wrapper(True, step=_("Wait for all Ambari agents to register"),
param=('ambari_info', 2))
def wait_for_host_registrations(self, num_hosts, ambari_info):
cluster = ambari_info.get_cluster()
poll_utils.plugin_option_poll(
cluster, self._check_host_registrations,
cfgprov.HOST_REGISTRATIONS_TIMEOUT,
_("Wait for host registrations"), 5, {
'num_hosts': num_hosts, 'ambari_info': ambari_info})
def update_ambari_admin_user(self, password, ambari_info):
old_pwd = ambari_info.password
user_url = 'http://{0}/api/v1/users/admin'.format(
ambari_info.get_address())
update_body = ('{{"Users":{{"roles":"admin","password":"{0}",'
'"old_password":"{1}"}} }}'.format(password, old_pwd))
result = self._put(user_url, ambari_info, data=update_body)
if result.status_code != 200:
raise ex.HadoopProvisionError(_('Unable to update Ambari admin '
'user credentials: {0}').format(
result.text))
def add_ambari_user(self, user, ambari_info):
user_url = 'http://{0}/api/v1/users/{1}'.format(
ambari_info.get_address(), user.name)
create_body = ('{{"Users":{{"password":"{0}","roles":"{1}"}} }}'.
format(user.password, '%s' %
','.join(map(str, user.groups))))
result = self._post(user_url, ambari_info, data=create_body)
if result.status_code != 201:
raise ex.HadoopProvisionError(
_('Unable to create Ambari user: {0}').format(result.text))
def delete_ambari_user(self, user_name, ambari_info):
user_url = 'http://{0}/api/v1/users/{1}'.format(
ambari_info.get_address(), user_name)
result = self._delete(user_url, ambari_info)
if result.status_code != 200:
raise ex.HadoopProvisionError(
_('Unable to delete Ambari user: %(user_name)s'
' : %(text)s') %
{'user_name': user_name, 'text': result.text})
def configure_scaled_cluster_instances(self, name, cluster_spec,
num_hosts, ambari_info):
self.wait_for_host_registrations(num_hosts, ambari_info)
self._add_configurations_to_cluster(
cluster_spec, ambari_info, name)
self._add_services_to_cluster(
cluster_spec, ambari_info, name)
self._add_components_to_services(
cluster_spec, ambari_info, name)
self._install_services(name, ambari_info)
def start_scaled_cluster_instances(self, name, cluster_spec, servers,
ambari_info):
self.start_services(name, cluster_spec, ambari_info)
self._add_hosts_and_components(
cluster_spec, servers, ambari_info, name)
self._install_and_start_components(
name, servers, ambari_info, cluster_spec)
@cpo.event_wrapper(
True, step=_("Decommission nodes"), param=('cluster', 1))
def decommission_cluster_instances(self, cluster, clusterspec, instances,
ambari_info):
raise exc.InvalidDataException(_('The HDP plugin does not support '
'the decommissioning of nodes '
'for HDP version 1.3.2'))
def provision_cluster(self, cluster_spec, servers, ambari_info, name):
self._add_cluster(ambari_info, name)
self._add_configurations_to_cluster(cluster_spec, ambari_info, name)
self._add_services_to_cluster(cluster_spec, ambari_info, name)
self._add_components_to_services(cluster_spec, ambari_info, name)
self._add_hosts_and_components(
cluster_spec, servers, ambari_info, name)
self._install_services(name, ambari_info)
self.handler.install_swift_integration(servers)
def cleanup(self, ambari_info):
try:
ambari_info.host.remote().close_http_session(ambari_info.port)
except exc.NotFoundException:
LOG.warning(_LW("HTTP session is not cached"))
def _get_services_in_state(self, cluster_name, ambari_info, state):
services_url = ('http://{0}/api/v1/clusters/{1}/services?'
'ServiceInfo/state.in({2})'.format(
ambari_info.get_address(), cluster_name, state))
result = self._get(services_url, ambari_info)
json_result = json.loads(result.text)
services = []
for service in json_result['items']:
services.append(service['ServiceInfo']['service_name'])
return services
def _fire_service_start_notifications(self, cluster_name,
cluster_spec, ambari_info):
started_services = self._get_services_in_state(
cluster_name, ambari_info, 'STARTED')
for service in cluster_spec.services:
if service.deployed and service.name not in started_services:
service.pre_service_start(cluster_spec, ambari_info,
started_services)

View File

@ -41,7 +41,7 @@ def get_instance_info(*args, **kwargs):
return args[0].instance_info
def create_clusterspec(hdp_version='1.3.2'):
def create_clusterspec(hdp_version='2.0.6'):
version_suffix = hdp_version.replace('.', '_')
cluster_config_file = pkg.resource_string(
version.version_info.package,

View File

@ -28,7 +28,7 @@ from sahara.utils import edp
from sahara import version
GET_REST_REQ = ("sahara.plugins.hdp.versions.version_1_3_2.versionhandler."
GET_REST_REQ = ("sahara.plugins.hdp.versions.version_2_0_6.versionhandler."
"AmbariClient._get_http_session")
@ -43,66 +43,33 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
def test_get_node_processes(self):
plugin = ap.AmbariPlugin()
service_components = plugin.get_node_processes('1.3.2')
self.assertEqual(13, len(service_components))
components = service_components['HDFS']
self.assertIn('NAMENODE', components)
self.assertIn('DATANODE', components)
self.assertIn('SECONDARY_NAMENODE', components)
self.assertIn('HDFS_CLIENT', components)
components = service_components['MAPREDUCE']
self.assertIn('JOBTRACKER', components)
self.assertIn('TASKTRACKER', components)
self.assertIn('MAPREDUCE_CLIENT', components)
components = service_components['GANGLIA']
self.assertIn('GANGLIA_SERVER', components)
components = service_components['NAGIOS']
self.assertIn('NAGIOS_SERVER', components)
components = service_components['AMBARI']
self.assertIn('AMBARI_SERVER', components)
components = service_components['HCATALOG']
self.assertIn('HCAT', components)
components = service_components['ZOOKEEPER']
self.assertIn('ZOOKEEPER_SERVER', components)
self.assertIn('ZOOKEEPER_CLIENT', components)
components = service_components['HIVE']
self.assertIn('HIVE_SERVER', components)
self.assertIn('HIVE_METASTORE', components)
self.assertIn('HIVE_CLIENT', components)
self.assertIn('MYSQL_SERVER', components)
components = service_components['PIG']
self.assertIn('PIG', components)
components = service_components['WEBHCAT']
self.assertIn('WEBHCAT_SERVER', components)
components = service_components['OOZIE']
self.assertIn('OOZIE_SERVER', components)
self.assertIn('OOZIE_CLIENT', components)
self.assertIn('SQOOP', service_components['SQOOP'])
components = service_components['HBASE']
self.assertIn('HBASE_MASTER', components)
self.assertIn('HBASE_REGIONSERVER', components)
self.assertIn('HBASE_CLIENT', components)
service_components = plugin.get_node_processes('2.0.6')
self.assertEqual({
'YARN': ['RESOURCEMANAGER', 'YARN_CLIENT', 'NODEMANAGER'],
'GANGLIA': ['GANGLIA_SERVER'],
'HUE': ['HUE'],
'HIVE': ['HIVE_SERVER', 'HIVE_METASTORE', 'HIVE_CLIENT',
'MYSQL_SERVER'],
'OOZIE': ['OOZIE_SERVER', 'OOZIE_CLIENT'],
'HDFS': ['NAMENODE', 'DATANODE', 'SECONDARY_NAMENODE',
'HDFS_CLIENT', 'JOURNALNODE', 'ZKFC'],
'SQOOP': ['SQOOP'],
'MAPREDUCE2': ['HISTORYSERVER', 'MAPREDUCE2_CLIENT'],
'ZOOKEEPER': ['ZOOKEEPER_SERVER', 'ZOOKEEPER_CLIENT'],
'HBASE': ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'HBASE_CLIENT'],
'HCATALOG': ['HCAT'],
'NAGIOS': ['NAGIOS_SERVER'],
'AMBARI': ['AMBARI_SERVER'],
'WEBHCAT': ['WEBHCAT_SERVER'],
'PIG': ['PIG']}, service_components)
def test_convert(self):
plugin = ap.AmbariPlugin()
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'plugins/hdp/versions/version_2_0_6/resources/'
'default-cluster.template')
cluster = plugin.convert(cluster_config_file, 'ambari', '1.3.2',
cluster = plugin.convert(cluster_config_file, 'ambari', '2.0.6',
'test-plugin', create_cluster_template)
normalized_config = cs.ClusterSpec(cluster_config_file).normalize()
@ -119,13 +86,13 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'plugins/hdp/versions/version_2_0_6/resources/'
'default-cluster.template')
cluster_spec = cs.ClusterSpec(cluster_config_file)
ambari_info = ap.AmbariInfo(TestHost('111.11.1111'),
'8080', 'admin', 'old-pwd')
plugin._set_ambari_credentials(cluster_spec, ambari_info, '1.3.2')
plugin._set_ambari_credentials(cluster_spec, ambari_info, '2.0.6')
self.assertEqual(1, len(self.requests))
request = self.requests[0]
@ -146,7 +113,7 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'plugins/hdp/versions/version_2_0_6/resources/'
'default-cluster.template')
cluster_spec = cs.ClusterSpec(cluster_config_file)
@ -158,7 +125,7 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080',
'admin', 'old-pwd')
plugin._set_ambari_credentials(cluster_spec, ambari_info, '1.3.2')
plugin._set_ambari_credentials(cluster_spec, ambari_info, '2.0.6')
self.assertEqual(2, len(self.requests))
request = self.requests[0]
@ -186,7 +153,7 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'plugins/hdp/versions/version_2_0_6/resources/'
'default-cluster.template')
cluster_spec = cs.ClusterSpec(cluster_config_file)
@ -197,7 +164,7 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080',
'admin', 'old-pwd')
plugin._set_ambari_credentials(cluster_spec, ambari_info, '1.3.2')
plugin._set_ambari_credentials(cluster_spec, ambari_info, '2.0.6')
self.assertEqual(2, len(self.requests))
request = self.requests[0]
@ -228,7 +195,7 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'plugins/hdp/versions/version_2_0_6/resources/'
'default-cluster.template')
cluster_spec = cs.ClusterSpec(cluster_config_file)
@ -244,16 +211,16 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
self.assertRaises(ex.HadoopProvisionError,
plugin._set_ambari_credentials,
cluster_spec, ambari_info, '1.3.2')
cluster_spec, ambari_info, '2.0.6')
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
base.get_instance_info)
@mock.patch('sahara.plugins.hdp.versions.version_1_3_2.services.'
@mock.patch('sahara.plugins.hdp.versions.version_2_0_6.services.'
'HdfsService._get_swift_properties', return_value=[])
def test__get_ambari_info(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'plugins/hdp/versions/version_2_0_6/resources/'
'default-cluster.template')
test_host = base.TestServer(
@ -262,7 +229,10 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
node_group = base.TestNodeGroup(
'ng1', [test_host], ["AMBARI_SERVER", "NAMENODE", "DATANODE",
"JOBTRACKER", "TASKTRACKER"])
'RESOURCEMANAGER', 'YARN_CLIENT',
'NODEMANAGER',
'HISTORYSERVER', 'MAPREDUCE2_CLIENT',
'ZOOKEEPER_SERVER', 'ZOOKEEPER_CLIENT'])
cluster = base.TestCluster([node_group])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [])
@ -285,7 +255,7 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'plugins/hdp/versions/version_2_0_6/resources/'
'default-cluster.template')
cluster_spec = cs.ClusterSpec(cluster_config_file)
@ -303,7 +273,7 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
node_group = base.TestNodeGroup(
'ng1', [test_host], ["AMBARI_SERVER", "NAMENODE", "DATANODE",
"JOBTRACKER", "TASKTRACKER", "OOZIE_SERVER"])
"OOZIE_SERVER"])
cluster = base.TestCluster([node_group])
cluster.hadoop_version = '2.0.6'
plugin = ap.AmbariPlugin()
@ -313,23 +283,12 @@ class AmbariPluginTest(sahara_base.SaharaTestCase):
node_group = base.TestNodeGroup(
'ng1', [test_host], ["AMBARI_SERVER", "NAMENODE", "DATANODE",
"JOBTRACKER", "TASKTRACKER", "NOT_OOZIE"])
"NOT_OOZIE"])
cluster = base.TestCluster([node_group])
cluster.hadoop_version = '2.0.6'
self.assertIsNone(plugin.get_edp_engine(
cluster, edp.JOB_TYPE_PIG).get_oozie_server(cluster))
@mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop1')
def test_edp132_calls_hadoop1_create_dir(self, create_dir):
cluster = base.TestCluster([])
cluster.plugin_name = 'hdp'
cluster.hadoop_version = '1.3.2'
plugin = ap.AmbariPlugin()
plugin.get_edp_engine(cluster, edp.JOB_TYPE_PIG).create_hdfs_dir(
mock.Mock(), '/tmp')
self.assertEqual(1, create_dir.call_count)
@mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop2')
def test_edp206_calls_hadoop2_create_dir(self, create_dir):
cluster = base.TestCluster([])

File diff suppressed because it is too large

View File

@ -21,14 +21,14 @@ from sahara.plugins.hdp.versions import versionhandlerfactory as vhf
from sahara.tests.unit import base
from sahara.tests.unit.plugins.hdp import hdp_test_base
versions = ['1.3.2', '2.0.6']
versions = ['2.0.6']
class ServicesTest(base.SaharaTestCase):
# TODO(jspeidel): test remaining service functionality which isn't
# tested by coarser grained unit tests.
def get_services_processor(self, version='1.3.2'):
def get_services_processor(self, version='2.0.6'):
handler = (vhf.VersionHandlerFactory.get_instance().
get_version_handler(version))
s = handler.get_services_processor()
@ -44,31 +44,6 @@ class ServicesTest(base.SaharaTestCase):
expected_configs & service.configurations)
self.assertTrue(service.is_mandatory())
def test_hdfs_service_register_urls(self):
s = self.get_services_processor()
service = s.create_service('HDFS')
cluster_spec = mock.Mock()
cluster_spec.configurations = {
'core-site': {
'fs.default.name': 'hdfs://not_expected.com:9020'
},
'hdfs-site': {
'dfs.http.address': 'http://not_expected.com:10070'
}
}
instance_mock = mock.Mock()
instance_mock.management_ip = '127.0.0.1'
cluster_spec.determine_component_hosts = mock.Mock(
return_value=[instance_mock])
cluster = mock.Mock(cluster_configs={}, name="hdp")
url_info = {}
url_info = service.register_service_urls(cluster_spec, url_info,
cluster)
self.assertEqual(url_info['HDFS']['Web UI'],
'http://127.0.0.1:10070')
self.assertEqual(url_info['HDFS']['NameNode'],
'hdfs://127.0.0.1:9020')
def test_hdp2_hdfs_service_register_urls(self):
s = self.get_services_processor('2.0.6')
service = s.create_service('HDFS')
@ -122,15 +97,6 @@ class ServicesTest(base.SaharaTestCase):
self.assertEqual(url_info['HDFS']['NameService'],
'hdfs://hdp-cluster')
def test_create_mr_service(self):
s = self.get_services_processor()
service = s.create_service('MAPREDUCE')
self.assertEqual('MAPREDUCE', service.name)
expected_configs = set(['global', 'core-site', 'mapred-site'])
self.assertEqual(expected_configs,
expected_configs & service.configurations)
self.assertTrue(service.is_mandatory())
def test_create_mr2_service(self):
s = self.get_services_processor('2.0.6')
service = s.create_service('MAPREDUCE2')
@ -140,31 +106,6 @@ class ServicesTest(base.SaharaTestCase):
expected_configs & service.configurations)
self.assertTrue(service.is_mandatory())
def test_mr_service_register_urls(self):
s = self.get_services_processor()
service = s.create_service('MAPREDUCE')
cluster_spec = mock.Mock()
cluster_spec.configurations = {
'mapred-site': {
'mapred.job.tracker': 'hdfs://not_expected.com:10300',
'mapred.job.tracker.http.address':
'http://not_expected.com:10030',
'mapreduce.jobhistory.webapp.address':
'http://not_expected.com:10030'
}
}
instance_mock = mock.Mock()
instance_mock.management_ip = '127.0.0.1'
cluster_spec.determine_component_hosts = mock.Mock(
return_value=[instance_mock])
url_info = {}
url_info = service.register_service_urls(cluster_spec, url_info,
mock.Mock())
self.assertEqual(url_info['MapReduce']['Web UI'],
'http://127.0.0.1:10030')
self.assertEqual(url_info['MapReduce']['JobTracker'],
'127.0.0.1:10300')
def test_hdp2_mr2_service_register_urls(self):
s = self.get_services_processor('2.0.6')
service = s.create_service('MAPREDUCE2')
@ -217,7 +158,7 @@ class ServicesTest(base.SaharaTestCase):
expected_configs = set(['global', 'core-site'])
self.assertEqual(expected_configs,
expected_configs & service.configurations)
self.assertFalse(service.is_mandatory())
self.assertTrue(service.is_mandatory())
def test_create_oozie_service(self):
for version in versions:
@ -269,49 +210,6 @@ class ServicesTest(base.SaharaTestCase):
expected_configs & service.configurations)
self.assertTrue(service.is_mandatory())
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
hdp_test_base.get_instance_info)
@mock.patch(
'sahara.plugins.hdp.versions.version_1_3_2.services.HdfsService.'
'_get_swift_properties',
return_value=[])
def test_create_sqoop_service(self, patched):
s = self.get_services_processor()
service = s.create_service('SQOOP')
self.assertEqual('SQOOP', service.name)
expected_configs = set(['global', 'core-site'])
self.assertEqual(expected_configs,
expected_configs & service.configurations)
self.assertFalse(service.is_mandatory())
# ensure that hdfs and mr clients are added implicitly
master_host = hdp_test_base.TestServer(
'master.novalocal', 'master', '11111', 3,
'111.11.1111', '222.11.1111')
master_ng = hdp_test_base.TestNodeGroup(
'master',
[master_host],
["NAMENODE",
"JOBTRACKER",
"SECONDARY_NAMENODE",
"TASKTRACKER",
"DATANODE",
"AMBARI_SERVER"])
sqoop_host = hdp_test_base.TestServer(
'sqoop.novalocal', 'sqoop', '11111', 3,
'111.11.1111', '222.11.1111')
sqoop_ng = hdp_test_base.TestNodeGroup(
'sqoop', [sqoop_host], ["SQOOP"])
cluster = hdp_test_base.TestCluster([master_ng, sqoop_ng])
cluster_spec = hdp_test_base.create_clusterspec()
cluster_spec.create_operational_config(cluster, [])
components = cluster_spec.get_node_groups_containing_component(
'SQOOP')[0].components
self.assertIn('HDFS_CLIENT', components)
self.assertIn('MAPREDUCE_CLIENT', components)
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
hdp_test_base.get_instance_info)
@mock.patch(
@ -354,7 +252,7 @@ class ServicesTest(base.SaharaTestCase):
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
hdp_test_base.get_instance_info)
@mock.patch(
'sahara.plugins.hdp.versions.version_1_3_2.services.HdfsService.'
'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
'_get_swift_properties',
return_value=[])
def test_create_hbase_service(self, patched):
@ -377,15 +275,11 @@ class ServicesTest(base.SaharaTestCase):
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
hdp_test_base.get_instance_info)
@mock.patch(
'sahara.plugins.hdp.versions.version_1_3_2.services.HdfsService.'
'_get_swift_properties',
return_value=[])
@mock.patch(
'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
'_get_swift_properties',
return_value=[])
def test_create_hdp2_hbase_service(self, patched2, patched):
def test_create_hdp2_hbase_service(self, patched):
for version in versions:
s = self.get_services_processor(version=version)
service = s.create_service('HBASE')
@ -395,7 +289,7 @@ class ServicesTest(base.SaharaTestCase):
expected_configs & service.configurations)
self.assertFalse(service.is_mandatory())
cluster = self._create_hbase_cluster(version=version)
cluster = self._create_hbase_cluster()
cluster_spec = hdp_test_base.create_clusterspec(
hdp_version=version)
@ -416,17 +310,13 @@ class ServicesTest(base.SaharaTestCase):
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
hdp_test_base.get_instance_info)
@mock.patch(
'sahara.plugins.hdp.versions.version_1_3_2.services.HdfsService.'
'_get_swift_properties',
return_value=[])
@mock.patch(
'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
'_get_swift_properties',
return_value=[])
def test_hbase_properties(self, patched2, patched):
def test_hbase_properties(self, patched):
for version in versions:
cluster = self._create_hbase_cluster(version=version)
cluster = self._create_hbase_cluster()
cluster_spec = hdp_test_base.create_clusterspec(
hdp_version=version)
@ -667,7 +557,7 @@ class ServicesTest(base.SaharaTestCase):
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
hdp_test_base.get_instance_info)
@mock.patch(
'sahara.plugins.hdp.versions.version_1_3_2.services.HdfsService.'
'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
'_get_swift_properties',
return_value=[])
def test_hbase_validation(self, patched):
@ -675,10 +565,14 @@ class ServicesTest(base.SaharaTestCase):
'master.novalocal', 'master', '11111', 3,
'111.11.1111', '222.11.1111')
master_ng = hdp_test_base.TestNodeGroup(
'master', [master_host], ["NAMENODE", "JOBTRACKER",
'master', [master_host], ["NAMENODE",
'RESOURCEMANAGER', 'YARN_CLIENT',
'NODEMANAGER',
"SECONDARY_NAMENODE",
"TASKTRACKER", "DATANODE",
"AMBARI_SERVER"])
"DATANODE",
"AMBARI_SERVER",
'HISTORYSERVER', 'MAPREDUCE2_CLIENT',
'ZOOKEEPER_SERVER', 'ZOOKEEPER_CLIENT'])
hbase_host = hdp_test_base.TestServer(
'hbase.novalocal', 'hbase', '11111', 3,
'111.11.1111', '222.11.1111')
@ -812,17 +706,13 @@ class ServicesTest(base.SaharaTestCase):
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
hdp_test_base.get_instance_info)
@mock.patch(
'sahara.plugins.hdp.versions.version_1_3_2.services.HdfsService.'
'_get_swift_properties',
return_value=[])
@mock.patch(
'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
'_get_swift_properties',
return_value=[])
def test_hbase_service_urls(self, patched2, patched):
def test_hbase_service_urls(self, patched):
for version in versions:
cluster = self._create_hbase_cluster(version=version)
cluster = self._create_hbase_cluster()
cluster_spec = hdp_test_base.create_clusterspec(
hdp_version=version)
cluster_spec.create_operational_config(cluster, [])
@ -848,17 +738,13 @@ class ServicesTest(base.SaharaTestCase):
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
hdp_test_base.get_instance_info)
@mock.patch(
'sahara.plugins.hdp.versions.version_1_3_2.services.HdfsService.'
'_get_swift_properties',
return_value=[])
@mock.patch(
'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
'_get_swift_properties',
return_value=[])
def test_hbase_replace_tokens(self, patched2, patched):
def test_hbase_replace_tokens(self, patched):
for version in versions:
cluster = self._create_hbase_cluster(version=version)
cluster = self._create_hbase_cluster()
cluster_spec = hdp_test_base.create_clusterspec(
hdp_version=version)
cluster_spec.create_operational_config(cluster, [])
@ -900,22 +786,15 @@ class ServicesTest(base.SaharaTestCase):
paths = service._get_common_paths([ng1, ng2, ng3])
self.assertEqual(['/volume/disk1'], paths)
def _create_hbase_cluster(self, version='1.3.2'):
def _create_hbase_cluster(self):
master_host = hdp_test_base.TestServer(
'master.novalocal', 'master', '11111', 3,
'111.11.1111', '222.11.1111')
if version == '1.3.2':
master_ng = hdp_test_base.TestNodeGroup(
'master', [master_host], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "TASKTRACKER",
"DATANODE", "AMBARI_SERVER",
"ZOOKEEPER_SERVER"])
elif version == '2.0.6':
master_ng = hdp_test_base.TestNodeGroup(
'master', [master_host], ["NAMENODE", "RESOURCEMANAGER",
"SECONDARY_NAMENODE", "NODEMANAGER",
"DATANODE", "AMBARI_SERVER",
"HISTORYSERVER", "ZOOKEEPER_SERVER"])
master_ng = hdp_test_base.TestNodeGroup(
'master', [master_host], ["NAMENODE", "RESOURCEMANAGER",
"SECONDARY_NAMENODE", "NODEMANAGER",
"DATANODE", "AMBARI_SERVER",
"HISTORYSERVER", "ZOOKEEPER_SERVER"])
extra_zk_host = hdp_test_base.TestServer(
'zk.novalocal', 'zk', '11112', 3,
'111.11.1112', '222.11.1112')

View File

@ -23,8 +23,7 @@ class VersionManagerFactoryTest(base.SaharaTestCase):
factory = versionhandlerfactory.VersionHandlerFactory.get_instance()
versions = factory.get_versions()
self.assertEqual(2, len(versions))
self.assertIn('1.3.2', versions)
self.assertEqual(1, len(versions))
self.assertIn('2.0.6', versions)
def test_get_version_handlers(self):

View File

@ -1,98 +0,0 @@
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.plugins.hdp.versions.version_1_3_2 import edp_engine
from sahara.tests.unit import base as sahara_base
from sahara.utils import edp
class HDPConfigHintsTest(sahara_base.SaharaTestCase):
@mock.patch(
'sahara.plugins.hdp.confighints_helper.get_possible_hive_config_from',
return_value={})
def test_get_possible_job_config_hive(
self, get_possible_hive_config_from):
expected_config = {'job_config': {}}
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_HIVE)
get_possible_hive_config_from.assert_called_once_with(
'plugins/hdp/versions/version_1_3_2/resources/'
'ambari-config-resource.json')
self.assertEqual(expected_config, actual_config)
@mock.patch('sahara.plugins.hdp.edp_engine.EdpOozieEngine')
def test_get_possible_job_config_java(self, BaseHDPEdpOozieEngine):
expected_config = {'job_config': {}}
BaseHDPEdpOozieEngine.get_possible_job_config.return_value = (
expected_config)
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_JAVA)
BaseHDPEdpOozieEngine.get_possible_job_config.assert_called_once_with(
edp.JOB_TYPE_JAVA)
self.assertEqual(expected_config, actual_config)
@mock.patch(
'sahara.plugins.hdp.confighints_helper.'
'get_possible_mapreduce_config_from',
return_value={})
def test_get_possible_job_config_mapreduce(
self, get_possible_mapreduce_config_from):
expected_config = {'job_config': {}}
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_MAPREDUCE)
get_possible_mapreduce_config_from.assert_called_once_with(
'plugins/hdp/versions/version_1_3_2/resources/'
'ambari-config-resource.json')
self.assertEqual(expected_config, actual_config)
@mock.patch(
'sahara.plugins.hdp.confighints_helper.'
'get_possible_mapreduce_config_from',
return_value={})
def test_get_possible_job_config_mapreduce_streaming(
self, get_possible_mapreduce_config_from):
expected_config = {'job_config': {}}
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_MAPREDUCE_STREAMING)
get_possible_mapreduce_config_from.assert_called_once_with(
'plugins/hdp/versions/version_1_3_2/resources/'
'ambari-config-resource.json')
self.assertEqual(expected_config, actual_config)
@mock.patch(
'sahara.plugins.hdp.confighints_helper.get_possible_pig_config_from',
return_value={})
def test_get_possible_job_config_pig(
self, get_possible_pig_config_from):
expected_config = {'job_config': {}}
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_PIG)
get_possible_pig_config_from.assert_called_once_with(
'plugins/hdp/versions/version_1_3_2/resources/'
'ambari-config-resource.json')
self.assertEqual(expected_config, actual_config)
@mock.patch('sahara.plugins.hdp.edp_engine.EdpOozieEngine')
def test_get_possible_job_config_shell(self, BaseHDPEdpOozieEngine):
expected_config = {'job_config': {}}
BaseHDPEdpOozieEngine.get_possible_job_config.return_value = (
expected_config)
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_SHELL)
BaseHDPEdpOozieEngine.get_possible_job_config.assert_called_once_with(
edp.JOB_TYPE_SHELL)
self.assertEqual(expected_config, actual_config)