Add additional HDP services

This patch adds support for the following services:
* Falcon
* Flume
* HBase
* Kafka
* Knox
* Ranger
* Slider
* Spark
* Sqoop
* Storm

Partially implements: blueprint hdp-22-support

Change-Id: Ifb13ca16649d2666f7eaa671a1a40c082cef0d40
Sergey Reshetnyak, 2015-06-25 16:43:35 +03:00
parent c78be77b8d, commit 5783d67b90
6 changed files with 297 additions and 6 deletions
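
As a quick orientation (not part of the change itself), the sketch below shows node group process lists that reference the new service processes; the grouping is illustrative only.

# Illustrative only: node group process lists using names added by this
# patch; a real template also needs counts, flavors and images.
master_processes = ["Ambari", "NameNode", "ResourceManager",
                    "HBase Master", "Spark History Server",
                    "Ranger Admin", "Ranger Usersync"]
worker_processes = ["DataNode", "NodeManager", "HBase RegionServer",
                    "Supervisor", "Flume"]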


@@ -17,26 +17,50 @@
# define service names
AMBARI_SERVICE = "Ambari"
FALCON_SERVICE = "Falcon"
FLUME_SERVICE = "Flume"
HBASE_SERVICE = "HBase"
HDFS_SERVICE = "HDFS"
HIVE_SERVICE = "Hive"
KAFKA_SERVICE = "Kafka"
KNOX_SERVICE = "Knox"
OOZIE_SERVICE = "Oozie"
RANGER_SERVICE = "Ranger"
SLIDER_SERVICE = "Slider"
SPARK_SERVICE = "Spark"
SQOOP_SERVICE = "Sqoop"
STORM_SERVICE = "Storm"
YARN_SERVICE = "YARN"
ZOOKEEPER_SERVICE = "ZooKeeper"
# define process names
AMBARI_SERVER = "Ambari"
APP_TIMELINE_SERVER = "YARN Timeline Server"
DATANODE = "DataNode"
DRPC_SERVER = "DRPC Server"
FALCON_SERVER = "Falcon Server"
FLUME_HANDLER = "Flume"
HBASE_MASTER = "HBase Master"
HBASE_REGIONSERVER = "HBase RegionServer"
HISTORYSERVER = "MapReduce History Server"
HIVE_METASTORE = "Hive Metastore"
HIVE_SERVER = "HiveServer"
KAFKA_BROKER = "Kafka Broker"
KNOX_GATEWAY = "Knox Gateway"
NAMENODE = "NameNode"
NIMBUS = "Nimbus"
NODEMANAGER = "NodeManager"
OOZIE_SERVER = "Oozie"
RANGER_ADMIN = "Ranger Admin"
RANGER_USERSYNC = "Ranger Usersync"
RESOURCEMANAGER = "ResourceManager"
SECONDARY_NAMENODE = "SecondaryNameNode"
SLIDER = "Slider"
SPARK_JOBHISTORYSERVER = "Spark History Server"
SQOOP = "Sqoop"
STORM_UI_SERVER = "Storm UI Server"
SUPERVISOR = "Supervisor"
ZOOKEEPER_SERVER = "ZooKeeper"
@@ -44,20 +68,38 @@ PROC_MAP = {
    AMBARI_SERVER: ["METRICS_COLLECTOR"],
    APP_TIMELINE_SERVER: ["APP_TIMELINE_SERVER"],
    DATANODE: ["DATANODE"],
    DRPC_SERVER: ["DRPC_SERVER"],
    FALCON_SERVER: ["FALCON_SERVER"],
    HBASE_MASTER: ["HBASE_MASTER"],
    HBASE_REGIONSERVER: ["HBASE_REGIONSERVER"],
    HISTORYSERVER: ["HISTORYSERVER"],
    HIVE_METASTORE: ["HIVE_METASTORE"],
    HIVE_SERVER: ["HIVE_SERVER", "MYSQL_SERVER", "WEBHCAT_SERVER"],
    KAFKA_BROKER: ["KAFKA_BROKER"],
    KNOX_GATEWAY: ["KNOX_GATEWAY"],
    NAMENODE: ["NAMENODE"],
    NIMBUS: ["NIMBUS"],
    NODEMANAGER: ["NODEMANAGER"],
    OOZIE_SERVER: ["OOZIE_SERVER", "PIG"],
    RANGER_ADMIN: ["RANGER_ADMIN"],
    RANGER_USERSYNC: ["RANGER_USERSYNC"],
    RESOURCEMANAGER: ["RESOURCEMANAGER"],
    SECONDARY_NAMENODE: ["SECONDARY_NAMENODE"],
    SLIDER: ["SLIDER"],
    SPARK_JOBHISTORYSERVER: ["SPARK_JOBHISTORYSERVER"],
    SQOOP: ["SQOOP"],
    STORM_UI_SERVER: ["STORM_UI_SERVER"],
    SUPERVISOR: ["SUPERVISOR"],
    ZOOKEEPER_SERVER: ["ZOOKEEPER_SERVER"]
}
CLIENT_MAP = {
    APP_TIMELINE_SERVER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
    DATANODE: ["HDFS_CLIENT"],
    FALCON_SERVER: ["FALCON_CLIENT"],
    FLUME_HANDLER: ["FLUME_HANDLER"],
    HBASE_MASTER: ["HBASE_CLIENT"],
    HBASE_REGIONSERVER: ["HBASE_CLIENT"],
    HISTORYSERVER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
    HIVE_METASTORE: ["HIVE_CLIENT"],
    HIVE_SERVER: ["HIVE_CLIENT"],
@@ -66,6 +108,7 @@ CLIENT_MAP = {
    OOZIE_SERVER: ["OOZIE_CLIENT", "TEZ_CLIENT"],
    RESOURCEMANAGER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
    SECONDARY_NAMENODE: ["HDFS_CLIENT"],
    SPARK_JOBHISTORYSERVER: ["SPARK_CLIENT"],
    ZOOKEEPER_SERVER: ["ZOOKEEPER_CLIENT"]
}
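
# A minimal usage sketch (not part of the patch): each Sahara process
# expands to its Ambari server components from PROC_MAP plus the client
# components from CLIENT_MAP. The helper name is hypothetical.
def _expand_to_components(node_processes):
    components = []
    for proc in node_processes:
        components.extend(PROC_MAP.get(proc, []))
        components.extend(CLIENT_MAP.get(proc, []))
    return components

# _expand_to_components([DATANODE, OOZIE_SERVER]) would yield
# ["DATANODE", "HDFS_CLIENT", "OOZIE_SERVER", "PIG", "OOZIE_CLIENT",
#  "TEZ_CLIENT"]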


@@ -19,6 +19,7 @@ import six
from sahara.plugins.ambari import common
from sahara.plugins import provisioning
from sahara.plugins import utils
from sahara.swift import swift_helper
from sahara.utils import files
@@ -26,6 +27,7 @@ from sahara.utils import files
configs = {}
obj_configs = {}
cfg_process_map = {
    "admin-properties": common.RANGER_SERVICE,
    "ams-env": common.AMBARI_SERVICE,
    "ams-hbase-env": common.AMBARI_SERVICE,
    "ams-hbase-policy": common.AMBARI_SERVICE,
@@ -35,18 +37,41 @@ cfg_process_map = {
    "capacity-scheduler": common.YARN_SERVICE,
    "cluster-env": "general",
    "core-site": common.HDFS_SERVICE,
    "falcon-env": common.FALCON_SERVICE,
    "falcon-runtime.properties": common.FALCON_SERVICE,
    "falcon-startup.properties": common.FALCON_SERVICE,
    "flume-env": common.FLUME_SERVICE,
    "gateway-site": common.KNOX_SERVICE,
    "hadoop-env": common.HDFS_SERVICE,
    "hadoop-policy": common.HDFS_SERVICE,
    "hbase-env": common.HBASE_SERVICE,
    "hbase-policy": common.HBASE_SERVICE,
    "hbase-site": common.HBASE_SERVICE,
    "hdfs-site": common.HDFS_SERVICE,
    "hive-env": common.HIVE_SERVICE,
    "hive-site": common.HIVE_SERVICE,
    "hiveserver2-site": common.HIVE_SERVICE,
    "kafka-broker": common.KAFKA_SERVICE,
    "kafka-env": common.KAFKA_SERVICE,
    "knox-env": common.KNOX_SERVICE,
    "mapred-env": common.YARN_SERVICE,
    "mapred-site": common.YARN_SERVICE,
    "oozie-env": common.OOZIE_SERVICE,
    "oozie-site": common.OOZIE_SERVICE,
    "ranger-env": common.RANGER_SERVICE,
    "ranger-hbase-plugin-properties": common.HBASE_SERVICE,
    "ranger-hdfs-plugin-properties": common.HDFS_SERVICE,
    "ranger-hive-plugin-properties": common.HIVE_SERVICE,
    "ranger-knox-plugin-properties": common.KNOX_SERVICE,
    "ranger-site": common.RANGER_SERVICE,
    "ranger-storm-plugin-properties": common.STORM_SERVICE,
    "spark-defaults": common.SPARK_SERVICE,
    "spark-env": common.SPARK_SERVICE,
    "sqoop-env": common.SQOOP_SERVICE,
    "storm-env": common.STORM_SERVICE,
    "storm-site": common.STORM_SERVICE,
    "tez-site": common.OOZIE_SERVICE,
    "usersync-properties": common.RANGER_SERVICE,
    "yarn-env": common.YARN_SERVICE,
    "yarn-site": common.YARN_SERVICE,
    "zoo.cfg": common.ZOOKEEPER_SERVICE,
@@ -174,4 +199,8 @@ def get_cluster_params(cluster):
                     for x in swift_helper.get_swift_configs()}
    configs.setdefault("core-site", {})
    configs["core-site"].update(swift_configs)
    if utils.get_instance(cluster, common.RANGER_ADMIN):
        configs.setdefault("admin-properties", {})
        configs["admin-properties"]["db_root_password"] = (
            cluster.extra["ranger_db_password"])
    return _serialize_ambari_configs(configs)
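
# Hedged sketch of what cfg_process_map enables (the helper below is
# hypothetical, not part of this patch): Ambari config sections are
# routed to the Sahara service that owns them, so per-service groups
# can be built from a flat mapping of section name to parameters.
def _group_by_service(ambari_configs):
    grouped = {}
    for section, params in ambari_configs.items():
        service = cfg_process_map.get(section, "general")
        grouped.setdefault(service, {})[section] = params
    return grouped

# _group_by_service({"hdfs-site": {"dfs.replication": "3"}}) ->
# {common.HDFS_SERVICE: {"hdfs-site": {"dfs.replication": "3"}}}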


@@ -99,6 +99,35 @@ def _check_port_accessible(host, port):
        return False


def _prepare_ranger(cluster):
    ranger = plugin_utils.get_instance(cluster, p_common.RANGER_ADMIN)
    if not ranger:
        return
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    with ambari.remote() as r:
        r.execute_command("sudo yum install -y mysql-connector-java")
        r.execute_command(
            "sudo ambari-server setup --jdbc-db=mysql "
            "--jdbc-driver=/usr/share/java/mysql-connector-java.jar")
    init_db_template = """
create user 'root'@'%' identified by '{password}';
set password for 'root'@'localhost' = password('{password}');"""
    password = uuidutils.generate_uuid()
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra["ranger_db_password"] = password
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"extra": extra})
    with ranger.remote() as r:
        sudo = functools.partial(r.execute_command, run_as_root=True)
        # TODO(sreshetnyak): add ubuntu support
        sudo("yum install -y mysql-server")
        sudo("service mysqld start")
        r.write_file_to("/tmp/init.sql",
                        init_db_template.format(password=password))
        sudo("mysql < /tmp/init.sql")
        sudo("rm /tmp/init.sql")


def update_default_ambari_password(cluster):
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    new_password = uuidutils.generate_uuid()
@@ -150,6 +179,8 @@ def set_up_hdp_repos(cluster):
def create_blueprint(cluster):
    _prepare_ranger(cluster)
    cluster = conductor.cluster_get(context.ctx(), cluster.id)
    host_groups = []
    for ng in cluster.node_groups:
        hg = {


@@ -43,11 +43,25 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
    def get_node_processes(self, hadoop_version):
        return {
            p_common.AMBARI_SERVICE: [p_common.AMBARI_SERVER],
            p_common.FALCON_SERVICE: [p_common.FALCON_SERVER],
            p_common.FLUME_SERVICE: [p_common.FLUME_HANDLER],
            p_common.HBASE_SERVICE: [p_common.HBASE_MASTER,
                                     p_common.HBASE_REGIONSERVER],
            p_common.HDFS_SERVICE: [p_common.DATANODE, p_common.NAMENODE,
                                    p_common.SECONDARY_NAMENODE],
            p_common.HIVE_SERVICE: [p_common.HIVE_METASTORE,
                                    p_common.HIVE_SERVER],
            p_common.KAFKA_SERVICE: [p_common.KAFKA_BROKER],
            p_common.KNOX_SERVICE: [p_common.KNOX_GATEWAY],
            p_common.OOZIE_SERVICE: [p_common.OOZIE_SERVER],
            p_common.RANGER_SERVICE: [p_common.RANGER_ADMIN,
                                      p_common.RANGER_USERSYNC],
            p_common.SLIDER_SERVICE: [p_common.SLIDER],
            p_common.SPARK_SERVICE: [p_common.SPARK_JOBHISTORYSERVER],
            p_common.SQOOP_SERVICE: [p_common.SQOOP],
            p_common.STORM_SERVICE: [
                p_common.DRPC_SERVER, p_common.NIMBUS,
                p_common.STORM_UI_SERVER, p_common.SUPERVISOR],
            p_common.YARN_SERVICE: [
                p_common.APP_TIMELINE_SERVER, p_common.HISTORYSERVER,
                p_common.NODEMANAGER, p_common.RESOURCEMANAGER],
@@ -111,6 +125,36 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
            info[p_common.OOZIE_SERVER] = {
                "Web UI": "http://%s:11000/oozie" % oozie.management_ip
            }
        hbase_master = plugin_utils.get_instance(cluster,
                                                 p_common.HBASE_MASTER)
        if hbase_master:
            info[p_common.HBASE_MASTER] = {
                "Web UI": "http://%s:60010" % hbase_master.management_ip
            }
        falcon = plugin_utils.get_instance(cluster, p_common.FALCON_SERVER)
        if falcon:
            info[p_common.FALCON_SERVER] = {
                "Web UI": "http://%s:15000" % falcon.management_ip
            }
        storm_ui = plugin_utils.get_instance(cluster,
                                             p_common.STORM_UI_SERVER)
        if storm_ui:
            info[p_common.STORM_UI_SERVER] = {
                "Web UI": "http://%s:8744" % storm_ui.management_ip
            }
        ranger_admin = plugin_utils.get_instance(cluster,
                                                 p_common.RANGER_ADMIN)
        if ranger_admin:
            info[p_common.RANGER_ADMIN] = {
                "Web UI": "http://%s:6080" % ranger_admin.management_ip,
                "Username": "admin",
                "Password": "admin"
            }
        spark_hs = plugin_utils.get_instance(cluster,
                                             p_common.SPARK_JOBHISTORYSERVER)
        if spark_hs:
            info[p_common.SPARK_JOBHISTORYSERVER] = {
                "Web UI": "http://%s:18080" % spark_hs.management_ip
            }
        info.update(cluster.info.to_dict())
        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {"info": info})
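        # Illustrative result (addresses made up): after this hunk the
        # cluster info exposed to users contains entries such as
        #   {"Ranger Admin": {"Web UI": "http://10.0.0.5:6080",
        #                     "Username": "admin", "Password": "admin"},
        #    "Spark History Server": {"Web UI": "http://10.0.0.6:18080"}}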
@@ -145,14 +189,23 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
            p_common.AMBARI_SERVER: [8080],
            p_common.APP_TIMELINE_SERVER: [8188, 8190, 10200],
            p_common.DATANODE: [50075, 50475],
            p_common.DRPC_SERVER: [3772, 3773],
            p_common.FALCON_SERVER: [15000],
            p_common.FLUME_HANDLER: [8020, 41414],
            p_common.HBASE_MASTER: [60000, 60010],
            p_common.HBASE_REGIONSERVER: [60020, 60030],
            p_common.HISTORYSERVER: [10020, 19888],
            p_common.HIVE_METASTORE: [9933],
            p_common.HIVE_SERVER: [9999, 10000],
            p_common.NAMENODE: [8020, 9000, 50070, 50470],
            p_common.NIMBUS: [6627],
            p_common.NODEMANAGER: [8042, 8044, 45454],
            p_common.OOZIE_SERVER: [11000, 11443],
            p_common.RANGER_ADMIN: [6080],
            p_common.RESOURCEMANAGER: [8025, 8030, 8050, 8088, 8141],
            p_common.SECONDARY_NAMENODE: [50090],
            p_common.SPARK_JOBHISTORYSERVER: [18080],
            p_common.STORM_UI_SERVER: [8000, 8080, 8744]
        }
        ports = []
        for service in node_group.node_processes:

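# Minimal sketch of how the map above is consumed (mirrors the loop
# above; the "ports_map" name is an assumption for this illustration):
def _collect_ports(node_processes, ports_map):
    ports = []
    for service in node_processes:
        ports.extend(ports_map.get(service, []))
    return ports

# _collect_ports(["Ranger Admin", "Kafka Broker"], ports_map) -> [6080];
# Kafka Broker has no entry in the map, so only Ranger's port appears.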

@@ -1,4 +1,28 @@
{
    "admin-properties": {
        "DB_FLAVOR": "MYSQL",
        "SQL_COMMAND_INVOKER": "mysql",
        "SQL_CONNECTOR_JAR": "/usr/share/java/mysql-connector-java.jar",
        "audit_db_name": "ranger_audit",
        "audit_db_user": "rangerlogger",
        "authServiceHostName": "localhost",
        "authServicePort": "5151",
        "authentication_method": "UNIX",
        "db_host": "localhost",
        "db_name": "ranger",
        "db_root_user": "root",
        "db_user": "rangeradmin",
        "policymgr_external_url": "http://localhost:6080",
        "policymgr_http_enabled": "true",
        "remoteLoginEnabled": "true",
        "xa_ldap_ad_domain": "\"xasecure.net\"",
        "xa_ldap_ad_url": "\"ldap://ad.xasecure.net:389\"",
        "xa_ldap_groupRoleAttribute": "\"cn\"",
        "xa_ldap_groupSearchBase": "\"ou=groups,dc=xasecure,dc=net\"",
        "xa_ldap_groupSearchFilter": "\"(member=uid={0},ou=users,dc=xasecure,dc=net)\"",
        "xa_ldap_url": "\"ldap://71.127.43.33:389\"",
        "xa_ldap_userDNpattern": "\"uid={0},ou=users,dc=xasecure,dc=net\""
    },
    "ams-env": {
        "ambari_metrics_user": "ams",
        "metrics_collector_heapsize": "512m",
@@ -736,6 +760,17 @@
        "oozie.systemmode": "NORMAL",
        "use.system.libpath.for.mapreduce.and.pig.jobs": "false"
    },
    "ranger-env": {
        "admin_password": "admin",
        "admin_username": "admin",
        "oracle_home": "-",
        "ranger_admin_log_dir": "/var/log/ranger/admin",
        "ranger_admin_password": "795aab0c-4a87-4d13-ad3c-2df0c48bb237",
        "ranger_admin_username": "amb_ranger_admin",
        "ranger_group": "ranger",
        "ranger_user": "ranger",
        "ranger_usersync_log_dir": "/var/log/ranger/usersync"
    },
    "ranger-hbase-plugin-properties": {
        "REPOSITORY_CONFIG_PASSWORD": "hbase",
        "REPOSITORY_CONFIG_USERNAME": "hbase",
@@ -837,6 +872,15 @@
        "policy_user": "ambari-qa",
        "ranger-knox-plugin-enabled": "No"
    },
    "ranger-site": {
        "HTTPS_CLIENT_AUTH": "want",
        "HTTPS_KEYSTORE_FILE": "/etc/ranger/admin/keys/server.jks",
        "HTTPS_KEYSTORE_PASS": "ranger",
        "HTTPS_KEY_ALIAS": "myKey",
        "HTTPS_SERVICE_PORT": "6182",
        "HTTP_ENABLED": "true",
        "HTTP_SERVICE_PORT": "6080"
    },
    "ranger-storm-plugin-properties": {
        "REPOSITORY_CONFIG_PASSWORD": "stormtestuser",
        "REPOSITORY_CONFIG_USERNAME": "stormtestuser@EXAMPLE.COM",
@@ -1030,6 +1074,24 @@
        "tez.task.resource.memory.mb": "1024",
        "tez.use.cluster.hadoop-libs": "false"
    },
    "usersync-properties": {
        "CRED_KEYSTORE_FILENAME": "/usr/lib/xausersync/.jceks/xausersync.jceks",
        "MIN_UNIX_USER_ID_TO_SYNC": "1000",
        "SYNC_INTERVAL": "1",
        "SYNC_LDAP_BIND_DN": "cn=admin,dc=xasecure,dc=net",
        "SYNC_LDAP_BIND_PASSWORD": "admin321",
        "SYNC_LDAP_GROUPNAME_CASE_CONVERSION": "lower",
        "SYNC_LDAP_URL": "ldap://localhost:389",
        "SYNC_LDAP_USERNAME_CASE_CONVERSION": "lower",
        "SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE": "memberof,ismemberof",
        "SYNC_LDAP_USER_NAME_ATTRIBUTE": "cn",
        "SYNC_LDAP_USER_OBJECT_CLASS": "person",
        "SYNC_LDAP_USER_SEARCH_BASE": "ou=users,dc=xasecure,dc=net",
        "SYNC_LDAP_USER_SEARCH_FILTER": "-",
        "SYNC_LDAP_USER_SEARCH_SCOPE": "sub",
        "SYNC_SOURCE": "unix",
        "logdir": "logs"
    },
    "webhcat-site": {
        "templeton.exec.timeout": "60000",
        "templeton.hadoop": "/usr/hdp/current/hadoop-client/bin/hadoop",


@@ -33,6 +33,10 @@ def validate_creation(cluster_id):
    _check_yarn(cluster)
    _check_oozie(cluster)
    _check_hive(cluster)
    _check_hbase(cluster)
    _check_spark(cluster)
    _check_ranger(cluster)
    _check_storm(cluster)


def _check_ambari(cluster):
@@ -97,3 +101,72 @@ def _check_hive(cluster):
    if hs_count == 1 and hm_count == 0:
        raise ex.RequiredServiceMissingException(
            common.HIVE_METASTORE, required_by=common.HIVE_SERVER)


def _check_hbase(cluster):
    hm_count = utils.get_instances_count(cluster, common.HBASE_MASTER)
    hr_count = utils.get_instances_count(cluster, common.HBASE_REGIONSERVER)
    if hm_count > 1:
        raise ex.InvalidComponentCountException(common.HBASE_MASTER,
                                                _("0 or 1"), hm_count)
    if hm_count == 1 and hr_count == 0:
        raise ex.RequiredServiceMissingException(
            common.HBASE_REGIONSERVER, required_by=common.HBASE_MASTER)
    if hr_count > 0 and hm_count == 0:
        raise ex.RequiredServiceMissingException(
            common.HBASE_MASTER, required_by=common.HBASE_REGIONSERVER)


def _check_spark(cluster):
    count = utils.get_instances_count(cluster, common.SPARK_JOBHISTORYSERVER)
    if count > 1:
        raise ex.InvalidComponentCountException(common.SPARK_JOBHISTORYSERVER,
                                                _("0 or 1"), count)


def _check_ranger(cluster):
    ra_count = utils.get_instances_count(cluster, common.RANGER_ADMIN)
    ru_count = utils.get_instances_count(cluster, common.RANGER_USERSYNC)
    if ra_count > 1:
        raise ex.InvalidComponentCountException(common.RANGER_ADMIN,
                                                _("0 or 1"), ra_count)
    if ru_count > 1:
        raise ex.InvalidComponentCountException(common.RANGER_USERSYNC,
                                                _("0 or 1"), ru_count)
    if ra_count == 1 and ru_count == 0:
        raise ex.RequiredServiceMissingException(
            common.RANGER_USERSYNC, required_by=common.RANGER_ADMIN)
    if ra_count == 0 and ru_count == 1:
        raise ex.RequiredServiceMissingException(
            common.RANGER_ADMIN, required_by=common.RANGER_USERSYNC)


def _check_storm(cluster):
    dr_count = utils.get_instances_count(cluster, common.DRPC_SERVER)
    ni_count = utils.get_instances_count(cluster, common.NIMBUS)
    su_count = utils.get_instances_count(cluster, common.STORM_UI_SERVER)
    sv_count = utils.get_instances_count(cluster, common.SUPERVISOR)
    if dr_count > 1:
        raise ex.InvalidComponentCountException(common.DRPC_SERVER,
                                                _("0 or 1"), dr_count)
    if ni_count > 1:
        raise ex.InvalidComponentCountException(common.NIMBUS,
                                                _("0 or 1"), ni_count)
    if su_count > 1:
        raise ex.InvalidComponentCountException(common.STORM_UI_SERVER,
                                                _("0 or 1"), su_count)
    if dr_count == 0 and ni_count == 1:
        raise ex.RequiredServiceMissingException(
            common.DRPC_SERVER, required_by=common.NIMBUS)
    if dr_count == 1 and ni_count == 0:
        raise ex.RequiredServiceMissingException(
            common.NIMBUS, required_by=common.DRPC_SERVER)
    if su_count == 1 and (dr_count == 0 or ni_count == 0):
        raise ex.RequiredServiceMissingException(
            common.NIMBUS, required_by=common.STORM_UI_SERVER)
    if dr_count == 1 and sv_count == 0:
        raise ex.RequiredServiceMissingException(
            common.SUPERVISOR, required_by=common.DRPC_SERVER)
    if sv_count > 0 and dr_count == 0:
        raise ex.RequiredServiceMissingException(
            common.DRPC_SERVER, required_by=common.SUPERVISOR)
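

# Hedged sketch: the Storm rules above expressed as a plain predicate
# (the real checks raise Sahara exceptions; this is illustration only).
def _storm_layout_ok(dr, ni, su, sv):
    if dr > 1 or ni > 1 or su > 1:
        return False  # DRPC Server, Nimbus and Storm UI are 0-or-1
    if (dr == 1) != (ni == 1):
        return False  # DRPC Server and Nimbus must come together
    if su == 1 and (dr == 0 or ni == 0):
        return False  # Storm UI requires the Nimbus/DRPC pair
    if dr == 1 and sv == 0:
        return False  # a control plane without Supervisors is invalid
    if sv > 0 and dr == 0:
        return False  # Supervisors require the control plane
    return True

assert _storm_layout_ok(1, 1, 1, 2)      # full Storm topology
assert not _storm_layout_ok(0, 0, 1, 0)  # Storm UI alone is rejected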