Add EDP services to new HDP plugin
This patch adds support for the following services:

* Oozie
* Hive

Known issues:

* Pig jobs with Swift don't work

Partially implements bp: hdp-22-support

Change-Id: I92e6f09e0c9685f24cdcf3ca386c76ea20134e3e
commit c78be77b8d
parent 8d0eba8d0f
CI scenario template (ambari21):

@@ -14,6 +14,13 @@ clusters:
           - YARN Timeline Server
           - ZooKeeper
         auto_security_group: true
+      - name: master-edp
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - Hive Metastore
+          - HiveServer
+          - Oozie
+        auto_security_group: true
       - name: worker
         flavor: ${ci_flavor_id}
         node_processes:

@@ -26,6 +33,7 @@ clusters:
       name: ambari21
       node_group_templates:
         master: 1
+        master-edp: 1
         worker: 3
       cluster_configs:
         HDFS:

@@ -33,4 +41,5 @@ clusters:
     cluster:
       name: ${cluster_name}
     scenario:
       - cinder
+      - run_jobs
+    edp_jobs_flow: hadoop_2
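Note: the ${ci_flavor_id} and ${cluster_name} placeholders above are substituted by the scenario test runner before the template is used. A minimal sketch of the same substitution mechanics, using Python's string.Template rather than the actual runner code:

    from string import Template

    # Hypothetical values; the real ones come from the CI environment.
    params = {"ci_flavor_id": "m1.small", "cluster_name": "ambari21-test"}
    line = Template("flavor: ${ci_flavor_id}").substitute(params)
    # line == "flavor: m1.small"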
sahara/plugins/ambari/common.py:

@@ -21,6 +21,8 @@ HDFS_SERVICE = "HDFS"
 RANGER_SERVICE = "Ranger"
 YARN_SERVICE = "YARN"
 ZOOKEEPER_SERVICE = "ZooKeeper"
+HIVE_SERVICE = "Hive"
+OOZIE_SERVICE = "Oozie"

 # define process names

@@ -28,8 +30,11 @@ AMBARI_SERVER = "Ambari"
 APP_TIMELINE_SERVER = "YARN Timeline Server"
 DATANODE = "DataNode"
 HISTORYSERVER = "MapReduce History Server"
+HIVE_METASTORE = "Hive Metastore"
+HIVE_SERVER = "HiveServer"
 NAMENODE = "NameNode"
 NODEMANAGER = "NodeManager"
+OOZIE_SERVER = "Oozie"
 RESOURCEMANAGER = "ResourceManager"
 SECONDARY_NAMENODE = "SecondaryNameNode"
 ZOOKEEPER_SERVER = "ZooKeeper"

@@ -40,8 +45,11 @@ PROC_MAP = {
     APP_TIMELINE_SERVER: ["APP_TIMELINE_SERVER"],
     DATANODE: ["DATANODE"],
     HISTORYSERVER: ["HISTORYSERVER"],
+    HIVE_METASTORE: ["HIVE_METASTORE"],
+    HIVE_SERVER: ["HIVE_SERVER", "MYSQL_SERVER", "WEBHCAT_SERVER"],
     NAMENODE: ["NAMENODE"],
     NODEMANAGER: ["NODEMANAGER"],
+    OOZIE_SERVER: ["OOZIE_SERVER", "PIG"],
     RESOURCEMANAGER: ["RESOURCEMANAGER"],
     SECONDARY_NAMENODE: ["SECONDARY_NAMENODE"],
     ZOOKEEPER_SERVER: ["ZOOKEEPER_SERVER"]

@@ -51,8 +59,11 @@ CLIENT_MAP = {
     APP_TIMELINE_SERVER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
     DATANODE: ["HDFS_CLIENT"],
     HISTORYSERVER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
+    HIVE_METASTORE: ["HIVE_CLIENT"],
+    HIVE_SERVER: ["HIVE_CLIENT"],
     NAMENODE: ["HDFS_CLIENT"],
     NODEMANAGER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
+    OOZIE_SERVER: ["OOZIE_CLIENT", "TEZ_CLIENT"],
     RESOURCEMANAGER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
     SECONDARY_NAMENODE: ["HDFS_CLIENT"],
     ZOOKEEPER_SERVER: ["ZOOKEEPER_CLIENT"]
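PROC_MAP ties each Sahara process name to the Ambari server-side components it installs, and CLIENT_MAP adds the client packages that must accompany it. A sketch of how the two maps combine (the helper below is illustrative, not part of the patch):

    from sahara.plugins.ambari import common

    def components_for(processes):
        # Union of server components and client packages for these processes.
        components = set()
        for proc in processes:
            components.update(common.PROC_MAP.get(proc, []))
            components.update(common.CLIENT_MAP.get(proc, []))
        return sorted(components)

    # components_for(["Oozie"]) ==
    #     ["OOZIE_CLIENT", "OOZIE_SERVER", "PIG", "TEZ_CLIENT"]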
sahara/plugins/ambari/configs.py:

@@ -38,9 +38,15 @@ cfg_process_map = {
     "hadoop-env": common.HDFS_SERVICE,
     "hadoop-policy": common.HDFS_SERVICE,
     "hdfs-site": common.HDFS_SERVICE,
+    "hive-env": common.HIVE_SERVICE,
+    "hive-site": common.HIVE_SERVICE,
+    "hiveserver2-site": common.HIVE_SERVICE,
+    "oozie-env": common.OOZIE_SERVICE,
+    "oozie-site": common.OOZIE_SERVICE,
     "mapred-env": common.YARN_SERVICE,
     "mapred-site": common.YARN_SERVICE,
     "ranger-hdfs-plugin-properties": common.RANGER_SERVICE,
+    "tez-site": common.OOZIE_SERVICE,
     "yarn-env": common.YARN_SERVICE,
     "yarn-site": common.YARN_SERVICE,
     "zoo.cfg": common.ZOOKEEPER_SERVICE,
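cfg_process_map routes each configuration section to the service that owns it; note that tez-site is deliberately attributed to the Oozie service. A hedged sketch of grouping user-supplied cluster configs by service (the helper name is an assumption, not part of the patch):

    import collections

    from sahara.plugins.ambari import configs

    def group_configs_by_service(cluster_configs):
        # Merge each config section into the bucket of its owning service.
        grouped = collections.defaultdict(dict)
        for section, params in cluster_configs.items():
            service = configs.cfg_process_map.get(section)
            if service is not None:
                grouped[service].update(params)
        return grouped

    # Both "oozie-site" and "tez-site" sections land under the "Oozie" service.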
sahara/plugins/ambari/edp_engine.py (new file, 56 lines):

@@ -0,0 +1,56 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from sahara.plugins.ambari import common as p_common
+from sahara.plugins import exceptions as pex
+from sahara.plugins import utils as plugin_utils
+from sahara.service.edp import hdfs_helper
+from sahara.service.edp.oozie import engine as oozie_engine
+
+
+class EDPOozieEngine(oozie_engine.OozieJobEngine):
+    def get_hdfs_user(self):
+        return "oozie"
+
+    def create_hdfs_dir(self, remote, dir_name):
+        hdfs_helper.create_dir_hadoop2(remote, dir_name, self.get_hdfs_user())
+
+    def get_oozie_server_uri(self, cluster):
+        oozie = plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER)
+        return "http://%s:11000/oozie" % oozie.management_ip
+
+    def get_name_node_uri(self, cluster):
+        namenode = plugin_utils.get_instance(cluster, p_common.NAMENODE)
+        return "hdfs://%s:8020" % namenode.fqdn()
+
+    def get_resource_manager_uri(self, cluster):
+        resourcemanager = plugin_utils.get_instance(cluster,
+                                                    p_common.RESOURCEMANAGER)
+        return "%s:8050" % resourcemanager.fqdn()
+
+    def get_oozie_server(self, cluster):
+        return plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER)
+
+    def validate_job_execution(self, cluster, job, data):
+        oozie_count = plugin_utils.get_instances_count(cluster,
+                                                       p_common.OOZIE_SERVER)
+        if oozie_count != 1:
+            raise pex.InvalidComponentCountException(
+                p_common.OOZIE_SERVER, "1", oozie_count)
+        super(EDPOozieEngine, self).validate_job_execution(cluster, job, data)
+
+    @staticmethod
+    def get_possible_job_config(job_type):
+        return {"job_config": []}
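EDPOozieEngine only overrides the cluster-specific lookups (HDFS user, service URIs, Oozie instance) and leans on the generic Oozie engine for everything else. A minimal sketch of unit-testing one of the URI helpers with mock (test scaffolding assumed, not part of the patch):

    import mock

    from sahara.plugins.ambari import edp_engine

    def test_get_oozie_server_uri():
        # Bypass __init__ so no real cluster object is required for this check.
        engine = edp_engine.EDPOozieEngine.__new__(edp_engine.EDPOozieEngine)
        instance = mock.Mock(management_ip="10.0.0.5")
        with mock.patch("sahara.plugins.utils.get_instance",
                        return_value=instance):
            uri = engine.get_oozie_server_uri(cluster=mock.Mock())
        assert uri == "http://10.0.0.5:11000/oozie"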
sahara/plugins/ambari/plugin.py:

@@ -20,6 +20,7 @@ from sahara.i18n import _
 from sahara.plugins.ambari import common as p_common
 from sahara.plugins.ambari import configs
 from sahara.plugins.ambari import deploy
+from sahara.plugins.ambari import edp_engine
 from sahara.plugins.ambari import validation
 from sahara.plugins import provisioning as p
 from sahara.plugins import utils as plugin_utils

@@ -44,6 +45,9 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
         p_common.AMBARI_SERVICE: [p_common.AMBARI_SERVER],
         p_common.HDFS_SERVICE: [p_common.DATANODE, p_common.NAMENODE,
                                 p_common.SECONDARY_NAMENODE],
+        p_common.HIVE_SERVICE: [p_common.HIVE_METASTORE,
+                                p_common.HIVE_SERVER],
+        p_common.OOZIE_SERVICE: [p_common.OOZIE_SERVER],
         p_common.YARN_SERVICE: [
             p_common.APP_TIMELINE_SERVER, p_common.HISTORYSERVER,
             p_common.NODEMANAGER, p_common.RESOURCEMANAGER],

@@ -102,6 +106,11 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
             info[p_common.APP_TIMELINE_SERVER] = {
                 "Web UI": "http://%s:8188" % atlserver.management_ip
             }
+        oozie = plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER)
+        if oozie:
+            info[p_common.OOZIE_SERVER] = {
+                "Web UI": "http://%s:11000/oozie" % oozie.management_ip
+            }
         info.update(cluster.info.to_dict())
         ctx = context.ctx()
         conductor.cluster_update(ctx, cluster, {"info": info})

@@ -120,13 +129,16 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
         pass

     def get_edp_engine(self, cluster, job_type):
-        pass
+        if job_type in edp_engine.EDPOozieEngine.get_supported_job_types():
+            return edp_engine.EDPOozieEngine(cluster)
+        return None

     def get_edp_job_types(self, versions=None):
-        pass
+        return edp_engine.EDPOozieEngine.get_supported_job_types()

     def get_edp_config_hints(self, job_type, version):
-        pass
+        if job_type in edp_engine.EDPOozieEngine.get_supported_job_types():
+            return edp_engine.EDPOozieEngine.get_possible_job_config(job_type)

     def get_open_ports(self, node_group):
         ports_map = {

@@ -134,8 +146,11 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
             p_common.APP_TIMELINE_SERVER: [8188, 8190, 10200],
             p_common.DATANODE: [50075, 50475],
             p_common.HISTORYSERVER: [10020, 19888],
+            p_common.HIVE_METASTORE: [9933],
+            p_common.HIVE_SERVER: [9999, 10000],
             p_common.NAMENODE: [8020, 9000, 50070, 50470],
             p_common.NODEMANAGER: [8042, 8044, 45454],
+            p_common.OOZIE_SERVER: [11000, 11443],
             p_common.RESOURCEMANAGER: [8025, 8030, 8050, 8088, 8141],
             p_common.SECONDARY_NAMENODE: [50090]
         }
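get_open_ports resolves this map against the processes assigned to a node group, so with the new entries the Hive and Oozie ports are opened automatically when auto_security_group is enabled. Roughly (a sketch, not the method's actual body):

    def open_ports_for(node_group, ports_map):
        # Collect every port needed by the processes placed on this node group.
        ports = []
        for process in node_group.node_processes:
            ports.extend(ports_map.get(process, []))
        return ports

    # A "master-edp" group running Hive Metastore, HiveServer and Oozie opens
    # [9933, 9999, 10000, 11000, 11443].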
sahara/plugins/ambari/validation.py:

@@ -31,6 +31,8 @@ def validate_creation(cluster_id):
     _check_ambari(cluster)
     _check_hdfs(cluster)
     _check_yarn(cluster)
+    _check_oozie(cluster)
+    _check_hive(cluster)


 def _check_ambari(cluster):

@@ -71,3 +73,27 @@ def _check_yarn(cluster):
     if nm_count == 0:
         raise ex.InvalidComponentCountException(common.NODEMANAGER,
                                                 _("1 or more"), nm_count)
+
+
+def _check_oozie(cluster):
+    count = utils.get_instances_count(cluster, common.OOZIE_SERVER)
+    if count > 1:
+        raise ex.InvalidComponentCountException(common.OOZIE_SERVER,
+                                                _("0 or 1"), count)
+
+
+def _check_hive(cluster):
+    hs_count = utils.get_instances_count(cluster, common.HIVE_SERVER)
+    hm_count = utils.get_instances_count(cluster, common.HIVE_METASTORE)
+    if hs_count > 1:
+        raise ex.InvalidComponentCountException(common.HIVE_SERVER,
+                                                _("0 or 1"), hs_count)
+    if hm_count > 1:
+        raise ex.InvalidComponentCountException(common.HIVE_METASTORE,
+                                                _("0 or 1"), hm_count)
+    if hs_count == 0 and hm_count == 1:
+        raise ex.RequiredServiceMissingException(
+            common.HIVE_SERVER, required_by=common.HIVE_METASTORE)
+    if hs_count == 1 and hm_count == 0:
+        raise ex.RequiredServiceMissingException(
+            common.HIVE_METASTORE, required_by=common.HIVE_SERVER)
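_check_oozie caps the cluster at a single Oozie server, and _check_hive enforces that HiveServer and Hive Metastore appear together or not at all. The accepted layouts, expressed as a hypothetical predicate (not code from the patch):

    def hive_layout_is_valid(hs_count, hm_count):
        # Valid: no Hive at all, or exactly one of each component.
        return (hs_count, hm_count) in ((0, 0), (1, 1))

    # (1, 0) and (0, 1) trigger RequiredServiceMissingException in the real
    # checks; counts above 1 trigger InvalidComponentCountException.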