Adding Hadoop 2.6.0 support to Vanilla plugin
Partially implements bp: add-vanilla-2-hadoop-2-6-0

Change-Id: I287a92f8e72453e0a982b5671bb4b077886accb6
parent 2579b9ec29
commit caad5172ae
@@ -20,6 +20,7 @@ include sahara/plugins/vanilla/v1_2_1/resources/*.sh
 include sahara/plugins/vanilla/v1_2_1/resources/*.sql
 include sahara/plugins/vanilla/v1_2_1/resources/*.xml
 include sahara/plugins/vanilla/v2_4_1/resources/*.xml
+include sahara/plugins/vanilla/v2_6_0/resources/*.xml
 include sahara/plugins/hdp/versions/version_1_3_2/resources/*.template
 include sahara/plugins/hdp/versions/version_1_3_2/resources/*.json
 include sahara/plugins/hdp/versions/version_1_3_2/resources/*.sh
@@ -170,9 +170,9 @@ def _check_datanodes_count(remote, count):
 
     LOG.debug("Checking datanode count")
     exit_code, stdout = remote.execute_command(
-        'sudo su -lc "hadoop dfsadmin -report" hadoop | '
-        'grep \'Datanodes available:\' | '
-        'awk \'{print $3}\'')
+        'sudo su -lc "hdfs dfsadmin -report" hadoop | '
+        'grep \'Live datanodes\|Datanodes available:\' | '
+        'grep -o \'[0-9]\+\' | head -n 1')
     LOG.debug("Datanode count='%s'" % stdout.rstrip())
 
     return exit_code == 0 and stdout and int(stdout) == count
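The rewritten check has to cope with two report formats: on pre-2.6 versions, `dfsadmin -report` prints a line like "Datanodes available: 3 (3 total, 0 dead)", while on 2.6.0 it prints "Live datanodes (3):". Switching from `awk '{print $3}'` to `grep -o '[0-9]\+' | head -n 1` picks the first number on whichever line matched. A minimal Python sketch of the same extraction (the sample report lines below are illustrative, not taken from the commit):

    import re

    def count_datanodes(report):
        # Accept both the 2.6.0 header ("Live datanodes (3):") and the
        # older one ("Datanodes available: 3 (...)"), then take the first
        # number on that line, mirroring grep -o '[0-9]\+' | head -n 1.
        for line in report.splitlines():
            if 'Live datanodes' in line or 'Datanodes available:' in line:
                match = re.search(r'\d+', line)
                if match:
                    return int(match.group())
        return None

    assert count_datanodes("Live datanodes (3):") == 3
    assert count_datanodes("Datanodes available: 3 (3 total, 0 dead)") == 3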
0     sahara/plugins/vanilla/v2_6_0/__init__.py  Normal file
94    sahara/plugins/vanilla/v2_6_0/config_helper.py  Normal file
@@ -0,0 +1,94 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo.config import cfg

from sahara.openstack.common import log as logging
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.utils import xmlutils as x

CONF = cfg.CONF
CONF.import_opt("enable_data_locality", "sahara.topology.topology_helper")

LOG = logging.getLogger(__name__)

CORE_DEFAULT = x.load_hadoop_xml_defaults(
    'plugins/vanilla/v2_6_0/resources/core-default.xml')

HDFS_DEFAULT = x.load_hadoop_xml_defaults(
    'plugins/vanilla/v2_6_0/resources/hdfs-default.xml')

MAPRED_DEFAULT = x.load_hadoop_xml_defaults(
    'plugins/vanilla/v2_6_0/resources/mapred-default.xml')

YARN_DEFAULT = x.load_hadoop_xml_defaults(
    'plugins/vanilla/v2_6_0/resources/yarn-default.xml')

OOZIE_DEFAULT = x.load_hadoop_xml_defaults(
    'plugins/vanilla/v2_6_0/resources/oozie-default.xml')

XML_CONFS = {
    "Hadoop": [CORE_DEFAULT],
    "HDFS": [HDFS_DEFAULT],
    "YARN": [YARN_DEFAULT],
    "MapReduce": [MAPRED_DEFAULT],
    "JobFlow": [OOZIE_DEFAULT]
}

ENV_CONFS = {
    "YARN": {
        'ResourceManager Heap Size': 1024,
        'NodeManager Heap Size': 1024
    },
    "HDFS": {
        'NameNode Heap Size': 1024,
        'SecondaryNameNode Heap Size': 1024,
        'DataNode Heap Size': 1024
    },
    "MapReduce": {
        'JobHistoryServer Heap Size': 1024
    },
    "JobFlow": {
        'Oozie Heap Size': 1024
    }
}


# Initialise plugin Hadoop configurations
PLUGIN_XML_CONFIGS = c_helper.init_xml_configs(XML_CONFS)
PLUGIN_ENV_CONFIGS = c_helper.init_env_configs(ENV_CONFS)


def _init_all_configs():
    configs = []
    configs.extend(PLUGIN_XML_CONFIGS)
    configs.extend(PLUGIN_ENV_CONFIGS)
    configs.extend(c_helper.PLUGIN_GENERAL_CONFIGS)
    return configs


PLUGIN_CONFIGS = _init_all_configs()


def get_plugin_configs():
    return PLUGIN_CONFIGS


def get_xml_configs():
    return PLUGIN_XML_CONFIGS


def get_env_configs():
    return ENV_CONFS
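The module delegates the heavy lifting to the version-agnostic `hadoop2.config_helper`: `init_xml_configs` wraps each loaded XML default into a plugin config entry scoped to its service, and `init_env_configs` does the same for the heap-size knobs above. A standalone sketch of the env side, with plain dicts standing in for Sahara's config objects (the field names and the 'node' scope are assumptions; the real constructor is not shown in this commit):

    def init_env_configs(env_confs):
        # One entry per (service, parameter), integer-typed, defaulting
        # to the 1024 MB heap sizes declared in ENV_CONFS.
        configs = []
        for service, params in env_confs.items():
            for name, default in params.items():
                configs.append({
                    'name': name,
                    'applicable_target': service,
                    'scope': 'node',
                    'config_type': 'int',
                    'default_value': default
                })
        return configs

    print(init_env_configs({"YARN": {'ResourceManager Heap Size': 1024}}))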
26    sahara/plugins/vanilla/v2_6_0/resources/README.rst  Normal file
@@ -0,0 +1,26 @@
Apache Hadoop Configurations for Sahara
========================================

This directory contains default XML configuration files:

* core-default.xml
* hdfs-default.xml
* mapred-default.xml
* yarn-default.xml
* oozie-default.xml

These files are used by Sahara's plugin for Apache Hadoop version 2.6.0.

The files were taken from:

* `core-default.xml <https://github.com/apache/hadoop/blob/release-2.6.0/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml>`_
* `hdfs-default.xml <https://github.com/apache/hadoop/blob/release-2.6.0/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml>`_
* `yarn-default.xml <https://github.com/apache/hadoop/blob/release-2.6.0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml>`_
* `mapred-default.xml <https://github.com/apache/hadoop/blob/release-2.6.0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml>`_
* `oozie-default.xml <https://github.com/apache/oozie/blob/release-4.0.1/core/src/main/resources/oozie-default.xml>`_

The XML configs are used to expose default Hadoop configurations to users
through Sahara's REST API. This lets users override config values, which are
then pushed to the provisioned VMs running Hadoop services as part of the
appropriate XML config file.
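Each bundled *-default.xml file is a flat list of <property> elements that `x.load_hadoop_xml_defaults` turns into config dicts. A runnable sketch of that parsing (the sample property and the returned field names are illustrative assumptions; the real helper in sahara.utils.xmlutils reads from a packaged resource path rather than a string):

    import xml.etree.ElementTree as ET

    # Illustrative fragment of a Hadoop *-default.xml file, not copied
    # from the 2.6.0 resources shipped in this commit.
    SAMPLE = """
    <configuration>
      <property>
        <name>io.file.buffer.size</name>
        <value>4096</value>
        <description>Buffer size for sequence files.</description>
      </property>
    </configuration>
    """

    def load_hadoop_xml_defaults(xml_text):
        # Field names ('name', 'value', 'description') are an assumption
        # about what the Sahara helper returns.
        return [{
            'name': prop.findtext('name'),
            'value': prop.findtext('value'),
            'description': prop.findtext('description')
        } for prop in ET.fromstring(xml_text).iter('property')]

    print(load_hadoop_xml_defaults(SAMPLE))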
1649  sahara/plugins/vanilla/v2_6_0/resources/core-default.xml  Normal file
File diff suppressed because it is too large
2227  sahara/plugins/vanilla/v2_6_0/resources/hdfs-default.xml  Normal file
File diff suppressed because it is too large
1955  sahara/plugins/vanilla/v2_6_0/resources/mapred-default.xml  Normal file
File diff suppressed because it is too large
1929  sahara/plugins/vanilla/v2_6_0/resources/oozie-default.xml  Normal file
File diff suppressed because it is too large
1528  sahara/plugins/vanilla/v2_6_0/resources/yarn-default.xml  Normal file
File diff suppressed because it is too large
141   sahara/plugins/vanilla/v2_6_0/versionhandler.py  Normal file
@@ -0,0 +1,141 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo.config import cfg

from sahara import conductor
from sahara import context
from sahara.openstack.common import log as logging
from sahara.plugins import utils
from sahara.plugins.vanilla import abstractversionhandler as avm
from sahara.plugins.vanilla.hadoop2 import config as c
from sahara.plugins.vanilla.hadoop2 import edp_engine
from sahara.plugins.vanilla.hadoop2 import run_scripts as run
from sahara.plugins.vanilla.hadoop2 import scaling as sc
from sahara.plugins.vanilla.hadoop2 import validation as vl
from sahara.plugins.vanilla import utils as vu
from sahara.plugins.vanilla.v2_6_0 import config_helper as c_helper
conductor = conductor.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class VersionHandler(avm.AbstractVersionHandler):
    def __init__(self):
        self.pctx = {
            'env_confs': c_helper.get_env_configs(),
            'all_confs': c_helper.get_plugin_configs()
        }

    def get_plugin_configs(self):
        return self.pctx['all_confs']

    def get_node_processes(self):
        return {
            "Hadoop": [],
            "MapReduce": ["historyserver"],
            "HDFS": ["namenode", "datanode", "secondarynamenode"],
            "YARN": ["resourcemanager", "nodemanager"],
            "JobFlow": ["oozie"]
        }

    def validate(self, cluster):
        vl.validate_cluster_creating(self.pctx, cluster)

    def update_infra(self, cluster):
        pass

    def configure_cluster(self, cluster):
        c.configure_cluster(self.pctx, cluster)

    def start_cluster(self, cluster):
        nn = vu.get_namenode(cluster)
        run.format_namenode(nn)
        run.start_hadoop_process(nn, 'namenode')

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_hadoop_process(snn, 'secondarynamenode')

        rm = vu.get_resourcemanager(cluster)
        if rm:
            run.start_yarn_process(rm, 'resourcemanager')

        run.start_all_processes(utils.get_instances(cluster),
                                ['datanode', 'nodemanager'])

        run.await_datanodes(cluster)

        hs = vu.get_historyserver(cluster)
        if hs:
            run.start_historyserver(hs)

        oo = vu.get_oozie(cluster)
        if oo:
            run.start_oozie_process(self.pctx, oo)

        self._set_cluster_info(cluster)

    def decommission_nodes(self, cluster, instances):
        sc.decommission_nodes(self.pctx, cluster, instances)

    def validate_scaling(self, cluster, existing, additional):
        vl.validate_additional_ng_scaling(cluster, additional)
        vl.validate_existing_ng_scaling(self.pctx, cluster, existing)

    def scale_cluster(self, cluster, instances):
        sc.scale_cluster(self.pctx, cluster, instances)

    def _set_cluster_info(self, cluster):
        nn = vu.get_namenode(cluster)
        rm = vu.get_resourcemanager(cluster)
        hs = vu.get_historyserver(cluster)
        oo = vu.get_oozie(cluster)

        info = {}

        if rm:
            info['YARN'] = {
                'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
                'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
            }

        if nn:
            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
                'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
            }

        if oo:
            info['JobFlow'] = {
                'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
            }

        if hs:
            info['MapReduce JobHistory Server'] = {
                'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
            }

        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})

    def get_edp_engine(self, cluster, job_type):
        if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
            return edp_engine.EdpOozieEngine(cluster)
        return None

    def get_open_ports(self, node_group):
        return c.get_open_ports(node_group)
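`_set_cluster_info` publishes the stock Hadoop service endpoints (8088 and 8032 for YARN, 50070 and 9000 for HDFS, 11000 for Oozie, 19888 for the JobHistory server). For a cluster running every service on one node, the resulting info dict would look roughly like the following (the management IP and hostname are illustrative, not from the commit):

    info = {
        'YARN': {
            'Web UI': 'http://10.0.0.5:8088',
            'ResourceManager': 'http://10.0.0.5:8032'
        },
        'HDFS': {
            'Web UI': 'http://10.0.0.5:50070',
            'NameNode': 'hdfs://master-001:9000'
        },
        'JobFlow': {
            'Oozie': 'http://10.0.0.5:11000'
        },
        'MapReduce JobHistory Server': {
            'Web UI': 'http://10.0.0.5:19888'
        }
    }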