Remove Vanilla 2.3 Hadoop
The Vanilla 2.3 Hadoop plugin was deprecated in Juno and will be deleted in Kilo.

Partially implements bp: drop-hadoop-2-3-support

Change-Id: Id94f0ddf618146fdff4eeb9b04f66e81ca492744
commit 8e2707b5eb (parent ea4e38a198)
MANIFEST.in
@@ -18,7 +18,6 @@ include sahara/plugins/vanilla/hadoop2/resources/*.template
 include sahara/plugins/vanilla/v1_2_1/resources/*.sh
 include sahara/plugins/vanilla/v1_2_1/resources/*.sql
 include sahara/plugins/vanilla/v1_2_1/resources/*.xml
-include sahara/plugins/vanilla/v2_3_0/resources/*.xml
 include sahara/plugins/vanilla/v2_4_1/resources/*.xml
 include sahara/plugins/hdp/versions/version_1_3_2/resources/*.template
 include sahara/plugins/hdp/versions/version_1_3_2/resources/*.json
Binary file not shown.
sahara/plugins/vanilla/v2_3_0/config_helper.py (deleted)
@@ -1,94 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo.config import cfg
-
-from sahara.openstack.common import log as logging
-from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
-from sahara.utils import xmlutils as x
-
-CONF = cfg.CONF
-CONF.import_opt("enable_data_locality", "sahara.topology.topology_helper")
-
-LOG = logging.getLogger(__name__)
-
-CORE_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_3_0/resources/core-default.xml')
-
-HDFS_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_3_0/resources/hdfs-default.xml')
-
-MAPRED_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_3_0/resources/mapred-default.xml')
-
-YARN_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_3_0/resources/yarn-default.xml')
-
-OOZIE_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_3_0/resources/oozie-default.xml')
-
-XML_CONFS = {
-    "Hadoop": [CORE_DEFAULT],
-    "HDFS": [HDFS_DEFAULT],
-    "YARN": [YARN_DEFAULT],
-    "MapReduce": [MAPRED_DEFAULT],
-    "JobFlow": [OOZIE_DEFAULT]
-}
-
-ENV_CONFS = {
-    "YARN": {
-        'ResourceManager Heap Size': 1024,
-        'NodeManager Heap Size': 1024
-    },
-    "HDFS": {
-        'NameNode Heap Size': 1024,
-        'SecondaryNameNode Heap Size': 1024,
-        'DataNode Heap Size': 1024
-    },
-    "MapReduce": {
-        'JobHistoryServer Heap Size': 1024
-    },
-    "JobFlow": {
-        'Oozie Heap Size': 1024
-    }
-}
-
-
-# Initialise plugin Hadoop configurations
-PLUGIN_XML_CONFIGS = c_helper.init_xml_configs(XML_CONFS)
-PLUGIN_ENV_CONFIGS = c_helper.init_env_configs(ENV_CONFS)
-
-
-def _init_all_configs():
-    configs = []
-    configs.extend(PLUGIN_XML_CONFIGS)
-    configs.extend(PLUGIN_ENV_CONFIGS)
-    configs.extend(c_helper.PLUGIN_GENERAL_CONFIGS)
-    return configs
-
-
-PLUGIN_CONFIGS = _init_all_configs()
-
-
-def get_plugin_configs():
-    return PLUGIN_CONFIGS
-
-
-def get_xml_configs():
-    return PLUGIN_XML_CONFIGS
-
-
-def get_env_configs():
-    return ENV_CONFS
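The deleted helper hinges on x.load_hadoop_xml_defaults, which parses a Hadoop *-default.xml resource into a list of config dicts. A minimal sketch of that behavior, assuming the standard Hadoop <property><name/><value/><description/> layout (the real helper is sahara.utils.xmlutils.load_hadoop_xml_defaults and may differ in detail):

    # Sketch only, not Sahara's exact implementation: parse Hadoop-style
    # <property> entries into {'name', 'value', 'description'} dicts.
    import xml.dom.minidom as minidom


    def load_hadoop_xml_defaults(path):
        def text(parent, tag):
            nodes = parent.getElementsByTagName(tag)
            if nodes and nodes[0].firstChild:
                return nodes[0].firstChild.nodeValue.strip()
            return None

        configs = []
        for prop in minidom.parse(path).getElementsByTagName('property'):
            configs.append({'name': text(prop, 'name'),
                            'value': text(prop, 'value'),
                            'description': text(prop, 'description')})
        return configs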
sahara/plugins/vanilla/v2_3_0/resources/README.rst (deleted)
@@ -1,26 +0,0 @@
-Apache Hadoop Configurations for Sahara
-========================================
-
-This directory contains default XML configuration files:
-
-* core-default.xml
-* hdfs-default.xml
-* mapred-default.xml
-* yarn-default.xml
-* oozie-default.xml
-
-These files are applied for Sahara's plugin of Apache Hadoop version 2.3.0
-
-
-Files were taken from here:
-
-* `core-default.xml <https://github.com/apache/hadoop-common/blob/release-2.3.0/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml>`_
-* `hdfs-default.xml <https://github.com/apache/hadoop-common/blob/release-2.3.0/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml>`_
-* `yarn-default.xml <https://github.com/apache/hadoop-common/blob/release-2.3.0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml>`_
-* `mapred-default.xml <https://github.com/apache/hadoop-common/blob/release-2.3.0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml>`_
-* `oozie-default.xml <https://github.com/apache/oozie/blob/release-4.0.0/core/src/main/resources/oozie-default.xml>`_
-
-XML configs are used to expose default Hadoop configurations to the users through
-Sahara's REST API. It allows users to override some config values which will
-be pushed to the provisioned VMs running Hadoop services as part of appropriate
-xml config.
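As the deleted README explains, these defaults are exposed through Sahara's REST API so that users can override individual values per cluster. A hedged illustration of such an override (the cluster_configs shape follows Sahara's cluster-template convention; the concrete values are made up):

    # Hypothetical payload: the user's dfs.replication overrides the value
    # from hdfs-default.xml and is pushed into hdfs-site.xml on the VMs.
    cluster_template = {
        'name': 'vanilla-241-example',
        'plugin_name': 'vanilla',
        'hadoop_version': '2.4.1',
        'cluster_configs': {
            'HDFS': {'dfs.replication': 2},
        },
    }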
(File diffs suppressed because they are too large: the five deleted v2_3_0
resource files core-default.xml, hdfs-default.xml, mapred-default.xml,
yarn-default.xml and oozie-default.xml.)
sahara/plugins/vanilla/v2_3_0/versionhandler.py (deleted)
@@ -1,146 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo.config import cfg
-
-from sahara import conductor
-from sahara import context
-from sahara import exceptions as ex
-from sahara.i18n import _
-from sahara.openstack.common import log as logging
-from sahara.plugins import utils
-from sahara.plugins.vanilla import abstractversionhandler as avm
-from sahara.plugins.vanilla.hadoop2 import config as c
-from sahara.plugins.vanilla.hadoop2 import edp_engine
-from sahara.plugins.vanilla.hadoop2 import run_scripts as run
-from sahara.plugins.vanilla.hadoop2 import scaling as sc
-from sahara.plugins.vanilla.hadoop2 import validation as vl
-from sahara.plugins.vanilla import utils as vu
-from sahara.plugins.vanilla.v2_3_0 import config_helper as c_helper
-
-
-conductor = conductor.API
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-class VersionHandler(avm.AbstractVersionHandler):
-    def __init__(self):
-        self.pctx = {
-            'env_confs': c_helper.get_env_configs(),
-            'all_confs': c_helper.get_plugin_configs()
-        }
-
-    def get_plugin_configs(self):
-        return self.pctx['all_confs']
-
-    def get_node_processes(self):
-        return {
-            "Hadoop": [],
-            "MapReduce": ["historyserver"],
-            "HDFS": ["namenode", "datanode", "secondarynamenode"],
-            "YARN": ["resourcemanager", "nodemanager"],
-            "JobFlow": ["oozie"]
-        }
-
-    def validate(self, cluster):
-        raise ex.DeprecatedException(
-            _("The Vanilla 2.3.0 plugin is now deprecated and will be removed"
-              " in the Kilo release. The Vanilla 2.4.1 plugin remains and"
-              " continues to be supported."))
-
-    def update_infra(self, cluster):
-        pass
-
-    def configure_cluster(self, cluster):
-        c.configure_cluster(self.pctx, cluster)
-
-    def start_cluster(self, cluster):
-        nn = vu.get_namenode(cluster)
-        run.format_namenode(nn)
-        run.start_hadoop_process(nn, 'namenode')
-
-        for snn in vu.get_secondarynamenodes(cluster):
-            run.start_hadoop_process(snn, 'secondarynamenode')
-
-        rm = vu.get_resourcemanager(cluster)
-        if rm:
-            run.start_yarn_process(rm, 'resourcemanager')
-
-        run.start_all_processes(utils.get_instances(cluster),
-                                ['datanode', 'nodemanager'])
-
-        run.await_datanodes(cluster)
-
-        hs = vu.get_historyserver(cluster)
-        if hs:
-            run.start_historyserver(hs)
-
-        oo = vu.get_oozie(cluster)
-        if oo:
-            run.start_oozie_process(self.pctx, oo)
-
-        self._set_cluster_info(cluster)
-
-    def decommission_nodes(self, cluster, instances):
-        sc.decommission_nodes(self.pctx, cluster, instances)
-
-    def validate_scaling(self, cluster, existing, additional):
-        vl.validate_additional_ng_scaling(cluster, additional)
-        vl.validate_existing_ng_scaling(self.pctx, cluster, existing)
-
-    def scale_cluster(self, cluster, instances):
-        sc.scale_cluster(self.pctx, cluster, instances)
-
-    def _set_cluster_info(self, cluster):
-        nn = vu.get_namenode(cluster)
-        rm = vu.get_resourcemanager(cluster)
-        hs = vu.get_historyserver(cluster)
-        oo = vu.get_oozie(cluster)
-
-        info = {}
-
-        if rm:
-            info['YARN'] = {
-                'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
-                'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
-            }
-
-        if nn:
-            info['HDFS'] = {
-                'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
-                'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
-            }
-
-        if oo:
-            info['JobFlow'] = {
-                'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
-            }
-
-        if hs:
-            info['MapReduce JobHistory Server'] = {
-                'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
-            }
-
-        ctx = context.ctx()
-        conductor.cluster_update(ctx, cluster, {'info': info})
-
-    def get_edp_engine(self, cluster, job_type):
-        if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
-            return edp_engine.EdpOozieEngine(cluster)
-        return None
-
-    def get_open_ports(self, node_group):
-        return c.get_open_ports(node_group)
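Deleting versionhandler.py together with the rest of the v2_3_0 package is what actually delists the version, since the vanilla plugin discovers its supported versions by scanning for v* subpackages. A simplified stand-in for that discovery (the real code lives in sahara.plugins.vanilla.versionfactory; the function below is illustrative only):

    import os


    def get_versions(plugin_dir):
        # Each v<major>_<minor>_<patch> subpackage is one supported version;
        # removing the v2_3_0 directory drops '2.3.0' from this list.
        return sorted(name[1:].replace('_', '.')
                      for name in os.listdir(plugin_dir)
                      if name.startswith('v') and
                      os.path.isdir(os.path.join(plugin_dir, name)))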
@@ -50,7 +50,7 @@ should use the corresponding tox env:
 ..

 For example, you want to run tests for the Vanilla plugin with the Hadoop
-version 2.3.0 and for the HDP plugin with the Hortonworks Data Platform version
+version 2.4.1 and for the HDP plugin with the Hortonworks Data Platform version
 1.3.2. In this case you should use the following tox env:

 .. sourcecode:: console
@@ -67,7 +67,7 @@ version 1.2.1. More info about transient cluster see in section ``Contents``.
 ``tox -e integration -- hdp`` will run tests for the HDP plugin.

 ``tox -e integration -- transient vanilla2 hdp`` will run test for transient
-cluster, tests for the Vanilla plugin with the Hadoop version 2.3.0 and tests
+cluster, tests for the Vanilla plugin with the Hadoop version 2.4.1 and tests
 for the HDP plugin with the Hortonworks Data Platform version 1.3.2.

 Contents
@@ -133,7 +133,7 @@ The Vanilla plugin with the Hadoop version 1.2.1 has the following checks:
 7. Cluster scaling.
 8. Transient cluster.

-The Vanilla plugin with the Hadoop version 2.3.0 has the following checks:
+The Vanilla plugin with the Hadoop version 2.4.1 has the following checks:
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

 1. Proper cluster creation.
@@ -239,14 +239,14 @@ VANILLA_TWO_CONFIG_OPTS = [
                     'then image for cluster creation will be chosen by '
                     'tag "sahara_i_tests".'),
     cfg.StrOpt('HADOOP_VERSION',
-               default='2.3.0',
+               default='2.4.1',
                help='Version of Hadoop.'),
     cfg.StrOpt('HADOOP_USER',
                default='hadoop',
                help='Username which is used for access to Hadoop services.'),
     cfg.StrOpt('HADOOP_EXAMPLES_JAR_PATH',
                default='/opt/hadoop/share/hadoop/mapreduce/'
-                       'hadoop-mapreduce-examples-2.3.0.jar',
+                       'hadoop-mapreduce-examples-2.4.1.jar',
                help='Path to hadoop examples jar file.'),
     cfg.StrOpt('HADOOP_LOG_DIRECTORY',
                default='/mnt/yarn/logs/userlogs',
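The integration tests read these values through oslo.config, so the new 2.4.1 defaults can still be overridden from the test configuration file. A minimal sketch of how such a StrOpt default resolves (the group name VANILLA_TWO is an assumption inferred from VANILLA_TWO_CONFIG_OPTS above):

    from oslo.config import cfg

    CONF = cfg.CONF
    CONF.register_opts(
        [cfg.StrOpt('HADOOP_VERSION', default='2.4.1',
                    help='Version of Hadoop.')],
        group='VANILLA_TWO')

    # Prints '2.4.1' unless a config file sets [VANILLA_TWO] HADOOP_VERSION.
    print(CONF.VANILLA_TWO.HADOOP_VERSION)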
@@ -44,7 +44,7 @@ class EDPJobInfo(object):
         if hadoop_vers == 1:
             return open(self.JAVA_PATH + 'edp-java.jar').read()
         return open(self.HADOOP2_JAVA_PATH + (
-            'hadoop-mapreduce-examples-2.3.0.jar')).read()
+            'hadoop-mapreduce-examples-2.4.1.jar')).read()

     def java_example_configs(self, hadoop_vers=1):
         if hadoop_vers == 1:
@@ -32,7 +32,7 @@ class VanillaPluginTest(base.SaharaWithDbTestCase):

     @mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop2')
     def test_edp_calls_hadoop2_create_dir(self, create_dir):
-        for version in ['2.3.0', '2.4.1']:
+        for version in ['2.4.1']:
             cluster_dict = {
                 'name': 'cluster' + version.replace('.', '_'),
                 'plugin_name': 'vanilla',