Drop Vanilla Hadoop 1
Dropping support of the unpopular and unused Hadoop v1 in the Vanilla plugin.

Partially-implements bp: drop-hadoop-1
Change-Id: I0a322fb7b8db50941c4854f45077fe6232e2c766
parent cfcdc16bda
commit 6b5f0d0b1b
@@ -31,9 +31,6 @@ include sahara/plugins/cdh/v5_4_0/resources/*.sql
 include sahara/plugins/vanilla/hadoop2/resources/*.sh
 include sahara/plugins/vanilla/hadoop2/resources/*.sql
 include sahara/plugins/vanilla/hadoop2/resources/*.template
-include sahara/plugins/vanilla/v1_2_1/resources/*.sh
-include sahara/plugins/vanilla/v1_2_1/resources/*.sql
-include sahara/plugins/vanilla/v1_2_1/resources/*.xml
 include sahara/plugins/vanilla/v2_6_0/resources/*.xml
 include sahara/plugins/vanilla/v2_7_1/resources/*.xml
 include sahara/plugins/hdp/versions/version_1_3_2/resources/*.template
@@ -5,19 +5,13 @@ The vanilla plugin is a reference implementation which allows users to operate
 a cluster with Apache Hadoop.
 
 For cluster provisioning prepared images should be used. They already have
-Apache Hadoop 1.2.1 and Apache Hadoop 2.7.1 installed.
+Apache Hadoop 2.7.1 installed.
 
 You may build images by yourself using :doc:`vanilla_imagebuilder` or you could
 download prepared images from http://sahara-files.mirantis.com/images/upstream/liberty/
 
-Keep in mind that if you want to use the Swift Integration feature
-( :doc:`features`),
-Hadoop 1.2.1 must be patched with an implementation of Swift File System.
-For more information about patching required by the Swift Integration feature
-see :doc:`hadoop-swift`.
-
 Vanilla plugin requires an image to be tagged in Sahara Image Registry with
-two tags: 'vanilla' and '<hadoop version>' (e.g. '1.2.1').
+two tags: 'vanilla' and '<hadoop version>' (e.g. '2.7.1').
 
 The default username specified for these images is different
 for each distribution:
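The image-tagging requirement covered by this hunk can also be satisfied from the command line. A minimal sketch, assuming python-saharaclient's OpenStack CLI plugin is installed and a suitable Glance image named ``ubuntu-vanilla-2.7.1`` has already been uploaded (the image name and login user below are illustrative, not part of this change)::

    # register the image with sahara, then apply the two required tags
    $ openstack dataprocessing image register ubuntu-vanilla-2.7.1 --username ubuntu
    $ openstack dataprocessing image tags add ubuntu-vanilla-2.7.1 --tags vanilla 2.7.1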
@@ -48,18 +42,6 @@ the cluster topology requested by user is verified for consistency.
 Currently there are the following limitations in cluster topology for Vanilla
 plugin:
 
-For Vanilla Hadoop version 1.X.X:
-
-+ Cluster must contain exactly one namenode
-+ Cluster can contain at most one jobtracker
-+ Cluster can contain at most one secondary namenode
-+ Cluster can contain at most one oozie and this process is also required
-  for EDP
-+ Cluster can't contain oozie without jobtracker
-+ Cluster can't have tasktracker nodes if it doesn't have jobtracker
-+ Cluster can't have hive node if it doesn't have jobtracker.
-+ Cluster can have at most one hive node.
-
 For Vanilla Hadoop version 2.X.X:
 
 + Cluster must contain exactly one namenode
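With the 1.X.X rules gone, only the 2.X.X topology constraints above remain. Before building templates against an upgraded deployment it can be worth confirming which Hadoop versions the vanilla plugin still advertises; a minimal sketch, again assuming the python-saharaclient OpenStack CLI plugin (the expected output is paraphrased, not captured from a real deployment)::

    $ openstack dataprocessing plugin show vanilla
    # once a deployment carries this change, the Versions field should list
    # only 2.x releases, e.g. 2.6.0 and 2.7.1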
@@ -1,73 +0,0 @@
-clusters:
-  - plugin_name: vanilla
-    plugin_version: 1.2.1
-    image: ${vanilla_image}
-    node_group_templates:
-      - name: worker-tt-dn
-        flavor: ${ci_flavor_id}
-        node_processes:
-          - datanode
-          - tasktracker
-        volumes_per_node: 2
-        volumes_size: 2
-        auto_security_group: true
-      - name: worker-tt
-        flavor: ${ci_flavor_id}
-        node_processes:
-          - tasktracker
-        auto_security_group: true
-      - name: worker-dn
-        flavor: ${ci_flavor_id}
-        node_processes:
-          - datanode
-        volumes_per_node: 2
-        volumes_size: 2
-        auto_security_group: true
-      - name: master-jt-nn
-        flavor: ${ci_flavor_id}
-        node_processes:
-          - namenode
-          - jobtracker
-        auto_security_group: true
-      - name: master-sec-nn-oz
-        flavor: ${ci_flavor_id}
-        node_processes:
-          - oozie
-          - secondarynamenode
-        auto_security_group: true
-
-    cluster_template:
-      name: vanilla121
-      node_group_templates:
-        master-sec-nn-oz: 1
-        master-jt-nn: 1
-        worker-tt: 1
-        worker-tt-dn: 2
-        worker-dn: 1
-      cluster_configs:
-        HDFS:
-          dfs.replication: 1
-        MapReduce:
-          mapred.map.tasks.speculative.execution: False
-          mapred.child.java.opts: -Xmx512m
-        general:
-          'Enable Swift': True
-    cluster:
-      name: ${cluster_name}
-    scaling:
-      - operation: resize
-        node_group: worker-tt-dn
-        size: 1
-      - operation: resize
-        node_group: worker-dn
-        size: 0
-      - operation: resize
-        node_group: worker-tt
-        size: 0
-      - operation: add
-        node_group: worker-tt
-        size: 1
-      - operation: add
-        node_group: worker-dn
-        size: 1
-    edp_jobs_flow: hadoop_1
@@ -1,24 +0,0 @@
-{
-    "plugin_name": "vanilla",
-    "hadoop_version": "1.2.1",
-    "node_groups": [
-        {
-            "name": "worker",
-            "count": 3,
-            "node_group_template_id": "{vanilla-121-default-worker}"
-        },
-        {
-            "name": "secondary-master",
-            "count": 1,
-            "node_group_template_id": "{vanilla-121-default-secondary-master}"
-        },
-        {
-            "name": "master",
-            "count": 1,
-            "node_group_template_id": "{vanilla-121-default-master}"
-        }
-    ],
-    "name": "vanilla-121-default-cluster",
-    "neutron_management_network": "{neutron_management_network}",
-    "cluster_configs": {}
-}
@@ -1,11 +0,0 @@
-{
-    "plugin_name": "vanilla",
-    "hadoop_version": "1.2.1",
-    "node_processes": [
-        "namenode",
-        "jobtracker"
-    ],
-    "name": "vanilla-121-default-master",
-    "floating_ip_pool": "{floating_ip_pool}",
-    "flavor_id": "{flavor_id}"
-}
@@ -1,11 +0,0 @@
-{
-    "plugin_name": "vanilla",
-    "hadoop_version": "1.2.1",
-    "node_processes": [
-        "secondarynamenode",
-        "oozie"
-    ],
-    "name": "vanilla-121-default-secondary-master",
-    "floating_ip_pool": "{floating_ip_pool}",
-    "flavor_id": "{flavor_id}"
-}
@@ -1,11 +0,0 @@
-{
-    "plugin_name": "vanilla",
-    "hadoop_version": "1.2.1",
-    "node_processes": [
-        "tasktracker",
-        "datanode"
-    ],
-    "name": "vanilla-121-default-worker",
-    "floating_ip_pool": "{floating_ip_pool}",
-    "flavor_id": "{flavor_id}"
-}
@@ -1,500 +0,0 @@
-# Copyright (c) 2013 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import six
-
-from sahara import conductor as c
-from sahara import context
-from sahara import exceptions as ex
-from sahara.i18n import _
-from sahara.i18n import _LW
-from sahara.plugins import provisioning as p
-from sahara.plugins import utils
-from sahara.plugins.vanilla import utils as vu
-from sahara.plugins.vanilla.v1_2_1 import mysql_helper as m_h
-from sahara.plugins.vanilla.v1_2_1 import oozie_helper as o_h
-from sahara.swift import swift_helper as swift
-from sahara.topology import topology_helper as topology
-from sahara.utils import crypto
-from sahara.utils import types as types
-from sahara.utils import xmlutils as x
-
-
-conductor = c.API
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-CORE_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v1_2_1/resources/core-default.xml')
-
-HDFS_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v1_2_1/resources/hdfs-default.xml')
-
-MAPRED_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
-
-HIVE_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v1_2_1/resources/hive-default.xml')
-
-# Append Oozie configs fore core-site.xml
-CORE_DEFAULT += o_h.OOZIE_CORE_DEFAULT
-
-XML_CONFS = {
-    "HDFS": [CORE_DEFAULT, HDFS_DEFAULT],
-    "MapReduce": [MAPRED_DEFAULT],
-    "JobFlow": [o_h.OOZIE_DEFAULT],
-    "Hive": [HIVE_DEFAULT]
-}
-
-ENV_CONFS = {
-    "MapReduce": {
-        'Job Tracker Heap Size': 'HADOOP_JOBTRACKER_OPTS=\\"-Xmx%sm\\"',
-        'Task Tracker Heap Size': 'HADOOP_TASKTRACKER_OPTS=\\"-Xmx%sm\\"'
-    },
-    "HDFS": {
-        'Name Node Heap Size': 'HADOOP_NAMENODE_OPTS=\\"-Xmx%sm\\"',
-        'Secondary Name Node Heap Size': 'HADOOP_SECONDARYNAMENODE_OPTS='
-                                         '\\"-Xmx%sm\\"',
-        'Data Node Heap Size': 'HADOOP_DATANODE_OPTS=\\"-Xmx%sm\\"'
-    },
-    "JobFlow": {
-        'Oozie Heap Size': 'CATALINA_OPTS -Xmx%sm'
-    }
-}
-
-ENABLE_SWIFT = p.Config('Enable Swift', 'general', 'cluster',
-                        config_type="bool", priority=1,
-                        default_value=True, is_optional=True)
-
-ENABLE_DATA_LOCALITY = p.Config('Enable Data Locality', 'general', 'cluster',
-                                config_type="bool", priority=1,
-                                default_value=True, is_optional=True)
-
-ENABLE_MYSQL = p.Config('Enable MySQL', 'general', 'cluster',
-                        config_type="bool", priority=1,
-                        default_value=True, is_optional=True)
-
-# Default set to 1 day, which is the default Keystone token
-# expiration time. After the token is expired we can't continue
-# scaling anyway.
-DECOMMISSIONING_TIMEOUT = p.Config('Decommissioning Timeout', 'general',
-                                   'cluster', config_type='int', priority=1,
-                                   default_value=86400, is_optional=True,
-                                   description='Timeout for datanode'
-                                               ' decommissioning operation'
-                                               ' during scaling, in seconds')
-
-DATANODES_STARTUP_TIMEOUT = p.Config(
-    'Datanodes startup timeout', 'general', 'cluster', config_type='int',
-    priority=1, default_value=10800, is_optional=True,
-    description='Timeout for datanodes startup, in seconds')
-
-
-HIDDEN_CONFS = ['fs.default.name', 'dfs.name.dir', 'dfs.data.dir',
-                'mapred.job.tracker', 'mapred.system.dir', 'mapred.local.dir',
-                'hadoop.proxyuser.hadoop.hosts',
-                'hadoop.proxyuser.hadoop.groups']
-
-CLUSTER_WIDE_CONFS = ['dfs.block.size', 'dfs.permissions', 'dfs.replication',
-                      'dfs.replication.min', 'dfs.replication.max',
-                      'io.file.buffer.size', 'mapreduce.job.counters.max',
-                      'mapred.output.compress', 'io.compression.codecs',
-                      'mapred.output.compression.codec',
-                      'mapred.output.compression.type',
-                      'mapred.compress.map.output',
-                      'mapred.map.output.compression.codec']
-
-PRIORITY_1_CONFS = ['dfs.datanode.du.reserved',
-                    'dfs.datanode.failed.volumes.tolerated',
-                    'dfs.datanode.max.xcievers', 'dfs.datanode.handler.count',
-                    'dfs.namenode.handler.count', 'mapred.child.java.opts',
-                    'mapred.jobtracker.maxtasks.per.job',
-                    'mapred.job.tracker.handler.count',
-                    'mapred.map.child.java.opts',
-                    'mapred.reduce.child.java.opts',
-                    'io.sort.mb', 'mapred.tasktracker.map.tasks.maximum',
-                    'mapred.tasktracker.reduce.tasks.maximum']
-
-# for now we have not so many cluster-wide configs
-# lets consider all of them having high priority
-PRIORITY_1_CONFS += CLUSTER_WIDE_CONFS
-
-
-def _initialise_configs():
-    configs = []
-    for service, config_lists in six.iteritems(XML_CONFS):
-        for config_list in config_lists:
-            for config in config_list:
-                if config['name'] not in HIDDEN_CONFS:
-                    cfg = p.Config(config['name'], service, "node",
-                                   is_optional=True, config_type="string",
-                                   default_value=str(config['value']),
-                                   description=config['description'])
-                    if cfg.default_value in ["true", "false"]:
-                        cfg.config_type = "bool"
-                        cfg.default_value = (cfg.default_value == 'true')
-                    elif types.is_int(cfg.default_value):
-                        cfg.config_type = "int"
-                        cfg.default_value = int(cfg.default_value)
-                    if config['name'] in CLUSTER_WIDE_CONFS:
-                        cfg.scope = 'cluster'
-                    if config['name'] in PRIORITY_1_CONFS:
-                        cfg.priority = 1
-                    configs.append(cfg)
-
-    for service, config_items in six.iteritems(ENV_CONFS):
-        for name, param_format_str in six.iteritems(config_items):
-            configs.append(p.Config(name, service, "node",
-                                    default_value=1024, priority=1,
-                                    config_type="int"))
-
-    configs.append(ENABLE_SWIFT)
-    configs.append(ENABLE_MYSQL)
-    configs.append(DECOMMISSIONING_TIMEOUT)
-    configs.append(DATANODES_STARTUP_TIMEOUT)
-    if CONF.enable_data_locality:
-        configs.append(ENABLE_DATA_LOCALITY)
-
-    return configs
-
-# Initialise plugin Hadoop configurations
-PLUGIN_CONFIGS = _initialise_configs()
-
-
-def get_plugin_configs():
-    return PLUGIN_CONFIGS
-
-
-def get_general_configs(hive_hostname, passwd_hive_mysql):
-    config = {
-        ENABLE_SWIFT.name: {
-            'default_value': ENABLE_SWIFT.default_value,
-            'conf': extract_name_values(swift.get_swift_configs())
-        },
-        ENABLE_MYSQL.name: {
-            'default_value': ENABLE_MYSQL.default_value,
-            'conf': m_h.get_required_mysql_configs(
-                hive_hostname, passwd_hive_mysql)
-        }
-    }
-    if CONF.enable_data_locality:
-        config.update({
-            ENABLE_DATA_LOCALITY.name: {
-                'default_value': ENABLE_DATA_LOCALITY.default_value,
-                'conf': extract_name_values(topology.vm_awareness_all_config())
-            }
-        })
-    return config
-
-
-def get_config_value(service, name, cluster=None):
-    if cluster:
-        sahara_configs = generate_sahara_configs(cluster)
-        if sahara_configs.get(name):
-            return sahara_configs[name]
-
-        for ng in cluster.node_groups:
-            if (ng.configuration().get(service) and
-                    ng.configuration()[service].get(name)):
-                return ng.configuration()[service][name]
-
-    for configs in PLUGIN_CONFIGS:
-        if configs.applicable_target == service and configs.name == name:
-            return configs.default_value
-
-    raise ex.ConfigurationError(_("Unable get parameter '%(parameter)s' from "
-                                  "service %(service)s")
-                                % {"parameter": name, "service": service})
-
-
-def generate_cfg_from_general(cfg, configs, general_config,
-                              rest_excluded=False):
-    if 'general' in configs:
-        for nm in general_config:
-            if nm not in configs['general'] and not rest_excluded:
-                configs['general'][nm] = general_config[nm]['default_value']
-        for name, value in configs['general'].items():
-            if value:
-                cfg = _set_config(cfg, general_config, name)
-                LOG.debug("Applying config: {config}".format(config=name))
-    else:
-        cfg = _set_config(cfg, general_config)
-    return cfg
-
-
-def get_hadoop_ssh_keys(cluster):
-    extra = cluster.extra.to_dict() if cluster.extra else {}
-    private_key = extra.get('hadoop_private_ssh_key')
-    public_key = extra.get('hadoop_public_ssh_key')
-    if not private_key or not public_key:
-        private_key, public_key = crypto.generate_key_pair()
-        extra['hadoop_private_ssh_key'] = private_key
-        extra['hadoop_public_ssh_key'] = public_key
-        conductor.cluster_update(context.ctx(), cluster, {'extra': extra})
-
-    return private_key, public_key
-
-
-def generate_sahara_configs(cluster, node_group=None):
-    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
-    jt_hostname = vu.get_instance_hostname(vu.get_jobtracker(cluster))
-    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
-    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
-
-    storage_path = node_group.storage_paths() if node_group else None
-
-    # inserting common configs depends on provisioned VMs and HDFS placement
-    # TODO(aignatov): should be moved to cluster context
-
-    cfg = {
-        'fs.default.name': 'hdfs://%s:8020' % nn_hostname,
-        'dfs.name.dir': extract_hadoop_path(storage_path,
-                                            '/lib/hadoop/hdfs/namenode'),
-        'dfs.data.dir': extract_hadoop_path(storage_path,
-                                            '/lib/hadoop/hdfs/datanode'),
-        'dfs.hosts': '/etc/hadoop/dn.incl',
-        'dfs.hosts.exclude': '/etc/hadoop/dn.excl',
-    }
-
-    if jt_hostname:
-        mr_cfg = {
-            'mapred.job.tracker': '%s:8021' % jt_hostname,
-            'mapred.system.dir': extract_hadoop_path(storage_path,
-                                                     '/mapred/mapredsystem'),
-            'mapred.local.dir': extract_hadoop_path(storage_path,
-                                                    '/lib/hadoop/mapred'),
-            'mapred.hosts': '/etc/hadoop/tt.incl',
-            'mapred.hosts.exclude': '/etc/hadoop/tt.excl',
-        }
-        cfg.update(mr_cfg)
-
-    if oozie_hostname:
-        o_cfg = {
-            'hadoop.proxyuser.hadoop.hosts': "localhost," + oozie_hostname,
-            'hadoop.proxyuser.hadoop.groups': 'hadoop',
-        }
-        cfg.update(o_cfg)
-        LOG.debug('Applied Oozie configs for core-site.xml')
-        cfg.update(o_h.get_oozie_required_xml_configs())
-        LOG.debug('Applied Oozie configs for oozie-site.xml')
-
-    if hive_hostname:
-        h_cfg = {
-            'hive.warehouse.subdir.inherit.perms': True,
-            'javax.jdo.option.ConnectionURL':
-            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
-        }
-        cfg.update(h_cfg)
-        LOG.debug('Applied Hive config for hive metastore server')
-
-    return cfg
-
-
-def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
-    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
-    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
-
-    ng_configs = node_group.configuration()
-
-    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)
-
-    all_cfg = generate_sahara_configs(cluster, node_group)
-
-    # inserting user-defined configs
-    for key, value in extract_xml_confs(ng_configs):
-        all_cfg[key] = value
-
-    # applying swift configs if user enabled it
-    swift_xml_confs = swift.get_swift_configs()
-    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)
-
-    # invoking applied configs to appropriate xml files
-    core_all = CORE_DEFAULT + swift_xml_confs
-    mapred_all = MAPRED_DEFAULT
-
-    if CONF.enable_data_locality:
-        all_cfg.update(topology.TOPOLOGY_CONFIG)
-
-        # applying vm awareness configs
-        core_all += topology.vm_awareness_core_config()
-        mapred_all += topology.vm_awareness_mapred_config()
-
-    xml_configs = {
-        'core-site': x.create_hadoop_xml(all_cfg, core_all),
-        'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all),
-        'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT)
-    }
-
-    if hive_hostname:
-        cfg = all_cfg
-        cfg_filter = HIVE_DEFAULT
-        proxy_configs = cluster.cluster_configs.get('proxy_configs')
-        if CONF.use_identity_api_v3 and proxy_configs:
-            cfg, cfg_filter = _inject_swift_trust_info(cfg,
-                                                       cfg_filter,
-                                                       proxy_configs)
-        xml_configs.update({'hive-site':
-                            x.create_hadoop_xml(cfg, cfg_filter)})
-        LOG.debug('Generated hive-site.xml for hive {host}'.format(
-            host=hive_hostname))
-
-    if oozie_hostname:
-        xml_configs.update({'oozie-site':
-                            x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
-        LOG.debug('Generated oozie-site.xml for oozie {host}'.format(
-            host=oozie_hostname))
-
-    return xml_configs
-
-
-def _inject_swift_trust_info(cfg, cfg_filter, proxy_configs):
-    cfg = cfg.copy()
-    cfg.update({
-        swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
-        swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
-        swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
-        swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
-    })
-
-    allow_swift_auth_filter = [
-        {'name': swift.HADOOP_SWIFT_USERNAME},
-        {'name': swift.HADOOP_SWIFT_PASSWORD},
-        {'name': swift.HADOOP_SWIFT_TRUST_ID},
-        {'name': swift.HADOOP_SWIFT_DOMAIN_NAME}
-    ]
-    cfg_filter = cfg_filter + allow_swift_auth_filter
-
-    return cfg, cfg_filter
-
-
-def extract_environment_confs(configs):
-    """Returns environment specific Hadoop configurations.
-
-    :returns list of Hadoop parameters which should be passed via environment
-    """
-
-    lst = []
-    for service, srv_confs in configs.items():
-        if ENV_CONFS.get(service):
-            for param_name, param_value in srv_confs.items():
-                for cfg_name, cfg_format_str in ENV_CONFS[service].items():
-                    if param_name == cfg_name and param_value is not None:
-                        lst.append(cfg_format_str % param_value)
-        else:
-            LOG.warning(_LW("Plugin received wrong applicable target {service}"
-                            " in environmental configs").format(
-                service=service))
-    return sorted(lst)
-
-
-def extract_xml_confs(configs):
-    """Returns xml specific Hadoop configurations.
-
-    :returns list of Hadoop parameters which should be passed into general
-    configs like core-site.xml
-    """
-
-    lst = []
-    for service, srv_confs in configs.items():
-        if XML_CONFS.get(service):
-            for param_name, param_value in srv_confs.items():
-                for cfg_list in XML_CONFS[service]:
-                    names = [cfg['name'] for cfg in cfg_list]
-                    if param_name in names and param_value is not None:
-                        lst.append((param_name, param_value))
-        else:
-            LOG.warning(_LW("Plugin received wrong applicable target {service}"
-                            " for xml configs").format(service=service))
-    return sorted(lst)
-
-
-def generate_setup_script(storage_paths, env_configs, append_oozie=False):
-    script_lines = ["#!/bin/bash -x"]
-    script_lines.append("echo -n > /tmp/hadoop-env.sh")
-    for line in env_configs:
-        if 'HADOOP' in line:
-            script_lines.append('echo "%s" >> /tmp/hadoop-env.sh' % line)
-    script_lines.append("cat /etc/hadoop/hadoop-env.sh >> /tmp/hadoop-env.sh")
-    script_lines.append("cp /tmp/hadoop-env.sh /etc/hadoop/hadoop-env.sh")
-
-    hadoop_log = storage_paths[0] + "/log/hadoop/\$USER/"
-    script_lines.append('sed -i "s,export HADOOP_LOG_DIR=.*,'
-                        'export HADOOP_LOG_DIR=%s," /etc/hadoop/hadoop-env.sh'
-                        % hadoop_log)
-
-    hadoop_log = storage_paths[0] + "/log/hadoop/hdfs"
-    script_lines.append('sed -i "s,export HADOOP_SECURE_DN_LOG_DIR=.*,'
-                        'export HADOOP_SECURE_DN_LOG_DIR=%s," '
-                        '/etc/hadoop/hadoop-env.sh' % hadoop_log)
-
-    if append_oozie:
-        o_h.append_oozie_setup(script_lines, env_configs)
-
-    for path in storage_paths:
-        script_lines.append("chown -R hadoop:hadoop %s" % path)
-        script_lines.append("chmod -R 755 %s" % path)
-    return "\n".join(script_lines)
-
-
-def extract_name_values(configs):
-    return {cfg['name']: cfg['value'] for cfg in configs}
-
-
-def extract_hadoop_path(lst, hadoop_dir):
-    if lst:
-        return ",".join([p + hadoop_dir for p in lst])
-
-
-def _set_config(cfg, gen_cfg, name=None):
-    if name in gen_cfg:
-        cfg.update(gen_cfg[name]['conf'])
-    if name is None:
-        for name in gen_cfg:
-            cfg.update(gen_cfg[name]['conf'])
-    return cfg
-
-
-def _get_general_cluster_config_value(cluster, option):
-    conf = cluster.cluster_configs
-
-    if 'general' in conf and option.name in conf['general']:
-        return conf['general'][option.name]
-
-    return option.default_value
-
-
-def is_mysql_enable(cluster):
-    return _get_general_cluster_config_value(cluster, ENABLE_MYSQL)
-
-
-def is_data_locality_enabled(cluster):
-    if not CONF.enable_data_locality:
-        return False
-    return _get_general_cluster_config_value(cluster, ENABLE_DATA_LOCALITY)
-
-
-def is_swift_enable(cluster):
-    return _get_general_cluster_config_value(cluster, ENABLE_SWIFT)
-
-
-def get_decommissioning_timeout(cluster):
-    return _get_general_cluster_config_value(cluster, DECOMMISSIONING_TIMEOUT)
-
-
-def get_port_from_config(service, name, cluster=None):
-    address = get_config_value(service, name, cluster)
-    return utils.get_port_from_address(address)
@@ -1,43 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from sahara.plugins.vanilla import confighints_helper as ch_helper
-from sahara.plugins.vanilla import edp_engine
-from sahara.service.edp import hdfs_helper
-from sahara.utils import edp
-
-
-class EdpOozieEngine(edp_engine.EdpOozieEngine):
-
-    def create_hdfs_dir(self, remote, dir_name):
-        hdfs_helper.create_dir_hadoop1(remote, dir_name, self.get_hdfs_user())
-
-    @staticmethod
-    def get_possible_job_config(job_type):
-        if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
-            return {'job_config': ch_helper.get_possible_hive_config_from(
-                'plugins/vanilla/v1_2_1/resources/hive-default.xml')}
-        if edp.compare_job_type(job_type,
-                                edp.JOB_TYPE_MAPREDUCE,
-                                edp.JOB_TYPE_MAPREDUCE_STREAMING):
-            return {'job_config': ch_helper.get_possible_mapreduce_config_from(
-                'plugins/vanilla/v1_2_1/resources/mapred-default.xml')}
-        if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
-            return {'job_config': ch_helper.get_possible_pig_config_from(
-                'plugins/vanilla/v1_2_1/resources/mapred-default.xml')}
-        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
-
-    def get_resource_manager_uri(self, cluster):
-        return cluster['info']['MapReduce']['JobTracker']
@@ -1,45 +0,0 @@
-# Copyright (c) 2013 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-def get_hive_mysql_configs(metastore_host, passwd):
-    return {
-        'javax.jdo.option.ConnectionURL': 'jdbc:mysql://%s/metastore' %
-                                          metastore_host,
-        'javax.jdo.option.ConnectionDriverName': 'com.mysql.jdbc.Driver',
-        'javax.jdo.option.ConnectionUserName': 'hive',
-        'javax.jdo.option.ConnectionPassword': passwd,
-        'datanucleus.autoCreateSchema': 'false',
-        'datanucleus.fixedDatastore': 'true',
-        'hive.metastore.uris': 'thrift://%s:9083' % metastore_host,
-    }
-
-
-def get_oozie_mysql_configs():
-    return {
-        'oozie.service.JPAService.jdbc.driver':
-        'com.mysql.jdbc.Driver',
-        'oozie.service.JPAService.jdbc.url':
-        'jdbc:mysql://localhost:3306/oozie',
-        'oozie.service.JPAService.jdbc.username': 'oozie',
-        'oozie.service.JPAService.jdbc.password': 'oozie'
-    }
-
-
-def get_required_mysql_configs(hive_hostname, passwd_mysql):
-    configs = get_oozie_mysql_configs()
-    if hive_hostname:
-        configs.update(get_hive_mysql_configs(hive_hostname, passwd_mysql))
-    return configs
@@ -1,62 +0,0 @@
-# Copyright (c) 2013 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from sahara.utils import xmlutils as x
-
-
-OOZIE_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v1_2_1/resources/oozie-default.xml')
-
-OOZIE_CORE_DEFAULT = [
-    {
-        'name': 'hadoop.proxyuser.hadoop.hosts',
-        'value': "localhost"
-    },
-    {
-        'name': 'hadoop.proxyuser.hadoop.groups',
-        'value': 'hadoop'
-    }]
-
-OOZIE_HEAPSIZE_DEFAULT = "CATALINA_OPTS -Xmx1024m"
-
-
-def get_oozie_required_xml_configs():
-    """Following configs differ from default configs in oozie-default.xml."""
-    return {
-        'oozie.service.ActionService.executor.ext.classes':
-        'org.apache.oozie.action.email.EmailActionExecutor,'
-        'org.apache.oozie.action.hadoop.HiveActionExecutor,'
-        'org.apache.oozie.action.hadoop.ShellActionExecutor,'
-        'org.apache.oozie.action.hadoop.SqoopActionExecutor,'
-        'org.apache.oozie.action.hadoop.DistcpActionExecutor',
-
-        'oozie.service.SchemaService.wf.ext.schemas':
-        'shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,'
-        'email-action-0.1.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,'
-        'hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,'
-        'sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,'
-        'ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,'
-        'oozie-sla-0.1.xsd,oozie-sla-0.2.xsd',
-
-        'oozie.service.JPAService.create.db.schema': 'false',
-    }
-
-
-def append_oozie_setup(setup_script, env_configs):
-    for line in env_configs:
-        if 'CATALINA_OPT' in line:
-            setup_script.append('sed -i "s,%s,%s," '
-                                '/opt/oozie/conf/oozie-env.sh'
-                                % (OOZIE_HEAPSIZE_DEFAULT, line))
@@ -1,27 +0,0 @@
-Apache Hadoop Configurations for Sahara
-========================================
-
-This directory contains default XML configuration files:
-
-* core-default.xml
-* hdfs-default.xml
-* mapred-default.xml
-* oozie-default.xml
-* hive-default.xml
-
-These files are applied for Sahara's plugin of Apache Hadoop version 1.2.1,
-Oozie 4.0.0, Hive version 0.11.0.
-
-
-Files were taken from here:
-
-* `core-default.xml <https://github.com/apache/hadoop-common/blob/release-1.2.1/src/core/core-default.xml>`_
-* `hdfs-default.xml <https://github.com/apache/hadoop-common/blob/release-1.2.1/src/hdfs/hdfs-default.xml>`_
-* `mapred-default.xml <https://github.com/apache/hadoop-common/blob/release-1.2.1/src/mapred/mapred-default.xml>`_
-* `oozie-default.xml <https://github.com/apache/oozie/blob/release-4.0.0/core/src/main/resources/oozie-default.xml>`_
-* `hive-default.xml <https://github.com/apache/hive/blob/release-0.11.0/conf/hive-default.xml.template>`_
-
-XML configs are used to expose default Hadoop configurations to the users through
-the Sahara's REST API. It allows users to override some config values which will
-be pushed to the provisioned VMs running Hadoop services as part of appropriate
-xml config.
@@ -1,632 +0,0 @@
|
|||||||
<?xml version="1.0"?>
|
|
||||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
|
||||||
|
|
||||||
<!-- Do not modify this file directly. Instead, copy entries that you -->
|
|
||||||
<!-- wish to modify from this file into core-site.xml and change them -->
|
|
||||||
<!-- there. If core-site.xml does not already exist, create it. -->
|
|
||||||
|
|
||||||
<configuration>
|
|
||||||
|
|
||||||
<!--- global properties -->
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.tmp.dir</name>
|
|
||||||
<value>/tmp/hadoop-${user.name}</value>
|
|
||||||
<description>A base for other temporary directories.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.native.lib</name>
|
|
||||||
<value>true</value>
|
|
||||||
<description>Should native hadoop libraries, if present, be used.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.http.filter.initializers</name>
|
|
||||||
<value></value>
|
|
||||||
<description>A comma separated list of class names. Each class in the list
|
|
||||||
must extend org.apache.hadoop.http.FilterInitializer. The corresponding
|
|
||||||
Filter will be initialized. Then, the Filter will be applied to all user
|
|
||||||
facing jsp and servlet web pages. The ordering of the list defines the
|
|
||||||
ordering of the filters.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.security.group.mapping</name>
|
|
||||||
<value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
|
|
||||||
<description>Class for user to group mapping (get groups for a given user)
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.security.authorization</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>Is service-level authorization enabled?</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.security.instrumentation.requires.admin</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
Indicates if administrator ACLs are required to access
|
|
||||||
instrumentation servlets (JMX, METRICS, CONF, STACKS).
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.security.authentication</name>
|
|
||||||
<value>simple</value>
|
|
||||||
<description>Possible values are simple (no authentication), and kerberos
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.security.token.service.use_ip</name>
|
|
||||||
<value>true</value>
|
|
||||||
<description>Controls whether tokens always use IP addresses. DNS changes
|
|
||||||
will not be detected if this option is enabled. Existing client connections
|
|
||||||
that break will always reconnect to the IP of the original host. New clients
|
|
||||||
will connect to the host's new IP but fail to locate a token. Disabling
|
|
||||||
this option will allow existing and new clients to detect an IP change and
|
|
||||||
continue to locate the new host's token.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.security.use-weak-http-crypto</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>If enabled, use KSSL to authenticate HTTP connections to the
|
|
||||||
NameNode. Due to a bug in JDK6, using KSSL requires one to configure
|
|
||||||
Kerberos tickets to use encryption types that are known to be
|
|
||||||
cryptographically weak. If disabled, SPNEGO will be used for HTTP
|
|
||||||
authentication, which supports stronger encryption types.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<!--
|
|
||||||
<property>
|
|
||||||
<name>hadoop.security.service.user.name.key</name>
|
|
||||||
<value></value>
|
|
||||||
<description>Name of the kerberos principal of the user that owns
|
|
||||||
a given service daemon
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
-->
|
|
||||||
|
|
||||||
<!--- logging properties -->
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.logfile.size</name>
|
|
||||||
<value>10000000</value>
|
|
||||||
<description>The max size of each log file</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.logfile.count</name>
|
|
||||||
<value>10</value>
|
|
||||||
<description>The max number of log files</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<!-- i/o properties -->
|
|
||||||
<property>
|
|
||||||
<name>io.file.buffer.size</name>
|
|
||||||
<value>4096</value>
|
|
||||||
<description>The size of buffer for use in sequence files.
|
|
||||||
The size of this buffer should probably be a multiple of hardware
|
|
||||||
page size (4096 on Intel x86), and it determines how much data is
|
|
||||||
buffered during read and write operations.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>io.bytes.per.checksum</name>
|
|
||||||
<value>512</value>
|
|
||||||
<description>The number of bytes per checksum. Must not be larger than
|
|
||||||
io.file.buffer.size.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>io.skip.checksum.errors</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>If true, when a checksum error is encountered while
|
|
||||||
reading a sequence file, entries are skipped, instead of throwing an
|
|
||||||
exception.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>io.compression.codecs</name>
|
|
||||||
<value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec</value>
|
|
||||||
<description>A list of the compression codec classes that can be used
|
|
||||||
for compression/decompression.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>io.serializations</name>
|
|
||||||
<value>org.apache.hadoop.io.serializer.WritableSerialization</value>
|
|
||||||
<description>A list of serialization classes that can be used for
|
|
||||||
obtaining serializers and deserializers.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<!-- file system properties -->
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.default.name</name>
|
|
||||||
<value>file:///</value>
|
|
||||||
<description>The name of the default file system. A URI whose
|
|
||||||
scheme and authority determine the FileSystem implementation. The
|
|
||||||
uri's scheme determines the config property (fs.SCHEME.impl) naming
|
|
||||||
the FileSystem implementation class. The uri's authority is used to
|
|
||||||
determine the host, port, etc. for a filesystem.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.trash.interval</name>
|
|
||||||
<value>0</value>
|
|
||||||
<description>Number of minutes between trash checkpoints.
|
|
||||||
If zero, the trash feature is disabled.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.file.impl</name>
|
|
||||||
<value>org.apache.hadoop.fs.LocalFileSystem</value>
|
|
||||||
<description>The FileSystem for file: uris.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.hdfs.impl</name>
|
|
||||||
<value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
|
|
||||||
<description>The FileSystem for hdfs: uris.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.s3.impl</name>
|
|
||||||
<value>org.apache.hadoop.fs.s3.S3FileSystem</value>
|
|
||||||
<description>The FileSystem for s3: uris.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.s3n.impl</name>
|
|
||||||
<value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
|
|
||||||
<description>The FileSystem for s3n: (Native S3) uris.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.kfs.impl</name>
|
|
||||||
<value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
|
|
||||||
<description>The FileSystem for kfs: uris.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.hftp.impl</name>
|
|
||||||
<value>org.apache.hadoop.hdfs.HftpFileSystem</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.hsftp.impl</name>
|
|
||||||
<value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.webhdfs.impl</name>
|
|
||||||
<value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.ftp.impl</name>
|
|
||||||
<value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
|
|
||||||
<description>The FileSystem for ftp: uris.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.ramfs.impl</name>
|
|
||||||
<value>org.apache.hadoop.fs.InMemoryFileSystem</value>
|
|
||||||
<description>The FileSystem for ramfs: uris.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.har.impl</name>
|
|
||||||
<value>org.apache.hadoop.fs.HarFileSystem</value>
|
|
||||||
<description>The filesystem for Hadoop archives. </description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.har.impl.disable.cache</name>
|
|
||||||
<value>true</value>
|
|
||||||
<description>Don't cache 'har' filesystem instances.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.checkpoint.dir</name>
|
|
||||||
<value>${hadoop.tmp.dir}/dfs/namesecondary</value>
|
|
||||||
<description>Determines where on the local filesystem the DFS secondary
|
|
||||||
name node should store the temporary images to merge.
|
|
||||||
If this is a comma-delimited list of directories then the image is
|
|
||||||
replicated in all of the directories for redundancy.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.checkpoint.edits.dir</name>
|
|
||||||
<value>${fs.checkpoint.dir}</value>
|
|
||||||
<description>Determines where on the local filesystem the DFS secondary
|
|
||||||
name node should store the temporary edits to merge.
|
|
||||||
If this is a comma-delimited list of directoires then teh edits is
|
|
||||||
replicated in all of the directoires for redundancy.
|
|
||||||
Default value is same as fs.checkpoint.dir
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.checkpoint.period</name>
|
|
||||||
<value>3600</value>
|
|
||||||
<description>The number of seconds between two periodic checkpoints.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.checkpoint.size</name>
|
|
||||||
<value>67108864</value>
|
|
||||||
<description>The size of the current edit log (in bytes) that triggers
|
|
||||||
a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.s3.block.size</name>
|
|
||||||
<value>67108864</value>
|
|
||||||
<description>Block size to use when writing files to S3.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.s3.buffer.dir</name>
|
|
||||||
<value>${hadoop.tmp.dir}/s3</value>
|
|
||||||
<description>Determines where on the local filesystem the S3 filesystem
|
|
||||||
should store files before sending them to S3
|
|
||||||
(or after retrieving them from S3).
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.s3.maxRetries</name>
|
|
||||||
<value>4</value>
|
|
||||||
<description>The maximum number of retries for reading or writing files to S3,
|
|
||||||
before we signal failure to the application.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.s3.sleepTimeSeconds</name>
|
|
||||||
<value>10</value>
|
|
||||||
<description>The number of seconds to sleep between each S3 retry.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>local.cache.size</name>
|
|
||||||
<value>10737418240</value>
|
|
||||||
<description>The limit on the size of cache you want to keep, set by default
|
|
||||||
to 10GB. This will act as a soft limit on the cache directory for out of band data.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>io.seqfile.compress.blocksize</name>
|
|
||||||
<value>1000000</value>
|
|
||||||
<description>The minimum block size for compression in block compressed
|
|
||||||
SequenceFiles.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>io.seqfile.lazydecompress</name>
|
|
||||||
<value>true</value>
|
|
||||||
<description>Should values of block-compressed SequenceFiles be decompressed
|
|
||||||
only when necessary.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>io.seqfile.sorter.recordlimit</name>
|
|
||||||
<value>1000000</value>
|
|
||||||
<description>The limit on number of records to be kept in memory in a spill
|
|
||||||
in SequenceFiles.Sorter
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>io.mapfile.bloom.size</name>
|
|
||||||
<value>1048576</value>
|
|
||||||
<description>The size of BloomFilter-s used in BloomMapFile. Each time this many
|
|
||||||
keys is appended the next BloomFilter will be created (inside a DynamicBloomFilter).
|
|
||||||
Larger values minimize the number of filters, which slightly increases the performance,
|
|
||||||
but may waste too much space if the total number of keys is usually much smaller
|
|
||||||
than this number.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>io.mapfile.bloom.error.rate</name>
|
|
||||||
<value>0.005</value>
|
|
||||||
<description>The rate of false positives in BloomFilter-s used in BloomMapFile.
|
|
||||||
As this value decreases, the size of BloomFilter-s increases exponentially. This
|
|
||||||
value is the probability of encountering false positives (default is 0.5%).
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.util.hash.type</name>
|
|
||||||
<value>murmur</value>
|
|
||||||
<description>The default implementation of Hash. Currently this can take one of the
|
|
||||||
two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
|
|
||||||
<!-- ipc properties -->
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>ipc.client.idlethreshold</name>
|
|
||||||
<value>4000</value>
|
|
||||||
<description>Defines the threshold number of connections after which
|
|
||||||
connections will be inspected for idleness.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>ipc.client.kill.max</name>
|
|
||||||
<value>10</value>
|
|
||||||
<description>Defines the maximum number of clients to disconnect in one go.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>ipc.client.connection.maxidletime</name>
|
|
||||||
<value>10000</value>
|
|
||||||
<description>The maximum time in msec after which a client will bring down the
|
|
||||||
connection to the server.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>ipc.client.connect.max.retries</name>
|
|
||||||
<value>10</value>
|
|
||||||
<description>Indicates the number of retries a client will make to establish
|
|
||||||
a server connection.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>ipc.server.listen.queue.size</name>
|
|
||||||
<value>128</value>
|
|
||||||
<description>Indicates the length of the listen queue for servers accepting
|
|
||||||
client connections.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>ipc.server.tcpnodelay</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>Turn on/off Nagle's algorithm for the TCP socket connection on
|
|
||||||
the server. Setting to true disables the algorithm and may decrease latency
|
|
||||||
with a cost of more/smaller packets.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>ipc.client.tcpnodelay</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>Turn on/off Nagle's algorithm for the TCP socket connection on
|
|
||||||
the client. Setting to true disables the algorithm and may decrease latency
|
|
||||||
with a cost of more/smaller packets.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
|
|
||||||
<!-- Web Interface Configuration -->
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>webinterface.private.actions</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description> If set to true, the web interfaces of JT and NN may contain
|
|
||||||
actions, such as kill job, delete file, etc., that should
|
|
||||||
not be exposed to public. Enable this option if the interfaces
|
|
||||||
are only reachable by those who have the right authorization.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<!-- Proxy Configuration -->
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.rpc.socket.factory.class.default</name>
|
|
||||||
<value>org.apache.hadoop.net.StandardSocketFactory</value>
|
|
||||||
<description> Default SocketFactory to use. This parameter is expected to be
|
|
||||||
formatted as "package.FactoryClassName".
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
|
|
||||||
<value></value>
|
|
||||||
<description> SocketFactory to use to connect to a DFS. If null or empty, use
|
|
||||||
hadoop.rpc.socket.class.default. This socket factory is also used by
|
|
||||||
DFSClient to create sockets to DataNodes.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.socks.server</name>
|
|
||||||
<value></value>
|
|
||||||
<description> Address (host:port) of the SOCKS server to be used by the
|
|
||||||
SocksSocketFactory.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
|
|
||||||
<!-- Topology Configuration -->
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>topology.node.switch.mapping.impl</name>
|
|
||||||
<value>org.apache.hadoop.net.ScriptBasedMapping</value>
|
|
||||||
<description> The default implementation of the DNSToSwitchMapping. It
|
|
||||||
invokes a script specified in topology.script.file.name to resolve
|
|
||||||
node names. If the value for topology.script.file.name is not set, the
|
|
||||||
default value of DEFAULT_RACK is returned for all node names.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>net.topology.impl</name>
|
|
||||||
<value>org.apache.hadoop.net.NetworkTopology</value>
|
|
||||||
<description> The default implementation of NetworkTopology which is classic three layer one.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>topology.script.file.name</name>
|
|
||||||
<value></value>
|
|
||||||
<description> The script name that should be invoked to resolve DNS names to
|
|
||||||
NetworkTopology names. Example: the script would take host.foo.bar as an
|
|
||||||
argument, and return /rack1 as the output.
|
|
||||||
</description>
|
|
||||||
</property>
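
A concrete resolver makes the description above easier to follow. The sketch below is
illustrative only (the removed plugin actually ships a bash equivalent, shown further
down in this diff); the file location and data format are assumptions: it reads
/etc/hadoop/topology.data lines of the form "<host-or-ip> <rack>" and prints one rack
path per argument, e.g. host.foo.bar -> /rack1.

#!/usr/bin/env python
# Illustrative sketch only, not part of the removed plugin. A script usable as
# topology.script.file.name receives node names/IPs as arguments and prints one
# rack path per argument, falling back to a default rack for unknown hosts.
import sys

TOPOLOGY_FILE = '/etc/hadoop/topology.data'  # assumed location


def load_map(path):
    # Each line of topology.data is "<host-or-ip> <rack-path>".
    mapping = {}
    with open(path) as f:
        for line in f:
            parts = line.split()
            if len(parts) >= 2:
                mapping[parts[0]] = parts[1]
    return mapping


if __name__ == '__main__':
    rack_map = load_map(TOPOLOGY_FILE)
    # e.g. "host.foo.bar" -> "/rack1", unknown hosts -> "/default/rack"
    print(' '.join(rack_map.get(arg, '/default/rack') for arg in sys.argv[1:]))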
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>topology.script.number.args</name>
|
|
||||||
<value>100</value>
|
|
||||||
<description> The max number of args that the script configured with
|
|
||||||
topology.script.file.name should be run with. Each arg is an
|
|
||||||
IP address.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.security.uid.cache.secs</name>
|
|
||||||
<value>14400</value>
|
|
||||||
<description> NativeIO maintains a cache from UID to UserName. This is
|
|
||||||
the timeout for an entry in that cache. </description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<!-- HTTP web-consoles Authentication -->
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.http.authentication.type</name>
|
|
||||||
<value>simple</value>
|
|
||||||
<description>
|
|
||||||
Defines authentication used for Oozie HTTP endpoint.
|
|
||||||
Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.http.authentication.token.validity</name>
|
|
||||||
<value>36000</value>
|
|
||||||
<description>
|
|
||||||
Indicates how long (in seconds) an authentication token is valid before it has
|
|
||||||
to be renewed.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.http.authentication.signature.secret.file</name>
|
|
||||||
<value>${user.home}/hadoop-http-auth-signature-secret</value>
|
|
||||||
<description>
|
|
||||||
The signature secret for signing the authentication tokens.
|
|
||||||
If not set a random secret is generated at startup time.
|
|
||||||
The same secret should be used for JT/NN/DN/TT configurations.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.http.authentication.cookie.domain</name>
|
|
||||||
<value></value>
|
|
||||||
<description>
|
|
||||||
The domain to use for the HTTP cookie that stores the authentication token.
For authentication to work correctly across all Hadoop nodes' web-consoles,
the domain must be correctly set.
IMPORTANT: when using IP addresses, browsers ignore cookies with domain settings.
For this setting to work properly all nodes in the cluster must be configured
to generate URLs with hostname.domain names on them.
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.http.authentication.simple.anonymous.allowed</name>
|
|
||||||
<value>true</value>
|
|
||||||
<description>
|
|
||||||
Indicates if anonymous requests are allowed when using 'simple' authentication.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.http.authentication.kerberos.principal</name>
|
|
||||||
<value>HTTP/localhost@LOCALHOST</value>
|
|
||||||
<description>
|
|
||||||
Indicates the Kerberos principal to be used for HTTP endpoint.
|
|
||||||
The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.http.authentication.kerberos.keytab</name>
|
|
||||||
<value>${user.home}/hadoop.keytab</value>
|
|
||||||
<description>
|
|
||||||
Location of the keytab file with the credentials for the principal.
|
|
||||||
Referring to the same keytab file Oozie uses for its Kerberos credentials for Hadoop.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.relaxed.worker.version.check</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
By default datanodes refuse to connect to namenodes if their build
|
|
||||||
revision (svn revision) do not match, and tasktrackers refuse to
|
|
||||||
connect to jobtrackers if their build version (version, revision,
|
|
||||||
user, and source checksum) do not match. This option changes the
|
|
||||||
behavior of hadoop workers to only check for a version match (eg
|
|
||||||
"1.0.2") but ignore the other build fields (revision, user, and
|
|
||||||
source checksum).
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.skip.worker.version.check</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
By default datanodes refuse to connect to namenodes if their build
|
|
||||||
revision (svn revision) do not match, and tasktrackers refuse to
|
|
||||||
connect to jobtrackers if their build version (version, revision,
|
|
||||||
user, and source checksum) do not match. This option changes the
|
|
||||||
behavior of hadoop workers to skip doing a version check at all.
|
|
||||||
This option supersedes the 'hadoop.relaxed.worker.version.check'
|
|
||||||
option.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>hadoop.jetty.logs.serve.aliases</name>
|
|
||||||
<value>true</value>
|
|
||||||
<description>
|
|
||||||
Enable/Disable aliases serving from jetty
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>ipc.client.fallback-to-simple-auth-allowed</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
When a client is configured to attempt a secure connection, but attempts to
|
|
||||||
connect to an insecure server, that server may instruct the client to
|
|
||||||
switch to SASL SIMPLE (unsecure) authentication. This setting controls
|
|
||||||
whether or not the client will accept this instruction from the server.
|
|
||||||
When false (the default), the client will not allow the fallback to SIMPLE
|
|
||||||
authentication, and will abort the connection.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
</configuration>
|
|
@ -1,9 +0,0 @@
|
|||||||
CREATE DATABASE metastore;
USE metastore;
SOURCE /opt/hive/scripts/metastore/upgrade/mysql/hive-schema-0.10.0.mysql.sql;
CREATE USER 'hive'@'localhost' IDENTIFIED BY 'pass';
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'hive'@'localhost';
GRANT ALL PRIVILEGES ON metastore.* TO 'hive'@'localhost' IDENTIFIED BY 'pass';
GRANT ALL PRIVILEGES ON metastore.* TO 'hive'@'%' IDENTIFIED BY 'pass';
FLUSH PRIVILEGES;
exit
|
|
@ -1,4 +0,0 @@
|
|||||||
create database oozie;
grant all privileges on oozie.* to 'oozie'@'localhost' identified by 'oozie';
grant all privileges on oozie.* to 'oozie'@'%' identified by 'oozie';
exit
|
|
@ -1,709 +0,0 @@
|
|||||||
<?xml version="1.0"?>
|
|
||||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
|
||||||
|
|
||||||
<!-- Do not modify this file directly. Instead, copy entries that you -->
|
|
||||||
<!-- wish to modify from this file into hdfs-site.xml and change them -->
|
|
||||||
<!-- there. If hdfs-site.xml does not already exist, create it. -->
|
|
||||||
|
|
||||||
<configuration>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.logging.level</name>
|
|
||||||
<value>info</value>
|
|
||||||
<description>The logging level for dfs namenode. Other values are "dir" (trace
namespace mutations), "block" (trace block under/over replications and block
creations/deletions), or "all".</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.rpc-address</name>
|
|
||||||
<value></value>
|
|
||||||
<description>
|
|
||||||
RPC address that handles all client requests. If empty then we'll get the
|
|
||||||
value from fs.default.name.
|
|
||||||
The value of this property will take the form of hdfs://nn-host1:rpc-port.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.secondary.http.address</name>
|
|
||||||
<value>0.0.0.0:50090</value>
|
|
||||||
<description>
|
|
||||||
The secondary namenode http server address and port.
|
|
||||||
If the port is 0 then the server will start on a free port.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.address</name>
|
|
||||||
<value>0.0.0.0:50010</value>
|
|
||||||
<description>
|
|
||||||
The datanode server address and port for data transfer.
|
|
||||||
If the port is 0 then the server will start on a free port.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.http.address</name>
|
|
||||||
<value>0.0.0.0:50075</value>
|
|
||||||
<description>
|
|
||||||
The datanode http server address and port.
|
|
||||||
If the port is 0 then the server will start on a free port.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.ipc.address</name>
|
|
||||||
<value>0.0.0.0:50020</value>
|
|
||||||
<description>
|
|
||||||
The datanode ipc server address and port.
|
|
||||||
If the port is 0 then the server will start on a free port.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.handler.count</name>
|
|
||||||
<value>3</value>
|
|
||||||
<description>The number of server threads for the datanode.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.http.address</name>
|
|
||||||
<value>0.0.0.0:50070</value>
|
|
||||||
<description>
|
|
||||||
The address and the base port where the dfs namenode web ui will listen on.
|
|
||||||
If the port is 0 then the server will start on a free port.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.https.enable</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>Decide if HTTPS(SSL) is supported on HDFS
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.https.need.client.auth</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>Whether SSL client certificate authentication is required
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.https.server.keystore.resource</name>
|
|
||||||
<value>ssl-server.xml</value>
|
|
||||||
<description>Resource file from which ssl server keystore
|
|
||||||
information will be extracted
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.https.client.keystore.resource</name>
|
|
||||||
<value>ssl-client.xml</value>
|
|
||||||
<description>Resource file from which ssl client keystore
|
|
||||||
information will be extracted
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.https.address</name>
|
|
||||||
<value>0.0.0.0:50475</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.https.address</name>
|
|
||||||
<value>0.0.0.0:50470</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.dns.interface</name>
|
|
||||||
<value>default</value>
|
|
||||||
<description>The name of the Network Interface from which a data node should
|
|
||||||
report its IP address.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.dns.nameserver</name>
|
|
||||||
<value>default</value>
|
|
||||||
<description>The host name or IP address of the name server (DNS)
|
|
||||||
which a DataNode should use to determine the host name used by the
|
|
||||||
NameNode for communication and display purposes.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.replication.considerLoad</name>
|
|
||||||
<value>true</value>
|
|
||||||
<description>Decide if chooseTarget considers the target's load or not
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
<property>
|
|
||||||
<name>dfs.default.chunk.view.size</name>
|
|
||||||
<value>32768</value>
|
|
||||||
<description>The number of bytes to view for a file on the browser.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.du.reserved</name>
|
|
||||||
<value>0</value>
|
|
||||||
<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.name.dir</name>
|
|
||||||
<value>${hadoop.tmp.dir}/dfs/name</value>
|
|
||||||
<description>Determines where on the local filesystem the DFS name node
|
|
||||||
should store the name table(fsimage). If this is a comma-delimited list
|
|
||||||
of directories then the name table is replicated in all of the
|
|
||||||
directories, for redundancy. </description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.name.edits.dir</name>
|
|
||||||
<value>${dfs.name.dir}</value>
|
|
||||||
<description>Determines where on the local filesystem the DFS name node
|
|
||||||
should store the transaction (edits) file. If this is a comma-delimited list
|
|
||||||
of directories then the transaction file is replicated in all of the
|
|
||||||
directories, for redundancy. Default value is same as dfs.name.dir
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.edits.toleration.length</name>
|
|
||||||
<value>0</value>
|
|
||||||
<description>
|
|
||||||
The length in bytes that namenode is willing to tolerate when the edit log
|
|
||||||
is corrupted. The edit log toleration feature checks the entire edit log.
|
|
||||||
It computes read length (the length of valid data), corruption length and
|
|
||||||
padding length. In case that corruption length is non-zero, the corruption
|
|
||||||
will be tolerated only if the corruption length is less than or equal to
|
|
||||||
the toleration length.
|
|
||||||
|
|
||||||
For disabling edit log toleration feature, set this property to -1. When
|
|
||||||
the feature is disabled, the end of edit log will not be checked. In this
|
|
||||||
case, namenode will startup normally even if the end of edit log is
|
|
||||||
corrupted.
|
|
||||||
</description>
|
|
||||||
</property>
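
A minimal sketch of the toleration rule described above, assuming only what the
description states (the real check lives in the namenode's edit-log loader, not in
Sahara):

# Sketch of the edits-toleration decision, assumed from the description above.
def edits_corruption_tolerated(corruption_length, toleration_length):
    # toleration_length == -1 disables the feature: the end of the edit log
    # is simply not checked and startup proceeds normally.
    if toleration_length < 0:
        return True
    # Otherwise corruption is tolerated only up to the configured length.
    return corruption_length <= toleration_length

assert edits_corruption_tolerated(0, 0)        # clean log, default setting
assert not edits_corruption_tolerated(512, 0)  # any corruption fails with 0
assert edits_corruption_tolerated(512, -1)     # feature disabled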
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.web.ugi</name>
|
|
||||||
<value>webuser,webgroup</value>
|
|
||||||
<description>The user account used by the web interface.
|
|
||||||
Syntax: USERNAME,GROUP1,GROUP2, ...
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.permissions</name>
|
|
||||||
<value>true</value>
|
|
||||||
<description>
|
|
||||||
If "true", enable permission checking in HDFS.
|
|
||||||
If "false", permission checking is turned off,
|
|
||||||
but all other behavior is unchanged.
|
|
||||||
Switching from one parameter value to the other does not change the mode,
|
|
||||||
owner or group of files or directories.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.permissions.supergroup</name>
|
|
||||||
<value>supergroup</value>
|
|
||||||
<description>The name of the group of super-users.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.block.access.token.enable</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
If "true", access tokens are used as capabilities for accessing datanodes.
|
|
||||||
If "false", no access tokens are checked on accessing datanodes.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.block.access.key.update.interval</name>
|
|
||||||
<value>600</value>
|
|
||||||
<description>
|
|
||||||
Interval in minutes at which namenode updates its access keys.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.block.access.token.lifetime</name>
|
|
||||||
<value>600</value>
|
|
||||||
<description>The lifetime of access tokens in minutes.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.data.dir</name>
|
|
||||||
<value>${hadoop.tmp.dir}/dfs/data</value>
|
|
||||||
<description>Determines where on the local filesystem a DFS data node
|
|
||||||
should store its blocks. If this is a comma-delimited
|
|
||||||
list of directories, then data will be stored in all named
|
|
||||||
directories, typically on different devices.
|
|
||||||
Directories that do not exist are ignored.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.data.dir.perm</name>
|
|
||||||
<value>755</value>
|
|
||||||
<description>Permissions for the directories on the local filesystem where
the DFS data node stores its blocks. The permissions can either be octal or
symbolic.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.replication</name>
|
|
||||||
<value>3</value>
|
|
||||||
<description>Default block replication.
|
|
||||||
The actual number of replications can be specified when the file is created.
|
|
||||||
The default is used if replication is not specified in create time.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.replication.max</name>
|
|
||||||
<value>512</value>
|
|
||||||
<description>Maximal block replication.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.replication.min</name>
|
|
||||||
<value>1</value>
|
|
||||||
<description>Minimal block replication.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.block.size</name>
|
|
||||||
<value>67108864</value>
|
|
||||||
<description>The default block size for new files.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.df.interval</name>
|
|
||||||
<value>60000</value>
|
|
||||||
<description>Disk usage statistics refresh interval in msec.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.client.block.write.retries</name>
|
|
||||||
<value>3</value>
|
|
||||||
<description>The number of retries for writing blocks to the data nodes,
|
|
||||||
before we signal failure to the application.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.blockreport.intervalMsec</name>
|
|
||||||
<value>3600000</value>
|
|
||||||
<description>Determines block reporting interval in milliseconds.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.blockreport.initialDelay</name> <value>0</value>
|
|
||||||
<description>Delay for first block report in seconds.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.heartbeat.interval</name>
|
|
||||||
<value>3</value>
|
|
||||||
<description>Determines datanode heartbeat interval in seconds.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.handler.count</name>
|
|
||||||
<value>10</value>
|
|
||||||
<description>The number of server threads for the namenode.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.safemode.threshold.pct</name>
|
|
||||||
<value>0.999f</value>
|
|
||||||
<description>
|
|
||||||
Specifies the percentage of blocks that should satisfy
|
|
||||||
the minimal replication requirement defined by dfs.replication.min.
|
|
||||||
Values less than or equal to 0 mean not to wait for any particular
|
|
||||||
percentage of blocks before exiting safemode.
|
|
||||||
Values greater than 1 will make safe mode permanent.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.safemode.min.datanodes</name>
|
|
||||||
<value>0</value>
|
|
||||||
<description>
|
|
||||||
Specifies the number of datanodes that must be considered alive
|
|
||||||
before the name node exits safemode.
|
|
||||||
Values less than or equal to 0 mean not to take the number of live
|
|
||||||
datanodes into account when deciding whether to remain in safe mode
|
|
||||||
during startup.
|
|
||||||
Values greater than the number of datanodes in the cluster
|
|
||||||
will make safe mode permanent.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.safemode.extension</name>
|
|
||||||
<value>30000</value>
|
|
||||||
<description>
|
|
||||||
Determines extension of safe mode in milliseconds
|
|
||||||
after the threshold level is reached.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.balance.bandwidthPerSec</name>
|
|
||||||
<value>1048576</value>
|
|
||||||
<description>
|
|
||||||
Specifies the maximum amount of bandwidth that each datanode
|
|
||||||
can utilize for the balancing purpose in term of
|
|
||||||
the number of bytes per second.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.hosts</name>
|
|
||||||
<value></value>
|
|
||||||
<description>Names a file that contains a list of hosts that are
|
|
||||||
permitted to connect to the namenode. The full pathname of the file
|
|
||||||
must be specified. If the value is empty, all hosts are
|
|
||||||
permitted.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.hosts.exclude</name>
|
|
||||||
<value></value>
|
|
||||||
<description>Names a file that contains a list of hosts that are
|
|
||||||
not permitted to connect to the namenode. The full pathname of the
|
|
||||||
file must be specified. If the value is empty, no hosts are
|
|
||||||
excluded.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.max.objects</name>
|
|
||||||
<value>0</value>
|
|
||||||
<description>The maximum number of files, directories and blocks
|
|
||||||
dfs supports. A value of zero indicates no limit to the number
|
|
||||||
of objects that dfs supports.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.decommission.interval</name>
|
|
||||||
<value>30</value>
|
|
||||||
<description>Namenode periodicity in seconds to check if decommission is
|
|
||||||
complete.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.decommission.nodes.per.interval</name>
|
|
||||||
<value>5</value>
|
|
||||||
<description>The number of nodes namenode checks if decommission is complete
|
|
||||||
in each dfs.namenode.decommission.interval.</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.replication.interval</name>
|
|
||||||
<value>3</value>
|
|
||||||
<description>The periodicity in seconds with which the namenode computes
replication work for datanodes. </description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.access.time.precision</name>
|
|
||||||
<value>3600000</value>
|
|
||||||
<description>The access time for an HDFS file is precise up to this value.
The default value is 1 hour. Setting a value of 0 disables
access times for HDFS.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.support.append</name>
|
|
||||||
<description>
|
|
||||||
This option is no longer supported. HBase no longer requires that
|
|
||||||
this option be enabled as sync is now enabled by default. See
|
|
||||||
HADOOP-8230 for additional information.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.delegation.key.update-interval</name>
|
|
||||||
<value>86400000</value>
|
|
||||||
<description>The update interval for master key for delegation tokens
|
|
||||||
in the namenode in milliseconds.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.delegation.token.max-lifetime</name>
|
|
||||||
<value>604800000</value>
|
|
||||||
<description>The maximum lifetime in milliseconds for which a delegation
|
|
||||||
token is valid.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.delegation.token.renew-interval</name>
|
|
||||||
<value>86400000</value>
|
|
||||||
<description>The renewal interval for delegation token in milliseconds.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.failed.volumes.tolerated</name>
|
|
||||||
<value>0</value>
|
|
||||||
<description>The number of volumes that are allowed to
|
|
||||||
fail before a datanode stops offering service. By default
|
|
||||||
any volume failure will cause a datanode to shutdown.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.max.xcievers</name>
|
|
||||||
<value>4096</value>
|
|
||||||
<description>Specifies the maximum number of threads to use for transferring data
|
|
||||||
in and out of the DN.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.readahead.bytes</name>
|
|
||||||
<value>4193404</value>
|
|
||||||
<description>
|
|
||||||
While reading block files, if the Hadoop native libraries are available,
|
|
||||||
the datanode can use the posix_fadvise system call to explicitly
|
|
||||||
page data into the operating system buffer cache ahead of the current
|
|
||||||
reader's position. This can improve performance especially when
|
|
||||||
disks are highly contended.
|
|
||||||
|
|
||||||
This configuration specifies the number of bytes ahead of the current
|
|
||||||
read position which the datanode will attempt to read ahead. This
|
|
||||||
feature may be disabled by configuring this property to 0.
|
|
||||||
|
|
||||||
If the native libraries are not available, this configuration has no
|
|
||||||
effect.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.drop.cache.behind.reads</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
In some workloads, the data read from HDFS is known to be significantly
|
|
||||||
large enough that it is unlikely to be useful to cache it in the
|
|
||||||
operating system buffer cache. In this case, the DataNode may be
|
|
||||||
configured to automatically purge all data from the buffer cache
|
|
||||||
after it is delivered to the client. This behavior is automatically
|
|
||||||
disabled for workloads which read only short sections of a block
(e.g. HBase random-IO workloads).

This may improve performance for some workloads by freeing buffer
cache space for more cacheable data.
|
|
||||||
|
|
||||||
If the Hadoop native libraries are not available, this configuration
|
|
||||||
has no effect.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.drop.cache.behind.writes</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
In some workloads, the data written to HDFS is known to be significantly
|
|
||||||
large enough that it is unlikely to be useful to cache it in the
|
|
||||||
operating system buffer cache. In this case, the DataNode may be
|
|
||||||
configured to automatically purge all data from the buffer cache
|
|
||||||
after it is written to disk.
|
|
||||||
|
|
||||||
This may improve performance for some workloads by freeing buffer
cache space for more cacheable data.
|
|
||||||
|
|
||||||
If the Hadoop native libraries are not available, this configuration
|
|
||||||
has no effect.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.sync.behind.writes</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
If this configuration is enabled, the datanode will instruct the
|
|
||||||
operating system to enqueue all written data to the disk immediately
|
|
||||||
after it is written. This differs from the usual OS policy which
|
|
||||||
may wait for up to 30 seconds before triggering writeback.
|
|
||||||
|
|
||||||
This may improve performance for some workloads by smoothing the
|
|
||||||
IO profile for data written to disk.
|
|
||||||
|
|
||||||
If the Hadoop native libraries are not available, this configuration
|
|
||||||
has no effect.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.client.use.datanode.hostname</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>Whether clients should use datanode hostnames when
|
|
||||||
connecting to datanodes.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.use.datanode.hostname</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>Whether datanodes should use datanode hostnames when
|
|
||||||
connecting to other datanodes for data transfer.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.client.local.interfaces</name>
|
|
||||||
<value></value>
|
|
||||||
<description>A comma separated list of network interface names to use
|
|
||||||
for data transfer between the client and datanodes. When creating
|
|
||||||
a connection to read from or write to a datanode, the client
|
|
||||||
chooses one of the specified interfaces at random and binds its
|
|
||||||
socket to the IP of that interface. Individual names may be
|
|
||||||
specified as either an interface name (eg "eth0"), a subinterface
|
|
||||||
name (eg "eth0:0"), or an IP address (which may be specified using
|
|
||||||
CIDR notation to match a range of IPs).
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.image.transfer.bandwidthPerSec</name>
|
|
||||||
<value>0</value>
|
|
||||||
<description>
|
|
||||||
Specifies the maximum amount of bandwidth that can be utilized
|
|
||||||
for image transfer in term of the number of bytes per second.
|
|
||||||
A default value of 0 indicates that throttling is disabled.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.webhdfs.enabled</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
Enable WebHDFS (REST API) in Namenodes and Datanodes.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.kerberos.internal.spnego.principal</name>
|
|
||||||
<value>${dfs.web.authentication.kerberos.principal}</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
|
|
||||||
<value>${dfs.web.authentication.kerberos.principal}</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.invalidate.work.pct.per.iteration</name>
|
|
||||||
<value>0.32f</value>
|
|
||||||
<description>
|
|
||||||
*Note*: Advanced property. Change with caution.
|
|
||||||
This determines the percentage amount of block
|
|
||||||
invalidations (deletes) to do over a single DN heartbeat
|
|
||||||
deletion command. The final deletion count is determined by applying this
|
|
||||||
percentage to the number of live nodes in the system.
|
|
||||||
The resultant number is the number of blocks from the deletion list
|
|
||||||
chosen for proper invalidation over a single heartbeat of a single DN.
|
|
||||||
Value should be a positive, non-zero percentage in float notation (X.Yf),
|
|
||||||
with 1.0f meaning 100%.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.replication.work.multiplier.per.iteration</name>
|
|
||||||
<value>2</value>
|
|
||||||
<description>
|
|
||||||
*Note*: Advanced property. Change with caution.
|
|
||||||
This determines the total amount of block transfers to begin in
|
|
||||||
parallel at a DN, for replication, when such a command list is being
|
|
||||||
sent over a DN heartbeat by the NN. The actual number is obtained by
|
|
||||||
multiplying this multiplier with the total number of live nodes in the
|
|
||||||
cluster. The result number is the number of blocks to begin transfers
|
|
||||||
immediately for, per DN heartbeat. This number can be any positive,
|
|
||||||
non-zero integer.
|
|
||||||
</description>
|
|
||||||
</property>
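
Both of the advanced properties above scale per-heartbeat work with the number of
live datanodes. A rough back-of-the-envelope sketch, assuming only the semantics
given in the two descriptions:

# Assumed semantics, taken from the property descriptions above; not the
# actual namenode code.
def blocks_to_invalidate(live_nodes, pct_per_iteration=0.32):
    # dfs.namenode.invalidate.work.pct.per.iteration: percentage of live
    # nodes, applied to the deletion list per DN heartbeat.
    return int(live_nodes * pct_per_iteration)


def blocks_to_replicate(live_nodes, multiplier=2):
    # dfs.namenode.replication.work.multiplier.per.iteration: multiplier
    # times the number of live nodes, started per DN heartbeat.
    return live_nodes * multiplier


print(blocks_to_invalidate(100))  # 32 invalidations per heartbeat
print(blocks_to_replicate(100))   # 200 replication transfers per heartbeat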
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.avoid.read.stale.datanode</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
Indicate whether or not to avoid reading from "stale" datanodes whose
|
|
||||||
heartbeat messages have not been received by the namenode
|
|
||||||
for more than a specified time interval. Stale datanodes will be
|
|
||||||
moved to the end of the node list returned for reading. See
|
|
||||||
dfs.namenode.avoid.write.stale.datanode for a similar setting for writes.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.avoid.write.stale.datanode</name>
|
|
||||||
<value>false</value>
|
|
||||||
<description>
|
|
||||||
Indicate whether or not to avoid writing to "stale" datanodes whose
|
|
||||||
heartbeat messages have not been received by the namenode
|
|
||||||
for more than a specified time interval. Writes will avoid using
|
|
||||||
stale datanodes unless more than a configured ratio
|
|
||||||
(dfs.namenode.write.stale.datanode.ratio) of datanodes are marked as
|
|
||||||
stale. See dfs.namenode.avoid.read.stale.datanode for a similar setting
|
|
||||||
for reads.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.stale.datanode.interval</name>
|
|
||||||
<value>30000</value>
|
|
||||||
<description>
|
|
||||||
Default time interval for marking a datanode as "stale", i.e., if
the namenode has not received a heartbeat message from a datanode for
more than this time interval, the datanode will be marked and treated
as "stale" by default. The stale interval cannot be too small since
otherwise this may cause too-frequent changes of stale state.
We thus set a minimum stale interval value (the default is 3 times
the heartbeat interval) and guarantee that the stale interval cannot be less
than the minimum value.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.write.stale.datanode.ratio</name>
|
|
||||||
<value>0.5f</value>
|
|
||||||
<description>
|
|
||||||
When the ratio of stale datanodes to total datanodes
is greater than this ratio, stop avoiding writing to stale nodes so
as to prevent causing hotspots.
|
|
||||||
</description>
|
|
||||||
</property>
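
Taken together with dfs.namenode.avoid.write.stale.datanode, the ratio above implies
roughly the following decision. This is a hedged sketch of the described behavior,
not the actual HDFS block-placement code:

# Sketch of the write-placement decision implied by the two stale-datanode
# properties above; semantics assumed from the descriptions only.
def avoid_stale_for_writes(stale, total, avoid_enabled=False, ratio=0.5):
    if not avoid_enabled or total == 0:
        return False
    # Stop avoiding stale nodes once too many of them are stale, so writes
    # are not concentrated on the few remaining fresh nodes (hotspots).
    return (stale / total) <= ratio


print(avoid_stale_for_writes(2, 10, avoid_enabled=True))  # True: skip stale nodes
print(avoid_stale_for_writes(8, 10, avoid_enabled=True))  # False: too many stale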
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.datanode.plugins</name>
|
|
||||||
<value></value>
|
|
||||||
<description>Comma-separated list of datanode plug-ins to be activated.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>dfs.namenode.plugins</name>
|
|
||||||
<value></value>
|
|
||||||
<description>Comma-separated list of namenode plug-ins to be activated.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
</configuration>
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,21 +0,0 @@
|
|||||||
#!/bin/bash
HADOOP_CONF=/etc/hadoop

while [ $# -gt 0 ] ; do
  nodeArg=$1
  exec< ${HADOOP_CONF}/topology.data
  result=""
  while read line ; do
    ar=( $line )
    if [ "${ar[0]}" = "$nodeArg" ] ; then
      result="${ar[1]}"
    fi
  done
  shift
  if [ -z "$result" ] ; then
    echo -n "/default/rack "
  else
    echo -n "$result "
  fi
done
|
|
||||||
|
|
@ -1,123 +0,0 @@
|
|||||||
# Copyright (c) 2013 Mirantis Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from oslo_log import log as logging
|
|
||||||
|
|
||||||
from sahara.utils import files
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def start_processes(remote, *processes):
|
|
||||||
for proc in processes:
|
|
||||||
remote.execute_command('sudo su -c "/usr/sbin/hadoop-daemon.sh '
|
|
||||||
'start %s" hadoop' % proc)
|
|
||||||
|
|
||||||
|
|
||||||
def refresh_nodes(remote, service):
|
|
||||||
remote.execute_command("sudo su -c 'hadoop %s -refreshNodes' hadoop"
|
|
||||||
% service)
|
|
||||||
|
|
||||||
|
|
||||||
def format_namenode(remote):
|
|
||||||
remote.execute_command("sudo su -c 'hadoop namenode -format' hadoop")
|
|
||||||
|
|
||||||
|
|
||||||
def hive_create_warehouse_dir(remote):
|
|
||||||
LOG.debug("Creating Hive warehouse dir")
|
|
||||||
remote.execute_command("sudo su - -c 'hadoop fs -mkdir "
|
|
||||||
"/user/hive/warehouse' hadoop")
|
|
||||||
|
|
||||||
|
|
||||||
def hive_copy_shared_conf(remote, dest):
|
|
||||||
LOG.debug("Copying shared Hive conf")
|
|
||||||
remote.execute_command(
|
|
||||||
"sudo su - -c 'hadoop fs -put /opt/hive/conf/hive-site.xml "
|
|
||||||
"%s' hadoop" % dest)
|
|
||||||
|
|
||||||
|
|
||||||
def oozie_share_lib(remote, nn_hostname):
|
|
||||||
LOG.debug("Sharing Oozie libs to hdfs://{host}:8020".format(
|
|
||||||
host=nn_hostname))
|
|
||||||
# remote.execute_command('sudo su - -c "/opt/oozie/bin/oozie-setup.sh '
|
|
||||||
# 'sharelib create -fs hdfs://%s:8020" hadoop'
|
|
||||||
# % nn_hostname)
|
|
||||||
|
|
||||||
# TODO(alazarev) return 'oozie-setup.sh sharelib create' back
|
|
||||||
# when #1262023 is resolved
|
|
||||||
remote.execute_command(
|
|
||||||
'sudo su - -c "mkdir /tmp/oozielib && '
|
|
||||||
'tar zxf /opt/oozie/oozie-sharelib-4.0.0.tar.gz -C /tmp/oozielib && '
|
|
||||||
'hadoop fs -put /tmp/oozielib/share share && '
|
|
||||||
'rm -rf /tmp/oozielib" hadoop')
|
|
||||||
|
|
||||||
LOG.debug("Creating sqlfile for Oozie")
|
|
||||||
remote.execute_command('sudo su - -c "/opt/oozie/bin/ooziedb.sh '
|
|
||||||
'create -sqlfile oozie.sql '
|
|
||||||
'-run Validate DB Connection" hadoop')
|
|
||||||
|
|
||||||
|
|
||||||
def check_datanodes_count(remote, count):
|
|
||||||
if count < 1:
|
|
||||||
return True
|
|
||||||
|
|
||||||
LOG.debug("Checking datanode count")
|
|
||||||
exit_code, stdout = remote.execute_command(
|
|
||||||
'sudo su -c "hadoop dfsadmin -report | '
|
|
||||||
'grep \'Datanodes available:\' | '
|
|
||||||
'awk \'{print \\$3}\'" hadoop')
|
|
||||||
LOG.debug("Datanode count={count}".format(count=stdout.rstrip()))
|
|
||||||
|
|
||||||
return exit_code == 0 and stdout and int(stdout) == count
|
|
||||||
|
|
||||||
|
|
||||||
def mysql_start(remote):
|
|
||||||
LOG.debug("Starting MySQL")
|
|
||||||
remote.execute_command("/opt/start-mysql.sh")
|
|
||||||
|
|
||||||
|
|
||||||
def oozie_create_db(remote):
|
|
||||||
LOG.debug("Creating Oozie DB Schema")
|
|
||||||
sql_script = files.get_file_text(
|
|
||||||
'plugins/vanilla/v1_2_1/resources/create_oozie_db.sql')
|
|
||||||
script_location = "create_oozie_db.sql"
|
|
||||||
remote.write_file_to(script_location, sql_script)
|
|
||||||
remote.execute_command('mysql -u root < %(script_location)s && '
|
|
||||||
'rm %(script_location)s' %
|
|
||||||
{"script_location": script_location})
|
|
||||||
|
|
||||||
|
|
||||||
def start_oozie(remote):
|
|
||||||
remote.execute_command(
|
|
||||||
'sudo su - -c "/opt/oozie/bin/oozied.sh start" hadoop')
|
|
||||||
|
|
||||||
|
|
||||||
def hive_create_db(remote, hive_mysql_passwd):
|
|
||||||
LOG.debug("Creating Hive metastore db")
|
|
||||||
sql_script = files.get_file_text(
|
|
||||||
'plugins/vanilla/v1_2_1/resources/create_hive_db.sql')
|
|
||||||
sql_script = sql_script.replace('pass', hive_mysql_passwd)
|
|
||||||
script_location = "create_hive_db.sql"
|
|
||||||
remote.write_file_to(script_location, sql_script)
|
|
||||||
remote.execute_command('mysql -u root < %(script_location)s && '
|
|
||||||
'rm %(script_location)s' %
|
|
||||||
{"script_location": script_location})
|
|
||||||
|
|
||||||
|
|
||||||
def hive_metastore_start(remote):
|
|
||||||
LOG.debug("Starting Hive Metastore Server")
|
|
||||||
remote.execute_command("sudo su - -c 'nohup /opt/hive/bin/hive"
|
|
||||||
" --service metastore > /dev/null &' hadoop")
|
|
@ -1,98 +0,0 @@
|
|||||||
# Copyright (c) 2013 Mirantis Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from sahara import context
|
|
||||||
from sahara.i18n import _
|
|
||||||
from sahara.plugins import utils
|
|
||||||
from sahara.plugins.vanilla.v1_2_1 import config_helper
|
|
||||||
from sahara.plugins.vanilla.v1_2_1 import run_scripts as run
|
|
||||||
from sahara.utils import cluster_progress_ops as cpo
|
|
||||||
from sahara.utils import poll_utils
|
|
||||||
from sahara.utils import remote
|
|
||||||
|
|
||||||
|
|
||||||
@cpo.event_wrapper(True, step=_("Decommission %s") % "TaskTrackers")
|
|
||||||
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
|
|
||||||
with remote.get_remote(jt) as r:
|
|
||||||
r.write_file_to('/etc/hadoop/tt.excl',
|
|
||||||
utils.generate_fqdn_host_names(
|
|
||||||
inst_to_be_deleted))
|
|
||||||
run.refresh_nodes(remote.get_remote(jt), "mradmin")
|
|
||||||
context.sleep(3)
|
|
||||||
r.write_files_to({'/etc/hadoop/tt.incl':
|
|
||||||
utils.generate_fqdn_host_names(survived_inst),
|
|
||||||
'/etc/hadoop/tt.excl': "",
|
|
||||||
})
|
|
||||||
|
|
||||||
|
|
||||||
def is_decommissioned(r, inst_to_be_deleted):
|
|
||||||
cmd = r.execute_command("sudo su -c 'hadoop dfsadmin -report' hadoop")
|
|
||||||
datanodes_info = parse_dfs_report(cmd[1])
|
|
||||||
for inst in inst_to_be_deleted:
|
|
||||||
for dn in datanodes_info:
|
|
||||||
if (dn["Name"].startswith(inst.internal_ip)) and (
|
|
||||||
dn["Decommission Status"] != "Decommissioned"):
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
@cpo.event_wrapper(True, step=_("Decommission %s") % "DataNodes")
|
|
||||||
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
|
|
||||||
with remote.get_remote(nn) as r:
|
|
||||||
r.write_file_to('/etc/hadoop/dn.excl',
|
|
||||||
utils.generate_fqdn_host_names(
|
|
||||||
inst_to_be_deleted))
|
|
||||||
run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
|
|
||||||
context.sleep(3)
|
|
||||||
|
|
||||||
poll_utils.plugin_option_poll(
|
|
||||||
nn.cluster, is_decommissioned,
|
|
||||||
config_helper.DECOMMISSIONING_TIMEOUT,
|
|
||||||
_("Decommission %s") % "DataNodes", 3,
|
|
||||||
{'r': r, 'inst_to_be_deleted': inst_to_be_deleted})
|
|
||||||
|
|
||||||
r.write_files_to({'/etc/hadoop/dn.incl':
|
|
||||||
utils.generate_fqdn_host_names(survived_inst),
|
|
||||||
'/etc/hadoop/dn.excl': ""})
|
|
||||||
|
|
||||||
|
|
||||||
def parse_dfs_report(cmd_output):
|
|
||||||
report = cmd_output.rstrip().split(os.linesep)
|
|
||||||
array = []
|
|
||||||
started = False
|
|
||||||
for line in report:
|
|
||||||
if started:
|
|
||||||
array.append(line)
|
|
||||||
if line.startswith("Datanodes available"):
|
|
||||||
started = True
|
|
||||||
|
|
||||||
res = []
|
|
||||||
datanode_info = {}
|
|
||||||
for i in six.moves.xrange(0, len(array)):
|
|
||||||
if array[i]:
|
|
||||||
idx = str.find(array[i], ':')
|
|
||||||
name = array[i][0:idx]
|
|
||||||
value = array[i][idx + 2:]
|
|
||||||
datanode_info[name.strip()] = value.strip()
|
|
||||||
if not array[i] and datanode_info:
|
|
||||||
res.append(datanode_info)
|
|
||||||
datanode_info = {}
|
|
||||||
if datanode_info:
|
|
||||||
res.append(datanode_info)
|
|
||||||
return res
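
For reference, a small usage sketch of the parse_dfs_report() helper above. The
sample report text is made up, but shows the expected shape of the result (one dict
per datanode, keyed by the report's field names):

# Assumes parse_dfs_report() from the module above is in scope; the report
# text below is a fabricated example of `hadoop dfsadmin -report` output.
sample_report = (
    "Configured Capacity: 100\n"
    "Datanodes available: 1 (1 total, 0 dead)\n"
    "\n"
    "Name: 10.0.0.5:50010\n"
    "Decommission Status : Normal\n"
    "DFS Used: 24576\n"
)
nodes = parse_dfs_report(sample_report)
# -> [{'Name': '10.0.0.5:50010', 'Decommission Status': 'Normal',
#      'DFS Used': '24576'}]
print(nodes[0]['Decommission Status'])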
|
|
@ -1,596 +0,0 @@
|
|||||||
# Copyright (c) 2013 Mirantis Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log as logging
|
|
||||||
import six
|
|
||||||
|
|
||||||
from sahara import conductor
|
|
||||||
from sahara import context
|
|
||||||
from sahara.i18n import _
|
|
||||||
from sahara.i18n import _LI
|
|
||||||
from sahara.plugins import exceptions as ex
|
|
||||||
from sahara.plugins import utils
|
|
||||||
from sahara.plugins.vanilla import abstractversionhandler as avm
|
|
||||||
from sahara.plugins.vanilla import utils as vu
|
|
||||||
from sahara.plugins.vanilla.v1_2_1 import config_helper as c_helper
|
|
||||||
from sahara.plugins.vanilla.v1_2_1 import edp_engine
|
|
||||||
from sahara.plugins.vanilla.v1_2_1 import run_scripts as run
|
|
||||||
from sahara.plugins.vanilla.v1_2_1 import scaling as sc
|
|
||||||
from sahara.topology import topology_helper as th
|
|
||||||
from sahara.utils import cluster_progress_ops as cpo
|
|
||||||
from sahara.utils import edp
|
|
||||||
from sahara.utils import files as f
|
|
||||||
from sahara.utils import general as g
|
|
||||||
from sahara.utils import poll_utils
|
|
||||||
from sahara.utils import proxy
|
|
||||||
from sahara.utils import remote
|
|
||||||
|
|
||||||
|
|
||||||
conductor = conductor.API
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
CONF = cfg.CONF
|
|
||||||
|
|
||||||
|
|
||||||
class VersionHandler(avm.AbstractVersionHandler):
|
|
||||||
def get_plugin_configs(self):
|
|
||||||
return c_helper.get_plugin_configs()
|
|
||||||
|
|
||||||
def get_node_processes(self):
|
|
||||||
return {
|
|
||||||
"HDFS": ["namenode", "datanode", "secondarynamenode"],
|
|
||||||
"MapReduce": ["tasktracker", "jobtracker"],
|
|
||||||
"JobFlow": ["oozie"],
|
|
||||||
"Hive": ["hiveserver"]
|
|
||||||
}
|
|
||||||
|
|
||||||
def validate(self, cluster):
|
|
||||||
nn_count = sum([ng.count for ng
|
|
||||||
in utils.get_node_groups(cluster, "namenode")])
|
|
||||||
if nn_count != 1:
|
|
||||||
raise ex.InvalidComponentCountException("namenode", 1, nn_count)
|
|
||||||
|
|
||||||
snn_count = sum(
|
|
||||||
[ng.count for ng
|
|
||||||
in utils.get_node_groups(cluster, 'secondarynamenode')])
|
|
||||||
if snn_count > 1:
|
|
||||||
raise ex.InvalidComponentCountException('secondarynamenode',
|
|
||||||
_('0 or 1'), snn_count)
|
|
||||||
|
|
||||||
jt_count = sum([ng.count for ng
|
|
||||||
in utils.get_node_groups(cluster, "jobtracker")])
|
|
||||||
|
|
||||||
if jt_count > 1:
|
|
||||||
raise ex.InvalidComponentCountException("jobtracker", _('0 or 1'),
|
|
||||||
jt_count)
|
|
||||||
|
|
||||||
oozie_count = sum([ng.count for ng
|
|
||||||
in utils.get_node_groups(cluster, "oozie")])
|
|
||||||
|
|
||||||
if oozie_count > 1:
|
|
||||||
raise ex.InvalidComponentCountException("oozie", _('0 or 1'),
|
|
||||||
oozie_count)
|
|
||||||
|
|
||||||
hive_count = sum([ng.count for ng
|
|
||||||
in utils.get_node_groups(cluster, "hiveserver")])
|
|
||||||
if jt_count == 0:
|
|
||||||
|
|
||||||
tt_count = sum([ng.count for ng
|
|
||||||
in utils.get_node_groups(cluster, "tasktracker")])
|
|
||||||
if tt_count > 0:
|
|
||||||
raise ex.RequiredServiceMissingException(
|
|
||||||
"jobtracker", required_by="tasktracker")
|
|
||||||
|
|
||||||
if oozie_count > 0:
|
|
||||||
raise ex.RequiredServiceMissingException(
|
|
||||||
"jobtracker", required_by="oozie")
|
|
||||||
|
|
||||||
if hive_count > 0:
|
|
||||||
raise ex.RequiredServiceMissingException(
|
|
||||||
"jobtracker", required_by="hive")
|
|
||||||
|
|
||||||
if hive_count > 1:
|
|
||||||
raise ex.InvalidComponentCountException("hive", _('0 or 1'),
|
|
||||||
hive_count)
|
|
||||||
|
|
||||||
def configure_cluster(self, cluster):
|
|
||||||
instances = utils.get_instances(cluster)
|
|
||||||
self._setup_instances(cluster, instances)
|
|
||||||
|
|
||||||
def start_namenode(self, cluster):
|
|
||||||
nn = vu.get_namenode(cluster)
|
|
||||||
self._start_namenode(nn)
|
|
||||||
|
|
||||||
@cpo.event_wrapper(
|
|
||||||
True, step=utils.start_process_event_message("NameNode"))
|
|
||||||
def _start_namenode(self, nn_instance):
|
|
||||||
with remote.get_remote(nn_instance) as r:
|
|
||||||
run.format_namenode(r)
|
|
||||||
run.start_processes(r, "namenode")
|
|
||||||
|
|
||||||
def start_secondarynamenode(self, cluster):
|
|
||||||
snn = vu.get_secondarynamenode(cluster)
|
|
||||||
if snn is None:
|
|
||||||
return
|
|
||||||
|
|
||||||
self._start_secondarynamenode(snn)
|
|
||||||
|
|
||||||
@cpo.event_wrapper(
|
|
||||||
True, step=utils.start_process_event_message("SecondaryNameNode"))
|
|
||||||
def _start_secondarynamenode(self, snn):
|
|
||||||
run.start_processes(remote.get_remote(snn), "secondarynamenode")
|
|
||||||
|
|
||||||
def start_jobtracker(self, cluster):
|
|
||||||
jt = vu.get_jobtracker(cluster)
|
|
||||||
if jt:
|
|
||||||
self._start_jobtracker(jt)
|
|
||||||
|
|
||||||
@cpo.event_wrapper(
|
|
||||||
True, step=utils.start_process_event_message("JobTracker"))
|
|
||||||
def _start_jobtracker(self, jt_instance):
|
|
||||||
run.start_processes(remote.get_remote(jt_instance), "jobtracker")
|
|
||||||
|
|
||||||
def start_oozie(self, cluster):
|
|
||||||
oozie = vu.get_oozie(cluster)
|
|
||||||
if oozie:
|
|
||||||
self._start_oozie(cluster, oozie)
|
|
||||||
|
|
||||||
@cpo.event_wrapper(
|
|
||||||
True, step=utils.start_process_event_message("Oozie"))
|
|
||||||
def _start_oozie(self, cluster, oozie):
|
|
||||||
nn_instance = vu.get_namenode(cluster)
|
|
||||||
|
|
||||||
with remote.get_remote(oozie) as r:
|
|
||||||
with context.set_current_instance_id(oozie.instance_id):
|
|
||||||
if c_helper.is_mysql_enable(cluster):
|
|
||||||
run.mysql_start(r)
|
|
||||||
run.oozie_create_db(r)
|
|
||||||
run.oozie_share_lib(r, nn_instance.hostname())
|
|
||||||
run.start_oozie(r)
|
|
||||||
LOG.info(
|
|
||||||
_LI("Oozie service has been started"))
|
|
||||||
|
|
||||||
def start_hiveserver(self, cluster):
|
|
||||||
hs = vu.get_hiveserver(cluster)
|
|
||||||
if hs:
|
|
||||||
self._start_hiveserver(cluster, hs)
|
|
||||||
|
|
||||||
@cpo.event_wrapper(
|
|
||||||
True, step=utils.start_process_event_message("HiveServer"))
|
|
||||||
def _start_hiveserver(self, cluster, hive_server):
|
|
||||||
oozie = vu.get_oozie(cluster)
|
|
||||||
|
|
||||||
with remote.get_remote(hive_server) as r:
|
|
||||||
            with context.set_current_instance_id(hive_server.instance_id):
                run.hive_create_warehouse_dir(r)
                run.hive_copy_shared_conf(
                    r, edp.get_hive_shared_conf_path('hadoop'))

                if c_helper.is_mysql_enable(cluster):
                    if not oozie or hive_server.hostname() != oozie.hostname():
                        run.mysql_start(r)
                    run.hive_create_db(r, cluster.extra['hive_mysql_passwd'])
                    run.hive_metastore_start(r)
                    LOG.info(_LI("Hive Metastore server has been started"))

    def start_cluster(self, cluster):
        self.start_namenode(cluster)

        self.start_secondarynamenode(cluster)

        self.start_jobtracker(cluster)

        self._start_tt_dn_processes(utils.get_instances(cluster))

        self._await_datanodes(cluster)

        LOG.info(_LI("Hadoop services in cluster have been started"))

        self.start_oozie(cluster)

        self.start_hiveserver(cluster)

        LOG.info(_LI('Cluster has been started successfully'))
        self._set_cluster_info(cluster)

    @cpo.event_wrapper(
        True, step=_("Await %s start up") % "DataNodes", param=('cluster', 1))
    def _await_datanodes(self, cluster):
        datanodes_count = len(vu.get_datanodes(cluster))
        if datanodes_count < 1:
            return

        l_message = _("Waiting on %s datanodes to start up") % datanodes_count
        LOG.info(l_message)
        with remote.get_remote(vu.get_namenode(cluster)) as r:
            poll_utils.plugin_option_poll(
                cluster, run.check_datanodes_count,
                c_helper.DATANODES_STARTUP_TIMEOUT, l_message, 1, {
                    'remote': r,
                    'count': datanodes_count})

    def _generate_hive_mysql_password(self, cluster):
        extra = cluster.extra.to_dict() if cluster.extra else {}
        password = extra.get('hive_mysql_passwd')
        if not password:
            password = six.text_type(uuid.uuid4())
            extra['hive_mysql_passwd'] = password
            conductor.cluster_update(context.ctx(), cluster, {'extra': extra})
        return password

    def _extract_configs_to_extra(self, cluster):
        oozie = vu.get_oozie(cluster)
        hive = vu.get_hiveserver(cluster)

        extra = dict()

        if hive:
            extra['hive_mysql_passwd'] = self._generate_hive_mysql_password(
                cluster)

        for ng in cluster.node_groups:
            extra[ng.id] = {
                'xml': c_helper.generate_xml_configs(
                    cluster, ng, extra['hive_mysql_passwd'] if hive else None),
                'setup_script': c_helper.generate_setup_script(
                    ng.storage_paths(),
                    c_helper.extract_environment_confs(ng.configuration()),
                    append_oozie=(
                        oozie and oozie.node_group.id == ng.id)
                )
            }

        if c_helper.is_data_locality_enabled(cluster):
            topology_data = th.generate_topology_map(
                cluster, CONF.enable_hypervisor_awareness)
            extra['topology_data'] = "\n".join(
                [k + " " + v for k, v in topology_data.items()]) + "\n"

        return extra

    def decommission_nodes(self, cluster, instances):
        tts = vu.get_tasktrackers(cluster)
        dns = vu.get_datanodes(cluster)
        decommission_dns = False
        decommission_tts = False

        for i in instances:
            if 'datanode' in i.node_group.node_processes:
                dns.remove(i)
                decommission_dns = True
            if 'tasktracker' in i.node_group.node_processes:
                tts.remove(i)
                decommission_tts = True

        nn = vu.get_namenode(cluster)
        jt = vu.get_jobtracker(cluster)

        if decommission_tts:
            sc.decommission_tt(jt, instances, tts)
        if decommission_dns:
            sc.decommission_dn(nn, instances, dns)

    def validate_scaling(self, cluster, existing, additional):
        self._validate_existing_ng_scaling(cluster, existing)
        self._validate_additional_ng_scaling(cluster, additional)

    def scale_cluster(self, cluster, instances):
        self._setup_instances(cluster, instances)

        run.refresh_nodes(remote.get_remote(
            vu.get_namenode(cluster)), "dfsadmin")
        jt = vu.get_jobtracker(cluster)
        if jt:
            run.refresh_nodes(remote.get_remote(jt), "mradmin")

        self._start_tt_dn_processes(instances)

    def _start_tt_dn_processes(self, instances):
        tt_dn_names = ["datanode", "tasktracker"]

        instances = utils.instances_with_services(instances, tt_dn_names)

        if not instances:
            return

        cpo.add_provisioning_step(
            instances[0].cluster_id,
            utils.start_process_event_message("DataNodes, TaskTrackers"),
            len(instances))

        with context.ThreadGroup() as tg:
            for i in instances:
                processes = set(i.node_group.node_processes)
                tt_dn_procs = processes.intersection(tt_dn_names)
                tg.spawn('vanilla-start-tt-dn-%s' % i.instance_name,
                         self._start_tt_dn, i, list(tt_dn_procs))

    @cpo.event_wrapper(True)
    def _start_tt_dn(self, instance, tt_dn_procs):
        with instance.remote() as r:
            run.start_processes(r, *tt_dn_procs)

    @cpo.event_wrapper(True, step=_("Setup instances and push configs"),
                       param=('cluster', 1))
    def _setup_instances(self, cluster, instances):
        if (CONF.use_identity_api_v3 and CONF.use_domain_for_proxy_users and
                vu.get_hiveserver(cluster) and
                c_helper.is_swift_enable(cluster)):
            cluster = proxy.create_proxy_user_for_cluster(cluster)
            instances = utils.get_instances(cluster)

        extra = self._extract_configs_to_extra(cluster)
        cluster = conductor.cluster_get(context.ctx(), cluster)
        self._push_configs_to_nodes(cluster, extra, instances)

    def _push_configs_to_nodes(self, cluster, extra, new_instances):
        all_instances = utils.get_instances(cluster)
        new_ids = set([instance.id for instance in new_instances])
        with context.ThreadGroup() as tg:
            for instance in all_instances:
                if instance.id in new_ids:
                    tg.spawn('vanilla-configure-%s' % instance.instance_name,
                             self._push_configs_to_new_node, cluster,
                             extra, instance)
                else:
                    tg.spawn('vanilla-reconfigure-%s' % instance.instance_name,
                             self._push_configs_to_existing_node, cluster,
                             extra, instance)

    def _push_configs_to_new_node(self, cluster, extra, instance):
        ng_extra = extra[instance.node_group.id]
        private_key, public_key = c_helper.get_hadoop_ssh_keys(cluster)

        files = {
            '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
            '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
            '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
            '/tmp/sahara-hadoop-init.sh': ng_extra['setup_script'],
            'id_rsa': private_key,
            'authorized_keys': public_key
        }

        key_cmd = ('sudo mkdir -p /home/hadoop/.ssh/ && '
                   'sudo mv id_rsa authorized_keys /home/hadoop/.ssh && '
                   'sudo chown -R hadoop:hadoop /home/hadoop/.ssh && '
                   'sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}')

        with remote.get_remote(instance) as r:
            # TODO(aignatov): sudo chown is wrong solution. But it works.
            r.execute_command(
                'sudo chown -R $USER:$USER /etc/hadoop'
            )
            r.execute_command(
                'sudo chown -R $USER:$USER /opt/oozie/conf'
            )
            r.write_files_to(files)
            r.execute_command(
                'sudo chmod 0500 /tmp/sahara-hadoop-init.sh'
            )
            r.execute_command(
                'sudo /tmp/sahara-hadoop-init.sh '
                '>> /tmp/sahara-hadoop-init.log 2>&1')

            r.execute_command(key_cmd)

            if c_helper.is_data_locality_enabled(cluster):
                r.write_file_to(
                    '/etc/hadoop/topology.sh',
                    f.get_file_text(
                        'plugins/vanilla/v1_2_1/resources/topology.sh'))
                r.execute_command(
                    'sudo chmod +x /etc/hadoop/topology.sh'
                )

            self._write_topology_data(r, cluster, extra)
            self._push_master_configs(r, cluster, extra, instance)

    def _push_configs_to_existing_node(self, cluster, extra, instance):
        node_processes = instance.node_group.node_processes
        need_update = (c_helper.is_data_locality_enabled(cluster) or
                       'namenode' in node_processes or
                       'jobtracker' in node_processes or
                       'oozie' in node_processes or
                       'hiveserver' in node_processes)

        if not need_update:
            return

        with remote.get_remote(instance) as r:
            self._write_topology_data(r, cluster, extra)
            self._push_master_configs(r, cluster, extra, instance)

    def _write_topology_data(self, r, cluster, extra):
        if c_helper.is_data_locality_enabled(cluster):
            topology_data = extra['topology_data']
            r.write_file_to('/etc/hadoop/topology.data', topology_data)

    def _push_master_configs(self, r, cluster, extra, instance):
        ng_extra = extra[instance.node_group.id]
        node_processes = instance.node_group.node_processes

        if 'namenode' in node_processes:
            self._push_namenode_configs(cluster, r)

        if 'jobtracker' in node_processes:
            self._push_jobtracker_configs(cluster, r)

        if 'oozie' in node_processes:
            self._push_oozie_configs(ng_extra, r)

        if 'hiveserver' in node_processes:
            self._push_hive_configs(ng_extra, r)

    def _push_namenode_configs(self, cluster, r):
        r.write_file_to('/etc/hadoop/dn.incl',
                        utils.generate_fqdn_host_names(
                            vu.get_datanodes(cluster)))

    def _push_jobtracker_configs(self, cluster, r):
        r.write_file_to('/etc/hadoop/tt.incl',
                        utils.generate_fqdn_host_names(
                            vu.get_tasktrackers(cluster)))

    def _push_oozie_configs(self, ng_extra, r):
        r.write_file_to('/opt/oozie/conf/oozie-site.xml',
                        ng_extra['xml']['oozie-site'])

    def _push_hive_configs(self, ng_extra, r):
        files = {
            '/opt/hive/conf/hive-site.xml':
                ng_extra['xml']['hive-site']
        }
        r.write_files_to(files)

    def _set_cluster_info(self, cluster):
        nn = vu.get_namenode(cluster)
        jt = vu.get_jobtracker(cluster)
        oozie = vu.get_oozie(cluster)
        info = {}

        if jt:
            ui_port = c_helper.get_port_from_config(
                'MapReduce', 'mapred.job.tracker.http.address', cluster)
            jt_port = c_helper.get_port_from_config(
                'MapReduce', 'mapred.job.tracker', cluster)

            info['MapReduce'] = {
                'Web UI': 'http://%s:%s' % (jt.management_ip, ui_port),
                'JobTracker': '%s:%s' % (jt.hostname(), jt_port)
            }

        if nn:
            ui_port = c_helper.get_port_from_config('HDFS', 'dfs.http.address',
                                                    cluster)
            nn_port = c_helper.get_port_from_config('HDFS', 'fs.default.name',
                                                    cluster)

            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (nn.management_ip, ui_port),
                'NameNode': 'hdfs://%s:%s' % (nn.hostname(), nn_port)
            }

        if oozie:
            # TODO(yrunts) change from hardcode value
            info['JobFlow'] = {
                'Oozie': 'http://%s:11000' % oozie.management_ip
            }

        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})

    def _get_scalable_processes(self):
        return ["datanode", "tasktracker"]

    def _validate_additional_ng_scaling(self, cluster, additional):
        jt = vu.get_jobtracker(cluster)
        scalable_processes = self._get_scalable_processes()

        for ng_id in additional:
            ng = g.get_by_id(cluster.node_groups, ng_id)
            if not set(ng.node_processes).issubset(scalable_processes):
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, _("Vanilla plugin cannot scale nodegroup"
                               " with processes: %s") %
                    ' '.join(ng.node_processes))
            if not jt and 'tasktracker' in ng.node_processes:
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, _("Vanilla plugin cannot scale node group with "
                               "processes which have no master-processes run "
                               "in cluster"))

    def _validate_existing_ng_scaling(self, cluster, existing):
        scalable_processes = self._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                if (ng.count > existing[ng.id] and "datanode" in
                        ng.node_processes):
                    dn_to_delete += ng.count - existing[ng.id]
                if not set(ng.node_processes).issubset(scalable_processes):
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name, _("Vanilla plugin cannot scale nodegroup"
                                   " with processes: %s") %
                        ' '.join(ng.node_processes))

        dn_amount = len(vu.get_datanodes(cluster))
        rep_factor = c_helper.get_config_value('HDFS', 'dfs.replication',
                                               cluster)

        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
            raise ex.ClusterCannotBeScaled(
                cluster.name, _("Vanilla plugin cannot shrink cluster because "
                                "it would be not enough nodes for replicas "
                                "(replication factor is %s)") % rep_factor)

    def get_edp_engine(self, cluster, job_type):
        if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
            return edp_engine.EdpOozieEngine(cluster)
        return None

    def get_edp_job_types(self):
        return edp_engine.EdpOozieEngine.get_supported_job_types()

    def get_edp_config_hints(self, job_type):
        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)

    def get_open_ports(self, node_group):
        cluster = node_group.cluster

        ports = []

        if "namenode" in node_group.node_processes:
            ports.append(c_helper.get_port_from_config(
                'HDFS', 'dfs.http.address', cluster))
            ports.append(8020)

        if "datanode" in node_group.node_processes:
            ports.append(c_helper.get_port_from_config(
                'HDFS', 'dfs.datanode.http.address', cluster))
            ports.append(c_helper.get_port_from_config(
                'HDFS', 'dfs.datanode.address', cluster))
            ports.append(c_helper.get_port_from_config(
                'HDFS', 'dfs.datanode.ipc.address', cluster))

        if "jobtracker" in node_group.node_processes:
            ports.append(c_helper.get_port_from_config(
                'MapReduce', 'mapred.job.tracker.http.address', cluster))
            ports.append(8021)

        if "tasktracker" in node_group.node_processes:
            ports.append(c_helper.get_port_from_config(
                'MapReduce', 'mapred.task.tracker.http.address', cluster))

        if "secondarynamenode" in node_group.node_processes:
            ports.append(c_helper.get_port_from_config(
                'HDFS', 'dfs.secondary.http.address', cluster))

        if "oozie" in node_group.node_processes:
            ports.append(11000)

        if "hive" in node_group.node_processes:
            ports.append(9999)
            ports.append(10000)

        return ports

    def on_terminate_cluster(self, cluster):
        proxy.delete_proxy_user_for_cluster(cluster)

    def recommend_configs(self, cluster, scaling):
        # We don't support any recommendations in Vanilla 1 plugin
        pass
@@ -317,13 +317,13 @@ def get_possible_job_config(job_type):
                                 edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_PIG):
         # TODO(nmakhotkin): Here we need return config based on specific plugin
         cfg = xmlutils.load_hadoop_xml_defaults(
-            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
+            'plugins/vanilla/v2_6_0/resources/mapred-default.xml')
         if edp.compare_job_type(job_type, edp.JOB_TYPE_MAPREDUCE):
             cfg += get_possible_mapreduce_configs()
     elif edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
         # TODO(nmakhotkin): Here we need return config based on specific plugin
         cfg = xmlutils.load_hadoop_xml_defaults(
-            'plugins/vanilla/v1_2_1/resources/hive-default.xml')
+            'plugins/vanilla/v2_6_0/resources/hive-default.xml')

     config = {'configs': cfg}
     if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE):
@@ -33,7 +33,7 @@ and use the following tox env:

 .. sourcecode:: console

-    $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/vanilla-1.2.1.yaml.mako
+    $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako
 ..

 If you want to run scenario tests for a few plugins or their versions, you
@@ -41,7 +41,7 @@ should use the several YAML and/or YAML Mako template files:

 .. sourcecode:: console

-    $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/vanilla-1.2.1.yaml.mako etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako ...
+    $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/cdh-5.4.0.yaml.mako etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako ...
 ..

 Here are a few more examples.
@@ -268,13 +268,6 @@ class TemplateUpdateTestCase(base.ConductorManagerTestCase):
         self.assertEqual(1, len(cl_templates))
         self.assertEqual(2, len(ng_templates))

-        option_values = {"plugin_name": "vanilla",
-                         "plugin_version": "1.2.1"}
-        template_api.set_conf(Config(option_values))
-        ng_templates, cl_templates = template_api.process_files(tempdir, files)
-        self.assertEqual(0, len(cl_templates))
-        self.assertEqual(0, len(ng_templates))
-
         option_values = {"plugin_name": "hdp",
                          "plugin_version": "2.7.1"}
         template_api.set_conf(Config(option_values))
@@ -34,7 +34,7 @@ class GeneralUtilsTest(testtools.TestCase):
                              [i2, i3, i4])
         ng3 = tu.make_ng_dict("sn", "f1", ["dn"], 1, [i5])

-        self.c1 = tu.create_cluster("cluster1", "tenant1", "general", "1.2.1",
+        self.c1 = tu.create_cluster("cluster1", "tenant1", "general", "2.6.0",
                                     [ng1, ng2, ng3])

         self.ng1 = self.c1.node_groups[0]
@@ -31,17 +31,17 @@ class TestUtils(base.SaharaWithDbTestCase):
         self.ng_namenode = tu.make_ng_dict(
             'nn', 'f1', ['namenode'], 1,
             [tu.make_inst_dict('nn1', 'namenode')])
-        self.ng_jobtracker = tu.make_ng_dict(
-            'jt', 'f1', ['jobtracker'], 1,
-            [tu.make_inst_dict('jt1', 'jobtracker')])
+        self.ng_resourcemanager = tu.make_ng_dict(
+            'jt', 'f1', ['resourcemanager'], 1,
+            [tu.make_inst_dict('jt1', 'resourcemanager')])
         self.ng_datanode = tu.make_ng_dict(
             'dn', 'f1', ['datanode'], 2,
             [tu.make_inst_dict('dn1', 'datanode-1'),
              tu.make_inst_dict('dn2', 'datanode-2')])
-        self.ng_tasktracker = tu.make_ng_dict(
-            'tt', 'f1', ['tasktracker'], 2,
-            [tu.make_inst_dict('tt1', 'tasktracker-1'),
-             tu.make_inst_dict('tt2', 'tasktracker-2')])
+        self.ng_nodemanager = tu.make_ng_dict(
+            'tt', 'f1', ['nodemanager'], 2,
+            [tu.make_inst_dict('tt1', 'nodemanager-1'),
+             tu.make_inst_dict('tt2', 'nodemanager-2')])
         self.ng_oozie = tu.make_ng_dict(
             'ooz1', 'f1', ['oozie'], 1,
             [tu.make_inst_dict('ooz1', 'oozie')])
@@ -53,43 +53,34 @@ class TestUtils(base.SaharaWithDbTestCase):
             [tu.make_inst_dict('snn1', 'secondarynamenode')])

     def test_get_namenode(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_namenode])
         self.assertEqual('nn1', u.get_namenode(cl).instance_id)

-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertIsNone(u.get_namenode(cl))

-    def test_get_jobtracker(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
-                               [self.ng_manager, self.ng_jobtracker])
-        self.assertEqual('jt1', u.get_jobtracker(cl).instance_id)
-
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
-                               [self.ng_manager])
-        self.assertIsNone(u.get_jobtracker(cl))
-
     def test_get_oozie(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_oozie])
         self.assertEqual('ooz1', u.get_oozie(cl).instance_id)

-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertIsNone(u.get_oozie(cl))

     def test_get_hiveserver(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_hiveserver])
         self.assertEqual('hs1', u.get_hiveserver(cl).instance_id)

-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertIsNone(u.get_hiveserver(cl))

     def test_get_datanodes(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_namenode,
                                 self.ng_datanode])
         datanodes = u.get_datanodes(cl)
@@ -98,30 +89,16 @@ class TestUtils(base.SaharaWithDbTestCase):
                          set([datanodes[0].instance_id,
                               datanodes[1].instance_id]))

-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertEqual([], u.get_datanodes(cl))

-    def test_get_tasktrackers(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
-                               [self.ng_manager, self.ng_jobtracker,
-                                self.ng_tasktracker])
-        tasktrackers = u.get_tasktrackers(cl)
-        self.assertEqual(2, len(tasktrackers))
-        self.assertEqual(set(['tt1', 'tt2']),
-                         set([tasktrackers[0].instance_id,
-                              tasktrackers[1].instance_id]))
-
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
-                               [self.ng_manager])
-        self.assertEqual([], u.get_tasktrackers(cl))
-
     def test_get_secondarynamenodes(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_namenode,
                                 self.ng_secondarynamenode])
         self.assertEqual('snn1', u.get_secondarynamenode(cl).instance_id)

-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertEqual(None, u.get_secondarynamenode(cl))
@@ -1,54 +0,0 @@
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pkg_resources as pkg
import testtools

from sahara.plugins.vanilla.v1_2_1 import scaling as sc
from sahara import version


class ProvisioningPluginBaseTest(testtools.TestCase):
    def test_result_for_3_nodes(self):
        ins = open(pkg.resource_filename(
            version.version_info.package, "tests/unit/resources/"
                                          "dfs_admin_3_nodes.txt"), "r")
        big_string = ins.read()

        exp1 = {"Name": "10.155.0.94:50010", "Decommission Status": "Normal"}
        exp2 = {"Name": "10.155.0.90:50010", "Last contact": "Tue Jul 16 12:"
                                                             "00:07 UTC 2013"}
        exp3 = {"Configured Capacity": "10568916992 (9.84 GB)", "DFS "
                                                                "Remaining%": "93.42%"}
        expected = [exp1, exp2, exp3]
        res = sc.parse_dfs_report(big_string)
        self.assertEqual(expected, res)

    def test_result_for_0_nodes(self):
        ins = open(pkg.resource_filename(
            version.version_info.package, "tests/unit/resources/"
                                          "dfs_admin_0_nodes.txt"), "r")
        big_string = ins.read()
        res = sc.parse_dfs_report(big_string)
        self.assertEqual(0, len(res))

    def test_result_for_1_node(self):
        ins = open(pkg.resource_filename(
            version.version_info.package, "tests/unit/resources/"
                                          "dfs_admin_1_nodes.txt"), "r")
        big_string = ins.read()
        exp = {"Name": "10.155.0.94:50010", "Decommission Status": "Normal"}
        res = sc.parse_dfs_report(big_string)
        self.assertIn(exp, res)
@@ -1,96 +0,0 @@
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from sahara.plugins.vanilla.v1_2_1 import edp_engine
from sahara.tests.unit import base as sahara_base
from sahara.utils import edp


class VanillaConfigHintsTest(sahara_base.SaharaTestCase):
    @mock.patch(
        'sahara.plugins.vanilla.confighints_helper.'
        'get_possible_hive_config_from',
        return_value={})
    def test_get_possible_job_config_hive(
            self, get_possible_hive_config_from):
        expected_config = {'job_config': {}}
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_HIVE)
        get_possible_hive_config_from.assert_called_once_with(
            'plugins/vanilla/v1_2_1/resources/hive-default.xml')
        self.assertEqual(expected_config, actual_config)

    @mock.patch('sahara.plugins.vanilla.edp_engine.EdpOozieEngine')
    def test_get_possible_job_config_java(self, BaseVanillaEdpOozieEngine):
        expected_config = {'job_config': {}}
        BaseVanillaEdpOozieEngine.get_possible_job_config.return_value = (
            expected_config)
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_JAVA)
        (BaseVanillaEdpOozieEngine.get_possible_job_config.
            assert_called_once_with(edp.JOB_TYPE_JAVA))
        self.assertEqual(expected_config, actual_config)

    @mock.patch(
        'sahara.plugins.vanilla.confighints_helper.'
        'get_possible_mapreduce_config_from',
        return_value={})
    def test_get_possible_job_config_mapreduce(
            self, get_possible_mapreduce_config_from):
        expected_config = {'job_config': {}}
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_MAPREDUCE)
        get_possible_mapreduce_config_from.assert_called_once_with(
            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
        self.assertEqual(expected_config, actual_config)

    @mock.patch(
        'sahara.plugins.vanilla.confighints_helper.'
        'get_possible_mapreduce_config_from',
        return_value={})
    def test_get_possible_job_config_mapreduce_streaming(
            self, get_possible_mapreduce_config_from):
        expected_config = {'job_config': {}}
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_MAPREDUCE_STREAMING)
        get_possible_mapreduce_config_from.assert_called_once_with(
            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
        self.assertEqual(expected_config, actual_config)

    @mock.patch(
        'sahara.plugins.vanilla.confighints_helper.'
        'get_possible_pig_config_from',
        return_value={})
    def test_get_possible_job_config_pig(
            self, get_possible_pig_config_from):
        expected_config = {'job_config': {}}
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_PIG)
        get_possible_pig_config_from.assert_called_once_with(
            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
        self.assertEqual(expected_config, actual_config)

    @mock.patch('sahara.plugins.vanilla.edp_engine.EdpOozieEngine')
    def test_get_possible_job_config_shell(self, BaseVanillaEdpOozieEngine):
        expected_config = {'job_config': {}}
        BaseVanillaEdpOozieEngine.get_possible_job_config.return_value = (
            expected_config)
        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
            edp.JOB_TYPE_SHELL)
        (BaseVanillaEdpOozieEngine.get_possible_job_config.
            assert_called_once_with(edp.JOB_TYPE_SHELL))
        self.assertEqual(expected_config, actual_config)
@@ -1,312 +0,0 @@
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
import testtools

from sahara import conductor as cond
from sahara import context
from sahara import exceptions as e
from sahara.plugins import base as pb
from sahara.plugins import exceptions as ex
from sahara.plugins.vanilla import plugin as p
from sahara.plugins.vanilla.v1_2_1 import config_helper as c_h
from sahara.plugins.vanilla.v1_2_1 import mysql_helper as m_h
from sahara.plugins.vanilla.v1_2_1 import versionhandler as v_h
from sahara.tests.unit import base
from sahara.tests.unit import testutils as tu
from sahara.utils import edp


conductor = cond.API


class VanillaPluginTest(base.SaharaWithDbTestCase):
    def setUp(self):
        super(VanillaPluginTest, self).setUp()
        pb.setup_plugins()
        self.pl = p.VanillaProvider()

    def test_validate(self):
        self.ng = []
        self.ng.append(tu.make_ng_dict("nn", "f1", ["namenode"], 0))
        self.ng.append(tu.make_ng_dict("jt", "f1", ["jobtracker"], 0))
        self.ng.append(tu.make_ng_dict("tt", "f1", ["tasktracker"], 0))
        self.ng.append(tu.make_ng_dict("oozie", "f1", ["oozie"], 0))

        self._validate_case(1, 1, 10, 1)

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(0, 1, 10, 1)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(2, 1, 10, 1)

        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 10, 1)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 2, 10, 1)

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 1, 0, 2)
        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 0, 1)

    def _validate_case(self, *args):
        lst = []
        for i in range(0, len(args)):
            self.ng[i]['count'] = args[i]
            lst.append(self.ng[i])

        cl = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1", lst)

        self.pl.validate(cl)

    def test_get_configs(self):
        cl_configs = self.pl.get_configs("1.2.1")
        for cfg in cl_configs:
            if cfg.config_type is "bool":
                self.assertIsInstance(cfg.default_value, bool)
            elif cfg.config_type is "int":
                try:
                    self.assertIsInstance(cfg.default_value, int)
                except AssertionError:
                    self.assertIsInstance(cfg.default_value, long)
            else:
                self.assertIsInstance(cfg.default_value, str)
            self.assertNotIn(cfg.name, c_h.HIDDEN_CONFS)

    def test_extract_environment_configs(self):
        env_configs = {
            "JobFlow": {
                'Oozie Heap Size': 4000
            },
            "MapReduce": {
                'Job Tracker Heap Size': 1000,
                'Task Tracker Heap Size': "2000"
            },
            "HDFS": {
                'Name Node Heap Size': 3000,
                'Data Node Heap Size': "4000"
            },
            "Wrong-applicable-target": {
                't1': 4
            }}
        self.assertEqual(['CATALINA_OPTS -Xmx4000m',
                          'HADOOP_DATANODE_OPTS=\\"-Xmx4000m\\"',
                          'HADOOP_JOBTRACKER_OPTS=\\"-Xmx1000m\\"',
                          'HADOOP_NAMENODE_OPTS=\\"-Xmx3000m\\"',
                          'HADOOP_TASKTRACKER_OPTS=\\"-Xmx2000m\\"'],
                         c_h.extract_environment_confs(env_configs))

    def test_extract_xml_configs(self):
        xml_configs = {
            "HDFS": {
                'dfs.replication': 3,
                'fs.default.name': 'hdfs://',
                'key': 'value'
            },
            "MapReduce": {
                'io.sort.factor': 10,
                'mapred.reduce.tasks': 2
            },
            "Wrong-applicable-target": {
                'key': 'value'
            }
        }

        self.assertEqual([('dfs.replication', 3),
                          ('fs.default.name', 'hdfs://'),
                          ('io.sort.factor', 10),
                          ('mapred.reduce.tasks', 2)],
                         c_h.extract_xml_confs(xml_configs))

    def test_general_configs(self):
        gen_config = {
            c_h.ENABLE_SWIFT.name: {
                'default_value': c_h.ENABLE_SWIFT.default_value,
                'conf': {
                    'fs.swift.enabled': True
                }
            },
            c_h.ENABLE_MYSQL.name: {
                'default_value': c_h.ENABLE_MYSQL.default_value,
                'conf': {
                    'oozie.service.JPAService.jdbc.username': 'oozie'
                }
            }
        }
        all_configured = {
            'fs.swift.enabled': True,
            'oozie.service.JPAService.jdbc.username': 'oozie'
        }
        configs = {
            'general': {
                'Enable Swift': True
            }
        }
        cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
        self.assertEqual(all_configured, cfg)
        configs['general'].update({'Enable MySQL': False})
        cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
        self.assertEqual({'fs.swift.enabled': True}, cfg)
        configs['general'].update({
            'Enable Swift': False,
            'Enable MySQL': False
        })
        cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
        self.assertEqual({}, cfg)
        configs = {}
        cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
        self.assertEqual(all_configured, cfg)

    def test_get_mysql_configs(self):
        cfg = m_h.get_required_mysql_configs(None, None)
        self.assertEqual(cfg, m_h.get_oozie_mysql_configs())
        cfg = m_h.get_required_mysql_configs("metastore_host", "passwd")
        cfg_to_compare = m_h.get_oozie_mysql_configs()
        cfg_to_compare.update(m_h.get_hive_mysql_configs(
            "metastore_host", "passwd"))
        self.assertEqual(cfg, cfg_to_compare)

    @mock.patch('sahara.conductor.api.LocalApi.cluster_get')
    def test_get_config_value(self, cond_get_cluster):
        cluster = self._get_fake_cluster()
        cond_get_cluster.return_value = cluster

        self.assertEqual(
            'hdfs://inst1:8020',
            c_h.get_config_value('HDFS', 'fs.default.name', cluster))
        self.assertEqual(
            'eggs', c_h.get_config_value('HDFS', 'spam', cluster))
        self.assertEqual(
            30000, c_h.get_config_value('HDFS', 'dfs.safemode.extension'))
        self.assertRaises(e.ConfigurationError,
                          c_h.get_config_value,
                          'MapReduce', 'spam', cluster)

    @mock.patch('sahara.plugins.vanilla.v1_2_1.versionhandler.context')
    @mock.patch('sahara.conductor.api.LocalApi.cluster_update')
    def test_set_cluster_info(self, cond_cluster_update, context_mock):
        cluster = self._get_fake_cluster()
        v_h.VersionHandler()._set_cluster_info(cluster)
        expected_info = {
            'HDFS': {
                'NameNode': 'hdfs://inst1:8020',
                'Web UI': 'http://127.0.0.1:50070'
            },
            'MapReduce': {
                'Web UI': 'http://127.0.0.1:50030',
                'JobTracker': 'inst1:8021'
            },
            'JobFlow': {
                'Oozie': 'http://127.0.0.1:11000'
            }
        }
        cond_cluster_update.assert_called_with(context_mock.ctx(), cluster,
                                               {'info': expected_info})

    def _get_fake_cluster(self):
        class FakeNG(object):
            def __init__(self, name, flavor, processes, count, instances=None,
                         configuration=None, cluster_id=None):
                self.name = name
                self.flavor = flavor
                self.node_processes = processes
                self.count = count
                self.instances = instances or []
                self.ng_configuration = configuration
                self.cluster_id = cluster_id

            def configuration(self):
                return self.ng_configuration

            def storage_paths(self):
                return ['/mnt']

        class FakeCluster(object):
            def __init__(self, name, tenant, plugin, version, node_groups):
                self.name = name
                self.tenant = tenant
                self.plugin = plugin
                self.version = version
                self.node_groups = node_groups

        class FakeInst(object):
            def __init__(self, inst_name, inst_id, management_ip):
                self.instance_name = inst_name
                self.instance_id = inst_id
                self.management_ip = management_ip

            def hostname(self):
                return self.instance_name

        ms_inst = FakeInst('inst1', 'id1', '127.0.0.1')
        wk_inst = FakeInst('inst2', 'id2', '127.0.0.1')

        conf = {
            "MapReduce": {},
            "HDFS": {
                "spam": "eggs"
            },
            "JobFlow": {},
        }

        ng1 = FakeNG('master', 'fl1', ['namenode', 'jobtracker', 'oozie'], 1,
                     [ms_inst], conf, 'id1')
        ng2 = FakeNG('worker', 'fl1', ['datanode', 'tasktracker'], 1,
                     [wk_inst], conf, 'id1')
        return FakeCluster('cl1', 'ten1', 'vanilla', '1.2.1', [ng1, ng2])

    def test_get_hadoop_ssh_keys(self):
        cluster_dict = {
            'name': 'cluster1',
            'plugin_name': 'mock_plugin',
            'hadoop_version': 'mock_version',
            'default_image_id': 'initial',
            'node_groups': [tu.make_ng_dict("ng1", "f1", ["s1"], 1)],
            'extra': {'test': '1'}}

        cluster1 = conductor.cluster_create(context.ctx(), cluster_dict)
        (private_key1, public_key1) = c_h.get_hadoop_ssh_keys(cluster1)

        # should store keys for old cluster
        cluster1 = conductor.cluster_get(context.ctx(), cluster1)
        (private_key2, public_key2) = c_h.get_hadoop_ssh_keys(cluster1)

        self.assertEqual(public_key1, public_key2)
        self.assertEqual(private_key1, private_key2)

        # should generate new keys for new cluster
        cluster_dict.update({'name': 'cluster2'})
        cluster2 = conductor.cluster_create(context.ctx(), cluster_dict)
        (private_key3, public_key3) = c_h.get_hadoop_ssh_keys(cluster2)

        self.assertNotEqual(public_key1, public_key3)
        self.assertNotEqual(private_key1, private_key3)

    @mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop1')
    def test_edp_calls_hadoop1_create_dir(self, create_dir):
        cluster_dict = {
            'name': 'cluster1',
            'plugin_name': 'vanilla',
            'hadoop_version': '1.2.1',
            'default_image_id': 'image'}

        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
        plugin.get_edp_engine(cluster, edp.JOB_TYPE_PIG).create_hdfs_dir(
            mock.Mock(), '/tmp')

        self.assertEqual(1, create_dir.call_count)
@@ -1,42 +0,0 @@
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
import testtools

from sahara.plugins.vanilla.v1_2_1 import run_scripts


class RunScriptsTest(testtools.TestCase):

    def test_check_datanodes_count_positive(self):
        remote = mock.Mock()
        remote.execute_command.return_value = (0, "1")
        self.assertTrue(run_scripts.check_datanodes_count(remote, 1))

    def test_check_datanodes_count_negative(self):
        remote = mock.Mock()
        remote.execute_command.return_value = (0, "1")
        self.assertFalse(run_scripts.check_datanodes_count(remote, 2))

    def test_check_datanodes_count_nonzero_exitcode(self):
        remote = mock.Mock()
        remote.execute_command.return_value = (1, "1")
        self.assertFalse(run_scripts.check_datanodes_count(remote, 1))

    def test_check_datanodes_count_expects_zero(self):
        remote = mock.Mock()
        self.assertTrue(run_scripts.check_datanodes_count(remote, 0))
        self.assertEqual(0, remote.execute_command.call_count)
@@ -64,7 +64,7 @@ def create_job_binary(id, type):
     return binary


-def create_cluster(plugin_name='vanilla', hadoop_version='1.2.1'):
+def create_cluster(plugin_name='vanilla', hadoop_version='2.6.0'):
     cluster = mock.Mock()
     cluster.plugin_name = plugin_name
     cluster.hadoop_version = hadoop_version
@@ -24,10 +24,11 @@ class TestJobPossibleConfigs(testtools.TestCase):
     def test_possible_configs(self):
         res = w_f.get_possible_job_config(edp.JOB_TYPE_MAPREDUCE)
         sample_config_property = {
-            'name': 'mapred.map.tasks',
-            'value': '2',
-            'description': 'The default number of map tasks per job.'
-                           'Ignored when mapred.job.tracker is "local". '
+            'name': 'mapreduce.jobtracker.expire.trackers.interval',
+            'value': '600000',
+            'description': "Expert: The time-interval, in miliseconds, after "
+                           "whicha tasktracker is declared 'lost' if it "
+                           "doesn't send heartbeats."
         }
         self.assertIn(sample_config_property, res['job_config']["configs"])

@@ -43,7 +43,7 @@ class TestClusterTemplate(base.SaharaWithDbTestCase):

     def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=[]):
         return tu.create_cluster("cluster", "tenant1", "general",
-                                 "1.2.1", [ng1, ng2],
+                                 "2.6.0", [ng1, ng2],
                                  user_keypair_id='user_key',
                                  neutron_management_network=mng_network,
                                  default_image_id='1', image_id=None,
@@ -84,7 +84,6 @@ class TestClusterTemplate(base.SaharaWithDbTestCase):

         ng1 = [ng for ng in cluster.node_groups if ng.name == "master"][0]
         ng2 = [ng for ng in cluster.node_groups if ng.name == "worker"][0]
-
         expected = ['1', '2']
         actual = heat_template._get_security_groups(ng1)
         self.assertEqual(expected, actual)
@@ -64,7 +64,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['oozie'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
-                                                      "vanilla", "1.2.1", [ng])
+                                                      "vanilla", "2.6.0", [ng])

         self._assert_create_object_validation(
             data={
@@ -113,7 +113,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['oozie'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
-                                                      "vanilla", "1.2.1", [ng])
+                                                      "vanilla", "2.6.0", [ng])

         self._assert_create_object_validation(
             data={
@@ -156,7 +156,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
-                                                      "vanilla", "1.2.1", [ng])
+                                                      "vanilla", "2.6.0", [ng])

         self._assert_create_object_validation(
             data={
@@ -199,7 +199,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['namenode', 'oozie'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
-                                                      "vanilla", "1.2.1", [ng])
+                                                      "vanilla", "2.6.0", [ng])

         self._assert_create_object_validation(
             data={
@@ -71,7 +71,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
         data = {
             'name': "testname",
             'plugin_name': "vanilla",
-            'hadoop_version': "1.2.1"
+            'hadoop_version': "2.6.0"
         }
         self._assert_types(data)

@@ -79,7 +79,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
         data = {
             'name': "testname",
             'plugin_name': "vanilla",
-            'hadoop_version': "1.2.1"
+            'hadoop_version': "2.6.0"
         }
         self._assert_valid_name_hostname_validation(data)

@@ -87,7 +87,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
         data = {
             'name': 'test',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1'
+            'hadoop_version': '2.6.0'
         }
         self._assert_create_object_validation(
             data=data,
@@ -101,7 +101,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
         data = {
             'name': 'test-heat',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1'
+            'hadoop_version': '2.6.0'
         }
         self._assert_create_object_validation(
             data=data,
@@ -115,7 +115,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'wrong_keypair'
             },
             bad_req_i=(1, 'NOT_FOUND',
@@ -127,7 +127,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': '!'},
             bad_req_i=(1, 'VALIDATION_ERROR',
                        "'!' is not a 'valid_keypair_name'")
@@ -138,7 +138,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a616-446655440000'
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
@@ -151,7 +151,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "wrong_plugin",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
                        "Sahara doesn't contain plugin "
@@ -164,7 +164,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': '53a36917-ab9f-4589-'
                                               '94ce-b6df85a68332'
@@ -178,7 +178,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': '53a36917-ab9f-4589-'
                                               '94ce-b6df85a68332'
@@ -194,7 +194,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
             },
             bad_req_i=(1, 'NOT_FOUND',
@@ -206,7 +206,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "long-long-cluster-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': 'd9a3bebc-f788-4b81-'
                                               '9a93-aa048022c1ca',
@@ -235,11 +235,11 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'test_keypair',
                 'cluster_configs': {
                     'HDFS': {
-                        u'hadoop.tmp.dir': '/temp/'
+                        u'hadoop.hdfs.configuration.version': '2'
                     }
                 },
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
@@ -257,7 +257,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'test_keypair',
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': 'd9a3bebc-f788-4b81-'
@@ -282,7 +282,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'test_keypair',
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': 'd9a3bebc-f788-4b81-'
@@ -317,7 +317,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'test_keypair',
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': 'd9a3bebc-f788-4b81-'
@@ -350,7 +350,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'test_keypair',
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': 'd9a3bebc-f788-4b81-'
@@ -375,7 +375,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'test_keypair',
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': 'd9a3bebc-f788-4b81-'
|
'neutron_management_network': 'd9a3bebc-f788-4b81-'
|
||||||
@ -402,7 +402,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
|
|||||||
data={
|
data={
|
||||||
'name': 'testname',
|
'name': 'testname',
|
||||||
'plugin_name': 'vanilla',
|
'plugin_name': 'vanilla',
|
||||||
'hadoop_version': '1.2.1',
|
'hadoop_version': '2.6.0',
|
||||||
'user_keypair_id': 'test_keypair',
|
'user_keypair_id': 'test_keypair',
|
||||||
'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
|
'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
|
||||||
'neutron_management_network': 'd9a3bebc-f788-4b81-'
|
'neutron_management_network': 'd9a3bebc-f788-4b81-'
|
||||||
@ -430,7 +430,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
|
|||||||
data={
|
data={
|
||||||
'name': 'testname',
|
'name': 'testname',
|
||||||
'plugin_name': 'vanilla',
|
'plugin_name': 'vanilla',
|
||||||
'hadoop_version': '1.2.1',
|
'hadoop_version': '2.6.0',
|
||||||
'user_keypair_id': 'test_keypair',
|
'user_keypair_id': 'test_keypair',
|
||||||
'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
|
'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
|
||||||
'neutron_management_network': 'd9a3bebc-f788-4b81-'
|
'neutron_management_network': 'd9a3bebc-f788-4b81-'
|
||||||
@ -458,7 +458,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
|
|||||||
data={
|
data={
|
||||||
'name': 'testname',
|
'name': 'testname',
|
||||||
'plugin_name': 'vanilla',
|
'plugin_name': 'vanilla',
|
||||||
'hadoop_version': '1.2.1',
|
'hadoop_version': '2.6.0',
|
||||||
'user_keypair_id': 'test_keypair',
|
'user_keypair_id': 'test_keypair',
|
||||||
'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
|
'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
|
||||||
'neutron_management_network': 'd9a3bebc-f788-4b81-'
|
'neutron_management_network': 'd9a3bebc-f788-4b81-'
|
||||||
@ -521,7 +521,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
def _create_node_group_template(self, flavor='42'):
|
def _create_node_group_template(self, flavor='42'):
|
||||||
ng_tmpl = {
|
ng_tmpl = {
|
||||||
"plugin_name": "vanilla",
|
"plugin_name": "vanilla",
|
||||||
"hadoop_version": "1.2.1",
|
"hadoop_version": "2.6.0",
|
||||||
"node_processes": ["namenode"],
|
"node_processes": ["namenode"],
|
||||||
"name": "master",
|
"name": "master",
|
||||||
"flavor_id": flavor
|
"flavor_id": flavor
|
||||||
@ -531,7 +531,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
def _create_cluster_template(self, ng_id):
|
def _create_cluster_template(self, ng_id):
|
||||||
cl_tmpl = {
|
cl_tmpl = {
|
||||||
"plugin_name": "vanilla",
|
"plugin_name": "vanilla",
|
||||||
"hadoop_version": "1.2.1",
|
"hadoop_version": "2.6.0",
|
||||||
"node_groups": [
|
"node_groups": [
|
||||||
{"name": "master",
|
{"name": "master",
|
||||||
"count": 1,
|
"count": 1,
|
||||||
@ -548,7 +548,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
data = {
|
data = {
|
||||||
"name": "testname",
|
"name": "testname",
|
||||||
"plugin_name": "vanilla",
|
"plugin_name": "vanilla",
|
||||||
"hadoop_version": "1.2.1",
|
"hadoop_version": "2.6.0",
|
||||||
"cluster_template_id": '%s' % ctmpl_id,
|
"cluster_template_id": '%s' % ctmpl_id,
|
||||||
'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
|
'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
|
||||||
}
|
}
|
||||||
@ -559,7 +559,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
data1 = {
|
data1 = {
|
||||||
"name": "testwithnodegroups",
|
"name": "testwithnodegroups",
|
||||||
"plugin_name": "vanilla",
|
"plugin_name": "vanilla",
|
||||||
"hadoop_version": "1.2.1",
|
"hadoop_version": "2.6.0",
|
||||||
"node_groups": [
|
"node_groups": [
|
||||||
{
|
{
|
||||||
"name": "allinone",
|
"name": "allinone",
|
||||||
@ -567,9 +567,9 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
"flavor_id": "42",
|
"flavor_id": "42",
|
||||||
"node_processes": [
|
"node_processes": [
|
||||||
"namenode",
|
"namenode",
|
||||||
"jobtracker",
|
"resourcemanager",
|
||||||
"datanode",
|
"datanode",
|
||||||
"tasktracker"
|
"nodemanager"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
@ -586,14 +586,14 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
data = {
|
data = {
|
||||||
"name": "testname",
|
"name": "testname",
|
||||||
"plugin_name": "vanilla",
|
"plugin_name": "vanilla",
|
||||||
"hadoop_version": "1.2.1",
|
"hadoop_version": "2.6.0",
|
||||||
"cluster_template_id": '%s' % ctmpl_id,
|
"cluster_template_id": '%s' % ctmpl_id,
|
||||||
'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
|
'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
|
||||||
}
|
}
|
||||||
data1 = {
|
data1 = {
|
||||||
"name": "testwithnodegroups",
|
"name": "testwithnodegroups",
|
||||||
"plugin_name": "vanilla",
|
"plugin_name": "vanilla",
|
||||||
"hadoop_version": "1.2.1",
|
"hadoop_version": "2.6.0",
|
||||||
"node_groups": [
|
"node_groups": [
|
||||||
{
|
{
|
||||||
"name": "allinone",
|
"name": "allinone",
|
||||||
@ -601,9 +601,9 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
"flavor_id": "10",
|
"flavor_id": "10",
|
||||||
"node_processes": [
|
"node_processes": [
|
||||||
"namenode",
|
"namenode",
|
||||||
"jobtracker",
|
"resourcemanager",
|
||||||
"datanode",
|
"datanode",
|
||||||
"tasktracker"
|
"nodemanager"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
@ -630,7 +630,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
data = {
|
data = {
|
||||||
"name": "testtmplnodegroups",
|
"name": "testtmplnodegroups",
|
||||||
"plugin_name": "vanilla",
|
"plugin_name": "vanilla",
|
||||||
"hadoop_version": "1.2.1",
|
"hadoop_version": "2.6.0",
|
||||||
"cluster_template_id": '%s' % ctmpl_id,
|
"cluster_template_id": '%s' % ctmpl_id,
|
||||||
'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
|
'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
|
||||||
"node_groups": [
|
"node_groups": [
|
||||||
@ -640,9 +640,9 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
"flavor_id": "42",
|
"flavor_id": "42",
|
||||||
"node_processes": [
|
"node_processes": [
|
||||||
"namenode",
|
"namenode",
|
||||||
"jobtracker",
|
"resourcemanager",
|
||||||
"datanode",
|
"datanode",
|
||||||
"tasktracker"
|
"nodemanager"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@ -656,7 +656,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
data = {
|
data = {
|
||||||
"name": "testtmplnodegroups",
|
"name": "testtmplnodegroups",
|
||||||
"plugin_name": "vanilla",
|
"plugin_name": "vanilla",
|
||||||
"hadoop_version": "1.2.1",
|
"hadoop_version": "2.6.0",
|
||||||
"node_groups": [
|
"node_groups": [
|
||||||
{
|
{
|
||||||
"node_group_template_id": '%s' % ng_id,
|
"node_group_template_id": '%s' % ng_id,
|
||||||
@ -665,9 +665,9 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
|
|||||||
"flavor_id": "42",
|
"flavor_id": "42",
|
||||||
"node_processes": [
|
"node_processes": [
|
||||||
"namenode",
|
"namenode",
|
||||||
"jobtracker",
|
"resourcemanager",
|
||||||
"datanode",
|
"datanode",
|
||||||
"tasktracker"
|
"nodemanager"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
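The fixture updates above and below apply one mechanical renaming besides the version bump: the Hadoop 1 MRv1 daemons become their YARN counterparts. A minimal sketch of that mapping, for orientation only; the helper below is illustrative and not part of this patch:

# Illustration only: the MRv1 -> YARN process renaming used throughout this
# change; the helper name and structure are assumptions, not Sahara code.
MRV1_TO_YARN = {
    'jobtracker': 'resourcemanager',
    'tasktracker': 'nodemanager',
}

def to_hadoop2_processes(processes):
    """Rewrite a Hadoop 1 node_processes list using the YARN daemon names."""
    return [MRV1_TO_YARN.get(p, p) for p in processes]

# ['namenode', 'jobtracker', 'datanode', 'tasktracker']
# -> ['namenode', 'resourcemanager', 'datanode', 'nodemanager']
print(to_hadoop2_processes(['namenode', 'jobtracker', 'datanode', 'tasktracker']))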
@@ -66,7 +66,7 @@ class TestScalingValidation(u.ValidationTestCase):
     def test_check_cluster_scaling_resize_ng(self, ops):
         ops.get_engine_type_and_version.return_value = "direct.1.1"
         ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
-        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1",
+        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "2.6.0",
                                     [ng1], status='Validating', id='12321')

         self._assert_check_scaling(
@@ -76,7 +76,7 @@ class TestScalingValidation(u.ValidationTestCase):
             "status. Cluster status: "
             "Validating")

-        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1",
+        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "2.6.0",
                                     [ng1], status='Active', id='12321')
         data = {
             'resize_node_groups': [
@@ -113,7 +113,7 @@ class TestScalingValidation(u.ValidationTestCase):
         ops.get_engine_type_and_version.return_value = "direct.1.1"
         ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
         cluster = tu.create_cluster("test-cluster", "tenant1", "vanilla",
-                                    "1.2.1", [ng1], status='Active',
+                                    "2.7.1", [ng1], status='Active',
                                     id='12321')
         data = {
             'add_node_groups': [
@@ -359,7 +359,7 @@ class TestScalingValidation(u.ValidationTestCase):
         ops.get_engine_type_and_version.return_value = "direct.1.1"
         ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
         cluster = tu.create_cluster(
-            "cluster1", "tenant1", "vanilla", "1.2.1", [ng1],
+            "cluster1", "tenant1", "vanilla", "2.6.0", [ng1],
             status='Active', id='12321',
             sahara_info={"infrastructure_engine": "heat.1.1"})

@@ -372,7 +372,7 @@ class TestScalingValidation(u.ValidationTestCase):
     def test_check_heat_cluster_scaling_missing_engine(self, ops):
         ops.get_engine_type_and_version.return_value = "heat.1.1"
         ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
-        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1",
+        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "2.6.0",
                                     [ng1], status='Active', id='12321')

         self._assert_check_scaling(
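The scaling fixtures above are built with tu.create_cluster(name, tenant, plugin_name, hadoop_version, node_groups, **kwargs), and only the version argument changes. For orientation, a representative cluster-create payload mirroring the Hadoop 2 fixtures; all names and UUIDs below are illustrative placeholders, not values required by Sahara:

# Illustrative payload only; mirrors the shape of the test data dicts above.
cluster_create_request = {
    'name': 'test-cluster',
    'plugin_name': 'vanilla',
    'hadoop_version': '2.6.0',  # some tests above use '2.7.1' instead
    'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
    'node_groups': [
        {
            'name': 'allinone',
            'flavor_id': '42',
            'count': 1,
            'node_processes': ['namenode', 'resourcemanager',
                               'datanode', 'nodemanager'],
        },
    ],
}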
@@ -38,7 +38,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {'name': 'a'}
                 ]
@@ -51,7 +51,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {'name': 'a',
                      'flavor_id': '42'}
@@ -66,7 +66,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {'name': 'a',
                      'flavor_id': '42',
@@ -83,7 +83,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {
                         'name': 'a',
@@ -108,7 +108,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {
                         "node_group_template_id": "",
@@ -125,7 +125,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {
                         "node_group_template_id": "test",
@@ -147,7 +147,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {
                         "node_group_template_id": "550e8400-e29b-41d4-a716-"
@@ -164,7 +164,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
         data = {
             'name': "testname",
             'plugin_name': "vanilla",
-            'hadoop_version': "1.2.1"
+            'hadoop_version': "2.6.0"
         }
         self._assert_valid_name_hostname_validation(data)

@@ -172,7 +172,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
         data = {
             'name': "testname",
             'plugin_name': "vanilla",
-            'hadoop_version': "1.2.1"
+            'hadoop_version': "2.6.0"
         }
         self._assert_types(data)

@@ -208,7 +208,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': 'testname',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'default_image_id': str(uuid.uuid4()),
                 'cluster_configs': {
                     "service_1": {
@@ -237,7 +237,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': 'testname',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'default_image_id': None,
                 'cluster_configs': None,
                 'node_groups': None,
@@ -251,7 +251,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "wrong_plugin",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
                        "Sahara doesn't contain plugin "
@@ -262,7 +262,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
         data = {
             'name': 'test',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1'
+            'hadoop_version': '2.6.0'
         }
         self._assert_create_object_validation(
             data=data,
@@ -274,7 +274,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
         data = {
             'name': 'test-template',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'neutron_management_network': '53a36917-ab9f-4589'
                                           '-94ce-b6df85a68332'
         }
@@ -24,7 +24,7 @@ from sahara.tests.unit.service.validation import utils as u
 SAMPLE_DATA = {
     'name': 'testname',
     'plugin_name': 'vanilla',
-    'hadoop_version': '1.2.1',
+    'hadoop_version': '2.7.1',
     'is_public': False,
     'is_protected': False
 }
@@ -56,7 +56,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1'
+                'hadoop_version': '2.6.0'
             },
             bad_req_i=(1, "VALIDATION_ERROR",
                        u"'node_processes' is a required property")
@@ -66,7 +66,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': "a",
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': []
             },
             bad_req_i=(1, 'VALIDATION_ERROR',
@@ -78,7 +78,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
             'name': 'a',
             'flavor_id': '42',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'node_processes': ['namenode']
         }
         self._assert_valid_name_hostname_validation(data)
@@ -89,7 +89,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': "a",
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ["namenode", "namenode"]
             },
             bad_req_i=(1, 'INVALID_DATA',
@@ -100,7 +100,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process']
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
@@ -121,16 +121,16 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode',
                                    'datanode',
                                    'secondarynamenode',
-                                   'tasktracker',
-                                   'jobtracker'],
+                                   'nodemanager',
+                                   'resourcemanager'],
                 'image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'node_configs': {
                     'HDFS': {
-                        u'hadoop.tmp.dir': '/temp/'
+                        u'hadoop.hdfs.configuration.version': '2'
                     }
                 },
                 'volumes_per_node': 2,
@@ -156,12 +156,12 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode',
                                    'datanode',
                                    'secondarynamenode',
-                                   'tasktracker',
-                                   'jobtracker'],
+                                   'nodemanager',
+                                   'resourcemanager'],

                 'image_id': None,
                 'node_configs': None,
@@ -187,7 +187,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process'],
                 'volumes_per_node': -1
             },
@@ -199,7 +199,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process'],
                 'volumes_size': 0
             },
@@ -211,7 +211,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
         default_data = {
             'name': 'a', 'flavor_id': '42',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'node_processes': ['namenode']
         }
         self._assert_types(default_data)
@@ -221,7 +221,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
             'name': 'test',
             'flavor_id': '42',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'node_processes': ['namenode']}
         self._assert_create_object_validation(
             data=data,
@@ -235,7 +235,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'test-ng',
                 'flavor_id': '1',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode']
             },
             bad_req_i=(1, 'NOT_FOUND',
@@ -248,11 +248,11 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'test-ng',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode'],
                 'node_configs': {
                     'wrong_target': {
-                        u'hadoop.tmp.dir': '/temp/'
+                        u'mapreduce.task.tmp.dir': '/temp/'
                     }
                 }},
             bad_req_i=(1, 'INVALID_REFERENCE',
@@ -264,7 +264,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'test-ng',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode'],
                 'node_configs': {
                     'HDFS': {
@@ -283,7 +283,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process'],
                 'volumes_per_node': -1
             },
@@ -295,7 +295,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process'],
                 'volumes_size': 0
             },
@@ -307,8 +307,8 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
-                'node_processes': ['datanode', 'tasktracker'],
+                'hadoop_version': '2.6.0',
+                'node_processes': ['datanode', 'nodemanager'],
                 'volumes_per_node': 1,
                 'volumes_size': 1,
                 'volume_mount_prefix': '/mnt/volume'
@@ -318,8 +318,8 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
-                'node_processes': ['datanode', 'tasktracker'],
+                'hadoop_version': '2.6.0',
+                'node_processes': ['datanode', 'nodemanager'],
                 'volumes_per_node': 1,
                 'volumes_size': 1,
                 'volume_mount_prefix': 'qwerty'
@@ -335,8 +335,8 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
-                'node_processes': ['datanode', 'tasktracker'],
+                'hadoop_version': '2.6.0',
+                'node_processes': ['datanode', 'nodemanager'],
                 'floating_ip_pool': 'network_bad'
             },
             bad_req_i=(1, 'NOT_FOUND', "Floating IP pool network_bad "
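Alongside the daemon renaming, the node-group fixtures above also swap Hadoop 1 configuration keys for Hadoop 2 ones inside node_configs. The key names below are taken directly from the diff; grouping them side by side is only an illustration, not code from the patch:

# Old fixture config vs. the Hadoop 2 keys now exercised by the tests.
HADOOP1_TEST_CONFIGS = {'HDFS': {'hadoop.tmp.dir': '/temp/'}}
HADOOP2_TEST_CONFIGS = {'HDFS': {'hadoop.hdfs.configuration.version': '2'}}
# Other fixtures use {'HDFS': {'mapreduce.task.tmp.dir': '/temp/'}} instead.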
@@ -28,15 +28,15 @@ SAMPLE_DATA = {
     'name': 'a',
     'flavor_id': '42',
     'plugin_name': 'vanilla',
-    'hadoop_version': '1.2.1',
+    'hadoop_version': '2.6.0',
     'node_processes': ['namenode',
                        'datanode',
                        'secondarynamenode',
-                       'tasktracker',
-                       'jobtracker'],
+                       'nodemanager',
+                       'resourcemanager'],
     'node_configs': {
         'HDFS': {
-            u'hadoop.tmp.dir': '/temp/'
+            u'mapreduce.task.tmp.dir': '/temp/'
         }
     },
     'image_id': '550e8400-e29b-41d4-a716-446655440000',
@@ -186,7 +186,7 @@ def start_patch(patch_templates=True):
         @property
         def tags(self):
             if self.name == 'test':
-                return ['vanilla', '1.2.1']
+                return ['vanilla', '2.6.0']
             else:
                 return ['vanilla', 'wrong_tag']

@@ -200,7 +200,7 @@ def start_patch(patch_templates=True):
     nova().images.list_registered.return_value = [Image(),
                                                   Image(name='wrong_name')]
     ng_dict = tu.make_ng_dict('ng', '42', ['namenode'], 1)
-    cluster = tu.create_cluster('test', 't', 'vanilla', '1.2.1', [ng_dict],
+    cluster = tu.create_cluster('test', 't', 'vanilla', '2.6.0', [ng_dict],
                                 id=1, status='Active')
     # stub clusters list
     get_clusters.return_value = [cluster]
@@ -209,14 +209,14 @@ def start_patch(patch_templates=True):
     # stub node templates
     if patch_templates:
         ngt_dict = {'name': 'test', 'tenant_id': 't', 'flavor_id': '42',
-                    'plugin_name': 'vanilla', 'hadoop_version': '1.2.1',
+                    'plugin_name': 'vanilla', 'hadoop_version': '2.6.0',
                     'id': '550e8400-e29b-41d4-a716-446655440000',
                     'node_processes': ['namenode']}

         get_ng_templates.return_value = [r.NodeGroupTemplateResource(ngt_dict)]

         ct_dict = {'name': 'test', 'tenant_id': 't',
-                   'plugin_name': 'vanilla', 'hadoop_version': '1.2.1'}
+                   'plugin_name': 'vanilla', 'hadoop_version': '2.6.0'}

         get_cl_templates.return_value = [r.ClusterTemplateResource(ct_dict)]

@@ -370,10 +370,10 @@ class ValidationTestCase(base.SaharaTestCase):
         data = {
             'name': 'test-cluster',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'cluster_configs': {
                 'HDFS': {
-                    u'hadoop.tmp.dir': '/temp/'
+                    u'mapreduce.task.tmp.dir': '/temp/'
                 }
             },
             'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
@@ -390,7 +390,7 @@ class ValidationTestCase(base.SaharaTestCase):
             data=_update_data(data.copy(), {
                 'cluster_configs': {
                     'wrong_target': {
-                        u'hadoop.tmp.dir': '/temp/'
+                        u'mapreduce.task.tmp.dir': '/temp/'
                     }
                 }}),
             bad_req_i=(1, 'INVALID_REFERENCE',
@@ -414,14 +414,14 @@ class ValidationTestCase(base.SaharaTestCase):
         data = {
             'name': 'test-cluster',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
         }
         self._assert_create_object_validation(data=data)
         data = {
             'name': 'test-cluster',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'default_image_id': '813fe450-40d2-4acc-ade5-ea753a1bd5bc'
         }
         self._assert_create_object_validation(
@@ -430,7 +430,7 @@ class ValidationTestCase(base.SaharaTestCase):
                        "Requested image "
                        "'813fe450-40d2-4acc-ade5-ea753a1bd5bc' "
                        "doesn't contain required tags: "
-                       "['1.2.1']"))
+                       "['2.6.0']"))

     def assert_protected_resource_exception(self, ex):
         self.assertIn("marked as protected", six.text_type(ex))
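The tags stub above encodes the registry rule the validation relies on: an image is only usable when it carries both the 'vanilla' tag and the exact Hadoop version tag, which is why the wrong_tag image is rejected with "doesn't contain required tags: ['2.6.0']". A small sketch of that check, assuming the tags arrive as a plain collection of strings; the helper below is illustrative, not Sahara code:

def image_has_required_tags(image_tags, hadoop_version='2.6.0'):
    """Return True if an image carries the tags the vanilla plugin expects."""
    required = {'vanilla', hadoop_version}
    return required.issubset(set(image_tags))

# Mirrors the stubbed images in start_patch(): the first is accepted,
# the second is rejected because of the wrong version tag.
print(image_has_required_tags(['vanilla', '2.6.0']))      # True
print(image_has_required_tags(['vanilla', 'wrong_tag']))  # False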
@@ -169,9 +169,9 @@ class ApiValidatorTest(testtools.TestCase):
         self._validate_failure(schema, "_123")
         self._validate_success(schema, "a" * 64)
         self._validate_failure(schema, "")
-        self._validate_success(schema, "hadoop-examples-1.2.1.jar")
-        self._validate_success(schema, "hadoop-examples-1.2.1")
-        self._validate_success(schema, "hadoop-examples-1.2.1.")
+        self._validate_success(schema, "hadoop-examples-2.6.0.jar")
+        self._validate_success(schema, "hadoop-examples-2.6.0")
+        self._validate_success(schema, "hadoop-examples-2.6.0.")
         self._validate_success(schema, "1")
         self._validate_success(schema, "1a")
         self._validate_success(schema, "a1")
@@ -202,9 +202,9 @@ class ApiValidatorTest(testtools.TestCase):
         self._validate_success(schema, "_123")
         self._validate_success(schema, "a" * 64)
         self._validate_failure(schema, "")
-        self._validate_failure(schema, "hadoop-examples-1.2.1.jar")
-        self._validate_failure(schema, "hadoop-examples-1.2.1")
-        self._validate_failure(schema, "hadoop-examples-1.2.1.")
+        self._validate_failure(schema, "hadoop-examples-2.6.0.jar")
+        self._validate_failure(schema, "hadoop-examples-2.6.0")
+        self._validate_failure(schema, "hadoop-examples-2.6.0.")
         self._validate_success(schema, "1")
         self._validate_success(schema, "1a")
         self._validate_success(schema, "a1")
@@ -238,9 +238,9 @@ class ApiValidatorTest(testtools.TestCase):
         self._validate_failure(schema, "_123")
         self._validate_success(schema, "a" * 64)
         self._validate_failure(schema, "")
-        self._validate_failure(schema, "hadoop-examples-1.2.1.jar")
-        self._validate_failure(schema, "hadoop-examples-1.2.1")
-        self._validate_failure(schema, "hadoop-examples-1.2.1.")
+        self._validate_failure(schema, "hadoop-examples-2.6.0.jar")
+        self._validate_failure(schema, "hadoop-examples-2.6.0")
+        self._validate_failure(schema, "hadoop-examples-2.6.0.")
         self._validate_failure(schema, "1")
         self._validate_failure(schema, "1a")
         self._validate_success(schema, "a1")
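The three schema hunks above keep the original pass/fail pattern and only bump the version embedded in the sample strings: a dotted artifact name such as "hadoop-examples-2.6.0.jar" passes the permissive name check but fails the stricter ones. An illustrative pair of patterns that reproduces this distinction; these regexes are assumptions for the example, not the project's actual schemas:

import re

# Assumed patterns: a permissive object name allows dots and underscores,
# a hostname-safe name does not.
PERMISSIVE_NAME = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
HOSTNAME_SAFE = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9-]*$")

for candidate in ("hadoop-examples-2.6.0.jar", "cluster-01"):
    print(candidate,
          bool(PERMISSIVE_NAME.match(candidate)),
          bool(HOSTNAME_SAFE.match(candidate)))
# hadoop-examples-2.6.0.jar -> True, False
# cluster-01                -> True, True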