MapR plugin implementation

Change-Id: Id08a3c7a8a40fd826cd902cedbc2590ca284e548
Implements: blueprint mapr-plugin

parent e3c7235baa
commit 26e3244460

0  sahara/plugins/mapr/__init__.py  Normal file

88  sahara/plugins/mapr/plugin.py  Normal file
@@ -0,0 +1,88 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import sahara.plugins.mapr.versions.version_handler_factory as vhf
import sahara.plugins.provisioning as p


class MapRPlugin(p.ProvisioningPluginBase):
    title = 'MapR Hadoop Distribution'
    description = ('The MapR Distribution provides a full Hadoop stack that'
                   ' includes the MapR File System (MapR-FS), MapReduce,'
                   ' a complete Hadoop ecosystem, and the MapR Control System'
                   ' user interface')
    hdfs_user = 'mapr'

    def _get_handler(self, hadoop_version):
        return vhf.VersionHandlerFactory.get().get_handler(hadoop_version)

    def get_title(self):
        return MapRPlugin.title

    def get_description(self):
        return MapRPlugin.description

    def get_hdfs_user(self):
        return MapRPlugin.hdfs_user

    def get_versions(self):
        return vhf.VersionHandlerFactory.get().get_versions()

    def get_node_processes(self, hadoop_version):
        return self._get_handler(hadoop_version).get_node_processes()

    def get_configs(self, hadoop_version):
        return self._get_handler(hadoop_version).get_configs()

    def configure_cluster(self, cluster):
        self._get_handler(cluster.hadoop_version).configure_cluster(cluster)

    def start_cluster(self, cluster):
        self._get_handler(cluster.hadoop_version).start_cluster(cluster)

    def validate(self, cluster):
        self._get_handler(cluster.hadoop_version).validate(cluster)

    def validate_scaling(self, cluster, existing, additional):
        v_handler = self._get_handler(cluster.hadoop_version)
        v_handler.validate_scaling(cluster, existing, additional)

    def scale_cluster(self, cluster, instances):
        v_handler = self._get_handler(cluster.hadoop_version)
        v_handler.scale_cluster(cluster, instances)

    def decommission_nodes(self, cluster, instances):
        v_handler = self._get_handler(cluster.hadoop_version)
        v_handler.decommission_nodes(cluster, instances)

    def get_oozie_server(self, cluster):
        v_handler = self._get_handler(cluster.hadoop_version)
        return v_handler.get_oozie_server(cluster)

    def get_name_node_uri(self, cluster):
        v_handler = self._get_handler(cluster.hadoop_version)
        return v_handler.get_name_node_uri(cluster)

    def get_oozie_server_uri(self, cluster):
        v_handler = self._get_handler(cluster.hadoop_version)
        return v_handler.get_oozie_server_uri(cluster)

    def get_resource_manager_uri(self, cluster):
        v_handler = self._get_handler(cluster.hadoop_version)
        return v_handler.get_resource_manager_uri(cluster)

    def get_edp_engine(self, cluster, job_type):
        v_handler = self._get_handler(cluster.hadoop_version)
        return v_handler.get_edp_engine(cluster, job_type)
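
Reviewer note (not part of the change): MapRPlugin is a thin facade that delegates every version-specific call to a handler from VersionHandlerFactory. A minimal sketch of exercising it follows; the class attributes work standalone, while handler-backed calls (get_versions and friends) additionally assume the version handler packages from the rest of this blueprint are importable.

# Minimal usage sketch; assumes the sahara tree with this change is on the path.
from sahara.plugins.mapr import plugin

mapr = plugin.MapRPlugin()
print(mapr.get_title())      # 'MapR Hadoop Distribution'
print(mapr.get_hdfs_user())  # 'mapr'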

0  sahara/plugins/mapr/util/__init__.py  Normal file

20  sahara/plugins/mapr/util/attr_dict.py  Normal file
@@ -0,0 +1,20 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class AttrDict(dict):

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self
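
Reviewer note (not part of the change): AttrDict aliases the instance __dict__ to the dict itself, so keys and attributes are the same namespace. A quick sketch:

# Minimal usage sketch.
from sahara.plugins.mapr.util.attr_dict import AttrDict

d = AttrDict({'host': '10.0.0.5', 'port': 5181})
assert d.host == d['host']
d.rack = '/rack1'          # attribute writes show up as keys too
assert d['rack'] == '/rack1'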

78  sahara/plugins/mapr/util/cluster_helper.py  Normal file
@@ -0,0 +1,78 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sahara.plugins.mapr.util import names
import sahara.plugins.utils as u


ZOOKEEPER_CLIENT_PORT = 5181


def get_cldb_nodes_ip(cluster):
    cldb_node_list = u.get_instances(cluster, names.CLDB)
    return ','.join([i.management_ip for i in cldb_node_list])


def get_zookeeper_nodes_ip(cluster):
    zkeeper_node_list = u.get_instances(cluster, names.ZOOKEEPER)
    return ','.join([i.management_ip for i in zkeeper_node_list])


def get_zookeeper_nodes_ip_with_port(cluster):
    zkeeper_node_list = u.get_instances(cluster, names.ZOOKEEPER)
    return ','.join(['%s:%s' % (i.management_ip, ZOOKEEPER_CLIENT_PORT)
                     for i in zkeeper_node_list])


def get_resourcemanager_ip(cluster):
    rm_instance = u.get_instance(cluster, names.RESOURCE_MANAGER)
    return rm_instance.management_ip


def get_historyserver_ip(cluster):
    hs_instance = u.get_instance(cluster, names.HISTORY_SERVER)
    return hs_instance.management_ip


def get_jobtracker(cluster):
    instance = u.get_instance(cluster, names.JOBTRACKER)
    return instance


def get_resourcemanager(cluster):
    return u.get_instance(cluster, names.RESOURCE_MANAGER)


def get_nodemanagers(cluster):
    return u.get_instances(cluster, names.NODE_MANAGER)


def get_oozie(cluster):
    return u.get_instance(cluster, names.OOZIE)


def get_datanodes(cluster):
    return u.get_instances(cluster, names.DATANODE)


def get_tasktrackers(cluster):
    return u.get_instances(cluster, names.TASK_TRACKER)


def get_secondarynamenodes(cluster):
    return u.get_instances(cluster, names.SECONDARY_NAMENODE)


def get_historyserver(cluster):
    return u.get_instance(cluster, names.HISTORY_SERVER)

76  sahara/plugins/mapr/util/cluster_info.py  Normal file
@@ -0,0 +1,76 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections as c

import six

import sahara.plugins.utils as u


class ClusterInfo(object):

    # TODO(aosadchiy): perform lookup for plugin_spec

    def __init__(self, cluster, plugin_spec):
        self.cluster = cluster
        self.plugin_spec = plugin_spec

    def get_default_configs(self, node_group=None):
        services = self.get_services(node_group)
        return self.plugin_spec.get_default_plugin_configs(services)

    def get_services(self, node_group=None):
        if not node_group:
            return set(service for node_group in self.cluster.node_groups
                       for service in self.get_services(node_group))
        else:
            return (set(self.plugin_spec.get_node_process_service(node_process)
                        for node_process in node_group.node_processes)
                    | set(['general']))

    def get_user_configs(self, node_group=None):
        services = self.get_services(node_group)
        predicate = lambda i: i[0] in services and i[1]
        configs = dict(filter(
            predicate, six.iteritems(self.cluster.cluster_configs)))
        scope = 'node' if node_group else 'cluster'
        result = c.defaultdict(lambda: c.defaultdict(dict))
        for service, kv in six.iteritems(configs):
            for key, value in six.iteritems(kv):
                filename = self.plugin_spec.get_config_file(
                    scope, service, key)
                result[service][filename][key] = value
        return result

    def get_node_group_files(self):
        return

    def get_node_groups(self, node_process=None):
        return u.get_node_groups(self.cluster, node_process)

    def get_instances_count(self, node_process=None):
        return u.get_instances_count(self.cluster, node_process)

    def get_instances(self, node_process=None):
        return u.get_instances(self.cluster, node_process)

    def get_instance(self, node_process):
        return u.get_instance(self.cluster, node_process)

    def get_instances_ip(self, node_process):
        return [i.management_ip for i in self.get_instances(node_process)]

    def get_instance_ip(self, node_process):
        return self.get_instance(node_process).management_ip

110  sahara/plugins/mapr/util/config.py  Normal file
@@ -0,0 +1,110 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.openstack.common import log as logging
import sahara.plugins.mapr.util.config_file_utils as cfu
import sahara.plugins.mapr.versions.version_handler_factory as vhf
from sahara.plugins import provisioning as p
import sahara.plugins.utils as u
from sahara.topology import topology_helper as th
from sahara.utils import files as f


MAPR_HOME = '/opt/mapr'
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt("enable_data_locality", "sahara.topology.topology_helper")
ENABLE_DATA_LOCALITY = p.Config('Enable Data Locality', 'general', 'cluster',
                                config_type="bool", priority=1,
                                default_value=True, is_optional=True)


def post_configure_instance(instance):
    LOG.info(_LI('START: Post configuration for instance.'))
    with instance.remote() as r:
        if is_data_locality_enabled(instance.node_group.cluster):
            LOG.debug('Data locality is enabled.')
            t_script = MAPR_HOME + '/topology.sh'
            LOG.debug('Start writing file %s', t_script)
            r.write_file_to(t_script, f.get_file_text(
                'plugins/mapr/util/resources/topology.sh'), run_as_root=True)
            LOG.debug('Finished writing file %s', t_script)
            LOG.debug('Start executing command: chmod +x %s', t_script)
            r.execute_command('chmod +x ' + t_script, run_as_root=True)
            LOG.debug('Finished executing command.')
        else:
            LOG.debug('Data locality is disabled.')
    LOG.info(_LI('END: Post configuration for instance.'))


def configure_instances(cluster, instances):
    h_version = cluster.hadoop_version
    v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
    p_spec = v_handler.get_plugin_spec()
    configurer = v_handler.get_cluster_configurer(cluster, p_spec)
    configurer.configure(instances)


def configure_topology_data(cluster, is_node_awareness):
    LOG.info(_LI('START: configuring topology data.'))
    if is_data_locality_enabled(cluster):
        LOG.debug('Data locality is enabled.')
        LOG.debug('Start generating topology map.')
        topology_map = th.generate_topology_map(cluster, is_node_awareness)
        LOG.debug('Finished generating topology map.')
        topology_data = cfu.to_file_content(topology_map, 'topology')
        for i in u.get_instances(cluster):
            LOG.debug(
                'Start writing to file: %s/topology.data', MAPR_HOME)
            i.remote().write_file_to(MAPR_HOME + "/topology.data",
                                     topology_data, run_as_root=True)
            LOG.debug('Done writing to file: %s/topology.data', MAPR_HOME)
    else:
        LOG.debug('Data locality is disabled.')
    LOG.info(_LI('END: configuring topology data.'))


def get_plugin_configs():
    configs = []
    if CONF.enable_data_locality:
        configs.append(ENABLE_DATA_LOCALITY)
    return configs


def get_plugin_config_value(service, name, cluster):
    if cluster:
        for ng in cluster.node_groups:
            cl_param = ng.configuration().get(service, {}).get(name)
            if cl_param is not None:
                return cl_param

    for c in get_plugin_configs():
        if c.applicable_target == service and c.name == name:
            return c.default_value

    raise ex.NotFoundException(
        name, (_("Unable to get parameter '%(name)s' from service %(service)s")
               % {'name': name, 'service': service}))


def is_data_locality_enabled(cluster):
    if not CONF.enable_data_locality:
        return False
    return get_plugin_config_value(ENABLE_DATA_LOCALITY.applicable_target,
                                   ENABLE_DATA_LOCALITY.name, cluster)

81  sahara/plugins/mapr/util/config_file_utils.py  Normal file
@@ -0,0 +1,81 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six

import sahara.plugins.mapr.util.func_utils as fu
import sahara.utils.files as f
import sahara.utils.xmlutils as x


def load_properties_file(path):
    predicate = fu.and_predicate(lambda i: len(i) != 0,
                                 lambda i: not i.isspace(),
                                 lambda i: not i.startswith('#'))
    mapper = fu.chain_function(lambda i: tuple(i.split('=')),
                               lambda i: (i[0].strip(), i[1].strip()))
    lines = f.get_file_text(path).splitlines()
    return dict(map(mapper, filter(predicate, lines)))


def load_xml_file(path):
    kv_mapper = lambda i: (x._get_text_from_node(i, 'name'),
                           x._adjust_field(x._get_text_from_node(i, 'value')))
    strip_mapper = lambda i: (i[0].strip(), i[1].strip())
    props = x.load_xml_document(path).getElementsByTagName('property')
    return dict(map(strip_mapper, map(kv_mapper, props)))


def load_raw_file(path):
    return {'content': f.get_file_text(path)}


def to_properties_file_content(data):
    mapper = lambda i: '%s=%s\n' % i
    reducer = lambda p, c: p + c
    return reduce(reducer, map(mapper, six.iteritems(data)), '')


def to_xml_file_content(data):
    return x.create_hadoop_xml(data)


def to_topology_file_content(data):
    mapper = lambda i: '%s %s\n' % i
    reducer = lambda p, c: p + c
    return reduce(reducer, map(mapper, six.iteritems(data)))


def to_raw_file_content(data, cfu=True, conv=str):
    return data['content'] if cfu else conv(data)


def load_file(path, file_type):
    if file_type == 'properties':
        return load_properties_file(path)
    elif file_type == 'xml':
        return load_xml_file(path)
    elif file_type == 'raw':
        return load_raw_file(path)


def to_file_content(data, file_type, *args, **kargs):
    if file_type == 'properties':
        return to_properties_file_content(data, *args, **kargs)
    elif file_type == 'xml':
        return to_xml_file_content(data, *args, **kargs)
    elif file_type == 'topology':
        return to_topology_file_content(data, *args, **kargs)
    elif file_type == 'raw':
        return to_raw_file_content(data, *args, **kargs)
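
Reviewer note (not part of the change): the serializers above are pure functions, so they are easy to check in isolation. A minimal sketch (values are illustrative):

# Minimal usage sketch; assumes the sahara tree with this change is importable.
from sahara.plugins.mapr.util import config_file_utils as cfu

content = cfu.to_file_content({'mapr.home': '/opt/mapr'}, 'properties')
assert content == 'mapr.home=/opt/mapr\n'

topology = cfu.to_file_content({'10.0.0.5': '/rack1'}, 'topology')
assert topology == '10.0.0.5 /rack1\n'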

77  sahara/plugins/mapr/util/config_utils.py  Normal file
@@ -0,0 +1,77 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sahara.plugins.mapr.util.dict_utils as du
import sahara.plugins.mapr.util.func_utils as fu
import sahara.plugins.mapr.versions.version_handler_factory as vhf
import sahara.utils.configs as c


def get_scope_default_configs(version_handler, scope, services=None):
    configs = map(lambda i: i.to_dict(), version_handler.get_configs())
    q_predicate = fu.field_equals_predicate('scope', scope)
    if services:
        at_predicate = fu.in_predicate('applicable_target', services)
        q_predicate = fu.and_predicate(q_predicate, at_predicate)
    q_fields = ['applicable_target', 'name', 'default_value']
    q_result = du.select(q_fields, configs, q_predicate)
    m_reducer = du.iterable_to_values_pair_dict_reducer(
        'name', 'default_value')
    return du.map_by_field_value(q_result, 'applicable_target',
                                 dict, m_reducer)


def get_cluster_default_configs(version_handler, services=None):
    return get_scope_default_configs(version_handler, 'cluster', services)


def get_node_default_configs(version_handler, services=None):
    return get_scope_default_configs(version_handler, 'node', services)


def get_default_configs(version_handler, services=None):
    cluster_configs = get_cluster_default_configs(version_handler, services)
    node_configs = get_node_default_configs(version_handler, services)
    return c.merge_configs(cluster_configs, node_configs)


def get_node_group_services(node_group):
    h_version = node_group.cluster.hadoop_version
    v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
    services = v_handler.get_node_processes()
    node_processes = node_group.node_processes
    return set(s for np in node_processes
               for s in services if np in services[s])


def get_cluster_configs(cluster):
    h_version = cluster.hadoop_version
    v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
    default_configs = get_cluster_default_configs(v_handler)
    user_configs = cluster.cluster_configs
    return c.merge_configs(default_configs, user_configs)


def get_configs(node_group):
    services = get_node_group_services(node_group)
    h_version = node_group.cluster.hadoop_version
    v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
    default_configs = get_default_configs(v_handler, services)
    user_configs = node_group.configuration()
    return c.merge_configs(default_configs, user_configs)


def get_service(version_handler, node_process):
    node_processes = version_handler.get_node_processes()
    return du.get_keys_by_value_2(node_processes, node_process)

124  sahara/plugins/mapr/util/dict_utils.py  Normal file
@@ -0,0 +1,124 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections as cl
import copy as cp
import functools as ft
import itertools as it

import six

import sahara.plugins.mapr.util.func_utils as fu


def append_to_key(dict_0, dict_1):
    return dict((k0, dict((k1, dict_1[k1]) for k1 in keys_1 if k1 in dict_1))
                for k0, keys_1 in six.iteritems(dict_0))


def iterable_to_values_pair_dict_reducer(key_0, key_1):
    def reducer(previous, iterable, mapper):
        previous.update(dict(map(mapper, iterable)))
        return previous
    return ft.partial(reducer, mapper=fu.get_values_pair_function(key_0,
                                                                  key_1))


def flatten_to_list_reducer():
    def reducer(previous, iterable):
        previous.extend(list(iterable))
        return previous
    return reducer


def map_by_field_value(iterable, key, factory=list,
                       iterator_reducer=flatten_to_list_reducer()):
    def reducer(mapping, current):
        mapping[current[0]] = iterator_reducer(
            mapping[current[0]], iter(current[1]))
        return mapping
    groups = it.groupby(iterable, fu.get_value_function(key))
    return reduce(reducer, groups, cl.defaultdict(factory))


def map_by_fields_values(iterable, fields, factory=list,
                         reducer=flatten_to_list_reducer()):
    if len(fields) == 1:
        return map_by_field_value(iterable, fields[0], factory, reducer)
    else:
        return dict((k, map_by_fields_values(v, fields[1:], factory, reducer))
                    for k, v in six.iteritems(map_by_field_value(
                        iterable, fields[0])))


def get_keys_by_value_type(mapping, value_type):
    return filter(lambda k: isinstance(mapping[k], value_type), mapping)


def deep_update(dict_0, dict_1, copy=True):
    result = cp.deepcopy(dict_0) if copy else dict_0
    dict_valued_keys_0 = set(get_keys_by_value_type(dict_0, dict))
    dict_valued_keys_1 = set(get_keys_by_value_type(dict_1, dict))
    common_keys = dict_valued_keys_0 & dict_valued_keys_1
    if not common_keys:
        result.update(dict_1)
    else:
        for k1, v1 in six.iteritems(dict_1):
            result[k1] = deep_update(
                dict_0[k1], v1) if k1 in common_keys else v1
    return result


def get_keys_by_value(mapping, value):
    return [k for k, v in six.iteritems(mapping) if v == value]


# TODO(aosadchiy): find more appropriate name
def get_keys_by_value_2(mapping, value):
    return [k for k, v in six.iteritems(mapping) if value in v]


def iterable_to_values_list_reducer(key):
    def reducer(previous, iterable, mapper):
        previous.extend(map(mapper, iterable))
        return previous
    return ft.partial(reducer, mapper=fu.get_value_function(key))


def select(fields, iterable, predicate=fu.true_predicate):
    return map(fu.extract_fields_function(fields), filter(predicate, iterable))


has_no_dict_values_predicate = lambda n: not get_keys_by_value_type(n, dict)


def list_of_vp_dicts_function(key_0, key_1):
    def transformer(item, key_0, key_1):
        return [fu.values_pair_to_dict_function(key_0, key_1)(i)
                for i in six.iteritems(item)]
    return ft.partial(transformer, key_0=key_0, key_1=key_1)


def flattened_dict(mapping, keys, is_terminal=has_no_dict_values_predicate,
                   transform=None):
    if not transform:
        transform = list_of_vp_dicts_function(*keys[-2:])
    if is_terminal(mapping):
        return list(transform(mapping))
    else:
        temp = [it.imap(fu.append_field_function(keys[0], key),
                        flattened_dict(value, keys[1:],
                                       is_terminal, transform))
                for key, value in six.iteritems(mapping)]
        return list(it.chain(*temp))
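
Reviewer note (not part of the change): select() plus a field predicate is the query pattern used by plugin_spec.py further down in this change. A minimal sketch with made-up config rows (Python 2 map/filter return lists here):

# Minimal usage sketch; the config rows are illustrative only.
from sahara.plugins.mapr.util import dict_utils as du
from sahara.plugins.mapr.util import func_utils as fu

configs = [
    {'applicable_target': 'general', 'name': 'a', 'default_value': 1},
    {'applicable_target': 'Oozie', 'name': 'b', 'default_value': 2},
]
rows = du.select(['name', 'default_value'], configs,
                 fu.field_equals_predicate('applicable_target', 'Oozie'))
assert rows == [{'name': 'b', 'default_value': 2}]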

167  sahara/plugins/mapr/util/func_utils.py  Normal file
@@ -0,0 +1,167 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy as c
import functools as ft
import itertools as it

import six


# predicates
true_predicate = lambda i: True
false_predicate = lambda i: False


def not_predicate(predicate):
    return ft.partial(lambda i, p: not p(i), p=predicate)


def and_predicate(*predicates):
    if len(predicates) == 1:
        return predicates[0]
    else:
        def predicate(item, predicates):
            for p in predicates:
                if not p(item):
                    return False
            return True
        return ft.partial(predicate, predicates=predicates)


def or_predicate(*predicates):
    if len(predicates) == 1:
        return predicates[0]
    else:
        def predicate(item, predicates):
            for p in predicates:
                if p(item):
                    return True
            return False
        return ft.partial(predicate, predicates=predicates)


def impl_predicate(p0, p1):
    return or_predicate(not_predicate(p0), p1)


def field_equals_predicate(key, value):
    return ft.partial(lambda i, k, v: i[k] == v, k=key, v=value)


def like_predicate(template, ignored=[]):
    if not template:
        return true_predicate
    elif len(template) == 1:
        k, v = six.iteritems(template).next()
        return true_predicate if k in ignored else field_equals_predicate(k, v)
    else:
        return and_predicate(*[field_equals_predicate(key, value)
                               for key, value in six.iteritems(template)
                               if key not in ignored])


def in_predicate(key, values):
    if not values:
        return false_predicate
    else:
        return or_predicate(*[field_equals_predicate(key, value)
                              for value in values])


# functions


def chain_function(*functions):
    return reduce(lambda p, c: ft.partial(lambda i, p, c: c(p(i)), p=p, c=c),
                  functions)


def copy_function():
    return lambda i: c.deepcopy(i)


def append_field_function(key, value):
    def mapper(item, key, value):
        item = c.deepcopy(item)
        item[key] = value
        return item
    return ft.partial(mapper, key=key, value=value)


def append_fields_function(fields):
    if not fields:
        return copy_function()
    elif len(fields) == 1:
        key, value = six.iteritems(fields).next()
        return append_field_function(key, value)
    else:
        return chain_function(*[append_field_function(key, value)
                                for key, value in six.iteritems(fields)])


def get_values_pair_function(key_0, key_1):
    return ft.partial(lambda i, k0, k1: (i[k0], i[k1]), k0=key_0, k1=key_1)


def get_field_function(key):
    return ft.partial(lambda i, k: (k, i[k]), k=key)


def get_fields_function(keys):
    return ft.partial(lambda i, k: [f(i) for f in [get_field_function(key)
                                                   for key in k]], k=keys)


def extract_fields_function(keys):
    return lambda i: dict(get_fields_function(keys)(i))


def get_value_function(key):
    return ft.partial(lambda i, k: i[k], k=key)


def set_default_value_function(key, value):
    def mapper(item, key, value):
        item = c.deepcopy(item)
        if key not in item:
            item[key] = value
        return item
    return ft.partial(mapper, key=key, value=value)


def set_default_values_function(fields):
    if not fields:
        return copy_function()
    elif len(fields) == 1:
        key, value = six.iteritems(fields).next()
        return set_default_value_function(key, value)
    else:
        return chain_function(*[set_default_value_function(key, value)
                                for key, value in six.iteritems(fields)])


def values_pair_to_dict_function(key_0, key_1):
    return ft.partial(lambda vp, k0, k1: {k0: vp[0], k1: vp[1]},
                      k0=key_0, k1=key_1)


def flatten(iterable):
    return it.chain.from_iterable(iterable)


def sync_execute_consumer(*consumers):
    def consumer(argument, consumers):
        for cn in consumers:
            cn(argument)
    return ft.partial(consumer, consumers=consumers)
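
Reviewer note (not part of the change): these helpers build reusable predicate and mapper callables out of functools.partial; a small composition sketch, mirroring how config_file_utils.load_properties_file combines them:

# Minimal usage sketch.
from sahara.plugins.mapr.util import func_utils as fu

non_comment = fu.and_predicate(lambda l: len(l) != 0,
                               lambda l: not l.startswith('#'))
assert non_comment('a=1') and not non_comment('# comment')

split_and_strip = fu.chain_function(lambda l: l.split('='),
                                    lambda kv: (kv[0].strip(), kv[1].strip()))
assert split_and_strip(' a = 1 ') == ('a', '1')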

37  sahara/plugins/mapr/util/maprfs_helper.py  Normal file
@@ -0,0 +1,37 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

import six

MV_TO_MAPRFS_CMD = ('sudo -u %(user)s'
                    ' hadoop fs -copyFromLocal %(source)s %(target)s'
                    ' && sudo rm -f %(source)s')
MKDIR_CMD = 'sudo -u %(user)s hadoop fs -mkdir -p %(path)s'


def put_file_to_maprfs(r, content, file_name, path, hdfs_user):
    tmp_file_name = '/tmp/%s.%s' % (file_name, six.text_type(uuid.uuid4()))
    r.write_file_to(tmp_file_name, content)
    move_from_local(r, tmp_file_name, path + '/' + file_name, hdfs_user)


def move_from_local(r, source, target, hdfs_user):
    args = {'user': hdfs_user, 'source': source, 'target': target}
    r.execute_command(MV_TO_MAPRFS_CMD % args)


def create_maprfs_dir(remote, dir_name, hdfs_user):
    remote.execute_command(MKDIR_CMD % {'user': hdfs_user, 'path': dir_name})
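
Reviewer note (not part of the change): this module only formats shell commands and runs them through a Sahara remote; the templates expand as shown below (the path is a hypothetical example):

# Minimal usage sketch of the command template expansion.
from sahara.plugins.mapr.util import maprfs_helper as mfs

cmd = mfs.MKDIR_CMD % {'user': 'mapr', 'path': '/user/example'}
assert cmd == 'sudo -u mapr hadoop fs -mkdir -p /user/example'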

41  sahara/plugins/mapr/util/names.py  Normal file
@@ -0,0 +1,41 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

OOZIE = 'Oozie'
HIVE = 'Hive'
HIVE_METASTORE = 'HiveMetastore'
HIVE_SERVER2 = 'HiveServer2'
CLDB = 'CLDB'
FILE_SERVER = 'FileServer'
ZOOKEEPER = 'ZooKeeper'
RESOURCE_MANAGER = 'ResourceManager'
HISTORY_SERVER = 'HistoryServer'
IS_M7_ENABLED = 'Enable MapR-DB'
GENERAL = 'general'
JOBTRACKER = 'JobTracker'
NODE_MANAGER = 'NodeManager'
DATANODE = 'Datanode'
TASK_TRACKER = 'TaskTracker'
SECONDARY_NAMENODE = 'SecondaryNamenode'
NFS = 'NFS'
WEB_SERVER = 'Webserver'
WAIT_OOZIE_INTERVAL = 300
WAIT_NODE_ALARM_NO_HEARTBEAT = 360
ecosystem_components = ['Oozie',
                        'Hive-Metastore',
                        'HiveServer2',
                        'HBase-Master',
                        'HBase-RegionServer',
                        'HBase-Client',
                        'Pig']

198  sahara/plugins/mapr/util/plugin_spec.py  Normal file
@@ -0,0 +1,198 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import os.path

import six

import sahara.openstack.common.log as logging
import sahara.plugins.mapr.util.config_file_utils as cfu
import sahara.plugins.mapr.util.dict_utils as du
import sahara.plugins.mapr.util.func_utils as fu
import sahara.plugins.provisioning as p
import sahara.utils.files as fm


LOG = logging.getLogger(__name__)


class PluginSpec(object):

    def __init__(self, path):
        self.base_dir = os.path.dirname(path)
        self.plugin_spec_dict = self._load_plugin_spec_dict(path)
        self.service_file_name_map = self._load_service_file_name_map()
        self.default_configs = self._load_default_configs()
        self.service_node_process_map = self._load_service_node_process_map()
        self.plugin_config_objects = self._load_plugin_config_objects()
        self.file_name_config_map = self._load_file_name_config_map()
        self.plugin_config_items = self._load_plugin_config_items()
        self.plugin_configs = self._load_plugin_configs()
        self.default_plugin_configs = self._load_default_plugin_configs()
        self.file_type_map = self._load_file_type_map()

    def _load_plugin_spec_dict(self, path):
        LOG.debug('Loading plugin spec from %s', path)
        plugin_spec_dict = json.loads(fm.get_file_text(path))
        return plugin_spec_dict

    def _load_service_file_name_map(self):
        LOG.debug('Loading service -> filename mapping')
        return dict((s['name'], [fn for fn in s['files']])
                    for s in self.plugin_spec_dict['services']
                    if 'files' in s and s['files'])

    def _load_default_configs(self):
        LOG.debug('Loading defaults from local files')
        file_name_data_map = {}
        for f in self.plugin_spec_dict['files']:
            if 'local' not in f:
                LOG.debug('%s skipped. No "local" section', f['remote'])
                continue
            local_path = os.path.join(self.base_dir, f['local'])
            LOG.debug('Loading %(local_path)s as default for %(remote)s',
                      {'local_path': local_path, 'remote': f['remote']})
            data = cfu.load_file(local_path, f['type'])
            file_name_data_map[f['remote']] = data
        return du.append_to_key(self.service_file_name_map, file_name_data_map)

    def _load_plugin_config_items(self):
        LOG.debug('Loading full configs map for plugin')
        items = map(lambda i: i.to_dict(), self.plugin_config_objects)

        def mapper(item):
            file_name = du.get_keys_by_value_2(
                self.file_name_config_map, item['name'])[0]
            append_f = fu.append_field_function('file', file_name)
            return append_f(item)
        return map(mapper, items)

    def _load_plugin_configs(self):
        LOG.debug('Loading plugin configs {service:{file:{name:value}}}')
        m_fields = ['applicable_target', 'file']
        vp_fields = ('name', 'default_value')
        reducer = du.iterable_to_values_pair_dict_reducer(*vp_fields)
        return du.map_by_fields_values(self.plugin_config_items,
                                       m_fields, dict, reducer)

    def _load_default_plugin_configs(self):
        return du.deep_update(self.default_configs, self.plugin_configs)

    def _load_service_node_process_map(self):
        LOG.debug('Loading {service:[node process]} mapping')
        return dict((s['name'], [np for np in s['node_processes']])
                    for s in self.plugin_spec_dict['services']
                    if 'node_processes' in s and s['node_processes'])

    def _load_file_name_config_map(self):
        LOG.debug('Loading {filename:[config_name]} names mapping')
        r = {}
        for fd in self.plugin_spec_dict['files']:
            if 'configs' in fd:
                r[fd['remote']] = [i['name']
                                   for ir, sd in six.iteritems(fd['configs'])
                                   for s, items in six.iteritems(sd)
                                   for i in items]
        return r

    def _load_plugin_config_objects(self):
        LOG.debug('Loading config objects for sahara-dashboard')

        def mapper(item):
            req = ['name', 'applicable_target', 'scope']
            opt = ['description', 'config_type', 'config_values',
                   'default_value', 'is_optional', 'priority']
            kargs = dict((k, item[k]) for k in req + opt if k in item)
            return p.Config(**kargs)
        result = []
        for file_dict in self.plugin_spec_dict['files']:
            if 'configs' not in file_dict:
                LOG.debug('%s skipped. No "configs" section',
                          file_dict['remote'])
                continue
            remote_path = file_dict['remote']
            applicable_target = du.get_keys_by_value_2(
                self.service_file_name_map, remote_path)[0]
            for is_required, scope_dict in six.iteritems(file_dict['configs']):
                is_optional = is_required != 'required'
                for scope, items in six.iteritems(scope_dict):
                    fields = {'file': remote_path, 'is_optional': is_optional,
                              'scope': scope,
                              'applicable_target': applicable_target}
                    append_f = fu.append_fields_function(fields)
                    result.extend([append_f(i) for i in items])
        return map(mapper, result)

    def _load_file_type_map(self):
        LOG.debug('Loading {filename:type} mapping')
        return dict((f['remote'], f['type'])
                    for f in self.plugin_spec_dict['files'])

    def get_node_process_service(self, node_process):
        return du.get_keys_by_value_2(self.service_node_process_map,
                                      node_process)[0]

    def get_default_plugin_configs(self, services):
        return dict((k, self.default_plugin_configs[k])
                    for k in services if k in self.default_plugin_configs)

    def get_config_file(self, scope, service, name):
        p_template = {
            'applicable_target': service, 'scope': scope, 'name': name}
        q_fields = ['file']
        q_predicate = fu.like_predicate(p_template)
        q_source = self.plugin_config_items
        q_result = du.select(q_fields, q_source, q_predicate)
        if q_result and 'file' in q_result[0]:
            return q_result[0]['file']
        else:
            return None

    def get_file_type(self, file_name):
        if file_name in self.file_type_map:
            return self.file_type_map[file_name]
        else:
            return None

    def get_service_for_file_name(self, file_name):
        return du.get_keys_by_value_2(self.service_file_name_map, file_name)[0]

    def get_version_config_objects(self):
        common_fields = {'scope': 'cluster',
                         'config_type': 'dropdown',
                         'is_optional': False,
                         'priority': 1}

        def has_version_field(service):
            return 'versions' in service

        def get_versions(service):
            return {'name': '%s Version' % service['name'],
                    'applicable_target': service['name'],
                    'config_values': [(v, v) for v in service['versions']]}

        def add_common_fields(item):
            item.update(common_fields)
            return item

        def to_config(item):
            return p.Config(**item)

        mapper = fu.chain_function(get_versions, add_common_fields, to_config)
        source = self.plugin_spec_dict['services']
        return map(mapper, filter(has_version_field, source))

    def get_configs(self):
        return self.plugin_config_objects + self.get_version_config_objects()
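
Reviewer note (not part of the change): the loaders above imply a plugin-spec JSON shape: a 'services' list (name, files, node_processes, optional versions) and a 'files' list (remote path, type, optional local default, and 'configs' keyed first by required/optional, then by scope). The sketch below only restates that inferred structure; every concrete value in it is illustrative, not taken from the actual spec files of this blueprint.

# Illustrative shape only -- field values are hypothetical.
SPEC_SKETCH = {
    'services': [
        {'name': 'Oozie',
         'node_processes': ['Oozie'],
         'files': ['/opt/mapr/oozie/conf/oozie-site.xml'],
         'versions': ['4.0.1']},
    ],
    'files': [
        {'remote': '/opt/mapr/oozie/conf/oozie-site.xml',
         'type': 'xml',
         'configs': {'optional': {'cluster': [{'name': 'some.config'}]}}},
    ],
}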

13  sahara/plugins/mapr/util/resources/create_disk_list_file.sh  Normal file
@@ -0,0 +1,13 @@
#!/bin/bash

disk_list_file=/tmp/disk.list

if [ -f ${disk_list_file} ]; then
    rm -f ${disk_list_file}
fi

for path in $*; do
    device=`findmnt ${path} -cno SOURCE`
    umount -f ${device}
    echo ${device} >> ${disk_list_file}
done

3  sahara/plugins/mapr/util/resources/hadoop_version  Normal file
@@ -0,0 +1,3 @@
classic_version=0.20.2
yarn_version=2.4.1
default_mode=%(mode)s

20  sahara/plugins/mapr/util/resources/topology.sh  Executable file
@@ -0,0 +1,20 @@
#!/bin/bash
MAPR_HOME=/opt/mapr

while [ $# -gt 0 ] ; do
    nodeArg=$1
    exec< ${MAPR_HOME}/topology.data
    result=""
    while read line ; do
        ar=( $line )
        if [ "${ar[0]}" = "$nodeArg" ]; then
            result="${ar[1]}"
        fi
    done
    shift
    if [ -z "$result" ]; then
        echo -n "/default/rack "
    else
        echo -n "$result "
    fi
done

9  sahara/plugins/mapr/util/resources/waiting_script.sh  Normal file
@@ -0,0 +1,9 @@
#!/bin/sh

while true; do
    if [ -f '/tmp/launching-mapr-mfs.lck' ]; then
        sleep 5
    else
        break
    fi
done

71  sahara/plugins/mapr/util/run_scripts.py  Normal file
@@ -0,0 +1,71 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sahara.i18n import _LI
from sahara.openstack.common import log as logging


LOG = logging.getLogger(__name__)


def run_configure_sh(remote, script_string):
    LOG.info(_LI("running configure.sh script"))
    remote.execute_command(script_string, run_as_root=True)


def start_zookeeper(remote):
    LOG.info(_LI("Starting mapr-zookeeper"))
    remote.execute_command('service mapr-zookeeper start', run_as_root=True)


def start_oozie(remote):
    LOG.info(_LI("Starting mapr-oozie"))
    remote.execute_command('service mapr-oozie start',
                           run_as_root=True,
                           raise_when_error=False)


def start_hive_metastore(remote):
    LOG.info(_LI("Starting mapr-hivemetastore"))
    remote.execute_command('service mapr-hivemetastore start',
                           run_as_root=True)


def start_hive_server2(remote):
    LOG.info(_LI("Starting mapr-hiveserver2"))
    remote.execute_command('service mapr-hiveserver2 start', run_as_root=True)


def start_warden(remote):
    LOG.info(_LI("Starting mapr-warden"))
    remote.execute_command('service mapr-warden start', run_as_root=True)


def start_cldb(remote):
    LOG.info(_LI("Starting mapr-cldb"))
    remote.execute_command('service mapr-cldb start', run_as_root=True)


def start_node_manager(remote):
    LOG.info(_LI("Starting nodemanager"))
    remote.execute_command(('/opt/mapr/hadoop/hadoop-2.3.0'
                            '/sbin/yarn-daemon.sh start nodemanager'),
                           run_as_root=True)


def start_resource_manager(remote):
    LOG.info(_LI("Starting resourcemanager"))
    remote.execute_command(('/opt/mapr/hadoop/hadoop-2.3.0'
                            '/sbin/yarn-daemon.sh start resourcemanager'),
                           run_as_root=True)
sahara/plugins/mapr/util/scaling.py
Normal file
144
sahara/plugins/mapr/util/scaling.py
Normal file
@ -0,0 +1,144 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sahara import context
|
||||
from sahara.i18n import _LI
|
||||
from sahara.openstack.common import log as logging
|
||||
from sahara.plugins.mapr.util import config
|
||||
from sahara.plugins.mapr.util import names
|
||||
from sahara.plugins.mapr.util import run_scripts
|
||||
from sahara.plugins.mapr.util import start_helper
|
||||
from sahara.utils import general as gen
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
STOP_WARDEN_CMD = 'service mapr-warden stop'
|
||||
STOP_ZOOKEEPER_CMD = 'service mapr-zookeeper stop'
|
||||
GET_SERVER_ID_CMD = ('maprcli node list -json -filter [ip==%s] -columns id'
|
||||
' | grep id | grep -o \'[0-9]*\'')
|
||||
MOVE_NODE_CMD = 'maprcli node move -serverids %s -topology /decommissioned'
|
||||
GET_HOSTNAME_CMD = ('maprcli node list -json -filter [ip==%s]'
|
||||
' -columns hostname | grep hostname'
|
||||
' | grep -Po \'(?<=("hostname":")).*?(?=")\'')
|
||||
REMOVE_NODE_CMD = 'maprcli node remove -filter [ip==%(ip)s] -nodes %(nodes)s'
|
||||
REMOVE_MAPR_PACKAGES_CMD = ('python -mplatform | grep Ubuntu '
|
||||
'&& apt-get remove mapr-\* -y'
|
||||
' || yum remove mapr-\* -y')
|
||||
REMOVE_MAPR_HOME_CMD = 'rm -rf /opt/mapr'
|
||||
REMOVE_MAPR_CORES_CMD = 'rm -rf /opt/cores/*.core.*'
|
||||
|
||||
|
||||
def scale_cluster(cluster, instances, disk_setup_script_path, waiting_script,
|
||||
context, configure_sh_string, is_node_awareness):
|
||||
LOG.info(_LI('START: Cluster scaling. Cluster = %s'), cluster.name)
|
||||
for inst in instances:
|
||||
start_helper.install_role_on_instance(inst, context)
|
||||
config.configure_instances(cluster, instances)
|
||||
start_services(cluster, instances, disk_setup_script_path,
|
||||
waiting_script, configure_sh_string)
|
||||
LOG.info(_LI('END: Cluster scaling. Cluster = %s'), cluster)
|
||||
|
||||
|
||||
def decommission_nodes(cluster, instances, configure_sh_string):
|
||||
LOG.info(_LI('Start decommission . Cluster = %s'), cluster.name)
|
||||
move_node(cluster, instances)
|
||||
stop_services(cluster, instances)
|
||||
context.sleep(names.WAIT_NODE_ALARM_NO_HEARTBEAT)
|
||||
remove_node(cluster, instances)
|
||||
remove_services(cluster, instances)
|
||||
if check_for_cldb_or_zookeeper_service(instances):
|
||||
all_instances = gen.get_instances(cluster)
|
||||
current_cluster_instances = [
|
||||
x for x in all_instances if x not in instances]
|
||||
for inst in current_cluster_instances:
|
||||
start_helper.exec_configure_sh_on_instance(
|
||||
cluster, inst, configure_sh_string)
|
||||
LOG.info(_LI('End decommission. Cluster = %s'), cluster.name)
|
||||
|
||||
|
||||
def start_services(cluster, instances, disk_setup_script_path,
|
||||
waiting_script, configure_sh_string):
|
||||
LOG.info(_LI('START: Starting services.'))
|
||||
for inst in instances:
|
||||
start_helper.exec_configure_sh_on_instance(
|
||||
cluster, inst, configure_sh_string)
|
||||
start_helper.wait_for_mfs_unlock(cluster, waiting_script)
|
||||
start_helper.setup_maprfs_on_instance(inst, disk_setup_script_path)
|
||||
|
||||
if check_if_is_zookeeper_node(inst):
|
||||
run_scripts.start_zookeeper(inst.remote())
|
||||
|
||||
run_scripts.start_warden(inst.remote())
|
||||
|
||||
if check_for_cldb_or_zookeeper_service(instances):
|
||||
start_helper.exec_configure_sh_on_cluster(
|
||||
cluster, configure_sh_string)
|
||||
LOG.info(_LI('END: Starting services.'))
|
||||
|
||||
|
||||
def stop_services(cluster, instances):
|
||||
LOG.info(_LI("Stop warden and zookeeper"))
|
||||
for instance in instances:
|
||||
with instance.remote() as r:
|
||||
r.execute_command(STOP_WARDEN_CMD, run_as_root=True)
|
||||
if check_if_is_zookeeper_node(instance):
|
||||
r.execute_command(STOP_ZOOKEEPER_CMD, run_as_root=True)
|
||||
LOG.info(_LI("Warden and zookeeper stoped"))
|
||||
|
||||
|
||||
def move_node(cluster, instances):
|
||||
LOG.info(_LI("Start moving the node to the /decommissioned"))
|
||||
for instance in instances:
|
||||
with instance.remote() as r:
|
||||
command = GET_SERVER_ID_CMD % instance.management_ip
|
||||
ec, out = r.execute_command(command, run_as_root=True)
|
||||
command = MOVE_NODE_CMD % out.strip()
|
||||
r.execute_command(command, run_as_root=True)
|
||||
LOG.info(_LI("Nodes moved to the /decommissioned"))
|
||||
|
||||
|
||||
def remove_node(cluster, instances):
|
||||
LOG.info("Start removing the nodes")
|
||||
for instance in instances:
|
||||
with instance.remote() as r:
|
||||
command = GET_HOSTNAME_CMD % instance.management_ip
|
||||
ec, out = r.execute_command(command, run_as_root=True)
|
||||
command = REMOVE_NODE_CMD % {'ip': instance.management_ip,
|
||||
'nodes': out.strip()}
|
||||
r.execute_command(command, run_as_root=True)
|
||||
LOG.info("Nodes removed")
|
||||
|
||||
|
||||
def remove_services(cluster, instances):
|
||||
LOG.info(_LI("Start remove all mapr services"))
|
||||
for instance in instances:
|
||||
with instance.remote() as r:
|
||||
r.execute_command(REMOVE_MAPR_PACKAGES_CMD, run_as_root=True)
|
||||
r.execute_command(REMOVE_MAPR_HOME_CMD, run_as_root=True)
|
||||
r.execute_command(REMOVE_MAPR_CORES_CMD, run_as_root=True)
|
||||
LOG.info(_LI("All mapr services removed"))
|
||||
|
||||
|
||||
def check_if_is_zookeeper_node(instance):
|
||||
processes_list = instance.node_group.node_processes
|
||||
return names.ZOOKEEPER in processes_list
|
||||
|
||||
|
||||
def check_for_cldb_or_zookeeper_service(instances):
|
||||
for inst in instances:
|
||||
np_list = inst.node_group.node_processes
|
||||
if names.ZOOKEEPER in np_list or names.CLDB in np_list:
|
||||
return True
|
||||
return False
|

177  sahara/plugins/mapr/util/start_helper.py  Normal file
@@ -0,0 +1,177 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sahara import context
from sahara.i18n import _LI
from sahara.openstack.common import log as logging
from sahara.plugins.mapr.util import names
from sahara.plugins.mapr.util import run_scripts
import sahara.plugins.mapr.versions.version_handler_factory as vhf
import sahara.plugins.utils as utils
from sahara.utils import files as files


LOG = logging.getLogger(__name__)


def exec_configure_sh_on_cluster(cluster, script_string):
    inst_list = utils.get_instances(cluster)
    for n in inst_list:
        exec_configure_sh_on_instance(cluster, n, script_string)


def exec_configure_sh_on_instance(cluster, instance, script_string):
    LOG.info(_LI('START: Executing configure.sh'))
    if check_for_mapr_db(cluster):
        script_string += ' -M7'
    if not check_if_mapr_user_exist(instance):
        script_string += ' --create-user'
    LOG.debug('script_string = %s', script_string)
    instance.remote().execute_command(script_string, run_as_root=True)
    LOG.info(_LI('END: Executing configure.sh'))


def check_for_mapr_db(cluster):
    h_version = cluster.hadoop_version
    v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
    return v_handler.get_context(cluster).is_m7_enabled()


def setup_maprfs_on_cluster(cluster, path_to_disk_setup_script):
    mapr_node_list = utils.get_instances(cluster, 'FileServer')
    for instance in mapr_node_list:
        setup_maprfs_on_instance(instance, path_to_disk_setup_script)


def setup_maprfs_on_instance(instance, path_to_disk_setup_script):
    LOG.info(_LI('START: Setup maprfs on instance %s'), instance.instance_name)
    create_disk_list_file(instance, path_to_disk_setup_script)
    execute_disksetup(instance)
    LOG.info(_LI('END: Setup maprfs on instance.'))


def create_disk_list_file(instance, path_to_disk_setup_script):
    LOG.info(_LI('START: Creating disk list file.'))
    script_path = '/tmp/disk_setup_script.sh'
    rmt = instance.remote()
    LOG.debug('Writing /tmp/disk_setup_script.sh')
    rmt.write_file_to(
        script_path, files.get_file_text(path_to_disk_setup_script))
    LOG.debug('Start executing command: chmod +x %s', script_path)
    rmt.execute_command('chmod +x ' + script_path, run_as_root=True)
    LOG.debug('Finished executing command.')
    args = ' '.join(instance.node_group.storage_paths())
    cmd = '%s %s' % (script_path, args)
    LOG.debug('Executing %s', cmd)
    rmt.execute_command(cmd, run_as_root=True)
    LOG.info(_LI('END: Creating disk list file.'))


def execute_disksetup(instance):
    LOG.info(_LI('START: Executing disksetup.'))
    rmt = instance.remote()
    rmt.execute_command(
        '/opt/mapr/server/disksetup -F /tmp/disk.list', run_as_root=True)
    LOG.info(_LI('END: Executing disksetup.'))


def wait_for_mfs_unlock(cluster, path_to_waiting_script):
    mapr_node_list = utils.get_instances(cluster, names.FILE_SERVER)
    for instance in mapr_node_list:
        create_waiting_script_file(instance, path_to_waiting_script)
        exec_waiting_script_on_instance(instance)


def start_zookeeper_nodes_on_cluster(cluster):
    zkeeper_node_list = utils.get_instances(cluster, names.ZOOKEEPER)
    for z_keeper_node in zkeeper_node_list:
        run_scripts.start_zookeeper(z_keeper_node.remote())


def start_warden_on_cluster(cluster):
    node_list = utils.get_instances(cluster)
    for node in node_list:
        run_scripts.start_warden(node.remote())


def start_warden_on_cldb_nodes(cluster):
    node_list = utils.get_instances(cluster, names.CLDB)
    for node in node_list:
        run_scripts.start_warden(node.remote())


def start_warden_on_other_nodes(cluster):
    node_list = utils.get_instances(cluster)
    for node in node_list:
        if names.CLDB not in node.node_group.node_processes:
            run_scripts.start_warden(node.remote())


def create_waiting_script_file(instance, path_to_waiting_script):
    LOG.info(_LI('START: Creating waiting script file.'))
    script_path = '/tmp/waiting_script.sh'
    rmt = instance.remote()
    rmt.write_file_to(script_path, files.get_file_text(path_to_waiting_script))
    LOG.info(_LI('END: Creating waiting script file.'))


def exec_waiting_script_on_instance(instance):
    LOG.info(_LI('START: Waiting script'))
    rmt = instance.remote()
    rmt.execute_command('chmod +x /tmp/waiting_script.sh', run_as_root=True)
    rmt.execute_command('/tmp/waiting_script.sh', run_as_root=True)
|
||||
LOG.info(_LI('END: Waiting script'))
|
||||
|
||||
|
||||
def check_if_mapr_user_exist(instance):
|
||||
ec, out = instance.remote().execute_command('id -u mapr',
|
||||
run_as_root=True,
|
||||
raise_when_error=False)
|
||||
return ec == 0
|
||||
|
||||
|
||||
def check_for_mapr_component(instance, component_name):
|
||||
component_list = instance.node_group.node_processes
|
||||
return component_name in component_list
|
||||
|
||||
|
||||
def install_role_on_instance(instance, cluster_context):
|
||||
LOG.info(_LI('START: Installing roles on node '))
|
||||
roles_list = instance.node_group.node_processes
|
||||
exec_str = (cluster_context.get_install_manager()
|
||||
+ cluster_context.get_roles_str(roles_list))
|
||||
LOG.debug('Executing "%(command)s" on %(instance)s',
|
||||
{'command': exec_str, 'instance': instance.instance_id})
|
||||
|
||||
instance.remote().execute_command(exec_str, run_as_root=True, timeout=900)
|
||||
LOG.info(_LI('END: Installing roles on node '))
|
||||
|
||||
|
||||
def install_roles(cluster, cluster_context):
|
||||
LOG.info(_LI('START: Installing roles on cluster'))
|
||||
instances = utils.get_instances(cluster)
|
||||
with context.ThreadGroup(len(instances)) as tg:
|
||||
for instance in instances:
|
||||
tg.spawn('install_roles_%s' % instance.instance_id,
|
||||
install_role_on_instance,
|
||||
instance,
|
||||
cluster_context)
|
||||
LOG.info(_LI('END: Installing roles on cluster'))
|
||||
|
||||
|
||||
def start_ecosystem(cluster_context):
|
||||
oozie_inst = cluster_context.get_oozie_instance()
|
||||
if oozie_inst is not None:
|
||||
context.sleep(names.WAIT_OOZIE_INTERVAL)
|
||||
run_scripts.start_oozie(oozie_inst.remote())
|
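These helpers are composed by the per-version handlers; the 3.1.1 start_cluster further down in this change calls them in roughly this order (cluster, configure_sh_string and cluster_context are assumed to be supplied by the handler):

exec_configure_sh_on_cluster(cluster, configure_sh_string)
wait_for_mfs_unlock(cluster, 'plugins/mapr/util/resources/waiting_script.sh')
setup_maprfs_on_cluster(cluster,
                        'plugins/mapr/util/resources/create_disk_list_file.sh')
start_zookeeper_nodes_on_cluster(cluster)
start_warden_on_cldb_nodes(cluster)
start_warden_on_other_nodes(cluster)
start_ecosystem(cluster_context)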
135
sahara/plugins/mapr/util/validation_utils.py
Normal file
@ -0,0 +1,135 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import functools as ft
|
||||
|
||||
from sahara.i18n import _
|
||||
import sahara.plugins.exceptions as e
|
||||
import sahara.plugins.mapr.util.cluster_info as ci
|
||||
import sahara.plugins.mapr.util.wrapper as w
|
||||
|
||||
|
||||
class LessThanCountException(e.InvalidComponentCountException):
|
||||
|
||||
def __init__(self, component, expected_count, count):
|
||||
super(LessThanCountException, self).__init__(
|
||||
component, expected_count, count)
|
||||
self.message = (_("Hadoop cluster should contain at least"
|
||||
" %(expected_count)d %(component)s component(s)."
|
||||
" Actual %(component)s count is %(count)d")
|
||||
% {'expected_count': expected_count,
|
||||
'component': component, 'count': count})
|
||||
|
||||
|
||||
class MoreThanCountException(e.InvalidComponentCountException):
|
||||
|
||||
def __init__(self, component, expected_count, count):
|
||||
super(MoreThanCountException, self).__init__(
|
||||
component, expected_count, count)
|
||||
self.message = (_("Hadoop cluster should contain not more than"
|
||||
" %(expected_count)d %(component)s component(s)."
|
||||
" Actual %(component)s count is %(count)d")
|
||||
% {'expected_count': expected_count,
|
||||
'component': component, 'count': count})
|
||||
|
||||
|
||||
class NodeRequiredServiceMissingException(e.RequiredServiceMissingException):
|
||||
|
||||
def __init__(self, service_name, required_by=None):
|
||||
super(NodeRequiredServiceMissingException, self).__init__(
|
||||
service_name, required_by)
|
||||
self.message = _('Node is missing a service: %s') % service_name
|
||||
if required_by:
|
||||
self.message = (_('%(message)s, required by service:'
|
||||
' %(required_by)s')
|
||||
% {'message': self.message,
|
||||
'required_by': required_by})
|
||||
|
||||
|
||||
def not_less_than_count_component_vr(component, count):
|
||||
def validate(cluster, component, count):
|
||||
c_info = ci.ClusterInfo(cluster, None)
|
||||
actual_count = c_info.get_instances_count(component)
|
||||
if not actual_count >= count:
|
||||
raise LessThanCountException(component, count, actual_count)
|
||||
return ft.partial(validate, component=component, count=count)
|
||||
|
||||
|
||||
def not_more_than_count_component_vr(component, count):
|
||||
def validate(cluster, component, count):
|
||||
c_info = ci.ClusterInfo(cluster, None)
|
||||
actual_count = c_info.get_instances_count(component)
|
||||
if not actual_count <= count:
|
||||
raise MoreThanCountException(component, count, actual_count)
|
||||
return ft.partial(validate, component=component, count=count)
|
||||
|
||||
|
||||
def equal_count_component_vr(component, count):
|
||||
def validate(cluster, component, count):
|
||||
c_info = ci.ClusterInfo(cluster, None)
|
||||
actual_count = c_info.get_instances_count(component)
|
||||
if not actual_count == count:
|
||||
raise e.InvalidComponentCountException(
|
||||
component, count, actual_count)
|
||||
return ft.partial(validate, component=component, count=count)
|
||||
|
||||
|
||||
def require_component_vr(component):
|
||||
def validate(instance, component):
|
||||
if component not in instance.node_group.node_processes:
|
||||
raise NodeRequiredServiceMissingException(component)
|
||||
return ft.partial(validate, component=component)
|
||||
|
||||
|
||||
def require_of_listed_components(components):
|
||||
def validate(instance, components):
|
||||
if (False in (c in instance.node_group.node_processes
|
||||
for c in components)):
|
||||
raise NodeRequiredServiceMissingException(components)
|
||||
return ft.partial(validate, components=components)
|
||||
|
||||
|
||||
def each_node_has_component_vr(component):
|
||||
def validate(cluster, component):
|
||||
rc_vr = require_component_vr(component)
|
||||
c_info = ci.ClusterInfo(cluster, None)
|
||||
for i in c_info.get_instances():
|
||||
rc_vr(i)
|
||||
return ft.partial(validate, component=component)
|
||||
|
||||
|
||||
def each_node_has_at_least_one_of_listed_components(components):
|
||||
def validate(cluster, components):
|
||||
rc_vr = require_of_listed_components(components)
|
||||
c_info = ci.ClusterInfo(cluster, None)
|
||||
for i in c_info.get_instances():
|
||||
rc_vr(i)
|
||||
return ft.partial(validate, components=components)
|
||||
|
||||
|
||||
def node_dependency_satisfied_vr(component, dependency):
|
||||
def validate(cluster, component, dependency):
|
||||
c_info = ci.ClusterInfo(cluster, None)
|
||||
for ng in c_info.get_node_groups(component):
|
||||
if dependency not in ng.node_processes:
|
||||
                raise NodeRequiredServiceMissingException(
                    dependency, component)
|
||||
return ft.partial(validate, component=component, dependency=dependency)
|
||||
|
||||
|
||||
def create_fake_cluster(cluster, existing, additional):
|
||||
w_node_groups = [w.Wrapper(ng, count=existing[ng.id])
|
||||
if ng.id in existing else ng
|
||||
for ng in cluster.node_groups]
|
||||
return w.Wrapper(cluster, node_groups=w_node_groups)
|
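A short sketch of how these rule factories are meant to be consumed, mirroring get_cluster_validation_rules in the 3.1.1 version handler below. The string literals stand in for the names.* constants used there and match the node process names declared in plugin_spec.json; the cluster object is assumed to be available.

rules = [
    not_less_than_count_component_vr('ZooKeeper', 1),
    not_more_than_count_component_vr('Oozie', 1),
    node_dependency_satisfied_vr('TaskTracker', 'FileServer'),
]
for rule in rules:
    rule(cluster)  # each rule raises a plugin validation exception on failure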
28
sahara/plugins/mapr/util/wrapper.py
Normal file
@ -0,0 +1,28 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class Wrapper(object):
    """Proxy that overrides selected attributes of a wrapped object.

    Keyword arguments are stored on the wrapper itself; any other attribute
    lookup falls through to the wrapped object.
    """

    WRAPPED = '__wrapped__'

    def __init__(self, wrapped, **kargs):
        object.__getattribute__(self, '__dict__').update(kargs)
        object.__setattr__(self, Wrapper.WRAPPED, wrapped)

    def __getattribute__(self, name):
        wrapped = object.__getattribute__(self, Wrapper.WRAPPED)
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            return object.__getattribute__(wrapped, name)
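A minimal usage sketch with a hypothetical node-group class, showing how the wrapper substitutes one attribute (here the instance count, as create_fake_cluster does for scaling validation) while delegating everything else:

class NodeGroup(object):
    def __init__(self, id, count):
        self.id = id
        self.count = count

ng = NodeGroup('worker', 3)
scaled = Wrapper(ng, count=5)  # override 'count', delegate the rest
print(scaled.count)            # 5, taken from the wrapper itself
print(scaled.id)               # 'worker', delegated to the wrapped object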
0
sahara/plugins/mapr/versions/__init__.py
Normal file
167
sahara/plugins/mapr/versions/base_cluster_configurer.py
Normal file
@ -0,0 +1,167 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import collections as c
|
||||
import os
|
||||
|
||||
import six
|
||||
|
||||
from sahara.i18n import _LI
|
||||
from sahara.openstack.common import log as logging
|
||||
import sahara.plugins.mapr.util.cluster_helper as ch
|
||||
import sahara.plugins.mapr.util.cluster_info as ci
|
||||
from sahara.plugins.mapr.util import config
|
||||
import sahara.plugins.mapr.util.config_file_utils as cfu
|
||||
import sahara.plugins.mapr.util.dict_utils as du
|
||||
import sahara.plugins.mapr.versions.version_handler_factory as vhf
|
||||
import sahara.plugins.utils as u
|
||||
import sahara.swift.swift_helper as sh
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class BaseClusterConfigurer(object):
|
||||
|
||||
def get_topology_configs(self):
|
||||
result = c.defaultdict(dict)
|
||||
if config.is_data_locality_enabled(self.cluster):
|
||||
if self.is_node_awareness_enabled():
|
||||
LOG.debug('Node group awareness is set to True')
|
||||
|
||||
file_name = '%s/core-site.xml' % self.get_hadoop_conf_dir()
|
||||
service = self.plugin_spec.get_service_for_file_name(file_name)
|
||||
data = {}
|
||||
data['net.topology.impl'] = (
|
||||
'org.apache.hadoop.net.NetworkTopologyWithNodeGroup')
|
||||
data['net.topology.nodegroup.aware'] = True
|
||||
data['dfs.block.replicator.classname'] = (
|
||||
'org.apache.hadoop.hdfs.server.namenode'
|
||||
'.BlockPlacementPolicyWithNodeGroup')
|
||||
result[service][file_name] = data
|
||||
|
||||
file_name = '%s/mapred-site.xml' % self.get_hadoop_conf_dir()
|
||||
service = self.plugin_spec.get_service_for_file_name(file_name)
|
||||
data = {}
|
||||
data['mapred.jobtracker.nodegroup.aware'] = True
|
||||
data['mapred.task.cache.levels'] = 3
|
||||
result[service][file_name] = data
|
||||
|
||||
file_name = '/opt/mapr/conf/cldb.conf'
|
||||
service = self.plugin_spec.get_service_for_file_name(file_name)
|
||||
data = {}
|
||||
data['net.topology.script.file.name'] = '/opt/mapr/topology.sh'
|
||||
result[service][file_name] = data
|
||||
else:
|
||||
LOG.debug('Node group awareness is not implemented in YARN'
|
||||
' yet so enable_hypervisor_awareness set to'
|
||||
' False explicitly')
|
||||
return result
|
||||
|
||||
def get_swift_configs(self):
|
||||
mapper = lambda i: (i['name'], i['value'])
|
||||
file_name = '%s/core-site.xml' % self.get_hadoop_conf_dir()
|
||||
service = self.plugin_spec.get_service_for_file_name(file_name)
|
||||
data = dict(map(mapper, sh.get_swift_configs()))
|
||||
return {service: {file_name: data}}
|
||||
|
||||
def get_cluster_configs(self):
|
||||
default_configs = self.cluster_info.get_default_configs()
|
||||
user_configs = self.cluster_info.get_user_configs()
|
||||
result = du.deep_update(default_configs, user_configs)
|
||||
file_name = '/opt/mapr/conf/cldb.conf'
|
||||
service = self.plugin_spec.get_service_for_file_name(file_name)
|
||||
if file_name not in result[service]:
|
||||
result[service][file_name] = {}
|
||||
data = result[service][file_name]
|
||||
data['cldb.zookeeper.servers'] = ch.get_zookeeper_nodes_ip_with_port(
|
||||
self.cluster)
|
||||
return result
|
||||
|
||||
def get_cluster_configs_template(self):
|
||||
template = {}
|
||||
du.deep_update(template, self.get_topology_configs(), False)
|
||||
du.deep_update(template, self.get_swift_configs(), False)
|
||||
du.deep_update(template, self.get_cluster_configs(), False)
|
||||
return template
|
||||
|
||||
def get_node_group_configs(self, node_groups=None):
|
||||
ng_configs = {}
|
||||
if not node_groups:
|
||||
node_groups = self.cluster.node_groups
|
||||
cc_template = self.cluster_configs_template
|
||||
p_spec = self.plugin_spec
|
||||
for ng in node_groups:
|
||||
ng_services = self.cluster_info.get_services(ng)
|
||||
d_configs = dict(filter(lambda i: i[0] in ng_services,
|
||||
six.iteritems(cc_template)))
|
||||
u_configs = self.cluster_info.get_user_configs(ng)
|
||||
nc_template = du.deep_update(d_configs, u_configs)
|
||||
nc_data = {}
|
||||
for files in nc_template.values():
|
||||
for f_name, f_data in six.iteritems(files):
|
||||
if f_name:
|
||||
f_type = p_spec.get_file_type(f_name)
|
||||
f_content = cfu.to_file_content(f_data, f_type)
|
||||
if f_content:
|
||||
nc_data[f_name] = f_content
|
||||
ng_configs[ng.id] = nc_data
|
||||
return ng_configs
|
||||
|
||||
def configure_instances(self, instances=None):
|
||||
if not instances:
|
||||
instances = u.get_instances(self.cluster)
|
||||
for i in instances:
|
||||
i_files = self.node_group_files[i.node_group_id]
|
||||
LOG.info(_LI('Writing files %(f_names)s to node %(node)s'),
|
||||
{'f_names': i_files.keys(), 'node': i.management_ip})
|
||||
with i.remote() as r:
|
||||
for f_name in i_files:
|
||||
r.execute_command('mkdir -p ' + os.path.dirname(f_name),
|
||||
run_as_root=True)
|
||||
LOG.debug('Created dir: %s', os.path.dirname(f_name))
|
||||
r.write_files_to(i_files, run_as_root=True)
|
||||
config.post_configure_instance(i)
|
||||
|
||||
def __init__(self, cluster, plugin_spec):
|
||||
h_version = cluster.hadoop_version
|
||||
v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
|
||||
self.context = v_handler.get_context(cluster)
|
||||
self.cluster = cluster
|
||||
self.plugin_spec = plugin_spec
|
||||
self.cluster_info = ci.ClusterInfo(self.cluster, self.plugin_spec)
|
||||
self.cluster_configs_template = self.get_cluster_configs_template()
|
||||
self.node_group_files = self.get_node_group_configs()
|
||||
|
||||
def configure(self, instances=None):
|
||||
self.configure_topology_data(self.cluster)
|
||||
self.configure_instances(instances)
|
||||
|
||||
@staticmethod
|
||||
def _post_configure_instance(instance):
|
||||
config.post_configure_instance(instance)
|
||||
|
||||
def configure_topology_data(self, cluster):
|
||||
config.configure_topology_data(
|
||||
cluster, self.is_node_awareness_enabled())
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_hadoop_conf_dir(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def is_node_awareness_enabled(self):
|
||||
return
|
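Concrete versions only have to supply the two abstract hooks above; the 3.1.1 configurer that appears later in this change reduces to essentially:

class ClusterConfigurer(BaseClusterConfigurer):

    def get_hadoop_conf_dir(self):
        return '/opt/mapr/hadoop/hadoop-0.20.2/conf'

    def is_node_awareness_enabled(self):
        return True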
173
sahara/plugins/mapr/versions/base_context.py
Normal file
@ -0,0 +1,173 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
|
||||
import six
|
||||
|
||||
import sahara.plugins.mapr.util.config_utils as cu
|
||||
import sahara.plugins.mapr.util.names as n
|
||||
import sahara.plugins.utils as u
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class BaseContext(object):
|
||||
|
||||
hive_version_config = 'Hive Version'
|
||||
oozie_version_config = 'Oozie Version'
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_cluster(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def is_m7_enabled(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_hadoop_version(self):
|
||||
return
|
||||
|
||||
def get_linux_distro_version(self):
|
||||
return self.get_zk_instances()[0].remote().execute_command(
|
||||
'lsb_release -is', run_as_root=True)[1].rstrip()
|
||||
|
||||
def get_install_manager(self):
|
||||
install_manager_map = {'Ubuntu': 'apt-get install --force-yes -y ',
|
||||
'CentOS': 'yum install -y ',
|
||||
'RedHatEnterpriseServer': 'yum install -y ',
|
||||
'Suse': 'zypper '}
|
||||
return install_manager_map.get(self.get_linux_distro_version())
|
||||
|
||||
def get_install_manager_version_separator(self):
|
||||
install_manager_map = {'Ubuntu': '=',
|
||||
'CentOS': '-',
|
||||
'RedHatEnterpriseServer': '-',
|
||||
'Suse': ':'}
|
||||
return install_manager_map.get(self.get_linux_distro_version())
|
||||
|
||||
def get_fs_instances(self):
|
||||
return u.get_instances(self.get_cluster(), n.FILE_SERVER)
|
||||
|
||||
def get_zk_instances(self):
|
||||
return u.get_instances(self.get_cluster(), n.ZOOKEEPER)
|
||||
|
||||
def get_zk_uris(self):
|
||||
mapper = lambda i: '%s' % i.management_ip
|
||||
return map(mapper, self.get_zk_instances())
|
||||
|
||||
def get_cldb_instances(self):
|
||||
return u.get_instances(self.get_cluster(), n.CLDB)
|
||||
|
||||
def get_cldb_uris(self):
|
||||
mapper = lambda i: '%s' % i.management_ip
|
||||
return map(mapper, self.get_cldb_instances())
|
||||
|
||||
def get_cldb_uri(self):
|
||||
return 'maprfs:///'
|
||||
|
||||
def get_rm_instance(self):
|
||||
return u.get_instance(self.get_cluster(), n.RESOURCE_MANAGER)
|
||||
|
||||
def get_rm_port(self):
|
||||
return '8032'
|
||||
|
||||
def get_rm_uri(self):
|
||||
port = self.get_rm_port()
|
||||
ip = self.get_rm_instance().management_ip
|
||||
return '%s:%s' % (ip, port) if port else ip
|
||||
|
||||
def get_hs_instance(self):
|
||||
return u.get_instance(self.get_cluster(), n.HISTORY_SERVER)
|
||||
|
||||
def get_hs_uri(self):
|
||||
return self.get_hs_instance().management_ip
|
||||
|
||||
def get_oozie_instance(self):
|
||||
return u.get_instance(self.get_cluster(), n.OOZIE)
|
||||
|
||||
def get_hive_metastore_instances(self):
|
||||
return u.get_instances(self.get_cluster(), n.HIVE_METASTORE)
|
||||
|
||||
def get_hive_server2_instances(self):
|
||||
return u.get_instances(self.get_cluster(), n.HIVE_SERVER2)
|
||||
|
||||
def get_oozie_uri(self):
|
||||
ip = self.get_oozie_instance().management_ip
|
||||
return 'http://%s:11000/oozie' % ip
|
||||
|
||||
def get_roles_str(self, comp_list):
|
||||
component_list_str = 'mapr-core ' + ' '.join(['mapr-' + role + ' '
|
||||
for role in comp_list])
|
||||
if 'HBase-Client' in comp_list:
|
||||
component_list_str = component_list_str.replace(
|
||||
'HBase-Client', 'hbase')
|
||||
if 'Oozie' in comp_list:
|
||||
component_list_str = component_list_str.replace(
|
||||
'Oozie', 'oozie' + self.get_oozie_version())
|
||||
if 'HiveMetastore' in comp_list:
|
||||
component_list_str = component_list_str.replace(
|
||||
'HiveMetastore', 'HiveMetastore' + self.get_hive_version())
|
||||
if 'HiveServer2' in comp_list:
|
||||
component_list_str = component_list_str.replace(
|
||||
'HiveServer2', 'HiveServer2' + self.get_hive_version())
|
||||
|
||||
return component_list_str.lower()
|
||||
|
||||
def user_exists(self):
|
||||
return
|
||||
|
||||
def get_plain_instances(self):
|
||||
fs = self.get_fs_instances()
|
||||
zk = self.get_zk_instances()
|
||||
cldb = self.get_cldb_instances()
|
||||
zk_fs_cldb = zk + fs + cldb
|
||||
instances = u.get_instances(self.get_cluster())
|
||||
return [i for i in instances if i not in zk_fs_cldb]
|
||||
|
||||
def get_configure_command(self):
|
||||
kargs = {'path': self.get_configure_sh_path(),
|
||||
'cldb_nodes': ','.join(self.get_cldb_uris()),
|
||||
'zk_nodes': ','.join(self.get_zk_uris()),
|
||||
'rm_node': self.get_rm_uri(),
|
||||
'hs_node': self.get_hs_uri()}
|
||||
command = ('{path} -C {cldb_nodes} -Z {zk_nodes} -RM {rm_node}'
|
||||
' -HS {hs_node} -f').format(**kargs)
|
||||
if self.is_m7_enabled():
|
||||
command += ' -M7'
|
||||
if not self.user_exists():
|
||||
command += ' --create-user'
|
||||
return command
|
||||
|
||||
def get_fs_wait_command(self):
|
||||
return '/tmp/waiting_script.sh'
|
||||
|
||||
def get_disk_setup_command(self):
|
||||
return '/opt/mapr/server/disksetup -F /tmp/disk.list'
|
||||
|
||||
def get_configure_sh_path(self):
|
||||
return '/opt/mapr/server/configure.sh'
|
||||
|
||||
def get_oozie_version(self):
|
||||
configs = cu.get_cluster_configs(self.get_cluster())
|
||||
return (self.get_install_manager_version_separator()
|
||||
+ configs[n.OOZIE][BaseContext.oozie_version_config] + '*')
|
||||
|
||||
def get_hive_version(self):
|
||||
configs = cu.get_cluster_configs(self.get_cluster())
|
||||
return (self.get_install_manager_version_separator()
|
||||
+ configs[n.HIVE][BaseContext.hive_version_config] + "*")
|
||||
|
||||
def get_scripts(self):
|
||||
return
|
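For illustration, with hypothetical addresses the command assembled by get_configure_command above expands to something like the line below; ' -M7' and ' --create-user' are appended only when MapR-DB is enabled and when no mapr user exists, respectively.

/opt/mapr/server/configure.sh -C 10.0.0.11 -Z 10.0.0.12,10.0.0.13 \
    -RM 10.0.0.14:8032 -HS 10.0.0.15 -f -M7 --create-user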
115
sahara/plugins/mapr/versions/base_version_handler.py
Normal file
@ -0,0 +1,115 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
|
||||
import six
|
||||
|
||||
import sahara.plugins.mapr.util.plugin_spec as ps
|
||||
import sahara.plugins.mapr.util.start_helper as sh
|
||||
import sahara.plugins.mapr.util.validation_utils as vu
|
||||
import sahara.plugins.mapr.versions.edp_engine as edp
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class BaseVersionHandler(object):
|
||||
|
||||
def __init__(self):
|
||||
self.plugin_spec = ps.PluginSpec(self.get_plugin_spec_path())
|
||||
|
||||
def get_plugin_spec(self):
|
||||
return self.plugin_spec
|
||||
|
||||
def get_configs(self):
|
||||
return self.plugin_spec.get_configs()
|
||||
|
||||
def get_node_processes(self):
|
||||
return self.plugin_spec.service_node_process_map
|
||||
|
||||
def get_disk_setup_script(self):
|
||||
return 'plugins/mapr/util/resources/create_disk_list_file.sh'
|
||||
|
||||
def validate(self, cluster):
|
||||
rules = self.get_cluster_validation_rules(cluster)
|
||||
for rule in rules:
|
||||
rule(cluster)
|
||||
|
||||
def validate_scaling(self, cluster, existing, additional):
|
||||
fake_cluster = vu.create_fake_cluster(cluster, existing, additional)
|
||||
self.validate(fake_cluster)
|
||||
|
||||
def validate_edp(self, cluster):
|
||||
for rule in self.get_edp_validation_rules():
|
||||
rule(cluster)
|
||||
|
||||
def configure_cluster(self, cluster):
|
||||
sh.install_roles(cluster, self.get_context(cluster))
|
||||
self.get_cluster_configurer(cluster, self.plugin_spec).configure()
|
||||
|
||||
def get_name_node_uri(self, cluster):
|
||||
return self.get_context(cluster).get_cldb_uri()
|
||||
|
||||
def get_oozie_server(self, cluster):
|
||||
return self.get_context(cluster).get_oozie_instance()
|
||||
|
||||
def get_oozie_server_uri(self, cluster):
|
||||
return self.get_context(cluster).get_oozie_uri()
|
||||
|
||||
def get_resource_manager_uri(self, cluster):
|
||||
return self.get_context(cluster).get_rm_uri()
|
||||
|
||||
def get_home_dir(self):
|
||||
return ('plugins/mapr/versions/v%s'
|
||||
% self.get_plugin_version().replace('.', '_').lower())
|
||||
|
||||
def get_plugin_spec_path(self):
|
||||
return '%s/resources/plugin_spec.json' % self.get_home_dir()
|
||||
|
||||
def get_edp_engine(self, cluster, job_type):
|
||||
if job_type in edp.MapROozieJobEngine.get_supported_job_types():
|
||||
return edp.MapROozieJobEngine(cluster)
|
||||
return None
|
||||
|
||||
# Abstract methods
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_plugin_version(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_cluster_validation_rules(self, cluster):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_scaling_validation_rules(self):
|
||||
return
|
||||
|
||||
def get_waiting_script(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_edp_validation_rules(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_cluster_configurer(self, cluster, plugin_spec):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_configure_sh_string(self, cluster):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_context(self, cluster):
|
||||
return
|
76
sahara/plugins/mapr/versions/edp_engine.py
Normal file
@ -0,0 +1,76 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sahara.plugins.mapr.util.maprfs_helper as mfs
|
||||
import sahara.plugins.mapr.versions.version_handler_factory as vhf
|
||||
import sahara.service.edp.binary_retrievers.dispatch as d
|
||||
import sahara.service.edp.oozie.engine as e
|
||||
|
||||
|
||||
class MapROozieJobEngine(e.OozieJobEngine):
|
||||
|
||||
def get_hdfs_user(self):
|
||||
return 'mapr'
|
||||
|
||||
def create_hdfs_dir(self, remote, dir_name):
|
||||
mfs.create_maprfs_dir(remote, dir_name, self.get_hdfs_user())
|
||||
|
||||
def _upload_workflow_file(self, where, job_dir, wf_xml, hdfs_user):
|
||||
f_name = 'workflow.xml'
|
||||
with where.remote() as r:
|
||||
mfs.put_file_to_maprfs(r, wf_xml, f_name, job_dir, hdfs_user)
|
||||
return job_dir + '/' + f_name
|
||||
|
||||
def _upload_job_files_to_hdfs(self, where, job_dir, job):
|
||||
mains = job.mains or []
|
||||
libs = job.libs or []
|
||||
uploaded_paths = []
|
||||
hdfs_user = self.get_hdfs_user()
|
||||
with where.remote() as r:
|
||||
for m in mains:
|
||||
raw_data = d.get_raw_binary(m)
|
||||
mfs.put_file_to_maprfs(r, raw_data, m.name, job_dir, hdfs_user)
|
||||
uploaded_paths.append(job_dir + '/' + m.name)
|
||||
for l in libs:
|
||||
raw_data = d.get_raw_binary(l)
|
||||
lib_dir = job_dir + '/lib/'
|
||||
self.create_hdfs_dir(r, lib_dir)
|
||||
mfs.put_file_to_maprfs(r, raw_data, l.name, lib_dir,
|
||||
hdfs_user)
|
||||
uploaded_paths.append(lib_dir + l.name)
|
||||
return uploaded_paths
|
||||
|
||||
def get_name_node_uri(self, cluster):
|
||||
h_version = cluster.hadoop_version
|
||||
v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
|
||||
uri = v_handler.get_name_node_uri(cluster)
|
||||
return uri
|
||||
|
||||
def get_oozie_server_uri(self, cluster):
|
||||
h_version = cluster.hadoop_version
|
||||
v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
|
||||
uri = v_handler.get_oozie_server_uri(cluster)
|
||||
return uri
|
||||
|
||||
def get_oozie_server(self, cluster):
|
||||
h_version = cluster.hadoop_version
|
||||
v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
|
||||
server = v_handler.get_oozie_server(cluster)
|
||||
return server
|
||||
|
||||
def get_resource_manager_uri(self, cluster):
|
||||
h_version = cluster.hadoop_version
|
||||
v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
|
||||
uri = v_handler.get_resource_manager_uri(cluster)
|
||||
return uri
|
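A condensed sketch of the dispatch pattern the EDP engine relies on: the cluster's hadoop_version selects a concrete handler from the factory, and the handler (via its context, defined above) answers the URI queries.

handler = vhf.VersionHandlerFactory.get().get_handler(cluster.hadoop_version)
fs_uri = handler.get_name_node_uri(cluster)        # 'maprfs:///'
oozie_uri = handler.get_oozie_server_uri(cluster)  # 'http://<oozie ip>:11000/oozie'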
0
sahara/plugins/mapr/versions/v3_1_1/__init__.py
Normal file
24
sahara/plugins/mapr/versions/v3_1_1/cluster_configurer.py
Normal file
@ -0,0 +1,24 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sahara.plugins.mapr.versions.base_cluster_configurer as bcc


class ClusterConfigurer(bcc.BaseClusterConfigurer):

    def get_hadoop_conf_dir(self):
        return '/opt/mapr/hadoop/hadoop-0.20.2/conf'

    def is_node_awareness_enabled(self):
        return True
@ -0,0 +1,63 @@
|
||||
#
|
||||
# CLDB Config file.
|
||||
# Properties defined in this file are loaded during startup
|
||||
# and are valid for only CLDB which loaded the config.
|
||||
# These parameters are not persisted anywhere else.
|
||||
#
|
||||
# Wait until minimum number of fileserver register with
|
||||
# CLDB before creating Root Volume
|
||||
cldb.min.fileservers=1
|
||||
# CLDB listening port
|
||||
cldb.port=7222
|
||||
# Number of worker threads
|
||||
cldb.numthreads=10
|
||||
# CLDB webport
|
||||
cldb.web.port=7221
|
||||
# CLDB https port
|
||||
cldb.web.https.port=7443
|
||||
# Disable duplicate hostid detection
|
||||
cldb.detect.dup.hostid.enabled=false
|
||||
# Deprecated: This param is no longer supported. To configure
|
||||
# the container cache, use the param cldb.containers.cache.percent
|
||||
# Number of RW containers in cache
|
||||
#cldb.containers.cache.entries=1000000
|
||||
#
|
||||
# Percentage (integer) of Xmx setting to be used for container cache
|
||||
#cldb.containers.cache.percent=20
|
||||
#
|
||||
# Topology script to be used to determine
|
||||
# Rack topology of node
|
||||
# Script should take an IP address as input and print rack path
|
||||
# on STDOUT. eg
|
||||
# $>/home/mapr/topo.pl 10.10.10.10
|
||||
# $>/mapr-rack1
|
||||
# $>/home/mapr/topo.pl 10.10.10.20
|
||||
# $>/mapr-rack2
|
||||
#net.topology.script.file.name=/home/mapr/topo.pl
|
||||
#
|
||||
# Topology mapping file used to determine
|
||||
# Rack topology of node
|
||||
# File is of a 2 column format (space separated)
|
||||
# 1st column is an IP address or hostname
|
||||
# 2nd column is the rack path
|
||||
# Line starting with '#' is a comment
|
||||
# Example file contents
|
||||
# 10.10.10.10 /mapr-rack1
|
||||
# 10.10.10.20 /mapr-rack2
|
||||
# host.foo.com /mapr-rack3
|
||||
#net.topology.table.file.name=/home/mapr/topo.txt
|
||||
#
|
||||
# ZooKeeper address
|
||||
#cldb.zookeeper.servers=10.250.1.91:5181
|
||||
# Hadoop metrics jar version
|
||||
#hadoop.version=0.20.2
|
||||
# CLDB JMX remote port
|
||||
cldb.jmxremote.port=7220
|
||||
num.volmirror.threads=1
|
||||
# Set this to set the default topology for all volumes and nodes
|
||||
# The default for all volumes is /data by default
|
||||
# UNCOMMENT the below to change the default topology.
|
||||
# For e.g., set cldb.default.topology=/mydata to create volumes
|
||||
# in /mydata topology and to place all nodes in /mydata topology
|
||||
# by default
|
||||
#cldb.default.topology=/mydata
|
@ -0,0 +1,57 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
|
||||
<!-- Put site-specific property overrides in this file. -->
|
||||
|
||||
<!--
|
||||
Replace 'maprfs' by 'hdfs' to use HDFS.
|
||||
Replace localhost by an ip address for namenode/cldb.
|
||||
-->
|
||||
|
||||
<configuration>
|
||||
|
||||
<property>
|
||||
<name>fs.default.name</name>
|
||||
<value>maprfs:///</value>
|
||||
<description>The name of the default file system. A URI whose
|
||||
scheme and authority determine the FileSystem implementation. The
|
||||
uri's scheme determines the config property (fs.SCHEME.impl) naming
|
||||
the FileSystem implementation class. The uri's authority is used to
|
||||
determine the host, port, etc. for a filesystem.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.mapr.working.dir</name>
|
||||
<value>/user/$USERNAME/</value>
|
||||
<description>The default directory to be used with relative paths.
|
||||
Note that $USERNAME is NOT an enviromental variable, but just a placeholder
|
||||
to indicate that it will be expanded to the corresponding username.
|
||||
Other example default directories could be "/", "/home/$USERNAME", "/$USERNAME" etc.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.s3n.block.size</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3n.blockSize</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3.block.size</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3.blockSize</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.proxyuser.mapr.groups</name>
|
||||
<value>*</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.proxyuser.mapr.hosts</name>
|
||||
<value>*</value>
|
||||
</property>
|
||||
</configuration>
|
@ -0,0 +1,30 @@
|
||||
# Sample Exports file
|
||||
|
||||
# for /mapr exports
|
||||
# <Path> <exports_control>
|
||||
|
||||
#access_control -> order is specific to default
|
||||
# list the hosts before specifying a default for all
|
||||
# a.b.c.d,1.2.3.4(ro) d.e.f.g(ro) (rw)
|
||||
# enforces ro for a.b.c.d & 1.2.3.4 and everybody else is rw
|
||||
|
||||
# special path to export clusters in mapr-clusters.conf. To disable exporting,
|
||||
# comment it out. to restrict access use the exports_control
|
||||
#
|
||||
/mapr (rw)
|
||||
|
||||
#to export only certain clusters, comment out the /mapr & uncomment.
|
||||
# Note: this will cause /mapr to be unexported
|
||||
#/mapr/clustername (rw)
|
||||
|
||||
#to export /mapr only to certain hosts (using exports_control)
|
||||
#/mapr a.b.c.d(rw),e.f.g.h(ro)
|
||||
|
||||
# export /mapr/cluster1 rw to a.b.c.d & ro to e.f.g.h (denied for others)
|
||||
#/mapr/cluster1 a.b.c.d(rw),e.f.g.h(ro)
|
||||
|
||||
# export /mapr/cluster2 only to e.f.g.h (denied for others)
|
||||
#/mapr/cluster2 e.f.g.h(rw)
|
||||
|
||||
# export /mapr/cluster3 rw to e.f.g.h & ro to others
|
||||
#/mapr/cluster2 e.f.g.h(rw) (ro)
|
@ -0,0 +1,41 @@
|
||||
#CLDB metrics config - Pick one out of null,file or ganglia.
|
||||
#Uncomment all properties in null, file or ganglia context, to send cldb metrics to that context
|
||||
|
||||
# Configuration of the "cldb" context for null
|
||||
#cldb.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
|
||||
#cldb.period=10
|
||||
|
||||
# Configuration of the "cldb" context for file
|
||||
#cldb.class=org.apache.hadoop.metrics.file.FileContext
|
||||
#cldb.period=60
|
||||
#cldb.fileName=/tmp/cldbmetrics.log
|
||||
|
||||
# Configuration of the "cldb" context for ganglia
|
||||
cldb.class=com.mapr.fs.cldb.counters.MapRGangliaContext31
|
||||
cldb.period=10
|
||||
cldb.servers=localhost:8649
|
||||
cldb.spoof=1
|
||||
|
||||
#FileServer metrics config - Pick one out of null,file or ganglia.
|
||||
#Uncomment all properties in null, file or ganglia context, to send fileserver metrics to that context
|
||||
|
||||
# Configuration of the "fileserver" context for null
|
||||
#fileserver.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
|
||||
#fileserver.period=10
|
||||
|
||||
# Configuration of the "fileserver" context for file
|
||||
#fileserver.class=org.apache.hadoop.metrics.file.FileContext
|
||||
#fileserver.period=60
|
||||
#fileserver.fileName=/tmp/fsmetrics.log
|
||||
|
||||
# Configuration of the "fileserver" context for ganglia
|
||||
fileserver.class=com.mapr.fs.cldb.counters.MapRGangliaContext31
|
||||
fileserver.period=37
|
||||
fileserver.servers=localhost:8649
|
||||
fileserver.spoof=1
|
||||
|
||||
maprmepredvariant.class=com.mapr.job.mngmnt.hadoop.metrics.MaprRPCContext
|
||||
maprmepredvariant.period=10
|
||||
maprmapred.class=com.mapr.job.mngmnt.hadoop.metrics.MaprRPCContextFinal
|
||||
maprmapred.period=10
|
||||
|
@ -0,0 +1,16 @@
#mfs.num.compress.threads=1
#mfs.max.aio.events=5000
#mfs.disable.periodic.flush=0
#mfs.io.disk.timeout=60
#mfs.server.ip=127.0.0.1
#mfs.max.resync.count=16
#mfs.max.restore.count=16
#mfs.ignore.container.delete=0
#mfs.ignore.readdir.pattern=0
mfs.server.port=5660
#mfs.subnets.whitelist=127.0.0.1/8
#UNCOMMENT this line to disable bulk writes
#mfs.bulk.writes.enabled=0
#UNCOMMENT this to set the topology of this node
#For e.g., to set this node's topology to /compute-only uncomment the below line
#mfs.network.location=/compute-only
@ -0,0 +1,43 @@
|
||||
# Configuration for nfsserver
|
||||
|
||||
#
|
||||
# The system defaults are in the comments
|
||||
#
|
||||
|
||||
# Default compression is true
|
||||
#Compression = true
|
||||
|
||||
# chunksize is 64M
|
||||
#ChunkSize = 67108864
|
||||
|
||||
# Number of threads for compression/decompression: default=2
|
||||
#CompThreads = 2
|
||||
|
||||
#Mount point for the ramfs file for mmap
|
||||
#RamfsMntDir = /ramfs/mapr
|
||||
|
||||
# Size of the ramfile to use (percent of total physical memory) default=0.25
|
||||
# 0: disables the use of ramfs
|
||||
#RamfsSize = 0.25
|
||||
|
||||
# Loglevel = DEBUG | INFO | WARN | ERROR | CRITICAL | OFF
|
||||
#Loglevel = INFO
|
||||
|
||||
#Duplicate Request cache size & timeout in seconds
|
||||
#DrCacheSize = 20480
|
||||
#DrCacheTimeout = 62
|
||||
# To keep the drcache lean, we only cache the response if the
|
||||
# time we took to populate is greater than 50% of DrCacheTimeout.
|
||||
# Set it to 0 to disable this optimization, Note that the DrCacheSize or
|
||||
# DrCacheTimeout will also need to be changed. Ex: if the nfsserver supports
|
||||
# 10,000 ops/sec (modification ops): then DrCacheSize will need to change
|
||||
# to: 10,000*DrCacheTimeout = 620,000
|
||||
#DRCacheTimeOutOpt = 0.5
|
||||
|
||||
#NFS fileid, by default the fileid is of 32 bit size.
|
||||
#Set Use32BitFileId=0 to use 64 bit fileid (inode number)
|
||||
#Use32BitFileId=0
|
||||
|
||||
#Auto refresh exports time interval in mins.
|
||||
#default is 0, means there is no auto refresh.
|
||||
#AutoRefreshExportsTimeInterval = 5
|
203
sahara/plugins/mapr/versions/v3_1_1/resources/plugin_spec.json
Normal file
@ -0,0 +1,203 @@
|
||||
{
|
||||
"files": [
|
||||
{
|
||||
"remote": null,
|
||||
"type": null,
|
||||
"configs": {
|
||||
"required": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "Enable MapR-DB",
|
||||
"config_type": "bool",
|
||||
"default_value": false,
|
||||
"priority": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/cldb.conf",
|
||||
"local": "default/cldb.conf",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/hadoop-metrics.properties",
|
||||
"local": "default/hadoop-metrics.properties",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/mfs.conf",
|
||||
"local": "default/mfs.conf",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/nfsserver.conf",
|
||||
"local": "default/nfsserver.conf",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/exports",
|
||||
"local": "default/exports",
|
||||
"type": "raw"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/hadoop/hadoop-0.20.2/conf/core-site.xml",
|
||||
"local": "default/core-site.xml",
|
||||
"type": "xml",
|
||||
"configs": {
|
||||
"optional": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "fs.swift.impl",
|
||||
"default_value": "org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem"
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.connect.timeout",
|
||||
"config_type": "int",
|
||||
"default_value": 15000
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.socket.timeout",
|
||||
"config_type": "int",
|
||||
"default_value": 60000
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.connect.retry.count",
|
||||
"config_type": "int",
|
||||
"default_value": 3
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.connect.throttle.delay",
|
||||
"config_type": "int",
|
||||
"default_value": 0
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.blocksize",
|
||||
"config_type": "int",
|
||||
"default_value": 32768
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.partsize",
|
||||
"config_type": "int",
|
||||
"default_value": 4718592
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.requestsize",
|
||||
"config_type": "int",
|
||||
"default_value": 64
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.public",
|
||||
"config_type": "bool",
|
||||
"default_value": true
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.http.port",
|
||||
"config_type": "int",
|
||||
"default_value": 8080
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.https.port",
|
||||
"config_type": "int",
|
||||
"default_value": 443
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.auth.endpoint.prefix",
|
||||
"default_value": "/endpoints/AUTH_"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/hadoop/hadoop-0.20.2/conf/mapred-site.xml",
|
||||
"type": "xml"
|
||||
}
|
||||
],
|
||||
"services": [
|
||||
{
|
||||
"name": "general",
|
||||
"files": [
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Management",
|
||||
"node_processes": [
|
||||
"ZooKeeper",
|
||||
"Webserver",
|
||||
"MapR-Client",
|
||||
"Metrics"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "MapReduce",
|
||||
"node_processes": [
|
||||
"TaskTracker",
|
||||
"JobTracker"
|
||||
],
|
||||
"files": [
|
||||
"/opt/mapr/hadoop/hadoop-0.20.2/conf/mapred-site.xml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "MapR FS",
|
||||
"node_processes": [
|
||||
"CLDB",
|
||||
"FileServer",
|
||||
"NFS"
|
||||
],
|
||||
"files": [
|
||||
"/opt/mapr/conf/cldb.conf",
|
||||
"/opt/mapr/conf/hadoop-metrics.properties",
|
||||
"/opt/mapr/conf/mfs.conf",
|
||||
"/opt/mapr/conf/nfsserver.conf",
|
||||
"/opt/mapr/conf/exports",
|
||||
"/opt/mapr/hadoop/hadoop-0.20.2/conf/core-site.xml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "HBase",
|
||||
"node_processes": [
|
||||
"HBase-Master",
|
||||
"HBase-RegionServer",
|
||||
"HBase-Client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Hive",
|
||||
"node_processes": [
|
||||
"HiveMetastore",
|
||||
"HiveServer2"
|
||||
],
|
||||
"versions": [
|
||||
"0.13",
|
||||
"0.12"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Oozie",
|
||||
"node_processes": [
|
||||
"Oozie"
|
||||
],
|
||||
"versions": [
|
||||
"4.0.1",
|
||||
"4.0.0",
|
||||
"3.3.2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Pig",
|
||||
"node_processes": [
|
||||
"Pig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Mahout",
|
||||
"node_processes": [
|
||||
"Mahout"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
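A hedged sketch of how this spec is consumed. PluginSpec lives in sahara.plugins.mapr.util.plugin_spec (imported elsewhere in this change as ps) and is constructed from the path above by the base version handler; the lookup below mirrors how base_cluster_configurer maps a remote file to its owning service.

spec = ps.PluginSpec('plugins/mapr/versions/v3_1_1/resources/plugin_spec.json')
service = spec.get_service_for_file_name('/opt/mapr/conf/cldb.conf')
# expected to resolve to the "MapR FS" service entry defined above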
34
sahara/plugins/mapr/versions/v3_1_1/start_utils.py
Normal file
@ -0,0 +1,34 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sahara.plugins.mapr.util import cluster_helper as clh_utils
from sahara.plugins.mapr.util import start_helper as start_helper
import sahara.plugins.utils as utils


def exec_configure_sh_on_cluster(cluster):
    inst_list = utils.get_instances(cluster)
    for n in inst_list:
        exec_configure_sh_on_instance(cluster, n)


def exec_configure_sh_on_instance(cluster, instance):
    script_string = ('/opt/mapr/server/configure.sh'
                     + ' -C ' + clh_utils.get_cldb_nodes_ip(cluster)
                     + ' -Z ' + clh_utils.get_zookeeper_nodes_ip(cluster)
                     + ' -f')
    if not start_helper.check_if_mapr_user_exist(instance):
        script_string = script_string + ' --create-user'

    instance.remote().execute_command(script_string, run_as_root=True)
112
sahara/plugins/mapr/versions/v3_1_1/version_handler.py
Normal file
@ -0,0 +1,112 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sahara import context
|
||||
from sahara.plugins.mapr.util import cluster_helper as clh_utils
|
||||
import sahara.plugins.mapr.util.config_utils as cu
|
||||
import sahara.plugins.mapr.util.names as n
|
||||
from sahara.plugins.mapr.util import scaling
|
||||
from sahara.plugins.mapr.util import start_helper as start_helper
|
||||
import sahara.plugins.mapr.util.validation_utils as vu
|
||||
import sahara.plugins.mapr.versions.base_context as bc
|
||||
from sahara.plugins.mapr.versions import base_version_handler as bvh
|
||||
import sahara.plugins.mapr.versions.v3_1_1.cluster_configurer as cc
|
||||
import sahara.plugins.utils as u
|
||||
|
||||
|
||||
version = '3.1.1'
|
||||
SIXTY_SECONDS = 60
|
||||
|
||||
|
||||
class VersionHandler(bvh.BaseVersionHandler):
|
||||
|
||||
def get_plugin_version(self):
|
||||
return version
|
||||
|
||||
def start_cluster(self, cluster):
|
||||
start_helper.exec_configure_sh_on_cluster(
|
||||
cluster, self.get_configure_sh_string(cluster))
|
||||
start_helper.wait_for_mfs_unlock(cluster, self.get_waiting_script())
|
||||
start_helper.setup_maprfs_on_cluster(
|
||||
cluster, self.get_disk_setup_script())
|
||||
start_helper.start_zookeeper_nodes_on_cluster(cluster)
|
||||
start_helper.start_warden_on_cldb_nodes(cluster)
|
||||
context.sleep(SIXTY_SECONDS)
|
||||
start_helper.start_warden_on_other_nodes(cluster)
|
||||
start_helper.start_ecosystem(self.get_context(cluster))
|
||||
|
||||
def get_waiting_script(self):
|
||||
return 'plugins/mapr/util/resources/waiting_script.sh'
|
||||
|
||||
def get_configure_sh_string(self, cluster):
|
||||
return ('/opt/mapr/server/configure.sh'
|
||||
+ ' -C ' + clh_utils.get_cldb_nodes_ip(cluster)
|
||||
+ ' -Z ' + clh_utils.get_zookeeper_nodes_ip(cluster) + ' -f')
|
||||
|
||||
def scale_cluster(self, cluster, instances):
|
||||
scaling.scale_cluster(cluster, instances, self.get_disk_setup_script(),
|
||||
self.get_waiting_script(),
|
||||
self.get_context(cluster),
|
||||
self.get_configure_sh_string(cluster), True)
|
||||
|
||||
def decommission_nodes(self, cluster, instances):
|
||||
scaling.decommission_nodes(
|
||||
cluster, instances, self.get_configure_sh_string(cluster))
|
||||
|
||||
def get_cluster_validation_rules(self, cluster):
|
||||
return [vu.not_less_than_count_component_vr(n.ZOOKEEPER, 1),
|
||||
vu.not_less_than_count_component_vr(n.CLDB, 1),
|
||||
vu.not_less_than_count_component_vr(n.TASK_TRACKER, 1),
|
||||
vu.not_less_than_count_component_vr(n.FILE_SERVER, 1),
|
||||
vu.not_more_than_count_component_vr(n.OOZIE, 1),
|
||||
vu.not_less_than_count_component_vr(n.JOBTRACKER, 1),
|
||||
vu.node_dependency_satisfied_vr(n.TASK_TRACKER, n.FILE_SERVER),
|
||||
vu.node_dependency_satisfied_vr(n.CLDB, n.FILE_SERVER)]
|
||||
|
||||
def get_scaling_validation_rules(self):
|
||||
return []
|
||||
|
||||
def get_edp_validation_rules(self):
|
||||
return []
|
||||
|
||||
def get_cluster_configurer(self, cluster, plugin_spec):
|
||||
return cc.ClusterConfigurer(cluster, plugin_spec)
|
||||
|
||||
def get_context(self, cluster):
|
||||
return Context(cluster)
|
||||
|
||||
|
||||
class Context(bc.BaseContext):
|
||||
m7_enabled_config = n.IS_M7_ENABLED
|
||||
hive_version_config = 'Hive Version'
|
||||
oozie_version_config = 'Oozie Version'
|
||||
|
||||
def __init__(self, cluster):
|
||||
self.cluster = cluster
|
||||
|
||||
def get_cluster(self):
|
||||
return self.cluster
|
||||
|
||||
def is_m7_enabled(self):
|
||||
configs = cu.get_cluster_configs(self.get_cluster())
|
||||
return configs[n.GENERAL][Context.m7_enabled_config]
|
||||
|
||||
def get_hadoop_version(self):
|
||||
return '0.20.2'
|
||||
|
||||
def get_rm_instance(self):
|
||||
return u.get_instance(self.get_cluster(), n.JOBTRACKER)
|
||||
|
||||
def get_rm_port(self):
|
||||
return '9001'
|
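For illustration (IP addresses hypothetical), get_configure_sh_string above yields a command of the form below; start_helper.exec_configure_sh_on_instance may additionally append ' -M7' and ' --create-user'.

/opt/mapr/server/configure.sh -C 10.0.0.11 -Z 10.0.0.12,10.0.0.13 -f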
@ -0,0 +1,51 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sahara.i18n import _LI
from sahara.openstack.common import log as logging
import sahara.plugins.mapr.versions.base_cluster_configurer as bcc
import sahara.plugins.utils as u
from sahara.utils import files as f

LOG = logging.getLogger(__name__)


class ClusterConfigurer(bcc.BaseClusterConfigurer):
    hadoop_version_path = '/opt/mapr/conf/hadoop_version'
    hadoop_mode = 'classic'
    hadoop_version_local = 'plugins/mapr/util/resources/hadoop_version'

    def get_hadoop_conf_dir(self):
        return '/opt/mapr/hadoop/hadoop-0.20.2/conf'

    def is_node_awareness_enabled(self):
        return True

    def set_cluster_mode(self, instances):
        if not instances:
            instances = u.get_instances(self.cluster)
        LOG.info(_LI('Setting cluster mode to classic'))
        hv_template = f.get_file_text(self.hadoop_version_local)
        hv = hv_template % {"mode": self.hadoop_mode}
        for i in instances:
            with i.remote() as r:
                LOG.debug('Writing file %(f_name)s to node %(node)s',
                          {'f_name': self.hadoop_version_path,
                           'node': i.management_ip})
                r.write_file_to(self.hadoop_version_path, hv,
                                run_as_root=True)

    def configure_instances(self, instances=None):
        super(ClusterConfigurer, self).configure_instances(instances)
        self.set_cluster_mode(instances)
@ -0,0 +1,63 @@
|
||||
#
|
||||
# CLDB Config file.
|
||||
# Properties defined in this file are loaded during startup
|
||||
# and are valid for only CLDB which loaded the config.
|
||||
# These parameters are not persisted anywhere else.
|
||||
#
|
||||
# Wait until the minimum number of fileservers register with
|
||||
# CLDB before creating Root Volume
|
||||
cldb.min.fileservers=1
|
||||
# CLDB listening port
|
||||
cldb.port=7222
|
||||
# Number of worker threads
|
||||
cldb.numthreads=10
|
||||
# CLDB webport
|
||||
cldb.web.port=7221
|
||||
# CLDB https port
|
||||
cldb.web.https.port=7443
|
||||
# Disable duplicate hostid detection
|
||||
cldb.detect.dup.hostid.enabled=false
|
||||
# Deprecated: This param is no longer supported. To configure
|
||||
# the container cache, use the param cldb.containers.cache.percent
|
||||
# Number of RW containers in cache
|
||||
#cldb.containers.cache.entries=1000000
|
||||
#
|
||||
# Percentage (integer) of Xmx setting to be used for container cache
|
||||
#cldb.containers.cache.percent=20
|
||||
#
|
||||
# Topology script to be used to determine
|
||||
# Rack topology of node
|
||||
# Script should take an IP address as input and print rack path
|
||||
# on STDOUT. eg
|
||||
# $>/home/mapr/topo.pl 10.10.10.10
|
||||
# $>/mapr-rack1
|
||||
# $>/home/mapr/topo.pl 10.10.10.20
|
||||
# $>/mapr-rack2
|
||||
#net.topology.script.file.name=/home/mapr/topo.pl
|
||||
#
|
||||
# Topology mapping file used to determine
|
||||
# Rack topology of node
|
||||
# File is of a 2 column format (space separated)
|
||||
# 1st column is an IP address or hostname
|
||||
# 2nd column is the rack path
|
||||
# Line starting with '#' is a comment
|
||||
# Example file contents
|
||||
# 10.10.10.10 /mapr-rack1
|
||||
# 10.10.10.20 /mapr-rack2
|
||||
# host.foo.com /mapr-rack3
|
||||
#net.topology.table.file.name=/home/mapr/topo.txt
|
||||
#
|
||||
# ZooKeeper address
|
||||
#cldb.zookeeper.servers=10.250.1.91:5181
|
||||
# Hadoop metrics jar version
|
||||
#hadoop.version=0.20.2
|
||||
# CLDB JMX remote port
|
||||
cldb.jmxremote.port=7220
|
||||
num.volmirror.threads=1
|
||||
# Set this to set the default topology for all volumes and nodes
|
||||
# The default for all volumes is /data by default
|
||||
# UNCOMMENT the below to change the default topology.
|
||||
# For e.g., set cldb.default.topology=/mydata to create volumes
|
||||
# in /mydata topology and to place all nodes in /mydata topology
|
||||
# by default
|
||||
#cldb.default.topology=/mydata
|
@ -0,0 +1,57 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
|
||||
<!-- Put site-specific property overrides in this file. -->
|
||||
|
||||
<!--
|
||||
Replace 'maprfs' by 'hdfs' to use HDFS.
|
||||
Replace localhost by an ip address for namenode/cldb.
|
||||
-->
|
||||
|
||||
<configuration>
|
||||
|
||||
<property>
|
||||
<name>fs.default.name</name>
|
||||
<value>maprfs:///</value>
|
||||
<description>The name of the default file system. A URI whose
|
||||
scheme and authority determine the FileSystem implementation. The
|
||||
uri's scheme determines the config property (fs.SCHEME.impl) naming
|
||||
the FileSystem implementation class. The uri's authority is used to
|
||||
determine the host, port, etc. for a filesystem.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.mapr.working.dir</name>
|
||||
<value>/user/$USERNAME/</value>
|
||||
<description>The default directory to be used with relative paths.
|
||||
Note that $USERNAME is NOT an environment variable, but just a placeholder
|
||||
to indicate that it will be expanded to the corresponding username.
|
||||
Other example default directories could be "/", "/home/$USERNAME", "/$USERNAME" etc.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.s3n.block.size</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3n.blockSize</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3.block.size</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3.blockSize</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.proxyuser.mapr.groups</name>
|
||||
<value>*</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.proxyuser.mapr.hosts</name>
|
||||
<value>*</value>
|
||||
</property>
|
||||
</configuration>
|
@ -0,0 +1,30 @@
|
||||
# Sample Exports file
|
||||
|
||||
# for /mapr exports
|
||||
# <Path> <exports_control>
|
||||
|
||||
#access_control -> order is specific to default
|
||||
# list the hosts before specifying a default for all
|
||||
# a.b.c.d,1.2.3.4(ro) d.e.f.g(ro) (rw)
|
||||
# enforces ro for a.b.c.d & 1.2.3.4 and everybody else is rw
|
||||
|
||||
# special path to export clusters in mapr-clusters.conf. To disable exporting,
|
||||
# comment it out. to restrict access use the exports_control
|
||||
#
|
||||
/mapr (rw)
|
||||
|
||||
#to export only certain clusters, comment out the /mapr & uncomment.
|
||||
# Note: this will cause /mapr to be unexported
|
||||
#/mapr/clustername (rw)
|
||||
|
||||
#to export /mapr only to certain hosts (using exports_control)
|
||||
#/mapr a.b.c.d(rw),e.f.g.h(ro)
|
||||
|
||||
# export /mapr/cluster1 rw to a.b.c.d & ro to e.f.g.h (denied for others)
|
||||
#/mapr/cluster1 a.b.c.d(rw),e.f.g.h(ro)
|
||||
|
||||
# export /mapr/cluster2 only to e.f.g.h (denied for others)
|
||||
#/mapr/cluster2 e.f.g.h(rw)
|
||||
|
||||
# export /mapr/cluster3 rw to e.f.g.h & ro to others
|
||||
#/mapr/cluster2 e.f.g.h(rw) (ro)
|
@ -0,0 +1,41 @@
|
||||
#CLDB metrics config - Pick one out of null,file or ganglia.
|
||||
#Uncomment all properties in null, file or ganglia context, to send cldb metrics to that context
|
||||
|
||||
# Configuration of the "cldb" context for null
|
||||
#cldb.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
|
||||
#cldb.period=10
|
||||
|
||||
# Configuration of the "cldb" context for file
|
||||
#cldb.class=org.apache.hadoop.metrics.file.FileContext
|
||||
#cldb.period=60
|
||||
#cldb.fileName=/tmp/cldbmetrics.log
|
||||
|
||||
# Configuration of the "cldb" context for ganglia
|
||||
cldb.class=com.mapr.fs.cldb.counters.MapRGangliaContext31
|
||||
cldb.period=10
|
||||
cldb.servers=localhost:8649
|
||||
cldb.spoof=1
|
||||
|
||||
#FileServer metrics config - Pick one out of null,file or ganglia.
|
||||
#Uncomment all properties in null, file or ganglia context, to send fileserver metrics to that context
|
||||
|
||||
# Configuration of the "fileserver" context for null
|
||||
#fileserver.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
|
||||
#fileserver.period=10
|
||||
|
||||
# Configuration of the "fileserver" context for file
|
||||
#fileserver.class=org.apache.hadoop.metrics.file.FileContext
|
||||
#fileserver.period=60
|
||||
#fileserver.fileName=/tmp/fsmetrics.log
|
||||
|
||||
# Configuration of the "fileserver" context for ganglia
|
||||
fileserver.class=com.mapr.fs.cldb.counters.MapRGangliaContext31
|
||||
fileserver.period=37
|
||||
fileserver.servers=localhost:8649
|
||||
fileserver.spoof=1
|
||||
|
||||
maprmepredvariant.class=com.mapr.job.mngmnt.hadoop.metrics.MaprRPCContext
|
||||
maprmepredvariant.period=10
|
||||
maprmapred.class=com.mapr.job.mngmnt.hadoop.metrics.MaprRPCContextFinal
|
||||
maprmapred.period=10
|
||||
|
@ -0,0 +1,16 @@
|
||||
#mfs.num.compress.threads=1
|
||||
#mfs.max.aio.events=5000
|
||||
#mfs.disable.periodic.flush=0
|
||||
#mfs.io.disk.timeout=60
|
||||
#mfs.server.ip=127.0.0.1
|
||||
#mfs.max.resync.count=16
|
||||
#mfs.max.restore.count=16
|
||||
#mfs.ignore.container.delete=0
|
||||
#mfs.ignore.readdir.pattern=0
|
||||
mfs.server.port=5660
|
||||
#mfs.subnets.whitelist=127.0.0.1/8
|
||||
#UNCOMMENT this line to disable bulk writes
|
||||
#mfs.bulk.writes.enabled=0
|
||||
#UNCOMMENT this to set the topology of this node
|
||||
#For e.g., to set this node's topology to /compute-only uncomment the below line
|
||||
#mfs.network.location=/compute-only
|
@ -0,0 +1,43 @@
|
||||
# Configuration for nfsserver
|
||||
|
||||
#
|
||||
# The system defaults are in the comments
|
||||
#
|
||||
|
||||
# Default compression is true
|
||||
#Compression = true
|
||||
|
||||
# chunksize is 64M
|
||||
#ChunkSize = 67108864
|
||||
|
||||
# Number of threads for compression/decompression: default=2
|
||||
#CompThreads = 2
|
||||
|
||||
#Mount point for the ramfs file for mmap
|
||||
#RamfsMntDir = /ramfs/mapr
|
||||
|
||||
# Size of the ramfile to use (percent of total physical memory) default=0.25
|
||||
# 0: disables the use of ramfs
|
||||
#RamfsSize = 0.25
|
||||
|
||||
# Loglevel = DEBUG | INFO | WARN | ERROR | CRITICAL | OFF
|
||||
#Loglevel = INFO
|
||||
|
||||
#Duplicate Request cache size & timeout in seconds
|
||||
#DrCacheSize = 20480
|
||||
#DrCacheTimeout = 62
|
||||
# To keep the drcache lean, we only cache the response if the
|
||||
# time we took to populate is greater than 50% of DrCacheTimeout.
|
||||
# Set it to 0 to disable this optimization, Note that the DrCacheSize or
|
||||
# DrCacheTimeout will also need to be changed. Ex: if the nfsserver supports
|
||||
# 10,000 ops/sec (modification ops): then DrCacheSize will need to change
|
||||
# to: 10,000*DrCacheTimeout = 620,000
|
||||
#DRCacheTimeOutOpt = 0.5
|
||||
|
||||
#NFS fileid, by default the fileid is of 32 bit size.
|
||||
#Set Use32BitFileId=0 to use 64 bit fileid (inode number)
|
||||
#Use32BitFileId=0
|
||||
|
||||
#Auto refresh exports time interval in mins.
|
||||
#default is 0, means there is no auto refresh.
|
||||
#AutoRefreshExportsTimeInterval = 5
|
@ -0,0 +1,203 @@
|
||||
{
|
||||
"files": [
|
||||
{
|
||||
"remote": null,
|
||||
"type": null,
|
||||
"configs": {
|
||||
"required": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "Enable MapR-DB",
|
||||
"config_type": "bool",
|
||||
"default_value": false,
|
||||
"priority": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/hadoop/hadoop-0.20.2/conf/core-site.xml",
|
||||
"local": "default/core-site.xml",
|
||||
"type": "xml",
|
||||
"configs": {
|
||||
"optional": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "fs.swift.impl",
|
||||
"default_value": "org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem"
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.connect.timeout",
|
||||
"config_type": "int",
|
||||
"default_value": 15000
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.socket.timeout",
|
||||
"config_type": "int",
|
||||
"default_value": 60000
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.connect.retry.count",
|
||||
"config_type": "int",
|
||||
"default_value": 3
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.connect.throttle.delay",
|
||||
"config_type": "int",
|
||||
"default_value": 0
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.blocksize",
|
||||
"config_type": "int",
|
||||
"default_value": 32768
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.partsize",
|
||||
"config_type": "int",
|
||||
"default_value": 4718592
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.requestsize",
|
||||
"config_type": "int",
|
||||
"default_value": 64
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.public",
|
||||
"config_type": "bool",
|
||||
"default_value": true
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.http.port",
|
||||
"config_type": "int",
|
||||
"default_value": 8080
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.https.port",
|
||||
"config_type": "int",
|
||||
"default_value": 443
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.auth.endpoint.prefix",
|
||||
"default_value": "/endpoints/AUTH_"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/hadoop/hadoop-0.20.2/conf/mapred-site.xml",
|
||||
"type": "xml"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/cldb.conf",
|
||||
"local": "default/cldb.conf",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/hadoop-metrics.properties",
|
||||
"local": "default/hadoop-metrics.properties",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/mfs.conf",
|
||||
"local": "default/mfs.conf",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/nfsserver.conf",
|
||||
"local": "default/nfsserver.conf",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/exports",
|
||||
"local": "default/exports",
|
||||
"type": "raw"
|
||||
}
|
||||
],
|
||||
"services": [
|
||||
{
|
||||
"name": "general",
|
||||
"files": [
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Management",
|
||||
"node_processes": [
|
||||
"ZooKeeper",
|
||||
"Webserver",
|
||||
"MapR-Client",
|
||||
"Metrics"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "MapReduce",
|
||||
"node_processes": [
|
||||
"TaskTracker",
|
||||
"JobTracker"
|
||||
],
|
||||
"files": [
|
||||
"/opt/mapr/hadoop/hadoop-0.20.2/conf/mapred-site.xml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "MapR FS",
|
||||
"node_processes": [
|
||||
"CLDB",
|
||||
"FileServer",
|
||||
"NFS"
|
||||
],
|
||||
"files": [
|
||||
"/opt/mapr/conf/cldb.conf",
|
||||
"/opt/mapr/conf/hadoop-metrics.properties",
|
||||
"/opt/mapr/conf/mfs.conf",
|
||||
"/opt/mapr/conf/nfsserver.conf",
|
||||
"/opt/mapr/conf/exports",
|
||||
"/opt/mapr/hadoop/hadoop-0.20.2/conf/core-site.xml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "HBase",
|
||||
"node_processes": [
|
||||
"HBase-Master",
|
||||
"HBase-RegionServer",
|
||||
"HBase-Client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Hive",
|
||||
"node_processes": [
|
||||
"HiveMetastore",
|
||||
"HiveServer2"
|
||||
],
|
||||
"versions": [
|
||||
"0.13",
|
||||
"0.12"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Oozie",
|
||||
"node_processes": [
|
||||
"Oozie"
|
||||
],
|
||||
"versions": [
|
||||
"4.0.1",
|
||||
"4.0.0",
|
||||
"3.3.2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Pig",
|
||||
"node_processes": [
|
||||
"Pig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Mahout",
|
||||
"node_processes": [
|
||||
"Mahout"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
114
sahara/plugins/mapr/versions/v4_0_1_mrv1/version_handler.py
Normal file
@ -0,0 +1,114 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sahara import context
|
||||
from sahara.plugins.mapr.util import cluster_helper as clh_utils
|
||||
import sahara.plugins.mapr.util.config_utils as cu
|
||||
import sahara.plugins.mapr.util.names as n
|
||||
from sahara.plugins.mapr.util import scaling
|
||||
from sahara.plugins.mapr.util import start_helper as start_helper
|
||||
import sahara.plugins.mapr.util.validation_utils as vu
|
||||
import sahara.plugins.mapr.versions.base_context as bc
|
||||
from sahara.plugins.mapr.versions import base_version_handler as bvh
|
||||
import sahara.plugins.mapr.versions.v4_0_1_mrv1.cluster_configurer as cc
|
||||
import sahara.plugins.utils as u
|
||||
|
||||
|
||||
version = '4.0.1.mrv1'
|
||||
SIXTY_SECONDS = 60
|
||||
|
||||
|
||||
class VersionHandler(bvh.BaseVersionHandler):
|
||||
|
||||
def get_plugin_version(self):
|
||||
return version
|
||||
|
||||
def start_cluster(self, cluster):
|
||||
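# Start-up ordering implemented below: run configure.sh on every node,
# wait for MapR-FS to unlock, set up the MapR-FS disks, start the
# ZooKeeper nodes, start warden on the CLDB nodes, pause 60 seconds,
# start warden on the remaining nodes, then start the ecosystem services.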
start_helper.exec_configure_sh_on_cluster(
|
||||
cluster, self.get_configure_sh_string(cluster))
|
||||
start_helper.wait_for_mfs_unlock(cluster, self.get_waiting_script())
|
||||
start_helper.setup_maprfs_on_cluster(
|
||||
cluster, self.get_disk_setup_script())
|
||||
start_helper.start_zookeeper_nodes_on_cluster(cluster)
|
||||
start_helper.start_warden_on_cldb_nodes(cluster)
|
||||
context.sleep(SIXTY_SECONDS)
|
||||
start_helper.start_warden_on_other_nodes(cluster)
|
||||
start_helper.start_ecosystem(self.get_context(cluster))
|
||||
|
||||
def get_waiting_script(self):
|
||||
return 'plugins/mapr/util/resources/waiting_script.sh'
|
||||
|
||||
def scale_cluster(self, cluster, instances):
|
||||
scaling.scale_cluster(cluster, instances, self.get_disk_setup_script(),
|
||||
self.get_waiting_script(),
|
||||
self.get_context(cluster),
|
||||
self.get_configure_sh_string(cluster), True)
|
||||
|
||||
def decommission_nodes(self, cluster, instances):
|
||||
scaling.decommission_nodes(
|
||||
cluster, instances, self.get_configure_sh_string(cluster))
|
||||
|
||||
def get_cluster_configurer(self, cluster, plugin_spec):
|
||||
return cc.ClusterConfigurer(cluster, plugin_spec)
|
||||
|
||||
def get_cluster_validation_rules(self, cluster):
|
||||
return [vu.not_less_than_count_component_vr(n.ZOOKEEPER, 1),
|
||||
vu.not_less_than_count_component_vr(n.CLDB, 1),
|
||||
vu.not_less_than_count_component_vr(n.TASK_TRACKER, 1),
|
||||
vu.not_less_than_count_component_vr(n.FILE_SERVER, 1),
|
||||
vu.not_more_than_count_component_vr(n.OOZIE, 1),
|
||||
vu.not_more_than_count_component_vr(n.WEB_SERVER, 1),
|
||||
vu.equal_count_component_vr(n.JOBTRACKER, 1),
|
||||
vu.node_dependency_satisfied_vr(n.TASK_TRACKER, n.FILE_SERVER),
|
||||
vu.node_dependency_satisfied_vr(n.CLDB, n.FILE_SERVER)]
|
||||
|
||||
def get_scaling_validation_rules(self):
|
||||
return []
|
||||
|
||||
def get_edp_validation_rules(self):
|
||||
return []
|
||||
|
||||
def get_configure_sh_string(self, cluster):
|
||||
return ('/opt/mapr/server/configure.sh'
|
||||
' -C ' + clh_utils.get_cldb_nodes_ip(cluster)
|
||||
+ ' -Z ' + clh_utils.get_zookeeper_nodes_ip(cluster)
|
||||
+ ' -f')
|
||||
|
||||
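# Illustrative only (hypothetical addresses; assumes the cluster_helper
# functions return comma-separated IP lists): the string built above would
# look like
#   /opt/mapr/server/configure.sh -C 192.168.1.10 -Z 192.168.1.10,192.168.1.11 -f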
def get_context(self, cluster):
|
||||
return Context(cluster)
|
||||
|
||||
|
||||
class Context(bc.BaseContext):
|
||||
m7_enabled_config = n.IS_M7_ENABLED
|
||||
hive_version_config = 'Hive Version'
|
||||
oozie_version_config = 'Oozie Version'
|
||||
|
||||
def __init__(self, cluster):
|
||||
self.cluster = cluster
|
||||
|
||||
def get_cluster(self):
|
||||
return self.cluster
|
||||
|
||||
def is_m7_enabled(self):
|
||||
configs = cu.get_cluster_configs(self.get_cluster())
|
||||
return configs[n.GENERAL][Context.m7_enabled_config]
|
||||
|
||||
def get_hadoop_version(self):
|
||||
return '0.20.2'
|
||||
|
||||
def get_rm_instance(self):
|
||||
return u.get_instance(self.get_cluster(), n.JOBTRACKER)
|
||||
|
||||
def get_rm_port(self):
|
||||
return '9001'
|
@ -0,0 +1,24 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sahara.plugins.mapr.versions.base_cluster_configurer as bcc
|
||||
|
||||
|
||||
class ClusterConfigurer(bcc.BaseClusterConfigurer):
|
||||
|
||||
def get_hadoop_conf_dir(self):
|
||||
return '/opt/mapr/hadoop/hadoop-2.4.1/etc/hadoop'
|
||||
|
||||
def is_node_awareness_enabled(self):
|
||||
return False
|
@ -0,0 +1,63 @@
|
||||
#
|
||||
# CLDB Config file.
|
||||
# Properties defined in this file are loaded during startup
|
||||
# and are valid for only CLDB which loaded the config.
|
||||
# These parameters are not persisted anywhere else.
|
||||
#
|
||||
# Wait until the minimum number of fileservers register with
|
||||
# CLDB before creating Root Volume
|
||||
cldb.min.fileservers=1
|
||||
# CLDB listening port
|
||||
cldb.port=7222
|
||||
# Number of worker threads
|
||||
cldb.numthreads=10
|
||||
# CLDB webport
|
||||
cldb.web.port=7221
|
||||
# CLDB https port
|
||||
cldb.web.https.port=7443
|
||||
# Disable duplicate hostid detection
|
||||
cldb.detect.dup.hostid.enabled=false
|
||||
# Deprecated: This param is no longer supported. To configure
|
||||
# the container cache, use the param cldb.containers.cache.percent
|
||||
# Number of RW containers in cache
|
||||
#cldb.containers.cache.entries=1000000
|
||||
#
|
||||
# Percentage (integer) of Xmx setting to be used for container cache
|
||||
#cldb.containers.cache.percent=20
|
||||
#
|
||||
# Topology script to be used to determine
|
||||
# Rack topology of node
|
||||
# Script should take an IP address as input and print rack path
|
||||
# on STDOUT. eg
|
||||
# $>/home/mapr/topo.pl 10.10.10.10
|
||||
# $>/mapr-rack1
|
||||
# $>/home/mapr/topo.pl 10.10.10.20
|
||||
# $>/mapr-rack2
|
||||
#net.topology.script.file.name=/home/mapr/topo.pl
|
||||
#
|
||||
# Topology mapping file used to determine
|
||||
# Rack topology of node
|
||||
# File is of a 2 column format (space separated)
|
||||
# 1st column is an IP address or hostname
|
||||
# 2nd column is the rack path
|
||||
# Line starting with '#' is a comment
|
||||
# Example file contents
|
||||
# 10.10.10.10 /mapr-rack1
|
||||
# 10.10.10.20 /mapr-rack2
|
||||
# host.foo.com /mapr-rack3
|
||||
#net.topology.table.file.name=/home/mapr/topo.txt
|
||||
#
|
||||
# ZooKeeper address
|
||||
#cldb.zookeeper.servers=10.250.1.91:5181
|
||||
# Hadoop metrics jar version
|
||||
#hadoop.version=0.20.2
|
||||
# CLDB JMX remote port
|
||||
cldb.jmxremote.port=7220
|
||||
num.volmirror.threads=1
|
||||
# Set this to set the default topology for all volumes and nodes
|
||||
# The default for all volumes is /data by default
|
||||
# UNCOMMENT the below to change the default topology.
|
||||
# For e.g., set cldb.default.topology=/mydata to create volumes
|
||||
# in /mydata topology and to place all nodes in /mydata topology
|
||||
# by default
|
||||
#cldb.default.topology=/mydata
|
@ -0,0 +1,57 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
|
||||
<!-- Put site-specific property overrides in this file. -->
|
||||
|
||||
<!--
|
||||
Replace 'maprfs' by 'hdfs' to use HDFS.
|
||||
Replace localhost by an ip address for namenode/cldb.
|
||||
-->
|
||||
|
||||
<configuration>
|
||||
|
||||
<property>
|
||||
<name>fs.default.name</name>
|
||||
<value>maprfs:///</value>
|
||||
<description>The name of the default file system. A URI whose
|
||||
scheme and authority determine the FileSystem implementation. The
|
||||
uri's scheme determines the config property (fs.SCHEME.impl) naming
|
||||
the FileSystem implementation class. The uri's authority is used to
|
||||
determine the host, port, etc. for a filesystem.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.mapr.working.dir</name>
|
||||
<value>/user/$USERNAME/</value>
|
||||
<description>The default directory to be used with relative paths.
|
||||
Note that $USERNAME is NOT an environment variable, but just a placeholder
|
||||
to indicate that it will be expanded to the corresponding username.
|
||||
Other example default directories could be "/", "/home/$USERNAME", "/$USERNAME" etc.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.s3n.block.size</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3n.blockSize</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3.block.size</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3.blockSize</name>
|
||||
<value>33554432</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.proxyuser.mapr.groups</name>
|
||||
<value>*</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.proxyuser.mapr.hosts</name>
|
||||
<value>*</value>
|
||||
</property>
|
||||
</configuration>
|
@ -0,0 +1,30 @@
|
||||
# Sample Exports file
|
||||
|
||||
# for /mapr exports
|
||||
# <Path> <exports_control>
|
||||
|
||||
#access_control -> order is specific to default
|
||||
# list the hosts before specifying a default for all
|
||||
# a.b.c.d,1.2.3.4(ro) d.e.f.g(ro) (rw)
|
||||
# enforces ro for a.b.c.d & 1.2.3.4 and everybody else is rw
|
||||
|
||||
# special path to export clusters in mapr-clusters.conf. To disable exporting,
|
||||
# comment it out. to restrict access use the exports_control
|
||||
#
|
||||
/mapr (rw)
|
||||
|
||||
#to export only certain clusters, comment out the /mapr & uncomment.
|
||||
# Note: this will cause /mapr to be unexported
|
||||
#/mapr/clustername (rw)
|
||||
|
||||
#to export /mapr only to certain hosts (using exports_control)
|
||||
#/mapr a.b.c.d(rw),e.f.g.h(ro)
|
||||
|
||||
# export /mapr/cluster1 rw to a.b.c.d & ro to e.f.g.h (denied for others)
|
||||
#/mapr/cluster1 a.b.c.d(rw),e.f.g.h(ro)
|
||||
|
||||
# export /mapr/cluster2 only to e.f.g.h (denied for others)
|
||||
#/mapr/cluster2 e.f.g.h(rw)
|
||||
|
||||
# export /mapr/cluster3 rw to e.f.g.h & ro to others
|
||||
#/mapr/cluster2 e.f.g.h(rw) (ro)
|
@ -0,0 +1,41 @@
|
||||
#CLDB metrics config - Pick one out of null,file or ganglia.
|
||||
#Uncomment all properties in null, file or ganglia context, to send cldb metrics to that context
|
||||
|
||||
# Configuration of the "cldb" context for null
|
||||
#cldb.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
|
||||
#cldb.period=10
|
||||
|
||||
# Configuration of the "cldb" context for file
|
||||
#cldb.class=org.apache.hadoop.metrics.file.FileContext
|
||||
#cldb.period=60
|
||||
#cldb.fileName=/tmp/cldbmetrics.log
|
||||
|
||||
# Configuration of the "cldb" context for ganglia
|
||||
cldb.class=com.mapr.fs.cldb.counters.MapRGangliaContext31
|
||||
cldb.period=10
|
||||
cldb.servers=localhost:8649
|
||||
cldb.spoof=1
|
||||
|
||||
#FileServer metrics config - Pick one out of null,file or ganglia.
|
||||
#Uncomment all properties in null, file or ganglia context, to send fileserver metrics to that context
|
||||
|
||||
# Configuration of the "fileserver" context for null
|
||||
#fileserver.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
|
||||
#fileserver.period=10
|
||||
|
||||
# Configuration of the "fileserver" context for file
|
||||
#fileserver.class=org.apache.hadoop.metrics.file.FileContext
|
||||
#fileserver.period=60
|
||||
#fileserver.fileName=/tmp/fsmetrics.log
|
||||
|
||||
# Configuration of the "fileserver" context for ganglia
|
||||
fileserver.class=com.mapr.fs.cldb.counters.MapRGangliaContext31
|
||||
fileserver.period=37
|
||||
fileserver.servers=localhost:8649
|
||||
fileserver.spoof=1
|
||||
|
||||
maprmepredvariant.class=com.mapr.job.mngmnt.hadoop.metrics.MaprRPCContext
|
||||
maprmepredvariant.period=10
|
||||
maprmapred.class=com.mapr.job.mngmnt.hadoop.metrics.MaprRPCContextFinal
|
||||
maprmapred.period=10
|
||||
|
@ -0,0 +1,16 @@
|
||||
#mfs.num.compress.threads=1
|
||||
#mfs.max.aio.events=5000
|
||||
#mfs.disable.periodic.flush=0
|
||||
#mfs.io.disk.timeout=60
|
||||
#mfs.server.ip=127.0.0.1
|
||||
#mfs.max.resync.count=16
|
||||
#mfs.max.restore.count=16
|
||||
#mfs.ignore.container.delete=0
|
||||
#mfs.ignore.readdir.pattern=0
|
||||
mfs.server.port=5660
|
||||
#mfs.subnets.whitelist=127.0.0.1/8
|
||||
#UNCOMMENT this line to disable bulk writes
|
||||
#mfs.bulk.writes.enabled=0
|
||||
#UNCOMMENT this to set the topology of this node
|
||||
#For e.g., to set this node's topology to /compute-only uncomment the below line
|
||||
#mfs.network.location=/compute-only
|
@ -0,0 +1,43 @@
|
||||
# Configuration for nfsserver
|
||||
|
||||
#
|
||||
# The system defaults are in the comments
|
||||
#
|
||||
|
||||
# Default compression is true
|
||||
#Compression = true
|
||||
|
||||
# chunksize is 64M
|
||||
#ChunkSize = 67108864
|
||||
|
||||
# Number of threads for compression/decompression: default=2
|
||||
#CompThreads = 2
|
||||
|
||||
#Mount point for the ramfs file for mmap
|
||||
#RamfsMntDir = /ramfs/mapr
|
||||
|
||||
# Size of the ramfile to use (percent of total physical memory) default=0.25
|
||||
# 0: disables the use of ramfs
|
||||
#RamfsSize = 0.25
|
||||
|
||||
# Loglevel = DEBUG | INFO | WARN | ERROR | CRITICAL | OFF
|
||||
#Loglevel = INFO
|
||||
|
||||
#Duplicate Request cache size & timeout in seconds
|
||||
#DrCacheSize = 20480
|
||||
#DrCacheTimeout = 62
|
||||
# To keep the drcache lean, we only cache the response if the
|
||||
# time we took to populate is greater than 50% of DrCacheTimeout.
|
||||
# Set it to 0 to disable this optimization, Note that the DrCacheSize or
|
||||
# DrCacheTimeout will also need to be changed. Ex: if the nfsserver supports
|
||||
# 10,000 ops/sec (modification ops): then DrCacheSize will need to change
|
||||
# to: 10,000*DrCacheTimeout = 620,000
|
||||
#DRCacheTimeOutOpt = 0.5
|
||||
|
||||
#NFS fileid, by default the fileid is of 32 bit size.
|
||||
#Set Use32BitFileId=0 to use 64 bit fileid (inode number)
|
||||
#Use32BitFileId=0
|
||||
|
||||
#Auto refresh exports time interval in mins.
|
||||
#default is 0, means there is no auto refresh.
|
||||
#AutoRefreshExportsTimeInterval = 5
|
@ -0,0 +1,203 @@
|
||||
{
|
||||
"files": [
|
||||
{
|
||||
"remote": null,
|
||||
"type": null,
|
||||
"configs": {
|
||||
"required": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "Enable MapR-DB",
|
||||
"config_type": "bool",
|
||||
"default_value": false,
|
||||
"priority": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/hadoop/hadoop-2.4.1/etc/hadoop/core-site.xml",
|
||||
"local": "default/core-site.xml",
|
||||
"type": "xml",
|
||||
"configs": {
|
||||
"optional": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "fs.swift.impl",
|
||||
"default_value": "org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem"
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.connect.timeout",
|
||||
"config_type": "int",
|
||||
"default_value": 15000
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.socket.timeout",
|
||||
"config_type": "int",
|
||||
"default_value": 60000
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.connect.retry.count",
|
||||
"config_type": "int",
|
||||
"default_value": 3
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.connect.throttle.delay",
|
||||
"config_type": "int",
|
||||
"default_value": 0
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.blocksize",
|
||||
"config_type": "int",
|
||||
"default_value": 32768
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.partsize",
|
||||
"config_type": "int",
|
||||
"default_value": 4718592
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.requestsize",
|
||||
"config_type": "int",
|
||||
"default_value": 64
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.public",
|
||||
"config_type": "bool",
|
||||
"default_value": true
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.http.port",
|
||||
"config_type": "int",
|
||||
"default_value": 8080
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.https.port",
|
||||
"config_type": "int",
|
||||
"default_value": 443
|
||||
},
|
||||
{
|
||||
"name": "fs.swift.service.sahara.auth.endpoint.prefix",
|
||||
"default_value": "/endpoints/AUTH_"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/hadoop/hadoop-2.4.1/etc/hadoop/mapred-site.xml",
|
||||
"type": "xml"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/hadoop-metrics.properties",
|
||||
"local": "default/hadoop-metrics.properties",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/cldb.conf",
|
||||
"local": "default/cldb.conf",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/mfs.conf",
|
||||
"local": "default/mfs.conf",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/nfsserver.conf",
|
||||
"local": "default/nfsserver.conf",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "/opt/mapr/conf/exports",
|
||||
"local": "default/exports",
|
||||
"type": "raw"
|
||||
}
|
||||
],
|
||||
"services": [
|
||||
{
|
||||
"name": "general",
|
||||
"files": [
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Management",
|
||||
"node_processes": [
|
||||
"ZooKeeper",
|
||||
"Webserver",
|
||||
"MapR-Client",
|
||||
"Metrics"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "YARN",
|
||||
"node_processes": [
|
||||
"HistoryServer",
|
||||
"ResourceManager",
|
||||
"NodeManager"
|
||||
],
|
||||
"files": [
|
||||
"/opt/mapr/hadoop/hadoop-2.4.1/etc/hadoop/mapred-site.xml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "MapR FS",
|
||||
"node_processes": [
|
||||
"CLDB",
|
||||
"FileServer",
|
||||
"NFS"
|
||||
],
|
||||
"files": [
|
||||
"/opt/mapr/conf/cldb.conf",
|
||||
"/opt/mapr/conf/hadoop-metrics.properties",
|
||||
"/opt/mapr/conf/mfs.conf",
|
||||
"/opt/mapr/conf/nfsserver.conf",
|
||||
"/opt/mapr/conf/exports",
|
||||
"/opt/mapr/hadoop/hadoop-2.4.1/etc/hadoop/core-site.xml"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "HBase",
|
||||
"node_processes": [
|
||||
"HBase-Master",
|
||||
"HBase-RegionServer",
|
||||
"HBase-Client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Hive",
|
||||
"node_processes": [
|
||||
"HiveMetastore",
|
||||
"HiveServer2"
|
||||
],
|
||||
"versions": [
|
||||
"0.13",
|
||||
"0.12"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Oozie",
|
||||
"node_processes": [
|
||||
"Oozie"
|
||||
],
|
||||
"versions": [
|
||||
"4.0.1",
|
||||
"4.0.0"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Pig",
|
||||
"node_processes": [
|
||||
"Pig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Mahout",
|
||||
"node_processes": [
|
||||
"Mahout"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
112
sahara/plugins/mapr/versions/v4_0_1_mrv2/version_handler.py
Executable file
@ -0,0 +1,112 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sahara import context
|
||||
from sahara.plugins.mapr.util import cluster_helper as clh_utils
|
||||
import sahara.plugins.mapr.util.config_utils as cu
|
||||
import sahara.plugins.mapr.util.names as n
|
||||
from sahara.plugins.mapr.util import scaling
|
||||
from sahara.plugins.mapr.util import start_helper as start_helper
|
||||
import sahara.plugins.mapr.util.validation_utils as vu
|
||||
import sahara.plugins.mapr.versions.base_context as bc
|
||||
from sahara.plugins.mapr.versions import base_version_handler as bvh
|
||||
import sahara.plugins.mapr.versions.v4_0_1_mrv2.cluster_configurer as cc
|
||||
|
||||
|
||||
SIXTY_SECONDS = 60
|
||||
WAIT_OOZIE_INTERVAL = 300
|
||||
|
||||
|
||||
version = '4.0.1.mrv2'
|
||||
|
||||
|
||||
class VersionHandler(bvh.BaseVersionHandler):
|
||||
|
||||
def get_plugin_version(self):
|
||||
return version
|
||||
|
||||
def start_cluster(self, cluster):
|
||||
start_helper.exec_configure_sh_on_cluster(
|
||||
cluster, self.get_configure_sh_string(cluster))
|
||||
start_helper.wait_for_mfs_unlock(cluster, self.get_waiting_script())
|
||||
start_helper.setup_maprfs_on_cluster(
|
||||
cluster, self.get_disk_setup_script())
|
||||
start_helper.start_zookeeper_nodes_on_cluster(cluster)
|
||||
start_helper.start_warden_on_cldb_nodes(cluster)
|
||||
context.sleep(SIXTY_SECONDS)
|
||||
start_helper.start_warden_on_other_nodes(cluster)
|
||||
start_helper.start_ecosystem(self.get_context(cluster))
|
||||
|
||||
def get_cluster_configurer(self, cluster, plugin_spec):
|
||||
return cc.ClusterConfigurer(cluster, plugin_spec)
|
||||
|
||||
def get_configure_sh_string(self, cluster):
|
||||
return ('/opt/mapr/server/configure.sh'
|
||||
+ ' -C ' + clh_utils.get_cldb_nodes_ip(cluster)
|
||||
+ ' -Z ' + clh_utils.get_zookeeper_nodes_ip(cluster)
|
||||
+ ' -RM ' + clh_utils.get_resourcemanager_ip(cluster)
|
||||
+ ' -HS ' + clh_utils.get_historyserver_ip(cluster) + ' -f')
|
||||
|
||||
def scale_cluster(self, cluster, instances):
|
||||
scaling.scale_cluster(cluster, instances, self.get_disk_setup_script(),
|
||||
self.get_waiting_script(),
|
||||
self.get_context(cluster),
|
||||
self.get_configure_sh_string(cluster), False)
|
||||
|
||||
def decommission_nodes(self, cluster, instances):
|
||||
scaling.decommission_nodes(
|
||||
cluster, instances, self.get_configure_sh_string(cluster))
|
||||
|
||||
def get_waiting_script(self):
|
||||
return 'plugins/mapr/util/resources/waiting_script.sh'
|
||||
|
||||
def get_cluster_validation_rules(self, cluster):
|
||||
return [vu.not_less_than_count_component_vr(n.ZOOKEEPER, 1),
|
||||
vu.not_less_than_count_component_vr(n.CLDB, 1),
|
||||
vu.not_less_than_count_component_vr(n.NODE_MANAGER, 1),
|
||||
vu.not_less_than_count_component_vr(n.FILE_SERVER, 1),
|
||||
vu.not_more_than_count_component_vr(n.OOZIE, 1),
|
||||
vu.not_more_than_count_component_vr(n.WEB_SERVER, 1),
|
||||
vu.equal_count_component_vr(n.RESOURCE_MANAGER, 1),
|
||||
vu.equal_count_component_vr(n.HISTORY_SERVER, 1),
|
||||
vu.node_dependency_satisfied_vr(n.NODE_MANAGER, n.FILE_SERVER),
|
||||
vu.node_dependency_satisfied_vr(n.CLDB, n.FILE_SERVER)]
|
||||
|
||||
def get_scaling_validation_rules(self):
|
||||
return []
|
||||
|
||||
def get_edp_validation_rules(self):
|
||||
return []
|
||||
|
||||
def get_context(self, cluster):
|
||||
return Context(cluster)
|
||||
|
||||
|
||||
class Context(bc.BaseContext):
|
||||
m7_enabled_config = n.IS_M7_ENABLED
|
||||
hive_version_config = 'Hive Version'
|
||||
oozie_version_config = 'Oozie Version'
|
||||
|
||||
def __init__(self, cluster):
|
||||
self.cluster = cluster
|
||||
|
||||
def get_cluster(self):
|
||||
return self.cluster
|
||||
|
||||
def is_m7_enabled(self):
|
||||
configs = cu.get_cluster_configs(self.get_cluster())
|
||||
return configs[n.GENERAL][Context.m7_enabled_config]
|
||||
|
||||
def get_hadoop_version(self):
|
||||
return '2.4.1'
|
53
sahara/plugins/mapr/versions/version_handler_factory.py
Normal file
@ -0,0 +1,53 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def _load_versions():
|
||||
d_name = os.path.dirname(__file__)
|
||||
m_template = 'sahara.plugins.mapr.versions.%s.version_handler'
|
||||
|
||||
def predicate(v_dir):
|
||||
return os.path.isdir(os.path.join(d_name, v_dir))
|
||||
|
||||
def mapper(v_dir):
|
||||
return m_template % v_dir
|
||||
|
||||
def reducer(versions, m_name):
|
||||
m = __import__(m_name, fromlist=['sahara'])
|
||||
versions[m.version] = getattr(m, 'VersionHandler')()
|
||||
return versions
|
||||
|
||||
v_dirs = filter(predicate, os.listdir(d_name))
|
||||
m_names = map(mapper, v_dirs)
|
||||
return reduce(reducer, m_names, {})
|
||||
|
||||
|
||||
class VersionHandlerFactory(object):
|
||||
instance = None
|
||||
versions = None
|
||||
|
||||
@staticmethod
|
||||
def get():
|
||||
if not VersionHandlerFactory.instance:
|
||||
VersionHandlerFactory.versions = _load_versions()
|
||||
VersionHandlerFactory.instance = VersionHandlerFactory()
|
||||
return VersionHandlerFactory.instance
|
||||
|
||||
def get_versions(self):
|
||||
return VersionHandlerFactory.versions.keys()
|
||||
|
||||
def get_handler(self, version):
|
||||
return VersionHandlerFactory.versions[version]
|
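A minimal usage sketch of the factory above (the version strings are examples;
the real set is whatever version packages exist under versions/):

    import sahara.plugins.mapr.versions.version_handler_factory as vhf

    factory = vhf.VersionHandlerFactory.get()    # singleton; scans versions/ once
    print(factory.get_versions())                # e.g. ['4.0.1.mrv1', '4.0.1.mrv2']
    handler = factory.get_handler('4.0.1.mrv2')  # VersionHandler for that version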
0
sahara/tests/unit/plugins/mapr/__init__.py
Normal file
185
sahara/tests/unit/plugins/mapr/stubs.py
Normal file
@ -0,0 +1,185 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sahara.utils.configs as c
|
||||
|
||||
import six
|
||||
|
||||
|
||||
class AttrDict(dict):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(AttrDict, self).__init__(*args, **kwargs)
|
||||
self.__dict__ = self
|
||||
|
||||
node_domain = None
|
||||
|
||||
|
||||
class Cluster(AttrDict):
|
||||
fields = ['id', 'name', 'description', 'tenant_id', 'trust_id',
|
||||
'is_transient', 'plugin_name', 'hadoop_version',
|
||||
'cluster_configs', 'default_image_id', 'anti_affinity',
|
||||
'management_private_key', 'management_public_key',
|
||||
'user_keypair_id', 'status', 'status_description', 'info',
|
||||
'extra', 'node_groups', 'cluster_template_id',
|
||||
'cluster_template']
|
||||
|
||||
def __init__(self, mapping=None, **kwargs):
|
||||
self.id = None
|
||||
self.cluster_template_id = None
|
||||
self.cluster_template = None
|
||||
self.node_groups = []
|
||||
d = dict((f, None) for f in Cluster.fields)
|
||||
if mapping:
|
||||
d.update(mapping)
|
||||
if kwargs:
|
||||
d.update(kwargs)
|
||||
AttrDict.__init__(self, d)
|
||||
if self.node_groups:
|
||||
for ng in self.node_groups:
|
||||
ng.cluster_id = self.id
|
||||
ng.cluster = self
|
||||
ng.cluster_template_id = self.cluster_template_id
|
||||
ng.cluster_template = self.cluster_template
|
||||
|
||||
|
||||
class NodeGroup(AttrDict):
|
||||
fields = ['id', 'name', 'flavor_id', 'image_id', 'image_username',
|
||||
'node_processes', 'node_configs', 'volumes_per_node',
|
||||
'volumes_size', 'volume_mount_prefix', 'floating_ip_pool',
|
||||
'count', 'instances', 'node_group_template_id',
|
||||
'node_group_template', 'cluster_id', 'cluster',
|
||||
'cluster_template_id', 'cluster_template']
|
||||
|
||||
def __init__(self, mapping=None, **kwargs):
|
||||
self.id = None
|
||||
self.instances = []
|
||||
d = dict((f, None) for f in NodeGroup.fields)
|
||||
if mapping:
|
||||
d.update(mapping)
|
||||
if kwargs:
|
||||
d.update(kwargs)
|
||||
AttrDict.__init__(self, d)
|
||||
if self.instances:
|
||||
for i in self.instances:
|
||||
i.node_group_id = self.id
|
||||
i.node_group = self
|
||||
|
||||
def configuration(self):
|
||||
return c.merge_configs(self.cluster.cluster_configs, self.node_configs)
|
||||
|
||||
def storage_paths(self):
|
||||
mp = [self.volume_mount_prefix + str(idx)
|
||||
for idx in range(1, self.volumes_per_node + 1)]
|
||||
if not mp:
|
||||
mp = ['/mnt']
|
||||
return mp
|
||||
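# For example (hypothetical values): volume_mount_prefix='/volumes/disk' and
# volumes_per_node=2 give ['/volumes/disk1', '/volumes/disk2'];
# with volumes_per_node=0 the stub falls back to ['/mnt'].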
|
||||
def get_image_id(self):
|
||||
return self.image_id or self.cluster.default_image_id
|
||||
|
||||
|
||||
class Instance(AttrDict):
|
||||
fields = ['id', 'node_group_id', 'node_group', 'instance_id',
|
||||
'instance_name', 'internal_ip', 'management_ip', 'volumes']
|
||||
|
||||
def __init__(self, mapping=None, **kwargs):
|
||||
d = dict((f, None) for f in Instance.fields)
|
||||
p = lambda i: i[0] in Instance.fields
|
||||
if mapping:
|
||||
d.update(dict(filter(p, six.iteritems(mapping))))
|
||||
if kwargs:
|
||||
d.update(dict(filter(p, six.iteritems(kwargs))))
|
||||
AttrDict.__init__(self, d)
|
||||
results = kwargs['results'] if 'results' in kwargs else []
|
||||
default_result = (kwargs['default_result']
|
||||
if 'default_result' in kwargs
|
||||
else Remote.DEFAULT_RESULT)
|
||||
self._remote = Remote(results, default_result)
|
||||
|
||||
def hostname(self):
|
||||
return self.instance_name
|
||||
|
||||
def fqdn(self):
|
||||
return self.instance_name + '.' + node_domain
|
||||
|
||||
def remote(self):
|
||||
return self._remote
|
||||
|
||||
|
||||
class Remote(object):
|
||||
DEFAULT_RESULT = (0, '', '')
|
||||
|
||||
def __init__(self, results=[], default_result=None):
|
||||
self.fs = []
|
||||
self.history = []
|
||||
self.results = results
|
||||
self.default_result = (default_result
|
||||
if default_result
|
||||
else Remote.DEFAULT_RESULT)
|
||||
|
||||
def register_result(self, command, result):
|
||||
self.results += [(command, result)]
|
||||
|
||||
def get_result(self, command):
|
||||
for r_command, result in self.results:
|
||||
if r_command == command:
|
||||
return result
|
||||
return (self.default_result
|
||||
if command['get_stderr']
|
||||
else self.default_result[:-1])
|
||||
|
||||
def __exit__(self, *args):
|
||||
pass
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def write_file_to(self, remote_file, data, run_as_root=False, timeout=120):
|
||||
self.fs += [{'file': remote_file, 'data': data, 'root': run_as_root,
|
||||
'timeout': timeout}]
|
||||
|
||||
def write_files_to(self, files, run_as_root=False, timeout=120):
|
||||
self.fs += [{'file': f, 'data': d, 'root': run_as_root,
|
||||
'timeout': timeout}
|
||||
for f, d in six.iteritems(files)]
|
||||
|
||||
def read_file_from(self, remote_file, run_as_root=False, timeout=120):
|
||||
for f in self.fs:
|
||||
if f['file'] == remote_file:
|
||||
return f['data']
|
||||
return None
|
||||
|
||||
def replace_remote_string(self, remote_file, old_str,
|
||||
new_str, timeout=120):
|
||||
pass
|
||||
|
||||
def get_neutron_info(self):
|
||||
return
|
||||
|
||||
def get_http_client(self, port, info=None):
|
||||
return
|
||||
|
||||
def close_http_sessions(self):
|
||||
pass
|
||||
|
||||
def execute_command(self, cmd, run_as_root=False, get_stderr=False,
|
||||
raise_when_error=True, timeout=300):
|
||||
command = {'cmd': cmd,
|
||||
'run_as_root': run_as_root,
|
||||
'get_stderr': get_stderr,
|
||||
'raise_when_error': raise_when_error,
|
||||
'timeout': timeout}
|
||||
self.history += [command]
|
||||
return self.get_result(command)
|
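A minimal usage sketch of the stubs above (all values are hypothetical test
data): an Instance is built with one pre-registered command result, and the
fake Remote records every executed command in its history.

    cmd = {'cmd': 'ls /', 'run_as_root': False, 'get_stderr': True,
           'raise_when_error': True, 'timeout': 300}
    i = Instance(instance_name='node-0', management_ip='1.2.3.4',
                 results=[(cmd, (0, 'bin etc opt', ''))])
    with i.remote() as r:
        code, out, err = r.execute_command('ls /', get_stderr=True)
    assert (code, out, err) == (0, 'bin etc opt', '')
    assert r.history[-1]['cmd'] == 'ls /'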
0
sahara/tests/unit/plugins/mapr/utils/__init__.py
Normal file
@ -0,0 +1,2 @@
|
||||
net.topology.script.file.name=/opt/mapr/topology.sh
|
||||
cldb.zookeeper.servers=192.168.1.10:5181,192.168.1.11:5181,192.168.1.12:5181
|
@ -0,0 +1,69 @@
|
||||
<?xml version="1.0" ?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
<configuration>
|
||||
<property>
|
||||
<name>fs.swift.impl</name>
|
||||
<value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem
|
||||
</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.socket.timeout</name>
|
||||
<value>60000</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.blocksize</name>
|
||||
<value>32768</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.connect.retry.count</name>
|
||||
<value>3</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.auth.endpoint.prefix</name>
|
||||
<value>/endpoints/AUTH_</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.connect.timeout</name>
|
||||
<value>15000</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.requestsize</name>
|
||||
<value>64</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.connect.throttle.delay</name>
|
||||
<value>0</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>k1</name>
|
||||
<value>v1</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>k0</name>
|
||||
<value>v0</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.https.port</name>
|
||||
<value>443</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.partsize</name>
|
||||
<value>4718592</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.auth.url</name>
|
||||
<value>http://auth:None/v2.0/tokens/</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.public</name>
|
||||
<value>True</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.http.port</name>
|
||||
<value>8080</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.tenant</name>
|
||||
<value>tenant_0</value>
|
||||
</property>
|
||||
</configuration>
|
@ -0,0 +1,82 @@
|
||||
<?xml version="1.0" ?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
<configuration>
|
||||
<property>
|
||||
<name>fs.swift.requestsize</name>
|
||||
<value>64</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.impl</name>
|
||||
<value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem
|
||||
</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.socket.timeout</name>
|
||||
<value>60000</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.blocksize</name>
|
||||
<value>32768</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>net.topology.impl</name>
|
||||
<value>org.apache.hadoop.net.NetworkTopologyWithNodeGroup</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.connect.retry.count</name>
|
||||
<value>3</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.auth.endpoint.prefix</name>
|
||||
<value>/endpoints/AUTH_</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.connect.timeout</name>
|
||||
<value>15000</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.block.replicator.classname</name>
|
||||
<value>org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyWithNodeGroup
|
||||
</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.connect.throttle.delay</name>
|
||||
<value>0</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>k1</name>
|
||||
<value>v1</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>k0</name>
|
||||
<value>v0</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>net.topology.nodegroup.aware</name>
|
||||
<value>True</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.https.port</name>
|
||||
<value>443</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.partsize</name>
|
||||
<value>4718592</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.auth.url</name>
|
||||
<value>http://auth:None/v2.0/tokens/</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.public</name>
|
||||
<value>True</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.http.port</name>
|
||||
<value>8080</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.swift.service.sahara.tenant</name>
|
||||
<value>tenant_0</value>
|
||||
</property>
|
||||
</configuration>
|
@ -0,0 +1,8 @@
|
||||
<?xml version="1.0" ?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
<configuration>
|
||||
<property>
|
||||
<name>node_config_0</name>
|
||||
<value>False</value>
|
||||
</property>
|
||||
</configuration>
|
@ -0,0 +1,16 @@
|
||||
<?xml version="1.0" ?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
<configuration>
|
||||
<property>
|
||||
<name>node_config_0</name>
|
||||
<value>False</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>mapred.task.cache.levels</name>
|
||||
<value>3</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>mapred.jobtracker.nodegroup.aware</name>
|
||||
<value>True</value>
|
||||
</property>
|
||||
</configuration>
|
@ -0,0 +1,9 @@
|
||||
10.10.1.12 r
|
||||
10.10.1.10 r
|
||||
10.10.1.11 r
|
||||
192.168.1.12 r
|
||||
i1 r
|
||||
i0 r
|
||||
192.168.1.11 r
|
||||
192.168.1.10 r
|
||||
i2 r
|
@ -0,0 +1,6 @@
|
||||
edp-master-0001 /rack1
|
||||
10.50.0.8 /rack1
|
||||
edp-slave-0002 /rack1
|
||||
10.50.0.5 /rack1
|
||||
edp-slave-0001 /rack2
|
||||
10.50.0.6 /rack2
|
106
sahara/tests/unit/plugins/mapr/utils/resources/plugin_spec.json
Normal file
@ -0,0 +1,106 @@
|
||||
{
|
||||
"files": [
|
||||
{
|
||||
"remote": null,
|
||||
"type": null,
|
||||
"configs": {
|
||||
"required": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "k4"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"remote": "file_0",
|
||||
"type": "properties"
|
||||
},
|
||||
{
|
||||
"remote": "file_1",
|
||||
"local": "test.properties",
|
||||
"type": "properties",
|
||||
"configs": {
|
||||
"required": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "k0",
|
||||
"default_value": "default_value_0",
|
||||
"description": "description_0",
|
||||
"priority": 2
|
||||
}
|
||||
],
|
||||
"node": [
|
||||
{
|
||||
"name": "k1",
|
||||
"config_type": "int",
|
||||
"default_value": 3,
|
||||
"priority": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"optional": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "k2",
|
||||
"config_type": "bool"
|
||||
}
|
||||
],
|
||||
"node": [
|
||||
{
|
||||
"name": "k3"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"remote": "file_2",
|
||||
"local": "test.xml",
|
||||
"type": "xml"
|
||||
},
|
||||
{
|
||||
"remote": "file_3",
|
||||
"local": "raw.data",
|
||||
"type": "raw"
|
||||
}
|
||||
],
|
||||
"services": [
|
||||
{
|
||||
"name": "general",
|
||||
"files": [
|
||||
null,
|
||||
"file_3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "service_0"
|
||||
},
|
||||
{
|
||||
"name": "service_1",
|
||||
"files": [
|
||||
|
||||
],
|
||||
"node_processes": [
|
||||
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "service_2",
|
||||
"files": [
|
||||
"file_0",
|
||||
"file_1",
|
||||
"file_2"
|
||||
],
|
||||
"node_processes": [
|
||||
"node_process_0",
|
||||
"node_process_1"
|
||||
],
|
||||
"versions": [
|
||||
"v1",
|
||||
"v2"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
@ -0,0 +1,168 @@
{
    "files":[
        {
            "remote":null,
            "type":null,
            "configs":{
                "required":{
                    "cluster":[
                        {
                            "name":"Enable Data Locality",
                            "config_type":"bool",
                            "default_value":false,
                            "priority":1
                        },
                        {
                            "name":"Enable MapR-DB",
                            "config_type":"bool",
                            "default_value":false,
                            "priority":1
                        }
                    ]
                }
            }
        },
        {
            "remote":"/opt/mapr/hadoop/hadoop-0.20.2/conf/mapred-site.xml",
            "type":"xml",
            "configs":{
                "required":{
                    "node":[
                        {
                            "name":"node_config_0",
                            "config_type":"bool",
                            "default_value":false,
                            "priority":1
                        }
                    ]
                }
            }
        },
        {
            "remote":"/opt/mapr/conf/cldb.conf",
            "type":"properties"
        },
        {
            "remote":"/opt/mapr/hadoop/hadoop-0.20.2/conf/core-site.xml",
            "local":"test.xml",
            "type":"xml",
            "configs":{
                "optional":{
                    "cluster":[
                        {
                            "name":"fs.swift.impl",
                            "default_value":"org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem"
                        },
                        {
                            "name":"fs.swift.connect.timeout",
                            "config_type":"int",
                            "default_value":15000
                        },
                        {
                            "name":"fs.swift.socket.timeout",
                            "config_type":"int",
                            "default_value":60000
                        },
                        {
                            "name":"fs.swift.connect.retry.count",
                            "config_type":"int",
                            "default_value":3
                        },
                        {
                            "name":"fs.swift.connect.throttle.delay",
                            "config_type":"int",
                            "default_value":0
                        },
                        {
                            "name":"fs.swift.blocksize",
                            "config_type":"int",
                            "default_value":32768
                        },
                        {
                            "name":"fs.swift.partsize",
                            "config_type":"int",
                            "default_value":4718592
                        },
                        {
                            "name":"fs.swift.requestsize",
                            "config_type":"int",
                            "default_value":64
                        },
                        {
                            "name":"fs.swift.service.sahara.public",
                            "config_type":"bool",
                            "default_value":true
                        },
                        {
                            "name":"fs.swift.service.sahara.http.port",
                            "config_type":"int",
                            "default_value":8080
                        },
                        {
                            "name":"fs.swift.service.sahara.https.port",
                            "config_type":"int",
                            "default_value":443
                        },
                        {
                            "name":"fs.swift.service.sahara.auth.endpoint.prefix",
                            "default_value":"/endpoints/AUTH_"
                        }
                    ]
                }
            }
        }
    ],
    "services":[
        {
            "name":"general",
            "files":[
                null
            ]
        },
        {
            "name":"Management",
            "node_processes":[
                "ZooKeeper",
                "Webserver",
                "MapR Client",
                "Metrics"
            ]
        },
        {
            "name":"MapReduce",
            "node_processes":[
                "TaskTracker",
                "JobTracker"
            ],
            "files":[
                "/opt/mapr/hadoop/hadoop-0.20.2/conf/mapred-site.xml"
            ]
        },
        {
            "name":"MapR FS",
            "node_processes":[
                "CLDB",
                "FileServer",
                "NFS"
            ],
            "files":[
                "/opt/mapr/conf/cldb.conf",
                "/opt/mapr/hadoop/hadoop-0.20.2/conf/core-site.xml"
            ]
        },
        {
            "name":"HBase",
            "node_processes":[
                "HBase Master",
                "HBase RegionServer",
                "HBase Client"
            ]
        },
        {
            "name":"Oozie",
            "node_processes":[
                "Oozie"
            ]
        }
    ]
}
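The optional core-site.xml entries above mirror the Hadoop Swift filesystem settings Sahara relies on for data-source access. As a hedged illustration of how one of them surfaces through the provisioning API (the applicable_target value is taken from the 'MapR FS' service that owns core-site.xml in this spec; the real mapping lives in plugin_spec.py):

import sahara.plugins.provisioning as p

# Sketch: the fs.swift.connect.timeout entry above, expressed as the kind of
# Config object the spec machinery builds (see test_plugin_spec.py below).
swift_timeout = p.Config(name='fs.swift.connect.timeout',
                         applicable_target='MapR FS',
                         scope='cluster',
                         config_type='int',
                         default_value=15000,
                         is_optional=True)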
1
sahara/tests/unit/plugins/mapr/utils/resources/raw.data
Normal file
@ -0,0 +1 @@
Some unparsable data
@ -0,0 +1,4 @@
k0 = v0

# Comment
k1 = v1
10
sahara/tests/unit/plugins/mapr/utils/resources/test.xml
Normal file
@ -0,0 +1,10 @@
<configuration>
    <property>
        <name>k0</name>
        <value>v0</value>
    </property>
    <property>
        <name>k1</name>
        <value>v1</value>
    </property>
</configuration>
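The XML fixture above is a stock Hadoop configuration file; the config_file_utils tests that follow expect it to flatten to {'k0': 'v0', 'k1': 'v1'}. A standalone sketch of that parsing, shown only for orientation (the plugin's own helper is cfu.load_xml_file):

import xml.etree.ElementTree as et

def load_hadoop_xml(path):
    # Flatten <configuration><property><name>/<value> pairs into a dict,
    # which is the shape the unit tests below assert against.
    root = et.parse(path).getroot()
    return {prop.find('name').text: prop.find('value').text
            for prop in root.findall('property')}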
51
sahara/tests/unit/plugins/mapr/utils/test_cluster_info.py
Normal file
@ -0,0 +1,51 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sahara.plugins.mapr.util.cluster_info as ci
import sahara.plugins.mapr.util.plugin_spec as ps
import sahara.tests.unit.base as b
import sahara.tests.unit.plugins.mapr.stubs as s


class ClusterInfoTest(b.SaharaTestCase):

    def assertItemsEqual(self, expected, actual):
        for e in expected:
            self.assertIn(e, actual)
        for a in actual:
            self.assertIn(a, expected)

    def setUp(self):
        b.SaharaTestCase.setUp(self)
        path = 'tests/unit/plugins/mapr/utils/resources/plugin_spec_ci.json'
        self.plugin_spec = ps.PluginSpec(path)

    def test_get_node_group_services(self):
        node_processes = ['ZooKeeper', 'Webserver', 'CLDB']
        node_group = s.NodeGroup(None, node_processes=node_processes)
        cluster_info = ci.ClusterInfo(None, self.plugin_spec)
        actual = cluster_info.get_services(node_group)
        expected = ['Management', 'MapR FS', 'general']
        self.assertItemsEqual(expected, actual)

    def test_get_cluster_services(self):
        np0 = ['ZooKeeper', 'Webserver', 'CLDB']
        ng0 = s.NodeGroup(node_processes=np0)
        np1 = ['ZooKeeper', 'TaskTracker', 'FileServer']
        ng1 = s.NodeGroup(node_processes=np1)
        cluster = s.Cluster(node_groups=[ng0, ng1])
        cluster_info = ci.ClusterInfo(cluster, self.plugin_spec)
        actual = cluster_info.get_services()
        expected = ['Management', 'MapR FS', 'general', 'MapReduce']
        self.assertItemsEqual(expected, actual)
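The two tests above pin down what ClusterInfo.get_services has to do: look every node process up in the spec's service-to-process map and always include the implicit 'general' service. A rough, hypothetical restatement of that contract (services_for_processes is not a real helper; service_node_process_map is the attribute exercised in test_plugin_spec.py below):

def services_for_processes(plugin_spec, node_processes):
    # Every service that owns at least one of the node group's processes,
    # plus 'general', which carries cluster-wide options such as
    # 'Enable Data Locality'.
    services = set(['general'])
    for service, processes in plugin_spec.service_node_process_map.items():
        if set(processes) & set(node_processes):
            services.add(service)
    return services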
@ -0,0 +1,96 @@
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import StringIO as sio

import sahara.plugins.mapr.util.config_file_utils as cfu
import sahara.tests.unit.base as b

import mock as m


dirname = os.path.dirname(__file__)


class ConfigFileUtilsTest(b.SaharaTestCase):

    def assertItemsEqual(self, expected, actual):
        for e in expected:
            self.assertIn(e, actual)
        for a in actual:
            self.assertIn(a, expected)

    def test_load_properties_file(self):
        path = 'tests/unit/plugins/mapr/utils/resources/test.properties'
        actual = cfu.load_properties_file(path)
        expected = {'k0': 'v0', 'k1': 'v1'}
        self.assertEqual(expected, actual)

    def test_load_xml_file(self):
        path = 'tests/unit/plugins/mapr/utils/resources/test.xml'
        actual = cfu.load_xml_file(path)
        expected = {'k0': 'v0', 'k1': 'v1'}
        self.assertEqual(expected, actual)

    def test_load_raw_file(self):
        path = 'tests/unit/plugins/mapr/utils/resources/raw.data'
        actual = cfu.load_raw_file(path)
        expected = {'content': 'Some unparsable data'}
        self.assertEqual(expected, actual)

    @m.patch('__builtin__.open')
    def test_to_properties_file_content(self, o_mock):
        data = {'k0': 'v0', 'k1': 'v1'}
        s = sio.StringIO(cfu.to_properties_file_content(data))
        s.flush()
        o_mock.return_value = s
        actual = cfu.load_properties_file('')
        self.assertEqual(data, actual)

        data = {}
        actual = cfu.to_properties_file_content(data)
        expected = ''
        self.assertEqual(expected, actual)

    @m.patch('__builtin__.open')
    def test_to_xml_file_content(self, o_mock):
        data = {'k0': 'v0', 'k1': 'v1'}
        s = sio.StringIO(cfu.to_xml_file_content(data))
        s.flush()
        o_mock.return_value = s
        actual = cfu.load_xml_file('')
        self.assertEqual(data, actual)

    def test_to_raw_file_content(self):
        data = {'content': 'Some unparsable data'}
        actual = cfu.to_raw_file_content(data)
        expected = 'Some unparsable data'
        self.assertEqual(expected, actual)

    def test_load_file(self):
        path = 'tests/unit/plugins/mapr/utils/resources/test.properties'
        actual = cfu.load_file(path, 'properties')
        expected = {'k0': 'v0', 'k1': 'v1'}
        self.assertEqual(expected, actual)

        path = 'tests/unit/plugins/mapr/utils/resources/test.xml'
        actual = cfu.load_file(path, 'xml')
        expected = {'k0': 'v0', 'k1': 'v1'}
        self.assertEqual(expected, actual)

        path = 'tests/unit/plugins/mapr/utils/resources/raw.data'
        actual = cfu.load_file(path, 'raw')
        expected = {'content': 'Some unparsable data'}
        self.assertEqual(expected, actual)
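The round-trip tests above fix the expected .properties behaviour: blank lines and '#' comments are ignored, keys and values are split on '=' and stripped, and an empty dict serialises to an empty string. A self-contained sketch of the parsing half, inferred from those assertions rather than taken from the actual cfu implementation:

def parse_properties(text):
    # 'k0 = v0' and 'k1 = v1' become {'k0': 'v0', 'k1': 'v1'};
    # comments and blank lines are skipped.
    result = {}
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        key, _, value = line.partition('=')
        result[key.strip()] = value.strip()
    return result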
196
sahara/tests/unit/plugins/mapr/utils/test_dict_utils.py
Normal file
196
sahara/tests/unit/plugins/mapr/utils/test_dict_utils.py
Normal file
@ -0,0 +1,196 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sahara.plugins.mapr.util.dict_utils as du
|
||||
import sahara.plugins.mapr.util.func_utils as fu
|
||||
import sahara.tests.unit.base as b
|
||||
|
||||
|
||||
class DictUtilsTest(b.SaharaTestCase):
|
||||
|
||||
def assertItemsEqual(self, expected, actual):
|
||||
for e in expected:
|
||||
self.assertIn(e, actual)
|
||||
for a in actual:
|
||||
self.assertIn(a, expected)
|
||||
|
||||
def assertDictValueItemsEqual(self, expected, actual):
|
||||
self.assertItemsEqual(expected.keys(), actual.keys())
|
||||
for k in actual:
|
||||
self.assertItemsEqual(expected[k], actual[k])
|
||||
|
||||
def test_append_to_key(self):
|
||||
arg_0 = {'k0': ['v0', 'v1'], 'k1': ['v1', 'v2'], 'k3': ['v3']}
|
||||
arg_1 = {'v0': {'a': 'a'}, 'v1': {'b': 'b'},
|
||||
'v2': {'c': 'c'}, 'v4': {'d': 'd'}}
|
||||
actual = du.append_to_key(arg_0, arg_1)
|
||||
expected = {'k0': {'v0': {'a': 'a'}, 'v1': {'b': 'b'}},
|
||||
'k1': {'v1': {'b': 'b'}, 'v2': {'c': 'c'}},
|
||||
'k3': {}}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_iterable_to_values_pair_dict_reducer(self):
|
||||
vp_dict_r = du.iterable_to_values_pair_dict_reducer
|
||||
arg = [[{'a': 'a0', 'b': 'b0', 'c': 'c0'},
|
||||
{'a': 'a1', 'b': 'b1', 'c': 'c1'}],
|
||||
[{'a': 'a2', 'b': 'b2', 'c': 'c2'}]]
|
||||
reducer = vp_dict_r('a', 'b')
|
||||
actual = reduce(reducer, arg, {})
|
||||
expected = {'a0': 'b0', 'a1': 'b1', 'a2': 'b2'}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_flatten_to_list_reducer(self):
|
||||
arg = [[{'a': 'a0'}, {'a': 'a1'}], [{'a': 'a2'}]]
|
||||
reducer = du.flatten_to_list_reducer()
|
||||
actual = reduce(reducer, arg, [])
|
||||
expected = [{'a': 'a0'}, {'a': 'a1'}, {'a': 'a2'}]
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
def test_map_by_field_value(self):
|
||||
arg = [{'a': 'a0', 'b': 'b0', 'c': 'c0'},
|
||||
{'a': 'a0', 'b': 'b2', 'c': 'c1'},
|
||||
{'a': 'a2', 'b': 'b2', 'c': 'c2'}]
|
||||
|
||||
actual = du.map_by_field_value(arg, 'a')
|
||||
expected = {'a0': [{'a': 'a0', 'b': 'b0', 'c': 'c0'},
|
||||
{'a': 'a0', 'b': 'b2', 'c': 'c1'}],
|
||||
'a2': [{'a': 'a2', 'b': 'b2', 'c': 'c2'}]}
|
||||
self.assertDictValueItemsEqual(expected, actual)
|
||||
|
||||
actual = du.map_by_field_value(arg, 'c')
|
||||
expected = {'c0': [{'a': 'a0', 'b': 'b0', 'c': 'c0'}],
|
||||
'c1': [{'a': 'a0', 'b': 'b2', 'c': 'c1'}],
|
||||
'c2': [{'a': 'a2', 'b': 'b2', 'c': 'c2'}]}
|
||||
self.assertDictValueItemsEqual(expected, actual)
|
||||
|
||||
def test_map_by_fields_values(self):
|
||||
arg = [{'a': 'a0', 'b': 'b0', 'c': 'c0'},
|
||||
{'a': 'a0', 'b': 'b2', 'c': 'c1'},
|
||||
{'a': 'a2', 'b': 'b2', 'c': 'c2'}]
|
||||
actual = du.map_by_fields_values(arg, ['a', 'b', 'c'])
|
||||
expected = {'a0': {'b0': {'c0': [{'a': 'a0', 'b': 'b0', 'c': 'c0'}]},
|
||||
'b2': {'c1': [{'a': 'a0', 'b': 'b2', 'c': 'c1'}]}},
|
||||
'a2': {'b2': {'c2': [{'a': 'a2', 'b': 'b2', 'c': 'c2'}]}}}
|
||||
self.assertItemsEqual(expected.keys(), actual.keys())
|
||||
for k0 in actual:
|
||||
self.assertItemsEqual(expected[k0].keys(), actual[k0].keys())
|
||||
for k1 in actual[k0]:
|
||||
self.assertDictValueItemsEqual(
|
||||
expected[k0][k1], actual[k0][k1])
|
||||
|
||||
def test_get_keys_by_value_type(self):
|
||||
arg = {'dict_0': {}, 'list': [], 'set': set(['elem']),
|
||||
'str': 'str', 'dict_1': {}}
|
||||
|
||||
actual = du.get_keys_by_value_type(arg, dict)
|
||||
expected = ['dict_0', 'dict_1']
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
actual = du.get_keys_by_value_type(arg, list)
|
||||
expected = ['list']
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
def test_deep_update(self):
|
||||
arg_0 = {'a0': {'b0': {'c0': 'v0', 'c1': 'v1'}},
|
||||
'a1': {'b1': 'v2'}, 'a3': 'v3'}
|
||||
arg_1 = {'a0': {'b0': {'c0': 'v1', 'c2': 'v2'}, 'b1': 'v4'},
|
||||
'a1': 'v5', 'a3': {'v1': 'v2'}}
|
||||
actual = du.deep_update(arg_0, arg_1)
|
||||
expected = {'a0': {'b0': {'c0': 'v1', 'c1': 'v1', 'c2': 'v2'},
|
||||
'b1': 'v4'},
|
||||
'a1': 'v5', 'a3': {'v1': 'v2'}}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg_0)
|
||||
|
||||
def test_get_keys_by_value(self):
|
||||
arg = {'k0': 'v0', 'k1': 'v0', 'k2': 'v2'}
|
||||
|
||||
actual = du.get_keys_by_value(arg, 'v0')
|
||||
expected = ['k0', 'k1']
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
actual = du.get_keys_by_value(arg, 'v2')
|
||||
expected = ['k2']
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
actual = du.get_keys_by_value(arg, 'v')
|
||||
expected = []
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
def test_get_keys_by_value_2(self):
|
||||
arg = {'k0': ['v0', 'v1'], 'k1': ['v1', 'v2'], 'k2': ['v2', 'v3']}
|
||||
|
||||
actual = du.get_keys_by_value_2(arg, 'v1')
|
||||
expected = ['k0', 'k1']
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
actual = du.get_keys_by_value_2(arg, 'v3')
|
||||
expected = ['k2']
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
actual = du.get_keys_by_value_2(arg, 'v')
|
||||
expected = []
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
def test_iterable_to_values_list_reducer(self):
|
||||
arg = [[{'a': 'a0', 'b': 'b0'}, {'a': 'a1', 'b': 'b0'}], [{'a': 'a2'}]]
|
||||
reducer = du.iterable_to_values_list_reducer('a')
|
||||
actual = reduce(reducer, arg, [])
|
||||
expected = ['a0', 'a1', 'a2']
|
||||
self.assertTrue(isinstance(actual, list))
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
def test_select(self):
|
||||
source = [{'a': 'a0', 'b': 'b0', 'c': 'c0'},
|
||||
{'a': 'a1', 'b': 'b1', 'c': 'c0'},
|
||||
{'a': 'a2', 'b': 'b2', 'c': 'c0'}]
|
||||
|
||||
predicate = fu.like_predicate({'c': 'c0'})
|
||||
actual = du.select(['a', 'b', 'c'], source, predicate)
|
||||
expected = [{'a': 'a0', 'b': 'b0', 'c': 'c0'},
|
||||
{'a': 'a1', 'b': 'b1', 'c': 'c0'},
|
||||
{'a': 'a2', 'b': 'b2', 'c': 'c0'}]
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
predicate = fu.in_predicate('b', ['b0', 'b1'])
|
||||
actual = du.select(['a'], source, predicate)
|
||||
expected = [{'a': 'a0'}, {'a': 'a1'}]
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
def test_list_of_vp_dicts_function(self):
|
||||
arg = {'a0': 'b0', 'a1': 'b1'}
|
||||
actual = du.list_of_vp_dicts_function('a', 'b')(arg)
|
||||
expected = [{'a': 'a0', 'b': 'b0'}, {'a': 'a1', 'b': 'b1'}]
|
||||
self.assertTrue(isinstance(actual, list))
|
||||
for a in actual:
|
||||
self.assertTrue(isinstance(a, dict))
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
def test_flattened_dict(self):
|
||||
arg = {'a0': {'b0': {'c0': 'd0'}},
|
||||
'a1': {'b0': {'c1': 'd1',
|
||||
'c2': 'd2'},
|
||||
'b1': {'c0': 'd0'}}}
|
||||
|
||||
actual = du.flattened_dict(arg, ['a', 'b', 'c', 'd'])
|
||||
expected = [{'a': 'a0', 'b': 'b0', 'c': 'c0', 'd': 'd0'},
|
||||
{'a': 'a1', 'b': 'b0', 'c': 'c1', 'd': 'd1'},
|
||||
{'a': 'a1', 'b': 'b0', 'c': 'c2', 'd': 'd2'},
|
||||
{'a': 'a1', 'b': 'b1', 'c': 'c0', 'd': 'd0'}]
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
arg = {'a0': 'b0', 'a1': 'b1'}
|
||||
actual = du.flattened_dict(arg, ['a', 'b'])
|
||||
expected = [{'a': 'a0', 'b': 'b0'}, {'a': 'a1', 'b': 'b1'}]
|
||||
self.assertItemsEqual(expected, actual)
|
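Worth calling out from the dict_utils tests above: test_deep_update requires a recursive merge that never mutates its first argument, with non-dict values from the update winning outright. A minimal sketch of those semantics (deep_update_sketch is an illustrative name, not the module's function):

import copy

def deep_update_sketch(base, update):
    # Nested dicts merge recursively; any non-dict value from 'update'
    # replaces what was there; 'base' itself is left untouched.
    result = copy.deepcopy(base)
    for key, value in update.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = deep_update_sketch(result[key], value)
        else:
            result[key] = value
    return result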
202
sahara/tests/unit/plugins/mapr/utils/test_func_utils.py
Normal file
202
sahara/tests/unit/plugins/mapr/utils/test_func_utils.py
Normal file
@ -0,0 +1,202 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sahara.plugins.mapr.util.func_utils as fu
|
||||
import sahara.tests.unit.base as b
|
||||
|
||||
|
||||
class PredicatesTest(b.SaharaTestCase):
|
||||
|
||||
def test_true_predicate(self):
|
||||
self.assertTrue(fu.true_predicate(None))
|
||||
|
||||
def test_false_predicate(self):
|
||||
self.assertFalse(fu.false_predicate(None))
|
||||
|
||||
def test_not_predicate(self):
|
||||
self.assertFalse(fu.not_predicate(fu.true_predicate)(None))
|
||||
self.assertTrue(fu.not_predicate(fu.false_predicate)(None))
|
||||
|
||||
def test_and_predicate(self):
|
||||
true_p = fu.true_predicate
|
||||
false_p = fu.false_predicate
|
||||
and_p = fu.and_predicate
|
||||
self.assertTrue(and_p(true_p, true_p)(None))
|
||||
self.assertFalse(and_p(false_p, true_p)(None))
|
||||
self.assertFalse(and_p(true_p, false_p)(None))
|
||||
self.assertFalse(and_p(false_p, false_p)(None))
|
||||
|
||||
def test_or_predicate(self):
|
||||
true_p = fu.true_predicate
|
||||
false_p = fu.false_predicate
|
||||
or_p = fu.or_predicate
|
||||
self.assertTrue(or_p(true_p, true_p)(None))
|
||||
self.assertTrue(or_p(false_p, true_p)(None))
|
||||
self.assertTrue(or_p(true_p, false_p)(None))
|
||||
self.assertFalse(or_p(false_p, false_p)(None))
|
||||
|
||||
def test_field_equals_predicate(self):
|
||||
field_equals_p = fu.field_equals_predicate
|
||||
arg = {'a': 'a', 'b': 'b'}
|
||||
self.assertTrue(field_equals_p('a', 'a')(arg))
|
||||
self.assertFalse(field_equals_p('b', 'a')(arg))
|
||||
|
||||
def test_like_predicate(self):
|
||||
like_p = fu.like_predicate
|
||||
arg = {'a': 'a', 'b': 'b', 'c': 'c'}
|
||||
self.assertTrue(like_p({'a': 'a', 'b': 'b', 'c': 'c'})(arg))
|
||||
self.assertTrue(like_p({'a': 'a', 'b': 'b'})(arg))
|
||||
self.assertTrue(like_p({'a': 'a'})(arg))
|
||||
self.assertTrue(like_p({'a': 'a'}, ['a'])(arg))
|
||||
self.assertTrue(like_p({})(arg))
|
||||
self.assertTrue(like_p({'a': 'a', 'b': 'b', 'c': 'a'}, ['c'])(arg))
|
||||
self.assertFalse(like_p({'a': 'a', 'b': 'b', 'c': 'a'})(arg))
|
||||
self.assertFalse(like_p({'a': 'a', 'c': 'a'})(arg))
|
||||
self.assertFalse(like_p({'c': 'a'}, ['a'])(arg))
|
||||
|
||||
def test_in_predicate(self):
|
||||
in_p = fu.in_predicate
|
||||
arg = {'a': 'a', 'b': 'b'}
|
||||
self.assertTrue(in_p('a', ['a', 'b'])(arg))
|
||||
self.assertFalse(in_p('a', ['c', 'b'])(arg))
|
||||
self.assertFalse(in_p('a', [])(arg))
|
||||
|
||||
|
||||
class FunctionsTest(b.SaharaTestCase):
|
||||
|
||||
def test_copy_function(self):
|
||||
copy_f = fu.copy_function
|
||||
arg = {'a': 'a'}
|
||||
|
||||
actual = copy_f()(arg)
|
||||
expected = {'a': 'a'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
def test_append_field_function(self):
|
||||
append_field_f = fu.append_field_function
|
||||
arg = {'a': 'a'}
|
||||
|
||||
actual = append_field_f('b', 'b')(arg)
|
||||
expected = {'a': 'a', 'b': 'b'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
def test_append_fields_function(self):
|
||||
append_fields_f = fu.append_fields_function
|
||||
arg = {'a': 'a'}
|
||||
|
||||
actual = append_fields_f({'b': 'b', 'c': 'c'})(arg)
|
||||
expected = {'a': 'a', 'b': 'b', 'c': 'c'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
actual = append_fields_f({'b': 'b'})(arg)
|
||||
expected = {'a': 'a', 'b': 'b'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
actual = append_fields_f({})(arg)
|
||||
expected = {'a': 'a'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
def test_get_values_pair_function(self):
|
||||
get_values_pair_f = fu.get_values_pair_function
|
||||
arg = {'a': 'a', 'b': 'b'}
|
||||
|
||||
actual = get_values_pair_f('a', 'b')(arg)
|
||||
expected = ('a', 'b')
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_get_field_function(self):
|
||||
get_field_f = fu.get_field_function
|
||||
arg = {'a': 'a', 'b': 'b'}
|
||||
|
||||
actual = get_field_f('a')(arg)
|
||||
expected = ('a', 'a')
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_get_fields_function(self):
|
||||
get_fields_f = fu.get_fields_function
|
||||
arg = {'a': 'a', 'b': 'b'}
|
||||
|
||||
actual = get_fields_f(['a', 'b'])(arg)
|
||||
expected = [('a', 'a'), ('b', 'b')]
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
actual = get_fields_f(['a'])(arg)
|
||||
expected = [('a', 'a')]
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_extract_fields_function(self):
|
||||
extract_fields_f = fu.extract_fields_function
|
||||
arg = {'a': 'a', 'b': 'b'}
|
||||
|
||||
actual = extract_fields_f(['a', 'b'])(arg)
|
||||
expected = {'a': 'a', 'b': 'b'}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
actual = extract_fields_f(['a'])(arg)
|
||||
expected = {'a': 'a'}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_get_value_function(self):
|
||||
get_value_f = fu.get_value_function
|
||||
arg = {'a': 'a', 'b': 'b'}
|
||||
|
||||
actual = get_value_f('a')(arg)
|
||||
expected = 'a'
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_set_default_value_function(self):
|
||||
set_default_value_f = fu.set_default_value_function
|
||||
arg = {'a': 'a'}
|
||||
|
||||
actual = set_default_value_f('b', 'b')(arg)
|
||||
expected = {'a': 'a', 'b': 'b'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
actual = set_default_value_f('a', 'b')(arg)
|
||||
expected = {'a': 'a'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
def test_set_default_values_function(self):
|
||||
set_default_values_f = fu.set_default_values_function
|
||||
arg = {'a': 'a'}
|
||||
|
||||
actual = set_default_values_f({'a': 'b', 'c': 'c'})(arg)
|
||||
expected = {'a': 'a', 'c': 'c'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
actual = set_default_values_f({'b': 'b'})(arg)
|
||||
expected = {'a': 'a', 'b': 'b'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
actual = set_default_values_f({})(arg)
|
||||
expected = {'a': 'a'}
|
||||
self.assertEqual(expected, actual)
|
||||
self.assertIsNot(actual, arg)
|
||||
|
||||
def test_values_pair_to_dict_function(self):
|
||||
values_pair_to_dict_f = fu.values_pair_to_dict_function
|
||||
arg = ('a', 'b')
|
||||
|
||||
actual = values_pair_to_dict_f('a', 'b')(arg)
|
||||
expected = {'a': 'a', 'b': 'b'}
|
||||
self.assertEqual(expected, actual)
|
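The predicate tests above describe small composable filters over dicts; like_predicate in particular matches a template while ignoring an optional exclusion list. A hedged sketch of that contract (like_predicate_sketch is illustrative, not the func_utils implementation):

def like_predicate_sketch(template, exclude=None):
    # True when every non-excluded key in 'template' appears in the argument
    # with an equal value; an empty template therefore matches anything.
    exclude = exclude or []
    def predicate(data):
        return all(data.get(k) == v
                   for k, v in template.items() if k not in exclude)
    return predicate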
324
sahara/tests/unit/plugins/mapr/utils/test_plugin_spec.py
Normal file
324
sahara/tests/unit/plugins/mapr/utils/test_plugin_spec.py
Normal file
@ -0,0 +1,324 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import os
|
||||
|
||||
import sahara.plugins.mapr.util.plugin_spec as ps
|
||||
import sahara.plugins.provisioning as p
|
||||
import sahara.tests.unit.base as b
|
||||
|
||||
import mock as m
|
||||
|
||||
|
||||
class PluginSpecTest(b.SaharaTestCase):
|
||||
|
||||
def assertItemsEqual(self, expected, actual):
|
||||
for e in expected:
|
||||
self.assertIn(e, actual)
|
||||
for a in actual:
|
||||
self.assertIn(a, expected)
|
||||
|
||||
def assertDictValueItemsEqual(self, expected, actual):
|
||||
self.assertItemsEqual(expected.keys(), actual.keys())
|
||||
for k in actual:
|
||||
self.assertItemsEqual(expected[k], actual[k])
|
||||
|
||||
@m.patch.object(ps.PluginSpec, '__init__', new=lambda i: None)
|
||||
def setUp(self):
|
||||
super(PluginSpecTest, self).setUp()
|
||||
path = 'tests/unit/plugins/mapr/utils/resources/plugin_spec.json'
|
||||
plugin_spec = ps.PluginSpec()
|
||||
plugin_spec.base_dir = os.path.dirname(path)
|
||||
plugin_spec.plugin_spec_dict = plugin_spec._load_plugin_spec_dict(path)
|
||||
self.plugin_spec = plugin_spec
|
||||
|
||||
def test_load_service_file_name_map(self):
|
||||
plugin_spec = self.plugin_spec
|
||||
|
||||
actual = plugin_spec._load_service_file_name_map()
|
||||
expected = {'service_2': ['file_0', 'file_1', 'file_2'],
|
||||
'general': ['file_3', None]}
|
||||
self.assertDictValueItemsEqual(expected, actual)
|
||||
|
||||
def test_load_file_name_config_map(self):
|
||||
plugin_spec = self.plugin_spec
|
||||
|
||||
actual = plugin_spec._load_file_name_config_map()
|
||||
expected = {'file_1': ['k1', 'k0', 'k3', 'k2'], None: ['k4']}
|
||||
self.assertDictValueItemsEqual(expected, actual)
|
||||
|
||||
def test_load_default_configs(self):
|
||||
pls = self.plugin_spec
|
||||
pls.service_file_name_map = pls._load_service_file_name_map()
|
||||
|
||||
actual = pls._load_default_configs()
|
||||
expected = {'service_2': {'file_1': {'k0': 'v0', 'k1': 'v1'},
|
||||
'file_2': {'k0': 'v0', 'k1': 'v1'}},
|
||||
'general': {'file_3': {'content': 'Some unparsable data'}}}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_load_service_node_process_map(self):
|
||||
pls = self.plugin_spec
|
||||
|
||||
actual = pls._load_service_node_process_map()
|
||||
expected = {'service_2': ['node_process_0', 'node_process_1']}
|
||||
self.assertDictValueItemsEqual(expected, actual)
|
||||
|
||||
def test_load_plugin_config_items(self):
|
||||
pls = self.plugin_spec
|
||||
pls.service_file_name_map = pls._load_service_file_name_map()
|
||||
pls.default_configs = pls._load_default_configs()
|
||||
pls.plugin_config_objects = pls._load_plugin_config_objects()
|
||||
pls.file_name_config_map = pls._load_file_name_config_map()
|
||||
|
||||
actual = pls._load_plugin_config_items()
|
||||
expected = [{'default_value': 3, 'name': 'k1', 'config_values': None,
|
||||
'priority': 1, 'config_type': 'int', 'file': 'file_1',
|
||||
'applicable_target': 'service_2', 'is_optional': False,
|
||||
'scope': 'node', 'description': None},
|
||||
{'default_value': None, 'name': 'k2',
|
||||
'config_values': None, 'priority': 2,
|
||||
'config_type': 'bool', 'file': 'file_1',
|
||||
'applicable_target': 'service_2', 'is_optional': True,
|
||||
'scope': 'cluster', 'description': None},
|
||||
{'default_value': 'default_value_0', 'name': 'k0',
|
||||
'config_values': None, 'priority': 2, 'file': 'file_1',
|
||||
'config_type': 'string', 'applicable_target': 'service_2',
|
||||
'is_optional': False, 'scope': 'cluster',
|
||||
'description': 'description_0'},
|
||||
{'default_value': None, 'name': 'k3',
|
||||
'config_values': None, 'priority': 2,
|
||||
'config_type': 'string', 'file': 'file_1',
|
||||
'applicable_target': 'service_2', 'is_optional': True,
|
||||
'scope': 'node', 'description': None},
|
||||
{'default_value': None, 'name': 'k4',
|
||||
'config_values': None, 'priority': 2,
|
||||
'config_type': 'string', 'file': None,
|
||||
'applicable_target': 'general', 'is_optional': False,
|
||||
'scope': 'cluster', 'description': None}]
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
def test_load_plugin_configs(self):
|
||||
pls = self.plugin_spec
|
||||
pls.service_file_name_map = pls._load_service_file_name_map()
|
||||
pls.plugin_config_objects = pls._load_plugin_config_objects()
|
||||
pls.file_name_config_map = pls._load_file_name_config_map()
|
||||
pls.plugin_config_items = pls._load_plugin_config_items()
|
||||
|
||||
actual = pls._load_plugin_configs()
|
||||
expected = {'service_2': {'file_1': {'k0': 'default_value_0', 'k1': 3,
|
||||
'k2': None, 'k3': None}},
|
||||
'general': {None: {'k4': None}}}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_load_default_plugin_configs(self):
|
||||
pls = self.plugin_spec
|
||||
pls.service_file_name_map = pls._load_service_file_name_map()
|
||||
pls.default_configs = pls._load_default_configs()
|
||||
pls.plugin_config_objects = pls._load_plugin_config_objects()
|
||||
pls.file_name_config_map = pls._load_file_name_config_map()
|
||||
pls.plugin_config_items = pls._load_plugin_config_items()
|
||||
pls.plugin_configs = pls._load_plugin_configs()
|
||||
|
||||
actual = pls._load_default_plugin_configs()
|
||||
expected = {'service_2': {'file_1': {'k0': 'default_value_0', 'k1': 3,
|
||||
'k2': None, 'k3': None},
|
||||
'file_2': {'k0': 'v0', 'k1': 'v1'}},
|
||||
'general': {None: {'k4': None},
|
||||
'file_3': {'content': 'Some unparsable data'}}}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_load_plugin_config_objects(self):
|
||||
pls = self.plugin_spec
|
||||
pls.service_file_name_map = pls._load_service_file_name_map()
|
||||
pls.default_configs = pls._load_default_configs()
|
||||
|
||||
actual = pls._load_plugin_config_objects()
|
||||
expected = [p.Config('k0', 'service_2', 'cluster',
|
||||
default_value='default_value_0',
|
||||
description='description_0'),
|
||||
p.Config('k1', 'service_2', 'node',
|
||||
config_type='int', default_value=3, priority=1),
|
||||
p.Config('k2', 'service_2', 'cluster',
|
||||
config_type='bool', is_optional=True),
|
||||
p.Config('k3', 'service_2', 'node', is_optional=True),
|
||||
p.Config('k4', 'general', 'cluster', is_optional=False)]
|
||||
m_actual = map(lambda i: i.to_dict(), actual)
|
||||
m_expected = map(lambda i: i.to_dict(), expected)
|
||||
self.assertItemsEqual(m_expected, m_actual)
|
||||
|
||||
def test_get_node_process_service(self):
|
||||
pls = self.plugin_spec
|
||||
pls.service_node_process_map = pls._load_service_node_process_map()
|
||||
|
||||
actual = pls.get_node_process_service('node_process_0')
|
||||
expected = 'service_2'
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_get_default_plugin_configs(self):
|
||||
pls = self.plugin_spec
|
||||
pls.service_file_name_map = pls._load_service_file_name_map()
|
||||
pls.default_configs = pls._load_default_configs()
|
||||
pls.plugin_config_objects = pls._load_plugin_config_objects()
|
||||
pls.file_name_config_map = pls._load_file_name_config_map()
|
||||
pls.plugin_config_items = pls._load_plugin_config_items()
|
||||
pls.plugin_configs = pls._load_plugin_configs()
|
||||
pls.default_plugin_configs = pls._load_default_plugin_configs()
|
||||
|
||||
actual = pls.get_default_plugin_configs(['service_2'])
|
||||
expected = {'service_2': {'file_1': {'k0': 'default_value_0', 'k1': 3,
|
||||
'k2': None, 'k3': None},
|
||||
'file_2': {'k0': 'v0', 'k1': 'v1'}}}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_get_config_file(self):
|
||||
path = 'tests/unit/plugins/mapr/utils/resources/plugin_spec.json'
|
||||
plugin_spec = ps.PluginSpec(path)
|
||||
|
||||
arg = {'service': 'service_2', 'scope': 'node', 'name': 'k1'}
|
||||
actual = plugin_spec.get_config_file(**arg)
|
||||
expected = 'file_1'
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
arg = {'service': 'service_1', 'scope': 'node', 'name': 'k1'}
|
||||
actual = plugin_spec.get_config_file(**arg)
|
||||
expected = None
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_get_version_config_objects(self):
|
||||
actual = self.plugin_spec.get_version_config_objects()
|
||||
expected = [p.Config(name='service_2 Version',
|
||||
applicable_target='service_2',
|
||||
scope='cluster',
|
||||
config_type='dropdown',
|
||||
config_values=[('v1', 'v1'), ('v2', 'v2')],
|
||||
is_optional=False,
|
||||
priority=1)]
|
||||
m_actual = map(lambda i: i.to_dict(), actual)
|
||||
m_expected = map(lambda i: i.to_dict(), expected)
|
||||
self.assertItemsEqual(m_expected, m_actual)
|
||||
|
||||
def test_get_configs(self):
|
||||
pls = self.plugin_spec
|
||||
pls.service_file_name_map = pls._load_service_file_name_map()
|
||||
pls.default_configs = pls._load_default_configs()
|
||||
pls.plugin_config_objects = pls._load_plugin_config_objects()
|
||||
|
||||
actual = pls.get_configs()
|
||||
expected = [p.Config('k0', 'service_2', 'cluster',
|
||||
default_value='default_value_0',
|
||||
description='description_0'),
|
||||
p.Config('k1', 'service_2', 'node',
|
||||
config_type='int', default_value=3, priority=1),
|
||||
p.Config('k2', 'service_2', 'cluster',
|
||||
config_type='bool', is_optional=True),
|
||||
p.Config('k3', 'service_2', 'node', is_optional=True),
|
||||
p.Config('k4', 'general', 'cluster', is_optional=False),
|
||||
p.Config('service_2 Version', 'service_2', 'cluster',
|
||||
config_type='dropdown',
|
||||
config_values=[('v1', 'v1'), ('v2', 'v2')],
|
||||
is_optional=False, priority=1)]
|
||||
m_actual = map(lambda i: i.to_dict(), actual)
|
||||
m_expected = map(lambda i: i.to_dict(), expected)
|
||||
self.assertItemsEqual(m_expected, m_actual)
|
||||
|
||||
def test_init(self):
|
||||
path = 'tests/unit/plugins/mapr/utils/resources/plugin_spec.json'
|
||||
plugin_spec = ps.PluginSpec(path)
|
||||
|
||||
actual = plugin_spec.service_file_name_map
|
||||
expected = {'service_2': ['file_0', 'file_1', 'file_2'],
|
||||
'general': [None, 'file_3']}
|
||||
self.assertDictValueItemsEqual(expected, actual)
|
||||
|
||||
actual = plugin_spec.file_name_config_map
|
||||
expected = {'file_1': ['k1', 'k0', 'k3', 'k2'], None: ['k4']}
|
||||
self.assertDictValueItemsEqual(expected, actual)
|
||||
|
||||
actual = plugin_spec.default_configs
|
||||
expected = {'service_2': {'file_1': {'k0': 'v0', 'k1': 'v1'},
|
||||
'file_2': {'k0': 'v0', 'k1': 'v1'}},
|
||||
'general': {'file_3': {'content': 'Some unparsable data'}}}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
actual = plugin_spec.service_node_process_map
|
||||
expected = {'service_2': ['node_process_0', 'node_process_1']}
|
||||
self.assertDictValueItemsEqual(expected, actual)
|
||||
|
||||
actual = plugin_spec.plugin_config_items
|
||||
expected = [{'default_value': 3, 'name': 'k1', 'config_values': None,
|
||||
'priority': 1, 'config_type': 'int', 'file': 'file_1',
|
||||
'applicable_target': 'service_2', 'is_optional': False,
|
||||
'scope': 'node', 'description': None},
|
||||
{'default_value': None, 'name': 'k2',
|
||||
'config_values': None, 'priority': 2,
|
||||
'config_type': 'bool', 'file': 'file_1',
|
||||
'applicable_target': 'service_2', 'is_optional': True,
|
||||
'scope': 'cluster', 'description': None},
|
||||
{'default_value': 'default_value_0', 'name': 'k0',
|
||||
'config_values': None, 'priority': 2, 'file': u'file_1',
|
||||
'config_type': 'string',
|
||||
'applicable_target': u'service_2',
|
||||
'is_optional': False, 'scope': u'cluster',
|
||||
'description': 'description_0'},
|
||||
{'default_value': None, 'name': 'k3',
|
||||
'config_values': None, 'priority': 2,
|
||||
'config_type': 'string', 'file': u'file_1',
|
||||
'applicable_target': u'service_2', 'is_optional': True,
|
||||
'scope': u'node', 'description': None},
|
||||
{'default_value': None, 'name': 'k4',
|
||||
'config_values': None, 'priority': 2,
|
||||
'config_type': 'string', 'file': None,
|
||||
'applicable_target': 'general', 'is_optional': False,
|
||||
'scope': 'cluster', 'description': None}]
|
||||
self.assertItemsEqual(expected, actual)
|
||||
|
||||
actual = plugin_spec.plugin_configs
|
||||
expected = {'service_2': {'file_1': {'k0': 'default_value_0', 'k1': 3,
|
||||
'k2': None, 'k3': None}},
|
||||
'general': {None: {'k4': None}}}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
actual = plugin_spec.default_plugin_configs
|
||||
expected = {'service_2': {'file_1': {'k0': 'default_value_0', 'k1': 3,
|
||||
'k2': None, 'k3': None},
|
||||
'file_2': {'k0': 'v0', 'k1': 'v1'}},
|
||||
'general': {None: {'k4': None},
|
||||
'file_3': {'content': 'Some unparsable data'}}}
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
actual = plugin_spec._load_plugin_config_objects()
|
||||
expected = [p.Config('k0', 'service_2', 'cluster',
|
||||
default_value='default_value_0',
|
||||
description='description_0'),
|
||||
p.Config('k1', 'service_2', 'node',
|
||||
config_type='int', default_value=3, priority=1),
|
||||
p.Config('k2', 'service_2', 'cluster',
|
||||
config_type='bool', is_optional=True),
|
||||
p.Config('k3', 'service_2', 'node', is_optional=True),
|
||||
p.Config('k4', 'general', 'cluster', is_optional=False)]
|
||||
m_actual = map(lambda i: i.to_dict(), actual)
|
||||
m_expected = map(lambda i: i.to_dict(), expected)
|
||||
self.assertItemsEqual(m_expected, m_actual)
|
||||
|
||||
actual = plugin_spec.get_node_process_service('node_process_0')
|
||||
expected = 'service_2'
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
actual = plugin_spec.get_default_plugin_configs(['service_2'])
|
||||
expected = {'service_2': {'file_1': {'k0': 'default_value_0', 'k1': 3,
|
||||
'k2': None, 'k3': None},
|
||||
'file_2': {'k0': 'v0', 'k1': 'v1'}}}
|
||||
self.assertEqual(expected, actual)
|
0
sahara/tests/unit/plugins/mapr/versions/__init__.py
Normal file
0
sahara/tests/unit/plugins/mapr/versions/__init__.py
Normal file
@ -0,0 +1,204 @@
|
||||
# Copyright (c) 2014, MapR Technologies
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
|
||||
import mock as m
|
||||
import six
|
||||
|
||||
import sahara.plugins.mapr.util.config_file_utils as cfu
|
||||
import sahara.plugins.mapr.util.plugin_spec as ps
|
||||
import sahara.plugins.mapr.versions.v4_0_1_mrv1.cluster_configurer as bcc
|
||||
import sahara.swift.swift_helper as sh
|
||||
import sahara.tests.unit.base as b
|
||||
import sahara.tests.unit.plugins.mapr.stubs as s
|
||||
import sahara.utils.files as f
|
||||
|
||||
|
||||
__dirname__ = os.path.dirname(__file__)
|
||||
|
||||
|
||||
class BaseClusterConfigurerTest(b.SaharaTestCase):
|
||||
|
||||
def assertItemsEqual(self, expected, actual):
|
||||
for e in expected:
|
||||
self.assertIn(e, actual)
|
||||
for a in actual:
|
||||
self.assertIn(a, expected)
|
||||
|
||||
@m.patch('sahara.context.ctx')
|
||||
@m.patch('sahara.plugins.mapr.util.config.is_data_locality_enabled')
|
||||
@m.patch('sahara.plugins.mapr.util.config_file_utils.to_file_content')
|
||||
def test_configure_wo_generals(self, tfc_mock, gtm_mock, cc_mock):
|
||||
def to_file_content(*args, **kargs):
|
||||
data = args[0]
|
||||
if isinstance(data, dict):
|
||||
return dict(map(lambda i: (str(i[0]), str(i[1])),
|
||||
six.iteritems(args[0])))
|
||||
elif isinstance(data, str):
|
||||
return {None: data}
|
||||
tfc_mock.side_effect = to_file_content
|
||||
gtm_mock.return_value = False
|
||||
cc_mock.return_value = s.AttrDict(auth_uri='http://auth',
|
||||
tenant_name='tenant_0',
|
||||
tenant_id='tenant_id')
|
||||
sh.CONF.os_region_name = None
|
||||
|
||||
i0 = s.Instance(instance_name='i0',
|
||||
management_ip='192.168.1.10',
|
||||
internal_ip='10.10.1.10')
|
||||
i1 = s.Instance(instance_name='i1',
|
||||
management_ip='192.168.1.11',
|
||||
internal_ip='10.10.1.11')
|
||||
i2 = s.Instance(instance_name='i2',
|
||||
management_ip='192.168.1.12',
|
||||
internal_ip='10.10.1.12')
|
||||
np0 = ['ZooKeeper', 'FileServer', 'TaskTracker']
|
||||
np1 = ['ZooKeeper', 'NFS', 'Oozie']
|
||||
ng0 = s.NodeGroup(id='ng0', instances=[i0, i1], node_processes=np0)
|
||||
ng1 = s.NodeGroup(id='ng1', instances=[i2], node_processes=np1)
|
||||
cc = {'general': {}}
|
||||
cluster = s.Cluster(node_groups=[ng0, ng1], cluster_configs=cc,
|
||||
hadoop_version='4.0.1.mrv1')
|
||||
|
||||
plugin_spec = ps.PluginSpec(
|
||||
'tests/unit/plugins/mapr/utils/resources/plugin_spec_ci.json')
|
||||
configurer = bcc.ClusterConfigurer(cluster, plugin_spec)
|
||||
cu_mock = m.MagicMock()
|
||||
configurer.conductor = m.MagicMock()
|
||||
configurer.conductor.cluster_update = cu_mock
|
||||
configurer.configure()
|
||||
bcc_expected_path = (
|
||||
'tests/unit/plugins/mapr/utils/resources/bcc_expected')
|
||||
core_site = {'data': cfu.load_xml_file(('%s/core-site-0.xml'
|
||||
% bcc_expected_path)),
|
||||
'file': ('/opt/mapr/hadoop/hadoop-0.20.2'
|
||||
'/conf/core-site.xml'),
|
||||
'root': True,
|
||||
'timeout': 120}
|
||||
mapred_site = {'data': cfu.load_xml_file(('%s/mapred-site-0.xml'
|
||||
% bcc_expected_path)),
|
||||
'root': True,
|
||||
'file': ('/opt/mapr/hadoop/hadoop-0.20.2'
|
||||
'/conf/mapred-site.xml'),
|
||||
'timeout': 120}
|
||||
cldb = {'root': True,
|
||||
'data': {'cldb.zookeeper.servers': ('192.168.1.10:5181,'
|
||||
'192.168.1.11:5181,'
|
||||
'192.168.1.12:5181')},
|
||||
'timeout': 120,
|
||||
'file': '/opt/mapr/conf/cldb.conf'}
|
||||
hadoop_v = {'root': True,
|
||||
'data': f.get_file_text('plugins/mapr/util'
|
||||
'/resources/'
|
||||
'hadoop_version') %
|
||||
{"mode": 'classic'},
|
||||
'timeout': 120,
|
||||
'file': '/opt/mapr/conf/hadoop_version'}
|
||||
self.assertItemsEqual(i0.remote().fs, [core_site, cldb, mapred_site,
|
||||
hadoop_v])
|
||||
self.assertItemsEqual(i1.remote().fs, [core_site, mapred_site, cldb,
|
||||
hadoop_v])
|
||||
self.assertItemsEqual(i2.remote().fs, [core_site, cldb,
|
||||
hadoop_v])
|
||||
|
||||
@m.patch('sahara.context.ctx')
|
||||
@m.patch('sahara.plugins.mapr.util.config.is_data_locality_enabled')
|
||||
@m.patch('sahara.topology.topology_helper.generate_topology_map')
|
||||
@m.patch('sahara.plugins.mapr.util.config_file_utils.to_file_content')
|
||||
def test_configure_with_topology(self, tfc_mock, gtm_mock,
|
||||
dle_mock, cc_mock):
|
||||
def to_file_content(*args, **kargs):
|
||||
data = args[0]
|
||||
if isinstance(data, dict):
|
||||
return dict(map(lambda i: (str(i[0]), str(i[1])),
|
||||
six.iteritems(args[0])))
|
||||
elif isinstance(data, str):
|
||||
return {None: data}
|
||||
tfc_mock.side_effect = to_file_content
|
||||
dle_mock.return_value = True
|
||||
gtm_mock.return_value = {'i0': 'r', '192.168.1.10': 'r',
|
||||
'10.10.1.10': 'r',
|
||||
'i1': 'r', '192.168.1.11': 'r',
|
||||
'10.10.1.11': 'r',
|
||||
'i2': 'r', '192.168.1.12': 'r',
|
||||
'10.10.1.12': 'r'}
|
||||
cc_mock.return_value = s.AttrDict(auth_uri='http://auth',
|
||||
tenant_name='tenant_0',
|
||||
tenant_id='tenant_id')
|
||||
sh.CONF.os_region_name = None
|
||||
i0 = s.Instance(instance_name='i0',
|
||||
management_ip='192.168.1.10',
|
||||
internal_ip='10.10.1.10')
|
||||
i1 = s.Instance(instance_name='i1',
|
||||
management_ip='192.168.1.11',
|
||||
internal_ip='10.10.1.11')
|
||||
i2 = s.Instance(instance_name='i2',
|
||||
management_ip='192.168.1.12',
|
||||
internal_ip='10.10.1.12')
|
||||
np0 = ['ZooKeeper', 'FileServer', 'TaskTracker']
|
||||
np1 = ['ZooKeeper', 'NFS', 'HBase RegionServer']
|
||||
ng0 = s.NodeGroup(id='ng0', instances=[i0, i1], node_processes=np0)
|
||||
ng1 = s.NodeGroup(id='ng1', instances=[i2], node_processes=np1)
|
||||
cc = {'general': {}}
|
||||
cluster = s.Cluster(node_groups=[ng0, ng1], cluster_configs=cc,
|
||||
hadoop_version='4.0.1.mrv1')
|
||||
plugin_spec = ps.PluginSpec(
|
||||
'tests/unit/plugins/mapr/utils/resources/plugin_spec_ci.json')
|
||||
configurer = bcc.ClusterConfigurer(cluster, plugin_spec)
|
||||
cu_mock = m.MagicMock()
|
||||
configurer.conductor = m.MagicMock()
|
||||
configurer.conductor.cluster_update = cu_mock
|
||||
configurer.configure()
|
||||
self.assertEqual(1, gtm_mock.call_count)
|
||||
bcc_expected_path = (
|
||||
'tests/unit/plugins/mapr/utils/resources/bcc_expected')
|
||||
core_site = {'data': cfu.load_xml_file(('%s/core-site-1.xml'
|
||||
% bcc_expected_path)),
|
||||
'file': ('/opt/mapr/hadoop/hadoop-0.20.2'
|
||||
'/conf/core-site.xml'),
|
||||
'root': True,
|
||||
'timeout': 120}
|
||||
mapred_site = {'data': cfu.load_xml_file('%s/mapred-site-1.xml'
|
||||
% bcc_expected_path),
|
||||
'root': True,
|
||||
'file': ('/opt/mapr/hadoop/hadoop-0.20.2'
|
||||
'/conf/mapred-site.xml'),
|
||||
'timeout': 120}
|
||||
topology_data = {'data': gtm_mock.return_value,
|
||||
'file': '/opt/mapr/topology.data',
|
||||
'root': True, 'timeout': 120}
|
||||
cldb = {'data': cfu.load_properties_file(('%s/cldb-1.conf'
|
||||
% bcc_expected_path)),
|
||||
'file': '/opt/mapr/conf/cldb.conf',
|
||||
'root': True, 'timeout': 120}
|
||||
t_sh = {'data': f.get_file_text('plugins/mapr/util'
|
||||
'/resources/topology.sh'),
|
||||
'file': '/opt/mapr/topology.sh',
|
||||
'root': True, 'timeout': 120}
|
||||
hadoop_v = {'root': True,
|
||||
'data': f.get_file_text('plugins/mapr/util'
|
||||
'/resources/hadoop_version') %
|
||||
{'mode': 'classic'},
|
||||
'timeout': 120,
|
||||
'file': '/opt/mapr/conf/hadoop_version'}
|
||||
self.assertItemsEqual(i0.remote().fs,
|
||||
[core_site, mapred_site,
|
||||
topology_data, cldb, t_sh, hadoop_v])
|
||||
self.assertItemsEqual(i1.remote().fs,
|
||||
[core_site, mapred_site,
|
||||
topology_data, cldb, t_sh, hadoop_v])
|
||||
self.assertItemsEqual(i2.remote().fs,
|
||||
[core_site, topology_data, cldb, t_sh,
|
||||
hadoop_v])
|
@ -39,6 +39,7 @@ console_scripts =
sahara.cluster.plugins =
    vanilla = sahara.plugins.vanilla.plugin:VanillaProvider
    hdp = sahara.plugins.hdp.ambariplugin:AmbariPlugin
    mapr = sahara.plugins.mapr.plugin:MapRPlugin
    cdh = sahara.plugins.cdh.plugin:CDHPluginProvider
    fake = sahara.plugins.fake.plugin:FakePluginProvider
    spark = sahara.plugins.spark.plugin:SparkProvider
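The setup.cfg hunk above is what makes the new plugin discoverable: providers are registered under the sahara.cluster.plugins entry-point namespace, so 'mapr' can be resolved by name at runtime. A hedged sketch of such a lookup (using stevedore directly here is an assumption about the loader, shown only to illustrate the entry point):

from stevedore import driver

# Resolve the 'mapr' entry point registered above and instantiate
# sahara.plugins.mapr.plugin:MapRPlugin.
mapr_plugin = driver.DriverManager(namespace='sahara.cluster.plugins',
                                   name='mapr',
                                   invoke_on_load=True).driver
print(mapr_plugin.get_title())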