From 037ba02ea7588d2eae5bf3a82420c5d3868141e7 Mon Sep 17 00:00:00 2001
From: Vitaly Gridnev
Date: Thu, 24 Mar 2016 13:30:20 +0300
Subject: [PATCH] Remove hdp 2.0.6 plugin

The Newton release is open, so we can remove this plugin from the
Sahara codebase.

Implements blueprint: remove-hdp206

Change-Id: I1ebf90e716964151c88de53d127789251bef066b
---
 devstack/settings                              |    2 +-
 .../notes/remove-hdp-137d0ad3d2389b7a.yaml     |    4 +
 sahara/plugins/hdp/__init__.py                 |    0
 sahara/plugins/hdp/ambariplugin.py             |  450 ---
 sahara/plugins/hdp/clusterspec.py              |  385 --
 sahara/plugins/hdp/confighints_helper.py       |   81 -
 sahara/plugins/hdp/configprovider.py           |   94 -
 sahara/plugins/hdp/edp_engine.py               |   42 -
 sahara/plugins/hdp/hadoopserver.py             |  279 --
 sahara/plugins/hdp/saharautils.py              |   33 -
 sahara/plugins/hdp/versions/__init__.py        |    0
 .../hdp/versions/abstractversionhandler.py     |   75 -
 .../hdp/versions/version_2_0_6/__init__.py     |    0
 .../hdp/versions/version_2_0_6/edp_engine.py   |   46 -
 .../resources/ambari-config-resource.json      | 3536 -----
 .../resources/default-cluster.template         | 1845 ---------
 .../version_2_0_6/resources/topology.sh        |   21 -
 .../hdp/versions/version_2_0_6/services.py     | 1272 ------
 .../versions/version_2_0_6/versionhandler.py   | 1165 ------
 .../hdp/versions/versionhandlerfactory.py      |   59 -
 sahara/tests/unit/plugins/hdp/__init__.py      |    0
 .../tests/unit/plugins/hdp/hdp_test_base.py    |  136 -
 .../hdp/resources/config-resource.json         |   42 -
 .../resources/sample-ambari-blueprint.json     |  195 -
 .../unit/plugins/hdp/test_ambariplugin.py      |  312 --
 .../unit/plugins/hdp/test_clusterspec_hdp2.py  | 2035 ----------
 .../plugins/hdp/test_confighints_helper.py     |  147 -
 .../tests/unit/plugins/hdp/test_services.py    |  815 ----
 .../plugins/hdp/test_versionmanagerfactory.py  |   35 -
 .../unit/plugins/hdp/versions/__init__.py      |    0
 .../hdp/versions/version_2_0_6/__init__.py     |    0
 .../versions/version_2_0_6/test_edp_engine.py  |   98 -
 setup.cfg                                      |    1 -
 33 files changed, 5 insertions(+), 13200 deletions(-)
 create mode 100644 releasenotes/notes/remove-hdp-137d0ad3d2389b7a.yaml
 delete mode 100644 sahara/plugins/hdp/__init__.py
 delete mode 100644 sahara/plugins/hdp/ambariplugin.py
 delete mode 100644 sahara/plugins/hdp/clusterspec.py
 delete mode 100644 sahara/plugins/hdp/confighints_helper.py
 delete mode 100644 sahara/plugins/hdp/configprovider.py
 delete mode 100644 sahara/plugins/hdp/edp_engine.py
 delete mode 100644 sahara/plugins/hdp/hadoopserver.py
 delete mode 100644 sahara/plugins/hdp/saharautils.py
 delete mode 100644 sahara/plugins/hdp/versions/__init__.py
 delete mode 100644 sahara/plugins/hdp/versions/abstractversionhandler.py
 delete mode 100644 sahara/plugins/hdp/versions/version_2_0_6/__init__.py
 delete mode 100644 sahara/plugins/hdp/versions/version_2_0_6/edp_engine.py
 delete mode 100644 sahara/plugins/hdp/versions/version_2_0_6/resources/ambari-config-resource.json
 delete mode 100644 sahara/plugins/hdp/versions/version_2_0_6/resources/default-cluster.template
 delete mode 100755 sahara/plugins/hdp/versions/version_2_0_6/resources/topology.sh
 delete mode 100644 sahara/plugins/hdp/versions/version_2_0_6/services.py
 delete mode 100644 sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py
 delete mode 100644 sahara/plugins/hdp/versions/versionhandlerfactory.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/__init__.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/hdp_test_base.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/resources/config-resource.json
 delete mode 100644 sahara/tests/unit/plugins/hdp/resources/sample-ambari-blueprint.json
 delete mode 100644 sahara/tests/unit/plugins/hdp/test_ambariplugin.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/test_clusterspec_hdp2.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/test_confighints_helper.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/test_services.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/test_versionmanagerfactory.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/versions/__init__.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/versions/version_2_0_6/__init__.py
 delete mode 100644 sahara/tests/unit/plugins/hdp/versions/version_2_0_6/test_edp_engine.py

diff --git a/devstack/settings b/devstack/settings
index 2439934b02..aad8119f89 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -27,7 +27,7 @@ SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
 SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-\
-vanilla,hdp,cdh,mapr,spark,storm,fake}
+vanilla,cdh,mapr,spark,storm,fake}
 SAHARA_BIN_DIR=$(get_python_exec_prefix)
 SAHARA_ENABLE_DISTRIBUTED_PERIODICS=${SAHARA_ENABLE_DISTRIBUTED_PERIODICS:-\
diff --git a/releasenotes/notes/remove-hdp-137d0ad3d2389b7a.yaml b/releasenotes/notes/remove-hdp-137d0ad3d2389b7a.yaml
new file mode 100644
index 0000000000..dfcb0e4d84
--- /dev/null
+++ b/releasenotes/notes/remove-hdp-137d0ad3d2389b7a.yaml
@@ -0,0 +1,4 @@
+---
+deprecations:
+  - Support for the HDP 2.0.6 plugin was removed. Use the Ambari plugin
+    instead.
diff --git a/sahara/plugins/hdp/__init__.py b/sahara/plugins/hdp/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sahara/plugins/hdp/ambariplugin.py b/sahara/plugins/hdp/ambariplugin.py
deleted file mode 100644
index 9516b5f048..0000000000
--- a/sahara/plugins/hdp/ambariplugin.py
+++ /dev/null
@@ -1,450 +0,0 @@
-# Copyright (c) 2013 Hortonworks, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -from oslo_config import cfg -from oslo_log import log as logging - -from sahara import conductor as c -from sahara import context -from sahara import exceptions as base_exc -from sahara.i18n import _ -from sahara.i18n import _LI -from sahara.plugins import exceptions as ex -from sahara.plugins.hdp import hadoopserver as h -from sahara.plugins.hdp import saharautils as utils -from sahara.plugins.hdp.versions import versionhandlerfactory as vhf -from sahara.plugins import provisioning as p -from sahara.topology import topology_helper as th -from sahara.utils import cluster_progress_ops as cpo - - -conductor = c.API -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class AmbariPlugin(p.ProvisioningPluginBase): - def __init__(self): - self.cluster_ambari_mapping = {} - self.version_factory = vhf.VersionHandlerFactory.get_instance() - - def create_cluster(self, cluster): - version = cluster.hadoop_version - handler = self.version_factory.get_version_handler(version) - - cluster_spec = handler.get_cluster_spec( - cluster, self._map_to_user_inputs( - version, cluster.cluster_configs)) - hosts = self._get_servers(cluster) - ambari_info = self.get_ambari_info(cluster_spec) - self.cluster_ambari_mapping[cluster.name] = ambari_info - rpm = self._get_rpm_uri(cluster_spec) - - servers = [] - for host in hosts: - host_role = utils.get_host_role(host) - servers.append( - h.HadoopServer(host, cluster_spec.node_groups[host_role], - ambari_rpm=rpm)) - - self._provision_cluster( - cluster.name, cluster_spec, ambari_info, servers, - cluster.hadoop_version) - - # add the topology data file and script if rack awareness is - # enabled - self._configure_topology_for_cluster(cluster, servers) - - LOG.info(_LI("Install of Hadoop stack successful.")) - # add service urls - self._set_cluster_info(cluster, cluster_spec) - - # check if HDFS HA is enabled; set it up if so - if cluster_spec.is_hdfs_ha_enabled(cluster): - self.configure_hdfs_ha(cluster) - - @cpo.event_wrapper( - True, step=_("Add configurations to cluster"), param=('cluster', 1)) - def configure_hdfs_ha(self, cluster): - LOG.debug("Configuring HDFS HA") - version = cluster.hadoop_version - handler = self.version_factory.get_version_handler(version) - - cluster_spec = handler.get_cluster_spec( - cluster, self._map_to_user_inputs( - version, cluster.cluster_configs)) - hosts = self._get_servers(cluster) - ambari_info = self.get_ambari_info(cluster_spec) - self.cluster_ambari_mapping[cluster.name] = ambari_info - rpm = self._get_rpm_uri(cluster_spec) - - servers = [] - for host in hosts: - host_role = utils.get_host_role(host) - servers.append( - h.HadoopServer(host, cluster_spec.node_groups[host_role], - ambari_rpm=rpm)) - - ambari_client = handler.get_ambari_client() - ambari_client.setup_hdfs_ha(cluster_spec, servers, ambari_info, - cluster.name) - LOG.info(_LI("Configure HDFS HA successful.")) - - def _get_servers(self, cluster): - servers = [] - if hasattr(cluster, 'node_groups') and cluster.node_groups is not None: - # code for a cluster object - for node_group in cluster.node_groups: - servers += node_group.instances - else: - # cluster is actually a cloud context - servers = cluster.instances - - return servers - - def get_node_processes(self, hadoop_version): - node_processes = {} - version_handler = ( - self.version_factory.get_version_handler(hadoop_version)) - default_config = version_handler.get_default_cluster_configuration() - for service in default_config.services: - components = [] - for component in service.components: - if 
service.is_user_template_component(component): - components.append(component.name) - node_processes[service.name] = components - - return node_processes - - def convert(self, config, plugin_name, version, template_name, - cluster_template_create): - handler = self.version_factory.get_version_handler(version) - normalized_config = handler.get_cluster_spec( - None, None, cluster_template=config).normalize() - - node_groups = [] - for ng in normalized_config.node_groups: - node_group = { - "name": ng.name, - "flavor_id": ng.flavor, - "node_processes": ng.node_processes, - "count": ng.count - } - node_groups.append(node_group) - - cluster_configs = dict() - config_resource = handler.get_config_items() - for entry in normalized_config.cluster_configs: - user_input = next((ui for ui in config_resource - if entry.config.name == ui.name), None) - if user_input is not None: - ci = entry.config - # get the associated service dictionary - target = entry.config.applicable_target - service_dict = cluster_configs.get(target, {}) - service_dict[ci.name] = entry.value - cluster_configs[target] = service_dict - else: - LOG.debug('Template based input "{entry_name}" is being' - ' filtered out as it is not considered a user input' - .format(entry_name=entry.config.name)) - - ctx = context.ctx() - return cluster_template_create(ctx, - {"name": template_name, - "plugin_name": plugin_name, - "hadoop_version": version, - "node_groups": node_groups, - "cluster_configs": cluster_configs}) - - def update_infra(self, cluster): - pass - - def convert_props_to_template(self, props): - raise NotImplementedError('not yet supported') - - def _provision_cluster(self, name, cluster_spec, ambari_info, - servers, version): - # TODO(jspeidel): encapsulate in another class - - if servers: - cpo.add_provisioning_step( - servers[0].cluster_id, - _("Provision cluster via Ambari"), len(servers)) - - with context.ThreadGroup() as tg: - for server in servers: - with context.set_current_instance_id( - server.instance['instance_id']): - tg.spawn( - "hdp-provision-instance-%s" % - server.instance.hostname(), - server.provision_ambari, ambari_info, cluster_spec) - - handler = self.version_factory.get_version_handler(version) - ambari_client = handler.get_ambari_client() - - ambari_client.wait_for_host_registrations(len(servers), ambari_info) - self._set_ambari_credentials(cluster_spec, ambari_info, version) - - ambari_client.provision_cluster( - cluster_spec, servers, ambari_info, name) - - LOG.info(_LI('Cluster provisioned via Ambari Server: {server_ip}') - .format(server_ip=ambari_info.get_address())) - - # TODO(jspeidel): invoke during scale cluster. Will need to handle dups - def _set_cluster_info(self, cluster, cluster_spec): - info = {} - for service in cluster_spec.services: - if service.deployed: - service.register_service_urls(cluster_spec, info, cluster) - - conductor.cluster_update(context.ctx(), cluster, {'info': info}) - - def _set_ambari_credentials(self, cluster_spec, ambari_info, version): - services = cluster_spec.services - ambari_client = (self.version_factory.get_version_handler(version). 
- get_ambari_client()) - for service in services: - if service.name == 'AMBARI': - is_admin_provided = False - admin_user = ambari_info.user - admin_password = ambari_info.password - for user in service.users: - if user.name == 'admin': - ambari_client.update_ambari_admin_user( - user.password, ambari_info) - is_admin_provided = True - ambari_info.user = 'admin' - ambari_info.password = user.password - else: - ambari_client.add_ambari_user(user, ambari_info) - if 'admin' in user.groups: - admin_user = user.name - admin_password = user.password - - if not is_admin_provided: - if admin_user is None: - raise ex.HadoopProvisionError(_("An Ambari user in the" - " admin group must be " - "configured.")) - ambari_info.user = admin_user - ambari_info.password = admin_password - ambari_client.delete_ambari_user('admin', ambari_info) - break - - def _update_ambari_info_credentials(self, cluster_spec, ambari_info): - services = cluster_spec.services - ambari_service = next((service for service in services if - service.name == 'AMBARI'), None) - if ambari_service is not None: - admin_user = next((user for user in ambari_service.users - if 'admin' in user.groups), None) - if admin_user is not None: - ambari_info.user = admin_user.name - ambari_info.password = admin_user.password - - LOG.info(_LI('Using "{username}" as admin user for scaling of cluster') - .format(username=ambari_info.user)) - - # PLUGIN SPI METHODS: - def get_versions(self): - return self.version_factory.get_versions() - - def configure_cluster(self, cluster): - self.create_cluster(cluster) - - def get_configs(self, hadoop_version): - handler = self.version_factory.get_version_handler(hadoop_version) - return handler.get_config_items() - - # cluster name argument supports the non-sahara cluster creation mode - def start_cluster(self, cluster): - client = self.version_factory.get_version_handler( - cluster.hadoop_version).get_ambari_client() - - handler = self.version_factory.get_version_handler( - cluster.hadoop_version) - - cluster_spec = handler.get_cluster_spec( - cluster, self._map_to_user_inputs( - cluster.hadoop_version, cluster.cluster_configs)) - - try: - client.start_services(cluster.name, cluster_spec, - self.cluster_ambari_mapping[cluster.name]) - finally: - client.cleanup(self.cluster_ambari_mapping[cluster.name]) - - def get_title(self): - return 'Hortonworks Data Platform' - - def get_description(self): - return _('The Hortonworks Sahara plugin automates the deployment ' - 'of the Hortonworks Data Platform (HDP) on OpenStack.') - - def validate(self, cluster): - raise base_exc.DeprecatedException( - _("The HDP 2.0.6 plugin is deprecated in Mitaka release and " - "will be removed in Newton release. 
Please, use the Ambari 2.3 " - "instead.")) - - def scale_cluster(self, cluster, instances): - handler = self.version_factory.get_version_handler( - cluster.hadoop_version) - ambari_client = handler.get_ambari_client() - cluster_spec = handler.get_cluster_spec( - cluster, self._map_to_user_inputs( - cluster.hadoop_version, cluster.cluster_configs)) - rpm = self._get_rpm_uri(cluster_spec) - - servers = [] - for instance in instances: - host_role = utils.get_host_role(instance) - servers.append(h.HadoopServer(instance, - cluster_spec.node_groups - [host_role], - ambari_rpm=rpm)) - - ambari_info = self.get_ambari_info(cluster_spec) - self._update_ambari_info_credentials(cluster_spec, ambari_info) - - cpo.add_provisioning_step( - cluster.id, _("Provision cluster via Ambari"), len(servers)) - - with context.ThreadGroup() as tg: - for server in servers: - with context.set_current_instance_id( - server.instance['instance_id']): - tg.spawn('Ambari provisioning thread', - server.provision_ambari, - ambari_info, cluster_spec) - - ambari_client.configure_scaled_cluster_instances( - cluster.name, cluster_spec, self._get_num_hosts(cluster), - ambari_info) - self._configure_topology_for_cluster(cluster, servers) - ambari_client.start_scaled_cluster_instances(cluster.name, - cluster_spec, servers, - ambari_info) - - ambari_client.cleanup(ambari_info) - - def get_edp_engine(self, cluster, job_type): - version_handler = ( - self.version_factory.get_version_handler(cluster.hadoop_version)) - return version_handler.get_edp_engine(cluster, job_type) - - def get_edp_job_types(self, versions=None): - res = {} - for vers in self.version_factory.get_versions(): - if not versions or vers in versions: - vh = self.version_factory.get_version_handler(vers) - res[vers] = vh.get_edp_job_types() - return res - - def get_edp_config_hints(self, job_type, version): - version_handler = ( - self.version_factory.get_version_handler(version)) - return version_handler.get_edp_config_hints(job_type) - - def decommission_nodes(self, cluster, instances): - LOG.info(_LI('AmbariPlugin: decommission_nodes called for ' - 'HDP version = {version}') - .format(version=cluster.hadoop_version)) - - handler = self.version_factory.get_version_handler( - cluster.hadoop_version) - ambari_client = handler.get_ambari_client() - cluster_spec = handler.get_cluster_spec( - cluster, self._map_to_user_inputs( - cluster.hadoop_version, cluster.cluster_configs)) - - ambari_info = self.get_ambari_info(cluster_spec) - ambari_client.decommission_cluster_instances(cluster, cluster_spec, - instances, - ambari_info) - - def validate_scaling(self, cluster, existing, additional): - handler = self.version_factory.get_version_handler( - cluster.hadoop_version) - - # results in validation - handler.get_cluster_spec( - cluster, [], - dict(list(existing.items()) + list(additional.items()))) - - def _get_num_hosts(self, cluster): - count = 0 - for node_group in cluster.node_groups: - count += node_group.count - - return count - - def _get_host_list(self, servers): - host_list = [server.instance.fqdn().lower() for server in servers] - return ",".join(host_list) - - def _get_rpm_uri(self, cluster_spec): - ambari_config = cluster_spec.configurations['ambari'] - return ambari_config.get('rpm', None) - - def get_ambari_info(self, cluster_spec): - ambari_host = cluster_spec.determine_component_hosts( - 'AMBARI_SERVER').pop() - - port = cluster_spec.configurations['ambari'].get( - 'server.port', '8080') - - return AmbariInfo(ambari_host, port, 'admin', 'admin') - - def 
_configure_topology_for_cluster(self, cluster, servers): - if CONF.enable_data_locality: - cpo.add_provisioning_step( - cluster.id, _("Enable data locality for cluster"), - len(servers)) - topology_data = th.generate_topology_map( - cluster, CONF.enable_hypervisor_awareness) - topology_str = "\n".join( - [k + " " + v for k, v in topology_data.items()]) + "\n" - for server in servers: - server.configure_topology(topology_str) - - def get_open_ports(self, node_group): - handler = self.version_factory.get_version_handler( - node_group.cluster.hadoop_version) - return handler.get_open_ports(node_group) - - -class AmbariInfo(object): - def __init__(self, host, port, user, password): - self.host = host - self.port = port - self.user = user - self.password = password - - def get_address(self): - return '{0}:{1}'.format(self.host.management_ip, self.port) - - def is_ambari_info(self): - pass - - def get_cluster(self): - sahara_instance = self.host.sahara_instance - return sahara_instance.cluster - - def get_event_info(self): - return self.host.sahara_instance diff --git a/sahara/plugins/hdp/clusterspec.py b/sahara/plugins/hdp/clusterspec.py deleted file mode 100644 index 1efc212808..0000000000 --- a/sahara/plugins/hdp/clusterspec.py +++ /dev/null @@ -1,385 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_log import log as logging -from oslo_serialization import jsonutils as json - -from sahara.i18n import _ -from sahara.plugins import exceptions as ex -from sahara.plugins.hdp.versions import versionhandlerfactory as vhf - - -LOG = logging.getLogger(__name__) - - -def validate_number_of_datanodes(cluster, scaled_groups, default_configs): - dfs_replication = 0 - for config in default_configs: - if config.name == 'dfs.replication': - dfs_replication = config.default_value - conf = cluster.cluster_configs - if 'HDFS' in conf and 'dfs.replication' in conf['HDFS']: - dfs_replication = conf['HDFS']['dfs.replication'] - - if not scaled_groups: - scaled_groups = {} - dn_count = 0 - for ng in cluster.node_groups: - if 'DATANODE' in ng.node_processes: - if ng.id in scaled_groups: - dn_count += scaled_groups[ng.id] - else: - dn_count += ng.count - - if dn_count < int(dfs_replication): - raise ex.InvalidComponentCountException( - 'datanode', _('%s or more') % dfs_replication, dn_count, - _('Number of %(dn)s instances should not be less ' - 'than %(replication)s') - % {'dn': 'DATANODE', 'replication': 'dfs.replication'}) - - -class ClusterSpec(object): - def __init__(self, config, version='2.0.6'): - self._config_template = config - self.services = [] - self.configurations = {} - self.node_groups = {} - self.version = version - self.user_input_handlers = {} - - cluster_template = json.loads(config) - self._parse_services(cluster_template) - self._parse_configurations(cluster_template) - self._process_node_groups(template_json=cluster_template) - - def create_operational_config(self, cluster, user_inputs, - scaled_groups=None): - if scaled_groups is None: - scaled_groups = {} - self._determine_deployed_services(cluster) - self._process_node_groups(cluster=cluster) - - for ng_id in scaled_groups: - existing = next(group for group in self.node_groups.values() - if group.id == ng_id) - existing.count = scaled_groups[ng_id] - - self.validate_node_groups(cluster) - self._finalize_ng_components() - self._parse_configurations(json.loads(self._config_template)) - self._process_user_inputs(user_inputs) - self._replace_config_tokens() - - def scale(self, updated_groups): - for ng_id in updated_groups: - existing = next(group for group in self.node_groups.values() - if group.id == ng_id) - existing.count = updated_groups[ng_id] - - def validate_node_groups(self, cluster): - for service in self.services: - if service.deployed: - service.validate(self, cluster) - elif service.is_mandatory(): - raise ex.RequiredServiceMissingException(service.name) - - def get_deployed_configurations(self): - configs = set() - for service in self.services: - if service.deployed: - configs |= service.configurations - - return configs - - def determine_component_hosts(self, component): - hosts = set() - for ng in self.node_groups.values(): - if component in ng.components: - hosts |= ng.instances - - return hosts - - def normalize(self): - return NormalizedClusterConfig(self) - - def get_deployed_node_group_count(self, name): - count = 0 - for ng in self.get_node_groups_containing_component(name): - count += ng.count - - return count - - def get_node_groups_containing_component(self, component): - found_node_groups = [] - for ng in self.node_groups.values(): - if component in ng.components: - found_node_groups.append(ng) - - return found_node_groups - - def get_components_for_type(self, type): - components = set() - for service in self.services: - for component in service.components: - if component.type == type: - 
components.add(component.name) - - return components - - def is_hdfs_ha_enabled(self, cluster): - if self.version == '2.0.6': - if cluster.cluster_configs.get('HDFSHA', False): - if cluster.cluster_configs.HDFSHA.get('hdfs.nnha', - False) is True: - return True - return False - - def _parse_services(self, template_json): - handler = (vhf.VersionHandlerFactory.get_instance(). - get_version_handler(self.version)) - sp = handler.get_services_processor() - for s in template_json['services']: - name = s['name'] - service = sp.create_service(name) - - self.services.append(service) - for c in s['components']: - component = Component(c['name'], c['type'], c['cardinality']) - service.add_component(component) - - if 'users' in s: - for u in s['users']: - user = User(u['name'], u['password'], u['groups']) - service.add_user(user) - - configs = self._parse_configurations(s) - for config in configs: - service.add_configuration(config) - - def _parse_configurations(self, template_json): - config_names = [] - for config in template_json['configurations']: - config_props = {} - name = config['name'] - config_names.append(name) - if name in self.configurations: - config_props = self.configurations[name] - else: - self.configurations[name] = config_props - - if 'properties' in config: - for prop in config['properties']: - config_props[prop['name']] = prop['value'] - - return config_names - - def _process_node_groups(self, template_json=None, cluster=None): - # get node_groups from config - if template_json and not cluster: - for group in template_json['host_role_mappings']: - node_group = NodeGroup(group['name']) - for component in group['components']: - node_group.add_component(component['name']) - for host in group['hosts']: - if 'predicate' in host: - node_group.predicate = host['predicate'] - if 'cardinality' in host: - node_group.cardinality = host['cardinality'] - if 'default_count' in host: - node_group.count = host['default_count'] - self.node_groups[node_group.name] = node_group - - if cluster: - self.node_groups = {} - node_groups = cluster.node_groups - for ng in node_groups: - node_group = NodeGroup(ng.name) - node_group.count = ng.count - node_group.id = ng.id - node_group.components = ng.node_processes[:] - for instance in ng.instances: - node_group.instances.add(Instance(instance)) - self.node_groups[node_group.name] = node_group - - def _determine_deployed_services(self, cluster): - for ng in cluster.node_groups: - for service in self.services: - if service.deployed: - continue - for sc in service.components: - if sc.name in ng.node_processes: - service.deployed = True - service.register_user_input_handlers( - self.user_input_handlers) - break - - def _process_user_inputs(self, user_inputs): - for ui in user_inputs: - # if it doesn't have a tag then it's not part of the - # operational config that Ambari knows about - if not hasattr(ui.config, 'tag'): - continue - user_input_handler = self.user_input_handlers.get( - '{0}/{1}'.format(ui.config.tag, ui.config.name), - self._default_user_input_handler) - - user_input_handler(ui, self.configurations) - - def _replace_config_tokens(self): - for service in self.services: - if service.deployed: - service.finalize_configuration(self) - - def _finalize_ng_components(self): - for service in self.services: - if service.deployed: - service.finalize_ng_components(self) - - def _default_user_input_handler(self, user_input, configurations): - config_map = configurations[user_input.config.tag] - config_map[user_input.config.name] = user_input.value - - 
-class Component(object): - def __init__(self, name, component_type, cardinality): - self.name = name - self.type = component_type - self.cardinality = cardinality - - -class NodeGroup(object): - def __init__(self, name): - self.id = None - self.name = name - self.components = [] - self.predicate = None - self.cardinality = None - self.count = None - self.instances = set() - - def add_component(self, component): - self.components.append(component) - - -class User(object): - def __init__(self, name, password, groups): - self.name = name - self.password = password - self.groups = groups - - -class Instance(object): - def __init__(self, sahara_instance): - self.inst_fqdn = sahara_instance.fqdn() - self.management_ip = sahara_instance.management_ip - self.internal_ip = sahara_instance.internal_ip - self.sahara_instance = sahara_instance - - def fqdn(self): - return self.inst_fqdn - - def remote(self): - return self.sahara_instance.remote() - - def __hash__(self): - return hash(self.fqdn()) - - def __eq__(self, other): - return self.fqdn() == other.fqdn() - - -class NormalizedClusterConfig(object): - def __init__(self, cluster_spec): - self.hadoop_version = cluster_spec.version - self.cluster_configs = [] - self.node_groups = [] - self.handler = (vhf.VersionHandlerFactory.get_instance(). - get_version_handler(self.hadoop_version)) - - self._parse_configurations(cluster_spec.configurations) - self._parse_node_groups(cluster_spec.node_groups) - - def _parse_configurations(self, configurations): - for config_name, properties in configurations.items(): - for prop, value in properties.items(): - target = self._get_property_target(prop) - if target: - prop_type = self._get_property_type(prop, value) - # TODO(sdpeidel): should we supply a scope? - self.cluster_configs.append( - NormalizedConfigEntry(NormalizedConfig( - prop, prop_type, value, target, 'cluster'), - value)) - - def _parse_node_groups(self, node_groups): - for node_group in node_groups.values(): - self.node_groups.append(NormalizedNodeGroup(node_group)) - - def _get_property_target(self, prop): - return self.handler.get_applicable_target(prop) - - def _get_property_type(self, prop, value): - # TODO(jspeidel): seems that all numeric prop values in default config - # are encoded as strings. This may be incorrect. 
- # TODO(jspeidel): should probably analyze string value to determine if - # it is numeric - # TODO(jspeidel): would then need to know whether Ambari expects a - # string or a numeric value - prop_type = type(value).__name__ - # print 'Type: {0}'.format(prop_type) - if prop_type == 'str' or prop_type == 'unicode' or value == '': - return 'string' - elif prop_type == 'int': - return 'integer' - elif prop_type == 'bool': - return 'boolean' - else: - raise ValueError( - _("Could not determine property type for property " - "'%(property)s' with value: %(value)s") % - {"property": prop, "value": value}) - - -class NormalizedConfig(object): - def __init__(self, name, config_type, default_value, target, scope): - self.name = name - self.description = None - self.type = config_type - self.default_value = default_value - self.is_optional = False - self.applicable_target = target - self.scope = scope - - -class NormalizedConfigEntry(object): - def __init__(self, config, value): - self.config = config - self.value = value - - -class NormalizedNodeGroup(object): - def __init__(self, node_group): - self.name = node_group.name - self.node_processes = node_group.components - self.node_configs = None - # TODO(jpseidel): should not have to specify img/flavor - self.img = None - # TODO(jmaron) the flavor will be set via an ambari blueprint setting, - # but that setting doesn't exist yet. It will be addressed by a bug - # fix shortly - self.flavor = 3 - self.count = node_group.count - self.id = node_group.id diff --git a/sahara/plugins/hdp/confighints_helper.py b/sahara/plugins/hdp/confighints_helper.py deleted file mode 100644 index c08dbb313d..0000000000 --- a/sahara/plugins/hdp/confighints_helper.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_serialization import jsonutils as json - -from sahara.service.edp.oozie.workflow_creator import workflow_factory -from sahara.utils import files as pkg - - -def get_possible_hive_config_from(file_name): - '''Return the possible configs, args, params for a Hive job.''' - config = { - 'configs': load_hadoop_json_for_tag(file_name, 'hive-site.xml'), - 'params': {} - } - return config - - -def get_possible_mapreduce_config_from(file_name): - '''Return the possible configs, args, params for a MapReduce job.''' - config = { - 'configs': get_possible_pig_config_from(file_name).get('configs') - } - config['configs'] += workflow_factory.get_possible_mapreduce_configs() - return config - - -def get_possible_pig_config_from(file_name): - '''Return the possible configs, args, params for a Pig job.''' - config = { - 'configs': load_hadoop_json_for_tag(file_name, 'mapred-site.xml'), - 'args': [], - 'params': {} - } - return config - - -def get_properties_for_tag(configurations, tag_name): - '''Get the properties for a tag - - Given a list of configurations, return the properties for the named tag. - If the named tag cannot be found returns an empty list. 
- - ''' - for obj in configurations: - if obj.get('tag') == tag_name: - return obj.get('properties') - return [] - - -def load_hadoop_json_for_tag(file_name, tag_name): - '''Given a file name and a tag, return the configs from that tag.''' - full_json = load_json_file(file_name) - properties = get_properties_for_tag(full_json['configurations'], tag_name) - configs = [] - for prop in properties: - configs.append({ - 'name': prop.get('name'), - 'value': prop.get('default_value'), - 'description': prop.get('description') - }) - return configs - - -def load_json_file(file_name): - '''Given a package relative json file name, return the json.''' - ftext = pkg.get_file_text(file_name) - loaded_json = json.loads(ftext) - return loaded_json diff --git a/sahara/plugins/hdp/configprovider.py b/sahara/plugins/hdp/configprovider.py deleted file mode 100644 index a5fe926fcb..0000000000 --- a/sahara/plugins/hdp/configprovider.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy - -from sahara import exceptions -from sahara.i18n import _ -from sahara.plugins import provisioning as p - - -HOST_REGISTRATIONS_TIMEOUT = p.Config( - 'Host registrations timeout', 'general', - 'cluster', config_type='int', priority=1, - default_value=3600, is_optional=True, - description='Timeout for host registrations, in seconds') - -DECOMMISSIONING_TIMEOUT = p.Config( - 'Timeout for decommissioning nodes', 'general', - 'cluster', config_type='int', priority=1, - default_value=1000, is_optional=True, - description='Timeout for decommissioning nodes, in seconds') - - -class ConfigurationProvider(object): - def __init__(self, config, hadoop_version): - self.config = config - self.config_mapper = {} - self.config_items = [] - self.hadoop_version = hadoop_version - self._initialize(config) - - def get_config_items(self): - return self.config_items - - def get_applicable_target(self, name): - return self.config_mapper.get(name) - - def _get_target(self, apptarget): - if apptarget == 'TODO': - apptarget = 'general' - - return apptarget - - def _initialize(self, config): - for configuration in self.config['configurations']: - for service_property in configuration['properties']: - config = p.Config(service_property['name'], - self._get_target( - service_property['applicable_target']), - service_property['scope'], - config_type=service_property['config_type'], - default_value=service_property - ['default_value'], - is_optional=service_property[ - 'is_optional'], - description=service_property[ - 'description']) - - setattr(config, 'tag', configuration['tag'].rsplit(".", 1)[0]) - self.config_items.append(config) - # TODO(jspeidel): an assumption is made that property names - # are unique across configuration sections which is dangerous - property_name = service_property['name'] - # if property already exists, throw an exception - if property_name in self.config_mapper: - # internal error - # ambari-config-resource contains duplicates - raise 
exceptions.InvalidDataException( - _('Internal Error. Duplicate property ' - 'name detected: %s') % property_name) - self.config_mapper[service_property['name']] = ( - self._get_target( - service_property['applicable_target'])) - host_reg_timeout = copy.copy(HOST_REGISTRATIONS_TIMEOUT) - setattr(host_reg_timeout, 'tag', 'global') - self.config_items.append(host_reg_timeout) - self.config_mapper[host_reg_timeout.name] = 'global' - if self.hadoop_version == '2.0.6': - dec_timeout = copy.copy(DECOMMISSIONING_TIMEOUT) - setattr(dec_timeout, 'tag', 'global') - self.config_items.append(dec_timeout) - self.config_mapper[dec_timeout.name] = 'global' diff --git a/sahara/plugins/hdp/edp_engine.py b/sahara/plugins/hdp/edp_engine.py deleted file mode 100644 index 2ad84726b3..0000000000 --- a/sahara/plugins/hdp/edp_engine.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2014 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from sahara.plugins import exceptions as ex -from sahara.plugins import utils as u -from sahara.service.edp.oozie import engine as edp_engine - - -class EdpOozieEngine(edp_engine.OozieJobEngine): - - def get_hdfs_user(self): - return 'hdfs' - - def get_name_node_uri(self, cluster): - hdfs = cluster['info']['HDFS'] - return hdfs.get('NameService', hdfs['NameNode']) - - def get_oozie_server_uri(self, cluster): - return cluster['info']['JobFlow']['Oozie'] + "/oozie/" - - def get_oozie_server(self, cluster): - return u.get_instance(cluster, "OOZIE_SERVER") - - def validate_job_execution(self, cluster, job, data): - oo_count = u.get_instances_count(cluster, 'OOZIE_SERVER') - if oo_count != 1: - raise ex.InvalidComponentCountException( - 'OOZIE_SERVER', '1', oo_count) - - super(EdpOozieEngine, self).validate_job_execution(cluster, job, data) diff --git a/sahara/plugins/hdp/hadoopserver.py b/sahara/plugins/hdp/hadoopserver.py deleted file mode 100644 index c2c55c2f17..0000000000 --- a/sahara/plugins/hdp/hadoopserver.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re - -from oslo_log import log as logging - -from sahara.i18n import _ -from sahara.i18n import _LI -from sahara.plugins import exceptions as ex -from sahara.plugins.hdp import saharautils -from sahara.utils import cluster_progress_ops as cpo -from sahara.utils import files as f - - -AMBARI_RPM = ('http://s3.amazonaws.com/public-repo-1.hortonworks.com/' - 'ambari/centos6/1.x/updates/1.6.0/ambari.repo') - -EPEL_RELEASE_PACKAGE_NAME = 'epel-release' - -HADOOP_SWIFT_RPM = ('https://s3.amazonaws.com/public-repo-1.hortonworks.com/' - 'sahara/swift/hadoop-swift-1.0-1.x86_64.rpm') - -HADOOP_SWIFT_LOCAL_RPM = ('/opt/hdp-local-repos/hadoop-swift/' - 'hadoop-swift-1.0-1.x86_64.rpm') - -LOG = logging.getLogger(__name__) - - -class HadoopServer(object): - _master_ip = None - - def __init__(self, instance, node_group, ambari_rpm=None): - self.instance = instance - self.node_group = node_group - self.ambari_rpm = ambari_rpm or AMBARI_RPM - - def get_event_info(self): - return self.instance - - @property - def cluster_id(self): - return self.instance.cluster_id - - @cpo.event_wrapper(True, param=('self', 0)) - def provision_ambari(self, ambari_info, cluster_spec): - self.install_rpms() - global_config = cluster_spec.configurations['global'] - jdk_path = global_config.get('java64_home') - if 'AMBARI_SERVER' in self.node_group.components: - self._setup_and_start_ambari_server(ambari_info.port, jdk_path) - - # all nodes must run Ambari agent - self._setup_and_start_ambari_agent(ambari_info.host.internal_ip) - - @saharautils.inject_remote('r') - def rpms_installed(self, r): - rpm_cmd = 'rpm -q %s' % EPEL_RELEASE_PACKAGE_NAME - ret_code, stdout = r.execute_command(rpm_cmd, - run_as_root=True, - raise_when_error=False) - return ret_code == 0 - - @saharautils.inject_remote('r') - def install_rpms(self, r): - LOG.debug("Installing rpm's") - - # TODO(jspeidel): based on image type, use correct command - curl_cmd = ('curl -f -s -o /etc/yum.repos.d/ambari.repo %s' % - self.ambari_rpm) - ret_code, stdout = r.execute_command(curl_cmd, - run_as_root=True, - raise_when_error=False) - if ret_code == 0: - yum_cmd = 'yum -y install %s' % EPEL_RELEASE_PACKAGE_NAME - r.execute_command(yum_cmd, run_as_root=True) - else: - LOG.debug("Unable to install rpm's from repo, " - "checking for local install.") - - if not self.rpms_installed(): - raise ex.HadoopProvisionError( - _('Failed to install Hortonworks Ambari')) - - @cpo.event_wrapper(True, param=('self', 0)) - @saharautils.inject_remote('r') - def install_swift_integration(self, r): - LOG.debug("Installing swift integration") - base_rpm_cmd = 'rpm -U --quiet ' - rpm_cmd = base_rpm_cmd + HADOOP_SWIFT_RPM - ret_code, stdout = r.execute_command(rpm_cmd, - run_as_root=True, - raise_when_error=False) - if ret_code != 0: - LOG.debug("Unable to install swift integration from " - "source, checking for local rpm.") - ret_code, stdout = r.execute_command( - 'ls ' + HADOOP_SWIFT_LOCAL_RPM, - run_as_root=True, - raise_when_error=False) - if ret_code == 0: - rpm_cmd = base_rpm_cmd + HADOOP_SWIFT_LOCAL_RPM - r.execute_command(rpm_cmd, run_as_root=True) - else: - raise ex.HadoopProvisionError( - _('Failed to install Hadoop Swift integration')) - - @cpo.event_wrapper(True, param=('self', 0)) - @saharautils.inject_remote('r') - def configure_topology(self, topology_str, r): - r.write_file_to( - '/etc/hadoop/conf/topology.sh', - f.get_file_text( - 'plugins/hdp/versions/version_2_0_6/resources/topology.sh')) - r.execute_command( - 'chmod +x /etc/hadoop/conf/topology.sh', 
run_as_root=True - ) - r.write_file_to('/etc/hadoop/conf/topology.data', topology_str) - - @saharautils.inject_remote('r') - def _setup_and_start_ambari_server(self, port, jdk_path, r): - LOG.debug('Installing ambari-server') - r.execute_command('yum -y install ambari-server', run_as_root=True) - - LOG.debug('Running Ambari Server setup') - # remove postgres data directory as a precaution since its existence - # has prevented successful postgres installation - r.execute_command('rm -rf /var/lib/pgsql/data', run_as_root=True) - - # determine if the JDK is installed on this image - # in the case of the plain image, no JDK will be available - return_code, stdout = r.execute_command('ls -l {jdk_location}'.format( - jdk_location=jdk_path), raise_when_error=False) - - LOG.debug('Queried for JDK location on VM instance, return code = ' - '{code}'.format(code=str(return_code))) - - # do silent setup since we only use default responses now - # only add -j command if the JDK is configured for the template, - # and if the JDK is present - # in all other cases, allow Ambari to install the JDK - r.execute_command( - 'ambari-server setup -s {jdk_arg} > /dev/null 2>&1'.format( - jdk_arg='-j ' + jdk_path if jdk_path and (return_code == 0) - else ''), - run_as_root=True, timeout=1800 - ) - - self._configure_ambari_server_api_port(port) - - # NOTE(dmitryme): Reading stdout from 'ambari-server start' - # hangs ssh. Redirecting output to /dev/null fixes that - r.execute_command( - 'ambari-server start > /dev/null 2>&1', run_as_root=True - ) - LOG.info(_LI('Ambari started')) - - @saharautils.inject_remote('r') - def _configure_ambari_server_api_port(self, port, r): - # do nothing if port is not specified or is default - if port is None or port == 8080: - return - - ambari_config_file = '/etc/ambari-server/conf/ambari.properties' - LOG.debug('Configuring Ambari Server API port: {port}'.format( - port=port)) - # read the current contents - data = r.read_file_from(ambari_config_file) - data = '{0}\nclient.api.port={1}\n'.format(data, port) - - # write the file back - r.write_file_to(ambari_config_file, data, run_as_root=True) - - @saharautils.inject_remote('r') - def _setup_and_start_ambari_agent(self, ambari_server_ip, r): - LOG.debug('Installing Ambari agent') - - r.execute_command('yum -y install ambari-agent', run_as_root=True) - LOG.debug( - 'Setting master-ip: {ip} in ambari-agent.ini'.format( - ip=ambari_server_ip)) - r.replace_remote_string( - '/etc/ambari-agent/conf/ambari-agent.ini', 'localhost', - ambari_server_ip) - - # If the HDP 2 ambari agent is pre-installed on an image, the agent - # will start up during instance launch and therefore the agent - # registration will fail. 
It is therefore more appropriate to call - # restart since it will either start (if stopped) or restart (if - # running) - r.execute_command('ambari-agent restart', run_as_root=True) - LOG.info(_LI('Ambari Agent started')) - - @saharautils.inject_remote('r') - def set_namenode_safemode(self, jh, r): - r.execute_command("sudo su -l hdfs -c 'JAVA_HOME={0} " - "hdfs dfsadmin -safemode enter'".format(jh), - run_as_root=True) - - @saharautils.inject_remote('r') - def save_namenode_namespace(self, jh, r): - r.execute_command("sudo su -l hdfs -c 'JAVA_HOME={0} " - "hdfs dfsadmin -saveNamespace'".format(jh), - run_as_root=True) - - @saharautils.inject_remote('r') - def initialize_shared_edits(self, jh, r): - r.execute_command("sudo su -l hdfs -c 'JAVA_HOME={0} " - "hdfs namenode -initializeSharedEdits'".format(jh), - run_as_root=True) - - @saharautils.inject_remote('r') - def format_zookeeper_fc(self, jh, r): - r.execute_command("sudo su -l hdfs -c 'JAVA_HOME={0} " - "hdfs zkfc -formatZK'".format(jh), - run_as_root=True) - - @saharautils.inject_remote('r') - def bootstrap_standby_namenode(self, jh, r): - r.execute_command("sudo su -l hdfs -c 'JAVA_HOME={0} " - "hdfs namenode -bootstrapStandby'".format(jh), - run_as_root=True) - - @saharautils.inject_remote('r') - def install_httpfs(self, r): - r.execute_command("yum -y install hadoop-httpfs", run_as_root=True) - - @saharautils.inject_remote('r') - def start_httpfs(self, r): - r.execute_command("service hadoop-httpfs start", run_as_root=True) - - @saharautils.inject_remote('r') - def write_hue_temp_file(self, filename, content, r): - r.execute_command("echo %s > %s" % (content, filename), - run_as_root=True) - - def _log(self, buf): - # FIXME(Kezar): I don't know what's this. Will check later. - LOG.debug(buf) - - def _is_component_available(self, component): - return component in self.node_group.components - - def _is_ganglia_master(self): - return self._is_component_available('GANGLIA_SERVER') - - def _is_ganglia_slave(self): - return self._is_component_available('GANGLIA_MONITOR') - - -class DefaultPromptMatcher(object): - prompt_pattern = re.compile('(.*\()(.)(\)\?\s*$)', re.DOTALL) - - def __init__(self, terminal_token): - self.eof_token = terminal_token - - def get_response(self, s): - match = self.prompt_pattern.match(s) - if match: - response = match.group(2) - return response - else: - return None - - def is_eof(self, s): - eof = self.eof_token in s - return eof diff --git a/sahara/plugins/hdp/saharautils.py b/sahara/plugins/hdp/saharautils.py deleted file mode 100644 index 02d356770c..0000000000 --- a/sahara/plugins/hdp/saharautils.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -def get_host_role(host): - if hasattr(host, 'role'): - return host.role - else: - return host.node_group.name - - -def inject_remote(param_name): - def handle(func): - def call(self, *args, **kwargs): - with self.instance.remote() as r: - newkwargs = kwargs.copy() - newkwargs[param_name] = r - return func(self, *args, **newkwargs) - - return call - return handle diff --git a/sahara/plugins/hdp/versions/__init__.py b/sahara/plugins/hdp/versions/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sahara/plugins/hdp/versions/abstractversionhandler.py b/sahara/plugins/hdp/versions/abstractversionhandler.py deleted file mode 100644 index abd8164464..0000000000 --- a/sahara/plugins/hdp/versions/abstractversionhandler.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class AbstractVersionHandler(object): - - @abc.abstractmethod - def get_config_items(self): - return - - @abc.abstractmethod - def get_applicable_target(self, name): - return - - @abc.abstractmethod - def get_cluster_spec(self, cluster, user_inputs, scaled_groups=None, - cluster_template=None): - return - - @abc.abstractmethod - def get_ambari_client(self): - return - - @abc.abstractmethod - def get_default_cluster_configuration(self): - return - - @abc.abstractmethod - def get_node_processes(self): - return - - @abc.abstractmethod - def install_swift_integration(self, servers): - return - - @abc.abstractmethod - def get_version(self): - return - - @abc.abstractmethod - def get_services_processor(self): - return - - @abc.abstractmethod - def get_edp_engine(self, cluster, job_type): - return - - @abc.abstractmethod - def get_edp_job_types(self): - return [] - - @abc.abstractmethod - def get_edp_config_hints(self, job_type): - return {} - - @abc.abstractmethod - def get_open_ports(self, node_group): - return [] diff --git a/sahara/plugins/hdp/versions/version_2_0_6/__init__.py b/sahara/plugins/hdp/versions/version_2_0_6/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sahara/plugins/hdp/versions/version_2_0_6/edp_engine.py b/sahara/plugins/hdp/versions/version_2_0_6/edp_engine.py deleted file mode 100644 index 17923ffcb5..0000000000 --- a/sahara/plugins/hdp/versions/version_2_0_6/edp_engine.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2014 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from sahara.plugins.hdp import confighints_helper as ch_helper -from sahara.plugins.hdp import edp_engine -from sahara.service.edp import hdfs_helper -from sahara.utils import edp - - -class EdpOozieEngine(edp_engine.EdpOozieEngine): - - def create_hdfs_dir(self, remote, dir_name): - hdfs_helper.create_dir_hadoop2(remote, dir_name, self.get_hdfs_user()) - - @staticmethod - def get_possible_job_config(job_type): - if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE): - return {'job_config': ch_helper.get_possible_hive_config_from( - 'plugins/hdp/versions/version_2_0_6/resources/' - 'ambari-config-resource.json')} - if edp.compare_job_type(job_type, - edp.JOB_TYPE_MAPREDUCE, - edp.JOB_TYPE_MAPREDUCE_STREAMING): - return {'job_config': ch_helper.get_possible_mapreduce_config_from( - 'plugins/hdp/versions/version_2_0_6/resources/' - 'ambari-config-resource.json')} - if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG): - return {'job_config': ch_helper.get_possible_pig_config_from( - 'plugins/hdp/versions/version_2_0_6/resources/' - 'ambari-config-resource.json')} - return edp_engine.EdpOozieEngine.get_possible_job_config(job_type) - - def get_resource_manager_uri(self, cluster): - return cluster['info']['Yarn']['ResourceManager'] diff --git a/sahara/plugins/hdp/versions/version_2_0_6/resources/ambari-config-resource.json b/sahara/plugins/hdp/versions/version_2_0_6/resources/ambari-config-resource.json deleted file mode 100644 index 2bca1af81e..0000000000 --- a/sahara/plugins/hdp/versions/version_2_0_6/resources/ambari-config-resource.json +++ /dev/null @@ -1,3536 +0,0 @@ -{ - "configurations": [ - { - "tag": "global.xml", - "properties": [ - { - "default_value": "/etc/ganglia/hdp", - "description": "", - "config_type": "string", - "applicable_target": "GANGLIA", - "is_optional": true, - "scope": "cluster", - "name": "ganglia_conf_dir" - }, - { - "default_value": "/var/run/ganglia/hdp", - "description": "", - "config_type": "string", - "applicable_target": "GANGLIA", - "is_optional": true, - "scope": "cluster", - "name": "ganglia_runtime_dir" - }, - { - "default_value": "nobody", - "description": "", - "config_type": "string", - "applicable_target": "GANGLIA", - "is_optional": true, - "scope": "cluster", - "name": "gmetad_user" - }, - { - "default_value": "nobody", - "description": "", - "config_type": "string", - "applicable_target": "GANGLIA", - "is_optional": true, - "scope": "cluster", - "name": "gmond_user" - }, - { - "default_value": "/var/lib/ganglia/rrds", - "description": "Location of rrd files.", - "config_type": "string", - "applicable_target": "GANGLIA", - "is_optional": true, - "scope": "cluster", - "name": "rrdcached_base_dir" - }, - { - "default_value": "100", - "description": "Base Client Scanner Caching", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "client_scannercaching" - }, - { - "default_value": "/etc/hbase", - "description": "Config Directory for HBase.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase_conf_dir" - }, - { - "default_value": "/apps/hbase/data", - "description": "HBase Relative Path to HDFS.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase_hdfs_root_dir" - }, - { - "default_value": "/var/log/hbase", - "description": "Log Directories for HBase.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": 
"cluster", - "name": "hbase_log_dir" - }, - { - "default_value": "1024", - "description": "HBase Master Heap Size", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase_master_heapsize" - }, - { - "default_value": "/var/run/hbase", - "description": "Log Directories for HBase.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase_pid_dir" - }, - { - "default_value": "1024", - "description": "Log Directories for HBase.", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase_regionserver_heapsize" - }, - { - "default_value": "hbase", - "description": "HBase User Name.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase_user" - }, - { - "default_value": "true", - "description": "HDFS Short Circuit Read", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hdfs_enable_shortcircuit_read" - }, - { - "default_value": "true", - "description": "HDFS append support", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hdfs_support_append" - }, - { - "default_value": "0.40", - "description": "HFile block cache size.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hfile_blockcache_size" - }, - { - "default_value": "10485760", - "description": "HBase Client Maximum key-value Size", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hfile_max_keyvalue_size" - }, - { - "default_value": "2", - "description": "HBase Region Block Multiplier", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hregion_blockmultiplier" - }, - { - "default_value": "604800000", - "description": "The time between major compactions of all HStoreFiles in a region. 
Set to 0 to disable automated major compactions.", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hregion_majorcompaction" - }, - { - "default_value": "10", - "description": "HStore blocking storefiles.", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hstore_blockingstorefiles" - }, - { - "default_value": "3", - "description": "HBase HStore compaction threshold.", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hstore_compactionthreshold" - }, - { - "default_value": "10737418240", - "description": "Maximum HStoreFile Size", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hstorefile_maxsize" - }, - { - "default_value": "60", - "description": "HBase RegionServer Handler", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "regionserver_handlers" - }, - { - "default_value": "true", - "description": "Region Server memstore.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "regionserver_memstore_lab" - }, - { - "default_value": "0.38", - "description": "Region Server memstore lower limit.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "regionserver_memstore_lowerlimit" - }, - { - "default_value": "0.4", - "description": "Region Server memstore upper limit.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "regionserver_memstore_upperlimit" - }, - { - "default_value": "30000", - "description": "ZooKeeper Session Timeout", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "zookeeper_sessiontimeout" - }, - { - "default_value": "1073741824", - "description": "Reserved space for HDFS", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "datanode_du_reserved" - }, - { - "default_value": "hbase", - "description": "Default Block Replication.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_block_local_path_access_user" - }, - { - "default_value": "50010", - "description": "Port for datanode address.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_datanode_address" - }, - { - "default_value": "/hadoop/hdfs/data", - "description": "Data directories for Data Nodes.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_datanode_data_dir" - }, - { - "default_value": "750", - "description": "Datanode dir perms.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_datanode_data_dir_perm" - }, - { - "default_value": "0", - "description": "DataNode volumes failure toleration", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_datanode_failed_volume_tolerated" - }, - { - "default_value": "50075", - "description": "Port for datanode address.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": 
"dfs_datanode_http_address" - }, - { - "default_value": "/hadoop/hdfs/namesecondary", - "description": "Secondary NameNode checkpoint dir.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_namenode_checkpoint_dir" - }, - { - "default_value": "21600", - "description": "HDFS Maximum Checkpoint Delay", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_namenode_checkpoint_period" - }, - { - "default_value": "/hadoop/hdfs/namenode", - "description": "NameNode Directories.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_namenode_name_dir" - }, - { - "default_value": "3", - "description": "Default Block Replication.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_replication" - }, - { - "default_value": "true", - "description": "WebHDFS enabled", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs_webhdfs_enabled" - }, - { - "default_value": "1024", - "description": "DataNode maximum Java heap size", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dtnode_heapsize" - }, - { - "default_value": "0.5", - "description": "FS Checkpoint Size.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "fs_checkpoint_size" - }, - { - "default_value": "1024", - "description": "Hadoop maximum Java heap size", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "hadoop_heapsize" - }, - { - "default_value": "/var/run/hadoop", - "description": "Hadoop PID Dir Prefix", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "hadoop_pid_dir_prefix" - }, - { - "default_value": "/var/log/hadoop", - "description": "Hadoop Log Dir Prefix", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "hdfs_log_dir_prefix" - }, - { - "default_value": "hdfs", - "description": "User and Groups.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "hdfs_user" - }, - { - "default_value": "EXAMPLE.COM", - "description": "Kerberos realm.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "kerberos_domain" - }, - { - "default_value": "/etc/security/keytabs", - "description": "Kerberos keytab path.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "keytab_path" - }, - { - "default_value": "true", - "description": "LZO compression enabled", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "lzo_enabled" - }, - { - "default_value": "/var/run/hadoop/hdfs/namenode/formatted/", - "description": "Formatteed Mark Directory.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "namenode_formatted_mark_dir" - }, - { - "default_value": "1024", - "description": "NameNode Java heap size", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": 
"namenode_heapsize" - }, - { - "default_value": "200", - "description": "NameNode maximum new generation size", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "namenode_opt_maxnewsize" - }, - { - "default_value": "200", - "description": "NameNode new generation size", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "namenode_opt_newsize" - }, - { - "default_value": "users", - "description": "Proxy user group.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "proxyuser_group" - }, - { - "default_value": "false", - "description": "Hadoop Security", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security_enabled" - }, - { - "default_value": "/var/log/hadoop-mapreduce", - "description": "Mapreduce Log Dir Prefix", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapred_log_dir_prefix" - }, - { - "default_value": "/var/run/hadoop-mapreduce", - "description": "Mapreduce PID Dir Prefix", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapred_pid_dir_prefix" - }, - { - "default_value": "mapred", - "description": "Mapreduce User", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapred_user" - }, - { - "default_value": "nagios", - "description": "", - "config_type": "string", - "applicable_target": "NAGIOS", - "is_optional": true, - "scope": "cluster", - "name": "nagios_group" - }, - { - "default_value": "nagios", - "description": "Nagios process user.", - "config_type": "string", - "applicable_target": "NAGIOS", - "is_optional": true, - "scope": "cluster", - "name": "nagios_user" - }, - { - "default_value": "nagiosadmin", - "description": "Web user name.", - "config_type": "string", - "applicable_target": "NAGIOS", - "is_optional": true, - "scope": "cluster", - "name": "nagios_web_login" - }, - { - "default_value": "/var/log/webhcat", - "description": "", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "hcat_log_dir" - }, - { - "default_value": "/var/run/webhcat", - "description": "", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "hcat_pid_dir" - }, - { - "default_value": "hcat", - "description": "", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "hcat_user" - }, - { - "default_value": "hcat", - "description": "", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "webhcat_user" - }, - { - "default_value": "1024", - "description": "Max heapsize for NodeManager using a numerical value in the scale of MB", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "nodemanager_heapsize" - }, - { - "default_value": "1024", - "description": "Max heapsize for ResourceManager using a numerical value in the scale of MB", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "resourcemanager_heapsize" - }, - { - "default_value": "1024", - "description": "Max heapsize for 
all YARN components using a numerical value in the scale of MB", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn_heapsize" - }, - { - "default_value": "/var/log/hadoop-yarn", - "description": "YARN Log Dir Prefix", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn_log_dir_prefix" - }, - { - "default_value": "/var/run/hadoop-yarn", - "description": "YARN PID Dir Prefix", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn_pid_dir_prefix" - }, - { - "default_value": "yarn", - "description": "YARN User", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn_user" - }, - { - "default_value": "2181", - "description": "Port for running ZK Server.", - "config_type": "int", - "applicable_target": "ZOOKEEPER", - "is_optional": true, - "scope": "cluster", - "name": "clientPort" - }, - { - "default_value": "10", - "description": "Ticks to allow for sync at Init.", - "config_type": "int", - "applicable_target": "ZOOKEEPER", - "is_optional": true, - "scope": "cluster", - "name": "initLimit" - }, - { - "default_value": "5", - "description": "Ticks to allow for sync at Runtime.", - "config_type": "int", - "applicable_target": "ZOOKEEPER", - "is_optional": true, - "scope": "cluster", - "name": "syncLimit" - }, - { - "default_value": "2000", - "description": "The length of a single tick in milliseconds,which is the basic time unit used by ZooKeeper", - "config_type": "int", - "applicable_target": "ZOOKEEPER", - "is_optional": true, - "scope": "cluster", - "name": "tickTime" - }, - { - "default_value": "/hadoop/zookeeper", - "description": "Data directory for ZooKeeper.", - "config_type": "string", - "applicable_target": "ZOOKEEPER", - "is_optional": true, - "scope": "cluster", - "name": "zk_data_dir" - }, - { - "default_value": "/var/log/zookeeper", - "description": "ZooKeeper Log Dir", - "config_type": "string", - "applicable_target": "ZOOKEEPER", - "is_optional": true, - "scope": "cluster", - "name": "zk_log_dir" - }, - { - "default_value": "/var/run/zookeeper", - "description": "ZooKeeper Pid Dir", - "config_type": "string", - "applicable_target": "ZOOKEEPER", - "is_optional": true, - "scope": "cluster", - "name": "zk_pid_dir" - }, - { - "default_value": "/var/run/zookeeper/zookeeper_server.pid", - "description": "ZooKeeper Pid File", - "config_type": "string", - "applicable_target": "ZOOKEEPER", - "is_optional": true, - "scope": "cluster", - "name": "zk_pid_file" - }, - { - "default_value": "zookeeper", - "description": "ZooKeeper User.", - "config_type": "string", - "applicable_target": "ZOOKEEPER", - "is_optional": true, - "scope": "cluster", - "name": "zk_user" - }, - { - "default_value": "/var/run/oozie", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie_pid_dir" - }, - { - "default_value": "/var/run/hive", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_pid_dir" - }, - { - "default_value": "localhost", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie_hostname" - }, - { - "default_value": "ambari-qa", - "description": "...", - "config_type": "string", - 
"applicable_target": "general", - "is_optional": true, - "scope": "cluster", - "name": "smokeuser" - }, - { - "default_value": "MySQL", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_ambari_database" - }, - { - "default_value": "/var/log/oozie", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie_log_dir" - }, - { - "default_value": "com.mysql.jdbc.Driver", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_jdbc_driver" - }, - { - "default_value": "oozie", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie_user" - }, - { - "default_value": "", - "description": "...", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "hcat_conf_dir" - }, - { - "default_value": "/hadoop/oozie/data", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie_data_dir" - }, - { - "default_value": "hive", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_user" - }, - { - "default_value": "localhost", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_hostname" - }, - { - "default_value": "9083", - "description": "...", - "config_type": "int", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_metastore_port" - }, - { - "default_value": "mysql", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_database_type" - }, - { - "default_value": "New Derby Database", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie_database" - }, - { - "default_value": "admin@nowhere.com", - "description": "...", - "config_type": "string", - "applicable_target": "NAGIOS", - "is_optional": true, - "scope": "cluster", - "name": "nagios_contact" - }, - { - "default_value": "New MySQL Database", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_database" - }, - { - "default_value": "admin", - "description": "...", - "config_type": "string", - "applicable_target": "NAGIOS", - "is_optional": true, - "scope": "cluster", - "name": "nagios_web_password" - }, - { - "default_value": "Derby", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie_derby_database" - }, - { - "default_value": "derby", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie_database_type" - }, - { - "default_value": "org.apache.derby.jdbc.EmbeddedDriver", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie_jdbc_driver" - }, - { - "default_value": "/var/log/hive", - "description": "...", - 
"config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_log_dir" - }, - { - "default_value": "hadoop", - "description": "...", - "config_type": "string", - "applicable_target": "general", - "is_optional": true, - "scope": "cluster", - "name": "user_group" - }, - { - "default_value": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive_aux_jars_path" - } - ] - }, - { - "tag": "hbase-site.xml", - "properties": [ - { - "default_value": "/var/lib/hadoop-hdfs/dn_socket", - "description": "Path to domain socket.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "dfs.domain.socket.path" - }, - { - "default_value": "10485760", - "description": "Specifies the combined maximum allowed size of a KeyValue\n instance. This is to set an upper boundary for a single entry saved in a\n storage file. Since they cannot be split it helps avoiding that a region\n cannot be split any further because the data is too large. It seems wise\n to set this to a fraction of the maximum region size. Setting it to zero\n or less disables the check.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.client.keyvalue.maxsize" - }, - { - "default_value": "100", - "description": "Number of rows that will be fetched when calling next\n on a scanner if it is not served from (local,client) memory. Higher\n caching values will enable faster scanners but will eat up more memory\n and some calls of next may take longer and longer times when the cache is empty.\n Do not set this value such that the time between invocations is greater\n than the scanner timeout; i.e. hbase.regionserver.lease.period\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.client.scanner.caching" - }, - { - "default_value": "true", - "description": "The mode the cluster will be in. Possible values are\n false for standalone mode and true for distributed mode. If\n false,startup will run all HBase and ZooKeeper daemons together\n in the one JVM.\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.cluster.distributed" - }, - { - "default_value": "true", - "description": "Disables version verification.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.defaults.for.version.skip" - }, - { - "default_value": "86400000", - "description": "The time (in milliseconds) between 'major' compactions of all\n HStoreFiles in a region. Default: 1 day.\n Set to 0 to disable automated major compactions.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.hregion.majorcompaction" - }, - { - "default_value": "10737418240", - "description": "\n Maximum HStoreFile size. 
If any one of a column families' HStoreFiles has\n grown to exceed this value,the hosting HRegion is split in two.\n Default: 1G.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.hregion.max.filesize" - }, - { - "default_value": "2", - "description": "Block updates if memstore has hbase.hregion.memstore.block.multiplier\n time hbase.hregion.flush.size bytes. Useful preventing\n runaway memstore during spikes in update traffic. Without an\n upper-bound,memstore fills such that when it flushes the\n resultant flush files take a long time to compact or split,or\n worse,we OOME\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.hregion.memstore.block.multiplier" - }, - { - "default_value": "134217728", - "description": "\n Memstore will be flushed to disk if size of the memstore\n exceeds this number of bytes. Value is checked by a thread that runs\n every hbase.server.thread.wakefrequency.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.hregion.memstore.flush.size" - }, - { - "default_value": "true", - "description": "\n Enables the MemStore-Local Allocation Buffer,\n a feature which works to prevent heap fragmentation under\n heavy write loads. This can reduce the frequency of stop-the-world\n GC pauses on large heaps.\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.hregion.memstore.mslab.enabled" - }, - { - "default_value": "10", - "description": "\n If more than this number of StoreFiles in any one Store\n (one StoreFile is written per flush of MemStore) then updates are\n blocked for this HRegion until a compaction is completed,or\n until hbase.hstore.blockingWaitTime has been exceeded.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.hstore.blockingStoreFiles" - }, - { - "default_value": "3", - "description": "\n If more than this number of HStoreFiles in any one HStore\n (one HStoreFile is written per flush of memstore) then a compaction\n is run to rewrite all HStoreFiles files as one. Larger numbers\n put off compaction but when it runs,it takes longer to complete.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.hstore.compactionThreshold" - }, - { - "default_value": "120", - "description": "\n The number of times the region flush operation will be retried.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.hstore.flush.retries.number" - }, - { - "default_value": "0.38", - "description": "When memstores are being forced to flush to make room in\n memory,keep flushing until we hit this mark. Defaults to 35% of heap.\n This value equal to hbase.regionserver.global.memstore.upperLimit causes\n the minimum possible flushing to occur when updates are blocked due to\n memstore limiting.\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.regionserver.global.memstore.lowerLimit" - }, - { - "default_value": "0.4", - "description": "Maximum size of all memstores in a region server before new\n updates are blocked and flushes are forced. 
Defaults to 40% of heap\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.regionserver.global.memstore.upperLimit" - }, - { - "default_value": "60", - "description": "Count of RPC Listener instances spun up on RegionServers.\n Same property is used by the Master for count of master handlers.\n Default is 10.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.regionserver.handler.count" - }, - { - "default_value": "hdfs://localhost:8020/apps/hbase/data", - "description": "The directory shared by region servers and into\n which HBase persists. The URL should be 'fully-qualified'\n to include the filesystem scheme. For example,to specify the\n HDFS directory '/hbase' where the HDFS instance's namenode is\n running at namenode.example.org on port 9000,set this value to:\n hdfs://namenode.example.org:9000/hbase. By default HBase writes\n into /tmp. Change this configuration else all data will be lost\n on machine restart.\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.rootdir" - }, - { - "default_value": "simple", - "description": "...", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.security.authentication" - }, - { - "default_value": "false", - "description": "Enables HBase authorization. Set the value of this property to false to disable HBase authorization.\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.security.authorization" - }, - { - "default_value": "hbase", - "description": "List of users or groups (comma-separated),who are allowed\n full privileges,regardless of stored ACLs,across the cluster.\n Only used when HBase security is enabled.\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.superuser" - }, - { - "default_value": "/hadoop/hbase", - "description": "Temporary directory on the local filesystem.\n Change this setting to point to a location more permanent\n than '/tmp' (The '/tmp' directory is often cleared on\n machine restart).\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.tmp.dir" - }, - { - "default_value": "2181", - "description": "Property from ZooKeeper's config zoo.cfg.\n The port at which the clients will connect.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.zookeeper.property.clientPort" - }, - { - "default_value": "localhost", - "description": "Comma separated list of servers in the ZooKeeper Quorum.\n For example,\"host1.mydomain.com,host2.mydomain.com,host3.mydomain.com\".\n By default this is set to localhost for local and pseudo-distributed modes\n of operation. For a fully-distributed setup,this should be set to a full\n list of ZooKeeper quorum servers. 
If HBASE_MANAGES_ZK is set in hbase-env.sh\n this is the list of servers which we will start/stop ZooKeeper on.\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.zookeeper.quorum" - }, - { - "default_value": "true", - "description": "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).\u00b7\n IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will\n not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hbase.zookeeper.useMulti" - }, - { - "default_value": "0.40", - "description": "\n Percentage of maximum heap (-Xmx setting) to allocate to block cache\n used by HFile/StoreFile. Default of 0.25 means allocate 25%.\n Set to 0 to disable but it's not recommended.\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "hfile.block.cache.size" - }, - { - "default_value": "30000", - "description": "ZooKeeper session timeout.\n HBase passes this to the zk quorum as suggested maximum time for a\n session (This setting becomes zookeeper's 'maxSessionTimeout'). See\n http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions\n \"The client sends a requested timeout,the server responds with the\n timeout that it can give the client. \" In milliseconds.\n ", - "config_type": "int", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "zookeeper.session.timeout" - }, - { - "default_value": "/hbase-unsecure", - "description": "Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper\n files that are configured with a relative path will go under this node.\n By default,all of HBase's ZooKeeper file path are configured with a\n relative path,so they will all go under this directory unless changed.\n ", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "zookeeper.znode.parent" - } - ] - }, - { - "tag": "hbase-policy.xml", - "properties": [ - { - "default_value": "*", - "description": "ACL for HMasterInterface protocol implementation (ie. \n clients talking to HMaster for admin operations).\n The ACL is a comma-separated list of user and group names. The user and \n group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "security.admin.protocol.acl" - }, - { - "default_value": "*", - "description": "ACL for HMasterRegionInterface protocol implementations\n (for HRegionServers communicating with HMaster)\n The ACL is a comma-separated list of user and group names. The user and \n group list is separated by a blank. For e.g. \"alice,bob users,wheel\". 
\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HBASE", - "is_optional": true, - "scope": "cluster", - "name": "security.masterregion.protocol.acl" - } - ] - }, - { - "tag": "hdfs-site.xml", - "properties": [ - { - "default_value": "true", - "description": "\nIf \"true\",access tokens are used as capabilities for accessing datanodes.\nIf \"false\",no access tokens are checked on accessing datanodes.\n", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.block.access.token.enable" - }, - { - "default_value": "120", - "description": "Delay for first block report in seconds.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.blockreport.initialDelay" - }, - { - "default_value": "134217728", - "description": "The default block size for new files.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.blocksize" - }, - { - "default_value": "true", - "description": "\n This configuration parameter turns on short-circuit local reads.\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.client.read.shortcircuit" - }, - { - "default_value": "4096", - "description": "\n The DFSClient maintains a cache of recently opened file descriptors. This\n parameter controls the size of that cache. Setting this higher will use\n more file descriptors,but potentially provide better performance on\n workloads involving lots of seeks.\n ", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.client.read.shortcircuit.streams.cache.size" - }, - { - "default_value": " hdfs", - "description": "ACL for who all can view the default servlets in the HDFS", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.cluster.administrators" - }, - { - "default_value": "0.0.0.0:50010", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.datanode.address" - }, - { - "default_value": "6250000", - "description": "\n Specifies the maximum amount of bandwidth that each datanode\n can utilize for the balancing purpose in term of\n the number of bytes per second.\n ", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.datanode.balance.bandwidthPerSec" - }, - { - "default_value": "750", - "description": "The permissions that should be there on dfs.datanode.data.dir\ndirectories. The datanode will not come up if the permissions are\ndifferent on existing dfs.datanode.data.dir directories. If the directories\ndon't exist,they will be created with this permission.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.datanode.data.dir.perm" - }, - { - "default_value": "1073741824", - "description": "Reserved space in bytes per volume. 
Always leave this much space free for non dfs use.\n", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.datanode.du.reserved" - }, - { - "default_value": "0", - "description": "#of failed disks dn would tolerate", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.datanode.failed.volumes.tolerated" - }, - { - "default_value": "0.0.0.0:50075", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.datanode.http.address" - }, - { - "default_value": "0.0.0.0:8010", - "description": "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.datanode.ipc.address" - }, - { - "default_value": "1024", - "description": "PRIVATE CONFIG VARIABLE", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.datanode.max.transfer.threads" - }, - { - "default_value": "3", - "description": "Determines datanode heartbeat interval in seconds.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.heartbeat.interval" - }, - { - "default_value": "50470", - "description": "\n This property is used by HftpFileSystem.\n ", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.https.port" - }, - { - "default_value": "/grid/0/hdfs/journal", - "description": "The path where the JournalNode daemon will store its local state. ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.journalnode.edits.dir" - }, - { - "default_value": "0.0.0.0:8480", - "description": "The address and port the JournalNode web UI listens on.\n If the port is 0 then the server will start on a free port. ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.journalnode.http-address" - }, - { - "default_value": "0", - "description": "The access time for HDFS file is precise upto this value.\n The default value is 1 hour. 
Setting a value of 0 disables\n access times for HDFS.\n ", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.accesstime.precision" - }, - { - "default_value": "true", - "description": "\n Indicate whether or not to avoid reading from stale datanodes whose\n heartbeat messages have not been received by the namenode for more than a\n specified time interval.\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.avoid.read.stale.datanode" - }, - { - "default_value": "true", - "description": "\n Indicate whether or not to avoid writing to stale datanodes whose\n heartbeat messages have not been received by the namenode for more than a\n specified time interval.\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.avoid.write.stale.datanode" - }, - { - "default_value": "${dfs.namenode.checkpoint.dir}", - "description": "Determines where on the local filesystem the DFS secondary\n name node should store the temporary edits to merge.\n If this is a comma-delimited list of directoires then teh edits is\n replicated in all of the directoires for redundancy.\n Default value is same as dfs.namenode.checkpoint.dir\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.checkpoint.edits.dir" - }, - { - "default_value": "100", - "description": "Added to grow Queue size so that more client connections are allowed", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.handler.count" - }, - { - "default_value": "1.0f", - "description": "\n Specifies the percentage of blocks that should satisfy\n the minimal replication requirement defined by dfs.namenode.replication.min.\n Values less than or equal to 0 mean not to start in safe mode.\n Values greater than 1 will make safe mode permanent.\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.safemode.threshold-pct" - }, - { - "default_value": "30000", - "description": "Datanode is stale after not getting a heartbeat in this interval in ms", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.stale.datanode.interval" - }, - { - "default_value": "1.0f", - "description": "When the ratio of number stale datanodes to total datanodes marked is greater\n than this ratio,stop avoiding writing to stale nodes so as to prevent causing hotspots.\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.write.stale.datanode.ratio" - }, - { - "default_value": "true", - "description": "\nIf \"true\",enable permission checking in HDFS.\nIf \"false\",permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.permissions.enabled" - }, - { - "default_value": "hdfs", - "description": "The name of the group of super-users.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": 
"dfs.permissions.superusergroup" - }, - { - "default_value": "3", - "description": "Default block replication.\n ", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.replication" - }, - { - "default_value": "50", - "description": "Maximal block replication.\n ", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.replication.max" - }, - { - "default_value": "true", - "description": "to enable dfs append", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.support.append" - }, - { - "default_value": "true", - "description": "to enable webhdfs", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.webhdfs.enabled" - }, - { - "default_value": "022", - "description": "\nThe octal umask used when creating files and directories.\n", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "fs.permissions.umask-mode" - }, - { - "default_value": "67108864", - "description": "...", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "fs.checkpoint.size" - }, - { - "default_value": "%NN_HOST%:50470", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.https-address" - }, - { - "default_value": "/mnt/hadoop/hdfs/namesecondary", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.checkpoint.dir" - }, - { - "default_value": "/mnt/hadoop/hdfs/data", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.datanode.data.dir" - }, - { - "default_value": "%NN_HOST%:50070", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.http-address" - }, - { - "default_value": "%SNN_HOST%:50090", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.secondary.http-address" - }, - { - "default_value": "/etc/hadoop/conf/dfs.exclude", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.hosts.exclude" - }, - { - "default_value": "21600", - "description": "...", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.checkpoint.period" - }, - { - "default_value": "true", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.name.dir.restore" - }, - { - "default_value": "/mnt/hadoop/hdfs/namenode", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "dfs.namenode.name.dir" - } - ] - }, - { - "tag": "core-site.xml", - "properties": [ - { - "default_value": "360", - "description": "Number of minutes between trash checkpoints.\n If zero,the trash feature is disabled.\n ", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - 
"scope": "cluster", - "name": "fs.trash.interval" - }, - { - "default_value": "\n RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n DEFAULT\n ", - "description": "The mapping from kerberos principal names to local OS mapreduce.job.user.names.\n So the default rule is just \"DEFAULT\" which takes all principals in your default domain to their first component.\n \"omalley@APACHE.ORG\" and \"omalley/admin@APACHE.ORG\" to \"omalley\",if your default domain is APACHE.ORG.\nThe translations rules have 3 sections:\n base filter substitution\nThe base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm,$1 to mean the first component and $2 to mean the second component.\n\n[1:$1@$0] translates \"omalley@APACHE.ORG\" to \"omalley@APACHE.ORG\"\n[2:$1] translates \"omalley/admin@APACHE.ORG\" to \"omalley\"\n[2:$1%$2] translates \"omalley/admin@APACHE.ORG\" to \"omalley%admin\"\n\nThe filter is a regex in parens that must the generated string for the rule to apply.\n\n\"(.*%admin)\" will take any string that ends in \"%admin\"\n\"(.*@ACME.COM)\" will take any string that ends in \"@ACME.COM\"\n\nFinally,the substitution is a sed rule to translate a regex into a fixed string.\n\n\"s/@ACME\\.COM//\" removes the first instance of \"@ACME.COM\".\n\"s/@[A-Z]*\\.COM//\" removes the first instance of \"@\" followed by a name followed by \".COM\".\n\"s/X/Y/g\" replaces all of the \"X\" in the name with \"Y\"\n\nSo,if your default realm was APACHE.ORG,but you also wanted to take all principals from ACME.COM that had a single component \"joe@ACME.COM\",you'd do:\n\nRULE:[1:$1@$0](.@ACME.ORG)s/@.//\nDEFAULT\n\nTo also translate the names with a second component,you'd make the rules:\n\nRULE:[1:$1@$0](.@ACME.ORG)s/@.//\nRULE:[2:$1@$0](.@ACME.ORG)s/@.//\nDEFAULT\n\nIf you want to treat all principals from APACHE.ORG with /admin as \"admin\",your rules would look like:\n\nRULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/\nDEFAULT\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "hadoop.security.auth_to_local" - }, - { - "default_value": "simple", - "description": "\n Set the authentication for the cluster. 
Valid values are: simple or\n kerberos.\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "hadoop.security.authentication" - }, - { - "default_value": "false", - "description": "\n Enable authorization for different protocols.\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "hadoop.security.authorization" - }, - { - "default_value": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", - "description": "A list of the compression codec classes that can be used\n for compression/decompression.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "io.compression.codecs" - }, - { - "default_value": "131072", - "description": "The size of buffer for use in sequence files.\n The size of this buffer should probably be a multiple of hardware\n page size (4096 on Intel x86),and it determines how much data is\n buffered during read and write operations.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "io.file.buffer.size" - }, - { - "default_value": "org.apache.hadoop.io.serializer.WritableSerialization", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "io.serializations" - }, - { - "default_value": "50", - "description": "Defines the maximum number of retries for IPC connections.", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "ipc.client.connect.max.retries" - }, - { - "default_value": "30000", - "description": "The maximum time after which a client will bring down the\n connection to the server.\n ", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "ipc.client.connection.maxidletime" - }, - { - "default_value": "8000", - "description": "Defines the threshold number of connections after which\n connections will be inspected for idleness.\n ", - "config_type": "int", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "ipc.client.idlethreshold" - }, - { - "default_value": "false", - "description": " If set to true,the web interfaces of JT and NN may contain\n actions,such as kill job,delete file,etc.,that should\n not be exposed to public. 
Enable this option if the interfaces\n are only reachable by those who have the right authorization.\n ", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.jobtracker.webinterface.trusted" - }, - { - "default_value": "users", - "description": "...", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "hadoop.proxyuser.hcat.groups" - }, - { - "default_value": "%WEBHCAT_HOST%", - "description": "...", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "hadoop.proxyuser.hcat.hosts" - }, - { - "default_value": "users", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "hadoop.proxyuser.oozie.groups" - }, - { - "default_value": "users", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hadoop.proxyuser.hive.groups" - }, - { - "default_value": "null", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "fs.AbstractFileSystem.glusterfs.impl" - }, - { - "default_value": "localhost", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hadoop.proxyuser.hive.hosts" - }, - { - "default_value": "localhost", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "hadoop.proxyuser.oozie.hosts" - }, - { - "default_value": "hdfs://localhost:8020", - "description": "...", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "fs.defaultFS" - } - ] - }, - { - "tag": "hadoop-policy.xml", - "properties": [ - { - "default_value": "hadoop", - "description": "ACL for AdminOperationsProtocol. Used for admin commands.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.admin.operations.protocol.acl" - }, - { - "default_value": "*", - "description": "ACL for ClientDatanodeProtocol,the client-to-datanode protocol\n for block recovery.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.client.datanode.protocol.acl" - }, - { - "default_value": "*", - "description": "ACL for ClientProtocol,which is used by user code\n via the DistributedFileSystem.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. 
\"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.client.protocol.acl" - }, - { - "default_value": "*", - "description": "ACL for DatanodeProtocol,which is used by datanodes to\n communicate with the namenode.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.datanode.protocol.acl" - }, - { - "default_value": "*", - "description": "ACL for InterDatanodeProtocol,the inter-datanode protocol\n for updating generation timestamp.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.inter.datanode.protocol.acl" - }, - { - "default_value": "*", - "description": "ACL for InterTrackerProtocol,used by the tasktrackers to\n communicate with the jobtracker.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.inter.tracker.protocol.acl" - }, - { - "default_value": "*", - "description": "ACL for JobSubmissionProtocol,used by job clients to\n communciate with the jobtracker for job submission,querying job status etc.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.job.client.protocol.acl" - }, - { - "default_value": "*", - "description": "ACL for TaskUmbilicalProtocol,used by the map and reduce\n tasks to communicate with the parent tasktracker.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.job.task.protocol.acl" - }, - { - "default_value": "*", - "description": "ACL for NamenodeProtocol,the protocol used by the secondary\n namenode to communicate with the namenode.\n The ACL is a comma-separated list of user and group names. The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.namenode.protocol.acl" - }, - { - "default_value": "hadoop", - "description": "ACL for RefreshAuthorizationPolicyProtocol,used by the\n dfsadmin and mradmin commands to refresh the security policy in-effect.\n The ACL is a comma-separated list of user and group names. 
The user and\n group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n A special value of \"*\" means all users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.refresh.policy.protocol.acl" - }, - { - "default_value": "hadoop", - "description": "ACL for RefreshUserMappingsProtocol. Used to refresh\n users mappings. The ACL is a comma-separated list of user and\n group names. The user and group list is separated by a blank. For\n e.g. \"alice,bob users,wheel\". A special value of \"*\" means all\n users are allowed.", - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "cluster", - "name": "security.refresh.usertogroups.mappings.protocol.acl" - } - ] - }, - { - "tag": "hive-site.xml", - "properties": [ - { - "default_value": "hive", - "description": "Database name used as the Hive Metastore", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "ambari.hive.db.schema.name" - }, - { - "default_value": "true", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "fs.file.impl.disable.cache" - }, - { - "default_value": "true", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "fs.hdfs.impl.disable.cache" - }, - { - "default_value": "true", - "description": "Whether Hive enable the optimization about converting common\n join into mapjoin based on the input file size.", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.auto.convert.join" - }, - { - "default_value": "true", - "description": "Whether Hive enable the optimization about converting common join into mapjoin based on the input file\n size. If this paramater is on,and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n specified size,the join is directly converted to a mapjoin (there is no conditional task).\n ", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.auto.convert.join.noconditionaltask" - }, - { - "default_value": "1000000000", - "description": "If hive.auto.convert.join.noconditionaltask is off,this parameter does not take affect. However,if it\n is on,and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size,the join is directly\n converted to a mapjoin(there is no conditional task). The default is 10MB.\n ", - "config_type": "int", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.auto.convert.join.noconditionaltask.size" - }, - { - "default_value": "true", - "description": "Will the join be automatically converted to a sort-merge join,if the joined tables pass\n the criteria for sort-merge join.\n ", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.auto.convert.sortmerge.join" - }, - { - "default_value": "true", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.auto.convert.sortmerge.join.noconditionaltask" - }, - { - "default_value": "true", - "description": "Whether bucketing is enforced. 
If true,while inserting into the table,bucketing is enforced.", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.enforce.bucketing" - }, - { - "default_value": "true", - "description": "Whether sorting is enforced. If true,while inserting into the table,sorting is enforced.", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.enforce.sorting" - }, - { - "default_value": "true", - "description": "Whether to use map-side aggregation in Hive Group By queries.", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.map.aggr" - }, - { - "default_value": "10000", - "description": "\n Size per reducer.The default is 1G,i.e if the input size is 10G,it\n will use 10 reducers.\n ", - "config_type": "int", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.mapjoin.bucket.cache.size" - }, - { - "default_value": "false", - "description": "Whether speculative execution for reducers should be turned on.", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.mapred.reduce.tasks.speculative.execution" - }, - { - "default_value": "Table,Database,Type,FieldSchema,Order", - "description": "List of comma separated metastore object types that should be pinned in the cache", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.metastore.cache.pinobjtypes" - }, - { - "default_value": "60", - "description": "MetaStore Client socket timeout in seconds", - "config_type": "int", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.metastore.client.socket.timeout" - }, - { - "default_value": "true", - "description": "In unsecure mode,setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that its best effort. 
If client sets its to true and server sets it to false,client setting will be ignored.", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.metastore.execute.setugi" - }, - { - "default_value": "thrift://localhost:9083", - "description": "URI for client to contact metastore server", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.metastore.uris" - }, - { - "default_value": "/apps/hive/warehouse", - "description": "location of default database for the warehouse", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.metastore.warehouse.dir" - }, - { - "default_value": "true", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.optimize.bucketmapjoin" - }, - { - "default_value": "true", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.optimize.bucketmapjoin.sortedmerge" - }, - { - "default_value": "true", - "description": "\n Whether to enable automatic use of indexes\n ", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.optimize.index.filter" - }, - { - "default_value": "true", - "description": "If hive.auto.convert.join is off,this parameter does not take\n affect. If it is on,and if there are map-join jobs followed by a map-reduce\n job (for e.g a group by),each map-only job is merged with the following\n map-reduce job.\n ", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.optimize.mapjoin.mapreduce" - }, - { - "default_value": "true", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.optimize.reducededuplication" - }, - { - "default_value": "1", - "description": "Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.\n That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small,it can make very slow,single MR.\n The optimization will be disabled if number of reducers is less than specified value.\n ", - "config_type": "int", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.optimize.reducededuplication.min.reducer" - }, - { - "default_value": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", - "description": "Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider. 
", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.security.authenticator.manager" - }, - { - "default_value": "false", - "description": "enable or disable the hive client authorization", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.security.authorization.enabled" - }, - { - "default_value": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", - "description": "the hive client authorization manager class name.\n The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider. ", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.security.authorization.manager" - }, - { - "default_value": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", - "description": "The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider. ", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.security.metastore.authorization.manager" - }, - { - "default_value": "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory", - "description": "controls which SemanticAnalyzerFactory implemenation class is used by CLI", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.semantic.analyzer.factory.impl" - }, - { - "default_value": "true", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.server2.enable.doAs" - }, - { - "default_value": "false", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "hive.vectorized.execution.enabled" - }, - { - "default_value": "com.mysql.jdbc.Driver", - "description": "Driver class name for a JDBC metastore", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "javax.jdo.option.ConnectionDriverName" - }, - { - "default_value": " ", - "description": "password to use against metastore database", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "javax.jdo.option.ConnectionPassword" - }, - { - "default_value": "jdbc", - "description": "JDBC connect string for a JDBC metastore", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "javax.jdo.option.ConnectionURL" - }, - { - "default_value": "hive", - "description": "username to use against metastore database", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "javax.jdo.option.ConnectionUserName" - } - ] - }, - { - "tag": "mapred-queue-acls.xml", - "properties": [ - { - "default_value": "*", - "description": "...", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapred.queue.default.acl-administer-jobs" - }, - { - "default_value": "*", - "description": "...", - "config_type": "string", - "applicable_target": 
"MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapred.queue.default.acl-submit-job" - } - ] - }, - { - "tag": "mapred-site.xml", - "properties": [ - { - "default_value": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", - "description": "...", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.admin.map.child.java.opts" - }, - { - "default_value": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", - "description": "...", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.admin.reduce.child.java.opts" - }, - { - "default_value": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", - "description": "\n Additional execution environment entries for map and reduce task processes.\n This is not an additive property. You must preserve the original value if\n you want your map and reduce tasks to have access to native libraries (compression,etc)\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.admin.user.env" - }, - { - "default_value": "2", - "description": "\n The maximum number of application attempts. It is a\n application-specific setting. It should not be larger than the global number\n set by resourcemanager. Otherwise,it will be override. The default number is\n set to 2,to allow at least one retry for AM.\n ", - "config_type": "int", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.am.max-attempts" - }, - { - "default_value": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*", - "description": "\n CLASSPATH for MR applications. A comma-separated list of CLASSPATH\n entries.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.application.classpath" - }, - { - "default_value": " hadoop", - "description": "\n Administrators for MapReduce applications.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.cluster.administrators" - }, - { - "default_value": "yarn", - "description": "\n The runtime framework for executing MapReduce jobs. 
Can be one of local,\n classic or yarn.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.framework.name" - }, - { - "default_value": "0.05", - "description": "\n Fraction of the number of maps in the job which should be complete before\n reduces are scheduled for the job.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.job.reduce.slowstart.completedmaps" - }, - { - "default_value": "localhost:10020", - "description": "Enter your JobHistoryServer hostname.", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.jobhistory.address" - }, - { - "default_value": "/mr-history/done", - "description": "\n Directory where history files are managed by the MR JobHistory Server.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.jobhistory.done-dir" - }, - { - "default_value": "/mr-history/tmp", - "description": "\n Directory where history files are written by MapReduce jobs.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.jobhistory.intermediate-done-dir" - }, - { - "default_value": "localhost:19888", - "description": "Enter your JobHistoryServer hostname.", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.jobhistory.webapp.address" - }, - { - "default_value": "-Xmx756m", - "description": "\n Larger heap-size for child jvms of maps.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.map.java.opts" - }, - { - "default_value": "INFO", - "description": "\n The logging level for the map task. The allowed levels are:\n OFF,FATAL,ERROR,WARN,INFO,DEBUG,TRACE and ALL.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.map.log.level" - }, - { - "default_value": "1024", - "description": "Virtual memory for single Map task", - "config_type": "int", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.map.memory.mb" - }, - { - "default_value": "false", - "description": "...", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.map.output.compress" - }, - { - "default_value": "0.7", - "description": "\n The soft limit in the serialization buffer. Once reached,a thread will\n begin to spill the contents to disk in the background. 
Note that\n collection will not block if this threshold is exceeded while a spill\n is already in progress,so spills may be larger than this threshold when\n it is set to less than .5\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.map.sort.spill.percent" - }, - { - "default_value": "false", - "description": "\n If true,then multiple instances of some map tasks\n may be executed in parallel.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.map.speculative" - }, - { - "default_value": "false", - "description": "\n Should the job outputs be compressed?\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.output.fileoutputformat.compress" - }, - { - "default_value": "BLOCK", - "description": "\n If the job outputs are to compressed as SequenceFiles,how should\n they be compressed? Should be one of NONE,RECORD or BLOCK.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.output.fileoutputformat.compress.type" - }, - { - "default_value": "0.0", - "description": "\n The percentage of memory- relative to the maximum heap size- to\n retain map outputs during the reduce. When the shuffle is concluded,any\n remaining map outputs in memory must consume less than this threshold before\n the reduce can begin.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.reduce.input.buffer.percent" - }, - { - "default_value": "-Xmx756m", - "description": "\n Larger heap-size for child jvms of reduces.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.reduce.java.opts" - }, - { - "default_value": "INFO", - "description": "\n The logging level for the reduce task. 
The allowed levels are:\n OFF,FATAL,ERROR,WARN,INFO,DEBUG,TRACE and ALL.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.reduce.log.level" - }, - { - "default_value": "1024", - "description": "Virtual memory for single Reduce task", - "config_type": "int", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.reduce.memory.mb" - }, - { - "default_value": "0.7", - "description": "\n The percentage of memory to be allocated from the maximum heap\n size to storing map outputs during the shuffle.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.reduce.shuffle.input.buffer.percent" - }, - { - "default_value": "0.66", - "description": "\n The usage threshold at which an in-memory merge will be\n initiated,expressed as a percentage of the total memory allocated to\n storing in-memory map outputs,as defined by\n mapreduce.reduce.shuffle.input.buffer.percent.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.reduce.shuffle.merge.percent" - }, - { - "default_value": "30", - "description": "\n The default number of parallel transfers run by reduce during\n the copy(shuffle) phase.\n ", - "config_type": "int", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.reduce.shuffle.parallelcopies" - }, - { - "default_value": "false", - "description": "\n If true,then multiple instances of some reduce tasks may be\n executed in parallel.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.reduce.speculative" - }, - { - "default_value": "13562", - "description": "\n Default port that the ShuffleHandler will run on.\n ShuffleHandler is a service run at the NodeManager to facilitate\n transfers of intermediate Map outputs to requesting Reducers.\n ", - "config_type": "int", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.shuffle.port" - }, - { - "default_value": "100", - "description": "\n The number of streams to merge at once while sorting files.\n This determines the number of open file handles.\n ", - "config_type": "int", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.task.io.sort.factor" - }, - { - "default_value": "200", - "description": "\n The total amount of buffer memory to use while sorting files,in megabytes.\n By default,gives each merge stream 1MB,which should minimize seeks.\n ", - "config_type": "int", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.task.io.sort.mb" - }, - { - "default_value": "300000", - "description": "\n The number of milliseconds before a task will be\n terminated if it neither reads an input,writes an output,nor\n updates its status string.\n ", - "config_type": "int", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "mapreduce.task.timeout" - }, - { - "default_value": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", - "description": "\n Java opts for the MR App Master processes for admin purposes.\n It will appears before the opts set by yarn.app.mapreduce.am.command-opts and\n thus its options can be overridden 
user.\n\n Usage of -Djava.library.path can cause programs to no longer function if\n hadoop native libraries are used. These values should instead be set as part\n of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and\n mapreduce.reduce.env config settings.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "yarn.app.mapreduce.am.admin-command-opts" - }, - { - "default_value": "-Xmx312m", - "description": "\n Java opts for the MR App Master processes.\n The following symbol,if present,will be interpolated: @taskid@ is replaced\n by current TaskID. Any other occurrences of '@' will go unchanged.\n For example,to enable verbose gc logging to a file named for the taskid in\n /tmp and to set the heap maximum to be a gigabyte,pass a 'value' of:\n -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc\n\n Usage of -Djava.library.path can cause programs to no longer function if\n hadoop native libraries are used. These values should instead be set as part\n of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and\n mapreduce.reduce.env config settings.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "yarn.app.mapreduce.am.command-opts" - }, - { - "default_value": "INFO", - "description": "MR App Master process log level.", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "yarn.app.mapreduce.am.log.level" - }, - { - "default_value": "512", - "description": "The amount of memory the MR AppMaster needs.", - "config_type": "int", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "yarn.app.mapreduce.am.resource.mb" - }, - { - "default_value": "/user", - "description": "\n The staging dir used while submitting jobs.\n ", - "config_type": "string", - "applicable_target": "MAPREDUCE2", - "is_optional": true, - "scope": "cluster", - "name": "yarn.app.mapreduce.am.staging-dir" - } - ] - }, - { - "tag": "oozie-site.xml", - "properties": [ - { - "default_value": "\n RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n DEFAULT\n ", - "description": "The mapping from kerberos principal names to local OS user names.", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.authentication.kerberos.name.rules" - }, - { - "default_value": "simple", - "description": "\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.authentication.type" - }, - { - "default_value": "http://localhost:11000/oozie", - "description": "Base Oozie URL.", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.base.url" - }, - { - "default_value": "hcat=org.apache.oozie.action.hadoop.HCatCredentials", - "description": "\n Credential Class to be used for HCat.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.credentials.credentialclasses" - }, - { - "default_value": "oozie", - "description": "\n Oozie DataBase Name\n ", - "config_type": "string", - 
"applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.db.schema.name" - }, - { - "default_value": "\n org.apache.oozie.action.email.EmailActionExecutor,\n org.apache.oozie.action.hadoop.HiveActionExecutor,\n org.apache.oozie.action.hadoop.ShellActionExecutor,\n org.apache.oozie.action.hadoop.SqoopActionExecutor,\n org.apache.oozie.action.hadoop.DistcpActionExecutor\n ", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.ActionService.executor.ext.classes" - }, - { - "default_value": "true", - "description": "\n Specifies whether security (user name/admin role) is enabled or not.\n If disabled any user can manage Oozie system and manage any job.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.AuthorizationService.security.enabled" - }, - { - "default_value": "3", - "description": "\n Maximum concurrency for a given callable type.\n Each command is a callable type (submit,start,run,signal,job,jobs,suspend,resume,etc).\n Each action type is a callable type (Map-Reduce,Pig,SSH,FS,sub-workflow,etc).\n All commands that use action executors (action-start,action-end,action-kill and action-check) use\n the action type as the callable type.\n ", - "config_type": "int", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.CallableQueueService.callable.concurrency" - }, - { - "default_value": "1000", - "description": "Max callable queue size", - "config_type": "int", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.CallableQueueService.queue.size" - }, - { - "default_value": "10", - "description": "Number of threads used for executing callables", - "config_type": "int", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.CallableQueueService.threads" - }, - { - "default_value": "*=/etc/hadoop/conf", - "description": "\n Comma separated AUTHORITY=HADOOP_CONF_DIR,where AUTHORITY is the HOST:PORT of\n the Hadoop service (JobTracker,HDFS). The wildcard '*' configuration is\n used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n the relevant Hadoop *-site.xml files. If the path is relative is looked within\n the Oozie configuration directory; though the path can be absolute (i.e. to point\n to Hadoop client conf/ directories in the local filesystem.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.HadoopAccessorService.hadoop.configurations" - }, - { - "default_value": " ", - "description": "\n Whitelisted job tracker for Oozie service.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.HadoopAccessorService.jobTracker.whitelist" - }, - { - "default_value": " ", - "description": "\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.HadoopAccessorService.nameNode.whitelist" - }, - { - "default_value": "false", - "description": "\n Creates Oozie DB.\n\n If set to true,it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n If set to false,it does not create the DB schema. 
If the DB schema does not exist it fails start up.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.JPAService.create.db.schema" - }, - { - "default_value": "org.apache.derby.jdbc.EmbeddedDriver", - "description": "\n JDBC driver class.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.JPAService.jdbc.driver" - }, - { - "default_value": " ", - "description": "\n DB user password.\n\n IMPORTANT: if password is emtpy leave a 1 space string,the service trims the value,\n if empty Configuration assumes it is NULL.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.JPAService.jdbc.password" - }, - { - "default_value": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true", - "description": "\n JDBC URL.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.JPAService.jdbc.url" - }, - { - "default_value": "oozie", - "description": "\n Database user name to use to connect to the database\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.JPAService.jdbc.username" - }, - { - "default_value": "10", - "description": "\n Max number of connections.\n ", - "config_type": "int", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.JPAService.pool.max.active.conn" - }, - { - "default_value": "30", - "description": "\n Jobs older than this value,in days,will be purged by the PurgeService.\n ", - "config_type": "int", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.PurgeService.older.than" - }, - { - "default_value": "3600", - "description": "\n Interval at which the purge service will run,in seconds.\n ", - "config_type": "int", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.PurgeService.purge.interval" - }, - { - "default_value": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd", - "description": "...", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.SchemaService.wf.ext.schemas" - }, - { - "default_value": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler", - "description": "\n Enlist the different uri handlers supported for data availability checks.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.URIHandlerService.uri.handlers" - }, - { - "default_value": "/user/${user.name}/share/lib", - "description": "\n System library path to use for workflow applications.\n This path is added to workflow application if their job properties sets\n the property 'oozie.use.system.libpath' to true.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.WorkflowAppService.system.libpath" - }, - { - "default_value": "120", - "description": "Default timeout for a coordinator action input check (in minutes) for normal job.\n -1 means 
infinite timeout", - "config_type": "int", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.coord.normal.default.timeout" - }, - { - "default_value": "30000", - "description": "\n Command re-queue interval for push dependencies (in millisecond).\n ", - "config_type": "int", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.service.coord.push.check.requeue.interval" - }, - { - "default_value": "\n org.apache.oozie.service.SchedulerService,\n org.apache.oozie.service.InstrumentationService,\n org.apache.oozie.service.CallableQueueService,\n org.apache.oozie.service.UUIDService,\n org.apache.oozie.service.ELService,\n org.apache.oozie.service.AuthorizationService,\n org.apache.oozie.service.UserGroupInformationService,\n org.apache.oozie.service.HadoopAccessorService,\n org.apache.oozie.service.URIHandlerService,\n org.apache.oozie.service.MemoryLocksService,\n org.apache.oozie.service.DagXLogInfoService,\n org.apache.oozie.service.SchemaService,\n org.apache.oozie.service.LiteWorkflowAppService,\n org.apache.oozie.service.JPAService,\n org.apache.oozie.service.StoreService,\n org.apache.oozie.service.CoordinatorStoreService,\n org.apache.oozie.service.SLAStoreService,\n org.apache.oozie.service.DBLiteWorkflowStoreService,\n org.apache.oozie.service.CallbackService,\n org.apache.oozie.service.ActionService,\n org.apache.oozie.service.ActionCheckerService,\n org.apache.oozie.service.RecoveryService,\n org.apache.oozie.service.PurgeService,\n org.apache.oozie.service.CoordinatorEngineService,\n org.apache.oozie.service.BundleEngineService,\n org.apache.oozie.service.DagEngineService,\n org.apache.oozie.service.CoordMaterializeTriggerService,\n org.apache.oozie.service.StatusTransitService,\n org.apache.oozie.service.PauseTransitService,\n org.apache.oozie.service.GroupsService,\n org.apache.oozie.service.ProxyUserService\n ", - "description": "List of Oozie services", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.services" - }, - { - "default_value": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService", - "description": "\n To add/replace services defined in 'oozie.services' with custom implementations.\n Class names must be separated by commas.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.services.ext" - }, - { - "default_value": "oozie-${user.name}", - "description": "\n The Oozie system ID.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.system.id" - }, - { - "default_value": "NORMAL", - "description": "\n System mode for Oozie at startup.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "oozie.systemmode" - }, - { - "default_value": "false", - "description": "\n If set to true,submissions of MapReduce and Pig jobs will include\n automatically the system library path,thus not requiring users to\n specify where the Pig JAR files are. 
Instead,the ones from the system\n library path are used.\n ", - "config_type": "string", - "applicable_target": "OOZIE", - "is_optional": true, - "scope": "cluster", - "name": "use.system.libpath.for.mapreduce.and.pig.jobs" - } - ] - }, - { - "tag": "webhcat-site.xml", - "properties": [ - { - "default_value": "60000", - "description": "Time out for templeton api", - "config_type": "int", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.exec.timeout" - }, - { - "default_value": "/usr/bin/hadoop", - "description": "The path to the Hadoop executable.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.hadoop" - }, - { - "default_value": "/etc/hadoop/conf", - "description": "The path to the Hadoop configuration.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.hadoop.conf.dir" - }, - { - "default_value": "/usr/bin/hcat", - "description": "The path to the hcatalog executable.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.hcat" - }, - { - "default_value": "hdfs:///apps/webhcat/hive.tar.gz", - "description": "The path to the Hive archive.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.hive.archive" - }, - { - "default_value": "hive.tar.gz/hive/bin/hive", - "description": "The path to the Hive executable.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.hive.path" - }, - { - "default_value": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar", - "description": "The path to the Templeton jar file.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.jar" - }, - { - "default_value": "/usr/lib/zookeeper/zookeeper.jar", - "description": "Jars to add the the classpath.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.libjars" - }, - { - "default_value": "false", - "description": "\n Enable the override path in templeton.override.jars\n ", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.override.enabled" - }, - { - "default_value": "hdfs:///apps/webhcat/pig.tar.gz", - "description": "The path to the Pig archive.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.pig.archive" - }, - { - "default_value": "pig.tar.gz/pig/bin/pig", - "description": "The path to the Pig executable.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.pig.path" - }, - { - "default_value": "50111", - "description": "The HTTP port for the main server.", - "config_type": "int", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.port" - }, - { - "default_value": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage", - "description": "The class to use as storage", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.storage.class" - }, - { - "default_value": 
"hdfs:///apps/webhcat/hadoop-streaming.jar", - "description": "The hdfs path to the Hadoop streaming jar file.", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.streaming.jar" - }, - { - "default_value": "localhost:2181", - "description": "ZooKeeper servers,as comma separated host:port pairs", - "config_type": "string", - "applicable_target": "WEBHCAT", - "is_optional": true, - "scope": "cluster", - "name": "templeton.zookeeper.hosts" - }, - { - "default_value": "hive.metastore.local=false,hive.metastore.uris=thrift:///%HIVE_METASTORE_HOST%:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse", - "description": "...", - "config_type": "string", - "applicable_target": "HIVE", - "is_optional": true, - "scope": "cluster", - "name": "templeton.hive.properties" - } - ] - }, - { - "tag": "yarn-site.xml", - "properties": [ - { - "default_value": "true", - "description": "...", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.acl.enable" - }, - { - "default_value": "*", - "description": "...", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.admin.acl" - }, - { - "default_value": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", - "description": "Classpath for typical applications.", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.application.classpath" - }, - { - "default_value": "true", - "description": "Whether to enable log aggregation", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.log-aggregation-enable" - }, - { - "default_value": "2592000", - "description": "\n How long to keep aggregation logs before deleting them. -1 disables.\n Be careful set this too small and you will spam the name node.\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.log-aggregation.retain-seconds" - }, - { - "default_value": "0.0.0.0:45454", - "description": "The address of the container manager in the NM.", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.address" - }, - { - "default_value": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", - "description": "\n Environment variables that should be forwarded from the NodeManager's\n environment to the container's.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.admin-env" - }, - { - "default_value": "mapreduce_shuffle", - "description": "Auxilliary services of NodeManager. 
A valid service name should only contain a-zA-Z0-9_ and can\n not start with numbers", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.aux-services" - }, - { - "default_value": "org.apache.hadoop.mapred.ShuffleHandler", - "description": "...", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.aux-services.mapreduce_shuffle.class" - }, - { - "default_value": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", - "description": "ContainerExecutor for launching containers", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.container-executor.class" - }, - { - "default_value": "3000", - "description": "\n The interval,in milliseconds,for which the node manager\n waits between two cycles of monitoring its containers' memory usage.\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.container-monitor.interval-ms" - }, - { - "default_value": "0", - "description": "\n Number of seconds after an application finishes before the nodemanager's\n DeletionService will delete the application's localized file directory\n and log directory.\n\n To diagnose Yarn application problems,set this property's value large\n enough (for example,to 600 = 10 minutes) to permit examination of these\n directories. After changing the property's value,you must restart the\n nodemanager in order for it to have an effect.\n\n The roots of Yarn applications' work directories is configurable with\n the yarn.nodemanager.local-dirs property (see below),and the roots\n of the Yarn applications' log directories is configurable with the\n yarn.nodemanager.log-dirs property (see also below).\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.delete.debug-delay-sec" - }, - { - "default_value": "0.25", - "description": "\n The minimum fraction of number of disks to be healthy for the nodemanager\n to launch new containers. This correspond to both\n yarn-nodemanager.local-dirs and yarn.nodemanager.log-dirs. i.e.\n If there are less number of healthy local-dirs (or log-dirs) available,\n then new containers will not be launched on this node.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.disk-health-checker.min-healthy-disks" - }, - { - "default_value": "135000", - "description": "Frequency of running node health script.", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.health-checker.interval-ms" - }, - { - "default_value": "60000", - "description": "Script time out period.", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.health-checker.script.timeout-ms" - }, - { - "default_value": "hadoop", - "description": "Unix group of the NodeManager", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.linux-container-executor.group" - }, - { - "default_value": "/hadoop/yarn/local", - "description": "\n List of directories to store localized files in. 
An\n application's localized file directory will be found in:\n ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.\n Individual containers' work directories,called container_${contid},will\n be subdirectories of this.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.local-dirs" - }, - { - "default_value": "gz", - "description": "\n T-file compression types used to compress aggregated logs.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.log-aggregation.compression-type" - }, - { - "default_value": "/hadoop/yarn/log", - "description": "\n Where to store container logs. An application's localized log directory\n will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.\n Individual containers' log directories will be below this,in directories\n named container_{$contid}. Each container directory will contain the files\n stderr,stdin,and syslog generated by that container.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.log-dirs" - }, - { - "default_value": "604800", - "description": "\n Time in seconds to retain user logs. Only applicable if\n log aggregation is disabled.\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.log.retain-second" - }, - { - "default_value": "/app-logs", - "description": "...", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.remote-app-log-dir" - }, - { - "default_value": "logs", - "description": "\n The remote log dir will be created at\n {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.remote-app-log-dir-suffix" - }, - { - "default_value": "5120", - "description": "Amount of physical memory,in MB,that can be allocated\n for containers.", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.resource.memory-mb" - }, - { - "default_value": "false", - "description": "\n Whether virtual memory limits will be enforced for containers.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.vmem-check-enabled" - }, - { - "default_value": "2.1", - "description": "Ratio between virtual memory to physical memory when\n setting memory limits for containers. 
Container allocations are\n expressed in terms of physical memory,and virtual memory usage\n is allowed to exceed this allocation by this ratio.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.nodemanager.vmem-pmem-ratio" - }, - { - "default_value": "localhost:8050", - "description": "\n The address of the applications manager interface in the\n RM.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.resourcemanager.address" - }, - { - "default_value": "localhost:8141", - "description": "The address of the RM admin interface.", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.resourcemanager.admin.address" - }, - { - "default_value": "2", - "description": "\n The maximum number of application attempts. It's a global\n setting for all application masters. Each application master can specify\n its individual maximum number of application attempts via the API,but the\n individual number cannot be more than the global upper bound. If it is,\n the resourcemanager will override it. The default number is set to 2,to\n allow at least one retry for AM.\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.resourcemanager.am.max-attempts" - }, - { - "default_value": "localhost:8025", - "description": "...", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.resourcemanager.resource-tracker.address" - }, - { - "default_value": "localhost:8030", - "description": "The address of the scheduler interface.", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.resourcemanager.scheduler.address" - }, - { - "default_value": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", - "description": "The class to use as the resource scheduler.", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.resourcemanager.scheduler.class" - }, - { - "default_value": "localhost:8088", - "description": "\n The address of the RM web application.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.resourcemanager.webapp.address" - }, - { - "default_value": "2048", - "description": "\n The maximum allocation for every container request at the RM,\n in MBs. Memory requests higher than this won't take effect,\n and will get capped to this value.\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.maximum-allocation-mb" - }, - { - "default_value": "512", - "description": "\n TThe minimum allocation for every container request at the RM,\n in MBs. 
Memory requests lower than this won't take effect,\n and the specified value will get allocated at minimum.\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.minimum-allocation-mb" - }, - { - "default_value": "localhost", - "description": "...", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.resourcemanager.hostname" - }, - { - "default_value": "http://master.novalocal:19888/jobhistory/logs", - "description": "...", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.log.server.url" - } - ] - }, - { - "tag": "capacity-scheduler.xml", - "properties": [ - { - "default_value": "0.2", - "description": "\n Maximum percent of resources in the cluster which can be used to run \n application masters i.e. controls number of concurrent running\n applications.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.maximum-am-resource-percent" - }, - { - "default_value": "10000", - "description": "\n Maximum number of applications that can be pending and running.\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.maximum-applications" - }, - { - "default_value": "*", - "description": "\n The ACL for who can administer this queue i.e. change sub-queue \n allocations.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.acl_administer_queues" - }, - { - "default_value": "100", - "description": "\n The total capacity as a percentage out of 100 for this queue.\n If it has child queues then this includes their capacity as well.\n The child queues capacity should add up to their parent queue's capacity\n or less.\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.capacity" - }, - { - "default_value": "*", - "description": "\n The ACL of who can administer jobs on the default queue.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.default.acl_administer_jobs" - }, - { - "default_value": "*", - "description": "\n The ACL of who can submit jobs to the default queue.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.default.acl_submit_jobs" - }, - { - "default_value": "100", - "description": "Default queue target capacity.", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.default.capacity" - }, - { - "default_value": "100", - "description": "\n The maximum capacity of the default queue. \n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.default.maximum-capacity" - }, - { - "default_value": "RUNNING", - "description": "\n The state of the default queue. 
State can be one of RUNNING or STOPPED.\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.default.state" - }, - { - "default_value": "1", - "description": "\n Default queue user limit a percentage from 0.0 to 1.0.\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.default.user-limit-factor" - }, - { - "default_value": "default", - "description": "\n The queues at the this level (root is the root queue).\n ", - "config_type": "string", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.queues" - }, - { - "default_value": "50", - "description": "\n No description\n ", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.root.unfunded.capacity" - }, - { - "default_value": "40", - "description": "...", - "config_type": "int", - "applicable_target": "YARN", - "is_optional": true, - "scope": "cluster", - "name": "yarn.scheduler.capacity.node-locality-delay" - } - ] - }, - { - "tag": "ambari", - "properties": [ - { - "applicable_target": "AMBARI", - "config_type": "string", - "default_value": "8080", - "description": "Ambari Server API port.", - "is_optional": true, - "name": "server.port", - "scope": "cluster" - } - ] - }, - { - "tag": "ambari-stack", - "properties": [ - { - "applicable_target": "AMBARI", - "config_type": "string", - "default_value": "admin", - "description": "Ambari admin user name.", - "is_optional": true, - "name": "ambari.admin.user", - "scope": "cluster" - }, - { - "applicable_target": "AMBARI", - "config_type": "string", - "default_value": "admin", - "description": "Ambari admin user password.", - "is_optional": true, - "name": "ambari.admin.password", - "scope": "cluster" - } - ] - }, - { - "tag": "hue-ini", - "properties": [ - { - "applicable_target": "HUE", - "config_type": "int", - "default_value": "8000", - "description": "The HTTP port for the web-based user interface.", - "is_optional": true, - "name": "desktop/http_port", - "scope": "cluster" - }, - { - "applicable_target": "HUE", - "config_type": "string", - "default_value": "admin", - "description": "The username of the initial administrative user.", - "is_optional": true, - "name": "useradmin/default_username", - "scope": "cluster" - }, - { - "applicable_target": "HUE", - "config_type": "string", - "default_value": "admin", - "description": "The password for the initial administrative user.", - "is_optional": true, - "name": "useradmin/default_user_password", - "scope": "cluster" - }, - { - "applicable_target": "HUE", - "config_type": "string", - "default_value": "hadoop", - "description": "The default group for hue users.", - "is_optional": true, - "name": "useradmin/default_user_group", - "scope": "cluster" - } - ] - }, - { - "tag": "hdfsha", - "properties": [ - { - "applicable_target": "HDFSHA", - "config_type": "boolean", - "default_value": false, - "description": "Enable HDFS NameNode High Availability", - "is_optional": true, - "name": "hdfs.nnha", - "scope": "cluster" - } - ] - } - ] -} diff --git a/sahara/plugins/hdp/versions/version_2_0_6/resources/default-cluster.template b/sahara/plugins/hdp/versions/version_2_0_6/resources/default-cluster.template deleted file mode 100644 index 3914a48c48..0000000000 --- 
a/sahara/plugins/hdp/versions/version_2_0_6/resources/default-cluster.template +++ /dev/null @@ -1,1845 +0,0 @@ -{ - "services" : [ - { - "name" : "YARN", - "components" : [ - { - "name" : "RESOURCEMANAGER", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "YARN_CLIENT", - "type" : "CLIENT", - "cardinality" : "1+" - }, - { - "name" : "NODEMANAGER", - "type" : "SLAVE", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "HDFS", - "components" : [ - { - "name" : "NAMENODE", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "DATANODE", - "type" : "SLAVE", - "cardinality" : "1+" - }, - { - "name" : "SECONDARY_NAMENODE", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "HDFS_CLIENT", - "type" : "CLIENT", - "cardinality" : "1+" - }, - { - "name" : "JOURNALNODE", - "type" : "MASTER", - "cardinality" : "1+" - }, - { - "name" : "ZKFC", - "type" : "MASTER", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "MAPREDUCE2", - "components" : [ - { - "name" : "HISTORYSERVER", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "MAPREDUCE2_CLIENT", - "type" : "CLIENT", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "PIG", - "components" : [ - { - "name" : "PIG", - "type" : "CLIENT", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "HIVE", - "components" : [ - { - "name" : "HIVE_SERVER", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "HIVE_METASTORE", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "HIVE_CLIENT", - "type" : "CLIENT", - "cardinality" : "1+" - }, - { - "name" : "MYSQL_SERVER", - "type" : "MASTER", - "cardinality" : "1" - } - ], - "configurations" : [ - ] - }, - { - "name" : "HCATALOG", - "components" : [ - { - "name" : "HCAT", - "type" : "CLIENT", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "WEBHCAT", - "components" : [ - { - "name" : "WEBHCAT_SERVER", - "type" : "MASTER", - "cardinality" : "1" - } - ], - "configurations" : [ - ] - }, - { - "name" : "HBASE", - "components" : [ - { - "name" : "HBASE_MASTER", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "HBASE_REGIONSERVER", - "type" : "SLAVE", - "cardinality" : "1+" - }, - { - "name" : "HBASE_CLIENT", - "type" : "CLIENT", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "ZOOKEEPER", - "components" : [ - { - "name" : "ZOOKEEPER_SERVER", - "type" : "MASTER", - "cardinality" : "1+" - }, - { - "name" : "ZOOKEEPER_CLIENT", - "type" : "CLIENT", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "OOZIE", - "components" : [ - { - "name" : "OOZIE_SERVER", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "OOZIE_CLIENT", - "type" : "CLIENT", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "SQOOP", - "components" : [ - { - "name" : "SQOOP", - "type" : "CLIENT", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "GANGLIA", - "components" : [ - { - "name" : "GANGLIA_SERVER", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "GANGLIA_MONITOR", - "type" : "SLAVE", - "cardinality" : "1+" - } - ], - "configurations" : [ - ] - }, - { - "name" : "NAGIOS", - "components" : [ - { - "name" : "NAGIOS_SERVER", - "type" : "MASTER", - "cardinality" : "1" - } - ], - "configurations" : [ - ] - }, - { - "name" : "AMBARI", - "components" : [ - { - "name" : "AMBARI_SERVER", - "type" : 
"MASTER", - "cardinality" : "1" - }, - { - "name" : "AMBARI_AGENT", - "type" : "SLAVE", - "cardinality" : "1+" - } - ], - "configurations" : [ - ], - "users" : [ - { - "name" : "admin", - "password" : "admin", - "groups" : [ - "admin" - ] - } - ] - }, - { - "name" : "HUE", - "components" : [ - { - "name" : "HUE", - "type" : "CLIENT", - "cardinality" : "1" - } - ], - "configurations" : [ - ] - } - ], - "host_role_mappings" : [ - { - "name" : "MASTER", - "components" : [ - { "name" : "RESOURCEMANAGER" }, - { "name" : "NAMENODE" }, - { "name" : "HISTORYSERVER" }, - { "name" : "SECONDARY_NAMENODE" }, - { "name" : "GANGLIA_SERVER" }, - { "name" : "NAGIOS_SERVER" }, - { "name" : "AMBARI_SERVER" }, - { "name" : "ZOOKEEPER_SERVER" } - ], - "hosts" : [ - { - "cardinality" : "1", - "default_count" : 1 - } - ] - }, - { - "name" : "SLAVE", - "components" : [ - { "name" : "NODEMANAGER" }, - { "name" : "DATANODE" }, - { "name" : "YARN_CLIENT" }, - { "name" : "HDFS_CLIENT" }, - { "name" : "MAPREDUCE2_CLIENT" } - ], - "hosts" : [ - { - "cardinality" : "1+", - "default_count" : 2 - } - ] - } - ], - "configurations" : [ - { - "name" : "global", - "properties" : [ - { - "name" : "java64_home", - "value" : "/usr/lib/jvm/java-openjdk" }, - { - "name": "security_enabled", - "value": "false" - }, - { - "name": "hbase_pid_dir", - "value": "/var/run/hbase" - }, - { - "name": "proxyuser_group", - "value": "users" - }, - { - "name": "zk_user", - "value": "zookeeper" - }, - { - "name": "rrdcached_base_dir", - "value": "/var/lib/ganglia/rrds" - }, - { - "name": "zk_data_dir", - "value": "/hadoop/zookeeper" - }, - { - "name": "hbase_regionserver_heapsize", - "value": "1024m" - }, - { - "name": "oozie_pid_dir", - "value": "/var/run/oozie" - }, - { - "name": "hive_pid_dir", - "value": "/var/run/hive" - }, - { - "name": "dtnode_heapsize", - "value": "1024m" - }, - { - "name": "hcat_log_dir", - "value": "/var/log/webhcat" - }, - { - "name": "oozie_hostname", - "value": "%OOZIE_HOST%" - }, - { - "name": "smokeuser", - "value": "ambari-qa" - }, - { - "name": "hive_ambari_database", - "value": "MySQL" - }, - { - "name": "gmetad_user", - "value": "nobody" - }, - { - "name": "namenode_heapsize", - "value": "1024m" - }, - { - "name": "oozie_log_dir", - "value": "/var/log/oozie" - }, - { - "name": "hive_jdbc_driver", - "value": "com.mysql.jdbc.Driver" - }, - { - "name": "oozie_user", - "value": "oozie" - }, - { - "name": "hcat_conf_dir", - "value": "" - }, - { - "name": "oozie_data_dir", - "value": "/hadoop/oozie/data" - }, - { - "name": "ganglia_runtime_dir", - "value": "/var/run/ganglia/hdp" - }, - { - "name": "hcat_user", - "value": "hcat" - }, - { - "name": "lzo_enabled", - "value": "true" - }, - { - "name": "namenode_opt_maxnewsize", - "value": "200m" - }, - { - "name": "syncLimit", - "value": "5" - }, - { - "name": "hive_user", - "value": "hive" - }, - { - "name": "hdfs_log_dir_prefix", - "value": "/var/log/hadoop" - }, - { - "name": "hive_hostname", - "value": "%HIVE_HOST%" - }, - { - "name": "mapred_pid_dir_prefix", - "value": "/var/run/hadoop-mapreduce" - }, - { - "name": "hive_metastore_port", - "value": "9083" - }, - { - "name": "hbase_master_heapsize", - "value": "1024m" - }, - { - "name": "clientPort", - "value": "2181" - }, - { - "name": "yarn_user", - "value": "yarn" - }, - { - "name": "yarn_pid_dir_prefix", - "value": "/var/run/hadoop-yarn" - }, - { - "name": "mapred_user", - "value": "mapred" - }, - { - "name": "initLimit", - "value": "10" - }, - { - "name": "hive_database_type", - "value": "mysql" - }, - { - 
"name": "oozie_database", - "value": "New Derby Database" - }, - { - "name": "hbase_log_dir", - "value": "/var/log/hbase" - }, - { - "name": "nagios_user", - "value": "nagios" - }, - { - "name": "yarn_heapsize", - "value": "1024" - }, - { - "name": "gmond_user", - "value": "nobody" - }, - { - "name": "tickTime", - "value": "2000" - }, - { - "name": "nagios_contact", - "value": "admin@some.com" - }, - { - "name": "hive_database", - "value": "New MySQL Database" - }, - { - "name": "nagios_web_password", - "value": "admin" - }, - { - "name": "hcat_pid_dir", - "value": "/var/run/webhcat" - }, - { - "name": "oozie_derby_database", - "value": "Derby" - }, - { - "name": "mapred_log_dir_prefix", - "value": "/var/log/hadoop-mapreduce" - }, - { - "name": "nagios_group", - "value": "nagios" - }, - { - "name": "zk_log_dir", - "value": "/var/log/zookeeper" - }, - { - "name": "hbase_user", - "value": "hbase" - }, - { - "name": "oozie_database_type", - "value": "derby" - }, - { - "name": "webhcat_user", - "value": "hcat" - }, - { - "name": "nodemanager_heapsize", - "value": "1024" - }, - { - "name": "oozie_jdbc_driver", - "value": "org.apache.derby.jdbc.EmbeddedDriver" - }, - { - "name": "hdfs_user", - "value": "hdfs" - }, - { - "name": "hive_log_dir", - "value": "/var/log/hive" - }, - { - "name": "user_group", - "value": "hadoop" - }, - { - "name": "namenode_opt_newsize", - "value": "200m" - }, - { - "name": "nagios_web_login", - "value": "nagiosadmin" - }, - { - "name": "resourcemanager_heapsize", - "value": "1024" - }, - { - "name": "yarn_log_dir_prefix", - "value": "/var/log/hadoop-yarn" - }, - { - "name": "hadoop_heapsize", - "value": "1024" - }, - { - "name": "hadoop_pid_dir_prefix", - "value": "/var/run/hadoop" - }, - { - "name": "hive_aux_jars_path", - "value": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar" - }, - { - "name": "zk_pid_dir", - "value": "/var/run/zookeeper" - } - ] - }, - { - "name" : "core-site", - "properties" : [ - { - "name": "io.serializations", - "value": "org.apache.hadoop.io.serializer.WritableSerialization" - }, - { - "name": "hadoop.proxyuser.hcat.groups", - "value": "users" - }, - { - "name": "hadoop.proxyuser.hcat.hosts", - "value": "%WEBHCAT_HOST%" - }, - { - "name": "fs.trash.interval", - "value": "360" - }, - { - "name": "hadoop.proxyuser.oozie.groups", - "value": "hadoop" - }, - { - "name": "hadoop.proxyuser.hive.groups", - "value": "users" - }, - { - "name": "hadoop.security.authentication", - "value": "simple" - }, - { - "name": "io.compression.codecs", - "value": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec" - }, - { - "name": "mapreduce.jobtracker.webinterface.trusted", - "value": "false" - }, - { - "name": "hadoop.security.authorization", - "value": "false" - }, - { - "name": "ipc.client.connection.maxidletime", - "value": "30000" - }, - { - "name": "ipc.client.connect.max.retries", - "value": "50" - }, - { - "name": "hadoop.security.auth_to_local", - "value": "\n RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n DEFAULT" - }, - { - "name": "io.file.buffer.size", - "value": "131072" - }, - { - "name": "fs.AbstractFileSystem.glusterfs.impl", - "value": "null" - }, - { - "name": "hadoop.proxyuser.hive.hosts", - "value": "%HIVE_HOST%" - }, - { - "name": "ipc.client.idlethreshold", - "value": "8000" - }, - { - "name": "hadoop.proxyuser.oozie.hosts", - "value": "%OOZIE_HOST%" - }, - { - "name": 
"fs.defaultFS", - "value": "hdfs://%NN_HOST%:8020" - } - ] - }, - { - "name" : "mapred-site", - "properties" : [ - { - "name": "mapreduce.jobhistory.address", - "value": "%HS_HOST%:10020" - }, - { - "name": "mapreduce.cluster.administrators", - "value": " hadoop" - }, - { - "name": "mapreduce.reduce.input.buffer.percent", - "value": "0.0" - }, - { - "name": "mapreduce.output.fileoutputformat.compress", - "value": "false" - }, - { - "name": "mapreduce.framework.name", - "value": "yarn" - }, - { - "name": "mapreduce.map.speculative", - "value": "false" - }, - { - "name": "mapreduce.reduce.shuffle.merge.percent", - "value": "0.66" - }, - { - "name": "yarn.app.mapreduce.am.resource.mb", - "value": "1024" - }, - { - "name": "mapreduce.map.java.opts", - "value": "-Xmx273m" - }, - { - "name": "mapreduce.application.classpath", - "value": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*" - }, - { - "name": "mapreduce.job.reduce.slowstart.completedmaps", - "value": "0.05" - }, - { - "name": "mapreduce.output.fileoutputformat.compress.type", - "value": "BLOCK" - }, - { - "name": "mapreduce.reduce.speculative", - "value": "false" - }, - { - "name": "mapreduce.reduce.java.opts", - "value": "-Xmx546m" - }, - { - "name": "mapreduce.am.max-attempts", - "value": "2" - }, - { - "name": "yarn.app.mapreduce.am.admin-command-opts", - "value": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN" - }, - { - "name": "mapreduce.reduce.log.level", - "value": "INFO" - }, - { - "name": "mapreduce.map.sort.spill.percent", - "value": "0.7" - }, - { - "name": "mapreduce.task.timeout", - "value": "300000" - }, - { - "name": "mapreduce.map.memory.mb", - "value": "341" - }, - { - "name": "mapreduce.task.io.sort.factor", - "value": "100" - }, - { - "name": "mapreduce.jobhistory.intermediate-done-dir", - "value": "/mr-history/tmp" - }, - { - "name": "mapreduce.reduce.memory.mb", - "value": "683" - }, - { - "name": "yarn.app.mapreduce.am.log.level", - "value": "INFO" - }, - { - "name": "mapreduce.map.log.level", - "value": "INFO" - }, - { - "name": "mapreduce.shuffle.port", - "value": "13562" - }, - { - "name": "mapreduce.admin.user.env", - "value": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? 
-eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`" - }, - { - "name": "mapreduce.map.output.compress", - "value": "false" - }, - { - "name": "mapreduce.jobhistory.webapp.address", - "value": "%HS_HOST%:19888" - }, - { - "name": "mapreduce.reduce.shuffle.parallelcopies", - "value": "30" - }, - { - "name": "mapreduce.reduce.shuffle.input.buffer.percent", - "value": "0.7" - }, - { - "name": "yarn.app.mapreduce.am.staging-dir", - "value": "/user" - }, - { - "name": "mapreduce.jobhistory.done-dir", - "value": "/mr-history/done" - }, - { - "name": "mapreduce.admin.reduce.child.java.opts", - "value": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN" - }, - { - "name": "mapreduce.task.io.sort.mb", - "value": "136" - }, - { - "name": "yarn.app.mapreduce.am.command-opts", - "value": "-Xmx546m" - }, - { - "name": "mapreduce.admin.map.child.java.opts", - "value": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN" - } - ] - }, - { - "name" : "yarn-site", - "properties" : [ - { - "name": "yarn.nodemanager.disk-health-checker.min-healthy-disks", - "value": "0.25" - }, - { - "name": "yarn.nodemanager.container-executor.class", - "value": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor" - }, - { - "name": "yarn.nodemanager.local-dirs", - "value": "/mnt/hadoop/yarn/local" - }, - { - "name": "yarn.resourcemanager.resource-tracker.address", - "value": "%RM_HOST%:8025" - }, - { - "name": "yarn.nodemanager.remote-app-log-dir-suffix", - "value": "logs" - }, - { - "name": "yarn.resourcemanager.hostname", - "value": "%RM_HOST%" - }, - { - "name": "yarn.nodemanager.health-checker.script.timeout-ms", - "value": "60000" - }, - { - "name": "yarn.resourcemanager.scheduler.class", - "value": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler" - }, - { - "name": "yarn.nodemanager.resource.memory-mb", - "value": "16384" - }, - { - "name": "yarn.scheduler.minimum-allocation-mb", - "value": "683" - }, - { - "name": "yarn.resourcemanager.address", - "value": "%RM_HOST%:8050" - }, - { - "name": "yarn.resourcemanager.scheduler.address", - "value": "%RM_HOST%:8030" - }, - { - "name": "yarn.log-aggregation.retain-seconds", - "value": "2592000" - }, - { - "name": "yarn.scheduler.maximum-allocation-mb", - "value": "2048" - }, - { - "name": "yarn.log-aggregation-enable", - "value": "true" - }, - { - "name": "yarn.nodemanager.address", - "value": "0.0.0.0:45454" - }, - { - "name": "yarn.acl.enable", - "value": "true" - }, - { - "name": "yarn.nodemanager.container-monitor.interval-ms", - "value": "3000" - }, - { - "name": "yarn.nodemanager.log-aggregation.compression-type", - "value": "gz" - }, - { - "name": "yarn.nodemanager.log.retain-second", - "value": "604800" - }, - { - "name": "yarn.nodemanager.delete.debug-delay-sec", - "value": "0" - }, - { - "name": "yarn.nodemanager.linux-container-executor.group", - "value": "hadoop" - }, - { - "name": "yarn.nodemanager.health-checker.interval-ms", - "value": "135000" - }, - { - "name": "yarn.resourcemanager.am.max-attempts", - "value": "2" - }, - { - "name": "yarn.nodemanager.admin-env", - "value": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX" - }, - { - "name": "yarn.nodemanager.aux-services", - "value": "mapreduce_shuffle" - }, - { - "name": "yarn.nodemanager.vmem-check-enabled", - "value": "false" - }, - { - "name": "yarn.nodemanager.vmem-pmem-ratio", - "value": "2.1" - }, - { - "name": "yarn.nodemanager.aux-services.mapreduce_shuffle.class", - "value": "org.apache.hadoop.mapred.ShuffleHandler" - 
}, - { - "name": "yarn.resourcemanager.webapp.address", - "value": "%RM_HOST%:8088" - }, - { - "name": "yarn.nodemanager.log-dirs", - "value": "/mnt/hadoop/yarn/log" - }, - { - "name": "yarn.nodemanager.remote-app-log-dir", - "value": "/app-logs" - }, - { - "name": "yarn.log.server.url", - "value": "http://%RM_HOST%:19888/jobhistory/logs" - }, - { - "name": "yarn.application.classpath", - "value": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*" - }, - { - "name": "yarn.resourcemanager.admin.address", - "value": "%RM_HOST%:8141" - }, - { - "name": "yarn.admin.acl", - "value": "*" - } - ] - }, - { - "name" : "hdfs-site", - "properties" : [ - { - "name": "dfs.namenode.avoid.write.stale.datanode", - "value": "true" - }, - { - "name": "dfs.webhdfs.enabled", - "value": "true" - }, - { - "name": "dfs.block.access.token.enable", - "value": "true" - }, - { - "name": "dfs.support.append", - "value": "true" - }, - { - "name": "dfs.datanode.address", - "value": "0.0.0.0:50010" - }, - { - "name": "dfs.cluster.administrators", - "value": " hdfs" - }, - { - "name": "dfs.datanode.balance.bandwidthPerSec", - "value": "6250000" - }, - { - "name": "dfs.namenode.safemode.threshold-pct", - "value": "1.0f" - }, - { - "name": "dfs.namenode.checkpoint.edits.dir", - "value": "${dfs.namenode.checkpoint.dir}" - }, - { - "name": "dfs.permissions.enabled", - "value": "true" - }, - { - "name": "fs.checkpoint.size", - "value": "67108864" - }, - { - "name": "dfs.client.read.shortcircuit", - "value": "true" - }, - { - "name": "dfs.namenode.https-address", - "value": "%NN_HOST%:50470" - }, - { - "name": "dfs.journalnode.edits.dir", - "value": "/grid/0/hdfs/journal" - }, - { - "name": "dfs.blocksize", - "value": "134217728" - }, - { - "name": "dfs.datanode.max.transfer.threads", - "value": "1024" - }, - { - "name": "dfs.heartbeat.interval", - "value": "3" - }, - { - "name": "dfs.replication", - "value": "3" - }, - { - "name": "dfs.namenode.handler.count", - "value": "100" - }, - { - "name": "dfs.namenode.checkpoint.dir", - "value": "/mnt/hadoop/hdfs/namesecondary" - }, - { - "name": "fs.permissions.umask-mode", - "value": "022" - }, - { - "name": "dfs.datanode.http.address", - "value": "0.0.0.0:50075" - }, - { - "name": "dfs.datanode.ipc.address", - "value": "0.0.0.0:8010" - }, - { - "name": "dfs.datanode.data.dir", - "value": "/mnt/hadoop/hdfs/data" - }, - { - "name": "dfs.namenode.http-address", - "value": "%NN_HOST%:50070" - }, - { - "name": "dfs.blockreport.initialDelay", - "value": "120" - }, - { - "name": "dfs.datanode.failed.volumes.tolerated", - "value": "0" - }, - { - "name": "dfs.namenode.accesstime.precision", - "value": "0" - }, - { - "name": "dfs.namenode.write.stale.datanode.ratio", - "value": "1.0f" - }, - { - "name": "dfs.namenode.secondary.http-address", - "value": "%SNN_HOST%:50090" - }, - { - "name": "dfs.namenode.stale.datanode.interval", - "value": "30000" - }, - { - "name": "dfs.datanode.du.reserved", - "value": "1073741824" - }, - { - "name": "dfs.client.read.shortcircuit.streams.cache.size", - "value": "4096" - }, - { - "name": "dfs.hosts.exclude", - "value": "/etc/hadoop/conf/dfs.exclude" - }, - { - "name": "dfs.permissions.superusergroup", - "value": "hdfs" - }, - { - "name": "dfs.https.port", - "value": "50470" - }, - { - "name": "dfs.journalnode.http-address", - "value": "0.0.0.0:8480" - }, - { - "name": 
"dfs.domain.socket.path", - "value": "/var/lib/hadoop-hdfs/dn_socket" - }, - { - "name": "dfs.namenode.avoid.read.stale.datanode", - "value": "true" - }, - { - "name": "dfs.namenode.checkpoint.period", - "value": "21600" - }, - { - "name": "dfs.namenode.checkpoint.txns", - "value": "1000000" - }, - { - "name": "dfs.datanode.data.dir.perm", - "value": "750" - }, - { - "name": "dfs.namenode.name.dir.restore", - "value": "true" - }, - { - "name": "dfs.replication.max", - "value": "50" - }, - { - "name": "dfs.namenode.name.dir", - "value": "/mnt/hadoop/hdfs/namenode" - } - ] - }, - { - "name" : "capacity-scheduler", - "properties" : [ - { - "name": "yarn.scheduler.capacity.node-locality-delay", - "value": "40" - }, - { - "name": "yarn.scheduler.capacity.root.capacity", - "value": "100" - }, - { - "name": "yarn.scheduler.capacity.root.acl_administer_queues", - "value": "*" - }, - { - "name": "yarn.scheduler.capacity.maximum-am-resource-percent", - "value": "0.2" - }, - { - "name": "yarn.scheduler.capacity.maximum-applications", - "value": "10000" - }, - { - "name": "yarn.scheduler.capacity.root.default.user-limit-factor", - "value": "1" - }, - { - "name": "yarn.scheduler.capacity.root.unfunded.capacity", - "value": "50" - }, - { - "name": "yarn.scheduler.capacity.root.default.acl_submit_jobs", - "value": "*" - }, - { - "name": "yarn.scheduler.capacity.root.default.state", - "value": "RUNNING" - }, - { - "name": "yarn.scheduler.capacity.root.default.capacity", - "value": "100" - }, - { - "name": "yarn.scheduler.capacity.root.default.maximum-capacity", - "value": "100" - }, - { - "name": "yarn.scheduler.capacity.root.queues", - "value": "default" - }, - { - "name": "yarn.scheduler.capacity.root.default.acl_administer_jobs", - "value": "*" - } - ] - }, - { - "name" : "hive-site", - "properties" : [ - { - "name": "hive.enforce.sorting", - "value": "true" - }, - { - "name": "javax.jdo.option.ConnectionPassword", - "value": "hive" - }, - { - "name": "javax.jdo.option.ConnectionDriverName", - "value": "com.mysql.jdbc.Driver" - }, - { - "name": "hive.metastore.cache.pinobjtypes", - "value": "Table,Database,Type,FieldSchema,Order" - }, - { - "name": "hive.security.metastore.authorization.manager", - "value": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider" - }, - { - "name": "fs.file.impl.disable.cache", - "value": "true" - }, - { - "name": "hive.auto.convert.join.noconditionaltask", - "value": "true" - }, - { - "name": "hive.map.aggr", - "value": "true" - }, - { - "name": "hive.security.authorization.enabled", - "value": "false" - }, - { - "name": "hive.optimize.index.filter", - "value": "true" - }, - { - "name": "hive.optimize.bucketmapjoin", - "value": "true" - }, - { - "name": "hive.metastore.uris", - "value": "thrift://%HIVE_METASTORE_HOST%:9083" - }, - { - "name": "hive.mapjoin.bucket.cache.size", - "value": "10000" - }, - { - "name": "hive.auto.convert.join.noconditionaltask.size", - "value": "1000000000" - }, - { - "name": "hive.vectorized.execution.enabled", - "value": "false" - }, - { - "name": "javax.jdo.option.ConnectionUserName", - "value": "hive" - }, - { - "name": "hive.optimize.bucketmapjoin.sortedmerge", - "value": "true" - }, - { - "name": "hive.optimize.reducededuplication", - "value": "true" - }, - { - "name": "hive.metastore.warehouse.dir", - "value": "/apps/hive/warehouse" - }, - { - "name": "hive.metastore.client.socket.timeout", - "value": "60" - }, - { - "name": "hive.optimize.reducededuplication.min.reducer", - "value": "1" - }, - { - "name": 
"hive.semantic.analyzer.factory.impl", - "value": "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory" - }, - { - "name": "hive.auto.convert.join", - "value": "true" - }, - { - "name": "hive.enforce.bucketing", - "value": "true" - }, - { - "name": "hive.mapred.reduce.tasks.speculative.execution", - "value": "false" - }, - { - "name": "hive.security.authenticator.manager", - "value": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator" - }, - { - "name": "javax.jdo.option.ConnectionURL", - "value": "jdbc:mysql://%HIVE_MYSQL_HOST%/hive?createDatabaseIfNotExist=true" - }, - { - "name": "hive.auto.convert.sortmerge.join", - "value": "true" - }, - { - "name": "fs.hdfs.impl.disable.cache", - "value": "true" - }, - { - "name": "hive.security.authorization.manager", - "value": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider" - }, - { - "name": "ambari.hive.db.schema.name", - "value": "hive" - }, - { - "name": "hive.metastore.execute.setugi", - "value": "true" - }, - { - "name": "hive.auto.convert.sortmerge.join.noconditionaltask", - "value": "true" - }, - { - "name": "hive.server2.enable.doAs", - "value": "true" - }, - { - "name": "hive.optimize.mapjoin.mapreduce", - "value": "true" - } - ] - }, - { - "name" : "webhcat-site", - "properties" : [ - { - "name": "templeton.pig.path", - "value": "pig.tar.gz/pig/bin/pig" - }, - { - "name": "templeton.exec.timeout", - "value": "60000" - }, - { - "name": "templeton.hadoop.conf.dir", - "value": "/etc/hadoop/conf" - }, - { - "name": "templeton.jar", - "value": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar" - }, - { - "name": "templeton.zookeeper.hosts", - "value": "%ZOOKEEPER_HOSTS%" - }, - { - "name": "templeton.port", - "value": "50111" - }, - { - "name": "templeton.hive.properties", - "value": "hive.metastore.local=false,hive.metastore.uris=thrift://%HIVE_METASTORE_HOST%:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse" - }, - { - "name": "templeton.storage.class", - "value": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage" - }, - { - "name": "templeton.hive.archive", - "value": "hdfs:///apps/webhcat/hive.tar.gz" - }, - { - "name": "templeton.streaming.jar", - "value": "hdfs:///apps/webhcat/hadoop-streaming.jar" - }, - { - "name": "templeton.override.enabled", - "value": "false" - }, - { - "name": "templeton.libjars", - "value": "/usr/lib/zookeeper/zookeeper.jar" - }, - { - "name": "templeton.hadoop", - "value": "/usr/bin/hadoop" - }, - { - "name": "templeton.hive.path", - "value": "hive.tar.gz/hive/bin/hive" - }, - { - "name": "templeton.hcat", - "value": "/usr/bin/hcat" - }, - { - "name": "templeton.pig.archive", - "value": "hdfs:///apps/webhcat/pig.tar.gz" - } - ] - }, - { - "name" : "hbase-site", - "properties" : [ - { - "name": "hbase.hstore.flush.retries.number", - "value": "120" - }, - { - "name": "hbase.client.keyvalue.maxsize", - "value": "10485760" - }, - { - "name": "hbase.hstore.compactionThreshold", - "value": "3" - }, - { - "name": "hbase.rootdir", - "value": "hdfs://%NN_HOST%:8020/apps/hbase/data" - }, - { - "name": "hbase.regionserver.handler.count", - "value": "60" - }, - { - "name": "hbase.hregion.majorcompaction", - "value": "86400000" - }, - { - "name": "hbase.hregion.memstore.block.multiplier", - "value": "2" - }, - { - "name": "hbase.hregion.memstore.flush.size", - "value": "134217728" - }, - { - "name": "hbase.superuser", - "value": "hbase" - }, - { - "name": "hbase.zookeeper.property.clientPort", - 
"value": "2181" - }, - { - "name": "hbase.regionserver.global.memstore.upperLimit", - "value": "0.4" - }, - { - "name": "zookeeper.session.timeout", - "value": "30000" - }, - { - "name": "hbase.tmp.dir", - "value": "/hadoop/hbase" - }, - { - "name": "hbase.hregion.max.filesize", - "value": "10737418240" - }, - { - "name": "hfile.block.cache.size", - "value": "0.40" - }, - { - "name": "hbase.security.authentication", - "value": "simple" - }, - { - "name": "hbase.defaults.for.version.skip", - "value": "true" - }, - { - "name": "hbase.zookeeper.quorum", - "value": "%ZOOKEEPER_HOSTS%" - }, - { - "name": "zookeeper.znode.parent", - "value": "/hbase-unsecure" - }, - { - "name": "hbase.hstore.blockingStoreFiles", - "value": "10" - }, - { - "name": "hbase.regionserver.global.memstore.lowerLimit", - "value": "0.38" - }, - { - "name": "hbase.security.authorization", - "value": "false" - }, - { - "name": "hbase.cluster.distributed", - "value": "true" - }, - { - "name": "hbase.hregion.memstore.mslab.enabled", - "value": "true" - }, - { - "name": "hbase.client.scanner.caching", - "value": "100" - }, - { - "name": "hbase.zookeeper.useMulti", - "value": "true" - }, - { "name" : "hbase.master.info.port", - "value" : "60010" - } - ] - }, - { - "name" : "oozie-site", - "properties" : [ - { - "name": "oozie.service.PurgeService.purge.interval", - "value": "3600" - }, - { - "name": "oozie.service.CallableQueueService.queue.size", - "value": "1000" - }, - { - "name": "oozie.service.SchemaService.wf.ext.schemas", - "value": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd" - }, - { - "name": "oozie.service.JPAService.jdbc.url", - "value": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true" - }, - { - "name": "oozie.service.HadoopAccessorService.nameNode.whitelist", - "value": " " - }, - { - "name": "use.system.libpath.for.mapreduce.and.pig.jobs", - "value": "false" - }, - { - "name": "oozie.service.coord.push.check.requeue.interval", - "value": "30000" - }, - { - "name": "oozie.credentials.credentialclasses", - "value": "hcat=org.apache.oozie.action.hadoop.HCatCredentials" - }, - { - "name": "oozie.service.JPAService.create.db.schema", - "value": "false" - }, - { - "name": "oozie.authentication.kerberos.name.rules", - "value": "\n RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n DEFAULT" - }, - { - "name": "oozie.service.ActionService.executor.ext.classes", - "value": "\n org.apache.oozie.action.email.EmailActionExecutor,\n org.apache.oozie.action.hadoop.HiveActionExecutor,\n org.apache.oozie.action.hadoop.ShellActionExecutor,\n org.apache.oozie.action.hadoop.SqoopActionExecutor,\n org.apache.oozie.action.hadoop.DistcpActionExecutor" - }, - { - "name": "oozie.service.HadoopAccessorService.jobTracker.whitelist", - "value": " " - }, - { - "name": "oozie.service.JPAService.jdbc.password", - "value": "oozie" - }, - { - "name": "oozie.service.coord.normal.default.timeout", - "value": "120" - }, - { - "name": "oozie.service.AuthorizationService.security.enabled", - "value": "false" - }, - { - "name": "oozie.service.JPAService.pool.max.active.conn", - "value": "10" - }, - { - "name": "oozie.service.PurgeService.older.than", - "value": "30" - }, - { - "name": 
"oozie.db.schema.name", - "value": "oozie" - }, - { - "name": "oozie.service.HadoopAccessorService.hadoop.configurations", - "value": "*=/etc/hadoop/conf" - }, - { - "name": "oozie.base.url", - "value": "http://%OOZIE_HOST%:11000/oozie" - }, - { - "name": "oozie.service.CallableQueueService.callable.concurrency", - "value": "3" - }, - { - "name": "oozie.service.JPAService.jdbc.username", - "value": "oozie" - }, - { - "name": "oozie.service.CallableQueueService.threads", - "value": "10" - }, - { - "name": "oozie.services.ext", - "value": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService" - }, - { - "name": "oozie.systemmode", - "value": "NORMAL" - }, - { - "name": "oozie.service.WorkflowAppService.system.libpath", - "value": "/user/${user.name}/share/lib" - }, - { - "name": "oozie.services", - "value": "\n org.apache.oozie.service.SchedulerService,\n org.apache.oozie.service.InstrumentationService,\n org.apache.oozie.service.CallableQueueService,\n org.apache.oozie.service.UUIDService,\n org.apache.oozie.service.ELService,\n org.apache.oozie.service.AuthorizationService,\n org.apache.oozie.service.UserGroupInformationService,\n org.apache.oozie.service.HadoopAccessorService,\n org.apache.oozie.service.URIHandlerService,\n org.apache.oozie.service.MemoryLocksService,\n org.apache.oozie.service.DagXLogInfoService,\n org.apache.oozie.service.SchemaService,\n org.apache.oozie.service.LiteWorkflowAppService,\n org.apache.oozie.service.JPAService,\n org.apache.oozie.service.StoreService,\n org.apache.oozie.service.CoordinatorStoreService,\n org.apache.oozie.service.SLAStoreService,\n org.apache.oozie.service.DBLiteWorkflowStoreService,\n org.apache.oozie.service.CallbackService,\n org.apache.oozie.service.ActionService,\n org.apache.oozie.service.ActionCheckerService,\n org.apache.oozie.service.RecoveryService,\n org.apache.oozie.service.PurgeService,\n org.apache.oozie.service.CoordinatorEngineService,\n org.apache.oozie.service.BundleEngineService,\n org.apache.oozie.service.DagEngineService,\n org.apache.oozie.service.CoordMaterializeTriggerService,\n org.apache.oozie.service.StatusTransitService,\n org.apache.oozie.service.PauseTransitService,\n org.apache.oozie.service.GroupsService,\n org.apache.oozie.service.ProxyUserService" - }, - { - "name": "oozie.service.URIHandlerService.uri.handlers", - "value": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler" - }, - { - "name": "oozie.authentication.type", - "value": "simple" - }, - { - "name": "oozie.service.JPAService.jdbc.driver", - "value": "org.apache.derby.jdbc.EmbeddedDriver" - }, - { - "name": "oozie.system.id", - "value": "oozie-${user.name}" - } - ] - }, - { - "name" : "ambari", - "properties" : [ - { "name" : "server.port", "value" : "8080" }, - { "name" : "rpm", "value" : "http://s3.amazonaws.com/public-repo-1.hortonworks.com/ambari/centos6/1.x/updates/1.6.0/ambari.repo" } - ] - }, - { - "name" : "hue-ini", - "properties" : [ - { "name": "desktop/kredentials_dir", "value": "'/tmp'" }, - { "name": "desktop/send_dbug_messages", "value": "1" }, - { "name": "desktop/database_logging", "value": "0" }, - { "name": "desktop/secret_key", "value": "secretkeysecretkeysecretkeysecretkey" }, - { "name": "desktop/http_host", "value": "0.0.0.0" }, - { "name": "desktop/http_port", "value": "8000" }, - { "name": "desktop/time_zone", "value": "America/Los_Angeles" }, - { "name": "desktop/django_debug_mode", "value": "1" }, - { "name": 
"desktop/http_500_debug_mode", "value": "1" }, - { "name": "desktop/django_server_email", "value": "", "example": "'hue@localhost.localdomain'" }, - { "name": "desktop/django_email_backend", "value": "", "example": "django.core.mail.backends.smtp.EmailBackend" }, - { "name": "desktop/use_cherrypy_server", "value": "", "example": "false" }, - { "name": "desktop/server_user", "value": "hue" }, - { "name": "desktop/server_group", "value": "hadoop" }, - { "name": "desktop/enable_server", "value": "yes" }, - { "name": "desktop/cherrypy_server_threads", "value": "", "example": "10" }, - { "name": "desktop/ssl_certificate", "value": "", "example": "" }, - { "name": "desktop/ssl_private_key", "value": "", "example": "" }, - { "name": "desktop/default_site_encoding", "value": "", "example": "utf-8" }, - { "name": "desktop/x_frame_options", "value": "'ALLOWALL'" }, - { "name": "desktop/auth/backend", "value": "desktop.auth.backend.AllowFirstUserDjangoBackend" }, - { "name": "desktop/auth/pam_service", "value": "", "example": "login" }, - { "name": "desktop/auth/remote_user_header", "value": "", "example": "HTTP_REMOTE_USER" }, - { "name": "desktop/ldap/base_dn", "value": "", "example": "'DC=mycompany,DC=com'" }, - { "name": "desktop/ldap/nt_domain", "value": "", "example": "mycompany.com" }, - { "name": "desktop/ldap/ldap_url", "value": "", "example": "ldap://auth.mycompany.com" }, - { "name": "desktop/ldap/ldap_cert", "value": "", "example": "" }, - { "name": "desktop/ldap/bind_dn", "value": "", "example": "'CN=ServiceAccount,DC=mycompany,DC=com'" }, - { "name": "desktop/ldap/bind_password", "value": "", "example": "" }, - { "name": "desktop/ldap/ldap_username_pattern", "value": "", "example": "'uid=,ou=People,dc=mycompany,dc=com'" }, - { "name": "desktop/ldap/create_users_on_login", "value": "", "example": "true" }, - { "name": "desktop/ldap/users/user_filter", "value": "", "example": "'objectclass=*'" }, - { "name": "desktop/ldap/users/user_name_attr", "value": "", "example": "sAMAccountName" }, - { "name": "desktop/ldap/users/groups/group_filter", "value": "", "example": "'objectclass=*'" }, - { "name": "desktop/ldap/users/groups/group_name_attr", "value": "", "example": "cn" }, - { "name": "desktop/database/engine", "value": "sqlite3" }, - { "name": "desktop/database/name", "value": "/var/lib/hue/desktop.db" }, - { "name": "desktop/database/host", "value": "", "example": "" }, - { "name": "desktop/database/port", "value": "", "example": "" }, - { "name": "desktop/database/user", "value": "", "example": "" }, - { "name": "desktop/database/password", "value": "", "example": "" }, - { "name": "desktop/smtp/host", "value": "localhost" }, - { "name": "desktop/smtp/port", "value": "25" }, - { "name": "desktop/smtp/user", "value": "" }, - { "name": "desktop/smtp/password", "value": "" }, - { "name": "desktop/smtp/tls", "value": "no" }, - { "name": "desktop/smtp/default_from_email", "value": "", "example": "hue@localhost" }, - { "name": "desktop/kerberos/hue_keytab", "value": "", "example": "/etc/security/keytabs/hue.service.keytab" }, - { "name": "desktop/kerberos/hue_principal", "value": "", "example": "hue/IP" }, - { "name": "desktop/kerberos/kinit_path", "value": "", "example": "/usr/bin/kinit" }, - { "name": "desktop/kerberos/reinit_frequency", "value": "", "example": "3600" }, - { "name": "desktop/kerberos/ccache_path", "value": "", "example": "/tmp/hue_krb5_ccache" }, - { "name": "hadoop/hdfs_clusters/default/fs_defaultfs", "value": "hdfs://%NN_HOST%:8020" }, - { "name": 
"hadoop/hdfs_clusters/default/webhdfs_url", "value": "http://%NN_HOST%:50070/webhdfs/v1/" }, - { "name": "hadoop/hdfs_clusters/default/security_enabled", "value": "", "example": "true" }, - { "name": "hadoop/hdfs_clusters/default/hadoop_hdfs_home", "value": "/usr/lib/hadoop-hdfs" }, - { "name": "hadoop/hdfs_clusters/default/hadoop_bin", "value": "/usr/bin/hadoop" }, - { "name": "hadoop/hdfs_clusters/default/hadoop_conf_dir", "value": "/etc/hadoop/conf" }, - { "name": "hadoop/yarn_clusters/default/resourcemanager_host", "value": "%RM_HOST%" }, - { "name": "hadoop/yarn_clusters/default/resourcemanager_port", "value": "8050" }, - { "name": "hadoop/yarn_clusters/default/submit_to", "value": "true" }, - { "name": "hadoop/yarn_clusters/default/security_enabled", "value": "", "example": "false" }, - { "name": "hadoop/yarn_clusters/default/hadoop_mapred_home", "value": "/usr/lib/hadoop-mapreduce" }, - { "name": "hadoop/yarn_clusters/default/hadoop_bin", "value": "/usr/bin/hadoop" }, - { "name": "hadoop/yarn_clusters/default/hadoop_conf_dir", "value": "/etc/hadoop/conf" }, - { "name": "hadoop/yarn_clusters/default/resourcemanager_api_url", "value": "http://%RM_HOST%:8088" }, - { "name": "hadoop/yarn_clusters/default/proxy_api_url", "value": "http://%RM_HOST%:8088" }, - { "name": "hadoop/yarn_clusters/default/history_server_api_url", "value": "http://%HS_HOST%:19888" }, - { "name": "hadoop/yarn_clusters/default/node_manager_api_url", "value": "http://%RM_HOST%:8042" }, - { "name": "liboozie/oozie_url", "value": "http://%OOZIE_HOST%:11000/oozie" }, - { "name": "liboozie/security_enabled", "value": "", "example": "true" }, - { "name": "liboozie/remote_deployement_dir", "value": "/user/hue/oozie/deployments" }, - { "name": "oozie/local_data_dir", "value": "", "example": "..../examples" }, - { "name": "oozie/sample_data_dir", "value": "", "example": "...thirdparty/sample_data" }, - { "name": "oozie/remote_data_dir", "value": "", "example": "/user/hue/oozie/workspaces" }, - { "name": "oozie/share_jobs", "value": "", "example": "true" }, - { "name": "oozie/oozie_jobs_count", "value": "", "example": "100" }, - { "name": "beeswax/beeswax_server_host", "value": "%HUE_HOST%" }, - { "name": "beeswax/beeswax_server_port", "value": "8002" }, - { "name": "beeswax/beeswax_meta_server_host", "value": "%HUE_HOST%" }, - { "name": "beeswax/beeswax_meta_server_port", "value": "8003" }, - { "name": "beeswax/hive_home_dir", "value": "/usr/lib/hive" }, - { "name": "beeswax/hive_conf_dir", "value": "/etc/hive/conf" }, - { "name": "beeswax/beeswax_server_conn_timeout", "value": "", "example": "120" }, - { "name": "beeswax/metastore_conn_timeout", "value": "", "example": "10" }, - { "name": "beeswax/beeswax_server_heapsize", "value": "", "example": "1000" }, - { "name": "beeswax/share_saved_queries", "value": "", "example": "true" }, - { "name": "beeswax/server_interface", "value": "", "example": "beeswax" }, - { "name": "jobsub/remote_data_dir", "value": "", "example": "/user/hue/jobsub" }, - { "name": "jobsub/local_data_dir", "value": "", "example": "..../data" }, - { "name": "jobsub/sample_data_dir", "value": "", "example": "...thirdparty/sample_data" }, - { "name": "jobbrowser/share_jobs", "value": "", "example": "true" }, - { "name": "shell/shell_buffer_amount", "value": "", "example": "100" }, - { "name": "shell/shell_delegation_token_dir", "value": "", "example": "/tmp/hue_delegation_tokens" }, - { "name": "shell/shelltypes/flume/nice_name", "value": "", "example": "'Flume Shell'" }, - { "name": 
"shell/shelltypes/flume/command", "value": "", "example": "'/usr/bin/flume shell'" }, - { "name": "shell/shelltypes/flume/help", "value": "", "example": "'The command-line Flume client interface.'" }, - { "name": "shell/shelltypes/pig/nice_name", "value": "'Pig Shell (Grunt)'" }, - { "name": "shell/shelltypes/pig/command", "value": "'/usr/bin/pig -l /dev/null'" }, - { "name": "shell/shelltypes/pig/help", "value": "'The command-line interpreter for Pig'" }, - { "name": "shell/shelltypes/pig/environment/JAVA_HOME/value", "value": "'%JAVA_HOME%'" }, - { "name": "shell/shelltypes/pig/environment/PATH/value" , "value": "'/usr/local/bin:/bin:/usr/bin'" }, - { "name": "shell/shelltypes/hbase/nice_name", "value": "'HBase Shell'" }, - { "name": "shell/shelltypes/hbase/command", "value": "'/usr/bin/hbase shell'" }, - { "name": "shell/shelltypes/hbase/help", "value": "'The command-line HBase client interface.'" }, - { "name": "shell/shelltypes/r_shell/nice_name", "value": "", "example": "'R shell'" }, - { "name": "shell/shelltypes/r_shell/command", "value": "", "example": "'/usr/bin/R'" }, - { "name": "shell/shelltypes/r_shell/help", "value": "", "example": "'The R language for Statistical Computing'" }, - { "name": "shell/shelltypes/r_shell/environment/JAVA_HOME/value", "value": "", "example": "'%JAVA_HOME%'" }, - { "name": "shell/shelltypes/bash/nice_name", "value": "'Bash (Test only!!!)'" }, - { "name": "shell/shelltypes/bash/command", "value": "'/bin/bash'" }, - { "name": "shell/shelltypes/bash/help", "value": "'A shell that does not depend on Hadoop components'" }, - { "name": "useradmin/default_user_group", "value": "hadoop" }, - { "name": "useradmin/default_username", "value": "admin" }, - { "name": "useradmin/default_user_password", "value": "admin" }, - { "name": "hcatalog/templeton_url", "value": "http://%WEBHCAT_HOST%:50111/templeton/v1/" }, - { "name": "hcatalog/security_enabled", "value": "false" }, - { "name": "about/tutorials_installed", "value": "false" }, - { "name": "pig/udf_path", "value": "'/tmp/udfs'" }, - { "name": "proxy/whitelist", "value": "\"(localhost|127\\.0\\.0\\.1|%NN_HOST_PATTERN%|%HUE_HOST_PATTERN%|%WEBHCAT_HOST_PATTERN%):(50030|50070|50060|50075|50111)\"," } - ] - }, - { - "name" : "hue-core-site", - "properties" : [ - { "name": "hadoop.proxyuser.hue.hosts", "value": "*" }, - { "name": "hadoop.proxyuser.hue.groups", "value": "*" }, - { "name": "hadoop.proxyuser.hcat.groups", "value": "*" }, - { "name": "hadoop.proxyuser.hcat.hosts", "value": "*" } - ] - }, - { - "name" : "hue-hdfs-site", - "properties" : [ - { "name": "dfs.support.broken.append", "value": "true" }, - { "name": "dfs.webhdfs.enabled", "value": "true" } - ] - }, - { - "name" : "hue-webhcat-site", - "properties" : [ - { "name": "webhcat.proxyuser.hue.hosts", "value": "*" }, - { "name": "webhcat.proxyuser.hue.groups", "value": "*" } - ] - }, - { - "name" : "hue-oozie-site", - "properties" : [ - { "name": "oozie.service.ProxyUserService.proxyuser.hue.hosts", "value": "*" }, - { "name": "oozie.service.ProxyUserService.proxyuser.hue.groups", "value": "*" } - ] - }, - { - "name" : "hdfsha", - "properties" : [ - { "name": "hdfs.nnha", "value": "false"} - ] - } - ] -} diff --git a/sahara/plugins/hdp/versions/version_2_0_6/resources/topology.sh b/sahara/plugins/hdp/versions/version_2_0_6/resources/topology.sh deleted file mode 100755 index c34a0cf23e..0000000000 --- a/sahara/plugins/hdp/versions/version_2_0_6/resources/topology.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -HADOOP_CONF=/etc/hadoop/conf - -while 
[ $# -gt 0 ] ; do - nodeArg=$1 - exec< ${HADOOP_CONF}/topology.data - result="" - while read line ; do - ar=( $line ) - if [ "${ar[0]}" = "$nodeArg" ] ; then - result="${ar[1]}" - fi - done - shift - if [ -z "$result" ] ; then - echo -n "/default/rack " - else - echo -n "$result " - fi -done - diff --git a/sahara/plugins/hdp/versions/version_2_0_6/services.py b/sahara/plugins/hdp/versions/version_2_0_6/services.py deleted file mode 100644 index cd55030a88..0000000000 --- a/sahara/plugins/hdp/versions/version_2_0_6/services.py +++ /dev/null @@ -1,1272 +0,0 @@ -# Copyright (c) 2014 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -from oslo_config import cfg -from oslo_log import log as logging -import six - -from sahara import context -from sahara import exceptions as e -from sahara.i18n import _ -from sahara.i18n import _LI -from sahara.i18n import _LW -from sahara.plugins import exceptions as ex -from sahara.plugins import utils -from sahara.swift import swift_helper as h -from sahara.topology import topology_helper as th - -CONF = cfg.CONF -TOPOLOGY_CONFIG = { - "net.topology.node.switch.mapping.impl": - "org.apache.hadoop.net.ScriptBasedMapping", - "net.topology.script.file.name": - "/etc/hadoop/conf/topology.sh" -} - -LOG = logging.getLogger(__name__) - - -def create_service(name): - for cls in Service.__subclasses__(): - if cls.get_service_id() == name: - return cls() - # no subclass found, return service base class - return Service(name) - - -class Service(object): - def __init__(self, name, ambari_managed=True): - self.name = name - self.configurations = set(['global', 'core-site']) - self.components = [] - self.users = [] - self.deployed = False - self.ambari_managed = ambari_managed - - def add_component(self, component): - self.components.append(component) - - def add_user(self, user): - self.users.append(user) - - def validate(self, cluster_spec, cluster): - pass - - def finalize_configuration(self, cluster_spec): - pass - - def register_user_input_handlers(self, ui_handlers): - pass - - def register_service_urls(self, cluster_spec, url_info, cluster): - return url_info - - def pre_service_start(self, cluster_spec, ambari_info, started_services): - pass - - def finalize_ng_components(self, cluster_spec): - pass - - def is_user_template_component(self, component): - return True - - def is_mandatory(self): - return False - - def _replace_config_token(self, cluster_spec, token, value, props): - for config_name, props in six.iteritems(props): - config = cluster_spec.configurations[config_name] - for prop in props: - config[prop] = config[prop].replace(token, value) - - def _update_config_values(self, configurations, value, props): - for absolute_prop_name in props: - tokens = absolute_prop_name.split('/') - config_name = tokens[0] - prop_name = tokens[1] - config = configurations[config_name] - config[prop_name] = value - - def _get_common_paths(self, node_groups): - sets = [] - for node_group in node_groups: - for instance in node_group.instances: - 
sets.append(set(instance.sahara_instance.storage_paths())) - - return list(set.intersection(*sets)) if sets else [] - - def _generate_storage_path(self, storage_paths, path): - return ",".join([p + path for p in storage_paths]) - - def _get_port_from_cluster_spec(self, cluster_spec, service, prop_name): - address = cluster_spec.configurations[service][prop_name] - return utils.get_port_from_address(address) - - -class HdfsService(Service): - def __init__(self): - super(HdfsService, self).__init__(HdfsService.get_service_id()) - self.configurations.add('hdfs-site') - - @classmethod - def get_service_id(cls): - return 'HDFS' - - def validate(self, cluster_spec, cluster): - # Check NAMENODE and HDFS HA constraints - nn_count = cluster_spec.get_deployed_node_group_count('NAMENODE') - jn_count = cluster_spec.get_deployed_node_group_count('JOURNALNODE') - zkfc_count = cluster_spec.get_deployed_node_group_count('ZKFC') - - if cluster_spec.is_hdfs_ha_enabled(cluster): - if nn_count != 2: - raise ex.NameNodeHAConfigurationError( - "Hadoop cluster with HDFS HA enabled requires " - "2 NAMENODE. Actual NAMENODE count is %s" % nn_count) - # Check the number of journalnodes - if not (jn_count >= 3 and (jn_count % 2 == 1)): - raise ex.NameNodeHAConfigurationError( - "JOURNALNODE count should be an odd number " - "greater than or equal 3 for NameNode High Availability. " - "Actual JOURNALNODE count is %s" % jn_count) - else: - if nn_count != 1: - raise ex.InvalidComponentCountException('NAMENODE', 1, - nn_count) - # make sure that JOURNALNODE is only used when HDFS HA is enabled - if jn_count > 0: - raise ex.NameNodeHAConfigurationError( - "JOURNALNODE can only be added when " - "NameNode High Availability is enabled.") - # make sure that ZKFC is only used when HDFS HA is enabled - if zkfc_count > 0: - raise ex.NameNodeHAConfigurationError( - "ZKFC can only be added when " - "NameNode High Availability is enabled.") - - def finalize_configuration(self, cluster_spec): - nn_hosts = cluster_spec.determine_component_hosts('NAMENODE') - if nn_hosts: - props = {'core-site': ['fs.defaultFS'], - 'hdfs-site': ['dfs.namenode.http-address', - 'dfs.namenode.https-address']} - self._replace_config_token( - cluster_spec, '%NN_HOST%', nn_hosts.pop().fqdn(), props) - - snn_hosts = cluster_spec.determine_component_hosts( - 'SECONDARY_NAMENODE') - if snn_hosts: - props = {'hdfs-site': ['dfs.namenode.secondary.http-address']} - self._replace_config_token( - cluster_spec, '%SNN_HOST%', snn_hosts.pop().fqdn(), props) - - # add swift properties to configuration - core_site_config = cluster_spec.configurations['core-site'] - for prop in self._get_swift_properties(): - core_site_config[prop['name']] = prop['value'] - - # add topology properties to configuration, if enabled - if CONF.enable_data_locality: - for prop in th.vm_awareness_core_config(): - core_site_config[prop['name']] = prop['value'] - - core_site_config.update(TOPOLOGY_CONFIG) - - # process storage paths to accommodate ephemeral or cinder storage - nn_ng = cluster_spec.get_node_groups_containing_component( - 'NAMENODE')[0] - dn_node_groups = cluster_spec.get_node_groups_containing_component( - 'DATANODE') - common_paths = [] - if dn_node_groups: - common_paths = self._get_common_paths(dn_node_groups) - hdfs_site_config = cluster_spec.configurations['hdfs-site'] - hdfs_site_config['dfs.namenode.name.dir'] = ( - self._generate_storage_path( - self._get_common_paths([nn_ng]), '/hadoop/hdfs/namenode')) - if common_paths: - hdfs_site_config['dfs.datanode.data.dir'] = ( 
- self._generate_storage_path( - common_paths, '/hadoop/hdfs/data')) - - def register_service_urls(self, cluster_spec, url_info, cluster): - namenode_ip = cluster_spec.determine_component_hosts( - 'NAMENODE').pop().management_ip - - ui_port = self._get_port_from_cluster_spec(cluster_spec, 'hdfs-site', - 'dfs.namenode.http-address') - nn_port = self._get_port_from_cluster_spec(cluster_spec, 'core-site', - 'fs.defaultFS') - - url_info['HDFS'] = { - 'Web UI': 'http://%s:%s' % (namenode_ip, ui_port), - 'NameNode': 'hdfs://%s:%s' % (namenode_ip, nn_port) - } - if cluster_spec.is_hdfs_ha_enabled(cluster): - url_info['HDFS'].update({ - 'NameService': 'hdfs://%s' % cluster.name}) - return url_info - - def finalize_ng_components(self, cluster_spec): - hdfs_ng = cluster_spec.get_node_groups_containing_component( - 'NAMENODE')[0] - components = hdfs_ng.components - if not cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER'): - zk_service = next(service for service in cluster_spec.services - if service.name == 'ZOOKEEPER') - zk_service.deployed = True - components.append('ZOOKEEPER_SERVER') - - def is_mandatory(self): - return True - - def _get_swift_properties(self): - return h.get_swift_configs() - - -class MapReduce2Service(Service): - def __init__(self): - super(MapReduce2Service, self).__init__( - MapReduce2Service.get_service_id()) - self.configurations.add('mapred-site') - - @classmethod - def get_service_id(cls): - return 'MAPREDUCE2' - - def validate(self, cluster_spec, cluster): - count = cluster_spec.get_deployed_node_group_count('HISTORYSERVER') - if count != 1: - raise ex.InvalidComponentCountException('HISTORYSERVER', 1, count) - - def finalize_configuration(self, cluster_spec): - hs_hosts = cluster_spec.determine_component_hosts('HISTORYSERVER') - if hs_hosts: - props = {'mapred-site': ['mapreduce.jobhistory.webapp.address', - 'mapreduce.jobhistory.address']} - - self._replace_config_token( - cluster_spec, '%HS_HOST%', hs_hosts.pop().fqdn(), props) - - # data locality/rack awareness prop processing - mapred_site_config = cluster_spec.configurations['mapred-site'] - if CONF.enable_data_locality: - for prop in th.vm_awareness_mapred_config(): - mapred_site_config[prop['name']] = prop['value'] - - def register_service_urls(self, cluster_spec, url_info, cluster): - historyserver_ip = cluster_spec.determine_component_hosts( - 'HISTORYSERVER').pop().management_ip - - ui_port = self._get_port_from_cluster_spec( - cluster_spec, 'mapred-site', 'mapreduce.jobhistory.webapp.address') - hs_port = self._get_port_from_cluster_spec( - cluster_spec, 'mapred-site', 'mapreduce.jobhistory.address') - - url_info['MapReduce2'] = { - 'Web UI': 'http://%s:%s' % (historyserver_ip, ui_port), - 'History Server': '%s:%s' % (historyserver_ip, hs_port) - } - return url_info - - def finalize_ng_components(self, cluster_spec): - mr2_ng = cluster_spec.get_node_groups_containing_component( - 'HISTORYSERVER')[0] - components = mr2_ng.components - if 'HDFS_CLIENT' not in components: - components.append('HDFS_CLIENT') - - def is_mandatory(self): - return True - - -class YarnService(Service): - def __init__(self): - super(YarnService, self).__init__( - YarnService.get_service_id()) - self.configurations.add('yarn-site') - self.configurations.add('capacity-scheduler') - - @classmethod - def get_service_id(cls): - return 'YARN' - - def validate(self, cluster_spec, cluster): - count = cluster_spec.get_deployed_node_group_count('RESOURCEMANAGER') - if count != 1: - raise 
ex.InvalidComponentCountException('RESOURCEMANAGER', 1, - count) - - count = cluster_spec.get_deployed_node_group_count('NODEMANAGER') - if not count: - raise ex.InvalidComponentCountException( - 'NODEMANAGER', '> 0', count) - - def finalize_configuration(self, cluster_spec): - rm_hosts = cluster_spec.determine_component_hosts('RESOURCEMANAGER') - if rm_hosts: - props = {'yarn-site': ['yarn.resourcemanager.' - 'resource-tracker.address', - 'yarn.resourcemanager.hostname', - 'yarn.resourcemanager.address', - 'yarn.resourcemanager.scheduler.address', - 'yarn.resourcemanager.webapp.address', - 'yarn.log.server.url', - 'yarn.resourcemanager.admin.address']} - - self._replace_config_token( - cluster_spec, '%RM_HOST%', rm_hosts.pop().fqdn(), props) - - # data locality/rack awareness prop processing - mapred_site_config = cluster_spec.configurations['mapred-site'] - if CONF.enable_data_locality: - for prop in th.vm_awareness_mapred_config(): - mapred_site_config[prop['name']] = prop['value'] - - # process storage paths to accommodate ephemeral or cinder storage - yarn_site_config = cluster_spec.configurations['yarn-site'] - nm_node_groups = cluster_spec.get_node_groups_containing_component( - 'NODEMANAGER') - if nm_node_groups: - common_paths = self._get_common_paths(nm_node_groups) - yarn_site_config['yarn.nodemanager.local-dirs'] = ( - self._generate_storage_path(common_paths, - '/hadoop/yarn/local')) - - def register_service_urls(self, cluster_spec, url_info, cluster): - resourcemgr_ip = cluster_spec.determine_component_hosts( - 'RESOURCEMANAGER').pop().management_ip - - ui_port = self._get_port_from_cluster_spec( - cluster_spec, 'yarn-site', 'yarn.resourcemanager.webapp.address') - rm_port = self._get_port_from_cluster_spec( - cluster_spec, 'yarn-site', 'yarn.resourcemanager.address') - - url_info['Yarn'] = { - 'Web UI': 'http://%s:%s' % (resourcemgr_ip, ui_port), - 'ResourceManager': '%s:%s' % (resourcemgr_ip, rm_port) - } - return url_info - - def is_mandatory(self): - return True - - -class HiveService(Service): - def __init__(self): - super(HiveService, self).__init__(HiveService.get_service_id()) - self.configurations.add('hive-site') - - @classmethod - def get_service_id(cls): - return 'HIVE' - - def validate(self, cluster_spec, cluster): - count = cluster_spec.get_deployed_node_group_count('HIVE_SERVER') - if count != 1: - raise ex.InvalidComponentCountException('HIVE_SERVER', 1, count) - - def finalize_configuration(self, cluster_spec): - hive_servers = cluster_spec.determine_component_hosts('HIVE_SERVER') - if hive_servers: - props = {'global': ['hive_hostname'], - 'core-site': ['hadoop.proxyuser.hive.hosts']} - self._replace_config_token( - cluster_spec, '%HIVE_HOST%', hive_servers.pop().fqdn(), props) - - hive_ms = cluster_spec.determine_component_hosts('HIVE_METASTORE') - if hive_ms: - self._replace_config_token( - cluster_spec, '%HIVE_METASTORE_HOST%', hive_ms.pop().fqdn(), - {'hive-site': ['hive.metastore.uris']}) - - hive_mysql = cluster_spec.determine_component_hosts('MYSQL_SERVER') - if hive_mysql: - self._replace_config_token( - cluster_spec, '%HIVE_MYSQL_HOST%', hive_mysql.pop().fqdn(), - {'hive-site': ['javax.jdo.option.ConnectionURL']}) - - def register_user_input_handlers(self, ui_handlers): - ui_handlers['hive-site/javax.jdo.option.ConnectionUserName'] = ( - self._handle_user_property_metastore_user) - ui_handlers['hive-site/javax.jdo.option.ConnectionPassword'] = ( - self._handle_user_property_metastore_pwd) - - def _handle_user_property_metastore_user(self, 
user_input, configurations): - hive_site_config_map = configurations['hive-site'] - hive_site_config_map['javax.jdo.option.ConnectionUserName'] = ( - user_input.value) - - def _handle_user_property_metastore_pwd(self, user_input, configurations): - hive_site_config_map = configurations['hive-site'] - hive_site_config_map['javax.jdo.option.ConnectionPassword'] = ( - user_input.value) - - def finalize_ng_components(self, cluster_spec): - hive_ng = cluster_spec.get_node_groups_containing_component( - 'HIVE_SERVER')[0] - components = hive_ng.components - if 'MAPREDUCE2_CLIENT' not in components: - components.append('MAPREDUCE2_CLIENT') - if not cluster_spec.get_deployed_node_group_count('HIVE_METASTORE'): - components.append('HIVE_METASTORE') - if not cluster_spec.get_deployed_node_group_count('MYSQL_SERVER'): - components.append('MYSQL_SERVER') - if not cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER'): - zk_service = next(service for service in cluster_spec.services - if service.name == 'ZOOKEEPER') - zk_service.deployed = True - components.append('ZOOKEEPER_SERVER') - - -class WebHCatService(Service): - def __init__(self): - super(WebHCatService, self).__init__(WebHCatService.get_service_id()) - self.configurations.add('webhcat-site') - - @classmethod - def get_service_id(cls): - return 'WEBHCAT' - - def validate(self, cluster_spec, cluster): - count = cluster_spec.get_deployed_node_group_count('WEBHCAT_SERVER') - if count != 1: - raise ex.InvalidComponentCountException('WEBHCAT_SERVER', 1, count) - - def finalize_configuration(self, cluster_spec): - webhcat_servers = cluster_spec.determine_component_hosts( - 'WEBHCAT_SERVER') - if webhcat_servers: - self._replace_config_token( - cluster_spec, '%WEBHCAT_HOST%', webhcat_servers.pop().fqdn(), - {'core-site': ['hadoop.proxyuser.hcat.hosts']}) - - hive_ms_servers = cluster_spec.determine_component_hosts( - 'HIVE_METASTORE') - if hive_ms_servers: - self._replace_config_token( - cluster_spec, '%HIVE_METASTORE_HOST%', - hive_ms_servers.pop().fqdn(), - {'webhcat-site': ['templeton.hive.properties']}) - - zk_servers = cluster_spec.determine_component_hosts('ZOOKEEPER_SERVER') - if zk_servers: - zk_list = ['{0}:2181'.format(z.fqdn()) for z in zk_servers] - self._replace_config_token( - cluster_spec, '%ZOOKEEPER_HOSTS%', ','.join(zk_list), - {'webhcat-site': ['templeton.zookeeper.hosts']}) - - def finalize_ng_components(self, cluster_spec): - webhcat_ng = cluster_spec.get_node_groups_containing_component( - 'WEBHCAT_SERVER')[0] - components = webhcat_ng.components - if 'HDFS_CLIENT' not in components: - components.append('HDFS_CLIENT') - if 'MAPREDUCE2_CLIENT' not in components: - components.append('MAPREDUCE2_CLIENT') - # per AMBARI-3483 - if 'YARN_CLIENT' not in components: - components.append('YARN_CLIENT') - if 'ZOOKEEPER_CLIENT' not in components: - # if zk server isn't in cluster, add to ng - if not cluster_spec.get_deployed_node_group_count( - 'ZOOKEEPER_SERVER'): - - zk_service = next(service for service in cluster_spec.services - if service.name == 'ZOOKEEPER') - zk_service.deployed = True - components.append('ZOOKEEPER_SERVER') - components.append('ZOOKEEPER_CLIENT') - - -class HBaseService(Service): - property_map = { - 'hbase-site/hbase.tmp.dir': [ - 'hbase-site/hbase.tmp.dir', 'global/hbase_tmp_dir'], - 'hbase-site/hbase.regionserver.global.memstore.upperLimit': [ - 'hbase-site/hbase.regionserver.global.memstore.upperLimit', - 'global/regionserver_memstore_upperlimit'], - 'hbase-site/hbase.hstore.blockingStoreFiles': [ - 
'hbase-site/hbase.hstore.blockingStoreFiles', - 'global/hstore_blockingstorefiles'], - 'hbase-site/hbase.hstore.compactionThreshold': [ - 'hbase-site/hbase.hstore.compactionThreshold', - 'global/hstore_compactionthreshold'], - 'hbase-site/hfile.block.cache.size': [ - 'hbase-site/hfile.block.cache.size', - 'global/hfile_blockcache_size'], - 'hbase-site/hbase.hregion.max.filesize': [ - 'hbase-site/hbase.hregion.max.filesize', - 'global/hstorefile_maxsize'], - 'hbase-site/hbase.regionserver.handler.count': [ - 'hbase-site/hbase.regionserver.handler.count', - 'global/regionserver_handlers'], - 'hbase-site/hbase.hregion.majorcompaction': [ - 'hbase-site/hbase.hregion.majorcompaction', - 'global/hregion_majorcompaction'], - 'hbase-site/hbase.regionserver.global.memstore.lowerLimit': [ - 'hbase-site/hbase.regionserver.global.memstore.lowerLimit', - 'global/regionserver_memstore_lowerlimit'], - 'hbase-site/hbase.hregion.memstore.block.multiplier': [ - 'hbase-site/hbase.hregion.memstore.block.multiplier', - 'global/hregion_blockmultiplier'], - 'hbase-site/hbase.hregion.memstore.mslab.enabled': [ - 'hbase-site/hbase.hregion.memstore.mslab.enabled', - 'global/regionserver_memstore_lab'], - 'hbase-site/hbase.hregion.memstore.flush.size': [ - 'hbase-site/hbase.hregion.memstore.flush.size'], - 'hbase-site/hbase.client.scanner.caching': [ - 'hbase-site/hbase.client.scanner.caching', - 'global/client_scannercaching'], - 'hbase-site/zookeeper.session.timeout': [ - 'hbase-site/zookeeper.session.timeout', - 'global/zookeeper_sessiontimeout'], - 'hbase-site/hbase.client.keyvalue.maxsize': [ - 'hbase-site/hbase.client.keyvalue.maxsize', - 'global/hfile_max_keyvalue_size'], - 'hdfs-site/dfs.support.append': [ - 'hdfs-site/dfs.support.append', - 'hbase-site/dfs.support.append', - 'global/hdfs_support_append'], - 'hbase-site/dfs.client.read.shortcircuit': [ - 'hbase-site/dfs.client.read.shortcircuit', - 'global/hdfs_enable_shortcircuit_read'] - } - - def __init__(self): - super(HBaseService, self).__init__( - HBaseService.get_service_id()) - self.configurations.add('hbase-site') - - @classmethod - def get_service_id(cls): - return 'HBASE' - - def validate(self, cluster_spec, cluster): - # check for a single HBASE_SERVER - count = cluster_spec.get_deployed_node_group_count('HBASE_MASTER') - if count != 1: - raise ex.InvalidComponentCountException('HBASE_MASTER', 1, count) - - def register_service_urls(self, cluster_spec, url_info, cluster): - master_ip = cluster_spec.determine_component_hosts( - 'HBASE_MASTER').pop().management_ip - - hbase_config = cluster_spec.configurations['hbase-site'] - info_port = hbase_config['hbase.master.info.port'] - - url_info['HBase'] = { - 'Web UI': 'http://%s:%s/master-status' % (master_ip, info_port), - 'Logs': 'http://%s:%s/logs' % (master_ip, info_port), - 'Zookeeper Info': 'http://%s:%s/zk.jsp' % (master_ip, info_port), - 'JMX': 'http://%s:%s/jmx' % (master_ip, info_port), - 'Debug Dump': 'http://%s:%s/dump' % (master_ip, info_port), - 'Thread Stacks': 'http://%s:%s/stacks' % (master_ip, info_port) - } - return url_info - - def register_user_input_handlers(self, ui_handlers): - for prop_name in self.property_map: - ui_handlers[prop_name] = ( - self._handle_config_property_update) - - ui_handlers['hbase-site/hbase.rootdir'] = ( - self._handle_user_property_root_dir) - - def _handle_config_property_update(self, user_input, configurations): - self._update_config_values(configurations, user_input.value, - self.property_map[user_input.config.name]) - - def 
_handle_user_property_root_dir(self, user_input, configurations): - configurations['hbase-site']['hbase.rootdir'] = user_input.value - - match = re.search('(^hdfs://)(.*?)(/.*)', user_input.value) - if match: - configurations['global']['hbase_hdfs_root_dir'] = match.group(3) - else: - raise e.InvalidDataException( - _("Invalid value for property 'hbase-site/hbase.rootdir' : %s") - % user_input.value) - - def finalize_configuration(self, cluster_spec): - nn_servers = cluster_spec.determine_component_hosts('NAMENODE') - if nn_servers: - self._replace_config_token( - cluster_spec, '%NN_HOST%', nn_servers.pop().fqdn(), - {'hbase-site': ['hbase.rootdir']}) - - zk_servers = cluster_spec.determine_component_hosts('ZOOKEEPER_SERVER') - if zk_servers: - zk_list = [z.fqdn() for z in zk_servers] - self._replace_config_token( - cluster_spec, '%ZOOKEEPER_HOSTS%', ','.join(zk_list), - {'hbase-site': ['hbase.zookeeper.quorum']}) - - def finalize_ng_components(self, cluster_spec): - hbase_ng = cluster_spec.get_node_groups_containing_component( - 'HBASE_MASTER') - components = hbase_ng[0].components - if 'HDFS_CLIENT' not in components: - components.append('HDFS_CLIENT') - - if not cluster_spec.get_deployed_node_group_count( - 'HBASE_REGIONSERVER'): - components.append('HBASE_REGIONSERVER') - else: - hbase_ng = cluster_spec.get_node_groups_containing_component( - 'HBASE_REGIONSERVER') - for ng in hbase_ng: - components = ng.components - if 'HDFS_CLIENT' not in components: - components.append('HDFS_CLIENT') - - if not cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER'): - zk_service = next(service for service in cluster_spec.services - if service.name == 'ZOOKEEPER') - zk_service.deployed = True - components.append('ZOOKEEPER_SERVER') - - -class ZookeeperService(Service): - def __init__(self): - super(ZookeeperService, self).__init__( - ZookeeperService.get_service_id()) - - @classmethod - def get_service_id(cls): - return 'ZOOKEEPER' - - def validate(self, cluster_spec, cluster): - count = cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER') - if count < 1: - raise ex.InvalidComponentCountException( - 'ZOOKEEPER_SERVER', '1+', count) - - # check if HDFS HA is enabled - if cluster_spec.is_hdfs_ha_enabled(cluster): - # check if we have an odd number of zookeeper_servers > 3 - if not (count >= 3 and (count % 2 == 1)): - raise ex.NameNodeHAConfigurationError( - "ZOOKEEPER_SERVER count should be an odd number " - "greater than 3 for NameNode High Availability. 
" - "Actual ZOOKEEPER_SERVER count is %s" % count) - - def is_mandatory(self): - return True - - -class OozieService(Service): - def __init__(self): - super(OozieService, self).__init__(OozieService.get_service_id()) - self.configurations.add('oozie-site') - - @classmethod - def get_service_id(cls): - return 'OOZIE' - - def validate(self, cluster_spec, cluster): - count = cluster_spec.get_deployed_node_group_count('OOZIE_SERVER') - if count != 1: - raise ex.InvalidComponentCountException( - 'OOZIE_SERVER', 1, count) - count = cluster_spec.get_deployed_node_group_count('OOZIE_CLIENT') - if not count: - raise ex.InvalidComponentCountException( - 'OOZIE_CLIENT', '1+', count) - - def finalize_configuration(self, cluster_spec): - oozie_servers = cluster_spec.determine_component_hosts('OOZIE_SERVER') - if oozie_servers: - oozie_server = oozie_servers.pop() - name_list = [oozie_server.fqdn(), oozie_server.internal_ip, - oozie_server.management_ip] - self._replace_config_token( - cluster_spec, '%OOZIE_HOST%', oozie_server.fqdn(), - {'global': ['oozie_hostname'], - 'oozie-site': ['oozie.base.url']}) - self._replace_config_token( - cluster_spec, '%OOZIE_HOST%', ",".join(name_list), - {'core-site': ['hadoop.proxyuser.oozie.hosts']}) - - def finalize_ng_components(self, cluster_spec): - oozie_ng = cluster_spec.get_node_groups_containing_component( - 'OOZIE_SERVER')[0] - components = oozie_ng.components - if 'HDFS_CLIENT' not in components: - components.append('HDFS_CLIENT') - if 'MAPREDUCE2_CLIENT' not in components: - components.append('MAPREDUCE2_CLIENT') - # per AMBARI-3483 - if 'YARN_CLIENT' not in components: - components.append('YARN_CLIENT') - # ensure that mr and hdfs clients are colocated with oozie client - client_ngs = cluster_spec.get_node_groups_containing_component( - 'OOZIE_CLIENT') - for ng in client_ngs: - components = ng.components - if 'HDFS_CLIENT' not in components: - components.append('HDFS_CLIENT') - if 'MAPREDUCE2_CLIENT' not in components: - components.append('MAPREDUCE2_CLIENT') - - def register_service_urls(self, cluster_spec, url_info, cluster): - oozie_ip = cluster_spec.determine_component_hosts( - 'OOZIE_SERVER').pop().management_ip - port = self._get_port_from_cluster_spec(cluster_spec, 'oozie-site', - 'oozie.base.url') - url_info['JobFlow'] = { - 'Oozie': 'http://%s:%s' % (oozie_ip, port) - } - return url_info - - def register_user_input_handlers(self, ui_handlers): - ui_handlers['oozie-site/oozie.service.JPAService.jdbc.username'] = ( - self._handle_user_property_db_user) - ui_handlers['oozie.service.JPAService.jdbc.password'] = ( - self._handle_user_property_db_pwd) - - def _handle_user_property_db_user(self, user_input, configurations): - oozie_site_config_map = configurations['oozie-site'] - oozie_site_config_map['oozie.service.JPAService.jdbc.username'] = ( - user_input.value) - - def _handle_user_property_db_pwd(self, user_input, configurations): - oozie_site_config_map = configurations['oozie-site'] - oozie_site_config_map['oozie.service.JPAService.jdbc.password'] = ( - user_input.value) - - -class GangliaService(Service): - def __init__(self): - super(GangliaService, self).__init__(GangliaService.get_service_id()) - - @classmethod - def get_service_id(cls): - return 'GANGLIA' - - def validate(self, cluster_spec, cluster): - count = cluster_spec.get_deployed_node_group_count('GANGLIA_SERVER') - if count != 1: - raise ex.InvalidComponentCountException('GANGLIA_SERVER', 1, count) - - def is_user_template_component(self, component): - return component.name != 
'GANGLIA_MONITOR' - - def finalize_ng_components(self, cluster_spec): - for ng in cluster_spec.node_groups.values(): - if 'GANGLIA_MONITOR' not in ng.components: - ng.components.append('GANGLIA_MONITOR') - - -class AmbariService(Service): - def __init__(self): - super(AmbariService, self).__init__(AmbariService.get_service_id(), - False) - self.configurations.add('ambari') - # TODO(jspeidel): don't hard code default admin user - self.admin_user_name = 'admin' - - @classmethod - def get_service_id(cls): - return 'AMBARI' - - def validate(self, cluster_spec, cluster): - count = cluster_spec.get_deployed_node_group_count('AMBARI_SERVER') - if count != 1: - raise ex.InvalidComponentCountException('AMBARI_SERVER', 1, count) - - def register_service_urls(self, cluster_spec, url_info, cluster): - ambari_ip = cluster_spec.determine_component_hosts( - 'AMBARI_SERVER').pop().management_ip - - port = cluster_spec.configurations['ambari'].get( - 'server.port', '8080') - - url_info['Ambari Console'] = { - 'Web UI': 'http://{0}:{1}'.format(ambari_ip, port) - } - return url_info - - def is_user_template_component(self, component): - return component.name != 'AMBARI_AGENT' - - def register_user_input_handlers(self, ui_handlers): - ui_handlers['ambari-stack/ambari.admin.user'] = ( - self._handle_user_property_admin_user) - ui_handlers['ambari-stack/ambari.admin.password'] = ( - self._handle_user_property_admin_password) - - def is_mandatory(self): - return True - - def _handle_user_property_admin_user(self, user_input, configurations): - admin_user = next(user for user in self.users - if user.name == 'admin') - admin_user.name = user_input.value - self.admin_user_name = user_input.value - - def _handle_user_property_admin_password(self, user_input, configurations): - admin_user = next(user for user in self.users - if user.name == self.admin_user_name) - admin_user.password = user_input.value - - -class SqoopService(Service): - def __init__(self): - super(SqoopService, self).__init__(SqoopService.get_service_id()) - - @classmethod - def get_service_id(cls): - return 'SQOOP' - - def finalize_ng_components(self, cluster_spec): - sqoop_ngs = cluster_spec.get_node_groups_containing_component('SQOOP') - for ng in sqoop_ngs: - if 'HDFS_CLIENT' not in ng.components: - ng.components.append('HDFS_CLIENT') - if 'MAPREDUCE2_CLIENT' not in ng.components: - ng.components.append('MAPREDUCE2_CLIENT') - - -class NagiosService(Service): - def __init__(self): - super(NagiosService, self).__init__(NagiosService.get_service_id()) - - @classmethod - def get_service_id(cls): - return 'NAGIOS' - - def finalize_ng_components(self, cluster_spec): - # per AMBARI-2946 - nagios_ngs = ( - cluster_spec.get_node_groups_containing_component('NAGIOS_SERVER')) - for ng in nagios_ngs: - if 'YARN_CLIENT' not in ng.components: - ng.components.append('YARN_CLIENT') - if 'MAPREDUCE2_CLIENT' not in ng.components: - ng.components.append('MAPREDUCE2_CLIENT') - if cluster_spec.get_deployed_node_group_count('OOZIE_SERVER'): - if 'OOZIE_CLIENT' not in ng.components: - ng.components.append('OOZIE_CLIENT') - if cluster_spec.get_deployed_node_group_count('HIVE_SERVER'): - if 'HIVE_CLIENT' not in ng.components: - ng.components.append('HIVE_CLIENT') - if 'HCAT' not in ng.components: - if not cluster_spec.get_deployed_node_group_count( - 'HCATALOG'): - hcat_service = next(service for service in - cluster_spec.services if - service.name == 'HCATALOG') - hcat_service.deployed = True - ng.components.append('HCAT') - - -class HueService(Service): - 
default_web_ui_port = '8000' - required_services = ['HIVE', 'OOZIE', 'WEBHCAT', 'YARN'] - - def __init__(self): - super(HueService, self).__init__(HueService.get_service_id(), False) - - @classmethod - def get_service_id(cls): - return "HUE" - - @staticmethod - def _get_java_home_from_config(config): - return (config.get('java64_home', None) - or config.get('java_home', None) if config else None) - - @staticmethod - def _get_java_home(cluster_spec): - java_home = HueService._get_java_home_from_config( - cluster_spec.configurations.get('hue', None) - ) - - if not java_home: - java_home = HueService._get_java_home_from_config( - cluster_spec.configurations.get('global', None) - ) - - return java_home or '/opt/jdk1.6.0_31' - - @staticmethod - def _append_host_substitution(cluster_spec, component, var_name, - var_pattern_name, subs): - hosts = cluster_spec.determine_component_hosts(component) - - if hosts: - subs[var_name] = hosts.pop().fqdn() or 'localhost' - subs[var_pattern_name] = subs[var_name].replace('.', '\.') - - @staticmethod - def _create_hue_ini_file_section(property_sub_tree, level): - properties = property_sub_tree['properties'] - sections = property_sub_tree['sections'] - - s = '' - - if properties: - for name, value in six.iteritems(properties): - s += ' ' * (level * 2) - s += "{0} = {1}\n".format(name, value) - - if sections: - for name, section in six.iteritems(sections): - s += "\n" - - s += ' ' * ((level - 1) * 2) - s += '[' * level - s += name - s += ']' * level - s += "\n" - - s += HueService._create_hue_ini_file_section(section, - level + 1) - - return s - - @staticmethod - def _create_hue_ini_file(property_tree): - if property_tree: - return HueService._create_hue_ini_file_section(property_tree, 1) - else: - return '' - - @staticmethod - def _create_hue_property_tree(cluster_spec): - config_name = 'hue-ini' - - hue_ini_property_tree = {'sections': {}, 'properties': {}} - - config = cluster_spec.configurations[config_name] - - if config is None: - LOG.warning(_LW('Missing configuration named {config_name}, ' - 'aborting Hue ini file creation').format( - config_name=config_name)) - else: - # replace values in hue-ini configuration - subs = {} - - subs['%JAVA_HOME%'] = HueService._get_java_home(cluster_spec) - - HueService._append_host_substitution(cluster_spec, - 'NAMENODE', - '%NN_HOST%', - '%NN_HOST_PATTERN%', - subs) - - HueService._append_host_substitution(cluster_spec, - 'RESOURCEMANAGER', - '%RM_HOST%', - '%RM_HOST_PATTERN%', - subs) - - HueService._append_host_substitution(cluster_spec, - 'HISTORYSERVER', - '%HS_HOST%', - '%HS_HOST_PATTERN%', - subs) - - HueService._append_host_substitution(cluster_spec, - 'OOZIE_SERVER', - '%OOZIE_HOST%', - '%OOZIE_HOST_PATTERN%', - subs) - - HueService._append_host_substitution(cluster_spec, - 'WEBHCAT_SERVER', - '%WEBHCAT_HOST%', - '%WEBHCAT_HOST_PATTERN%', - subs) - - HueService._append_host_substitution(cluster_spec, - 'HUE', - '%HUE_HOST%', - '%HUE_HOST_PATTERN%', - subs) - - # Parse configuration properties into Hue ini configuration tree - # where :: = - # becomes - # { - # { - # = - # } - # } - for prop_name, prop_value in six.iteritems(config): - # Skip empty property values - if prop_value: - # Attempt to make any necessary substitutions - if subs: - for placeholder, sub in six.iteritems(subs): - if prop_value.find(placeholder) >= 0: - value = prop_value.replace(placeholder, sub) - LOG.debug('Converting placeholder in property ' - '{p_name}:\n\t\t{p_value}\n\tto\n\t' - '\t{value}\n'. 
- format(p_name=prop_name, - p_value=prop_value, - value=value)) - prop_value = value - - # If the property value still is a value, add it and it's - # relevant path to the tree - if prop_value and len(prop_value) > 0: - node = hue_ini_property_tree - tokens = prop_name.split('/') - - if tokens: - name = tokens.pop() - - while tokens: - token = tokens.pop(0) - - if token not in node['sections']: - data = {'sections': {}, - 'properties': {}} - - node['sections'][token] = data - - node = node['sections'][token] - - # TODO(rlevas) : handle collisions - node['properties'][name] = prop_value - - LOG.info(_LI('Created Hue ini property tree from configuration named ' - '{config_name}').format(config_name=config_name)) - - return hue_ini_property_tree - - @staticmethod - def _merge_configurations(cluster_spec, src_config_name, dst_config_name): - src_config = cluster_spec.configurations[src_config_name] - dst_config = cluster_spec.configurations[dst_config_name] - - if src_config is None: - LOG.warning(_LW('Missing source configuration property set, ' - 'aborting merge: {config_name}'). - format(config_name=src_config_name)) - elif dst_config is None: - LOG.warning(_LW('Missing destination configuration property set, ' - 'aborting merge: {config_name}'). - format(config_name=dst_config_name)) - else: - for property_name, property_value in six.iteritems(src_config): - if property_name in dst_config: - if dst_config[property_name] == src_config[property_name]: - LOG.debug('Skipping unchanged configuration property ' - 'in {d_config_name} and {s_config_name}: ' - '{property_name}' - .format(d_config_name=dst_config_name, - s_config_name=src_config_name, - property_name=property_name)) - else: - LOG.warning(_LW('Overwriting existing configuration ' - 'property in {dst_config_name} from ' - '{src_config_name} for Hue: ' - '{property_name} ' - '[{dst_config} -> {src_config}]'). - format(dst_config_name=dst_config_name, - src_config_name=src_config_name, - property_name=property_name, - dst_config=dst_config[ - property_name], - src_config=src_config[ - property_name])) - else: - LOG.debug('Adding Hue configuration property to {d_config}' - ' from {s_config}: {p_name}'.format( - d_config=dst_config_name, - s_config=src_config_name, - p_name=property_name)) - - dst_config[property_name] = property_value - LOG.info(_LI('Merged configuration properties: {source} -> ' - '{destination}') - .format(source=src_config_name, - destination=dst_config_name)) - - @staticmethod - def _handle_pre_service_start(instance, cluster_spec, hue_ini, - create_user): - with instance.remote() as r: - r.execute_command('yum -y install hue', - run_as_root=True) - LOG.info(_LI('Installed Hue')) - - r.write_file_to('/etc/hue/conf/hue.ini', - hue_ini, - True) - # update hue.ini if HDFS HA is enabled and restart hadoop-httpfs - # /tmp/hueini-hdfsha is written by versionhandler when HDFS is - # enabled - r.execute_command('[ ! -f /tmp/hueini-hdfsha ] || sed -i ' - '"s/hdfs.*.:8020/hdfs:\\/\\/`cat ' - '/tmp/hueini-hdfsha`/g" /etc/hue/conf/hue.ini', - run_as_root=True) - r.execute_command('[ ! 
-f /tmp/hueini-hdfsha ] || sed -i ' - '"s/http.*.\\/webhdfs\\/v1\\//http:\\/\\' - '/localhost:14000\\/webhdfs\\/v1\\//g" ' - '/etc/hue/conf/hue.ini', run_as_root=True) - LOG.info(_LI('Setting Hue configuration')) - - r.execute_command( - '/usr/lib/hue/build/env/bin/python ' - '/usr/lib/hue/tools/app_reg/app_reg.py ' - '--remove shell', - run_as_root=True) - LOG.info(_LI('Shell uninstalled, if it was installed')) - - if create_user: - r.execute_command('/usr/lib/hue/build/env/bin/hue ' - 'create_sandbox_user', run_as_root=True) - LOG.info(_LI('Initial Hue user created')) - - java_home = HueService._get_java_home(cluster_spec) - if java_home: - r.replace_remote_string( - '/etc/hadoop/conf/hadoop-env.sh', - 'export JAVA_HOME=.*', - 'export JAVA_HOME=%s' % java_home) - - r.execute_command('service hue start', run_as_root=True) - - # start httpfs if HDFS HA is enabled - r.execute_command('[ ! -f /tmp/hueini-hdfsha ] || ' - 'service hadoop-httpfs start', - run_as_root=True) - LOG.info(_LI('Hue (re)started')) - - def finalize_configuration(self, cluster_spec): - # add Hue-specific properties to the core-site file ideally only on - # the following nodes: - # - # NameNode - # Secondary - # NameNode - # DataNodes - # - LOG.debug('Inserting Hue configuration properties into core-site') - self._merge_configurations(cluster_spec, 'hue-core-site', 'core-site') - - # add Hue-specific properties to the hdfs-site file - LOG.debug('Inserting Hue configuration properties into hdfs-site') - self._merge_configurations(cluster_spec, 'hue-hdfs-site', 'hdfs-site') - - # add Hue-specific properties to the webhcat-site file - LOG.debug('Inserting Hue configuration properties into webhcat-site') - self._merge_configurations(cluster_spec, 'hue-webhcat-site', - 'webhcat-site') - - # add Hue-specific properties to the webhcat-site file - LOG.debug('Inserting Hue configuration properties into oozie-site') - self._merge_configurations(cluster_spec, 'hue-oozie-site', - 'oozie-site') - - def register_service_urls(self, cluster_spec, url_info, cluster): - hosts = cluster_spec.determine_component_hosts('HUE') - - if hosts is not None: - host = hosts.pop() - - if host is not None: - config = cluster_spec.configurations['hue-ini'] - if config is not None: - port = config.get('desktop/http_port', - self.default_web_ui_port) - else: - port = self.default_web_ui_port - - ip = host.management_ip - - url_info[self.name.title()] = { - 'Web UI': 'http://{0}:{1}'.format(ip, port) - } - - return url_info - - def validate(self, cluster_spec, cluster): - count = cluster_spec.get_deployed_node_group_count('HUE') - if count != 1: - raise ex.InvalidComponentCountException('HUE', 1, count) - - services = cluster_spec.services - - for reqd_service in self.required_services: - reqd_service_deployed = False - - if services is not None: - for service in services: - reqd_service_deployed = (service.deployed - and service.name == reqd_service) - - if reqd_service_deployed: - break - - if not reqd_service_deployed: - raise ex.RequiredServiceMissingException(reqd_service, - self.name) - - def finalize_ng_components(self, cluster_spec): - hue_ngs = cluster_spec.get_node_groups_containing_component('HUE') - - if hue_ngs is not None: - for hue_ng in hue_ngs: - components = hue_ng.components - - if 'HDFS_CLIENT' not in components: - components.append('HDFS_CLIENT') - LOG.info(_LI('HDFS client was missed from Hue node. 
' - 'Added it since it is required for Hue')) - - if cluster_spec.get_deployed_node_group_count('HIVE_SERVER'): - if 'HIVE_CLIENT' not in components: - components.append('HIVE_CLIENT') - LOG.info(_LI('HIVE client was missed from Hue node. ' - 'Added it since it is required for ' - 'Beeswax and HCatalog')) - - def pre_service_start(self, cluster_spec, ambari_info, started_services): - - # Create hue.ini file - hue_property_tree = HueService._create_hue_property_tree(cluster_spec) - hue_ini = HueService._create_hue_ini_file(hue_property_tree) - - create_user = False - config = cluster_spec.configurations['hue-ini'] - - if config is not None: - username = config.get('useradmin/default_username', '') - password = config.get('useradmin/default_user_password', '') - - # NOTE(tkelsey): test prevents creation of user with defaults - create_user = username != '' and password != '' # nosec(tkelsey) - - # Install Hue on the appropriate node(s)... - hue_ngs = cluster_spec.get_node_groups_containing_component("HUE") - if hue_ngs: - for ng in hue_ngs: - if ng.instances: - for instance in ng.instances: - with context.set_current_instance_id( - instance.instance_id): - HueService._handle_pre_service_start(instance, - cluster_spec, - hue_ini, - create_user) diff --git a/sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py b/sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py deleted file mode 100644 index 498e9b1e63..0000000000 --- a/sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright (c) 2014 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
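The version handler and AmbariClient defined in this module drive provisioning through Ambari's REST API: create the cluster, push desired configurations, register services, components and hosts, then issue install and start requests and poll the asynchronous request resource until every task completes. A minimal standalone sketch of that call pattern follows, using the plain requests library instead of the plugin's cached HTTP sessions; the Ambari address, credentials and helper names are illustrative assumptions, while the endpoints, the X-Requested-By header and the status handling mirror the client code below.

# Hedged sketch only, not part of the removed plugin. Endpoints and the
# X-Requested-By header match the AmbariClient in this module; the host,
# credentials and function names are assumptions for illustration.
import json
import time

import requests

AMBARI = 'http://ambari.example.org:8080'   # assumed Ambari server address
AUTH = ('admin', 'admin')                   # assumed admin credentials
HEADERS = {'X-Requested-By': 'sahara'}


def create_cluster(name, hdp_version='2.0.6'):
    # POST /api/v1/clusters/{name}; Ambari answers 201 on success.
    body = json.dumps({'Clusters': {'version': 'HDP-%s' % hdp_version}})
    resp = requests.post('%s/api/v1/clusters/%s' % (AMBARI, name),
                         data=body, auth=AUTH, headers=HEADERS)
    resp.raise_for_status()


def install_all_services(name):
    # Ask Ambari to move every service from INIT to INSTALLED; a 202
    # response carries the id of the asynchronous request to poll.
    url = ('%s/api/v1/clusters/%s/services?ServiceInfo/state=INIT'
           % (AMBARI, name))
    body = ('{"RequestInfo": {"context": "Install all services"},'
            '"Body": {"ServiceInfo": {"state": "INSTALLED"}}}')
    resp = requests.put(url, data=body, auth=AUTH, headers=HEADERS)
    if resp.status_code == 202:
        return resp.json()['Requests']['id']
    resp.raise_for_status()
    return None   # nothing was in the INIT state


def wait_for_request(name, request_id, interval=5):
    # Poll the request's tasks until they all complete or one fails.
    url = ('%s/api/v1/clusters/%s/requests/%s/tasks?fields=Tasks/status'
           % (AMBARI, name, request_id))
    while True:
        items = requests.get(url, auth=AUTH,
                             headers=HEADERS).json()['items']
        statuses = [item['Tasks']['status'] for item in items]
        if any(s in ('FAILED', 'ABORTED', 'TIMEDOUT') for s in statuses):
            return False
        if statuses and all(s == 'COMPLETED' for s in statuses):
            return True
        time.sleep(interval)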
- -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils as json -import pkg_resources as pkg -import six - -from sahara import context -from sahara import exceptions as exc -from sahara.i18n import _ -from sahara.i18n import _LE -from sahara.i18n import _LI -from sahara.i18n import _LW -from sahara.plugins import exceptions as ex -from sahara.plugins.hdp import clusterspec as cs -from sahara.plugins.hdp import configprovider as cfgprov -from sahara.plugins.hdp.versions import abstractversionhandler as avm -from sahara.plugins.hdp.versions.version_2_0_6 import edp_engine -from sahara.plugins.hdp.versions.version_2_0_6 import services -from sahara.utils import cluster_progress_ops as cpo -from sahara.utils import poll_utils -from sahara import version - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class VersionHandler(avm.AbstractVersionHandler): - config_provider = None - version = None - client = None - - def _set_version(self, version): - self.version = version - - def _get_config_provider(self): - if self.config_provider is None: - self.config_provider = cfgprov.ConfigurationProvider( - json.load(pkg.resource_stream( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'ambari-config-resource.json')), - hadoop_version='2.0.6') - - return self.config_provider - - def get_version(self): - return self.version - - def get_ambari_client(self): - if not self.client: - self.client = AmbariClient(self) - - return self.client - - def get_config_items(self): - return self._get_config_provider().get_config_items() - - def get_applicable_target(self, name): - return self._get_config_provider().get_applicable_target(name) - - def get_cluster_spec(self, cluster, user_inputs, - scaled_groups=None, cluster_template=None): - if cluster_template: - cluster_spec = cs.ClusterSpec(cluster_template, '2.0.6') - else: - cluster_spec = self.get_default_cluster_configuration() - cluster_spec.create_operational_config( - cluster, user_inputs, scaled_groups) - - cs.validate_number_of_datanodes( - cluster, scaled_groups, self.get_config_items()) - - return cluster_spec - - def get_default_cluster_configuration(self): - return cs.ClusterSpec(self._get_default_cluster_template(), '2.0.6') - - def _get_default_cluster_template(self): - return pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - def get_node_processes(self): - node_processes = {} - for service in self.get_default_cluster_configuration().services: - components = [] - for component in service.components: - components.append(component.name) - node_processes[service.name] = components - - return node_processes - - def install_swift_integration(self, servers): - if servers: - cpo.add_provisioning_step( - servers[0].cluster_id, _("Install swift integration"), - len(servers)) - - for server in servers: - with context.set_current_instance_id( - server.instance['instance_id']): - server.install_swift_integration() - - def get_services_processor(self): - return services - - def get_edp_engine(self, cluster, job_type): - if job_type in edp_engine.EdpOozieEngine.get_supported_job_types(): - return edp_engine.EdpOozieEngine(cluster) - return None - - def get_edp_job_types(self): - return edp_engine.EdpOozieEngine.get_supported_job_types() - - def get_edp_config_hints(self, job_type): - return edp_engine.EdpOozieEngine.get_possible_job_config(job_type) - - def get_open_ports(self, 
node_group): - ports = [8660] # for Ganglia - - ports_map = { - 'AMBARI_SERVER': [8080, 8440, 8441], - 'NAMENODE': [50070, 50470, 8020, 9000], - 'DATANODE': [50075, 50475, 50010, 8010], - 'SECONDARY_NAMENODE': [50090], - 'HISTORYSERVER': [19888], - 'RESOURCEMANAGER': [8025, 8041, 8050, 8088], - 'NODEMANAGER': [45454], - 'HIVE_SERVER': [10000], - 'HIVE_METASTORE': [9083], - 'HBASE_MASTER': [60000, 60010], - 'HBASE_REGIONSERVER': [60020, 60030], - 'WEBHCAT_SERVER': [50111], - 'GANGLIA_SERVER': [8661, 8662, 8663, 8651], - 'MYSQL_SERVER': [3306], - 'OOZIE_SERVER': [11000, 11001], - 'ZOOKEEPER_SERVER': [2181, 2888, 3888], - 'NAGIOS_SERVER': [80] - } - for process in node_group.node_processes: - if process in ports_map: - ports.extend(ports_map[process]) - - return ports - - -class AmbariClient(object): - - def __init__(self, handler): - # add an argument for neutron discovery - self.handler = handler - - def _get_http_session(self, host, port): - return host.remote().get_http_client(port) - - def _get_standard_headers(self): - return {"X-Requested-By": "sahara"} - - def _post(self, url, ambari_info, data=None): - session = self._get_http_session(ambari_info.host, ambari_info.port) - return session.post(url, data=data, - auth=(ambari_info.user, ambari_info.password), - headers=self._get_standard_headers()) - - def _delete(self, url, ambari_info): - session = self._get_http_session(ambari_info.host, ambari_info.port) - return session.delete(url, - auth=(ambari_info.user, ambari_info.password), - headers=self._get_standard_headers()) - - def _put(self, url, ambari_info, data=None): - session = self._get_http_session(ambari_info.host, ambari_info.port) - auth = (ambari_info.user, ambari_info.password) - return session.put(url, data=data, auth=auth, - headers=self._get_standard_headers()) - - def _get(self, url, ambari_info): - session = self._get_http_session(ambari_info.host, ambari_info.port) - return session.get(url, auth=(ambari_info.user, ambari_info.password), - headers=self._get_standard_headers()) - - def _add_cluster(self, ambari_info, name): - add_cluster_url = 'http://{0}/api/v1/clusters/{1}'.format( - ambari_info.get_address(), name) - result = self._post(add_cluster_url, ambari_info, - data='{"Clusters": {"version" : "HDP-' + - self.handler.get_version() + '"}}') - - if result.status_code != 201: - LOG.error(_LE('Create cluster command failed. 
{result}').format( - result=result.text)) - raise ex.HadoopProvisionError( - _('Failed to add cluster: %s') % result.text) - - @cpo.event_wrapper(True, step=_("Add configurations to cluster"), - param=('ambari_info', 2)) - def _add_configurations_to_cluster( - self, cluster_spec, ambari_info, name): - - existing_config_url = ('http://{0}/api/v1/clusters/{1}?fields=' - 'Clusters/desired_configs'.format( - ambari_info.get_address(), name)) - - result = self._get(existing_config_url, ambari_info) - - json_result = json.loads(result.text) - existing_configs = json_result['Clusters']['desired_configs'] - - configs = cluster_spec.get_deployed_configurations() - if 'ambari' in configs: - configs.remove('ambari') - if len(configs) == len(existing_configs): - # nothing to do - return - - config_url = 'http://{0}/api/v1/clusters/{1}'.format( - ambari_info.get_address(), name) - - body = {} - clusters = {} - version = 1 - body['Clusters'] = clusters - for config_name in configs: - if config_name in existing_configs: - if config_name == 'core-site' or config_name == 'global': - existing_version = (existing_configs[config_name]['tag'] - .lstrip('v')) - version = int(existing_version) + 1 - else: - continue - - config_body = {} - clusters['desired_config'] = config_body - config_body['type'] = config_name - config_body['tag'] = 'v%s' % version - config_body['properties'] = ( - cluster_spec.configurations[config_name]) - result = self._put(config_url, ambari_info, data=json.dumps(body)) - if result.status_code != 200: - LOG.error( - _LE('Set configuration command failed. {result}').format( - result=result.text)) - raise ex.HadoopProvisionError( - _('Failed to set configurations on cluster: %s') - % result.text) - - @cpo.event_wrapper( - True, step=_("Add services to cluster"), param=('ambari_info', 2)) - def _add_services_to_cluster(self, cluster_spec, ambari_info, name): - services = cluster_spec.services - add_service_url = 'http://{0}/api/v1/clusters/{1}/services/{2}' - for service in services: - # Make sure the service is deployed and is managed by Ambari - if service.deployed and service.ambari_managed: - result = self._post(add_service_url.format( - ambari_info.get_address(), name, service.name), - ambari_info) - if result.status_code not in [201, 409]: - LOG.error( - _LE('Create service command failed. {result}').format( - result=result.text)) - raise ex.HadoopProvisionError( - _('Failed to add services to cluster: %s') - % result.text) - - @cpo.event_wrapper( - True, step=_("Add components to services"), param=('ambari_info', 2)) - def _add_components_to_services(self, cluster_spec, ambari_info, name): - add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{' - '2}/components/{3}') - for service in cluster_spec.services: - # Make sure the service is deployed and is managed by Ambari - if service.deployed and service.ambari_managed: - for component in service.components: - result = self._post(add_component_url.format( - ambari_info.get_address(), name, service.name, - component.name), - ambari_info) - if result.status_code not in [201, 409]: - LOG.error( - _LE('Create component command failed. 
{result}') - .format(result=result.text)) - raise ex.HadoopProvisionError( - _('Failed to add components to services: %s') - % result.text) - - @cpo.event_wrapper( - True, step=_("Add host and components"), param=('ambari_info', 3)) - def _add_hosts_and_components( - self, cluster_spec, servers, ambari_info, name): - - add_host_url = 'http://{0}/api/v1/clusters/{1}/hosts/{2}' - add_host_component_url = ('http://{0}/api/v1/clusters/{1}' - '/hosts/{2}/host_components/{3}') - for host in servers: - with context.set_current_instance_id(host.instance['instance_id']): - hostname = host.instance.fqdn().lower() - result = self._post( - add_host_url.format(ambari_info.get_address(), name, - hostname), ambari_info) - if result.status_code != 201: - LOG.error( - _LE('Create host command failed. {result}').format( - result=result.text)) - raise ex.HadoopProvisionError( - _('Failed to add host: %s') % result.text) - - node_group_name = host.node_group.name - # TODO(jspeidel): ensure that node group exists - node_group = cluster_spec.node_groups[node_group_name] - for component in node_group.components: - # Don't add any AMBARI or HUE components - # TODO(rlevas): Pragmatically determine if component is - # managed by Ambari - if (component.find('AMBARI') != 0 - and component.find('HUE') != 0): - result = self._post(add_host_component_url.format( - ambari_info.get_address(), name, hostname, - component), ambari_info) - if result.status_code != 201: - LOG.error( - _LE('Create host_component command failed. ' - '{result}').format(result=result.text)) - raise ex.HadoopProvisionError( - _('Failed to add host component: %s') - % result.text) - - @cpo.event_wrapper( - True, step=_("Install services"), param=('ambari_info', 2)) - def _install_services(self, cluster_name, ambari_info): - ambari_address = ambari_info.get_address() - install_url = ('http://{0}/api/v1/clusters/{' - '1}/services?ServiceInfo/state=INIT'.format( - ambari_address, cluster_name)) - body = ('{"RequestInfo" : { "context" : "Install all services" },' - '"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}') - - result = self._put(install_url, ambari_info, data=body) - - if result.status_code == 202: - json_result = json.loads(result.text) - request_id = json_result['Requests']['id'] - success = self._wait_for_async_request(self._get_async_request_uri( - ambari_info, cluster_name, request_id), - ambari_info) - if success: - LOG.info(_LI("Install of Hadoop stack successful.")) - self._finalize_ambari_state(ambari_info) - else: - LOG.error(_LE('Install command failed.')) - raise ex.HadoopProvisionError( - _('Installation of Hadoop stack failed.')) - elif result.status_code != 200: - LOG.error( - _LE('Install command failed. 
{result}').format( - result=result.text)) - raise ex.HadoopProvisionError( - _('Installation of Hadoop stack failed.')) - - def _get_async_request_uri(self, ambari_info, cluster_name, request_id): - return ('http://{0}/api/v1/clusters/{1}/requests/{' - '2}/tasks?fields=Tasks/status'.format( - ambari_info.get_address(), cluster_name, - request_id)) - - # Returns the top-level requests API URI - def _get_command_request_uri(self, ambari_info, cluster_name): - return ('http://{0}/api/v1/clusters/{1}/requests'.format( - ambari_info.get_address(), cluster_name)) - - def _wait_for_async_request(self, request_url, ambari_info): - started = False - while not started: - result = self._get(request_url, ambari_info) - LOG.debug('Async request url: {url} response:\n{response}'.format( - url=request_url, response=result.text)) - json_result = json.loads(result.text) - started = True - for items in json_result['items']: - status = items['Tasks']['status'] - if (status == 'FAILED' or status == 'ABORTED' or - status == 'TIMEDOUT'): - return False - else: - if status != 'COMPLETED': - started = False - - context.sleep(5) - return started - - def _finalize_ambari_state(self, ambari_info): - persist_state_uri = 'http://{0}/api/v1/persist'.format( - ambari_info.get_address()) - # this post data has non-standard format because persist - # resource doesn't comply with Ambari API standards - persist_data = ('{ "CLUSTER_CURRENT_STATUS":' - '"{\\"clusterState\\":\\"CLUSTER_STARTED_5\\"}" }') - result = self._post(persist_state_uri, ambari_info, data=persist_data) - - if result.status_code != 201 and result.status_code != 202: - LOG.warning(_LW('Ambari cluster state not finalized. {result}'). - format(result=result.text)) - raise ex.HadoopProvisionError( - _('Unable to finalize Ambari state.')) - LOG.info(_LI('Ambari cluster state finalized.')) - - @cpo.event_wrapper( - True, step=_("Start services"), param=('ambari_info', 3)) - def start_services(self, cluster_name, cluster_spec, ambari_info): - start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/' - 'state=INSTALLED'.format( - ambari_info.get_address(), cluster_name)) - body = ('{"RequestInfo" : { "context" : "Start all services" },' - '"Body" : {"ServiceInfo": {"state" : "STARTED"}}}') - - self._fire_service_start_notifications( - cluster_name, cluster_spec, ambari_info) - result = self._put(start_url, ambari_info, data=body) - if result.status_code == 202: - json_result = json.loads(result.text) - request_id = json_result['Requests']['id'] - success = self._wait_for_async_request( - self._get_async_request_uri(ambari_info, cluster_name, - request_id), ambari_info) - if success: - LOG.info(_LI("Successfully started Hadoop cluster")) - LOG.info(_LI('Ambari server address: {server_address}').format( - server_address=ambari_info.get_address())) - else: - LOG.error(_LE('Failed to start Hadoop cluster.')) - raise ex.HadoopProvisionError( - _('Start of Hadoop services failed.')) - - elif result.status_code != 200: - LOG.error( - _LE('Start command failed. 
Status: {status}, response: ' - '{response}').format(status=result.status_code, - response=result.text)) - raise ex.HadoopProvisionError( - _('Start of Hadoop services failed.')) - - def _exec_ambari_command(self, ambari_info, body, cmd_uri): - - LOG.debug('PUT URI: {uri}'.format(uri=cmd_uri)) - result = self._put(cmd_uri, ambari_info, data=body) - if result.status_code == 202: - LOG.debug( - 'PUT response: {result}'.format(result=result.text)) - json_result = json.loads(result.text) - href = json_result['href'] + '/tasks?fields=Tasks/status' - success = self._wait_for_async_request(href, ambari_info) - if success: - LOG.info( - _LI("Successfully changed state of Hadoop components ")) - else: - LOG.error(_LE('Failed to change state of Hadoop components')) - raise ex.HadoopProvisionError( - _('Failed to change state of Hadoop components')) - - else: - LOG.error( - _LE('Command failed. Status: {status}, response: ' - '{response}').format(status=result.status_code, - response=result.text)) - raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.')) - - def _get_host_list(self, servers): - host_list = [server.instance.fqdn().lower() for server in servers] - return ",".join(host_list) - - def _install_and_start_components(self, cluster_name, servers, - ambari_info, cluster_spec): - - auth = (ambari_info.user, ambari_info.password) - self._install_components(ambari_info, auth, cluster_name, servers) - self.handler.install_swift_integration(servers) - self._start_components(ambari_info, auth, cluster_name, - servers, cluster_spec) - - def _install_components(self, ambari_info, auth, cluster_name, servers): - # query for the host components on the given hosts that are in the - # INIT state - # TODO(jspeidel): provide request context - body = '{"HostRoles": {"state" : "INSTALLED"}}' - install_uri = ('http://{0}/api/v1/clusters/{' - '1}/host_components?HostRoles/state=INIT&' - 'HostRoles/host_name.in({2})'.format( - ambari_info.get_address(), cluster_name, - self._get_host_list(servers))) - self._exec_ambari_command(ambari_info, body, install_uri) - LOG.info(_LI('Started Hadoop components while scaling up')) - LOG.info(_LI('Ambari server ip {ip}').format( - ip=ambari_info.get_address())) - - def _start_components(self, ambari_info, auth, cluster_name, servers, - cluster_spec): - # query for all the host components in the INSTALLED state, - # then get a list of the client services in the list - installed_uri = ('http://{0}/api/v1/clusters/{' - '1}/host_components?HostRoles/state=INSTALLED&' - 'HostRoles/host_name.in({2})'.format( - ambari_info.get_address(), cluster_name, - self._get_host_list(servers))) - result = self._get(installed_uri, ambari_info) - if result.status_code == 200: - LOG.debug( - 'GET response: {result}'.format(result=result.text)) - json_result = json.loads(result.text) - items = json_result['items'] - - client_set = cluster_spec.get_components_for_type('CLIENT') - inclusion_list = list(set([x['HostRoles']['component_name'] - for x in items - if x['HostRoles']['component_name'] - not in client_set])) - - # query and start all non-client components on the given set of - # hosts - # TODO(jspeidel): Provide request context - body = '{"HostRoles": {"state" : "STARTED"}}' - start_uri = ('http://{0}/api/v1/clusters/{' - '1}/host_components?HostRoles/state=INSTALLED&' - 'HostRoles/host_name.in({2})' - '&HostRoles/component_name.in({3})'.format( - ambari_info.get_address(), cluster_name, - self._get_host_list(servers), - ",".join(inclusion_list))) - 
self._exec_ambari_command(ambari_info, body, start_uri) - else: - raise ex.HadoopProvisionError( - _('Unable to determine installed service ' - 'components in scaled instances. status' - ' code returned = {0}').format(result.status)) - - def _check_host_registrations(self, num_hosts, ambari_info): - url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address()) - try: - result = self._get(url, ambari_info) - json_result = json.loads(result.text) - - LOG.debug('Registered Hosts: {current_number} ' - 'of {final_number}'.format( - current_number=len(json_result['items']), - final_number=num_hosts)) - for hosts in json_result['items']: - LOG.debug('Registered Host: {host}'.format( - host=hosts['Hosts']['host_name'])) - return result and len(json_result['items']) >= num_hosts - except Exception: - LOG.debug('Waiting to connect to ambari server') - return False - - @cpo.event_wrapper(True, step=_("Wait for all Ambari agents to register"), - param=('ambari_info', 2)) - def wait_for_host_registrations(self, num_hosts, ambari_info): - cluster = ambari_info.get_cluster() - poll_utils.plugin_option_poll( - cluster, self._check_host_registrations, - cfgprov.HOST_REGISTRATIONS_TIMEOUT, - _("Wait for host registrations"), 5, { - 'num_hosts': num_hosts, 'ambari_info': ambari_info}) - - def update_ambari_admin_user(self, password, ambari_info): - old_pwd = ambari_info.password - user_url = 'http://{0}/api/v1/users/admin'.format( - ambari_info.get_address()) - update_body = ('{{"Users":{{"roles":"admin","password":"{0}",' - '"old_password":"{1}"}} }}'.format(password, old_pwd)) - - result = self._put(user_url, ambari_info, data=update_body) - - if result.status_code != 200: - raise ex.HadoopProvisionError(_('Unable to update Ambari admin ' - 'user credentials: {0}').format( - result.text)) - - def add_ambari_user(self, user, ambari_info): - user_url = 'http://{0}/api/v1/users/{1}'.format( - ambari_info.get_address(), user.name) - - create_body = ('{{"Users":{{"password":"{0}","roles":"{1}"}} }}'. - format(user.password, '%s' % ','. 
- join(map(str, user.groups)))) - - result = self._post(user_url, ambari_info, data=create_body) - - if result.status_code != 201: - raise ex.HadoopProvisionError( - _('Unable to create Ambari user: {0}').format(result.text)) - - def delete_ambari_user(self, user_name, ambari_info): - user_url = 'http://{0}/api/v1/users/{1}'.format( - ambari_info.get_address(), user_name) - - result = self._delete(user_url, ambari_info) - - if result.status_code != 200: - raise ex.HadoopProvisionError( - _('Unable to delete Ambari user: %(user_name)s' - ' : %(text)s') % - {'user_name': user_name, 'text': result.text}) - - def configure_scaled_cluster_instances(self, name, cluster_spec, - num_hosts, ambari_info): - self.wait_for_host_registrations(num_hosts, ambari_info) - self._add_configurations_to_cluster( - cluster_spec, ambari_info, name) - self._add_services_to_cluster( - cluster_spec, ambari_info, name) - self._add_components_to_services( - cluster_spec, ambari_info, name) - self._install_services(name, ambari_info) - - def start_scaled_cluster_instances(self, name, cluster_spec, servers, - ambari_info): - self.start_services(name, cluster_spec, ambari_info) - self._add_hosts_and_components( - cluster_spec, servers, ambari_info, name) - self._install_and_start_components( - name, servers, ambari_info, cluster_spec) - - @cpo.event_wrapper( - True, step=_("Decommission nodes"), param=('cluster', 1)) - def decommission_cluster_instances(self, cluster, clusterspec, instances, - ambari_info): - - request_uri = self._get_command_request_uri(ambari_info, cluster.name) - - hosts_to_decommission = [] - # Decommission HDFS datanodes to avoid loss of data - # during decommissioning process - for instance in instances: - ng_name = instance.node_group.name - if "DATANODE" in clusterspec.node_groups[ng_name].components: - # determine the instances that include HDFS support - hosts_to_decommission.append(instance.fqdn()) - - LOG.debug('AmbariClient: hosts_to_decommission = {hosts}'.format( - hosts=str(hosts_to_decommission))) - - # template for request body - body_header = ('{"RequestInfo" : { "context": "Decommission DataNode",' - ' "command" : "DECOMMISSION", "service_name" : "HDFS",' - ' "component_name" : "NAMENODE", ' - ' "parameters" : { "slave_type" : "DATANODE", ') - - excluded_hosts_request = '"excluded_hosts" : "{0}"' - - # generate comma-separated list of hosts to de-commission - list_of_hosts = ",".join(hosts_to_decommission) - - LOG.debug('AmbariClient: list_of_hosts = {hosts}'.format( - hosts=list_of_hosts)) - - # create the request body - request_body = ( - body_header + - excluded_hosts_request.format(list_of_hosts) - + '}}' - + ', "Requests/resource_filters":[{"service_name":"HDFS",' - '"component_name":"NAMENODE"}]}') - - LOG.debug('AmbariClient: about to make decommission request, uri = ' - '{uri}'.format(uri=request_uri)) - LOG.debug('AmbariClient: about to make decommission request, ' - 'request body = {body}'.format(body=request_body)) - - # ask Ambari to decommission the datanodes - result = self._post(request_uri, ambari_info, request_body) - if result.status_code != 202: - LOG.error(_LE('AmbariClient: error while making decommission post ' - 'request. Error is = {result}').format( - result=result.text)) - raise ex.DecommissionError( - _('An error occurred while trying to ' - 'decommission the DataNode instances that are ' - 'being shut down. 
' - 'Please consult the Ambari server logs on the ' - 'master node for ' - 'more information about the failure.')) - else: - LOG.info(_LI('AmbariClient: decommission post request succeeded!')) - - status_template = ('http://{0}/api/v1/clusters/{1}/hosts/{2}/' - 'host_components/{3}') - - # find the host that the NameNode is deployed on - name_node_host = clusterspec.determine_component_hosts( - 'NAMENODE').pop() - status_request = status_template.format( - ambari_info.get_address(), - cluster.name, name_node_host.fqdn(), - 'NAMENODE') - - LOG.debug('AmbariClient: about to make decommission status request,' - 'uri = {uri}'.format(uri=status_request)) - - poll_utils.plugin_option_poll( - ambari_info.get_cluster(), - self.process_decommission, - cfgprov.DECOMMISSIONING_TIMEOUT, _("Decommission nodes"), 5, - {'status_request': status_request, 'ambari_info': ambari_info, - 'hosts_to_decommission': hosts_to_decommission}) - - def process_decommission(self, status_request, ambari_info, - hosts_to_decommission): - if len(hosts_to_decommission) == 0: - # Nothing for decommissioning - return True - - LOG.debug('AmbariClient: number of hosts waiting for ' - 'decommissioning to complete = {count}'.format( - count=str(len(hosts_to_decommission)))) - - result = self._get(status_request, ambari_info) - if result.status_code != 200: - LOG.error(_LE('AmbariClient: error in making decommission ' - 'status request, error = {result}').format( - result=result.text)) - else: - LOG.info(_LI('AmbariClient: decommission status request ok, ' - 'result = {result}').format(result=result.text)) - json_result = json.loads(result.text) - live_nodes = ( - json_result['metrics']['dfs']['namenode']['LiveNodes']) - # parse out the map of live hosts associated with the NameNode - json_result_nodes = json.loads(live_nodes) - for node, val in six.iteritems(json_result_nodes): - admin_state = val['adminState'] - if admin_state == 'Decommissioned': - LOG.debug('AmbariClient: node = {node} is ' - 'now in adminState = {admin_state}'.format( - node=node, admin_state=admin_state)) - # remove from list, to track which nodes - # are now in Decommissioned state - hosts_to_decommission.remove(node) - return False - - def provision_cluster(self, cluster_spec, servers, ambari_info, name): - self._add_cluster(ambari_info, name) - self._add_configurations_to_cluster(cluster_spec, ambari_info, name) - self._add_services_to_cluster(cluster_spec, ambari_info, name) - self._add_components_to_services(cluster_spec, ambari_info, name) - self._add_hosts_and_components( - cluster_spec, servers, ambari_info, name) - - self._install_services(name, ambari_info) - self.handler.install_swift_integration(servers) - - def cleanup(self, ambari_info): - try: - ambari_info.host.remote().close_http_session(ambari_info.port) - except exc.NotFoundException: - LOG.debug("HTTP session is not cached") - - def _get_services_in_state(self, cluster_name, ambari_info, state): - services_url = ('http://{0}/api/v1/clusters/{1}/services?' 
- 'ServiceInfo/state.in({2})'.format( - ambari_info.get_address(), cluster_name, state)) - - result = self._get(services_url, ambari_info) - - json_result = json.loads(result.text) - services = [] - for service in json_result['items']: - services.append(service['ServiceInfo']['service_name']) - - return services - - def _fire_service_start_notifications(self, cluster_name, - cluster_spec, ambari_info): - started_services = self._get_services_in_state( - cluster_name, ambari_info, 'STARTED') - - for service in cluster_spec.services: - if service.deployed and service.name not in started_services: - service.pre_service_start(cluster_spec, ambari_info, - started_services) - - def setup_hdfs_ha(self, cluster_spec, servers, ambari_info, name): - - # Get HA cluster map - hac = self._hdfs_ha_cluster_map(cluster_spec, servers, - ambari_info, name) - - # start active namenode in order to format and save namesapce - self._hdfs_ha_update_host_component(hac, hac['nn_active'], - 'NAMENODE', 'STARTED') - - hac['server_active'].set_namenode_safemode(hac['java_home']) - hac['server_active'].save_namenode_namespace(hac['java_home']) - - # shutdown active namenode - self._hdfs_ha_update_host_component(hac, hac['nn_active'], - 'NAMENODE', 'INSTALLED') - - # Install HDFS_CLIENT on namenodes, to be used later for updating - # HDFS configs - if hac['nn_active'] not in hac['hdfsc_hosts']: - self._hdfs_ha_add_host_component(hac, hac['nn_active'], - 'HDFS_CLIENT') - if hac['nn_standby'] not in hac['hdfsc_hosts']: - self._hdfs_ha_add_host_component(hac, hac['nn_standby'], - 'HDFS_CLIENT') - - # start the journal_nodes - for jn in hac['jn_hosts']: - self._hdfs_ha_update_host_component(hac, jn, - 'JOURNALNODE', 'STARTED') - - # disable any secondary namnodes - for snn in hac['snn_hosts']: - self._hdfs_ha_update_host_component(hac, snn, - 'SECONDARY_NAMENODE', - 'DISABLED') - - # get hdfs-site config tag - hdfs_site_tag = self._hdfs_ha_get_config_tag(hac, 'hdfs-site') - - # get hdfs-site config - hdfs_site = self._hdfs_ha_get_config(hac, 'hdfs-site', hdfs_site_tag) - - # update hdfs-site with HDFS HA properties - hdfs_site_ha = self._hdfs_ha_update_hdfs_site(hac, hdfs_site) - - # put new hdfs-site config - self._hdfs_ha_put_config(hac, 'hdfs-site', hac['config_ver'], - hdfs_site_ha) - - # get core-site tag - core_site_tag = self._hdfs_ha_get_config_tag(hac, 'core-site') - - # get core-site config - core_site = self._hdfs_ha_get_config(hac, 'core-site', core_site_tag) - - # update core-site with HDFS HA properties - core_site_ha = self._hdfs_ha_update_core_site(hac, core_site) - - # put new HA core-site config - self._hdfs_ha_put_config(hac, 'core-site', hac['config_ver'], - core_site_ha) - - # update hbase-site if Hbase is installed - if hac['hbase_hosts']: - hbase_site_tag = self._hdfs_ha_get_config_tag(hac, 'hbase-site') - hbase_site = self._hdfs_ha_get_config(hac, 'hbase-site', - hbase_site_tag) - hbase_site_ha = self._hdfs_ha_update_hbase_site(hac, hbase_site) - self._hdfs_ha_put_config(hac, 'hbase-site', hac['config_ver'], - hbase_site_ha) - - # force the deployment of HDFS HA configs on namenodes by re-installing - # hdfs-client - self._hdfs_ha_update_host_component(hac, hac['nn_active'], - 'HDFS_CLIENT', 'INSTALLED') - self._hdfs_ha_update_host_component(hac, hac['nn_standby'], - 'HDFS_CLIENT', 'INSTALLED') - - # initialize shared edits on the active namenode - hac['server_active'].initialize_shared_edits(hac['java_home']) - - # start zookeeper servers - for zk in hac['zk_hosts']: - 
self._hdfs_ha_update_host_component(hac, zk, - 'ZOOKEEPER_SERVER', 'STARTED') - - # start active namenode - self._hdfs_ha_update_host_component(hac, hac['nn_active'], - 'NAMENODE', 'STARTED') - - # setup active namenode automatic failover - hac['server_active'].format_zookeeper_fc(hac['java_home']) - - # format standby namenode - hac['server_standby'].bootstrap_standby_namenode(hac['java_home']) - - # start namenode process on standby namenode - self._hdfs_ha_update_host_component(hac, hac['nn_standby'], - 'NAMENODE', 'STARTED') - - # add, install and start ZKFC on namenodes for automatic fail-over - for nn in hac['nn_hosts']: - self._hdfs_ha_add_host_component(hac, nn, 'ZKFC') - self._hdfs_ha_update_host_component(hac, nn, 'ZKFC', 'INSTALLED') - self._hdfs_ha_update_host_component(hac, nn, 'ZKFC', 'STARTED') - - # delete any secondary namenodes - for snn in hac['snn_hosts']: - self._hdfs_ha_delete_host_component(hac, snn, 'SECONDARY_NAMENODE') - - # stop journalnodes and namenodes before terminating - # not doing so causes warnings in Ambari for stale config - for jn in hac['jn_hosts']: - self._hdfs_ha_update_host_component(hac, jn, 'JOURNALNODE', - 'INSTALLED') - for nn in hac['nn_hosts']: - self._hdfs_ha_update_host_component(hac, nn, 'NAMENODE', - 'INSTALLED') - - # install httpfs and write temp file if HUE is installed - if hac['hue_host']: - self._hdfs_ha_setup_hue(hac) - - def _hdfs_ha_cluster_map(self, cluster_spec, servers, ambari_info, name): - - hacluster = {} - - hacluster['name'] = name - - hacluster['config_ver'] = 'v2' - - # set JAVA_HOME - global_config = cluster_spec.configurations.get('global', None) - global_config_jh = (global_config.get('java64_home', None) or - global_config.get('java_home', None) if - global_config else None) - hacluster['java_home'] = global_config_jh or '/opt/jdk1.6.0_31' - - # set namnode ports - hacluster['nn_rpc'] = '8020' - hacluster['nn_ui'] = '50070' - - hacluster['ambari_info'] = ambari_info - - # get host lists - hacluster['nn_hosts'] = [x.fqdn().lower() for x in - cluster_spec.determine_component_hosts( - 'NAMENODE')] - hacluster['snn_hosts'] = [x.fqdn().lower() for x in - cluster_spec.determine_component_hosts( - 'SECONDARY_NAMENODE')] - hacluster['jn_hosts'] = [x.fqdn().lower() for x in - cluster_spec.determine_component_hosts( - 'JOURNALNODE')] - hacluster['zk_hosts'] = [x.fqdn().lower() for x in - cluster_spec.determine_component_hosts( - 'ZOOKEEPER_SERVER')] - hacluster['hdfsc_hosts'] = [x.fqdn().lower() for x in - cluster_spec.determine_component_hosts( - 'HDFS_CLIENT')] - hacluster['hbase_hosts'] = [x.fqdn().lower() for x in - cluster_spec.determine_component_hosts( - 'HBASE_MASTER')] - hacluster['hue_host'] = [x.fqdn().lower() for x in - cluster_spec.determine_component_hosts('HUE')] - - # get servers for remote command execution - # consider hacluster['nn_hosts'][0] as active namenode - hacluster['nn_active'] = hacluster['nn_hosts'][0] - hacluster['nn_standby'] = hacluster['nn_hosts'][1] - # get the 2 namenode servers and hue server - for server in servers: - if server.instance.fqdn().lower() == hacluster['nn_active']: - hacluster['server_active'] = server - if server.instance.fqdn().lower() == hacluster['nn_standby']: - hacluster['server_standby'] = server - if hacluster['hue_host']: - if server.instance.fqdn().lower() == hacluster['hue_host'][0]: - hacluster['server_hue'] = server - - return hacluster - - def _hdfs_ha_delete_host_component(self, hac, host, component): - - delete_service_component_url = 
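All of the _hdfs_ha_* helpers removed in this file share one dict, built by _hdfs_ha_cluster_map above. Its rough shape, with made-up host names, is shown here only to make the later helpers easier to follow:

    # Illustrative only; keys follow the removed _hdfs_ha_cluster_map.
    hac = {
        'name': 'cluster-1',            # also reused as the nameservice id
        'config_ver': 'v2',             # tag for the updated config versions
        'java_home': '/opt/jdk1.6.0_31',
        'nn_rpc': '8020',
        'nn_ui': '50070',
        'nn_hosts': ['nn1.local', 'nn2.local'],
        'nn_active': 'nn1.local',
        'nn_standby': 'nn2.local',
        'snn_hosts': ['snn.local'],
        'jn_hosts': ['jn1.local', 'jn2.local', 'jn3.local'],
        'zk_hosts': ['zk1.local', 'zk2.local', 'zk3.local'],
        'hdfsc_hosts': ['client.local'],
        'hbase_hosts': [],
        'hue_host': [],
        'ambari_info': None,            # an AmbariInfo object in the real code
    }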
('http://{0}/api/v1/clusters/{1}/hosts' - '/{2}/host_components/{3}').format( - hac['ambari_info'].get_address(), - hac['name'], host, component) - - result = self._delete(delete_service_component_url, hac['ambari_info']) - if result.status_code != 200: - LOG.error(_LE('Configuring HDFS HA failed. {result}').format( - result=result.text)) - raise ex.NameNodeHAConfigurationError( - _('Configuring HDFS HA failed. %s') % result.text) - - def _hdfs_ha_add_host_component(self, hac, host, component): - add_host_component_url = ('http://{0}/api/v1/clusters/{1}' - '/hosts/{2}/host_components/{3}').format( - hac['ambari_info'].get_address(), - hac['name'], host, component) - - result = self._post(add_host_component_url, hac['ambari_info']) - if result.status_code != 201: - LOG.error(_LE('Configuring HDFS HA failed. {result}').format( - result=result.text)) - raise ex.NameNodeHAConfigurationError( - _('Configuring HDFS HA failed. %s') % result.text) - - def _hdfs_ha_update_host_component(self, hac, host, component, state): - - update_host_component_url = ('http://{0}/api/v1/clusters/{1}' - '/hosts/{2}/host_components/{3}').format( - hac['ambari_info'].get_address(), - hac['name'], host, component) - component_state = {"HostRoles": {"state": state}} - body = json.dumps(component_state) - - result = self._put(update_host_component_url, - hac['ambari_info'], data=body) - - if result.status_code == 202: - json_result = json.loads(result.text) - request_id = json_result['Requests']['id'] - success = self._wait_for_async_request(self._get_async_request_uri( - hac['ambari_info'], hac['name'], request_id), - hac['ambari_info']) - if success: - LOG.info(_LI("HDFS-HA: Host component updated successfully: " - "{host} {component}").format(host=host, - component=component)) - else: - LOG.error(_LE("HDFS-HA: Host component update failed: " - "{host} {component}").format( - host=host, component=component)) - raise ex.NameNodeHAConfigurationError( - _('Configuring HDFS HA failed. %s') % result.text) - elif result.status_code != 200: - LOG.error( - _LE('Configuring HDFS HA failed. {result}').format( - result=result.text)) - raise ex.NameNodeHAConfigurationError( - _('Configuring HDFS HA failed. %s') % result.text) - - def _hdfs_ha_get_config_tag(self, hac, config_name): - - config_url = ('http://{0}/api/v1/clusters/{1}' - '/configurations?type={2}').format( - hac['ambari_info'].get_address(), hac['name'], - config_name) - - result = self._get(config_url, hac['ambari_info']) - if result.status_code == 200: - json_result = json.loads(result.text) - items = json_result['items'] - return items[0]['tag'] - else: - LOG.error( - _LE('Configuring HDFS HA failed. {result}').format( - result=result.text)) - raise ex.NameNodeHAConfigurationError( - _('Configuring HDFS HA failed. %s') % result.text) - - def _hdfs_ha_get_config(self, hac, config_name, tag): - - config_url = ('http://{0}/api/v1/clusters/{1}' - '/configurations?type={2}&tag={3}').format( - hac['ambari_info'].get_address(), hac['name'], - config_name, tag) - - result = self._get(config_url, hac['ambari_info']) - if result.status_code == 200: - json_result = json.loads(result.text) - items = json_result['items'] - return items[0]['properties'] - else: - LOG.error( - _LE('Configuring HDFS HA failed. {result}').format( - result=result.text)) - raise ex.NameNodeHAConfigurationError( - _('Configuring HDFS HA failed. 
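The add/update/delete host-component helpers removed above are thin wrappers around the Ambari REST resource .../hosts/<host>/host_components/<component>; a state change is a PUT with a HostRoles body, and a 202 answer carries a request id to poll. A simplified requests-based sketch, without the original async polling helper:

    import json
    import requests

    def set_host_component_state(ambari_address, cluster, host, component,
                                 state, auth):
        url = ('http://{0}/api/v1/clusters/{1}/hosts/{2}/host_components/{3}'
               .format(ambari_address, cluster, host, component))
        body = json.dumps({'HostRoles': {'state': state}})
        result = requests.put(url, data=body, auth=auth)
        if result.status_code == 202:
            # Asynchronous transition: the caller should poll this request id.
            return result.json()['Requests']['id']
        result.raise_for_status()
        return None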
%s') % result.text) - - def _hdfs_ha_put_config(self, hac, config_name, tag, properties): - - config_url = ('http://{0}/api/v1/clusters/{1}').format( - hac['ambari_info'].get_address(), hac['name']) - - body = {} - clusters = {} - body['Clusters'] = clusters - body['Clusters']['desired_config'] = {} - body['Clusters']['desired_config']['type'] = config_name - body['Clusters']['desired_config']['tag'] = tag - body['Clusters']['desired_config']['properties'] = properties - - LOG.debug("body: {body}".format(body=body)) - - result = self._put(config_url, hac['ambari_info'], - data=json.dumps(body)) - if result.status_code != 200: - LOG.error( - _LE('Configuring HDFS HA failed. {result}').format( - result=result.text)) - raise ex.NameNodeHAConfigurationError( - _('Configuring HDFS HA failed. %s') % result.text) - - def _hdfs_ha_update_hdfs_site(self, hac, hdfs_site): - - hdfs_site['dfs.nameservices'] = hac['name'] - - hdfs_site['dfs.ha.namenodes.{0}'.format( - hac['name'])] = hac['nn_active'] + ',' + hac['nn_standby'] - - hdfs_site['dfs.namenode.rpc-address.{0}.{1}'.format( - hac['name'], hac['nn_active'])] = '{0}:{1}'.format( - hac['nn_active'], hac['nn_rpc']) - hdfs_site['dfs.namenode.rpc-address.{0}.{1}'.format( - hac['name'], hac['nn_standby'])] = '{0}:{1}'.format( - hac['nn_standby'], hac['nn_rpc']) - hdfs_site['dfs.namenode.http-address.{0}.{1}'.format( - hac['name'], hac['nn_active'])] = '{0}:{1}'.format( - hac['nn_active'], hac['nn_ui']) - hdfs_site['dfs.namenode.http-address.{0}.{1}'.format( - hac['name'], hac['nn_standby'])] = '{0}:{1}'.format( - hac['nn_standby'], hac['nn_ui']) - - qjournal = ';'.join([x+':8485' for x in hac['jn_hosts']]) - hdfs_site['dfs.namenode.shared.edits.dir'] = ('qjournal://{0}/{1}'. - format(qjournal, - hac['name'])) - - hdfs_site['dfs.client.failover.proxy.provider.{0}'.format( - hac['name'])] = ("org.apache.hadoop.hdfs.server.namenode.ha." - "ConfiguredFailoverProxyProvider") - - hdfs_site['dfs.ha.fencing.methods'] = 'shell(/bin/true)' - - hdfs_site['dfs.ha.automatic-failover.enabled'] = 'true' - - return hdfs_site - - def _hdfs_ha_update_core_site(self, hac, core_site): - - core_site['fs.defaultFS'] = 'hdfs://{0}'.format(hac['name']) - core_site['ha.zookeeper.quorum'] = '{0}'.format( - ','.join([x+':2181' for x in hac['zk_hosts']])) - - # if HUE is installed add some httpfs configs - if hac['hue_host']: - core_site['hadoop.proxyuser.httpfs.groups'] = '*' - core_site['hadoop.proxyuser.httpfs.hosts'] = '*' - - return core_site - - def _hdfs_ha_update_hbase_site(self, hac, hbase_site): - - hbase_site['hbase.rootdir'] = 'hdfs://{0}/apps/hbase/data'.format( - hac['name']) - return hbase_site - - def _hdfs_ha_setup_hue(self, hac): - - hac['server_hue'].install_httpfs() - - # write a temp file and - # use it when starting HUE with HDFS HA enabled - hac['server_hue'].write_hue_temp_file('/tmp/hueini-hdfsha', - hac['name']) diff --git a/sahara/plugins/hdp/versions/versionhandlerfactory.py b/sahara/plugins/hdp/versions/versionhandlerfactory.py deleted file mode 100644 index 21f0f519e1..0000000000 --- a/sahara/plugins/hdp/versions/versionhandlerfactory.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
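The hdfs-site rewrite removed above is the standard NameNode HA wiring: a nameservice id, an rpc/http address pair per NameNode, a shared qjournal edits dir and the ConfiguredFailoverProxyProvider. Condensed into one sketch; the function name is illustrative, while property names, ports and values mirror the removed _hdfs_ha_update_hdfs_site:

    def build_ha_hdfs_site(nameservice, nn_active, nn_standby, journal_nodes):
        # The removed code reused the cluster name as the nameservice id.
        qjournal = ';'.join(jn + ':8485' for jn in journal_nodes)
        props = {
            'dfs.nameservices': nameservice,
            'dfs.ha.namenodes.%s' % nameservice:
                '%s,%s' % (nn_active, nn_standby),
            'dfs.namenode.shared.edits.dir':
                'qjournal://%s/%s' % (qjournal, nameservice),
            'dfs.client.failover.proxy.provider.%s' % nameservice:
                'org.apache.hadoop.hdfs.server.namenode.ha.'
                'ConfiguredFailoverProxyProvider',
            'dfs.ha.fencing.methods': 'shell(/bin/true)',
            'dfs.ha.automatic-failover.enabled': 'true',
        }
        for nn in (nn_active, nn_standby):
            props['dfs.namenode.rpc-address.%s.%s' % (nameservice, nn)] = (
                nn + ':8020')
            props['dfs.namenode.http-address.%s.%s' % (nameservice, nn)] = (
                nn + ':50070')
        return props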
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from sahara.utils import general - - -class VersionHandlerFactory(object): - versions = None - modules = None - initialized = False - - @staticmethod - def get_instance(): - if not VersionHandlerFactory.initialized: - src_dir = os.path.join(os.path.dirname(__file__), '') - versions = [name[8:].replace('_', '.') - for name in os.listdir(src_dir) - if os.path.isdir(os.path.join(src_dir, name)) - and name.startswith('version_')] - versions.sort(key=general.natural_sort_key) - VersionHandlerFactory.versions = versions - - VersionHandlerFactory.modules = {} - for version in VersionHandlerFactory.versions: - module_name = ('sahara.plugins.hdp.versions.version_{0}.' - 'versionhandler'.format( - version.replace('.', '_'))) - module_class = getattr( - __import__(module_name, fromlist=['sahara']), - 'VersionHandler') - module = module_class() - # would prefer to use __init__ or some constructor, but keep - # getting exceptions... - module._set_version(version) - VersionHandlerFactory.modules[version] = module - - VersionHandlerFactory.initialized = True - - return VersionHandlerFactory() - - def get_versions(self): - return VersionHandlerFactory.versions - - def get_version_handler(self, version): - return VersionHandlerFactory.modules[version] diff --git a/sahara/tests/unit/plugins/hdp/__init__.py b/sahara/tests/unit/plugins/hdp/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sahara/tests/unit/plugins/hdp/hdp_test_base.py b/sahara/tests/unit/plugins/hdp/hdp_test_base.py deleted file mode 100644 index aabc5e0f71..0000000000 --- a/sahara/tests/unit/plugins/hdp/hdp_test_base.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
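The VersionHandlerFactory deleted above discovers version_* packages on disk, converts the directory suffix into a version string and imports each package's versionhandler module. A minimal sketch of the same pattern using importlib instead of __import__; the base package argument is a placeholder:

    import importlib
    import os

    def discover_version_handlers(base_package, src_dir):
        # Directories named version_X_Y_Z correspond to plugin version X.Y.Z.
        versions = [name[len('version_'):].replace('_', '.')
                    for name in os.listdir(src_dir)
                    if name.startswith('version_')
                    and os.path.isdir(os.path.join(src_dir, name))]
        handlers = {}
        for version in versions:
            module_name = '{0}.version_{1}.versionhandler'.format(
                base_package, version.replace('.', '_'))
            module = importlib.import_module(module_name)
            handlers[version] = module.VersionHandler()
        return handlers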
- -import pkg_resources as pkg - -from sahara.plugins.hdp import clusterspec as cs -from sahara import version - - -class TestServer(object): - def __init__(self, hostname, role, img, flavor, public_ip, private_ip): - self.inst_fqdn = hostname - self.role = role - self.instance_info = InstanceInfo( - hostname, img, flavor, public_ip, private_ip) - self.management_ip = public_ip - self.public_ip = public_ip - self.internal_ip = private_ip - self.node_group = None - self.sahara_instance = self - self.storage_path = ['/mnt'] - - def storage_paths(self): - return self.storage_path - - def fqdn(self): - return self.inst_fqdn - - def remote(self): - return None - - -def get_instance_info(*args, **kwargs): - return args[0].instance_info - - -def create_clusterspec(hdp_version='2.0.6'): - version_suffix = hdp_version.replace('.', '_') - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_{0}/resources/' - 'default-cluster.template'.format(version_suffix)) - - return cs.ClusterSpec(cluster_config_file, version=hdp_version) - - -class InstanceInfo(object): - def __init__(self, hostname, image, flavor, management_ip, internal_ip): - self.image = image - self.flavor = flavor - self.management_ip = management_ip - self.internal_ip = internal_ip - - -class TestCluster(object): - def __init__(self, node_groups, cluster_configs=None): - self.plugin_name = 'hdp' - self.hadoop_version = None - if cluster_configs: - self.cluster_configs = cluster_configs - else: - self.cluster_configs = {} - self.node_groups = node_groups - self.default_image_id = '11111' - - -class TestNodeGroup(object): - def __init__(self, name, instances, node_processes, count=1): - self.name = name - self.instances = instances - if instances: - for i in instances: - i.node_group = self - self.node_processes = node_processes - self.count = count - self.id = name - - -class TestUserInputConfig(object): - def __init__(self, tag, target, name): - self.tag = tag - self.applicable_target = target - self.name = name - - -class TestRequest(object): - def put(self, url, data=None, auth=None, headers=None): - self.url = url - self.data = data - self.auth = auth - self.headers = headers - self.method = 'put' - - return TestResult(200) - - def post(self, url, data=None, auth=None, headers=None): - self.url = url - self.data = data - self.auth = auth - self.headers = headers - self.method = 'post' - - return TestResult(201) - - def delete(self, url, auth=None, headers=None): - self.url = url - self.auth = auth - self.data = None - self.headers = headers - self.method = 'delete' - - return TestResult(200) - - -class TestResult(object): - def __init__(self, status): - self.status_code = status - self.text = '' - - -class TestUserInput(object): - def __init__(self, config, value): - self.config = config - self.value = value diff --git a/sahara/tests/unit/plugins/hdp/resources/config-resource.json b/sahara/tests/unit/plugins/hdp/resources/config-resource.json deleted file mode 100644 index a5be3bec64..0000000000 --- a/sahara/tests/unit/plugins/hdp/resources/config-resource.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "configurations": [ - { - "file": "core-site", - "properties": [ - { - "name": "fs.trash.interval", - "default_value": "360", - "config_type": "integer", - "is_optional": true, - "description": "...", - "applicable_target": "service:hdfs", - "scope" : "node" - }, - { - "name": "fs.checkpoint.size", - "default_value": "536870912", - "config_type": "integer", - "is_optional": true, - 
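The test base removed above loads the bundled default-cluster.template through pkg_resources so the lookup also works from an installed package. In isolation that lookup is just the following sketch; note the resource path mirrors the tree deleted by this patch, so it no longer resolves once the patch merges:

    import pkg_resources as pkg

    def load_cluster_template(package, hdp_version='2.0.6'):
        # Returns the raw template bytes shipped inside the given package.
        resource = ('plugins/hdp/versions/version_{0}/resources/'
                    'default-cluster.template'.format(
                        hdp_version.replace('.', '_')))
        return pkg.resource_string(package, resource)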
"description": "...", - "applicable_target": "service:hdfs", - "scope" : "node" - - } - ] - }, - { - "file": "global", - "properties": [ - { - "name": "dfs_name_dir", - "default_value": "/hadoop/hdfs/namenode", - "config_type": "string", - "is_optional": true, - "description": "...", - "applicable_target": "service:hdfs", - "scope" : "cluster" - } - ] - } - ] -} \ No newline at end of file diff --git a/sahara/tests/unit/plugins/hdp/resources/sample-ambari-blueprint.json b/sahara/tests/unit/plugins/hdp/resources/sample-ambari-blueprint.json deleted file mode 100644 index 53374f2c84..0000000000 --- a/sahara/tests/unit/plugins/hdp/resources/sample-ambari-blueprint.json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "name": "HDP", - "version": "1.3.2", - "author": "Hortonworks", - "created": "03-31-2013", - "reference": "Hortonworks-linux", - "packages": { - "type": "rpm", - "repos": [ - { - "name": "HDP", - "repoLocations": [ - { - "target": "centos6", - "uri": "http://public-repo-1.hortonworks.com/ambari/centos6/1.x/GA/" - }, - { - "target": "suse11", - "uri": "http://public-repo-1.hortonworks.com/ambari/suse11/1.x/GA/" - } - ] - } - ] - }, - "services": [ - { - "name": "HDFS", - "components": [ - { - "name": "NAMENODE", - "type": "MASTER", - "cardinality": "1", - "hostRequirements": [ - { - "name": "python", - "value": "exists" - }, - { - "name": "jdk-1.6", - "value": "exists" - } - ], - "deployedPackages": { - "type": "rpm", - "deploymentContext": [ - { - "name": "customName", - "value": "customValue" - } - ] - } - }, - { - "name": "DATANODE", - "type": "SLAVE", - "cardinality": "1+", - "hostRequirements": { - "python": "exists", - "jdk-1.6": "exists" - }, - "deployedPackages": { - "type": "rpm" - } - } - ], - "configurations": [ - { - "name": "core-site", - "properties": [ - { - "name": "fs.trash.interval", - "value": "360" - }, - { - "name": "fs.checkpoint.size", - "value": "536870912" - } - ] - }, - { - "name": "global", - "properties": [ - { - "name": "dfs_name_dir", - "value": "/hadoop/hdfs/namenode" - } - ] - } - ] - }, - { - "name": "MAPREDUCE", - "components": [ - { - "name": "JOBTRACKER", - "type": "MASTER", - "cardinality": "1", - "hostRequirements": { - "jdk-1.6": "exists" - }, - "deployedPackages": { - "type": "rpm" - } - }, - { - "name": "MAPREDUCE_CLIENT", - "type": "CLIENT", - "cardinality": "0+" - } - ], - "configurations": [ - { - "name": "global", - "properties": [ - { - "name": "jobtracker_host", - "value": "localhost" - } - ] - } - ] - }, - { - "name" : "AMBARI", - "components" : [ - { - "name" : "AMBARI_SERVER", - "type" : "MASTER", - "cardinality" : "1" - }, - { - "name" : "AMBARI_AGENT", - "type" : "SLAVE", - "cardinality" : "1+" - } - ], - "configurations" : [ - ], - "users" : [ - { - "name" : "admin", - "password" : "admin", - "groups" : [ - "admin", - "user" - ] - } - ] - } - ], - "host_role_mappings" : [ - { - "name" : "MASTER", - "components" : [ - { "name" : "NAMENODE" }, - { "name" : "JOBTRACKER" }, - { "name" : "SECONDARY_NAMENODE" }, - { "name" : "GANGLIA_SERVER" }, - { "name" : "GANGLIA_MONITOR" }, - { "name" : "NAGIOS_SERVER" }, - { "name" : "AMBARI_SERVER" }, - { "name" : "AMBARI_AGENT" } - ], - "hosts" : [ - { - "cardinality" : "1" - } - ] - }, - { - "name" : "SLAVE", - "components" : [ - { "name" : "DATANODE" }, - { "name" : "TASKTRACKER" }, - { "name" : "GANGLIA_MONITOR" }, - { "name" : "HDFS_CLIENT" }, - { "name" : "MAPREDUCE_CLIENT" }, - { "name" : "AMBARI_AGENT" } - ], - "hosts" : [ - { - "cardinality" : "1+" - } - ] - } - ], - "configurations" : [ - { - 
"name" : "global", - "properties" : [ - { "name" : "dfs_name_dir", "value" : "/hadoop/hdfs/namenode" } - ] - } - ] -} \ No newline at end of file diff --git a/sahara/tests/unit/plugins/hdp/test_ambariplugin.py b/sahara/tests/unit/plugins/hdp/test_ambariplugin.py deleted file mode 100644 index 884f48bb8a..0000000000 --- a/sahara/tests/unit/plugins/hdp/test_ambariplugin.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -import pkg_resources as pkg -import testtools - -from sahara.conductor import resource as r -from sahara.plugins import base as pb -from sahara.plugins import exceptions as ex -from sahara.plugins.hdp import ambariplugin as ap -from sahara.plugins.hdp import clusterspec as cs -from sahara.tests.unit import base as sahara_base -import sahara.tests.unit.plugins.hdp.hdp_test_base as base -from sahara.utils import edp -from sahara import version - - -GET_REST_REQ = ("sahara.plugins.hdp.versions.version_2_0_6.versionhandler." - "AmbariClient._get_http_session") - - -def create_cluster_template(ctx, dct): - return r.ClusterTemplateResource(dct) - - -class AmbariPluginTest(sahara_base.SaharaTestCase): - def setUp(self): - super(AmbariPluginTest, self).setUp() - pb.setup_plugins() - - def test_get_node_processes(self): - plugin = ap.AmbariPlugin() - service_components = plugin.get_node_processes('2.0.6') - self.assertEqual({ - 'YARN': ['RESOURCEMANAGER', 'YARN_CLIENT', 'NODEMANAGER'], - 'GANGLIA': ['GANGLIA_SERVER'], - 'HUE': ['HUE'], - 'HIVE': ['HIVE_SERVER', 'HIVE_METASTORE', 'HIVE_CLIENT', - 'MYSQL_SERVER'], - 'OOZIE': ['OOZIE_SERVER', 'OOZIE_CLIENT'], - 'HDFS': ['NAMENODE', 'DATANODE', 'SECONDARY_NAMENODE', - 'HDFS_CLIENT', 'JOURNALNODE', 'ZKFC'], - 'SQOOP': ['SQOOP'], - 'MAPREDUCE2': ['HISTORYSERVER', 'MAPREDUCE2_CLIENT'], - 'ZOOKEEPER': ['ZOOKEEPER_SERVER', 'ZOOKEEPER_CLIENT'], - 'HBASE': ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'HBASE_CLIENT'], - 'HCATALOG': ['HCAT'], - 'NAGIOS': ['NAGIOS_SERVER'], - 'AMBARI': ['AMBARI_SERVER'], - 'WEBHCAT': ['WEBHCAT_SERVER'], - 'PIG': ['PIG']}, service_components) - - def test_convert(self): - plugin = ap.AmbariPlugin() - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - cluster = plugin.convert(cluster_config_file, 'ambari', '2.0.6', - 'test-plugin', create_cluster_template) - normalized_config = cs.ClusterSpec(cluster_config_file).normalize() - - self.assertEqual(normalized_config.hadoop_version, - cluster.hadoop_version) - self.assertEqual(len(normalized_config.node_groups), - len(cluster.node_groups)) - - @mock.patch(GET_REST_REQ) - def test__set_ambari_credentials__admin_only(self, client): - client.side_effect = self._get_test_request - self.requests = [] - plugin = ap.AmbariPlugin() - - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - 
cluster_spec = cs.ClusterSpec(cluster_config_file) - - ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), - '8080', 'admin', 'old-pwd') - plugin._set_ambari_credentials(cluster_spec, ambari_info, '2.0.6') - - self.assertEqual(1, len(self.requests)) - request = self.requests[0] - self.assertEqual('put', request.method) - self.assertEqual('http://111.11.1111:8080/api/v1/users/admin', - request.url) - self.assertEqual('{"Users":{"roles":"admin","password":"admin",' - '"old_password":"old-pwd"} }', request.data) - self.assertEqual(('admin', 'old-pwd'), request.auth) - self.assertEqual('admin', ambari_info.user) - self.assertEqual('admin', ambari_info.password) - - @mock.patch(GET_REST_REQ) - def test__set_ambari_credentials__new_user_no_admin(self, client): - self.requests = [] - plugin = ap.AmbariPlugin() - client.side_effect = self._get_test_request - - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - cluster_spec = cs.ClusterSpec(cluster_config_file) - - for service in cluster_spec.services: - if service.name == 'AMBARI': - user = service.users[0] - user.name = 'test' - user.password = 'test_pw' - - ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080', - 'admin', 'old-pwd') - plugin._set_ambari_credentials(cluster_spec, ambari_info, '2.0.6') - self.assertEqual(2, len(self.requests)) - - request = self.requests[0] - self.assertEqual('post', request.method) - self.assertEqual('http://111.11.1111:8080/api/v1/users/test', - request.url) - self.assertEqual('{"Users":{"password":"test_pw","roles":"admin"' - '} }', request.data) - self.assertEqual(('admin', 'old-pwd'), request.auth) - - request = self.requests[1] - self.assertEqual('delete', request.method) - self.assertEqual('http://111.11.1111:8080/api/v1/users/admin', - request.url) - self.assertIsNone(request.data) - self.assertEqual(('test', 'test_pw'), request.auth) - self.assertEqual('test', ambari_info.user) - self.assertEqual('test_pw', ambari_info.password) - - @mock.patch(GET_REST_REQ) - def test__set_ambari_credentials__new_user_with_admin(self, client): - self.requests = [] - plugin = ap.AmbariPlugin() - client.side_effect = self._get_test_request - - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - cluster_spec = cs.ClusterSpec(cluster_config_file) - - for service in cluster_spec.services: - if service.name == 'AMBARI': - new_user = cs.User('test', 'test_pw', ['user']) - service.users.append(new_user) - - ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080', - 'admin', 'old-pwd') - plugin._set_ambari_credentials(cluster_spec, ambari_info, '2.0.6') - self.assertEqual(2, len(self.requests)) - - request = self.requests[0] - self.assertEqual('put', request.method) - self.assertEqual('http://111.11.1111:8080/api/v1/users/admin', - request.url) - self.assertEqual('{"Users":{"roles":"admin","password":"admin",' - '"old_password":"old-pwd"} }', request.data) - self.assertEqual(('admin', 'old-pwd'), request.auth) - - request = self.requests[1] - self.assertEqual('post', request.method) - self.assertEqual('http://111.11.1111:8080/api/v1/users/test', - request.url) - self.assertEqual('{"Users":{"password":"test_pw","roles":"user"} }', - request.data) - self.assertEqual(('admin', 'admin'), request.auth) - - self.assertEqual('admin', ambari_info.user) - self.assertEqual('admin', ambari_info.password) - - 
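The credential tests removed here swap the Ambari HTTP session for a recording fake by patching _get_http_session with a side_effect factory. Reduced to a self-contained sketch of that mock pattern, with no Sahara imports and illustrative names:

    import mock

    def make_recording_session_factory(recorded):
        # Each call to the patched _get_http_session(host, port) returns a
        # fresh fake session; .put() reports success and the session is kept
        # around so the test can assert on the recorded calls.
        def factory(host, port):
            session = mock.Mock()
            session.put.return_value = mock.Mock(status_code=200, text='')
            recorded.append(session)
            return session
        return factory

    recorded = []
    get_http_session = mock.Mock(
        side_effect=make_recording_session_factory(recorded))
    session = get_http_session('111.11.1111', '8080')
    session.put('http://111.11.1111:8080/api/v1/users/admin', data='{"x": 1}')
    assert recorded[0].put.call_count == 1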
@mock.patch(GET_REST_REQ) - @testtools.skip("test failure because of #1325108") - def test__set_ambari_credentials__no_admin_user(self, client): - self.requests = [] - plugin = ap.AmbariPlugin() - client.side_effect = self._get_test_request - - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - cluster_spec = cs.ClusterSpec(cluster_config_file) - - for service in cluster_spec.services: - if service.name == 'AMBARI': - user = service.users[0] - user.name = 'test' - user.password = 'test_pw' - user.groups = ['user'] - - ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), - '8080', 'admin', 'old-pwd') - - self.assertRaises(ex.HadoopProvisionError, - plugin._set_ambari_credentials, - cluster_spec, ambari_info, '2.0.6') - - @mock.patch("sahara.utils.openstack.nova.get_instance_info", - base.get_instance_info) - @mock.patch('sahara.plugins.hdp.versions.version_2_0_6.services.' - 'HdfsService._get_swift_properties', return_value=[]) - def test__get_ambari_info(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - test_host = base.TestServer( - 'host1', 'test-master', '11111', 3, '111.11.1111', - '222.11.1111') - - node_group = base.TestNodeGroup( - 'ng1', [test_host], ["AMBARI_SERVER", "NAMENODE", "DATANODE", - 'RESOURCEMANAGER', 'YARN_CLIENT', - 'NODEMANAGER', - 'HISTORYSERVER', 'MAPREDUCE2_CLIENT', - 'ZOOKEEPER_SERVER', 'ZOOKEEPER_CLIENT']) - cluster = base.TestCluster([node_group]) - cluster_config = cs.ClusterSpec(cluster_config_file) - cluster_config.create_operational_config(cluster, []) - plugin = ap.AmbariPlugin() - - # change port - cluster_config.configurations['ambari']['server.port'] = '9000' - - ambari_info = plugin.get_ambari_info(cluster_config) - self.assertEqual('9000', ambari_info.port) - - # remove port - del cluster_config.configurations['ambari']['server.port'] - ambari_info = plugin.get_ambari_info(cluster_config) - - self.assertEqual('8080', ambari_info.port) - - def test_update_ambari_info_credentials(self): - plugin = ap.AmbariPlugin() - - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - cluster_spec = cs.ClusterSpec(cluster_config_file) - - ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), - '8080', 'admin', 'old-pwd') - plugin._update_ambari_info_credentials(cluster_spec, ambari_info) - - self.assertEqual('admin', ambari_info.user) - self.assertEqual('admin', ambari_info.password) - - def test_get_oozie_server(self): - test_host = base.TestServer( - 'host1', 'test-master', '11111', 3, '111.11.1111', - '222.11.1111') - - node_group = base.TestNodeGroup( - 'ng1', [test_host], ["AMBARI_SERVER", "NAMENODE", "DATANODE", - "OOZIE_SERVER"]) - cluster = base.TestCluster([node_group]) - cluster.hadoop_version = '2.0.6' - plugin = ap.AmbariPlugin() - - self.assertIsNotNone(plugin.get_edp_engine( - cluster, edp.JOB_TYPE_PIG).get_oozie_server(cluster)) - - node_group = base.TestNodeGroup( - 'ng1', [test_host], ["AMBARI_SERVER", "NAMENODE", "DATANODE", - "NOT_OOZIE"]) - cluster = base.TestCluster([node_group]) - cluster.hadoop_version = '2.0.6' - self.assertIsNone(plugin.get_edp_engine( - cluster, edp.JOB_TYPE_PIG).get_oozie_server(cluster)) - - @mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop2') - def 
test_edp206_calls_hadoop2_create_dir(self, create_dir): - cluster = base.TestCluster([]) - cluster.plugin_name = 'hdp' - cluster.hadoop_version = '2.0.6' - plugin = ap.AmbariPlugin() - plugin.get_edp_engine(cluster, edp.JOB_TYPE_PIG).create_hdfs_dir( - mock.Mock(), '/tmp') - - self.assertEqual(1, create_dir.call_count) - - def _get_test_request(self, host, port): - request = base.TestRequest() - self.requests.append(request) - return request - - -class TestHost(object): - def __init__(self, management_ip, role=None): - self.management_ip = management_ip - self.role = role diff --git a/sahara/tests/unit/plugins/hdp/test_clusterspec_hdp2.py b/sahara/tests/unit/plugins/hdp/test_clusterspec_hdp2.py deleted file mode 100644 index 26f6bc0eff..0000000000 --- a/sahara/tests/unit/plugins/hdp/test_clusterspec_hdp2.py +++ /dev/null @@ -1,2035 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -import pkg_resources as pkg - -from sahara.conductor import resource as rsc -from sahara.plugins import exceptions as ex -from sahara.plugins.hdp import clusterspec as cs -from sahara.plugins.hdp.versions.version_2_0_6 import services as s2 -from sahara.plugins import provisioning -from sahara.tests.unit import base as sahara_base -import sahara.tests.unit.plugins.hdp.hdp_test_base as base -from sahara.topology import topology_helper as th -from sahara import version - - -class TestCONF(object): - def __init__(self, enable_data_locality, enable_hypervisor_awareness): - self.enable_data_locality = enable_data_locality - self.enable_hypervisor_awareness = enable_hypervisor_awareness - - -@mock.patch("sahara.utils.openstack.nova.get_instance_info", - base.get_instance_info) -@mock.patch('sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.' 
- '_get_swift_properties', - return_value=[]) -class ClusterSpecTestForHDP2(sahara_base.SaharaTestCase): - service_validators = {} - - def setUp(self): - super(ClusterSpecTestForHDP2, self).setUp() - self.service_validators['YARN'] = self._assert_yarn - self.service_validators['HDFS'] = self._assert_hdfs - self.service_validators['MAPREDUCE2'] = self._assert_mrv2 - self.service_validators['GANGLIA'] = self._assert_ganglia - self.service_validators['NAGIOS'] = self._assert_nagios - self.service_validators['AMBARI'] = self._assert_ambari - self.service_validators['PIG'] = self._assert_pig - self.service_validators['HIVE'] = self._assert_hive - self.service_validators['HCATALOG'] = self._assert_hcatalog - self.service_validators['ZOOKEEPER'] = self._assert_zookeeper - self.service_validators['WEBHCAT'] = self._assert_webhcat - self.service_validators['OOZIE'] = self._assert_oozie - self.service_validators['SQOOP'] = self._assert_sqoop - self.service_validators['HBASE'] = self._assert_hbase - self.service_validators['HUE'] = self._assert_hue - - def test_parse_default_with_cluster(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - server1 = base.TestServer('host1', 'test-master', '11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'test-slave', '11111', 3, - '222.22.2222', '333.22.2222') - - node_group1 = TestNodeGroup( - 'master', [server1], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "SECONDARY_NAMENODE", - "GANGLIA_SERVER", "GANGLIA_MONITOR", - "NAGIOS_SERVER", "AMBARI_SERVER", - "AMBARI_AGENT", "ZOOKEEPER_SERVER"]) - node_group2 = TestNodeGroup('slave', [server2], ['NODEMANAGER', - 'DATANODE']) - cluster = base.TestCluster([node_group1, node_group2]) - - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config(cluster, []) - - self._assert_services(cluster_config.services) - self._assert_configurations(cluster_config.configurations) - - node_groups = cluster_config.node_groups - self.assertEqual(2, len(node_groups)) - self.assertIn('master', node_groups) - self.assertIn('slave', node_groups) - - master_node_group = node_groups['master'] - self.assertEqual('master', master_node_group.name) - self.assertEqual(13, len(master_node_group.components)) - self.assertIn('NAMENODE', master_node_group.components) - self.assertIn('RESOURCEMANAGER', master_node_group.components) - self.assertIn('HISTORYSERVER', master_node_group.components) - self.assertIn('SECONDARY_NAMENODE', master_node_group.components) - self.assertIn('GANGLIA_SERVER', master_node_group.components) - self.assertIn('GANGLIA_MONITOR', master_node_group.components) - self.assertIn('NAGIOS_SERVER', master_node_group.components) - self.assertIn('AMBARI_SERVER', master_node_group.components) - self.assertIn('AMBARI_AGENT', master_node_group.components) - self.assertIn('YARN_CLIENT', master_node_group.components) - self.assertIn('ZOOKEEPER_SERVER', master_node_group.components) - - slave_node_group = node_groups['slave'] - self.assertEqual('slave', slave_node_group.name) - self.assertIn('NODEMANAGER', slave_node_group.components) - - return cluster_config - - def test_determine_component_hosts(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - server1 = base.TestServer('ambari_machine', 'master', 
'11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'slave', '11111', 3, '222.22.2222', - '333.22.2222') - server3 = base.TestServer('host3', 'slave', '11111', 3, '222.22.2223', - '333.22.2223') - - node_group1 = TestNodeGroup( - 'master', [server1], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "SECONDARY_NAMENODE", - "GANGLIA_SERVER", "NAGIOS_SERVER", - "AMBARI_SERVER", "ZOOKEEPER_SERVER"]) - node_group2 = TestNodeGroup( - 'slave', [server2], ["DATANODE", "NODEMANAGER", - "HDFS_CLIENT", "MAPREDUCE2_CLIENT"]) - - node_group3 = TestNodeGroup( - 'slave2', [server3], ["DATANODE", "NODEMANAGER", - "HDFS_CLIENT", "MAPREDUCE2_CLIENT"]) - - cluster = base.TestCluster([node_group1, node_group2, node_group3]) - - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config(cluster, []) - - hosts = cluster_config.determine_component_hosts('AMBARI_SERVER') - self.assertEqual(1, len(hosts)) - self.assertEqual('ambari_machine', hosts.pop().fqdn()) - - hosts = cluster_config.determine_component_hosts('DATANODE') - self.assertEqual(2, len(hosts)) - datanodes = set([server2.fqdn(), server3.fqdn()]) - host_fqdn = set([hosts.pop().fqdn(), hosts.pop().fqdn()]) - # test intersection is both servers - self.assertEqual(datanodes, host_fqdn & datanodes) - - def test_finalize_configuration(self, patched): - patched.return_value = [{'name': 'swift.prop1', - 'value': 'swift_prop_value'}, - {'name': 'swift.prop2', - 'value': 'swift_prop_value2'}] - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - master_host = base.TestServer( - 'master.novalocal', 'master', '11111', 3, - '111.11.1111', '222.11.1111') - - jt_host = base.TestServer( - 'jt_host.novalocal', 'jt', '11111', 3, - '111.11.2222', '222.11.2222') - - nn_host = base.TestServer( - 'nn_host.novalocal', 'nn', '11111', 3, - '111.11.3333', '222.11.3333') - - snn_host = base.TestServer( - 'snn_host.novalocal', 'jt', '11111', 3, - '111.11.4444', '222.11.4444') - - hive_host = base.TestServer( - 'hive_host.novalocal', 'hive', '11111', 3, - '111.11.5555', '222.11.5555') - - hive_ms_host = base.TestServer( - 'hive_ms_host.novalocal', 'hive_ms', '11111', 3, - '111.11.6666', '222.11.6666') - - hive_mysql_host = base.TestServer( - 'hive_mysql_host.novalocal', 'hive_mysql', '11111', 3, - '111.11.7777', '222.11.7777') - - hcat_host = base.TestServer( - 'hcat_host.novalocal', 'hcat', '11111', 3, - '111.11.8888', '222.11.8888') - - zk1_host = base.TestServer( - 'zk1_host.novalocal', 'zk1', '11111', 3, - '111.11.9999', '222.11.9999') - - zk2_host = base.TestServer( - 'zk2_host.novalocal', 'zk2', '11112', 3, - '111.11.9990', '222.11.9990') - - oozie_host = base.TestServer( - 'oozie_host.novalocal', 'oozie', '11111', 3, - '111.11.9999', '222.11.9999') - - slave_host = base.TestServer( - 'slave1.novalocal', 'slave', '11111', 3, - '222.22.6666', '333.22.6666') - - master_ng = TestNodeGroup( - 'master', [master_host], ["GANGLIA_SERVER", - "GANGLIA_MONITOR", - "NAGIOIS_SERVER", - "AMBARI_SERVER", - "AMBARI_AGENT"]) - - jt_ng = TestNodeGroup( - 'jt', [jt_host], ["RESOURCEMANAGER", "GANGLIA_MONITOR", - "HISTORYSERVER", "AMBARI_AGENT"]) - - nn_ng = TestNodeGroup( - 'nn', [nn_host], ["NAMENODE", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - snn_ng = TestNodeGroup( - 'snn', [snn_host], ["SECONDARY_NAMENODE", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - hive_ng = TestNodeGroup( - 'hive', [hive_host], 
["HIVE_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - hive_ms_ng = TestNodeGroup( - 'meta', [hive_ms_host], ["HIVE_METASTORE", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - hive_mysql_ng = TestNodeGroup( - 'mysql', [hive_mysql_host], ["MYSQL_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - hcat_ng = TestNodeGroup( - 'hcat', [hcat_host], ["WEBHCAT_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - zk1_ng = TestNodeGroup( - 'zk1', [zk1_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - zk2_ng = TestNodeGroup( - 'zk2', [zk2_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - oozie_ng = TestNodeGroup( - 'oozie', [oozie_host], ["OOZIE_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - slave_ng = TestNodeGroup( - 'slave', [slave_host], ["DATANODE", "NODEMANAGER", - "GANGLIA_MONITOR", "HDFS_CLIENT", - "MAPREDUCE2_CLIENT", "OOZIE_CLIENT", - "AMBARI_AGENT"]) - - user_input_config = TestUserInputConfig( - 'core-site', 'cluster', 'fs.defaultFS') - user_input = provisioning.UserInput( - user_input_config, 'hdfs://nn_dif_host.novalocal:8020') - - cluster = base.TestCluster([master_ng, jt_ng, nn_ng, snn_ng, hive_ng, - hive_ms_ng, hive_mysql_ng, - hcat_ng, zk1_ng, zk2_ng, oozie_ng, - slave_ng]) - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config(cluster, [user_input]) - config = cluster_config.configurations - - # for this value, validating that user inputs override configured - # values, whether they are processed by runtime or not - self.assertEqual('hdfs://nn_dif_host.novalocal:8020', - config['core-site']['fs.defaultFS']) - - self.assertEqual('jt_host.novalocal:19888', - config['mapred-site'] - ['mapreduce.jobhistory.webapp.address']) - - self.assertEqual('nn_host.novalocal:50070', - config['hdfs-site']['dfs.namenode.http-address']) - self.assertEqual('snn_host.novalocal:50090', - config['hdfs-site'] - ['dfs.namenode.secondary.http-address']) - self.assertEqual('nn_host.novalocal:50470', - config['hdfs-site']['dfs.namenode.https-address']) - - self.assertEqual('hive_host.novalocal', - config['global']['hive_hostname']) - self.assertEqual('hive_host.novalocal', - config['core-site']['hadoop.proxyuser.hive.hosts']) - self.assertEqual('jdbc:mysql://hive_mysql_host.novalocal/hive?' 
- 'createDatabaseIfNotExist=true', - config['hive-site'] - ['javax.jdo.option.ConnectionURL']) - self.assertEqual('thrift://hive_ms_host.novalocal:9083', - config['hive-site']['hive.metastore.uris']) - self.assertTrue( - 'hive.metastore.uris=thrift://hive_ms_host.novalocal:9083' in - config['webhcat-site']['templeton.hive.properties']) - self.assertEqual('hcat_host.novalocal', - config['core-site']['hadoop.proxyuser.hcat.hosts']) - self.assertEqual( - set(['zk1_host.novalocal:2181', 'zk2_host.novalocal:2181']), - set(config['webhcat-site'] - ['templeton.zookeeper.hosts'].split(','))) - - self.assertEqual('http://oozie_host.novalocal:11000/oozie', - config['oozie-site']['oozie.base.url']) - self.assertEqual('oozie_host.novalocal', - config['global']['oozie_hostname']) - self.assertEqual('oozie_host.novalocal,222.11.9999,111.11.9999', - config['core-site']['hadoop.proxyuser.oozie.hosts']) - - # test swift properties - self.assertEqual('swift_prop_value', - config['core-site']['swift.prop1']) - self.assertEqual('swift_prop_value2', - config['core-site']['swift.prop2']) - - def test_finalize_configuration_with_hue(self, patched): - patched.return_value = [{'name': 'swift.prop1', - 'value': 'swift_prop_value'}, - {'name': 'swift.prop2', - 'value': 'swift_prop_value2'}] - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - master_host = base.TestServer( - 'master.novalocal', 'master', '11111', 3, - '111.11.1111', '222.11.1111') - - jt_host = base.TestServer( - 'jt_host.novalocal', 'jt', '11111', 3, - '111.11.2222', '222.11.2222') - - nn_host = base.TestServer( - 'nn_host.novalocal', 'nn', '11111', 3, - '111.11.3333', '222.11.3333') - - snn_host = base.TestServer( - 'snn_host.novalocal', 'jt', '11111', 3, - '111.11.4444', '222.11.4444') - - hive_host = base.TestServer( - 'hive_host.novalocal', 'hive', '11111', 3, - '111.11.5555', '222.11.5555') - - hive_ms_host = base.TestServer( - 'hive_ms_host.novalocal', 'hive_ms', '11111', 3, - '111.11.6666', '222.11.6666') - - hive_mysql_host = base.TestServer( - 'hive_mysql_host.novalocal', 'hive_mysql', '11111', 3, - '111.11.7777', '222.11.7777') - - hcat_host = base.TestServer( - 'hcat_host.novalocal', 'hcat', '11111', 3, - '111.11.8888', '222.11.8888') - - zk1_host = base.TestServer( - 'zk1_host.novalocal', 'zk1', '11111', 3, - '111.11.9999', '222.11.9999') - - zk2_host = base.TestServer( - 'zk2_host.novalocal', 'zk2', '11112', 3, - '111.11.9990', '222.11.9990') - - oozie_host = base.TestServer( - 'oozie_host.novalocal', 'oozie', '11111', 3, - '111.11.9999', '222.11.9999') - - slave_host = base.TestServer( - 'slave1.novalocal', 'slave', '11111', 3, - '222.22.6666', '333.22.6666') - - master_ng = TestNodeGroup( - 'master', [master_host], ["GANGLIA_SERVER", - "GANGLIA_MONITOR", - "NAGIOIS_SERVER", - "AMBARI_SERVER", - "AMBARI_AGENT"]) - - jt_ng = TestNodeGroup( - 'jt', [jt_host], ["RESOURCEMANAGER", "GANGLIA_MONITOR", - "HISTORYSERVER", "AMBARI_AGENT"]) - - nn_ng = TestNodeGroup( - 'nn', [nn_host], ["NAMENODE", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - snn_ng = TestNodeGroup( - 'snn', [snn_host], ["SECONDARY_NAMENODE", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - hive_ng = TestNodeGroup( - 'hive', [hive_host], ["HIVE_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - hive_ms_ng = TestNodeGroup( - 'meta', [hive_ms_host], ["HIVE_METASTORE", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - hive_mysql_ng = TestNodeGroup( - 'mysql', [hive_mysql_host], 
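The finalize-configuration assertions removed above all verify one thing: host-dependent properties get rewritten from wherever the components actually landed. The substitutions being checked look roughly like this, assuming one host per component and made-up host names:

    def derive_host_configs(nn_host, history_host, hive_ms_host, oozie_host):
        # Mirrors the kind of values the removed assertions expect.
        return {
            'core-site': {'fs.defaultFS': 'hdfs://%s:8020' % nn_host},
            'hdfs-site': {'dfs.namenode.http-address': '%s:50070' % nn_host},
            'mapred-site': {
                'mapreduce.jobhistory.webapp.address':
                    '%s:19888' % history_host,
            },
            'hive-site': {
                'hive.metastore.uris': 'thrift://%s:9083' % hive_ms_host,
            },
            'oozie-site': {
                'oozie.base.url': 'http://%s:11000/oozie' % oozie_host,
            },
        }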
["MYSQL_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - hcat_ng = TestNodeGroup( - 'hcat', [hcat_host], ["WEBHCAT_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - zk1_ng = TestNodeGroup( - 'zk1', [zk1_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - zk2_ng = TestNodeGroup( - 'zk2', [zk2_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - - oozie_ng = TestNodeGroup( - 'oozie', [oozie_host], ["OOZIE_SERVER", "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - slave_ng = TestNodeGroup( - 'slave', [slave_host], ["DATANODE", "NODEMANAGER", - "GANGLIA_MONITOR", "HDFS_CLIENT", - "MAPREDUCE2_CLIENT", "OOZIE_CLIENT", - "AMBARI_AGENT", "HUE"]) - - user_input_config = TestUserInputConfig( - 'core-site', 'cluster', 'fs.defaultFS') - user_input = provisioning.UserInput( - user_input_config, 'hdfs://nn_dif_host.novalocal:8020') - - cluster = base.TestCluster([master_ng, jt_ng, nn_ng, snn_ng, hive_ng, - hive_ms_ng, hive_mysql_ng, - hcat_ng, zk1_ng, zk2_ng, oozie_ng, - slave_ng]) - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config(cluster, [user_input]) - config = cluster_config.configurations - - # for this value, validating that user inputs override configured - # values, whether they are processed by runtime or not - self.assertEqual('hdfs://nn_dif_host.novalocal:8020', - config['core-site']['fs.defaultFS']) - - self.assertEqual('jt_host.novalocal:19888', - config['mapred-site'] - ['mapreduce.jobhistory.webapp.address']) - - self.assertEqual('nn_host.novalocal:50070', - config['hdfs-site']['dfs.namenode.http-address']) - self.assertEqual('snn_host.novalocal:50090', - config['hdfs-site'] - ['dfs.namenode.secondary.http-address']) - self.assertEqual('nn_host.novalocal:50470', - config['hdfs-site']['dfs.namenode.https-address']) - self.assertEqual('true', - config['hdfs-site']['dfs.support.broken.append']) - self.assertEqual('true', - config['hdfs-site']['dfs.webhdfs.enabled']) - - self.assertEqual('hive_host.novalocal', - config['global']['hive_hostname']) - self.assertEqual('hive_host.novalocal', - config['core-site']['hadoop.proxyuser.hive.hosts']) - self.assertEqual('jdbc:mysql://hive_mysql_host.novalocal/hive?' - 'createDatabaseIfNotExist=true', - config['hive-site'] - ['javax.jdo.option.ConnectionURL']) - self.assertEqual('thrift://hive_ms_host.novalocal:9083', - config['hive-site']['hive.metastore.uris']) - self.assertTrue( - 'hive.metastore.uris=thrift://hive_ms_host.novalocal:9083' in - config['webhcat-site']['templeton.hive.properties']) - self.assertEqual('*', - config['core-site']['hadoop.proxyuser.hcat.hosts']) - self.assertEqual('*', - config['core-site']['hadoop.proxyuser.hcat.groups']) - self.assertEqual('*', - config['core-site']['hadoop.proxyuser.hue.hosts']) - self.assertEqual('*', - config['core-site']['hadoop.proxyuser.hue.groups']) - self.assertEqual( - set(['zk1_host.novalocal:2181', 'zk2_host.novalocal:2181']), - set(config['webhcat-site'] - ['templeton.zookeeper.hosts'].split(','))) - self.assertEqual('*', - config['webhcat-site']['webhcat.proxyuser.hue.hosts']) - self.assertEqual('*', - config['webhcat-site'] - ['webhcat.proxyuser.hue.groups']) - - self.assertEqual('http://oozie_host.novalocal:11000/oozie', - config['oozie-site']['oozie.base.url']) - self.assertEqual('*', - config['oozie-site'] - ['oozie.service.ProxyUserService.proxyuser.hue.' - 'groups']) - self.assertEqual('*', - config['oozie-site'] - ['oozie.service.ProxyUserService.proxyuser.hue.' 
- 'hosts']) - self.assertEqual('oozie_host.novalocal', - config['global']['oozie_hostname']) - self.assertEqual('oozie_host.novalocal,222.11.9999,111.11.9999', - config['core-site']['hadoop.proxyuser.oozie.hosts']) - - # test swift properties - self.assertEqual('swift_prop_value', - config['core-site']['swift.prop1']) - self.assertEqual('swift_prop_value2', - config['core-site']['swift.prop2']) - - def test__determine_deployed_services(self, nova_mock): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - master_host = base.TestServer( - 'master.novalocal', 'master', '11111', 3, - '111.11.1111', '222.11.1111') - - jt_host = base.TestServer( - 'jt_host.novalocal', 'jt', '11111', 3, - '111.11.2222', '222.11.2222') - - nn_host = base.TestServer( - 'nn_host.novalocal', 'nn', '11111', 3, - '111.11.3333', '222.11.3333') - - snn_host = base.TestServer( - 'snn_host.novalocal', 'jt', '11111', 3, - '111.11.4444', '222.11.4444') - - slave_host = base.TestServer( - 'slave1.novalocal', 'slave', '11111', 3, - '222.22.6666', '333.22.6666') - - master_ng = TestNodeGroup( - 'master', [master_host], - ['GANGLIA_SERVER', - 'GANGLIA_MONITOR', 'NAGIOS_SERVER', - 'AMBARI_SERVER', 'AMBARI_AGENT', 'ZOOKEEPER_SERVER']) - jt_ng = TestNodeGroup('jt', [jt_host], ["RESOURCEMANAGER", - "HISTORYSERVER", - "GANGLIA_MONITOR", - "AMBARI_AGENT"]) - nn_ng = TestNodeGroup('nn', [nn_host], ["NAMENODE", - "GANGLIA_MONITOR", "AMBARI_AGENT"]) - snn_ng = TestNodeGroup('snn', [snn_host], ["SECONDARY_NAMENODE", - "GANGLIA_MONITOR", "AMBARI_AGENT"]) - slave_ng = TestNodeGroup( - 'slave', [slave_host], - ["DATANODE", "NODEMANAGER", - "GANGLIA_MONITOR", "HDFS_CLIENT", "MAPREDUCE2_CLIENT", - "AMBARI_AGENT"]) - - cluster = base.TestCluster([master_ng, jt_ng, nn_ng, - snn_ng, slave_ng]) - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config(cluster, []) - services = cluster_config.services - for service in services: - if service.name in ['YARN', 'HDFS', 'MAPREDUCE2', 'GANGLIA', - 'AMBARI', 'NAGIOS', 'ZOOKEEPER']: - self.assertTrue(service.deployed) - else: - self.assertFalse(service.deployed) - - def test_ambari_rpm_path(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - cluster_spec = cs.ClusterSpec(cluster_config_file, version='2.0.6') - - ambari_config = cluster_spec.configurations['ambari'] - rpm = ambari_config.get('rpm', None) - self.assertEqual('http://s3.amazonaws.com/' - 'public-repo-1.hortonworks.com/ambari/centos6/' - '1.x/updates/1.6.0/ambari.repo', rpm) - - def test_fs_umask(self, patched): - s_conf = s2.CONF - try: - s2.CONF = TestCONF(False, False) - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - server1 = base.TestServer('host1', 'test-master', '11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'test-slave', '11111', 3, - '222.22.2222', '333.22.2222') - - node_group1 = TestNodeGroup( - 'master', [server1], ["NAMENODE", "RESOURCEMANAGER", - "SECONDARY_NAMENODE", "GANGLIA_SERVER", - "GANGLIA_MONITOR", "NAGIOS_SERVER", - "AMBARI_SERVER", "AMBARI_AGENT", - "HISTORYSERVER", "ZOOKEEPER_SERVER"]) - node_group2 = TestNodeGroup( - 'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT", - 
"GANGLIA_MONITOR"]) - - cluster = base.TestCluster([node_group1, node_group2]) - cluster_config = cs.ClusterSpec(cluster_config_file, '2.0.6') - cluster_config.create_operational_config(cluster, []) - # core-site - self.assertEqual( - '022', - cluster_config.configurations['hdfs-site'] - ['fs.permissions.umask-mode']) - finally: - s2.CONF = s_conf - - def test_parse_default(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - - self._assert_services(cluster_config.services) - self._assert_configurations(cluster_config.configurations) - - node_groups = cluster_config.node_groups - self.assertEqual(2, len(node_groups)) - master_node_group = node_groups['MASTER'] - self.assertEqual('MASTER', master_node_group.name) - self.assertIsNone(master_node_group.predicate) - self.assertEqual('1', master_node_group.cardinality) - self.assertEqual(8, len(master_node_group.components)) - self.assertIn('NAMENODE', master_node_group.components) - self.assertIn('RESOURCEMANAGER', master_node_group.components) - self.assertIn('HISTORYSERVER', master_node_group.components) - self.assertIn('SECONDARY_NAMENODE', master_node_group.components) - self.assertIn('GANGLIA_SERVER', master_node_group.components) - self.assertIn('NAGIOS_SERVER', master_node_group.components) - self.assertIn('AMBARI_SERVER', master_node_group.components) - self.assertIn('ZOOKEEPER_SERVER', master_node_group.components) - - slave_node_group = node_groups['SLAVE'] - self.assertEqual('SLAVE', slave_node_group.name) - self.assertIsNone(slave_node_group.predicate) - self.assertEqual('1+', slave_node_group.cardinality) - self.assertEqual(5, len(slave_node_group.components)) - self.assertIn('DATANODE', slave_node_group.components) - self.assertIn('NODEMANAGER', slave_node_group.components) - self.assertIn('HDFS_CLIENT', slave_node_group.components) - self.assertIn('YARN_CLIENT', slave_node_group.components) - self.assertIn('MAPREDUCE2_CLIENT', slave_node_group.components) - - return cluster_config - - def test_ambari_rpm(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - - self._assert_configurations(cluster_config.configurations) - ambari_config = cluster_config.configurations['ambari'] - self.assertIsNotNone('no rpm uri found', - ambari_config.get('rpm', None)) - - def test_normalize(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster = cluster_config.normalize() - - configs = cluster.cluster_configs - contains_dfs_datanode_http_address = False - contains_staging_dir = False - contains_mapred_user = False - - for entry in configs: - config = entry.config - # assert some random configurations across targets - if config.name == 'dfs.datanode.http.address': - contains_dfs_datanode_http_address = True - self.assertEqual('string', config.type) - self.assertEqual('0.0.0.0:50075', config.default_value) - self.assertEqual('HDFS', config.applicable_target) - - if config.name == 'yarn.app.mapreduce.am.staging-dir': - contains_staging_dir = True - 
self.assertEqual('string', config.type) - self.assertEqual( - '/user', - config.default_value) - self.assertEqual('MAPREDUCE2', - config.applicable_target) - - if config.name == 'mapred_user': - contains_mapred_user = True - self.assertEqual('string', config.type) - self.assertEqual('mapred', config.default_value) - self.assertEqual('MAPREDUCE2', config.applicable_target) - - # print 'Config: name: {0}, type:{1}, - # default value:{2}, target:{3}, Value:{4}'.format( - # config.name, config.type, - # config.default_value, - # config.applicable_target, entry.value) - - self.assertTrue(contains_dfs_datanode_http_address) - self.assertTrue(contains_staging_dir) - self.assertTrue(contains_mapred_user) - node_groups = cluster.node_groups - self.assertEqual(2, len(node_groups)) - contains_master_group = False - contains_slave_group = False - for i in range(2): - node_group = node_groups[i] - components = node_group.node_processes - if node_group.name == "MASTER": - contains_master_group = True - self.assertEqual(8, len(components)) - self.assertIn('NAMENODE', components) - self.assertIn('RESOURCEMANAGER', components) - self.assertIn('HISTORYSERVER', components) - self.assertIn('SECONDARY_NAMENODE', components) - self.assertIn('GANGLIA_SERVER', components) - self.assertIn('NAGIOS_SERVER', components) - self.assertIn('AMBARI_SERVER', components) - self.assertIn('ZOOKEEPER_SERVER', components) - # TODO(jspeidel): node configs - # TODO(jspeidel): vm_requirements - elif node_group.name == 'SLAVE': - contains_slave_group = True - self.assertEqual(5, len(components)) - self.assertIn('DATANODE', components) - self.assertIn('NODEMANAGER', components) - self.assertIn('HDFS_CLIENT', components) - self.assertIn('YARN_CLIENT', components) - self.assertIn('MAPREDUCE2_CLIENT', components) - # TODO(jspeidel): node configs - # TODO(jspeidel): vm requirements - else: - self.fail('Unexpected node group: {0}'.format(node_group.name)) - self.assertTrue(contains_master_group) - self.assertTrue(contains_slave_group) - - def test_existing_config_item_in_top_level_within_blueprint(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - user_input_config = TestUserInputConfig( - 'global', 'OOZIE', 'oozie_log_dir') - user_input = provisioning.UserInput(user_input_config, - '/some/new/path') - - server1 = base.TestServer('host1', 'test-master', '11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'test-slave', '11111', 3, - '222.22.2222', '333.22.2222') - - node_group1 = TestNodeGroup( - 'master', [server1], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "SECONDARY_NAMENODE", - "GANGLIA_SERVER", "GANGLIA_MONITOR", - "NAGIOS_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "AMBARI_AGENT"]) - node_group2 = TestNodeGroup( - 'slave', [server2], ["NODEMANAGER", "DATANODE", - "AMBARI_AGENT", "GANGLIA_MONITOR"]) - - cluster = base.TestCluster([node_group1, node_group2]) - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config(cluster, [user_input]) - self.assertEqual('/some/new/path', cluster_config.configurations - ['global']['oozie_log_dir']) - - def test_new_config_item_in_top_level_within_blueprint(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - user_input_config = TestUserInputConfig( - 
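The normalize() test removed above walks the flattened config catalogue, where every entry couples a config descriptor (name, type, default value, applicable target) with a value. Shown as a plain dict purely for illustration; the real entries are resource objects with attributes, not dicts:

    # Illustrative only: field names follow the attributes the removed test
    # asserts on (config.name, config.type, config.default_value,
    # config.applicable_target and entry.value).
    normalized_entry = {
        'config': {
            'name': 'dfs.datanode.http.address',
            'type': 'string',
            'default_value': '0.0.0.0:50075',
            'applicable_target': 'HDFS',
        },
        'value': '0.0.0.0:50075',
    }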
'global', 'general', 'new_property') - user_input = provisioning.UserInput(user_input_config, 'foo') - - server1 = base.TestServer('host1', 'test-master', '11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'test-slave', '11111', 3, - '222.22.2222', '333.22.2222') - - node_group1 = TestNodeGroup( - 'master', [server1], - ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "SECONDARY_NAMENODE", "GANGLIA_SERVER", - "GANGLIA_MONITOR", "NAGIOS_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "AMBARI_AGENT"]) - node_group2 = TestNodeGroup( - 'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT", - "GANGLIA_MONITOR"]) - - cluster = base.TestCluster([node_group1, node_group2]) - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config(cluster, [user_input]) - self.assertEqual( - 'foo', cluster_config.configurations['global']['new_property']) - - def test_topology_configuration_no_hypervisor(self, patched): - s_conf = s2.CONF - th_conf = th.CONF - try: - s2.CONF = TestCONF(True, False) - th.CONF = TestCONF(True, False) - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - server1 = base.TestServer('host1', 'test-master', '11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'test-slave', '11111', 3, - '222.22.2222', '333.22.2222') - - node_group1 = TestNodeGroup( - 'master', [server1], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "SECONDARY_NAMENODE", - "GANGLIA_SERVER", "GANGLIA_MONITOR", - "NAGIOS_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "AMBARI_AGENT"]) - node_group2 = TestNodeGroup( - 'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT", - "GANGLIA_MONITOR"]) - - cluster = base.TestCluster([node_group1, node_group2]) - cluster_config = cs.ClusterSpec(cluster_config_file, - version='2.0.6') - cluster_config.create_operational_config(cluster, []) - # core-site - self.assertEqual( - 'org.apache.hadoop.net.NetworkTopology', - cluster_config.configurations['core-site'] - ['net.topology.impl']) - self.assertEqual( - 'true', - cluster_config.configurations['core-site'] - ['net.topology.nodegroup.aware']) - self.assertEqual( - 'org.apache.hadoop.hdfs.server.namenode.' 
- 'BlockPlacementPolicyWithNodeGroup', - cluster_config.configurations['core-site'] - ['dfs.block.replicator.classname']) - self.assertEqual( - 'true', - cluster_config.configurations['core-site'] - ['fs.swift.service.sahara.location-aware']) - self.assertEqual( - 'org.apache.hadoop.net.ScriptBasedMapping', - cluster_config.configurations['core-site'] - ['net.topology.node.switch.mapping.impl']) - self.assertEqual( - '/etc/hadoop/conf/topology.sh', - cluster_config.configurations['core-site'] - ['net.topology.script.file.name']) - - # mapred-site - self.assertEqual( - 'true', - cluster_config.configurations['mapred-site'] - ['mapred.jobtracker.nodegroup.aware']) - self.assertEqual( - '3', - cluster_config.configurations['mapred-site'] - ['mapred.task.cache.levels']) - self.assertEqual( - 'org.apache.hadoop.mapred.JobSchedulableWithNodeGroup', - cluster_config.configurations['mapred-site'] - ['mapred.jobtracker.jobSchedulable']) - finally: - s2.CONF = s_conf - th.CONF = th_conf - - def test_topology_configuration_with_hypervisor(self, patched): - s_conf = s2.CONF - try: - s2.CONF = TestCONF(True, True) - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - server1 = base.TestServer('host1', 'test-master', '11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'test-slave', '11111', 3, - '222.22.2222', '333.22.2222') - - node_group1 = TestNodeGroup( - 'master', [server1], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "SECONDARY_NAMENODE", - "GANGLIA_SERVER", "GANGLIA_MONITOR", - "NAGIOS_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "AMBARI_AGENT"]) - node_group2 = TestNodeGroup( - 'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT", - "GANGLIA_MONITOR"]) - - cluster = base.TestCluster([node_group1, node_group2]) - cluster_config = cs.ClusterSpec(cluster_config_file, - version='2.0.6') - cluster_config.create_operational_config(cluster, []) - # core-site - self.assertEqual( - 'org.apache.hadoop.net.NetworkTopologyWithNodeGroup', - cluster_config.configurations['core-site'] - ['net.topology.impl']) - finally: - s2.CONF = s_conf - - def test_update_ambari_admin_user(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI', - 'ambari.admin.user') - user_input = provisioning.UserInput(user_input_config, 'new-user') - - server1 = base.TestServer('host1', 'test-master', '11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'test-slave', '11111', 3, - '222.22.2222', '333.22.2222') - - node_group1 = TestNodeGroup( - 'master', - [server1], - ["NAMENODE", - "RESOURCEMANAGER", - "HISTORYSERVER", - "SECONDARY_NAMENODE", - "GANGLIA_SERVER", - "GANGLIA_MONITOR", - "NAGIOS_SERVER", - "AMBARI_SERVER", - "ZOOKEEPER_SERVER", - "AMBARI_AGENT"]) - node_group2 = TestNodeGroup( - 'slave', - [server2], - ["NODEMANAGER", - "DATANODE", - "AMBARI_AGENT", - "GANGLIA_MONITOR"]) - - cluster = base.TestCluster([node_group1, node_group2]) - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config(cluster, [user_input]) - ambari_service = next(service for service in cluster_config.services - if service.name == 'AMBARI') - users = ambari_service.users - self.assertEqual(1, len(users)) - self.assertEqual('new-user', 
users[0].name) - - def test_update_ambari_admin_password(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI', - 'ambari.admin.password') - user_input = provisioning.UserInput(user_input_config, 'new-pwd') - - server1 = base.TestServer('host1', 'test-master', '11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'test-slave', '11111', 3, - '222.22.2222', '333.22.2222') - - node_group1 = TestNodeGroup( - 'master', - [server1], - ["NAMENODE", - "RESOURCEMANAGER", - "HISTORYSERVER", - "SECONDARY_NAMENODE", - "GANGLIA_SERVER", - "GANGLIA_MONITOR", - "NAGIOS_SERVER", - "AMBARI_SERVER", - "ZOOKEEPER_SERVER", - "AMBARI_AGENT"]) - node_group2 = TestNodeGroup( - 'slave', - [server2], - ["NODEMANAGER", - "DATANODE", - "AMBARI_AGENT", - "GANGLIA_MONITOR"]) - - cluster = base.TestCluster([node_group1, node_group2]) - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config(cluster, [user_input]) - ambari_service = next(service for service in cluster_config.services - if service.name == 'AMBARI') - users = ambari_service.users - self.assertEqual(1, len(users)) - self.assertEqual('new-pwd', users[0].password) - - def test_update_ambari_admin_user_and_password(self, patched): - cluster_config_file = pkg.resource_string( - version.version_info.package, - 'plugins/hdp/versions/version_2_0_6/resources/' - 'default-cluster.template') - - user_user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI', - 'ambari.admin.user') - pwd_user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI', - 'ambari.admin.password') - user_user_input = provisioning.UserInput(user_user_input_config, - 'new-admin_user') - pwd_user_input = provisioning.UserInput(pwd_user_input_config, - 'new-admin_pwd') - - server1 = base.TestServer('host1', 'test-master', '11111', 3, - '111.11.1111', '222.11.1111') - server2 = base.TestServer('host2', 'test-slave', '11111', 3, - '222.22.2222', '333.22.2222') - - node_group1 = TestNodeGroup( - 'one', [server1], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "SECONDARY_NAMENODE", - "GANGLIA_SERVER", "GANGLIA_MONITOR", - "NAGIOS_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "AMBARI_AGENT"]) - node_group2 = TestNodeGroup( - 'two', [server2], ["NODEMANAGER", "DATANODE", - "AMBARI_AGENT", "GANGLIA_MONITOR"]) - - cluster = base.TestCluster([node_group1, node_group2]) - cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6') - cluster_config.create_operational_config( - cluster, [user_user_input, pwd_user_input]) - ambari_service = next(service for service in cluster_config.services - if service.name == 'AMBARI') - users = ambari_service.users - self.assertEqual(1, len(users)) - self.assertEqual('new-admin_user', users[0].name) - self.assertEqual('new-admin_pwd', users[0].password) - - def test_validate_missing_hdfs(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["NODEMANAGER", "MAPREDUCE2_CLIENT", - "HISTORYSERVER"]) - - node_group2 = TestNodeGroup( - 'master', [server2], ["RESOURCEMANAGER", "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = 
base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing hdfs service - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.RequiredServiceMissingException: - # expected - pass - - def test_validate_missing_mr2(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE"]) - - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing mr service - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.RequiredServiceMissingException: - # expected - pass - - def test_validate_missing_ambari(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["NAMENODE", "RESOURCEMANAGER", - "ZOOKEEPER_SERVER"]) - - node_group2 = TestNodeGroup( - 'master', [server2], ["DATANODE", "NODEMANAGER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing ambari service - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.RequiredServiceMissingException: - # expected - pass - - # TODO(jspeidel): move validate_* to test_services when validate - # is called independently of cluspterspec - def test_validate_hdfs(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "HDFS_CLIENT", "MAPREDUCE2_CLIENT"], 1) - - node_group2 = TestNodeGroup( - 'master', [server2], ["RESOURCEMANAGER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing NN - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "ZOOKEEPER_SERVER", - "AMBARI_SERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should validate successfully now - cluster_config.create_operational_config(cluster, []) - - # should cause validation exception due to 2 NN - node_group3 = TestNodeGroup( - 'master2', [server2], ["NAMENODE"]) - cluster = base.TestCluster([node_group, node_group2, node_group3]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - - def test_validate_hdfs_ha(self, patched): - 
server1 = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - server3 = base.TestServer('host3', 'master', '11113', 3, - '111.11.1113', '222.22.2224') - - node_group1 = TestNodeGroup( - 'slave', [server1], ["DATANODE", "NODEMANAGER", "HDFS_CLIENT", - "MAPREDUCE2_CLIENT"], 1) - node_group2 = TestNodeGroup( - 'master1', [server2], ["NAMENODE", "ZOOKEEPER_SERVER", - "JOURNALNODE"], 1) - node_group3 = TestNodeGroup( - 'master2', [server3], ["RESOURCEMANAGER", "HISTORYSERVER", - "ZOOKEEPER_SERVER", "AMBARI_SERVER", - "JOURNALNODE"], 1) - - # Setup a cluster_configs resource with HDFS HA ON - cc = {'HDFSHA': {'hdfs.nnha': True}} - cc_r = rsc.Resource(cc) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - - # Test namenodes - cluster1 = base.TestCluster([node_group1, node_group2, node_group3], - cc_r) - # should fail due to missing second namenode - self.assertRaises(ex.NameNodeHAConfigurationError, - cluster_config.create_operational_config, - cluster1, []) - - # Test Journalnodes - node_group2 = TestNodeGroup( - 'master1', [server2], ["NAMENODE", "ZOOKEEPER_SERVER"], 2) - cluster1 = base.TestCluster([node_group1, node_group2, node_group3], - cc_r) - # should fail due to missing odd number greater than 3 of journalnodes - self.assertRaises(ex.NameNodeHAConfigurationError, - cluster_config.create_operational_config, - cluster1, []) - - # Test zookeepers - node_group2 = TestNodeGroup( - 'master1', [server2], ["NAMENODE", "JOURNALNODE"], 2) - cluster1 = base.TestCluster([node_group1, node_group2, node_group3], - cc_r) - # should fail due to missing odd number greater than 3 of zookeepers - self.assertRaises(ex.NameNodeHAConfigurationError, - cluster_config.create_operational_config, - cluster1, []) - - # should validate successfully now - node_group2 = TestNodeGroup( - 'master1', [server2], ["NAMENODE", "JOURNALNODE", - "ZOOKEEPER_SERVER"], 2) - cluster1 = base.TestCluster([node_group1, node_group2, node_group3], - cc_r) - cluster_config.create_operational_config(cluster1, []) - - # Test when HDFS HA disabled - cc = {'HDFSHA': {'hdfs.nnha': False}} - cc_r = rsc.Resource(cc) - - node_group2 = TestNodeGroup( - 'master1', [server2], ["NAMENODE", "JOURNALNODE", - "ZOOKEEPER_SERVER"], 1) - cluster1 = base.TestCluster([node_group1, node_group2, node_group3], - cc_r) - - # should fail due to using journalnode in non HDFS HA case - self.assertRaises(ex.NameNodeHAConfigurationError, - cluster_config.create_operational_config, - cluster1, []) - - node_group2 = TestNodeGroup( - 'master1', [server2], ["NAMENODE", "ZKFC", "ZOOKEEPER_SERVER"], 1) - - cluster1 = base.TestCluster([node_group1, node_group2, node_group3], - cc_r) - - # should fail due to using zkfc in non HDFS HA case - self.assertRaises(ex.NameNodeHAConfigurationError, - cluster_config.create_operational_config, - cluster1, []) - - def test_validate_yarn(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "HDFS_CLIENT", "MAPREDUCE2_CLIENT"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "HISTORYSERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should 
fail due to missing JT - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "AMBARI_SERVER", "ZOOKEEPER_SERVER", - "HISTORYSERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should validate successfully now - cluster_config.create_operational_config(cluster, []) - - # should cause validation exception due to 2 JT - node_group3 = TestNodeGroup( - 'master', [server2], ["RESOURCEMANAGER"]) - cluster = base.TestCluster([node_group, node_group2, node_group3]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - - # should cause validation exception due to 2 NN - node_group3 = TestNodeGroup( - 'master', [server2], ["NAMENODE"]) - cluster = base.TestCluster([node_group, node_group2, node_group3]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - - # should fail due to no nodemanager - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "HDFS_CLIENT", - "MAPREDUCE2_CLIENT"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing JT - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - - def test_validate_hive(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "HIVE_CLIENT"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing hive_server - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HIVE_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "HISTORYSERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should validate successfully now - cluster_config.create_operational_config(cluster, []) - - # should cause validation exception due to 2 HIVE_SERVER - node_group3 = TestNodeGroup( - 'master', [server2], ["HIVE_SERVER"]) - cluster = base.TestCluster([node_group, node_group2, node_group3]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # 
expected - pass - - def test_validate_zk(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - server3 = base.TestServer('host3', 'master', '11113', 3, - '111.11.1113', '222.22.2224') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "ZOOKEEPER_CLIENT"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "AMBARI_SERVER", "HISTORYSERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing ZOOKEEPER_SERVER - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "ZOOKEEPER_SERVER", - "AMBARI_SERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should validate successfully now - cluster_config.create_operational_config(cluster, []) - - # should allow multiple ZOOKEEPER_SERVER processes - node_group3 = TestNodeGroup( - 'zkserver', [server3], ["ZOOKEEPER_SERVER"]) - cluster = base.TestCluster([node_group, node_group2, node_group3]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - cluster_config.create_operational_config(cluster, []) - - def test_validate_oozie(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "OOZIE_CLIENT"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing OOZIE_SERVER - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "OOZIE_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "HISTORYSERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should validate successfully now - cluster_config.create_operational_config(cluster, []) - - # should cause validation exception due to 2 OOZIE_SERVER - node_group3 = TestNodeGroup( - 'master', [server2], ["OOZIE_SERVER"]) - cluster = base.TestCluster([node_group, node_group2, node_group3]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - - def test_validate_ganglia(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - 
"GANGLIA_MONITOR"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing GANGLIA_SERVER - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "GANGLIA_SERVER", "AMBARI_SERVER", - "HISTORYSERVER", "ZOOKEEPER_SERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should validate successfully now - cluster_config.create_operational_config(cluster, []) - - # should cause validation exception due to 2 GANGLIA_SERVER - node_group3 = TestNodeGroup( - 'master2', [server2], ["GANGLIA_SERVER"]) - cluster = base.TestCluster([node_group, node_group2, node_group3]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - - def test_validate_ambari(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "AMBARI_AGENT"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing AMBARI_SERVER - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should validate successfully now - cluster_config.create_operational_config(cluster, []) - - # should cause validation exception due to 2 AMBARI_SERVER - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "AMBARI_SERVER", "ZOOKEEPER_SERVER"]) - node_group3 = TestNodeGroup( - 'master', [server2], ["AMBARI_SERVER"]) - cluster = base.TestCluster([node_group, node_group2, node_group3]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - try: - cluster_config.create_operational_config(cluster, []) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # expected - pass - - def test_validate_hue(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "HUE"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER"]) - - 
cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing hive_server, oozie_server and - # webhchat_server which is required by hue - self.assertRaises(ex.RequiredServiceMissingException, - cluster_config.create_operational_config, - cluster, []) - - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HIVE_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "HISTORYSERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing oozie_server and webhchat_server, which - # is required by hue - self.assertRaises(ex.RequiredServiceMissingException, - cluster_config.create_operational_config, - cluster, []) - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "OOZIE_CLIENT", "HUE"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HIVE_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "HISTORYSERVER", - "OOZIE_SERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should fail due to missing webhchat_server, which is required by hue - self.assertRaises(ex.RequiredServiceMissingException, - cluster_config.create_operational_config, - cluster, []) - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "OOZIE_CLIENT", "HUE"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HIVE_SERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER", "HISTORYSERVER", - "OOZIE_SERVER", "WEBHCAT_SERVER"]) - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # should validate successfully now - cluster_config.create_operational_config(cluster, []) - - # should have automatically added a HIVE_CLIENT to "slave" node group - hue_ngs = cluster_config.get_node_groups_containing_component("HUE") - self.assertEqual(1, len(hue_ngs)) - self.assertIn("HIVE_CLIENT", hue_ngs.pop().components) - - # should cause validation exception due to 2 hue instances - node_group3 = TestNodeGroup( - 'master', [server2], ["HUE"]) - cluster = base.TestCluster([node_group, node_group2, node_group3]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - self.assertRaises(ex.InvalidComponentCountException, - cluster_config.create_operational_config, - cluster, []) - - def test_validate_scaling_existing_ng(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # sanity check that original config validates - cluster_config.create_operational_config(cluster, []) - - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - scaled_groups = {'master': 2} - # should fail due to 2 JT - try: - cluster_config.create_operational_config( - cluster, [], scaled_groups) - self.fail('Validation should have thrown an exception') - except ex.InvalidComponentCountException: - # 
expected - pass - - def test_scale(self, patched): - - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER", - "AMBARI_AGENT"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "ZOOKEEPER_SERVER", - "AMBARI_SERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # sanity check that original config validates - cluster_config.create_operational_config(cluster, []) - - slave_ng = cluster_config.node_groups['slave'] - self.assertEqual(1, slave_ng.count) - - cluster_config.scale({'slave': 2}) - - self.assertEqual(2, slave_ng.count) - - def test_get_deployed_configurations(self, patched): - - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - node_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER"]) - node_group2 = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "AMBARI_SERVER", "ZOOKEEPER_SERVER", - "HISTORYSERVER"]) - - cluster = base.TestCluster([node_group, node_group2]) - - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - # sanity check that original config validates - cluster_config.create_operational_config(cluster, []) - configs = cluster_config.get_deployed_configurations() - expected_configs = set(['mapred-site', 'ambari', 'hdfs-site', - 'global', 'core-site', 'yarn-site']) - self.assertEqual(expected_configs, expected_configs & configs) - - def test_get_deployed_node_group_count(self, patched): - - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - slave_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER"]) - slave2_group = TestNodeGroup( - 'slave2', [server], ["DATANODE", "NODEMANAGER"]) - master_group = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([master_group, slave_group, slave2_group]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - cluster_config.create_operational_config(cluster, []) - - self.assertEqual(2, cluster_config.get_deployed_node_group_count( - 'DATANODE')) - self.assertEqual(1, cluster_config.get_deployed_node_group_count( - 'AMBARI_SERVER')) - - def test_get_node_groups_containing_component(self, patched): - server = base.TestServer('host1', 'slave', '11111', 3, - '111.11.1111', '222.22.2222') - server2 = base.TestServer('host2', 'master', '11112', 3, - '111.11.1112', '222.22.2223') - - slave_group = TestNodeGroup( - 'slave', [server], ["DATANODE", "NODEMANAGER"]) - slave2_group = TestNodeGroup( - 'slave2', [server], ["DATANODE", "NODEMANAGER"]) - master_group = TestNodeGroup( - 'master', [server2], ["NAMENODE", "RESOURCEMANAGER", - "HISTORYSERVER", "AMBARI_SERVER", - "ZOOKEEPER_SERVER"]) - - cluster = base.TestCluster([master_group, slave_group, slave2_group]) - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - cluster_config.create_operational_config(cluster, []) - - datanode_ngs = cluster_config.get_node_groups_containing_component( - 'DATANODE') - 
self.assertEqual(2, len(datanode_ngs)) - ng_names = set([datanode_ngs[0].name, datanode_ngs[1].name]) - self.assertIn('slave', ng_names) - self.assertIn('slave2', ng_names) - - def test_get_components_for_type(self, patched): - - cluster_config = base.create_clusterspec(hdp_version='2.0.6') - clients = cluster_config.get_components_for_type('CLIENT') - slaves = cluster_config.get_components_for_type('SLAVE') - masters = cluster_config.get_components_for_type('MASTER') - - expected_clients = set(['HCAT', 'ZOOKEEPER_CLIENT', - 'MAPREDUCE2_CLIENT', 'HIVE_CLIENT', - 'HDFS_CLIENT', 'PIG', 'YARN_CLIENT', 'HUE']) - self.assertEqual(expected_clients, expected_clients & set(clients)) - - expected_slaves = set(['AMBARI_AGENT', 'NODEMANAGER', 'DATANODE', - 'GANGLIA_MONITOR']) - self.assertEqual(expected_slaves, expected_slaves & set(slaves)) - - expected_masters = set(['SECONDARY_NAMENODE', 'HIVE_METASTORE', - 'AMBARI_SERVER', 'RESOURCEMANAGER', - 'WEBHCAT_SERVER', 'NAGIOS_SERVER', - 'MYSQL_SERVER', 'ZOOKEEPER_SERVER', - 'NAMENODE', 'HIVE_SERVER', 'GANGLIA_SERVER']) - self.assertEqual(expected_masters, expected_masters & set(masters)) - - def _assert_services(self, services): - found_services = [] - for service in services: - name = service.name - found_services.append(name) - self.service_validators[name](service) - - self.assertEqual(15, len(found_services)) - self.assertIn('HDFS', found_services) - self.assertIn('MAPREDUCE2', found_services) - self.assertIn('GANGLIA', found_services) - self.assertIn('NAGIOS', found_services) - self.assertIn('AMBARI', found_services) - self.assertIn('PIG', found_services) - self.assertIn('HIVE', found_services) - self.assertIn('HCATALOG', found_services) - self.assertIn('ZOOKEEPER', found_services) - self.assertIn('WEBHCAT', found_services) - self.assertIn('OOZIE', found_services) - self.assertIn('SQOOP', found_services) - self.assertIn('HBASE', found_services) - self.assertIn('HUE', found_services) - - def _assert_hdfs(self, service): - self.assertEqual('HDFS', service.name) - - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(6, len(found_components)) - self._assert_component('NAMENODE', 'MASTER', "1", - found_components['NAMENODE']) - self._assert_component('DATANODE', 'SLAVE', "1+", - found_components['DATANODE']) - self._assert_component('SECONDARY_NAMENODE', 'MASTER', "1", - found_components['SECONDARY_NAMENODE']) - self._assert_component('HDFS_CLIENT', 'CLIENT', "1+", - found_components['HDFS_CLIENT']) - self._assert_component('JOURNALNODE', 'MASTER', "1+", - found_components['JOURNALNODE']) - self._assert_component('ZKFC', 'MASTER', "1+", - found_components['ZKFC']) - # TODO(jspeidel) config - - def _assert_mrv2(self, service): - self.assertEqual('MAPREDUCE2', service.name) - - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(2, len(found_components)) - self._assert_component('HISTORYSERVER', 'MASTER', "1", - found_components['HISTORYSERVER']) - self._assert_component('MAPREDUCE2_CLIENT', 'CLIENT', "1+", - found_components['MAPREDUCE2_CLIENT']) - - def _assert_yarn(self, service): - self.assertEqual('YARN', service.name) - - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(3, len(found_components)) - self._assert_component('RESOURCEMANAGER', 'MASTER', "1", - found_components['RESOURCEMANAGER']) - 
self._assert_component('NODEMANAGER', 'SLAVE', "1+", - found_components['NODEMANAGER']) - self._assert_component('YARN_CLIENT', 'CLIENT', "1+", - found_components['YARN_CLIENT']) - - def _assert_nagios(self, service): - self.assertEqual('NAGIOS', service.name) - - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(1, len(found_components)) - self._assert_component('NAGIOS_SERVER', 'MASTER', "1", - found_components['NAGIOS_SERVER']) - - def _assert_ganglia(self, service): - self.assertEqual('GANGLIA', service.name) - - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(2, len(found_components)) - self._assert_component('GANGLIA_SERVER', 'MASTER', "1", - found_components['GANGLIA_SERVER']) - self._assert_component('GANGLIA_MONITOR', 'SLAVE', "1+", - found_components['GANGLIA_MONITOR']) - - def _assert_ambari(self, service): - self.assertEqual('AMBARI', service.name) - - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(2, len(found_components)) - self._assert_component('AMBARI_SERVER', 'MASTER', "1", - found_components['AMBARI_SERVER']) - self._assert_component('AMBARI_AGENT', 'SLAVE', "1+", - found_components['AMBARI_AGENT']) - - self.assertEqual(1, len(service.users)) - user = service.users[0] - self.assertEqual('admin', user.name) - self.assertEqual('admin', user.password) - groups = user.groups - self.assertEqual(1, len(groups)) - self.assertIn('admin', groups) - - def _assert_pig(self, service): - self.assertEqual('PIG', service.name) - self.assertEqual(1, len(service.components)) - self.assertEqual('PIG', service.components[0].name) - - def _assert_hive(self, service): - self.assertEqual('HIVE', service.name) - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(4, len(found_components)) - self._assert_component('HIVE_SERVER', 'MASTER', "1", - found_components['HIVE_SERVER']) - self._assert_component('HIVE_METASTORE', 'MASTER', "1", - found_components['HIVE_METASTORE']) - self._assert_component('MYSQL_SERVER', 'MASTER', "1", - found_components['MYSQL_SERVER']) - self._assert_component('HIVE_CLIENT', 'CLIENT', "1+", - found_components['HIVE_CLIENT']) - - def _assert_hcatalog(self, service): - self.assertEqual('HCATALOG', service.name) - self.assertEqual(1, len(service.components)) - self.assertEqual('HCAT', service.components[0].name) - - def _assert_zookeeper(self, service): - self.assertEqual('ZOOKEEPER', service.name) - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(2, len(found_components)) - self._assert_component('ZOOKEEPER_SERVER', 'MASTER', "1+", - found_components['ZOOKEEPER_SERVER']) - self._assert_component('ZOOKEEPER_CLIENT', 'CLIENT', "1+", - found_components['ZOOKEEPER_CLIENT']) - - def _assert_webhcat(self, service): - self.assertEqual('WEBHCAT', service.name) - self.assertEqual(1, len(service.components)) - self.assertEqual('WEBHCAT_SERVER', service.components[0].name) - - def _assert_oozie(self, service): - self.assertEqual('OOZIE', service.name) - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(2, len(found_components)) - self._assert_component('OOZIE_SERVER', 'MASTER', "1", - 
found_components['OOZIE_SERVER']) - self._assert_component('OOZIE_CLIENT', 'CLIENT', "1+", - found_components['OOZIE_CLIENT']) - - def _assert_sqoop(self, service): - self.assertEqual('SQOOP', service.name) - self.assertEqual(1, len(service.components)) - self.assertEqual('SQOOP', service.components[0].name) - - def _assert_hbase(self, service): - self.assertEqual('HBASE', service.name) - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(3, len(found_components)) - self._assert_component('HBASE_MASTER', 'MASTER', "1", - found_components['HBASE_MASTER']) - self._assert_component('HBASE_REGIONSERVER', 'SLAVE', "1+", - found_components['HBASE_REGIONSERVER']) - self._assert_component('HBASE_CLIENT', 'CLIENT', "1+", - found_components['HBASE_CLIENT']) - - def _assert_hue(self, service): - self.assertEqual('HUE', service.name) - found_components = {} - for component in service.components: - found_components[component.name] = component - - self.assertEqual(1, len(found_components)) - self._assert_component('HUE', 'CLIENT', "1", - found_components['HUE']) - - def _assert_component(self, name, comp_type, cardinality, component): - self.assertEqual(name, component.name) - self.assertEqual(comp_type, component.type) - self.assertEqual(cardinality, component.cardinality) - - def _assert_configurations(self, configurations): - self.assertEqual(17, len(configurations)) - self.assertIn('global', configurations) - self.assertIn('core-site', configurations) - self.assertIn('yarn-site', configurations) - self.assertIn('mapred-site', configurations) - self.assertIn('hdfs-site', configurations) - self.assertIn('ambari', configurations) - self.assertIn('webhcat-site', configurations) - self.assertIn('hive-site', configurations) - self.assertIn('oozie-site', configurations) - self.assertIn('hbase-site', configurations) - self.assertIn('capacity-scheduler', configurations) - self.assertIn('hue-ini', configurations) - self.assertIn('hue-core-site', configurations) - self.assertIn('hue-hdfs-site', configurations) - self.assertIn('hue-webhcat-site', configurations) - self.assertIn('hue-oozie-site', configurations) - self.assertIn('hdfsha', configurations) - - -class TestNodeGroup(object): - def __init__(self, name, instances, node_processes, count=1): - self.name = name - self.instances = instances - for i in instances: - i.node_group = self - self.node_processes = node_processes - self.count = count - self.id = name - - def storage_paths(self): - return [''] - - -class TestUserInputConfig(object): - def __init__(self, tag, target, name): - self.tag = tag - self.applicable_target = target - self.name = name diff --git a/sahara/tests/unit/plugins/hdp/test_confighints_helper.py b/sahara/tests/unit/plugins/hdp/test_confighints_helper.py deleted file mode 100644 index b4111d6885..0000000000 --- a/sahara/tests/unit/plugins/hdp/test_confighints_helper.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from sahara.plugins.hdp import confighints_helper as ch_helper -from sahara.tests.unit import base as sahara_base - - -SAMPLE_CONFIG = { - 'configurations': [ - { - 'tag': 'tag1.xml', - 'properties': [ - { - 'name': 'prop1', - 'default_value': '1234', - 'description': 'the first property of tag1' - }, - { - 'name': 'prop2', - 'default_value': '5678', - 'description': 'the second property of tag1' - } - ] - }, - { - 'tag': 'tag2.xml', - 'properties': [ - { - 'name': 'prop3', - 'default_value': '0000', - 'description': 'the first property of tag2' - } - ] - } - ] -} - - -class ConfigHintsHelperTest(sahara_base.SaharaTestCase): - @mock.patch( - 'sahara.plugins.hdp.confighints_helper.load_hadoop_json_for_tag', - wraps=ch_helper.load_hadoop_json_for_tag) - @mock.patch( - 'sahara.plugins.hdp.confighints_helper.load_json_file', - return_value=SAMPLE_CONFIG) - def test_get_possible_hive_config_from(self, - load_json_file, - load_hadoop_json_for_tag): - expected_config = { - 'configs': [], - 'params': {} - } - actual_config = ch_helper.get_possible_hive_config_from( - 'sample-file-name.json') - load_hadoop_json_for_tag.assert_called_once_with( - 'sample-file-name.json', 'hive-site.xml') - self.assertEqual(expected_config, actual_config) - - @mock.patch( - 'sahara.service.edp.oozie.workflow_creator.workflow_factory.' - 'get_possible_mapreduce_configs', - return_value=[]) - @mock.patch( - 'sahara.plugins.hdp.confighints_helper.load_hadoop_json_for_tag', - wraps=ch_helper.load_hadoop_json_for_tag) - @mock.patch( - 'sahara.plugins.hdp.confighints_helper.load_json_file', - return_value=SAMPLE_CONFIG) - def test_get_possible_mapreduce_config_from(self, - load_json_file, - load_hadoop_json_for_tag, - get_poss_mr_configs): - expected_config = { - 'configs': [] - } - actual_config = ch_helper.get_possible_mapreduce_config_from( - 'sample-file-name.json') - load_hadoop_json_for_tag.assert_called_once_with( - 'sample-file-name.json', 'mapred-site.xml') - get_poss_mr_configs.assert_called_once_with() - self.assertEqual(expected_config, actual_config) - - @mock.patch( - 'sahara.plugins.hdp.confighints_helper.load_hadoop_json_for_tag', - wraps=ch_helper.load_hadoop_json_for_tag) - @mock.patch( - 'sahara.plugins.hdp.confighints_helper.load_json_file', - return_value=SAMPLE_CONFIG) - def test_get_possible_pig_config_from(self, - load_json_file, - load_hadoop_json_for_tag): - expected_config = { - 'configs': [], - 'args': [], - 'params': {} - } - actual_config = ch_helper.get_possible_pig_config_from( - 'sample-file-name.json') - load_hadoop_json_for_tag.assert_called_once_with( - 'sample-file-name.json', 'mapred-site.xml') - self.assertEqual(expected_config, actual_config) - - def test_get_properties_for_tag(self): - expected_properties = [ - { - 'name': 'prop1', - 'default_value': '1234', - 'description': 'the first property of tag1' - }, - { - 'name': 'prop2', - 'default_value': '5678', - 'description': 'the second property of tag1' - } - ] - actual_properties = ch_helper.get_properties_for_tag( - SAMPLE_CONFIG['configurations'], 'tag1.xml') - self.assertEqual(expected_properties, actual_properties) - - @mock.patch( - 'sahara.plugins.hdp.confighints_helper.load_json_file', - return_value=SAMPLE_CONFIG) - def test_load_hadoop_json_for_tag(self, load_json_file): - expected_configs = [ - { - 'name': 'prop3', - 'value': '0000', - 'description': 'the first property of tag2' - } - ] - 
actual_configs = ch_helper.load_hadoop_json_for_tag( - 'sample-file-name.json', 'tag2.xml') - self.assertEqual(expected_configs, actual_configs) diff --git a/sahara/tests/unit/plugins/hdp/test_services.py b/sahara/tests/unit/plugins/hdp/test_services.py deleted file mode 100644 index 6ddaac5b01..0000000000 --- a/sahara/tests/unit/plugins/hdp/test_services.py +++ /dev/null @@ -1,815 +0,0 @@ -# Copyright (c) 2013 Hortonworks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from sahara import exceptions as e -from sahara.plugins import exceptions as ex -from sahara.plugins.hdp.versions import versionhandlerfactory as vhf -from sahara.tests.unit import base -from sahara.tests.unit.plugins.hdp import hdp_test_base - -versions = ['2.0.6'] - - -class ServicesTest(base.SaharaTestCase): - # TODO(jspeidel): test remaining service functionality which isn't - # tested by coarser grained unit tests. - - def get_services_processor(self, version='2.0.6'): - handler = (vhf.VersionHandlerFactory.get_instance(). - get_version_handler(version)) - s = handler.get_services_processor() - return s - - def test_create_hdfs_service(self): - for version in versions: - s = self.get_services_processor(version) - service = s.create_service('HDFS') - self.assertEqual('HDFS', service.name) - expected_configs = set(['global', 'core-site', 'hdfs-site']) - self.assertEqual(expected_configs, - expected_configs & service.configurations) - self.assertTrue(service.is_mandatory()) - - def test_hdp2_hdfs_service_register_urls(self): - s = self.get_services_processor('2.0.6') - service = s.create_service('HDFS') - cluster_spec = mock.Mock() - cluster_spec.configurations = { - 'core-site': { - 'fs.defaultFS': 'hdfs://not_expected.com:9020' - }, - 'hdfs-site': { - 'dfs.namenode.http-address': 'http://not_expected.com:10070' - } - } - instance_mock = mock.Mock() - instance_mock.management_ip = '127.0.0.1' - cluster_spec.determine_component_hosts = mock.Mock( - return_value=[instance_mock]) - cluster = mock.Mock(cluster_configs={}, name="hdp") - url_info = {} - url_info = service.register_service_urls(cluster_spec, url_info, - cluster) - self.assertEqual(url_info['HDFS']['Web UI'], - 'http://127.0.0.1:10070') - self.assertEqual(url_info['HDFS']['NameNode'], - 'hdfs://127.0.0.1:9020') - - def test_hdp2_ha_hdfs_service_register_urls(self): - s = self.get_services_processor('2.0.6') - service = s.create_service('HDFS') - cluster_spec = mock.Mock() - cluster_spec.configurations = { - 'core-site': { - 'fs.defaultFS': 'hdfs://not_expected.com:9020' - }, - 'hdfs-site': { - 'dfs.namenode.http-address': 'http://not_expected.com:10070' - } - } - instance_mock = mock.Mock() - instance_mock.management_ip = '127.0.0.1' - cluster_spec.determine_component_hosts = mock.Mock( - return_value=[instance_mock]) - cluster = mock.Mock(cluster_configs={'HDFSHA': {'hdfs.nnha': True}}) - cluster.name = "hdp-cluster" - url_info = {} - url_info = service.register_service_urls(cluster_spec, url_info, - cluster) - 
self.assertEqual(url_info['HDFS']['Web UI'],
-                         'http://127.0.0.1:10070')
-        self.assertEqual(url_info['HDFS']['NameNode'],
-                         'hdfs://127.0.0.1:9020')
-        self.assertEqual(url_info['HDFS']['NameService'],
-                         'hdfs://hdp-cluster')
-
-    def test_create_mr2_service(self):
-        s = self.get_services_processor('2.0.6')
-        service = s.create_service('MAPREDUCE2')
-        self.assertEqual('MAPREDUCE2', service.name)
-        expected_configs = set(['global', 'core-site', 'mapred-site'])
-        self.assertEqual(expected_configs,
-                         expected_configs & service.configurations)
-        self.assertTrue(service.is_mandatory())
-
-    def test_hdp2_mr2_service_register_urls(self):
-        s = self.get_services_processor('2.0.6')
-        service = s.create_service('MAPREDUCE2')
-        cluster_spec = mock.Mock()
-        cluster_spec.configurations = {
-            'mapred-site': {
-                'mapreduce.jobhistory.address':
-                    'hdfs://not_expected.com:10300',
-                'mapreduce.jobhistory.webapp.address':
-                    'http://not_expected.com:10030'
-            }
-        }
-        instance_mock = mock.Mock()
-        instance_mock.management_ip = '127.0.0.1'
-        cluster_spec.determine_component_hosts = mock.Mock(
-            return_value=[instance_mock])
-        url_info = {}
-        url_info = service.register_service_urls(cluster_spec, url_info,
-                                                 mock.Mock())
-        self.assertEqual(url_info['MapReduce2']['Web UI'],
-                         'http://127.0.0.1:10030')
-        self.assertEqual(url_info['MapReduce2']['History Server'],
-                         '127.0.0.1:10300')
-
-    def test_create_hive_service(self):
-        for version in versions:
-            s = self.get_services_processor(version)
-            service = s.create_service('HIVE')
-            self.assertEqual('HIVE', service.name)
-            expected_configs = set(['global', 'core-site', 'hive-site'])
-            self.assertEqual(expected_configs,
-                             expected_configs & service.configurations)
-            self.assertFalse(service.is_mandatory())
-
-    def test_create_webhcat_service(self):
-        for version in versions:
-            s = self.get_services_processor(version)
-            service = s.create_service('WEBHCAT')
-            self.assertEqual('WEBHCAT', service.name)
-            expected_configs = set(['global', 'core-site', 'webhcat-site'])
-            self.assertEqual(expected_configs,
-                             expected_configs & service.configurations)
-            self.assertFalse(service.is_mandatory())
-
-    def test_create_zk_service(self):
-        for version in versions:
-            s = self.get_services_processor()
-            service = s.create_service('ZOOKEEPER')
-            self.assertEqual('ZOOKEEPER', service.name)
-            expected_configs = set(['global', 'core-site'])
-            self.assertEqual(expected_configs,
-                             expected_configs & service.configurations)
-            self.assertTrue(service.is_mandatory())
-
-    def test_create_oozie_service(self):
-        for version in versions:
-            s = self.get_services_processor()
-            service = s.create_service('OOZIE')
-            self.assertEqual('OOZIE', service.name)
-            expected_configs = set(['global', 'core-site', 'oozie-site'])
-            self.assertEqual(expected_configs,
-                             expected_configs & service.configurations)
-            self.assertFalse(service.is_mandatory())
-
-    def test_oozie_service_register_urls(self):
-        for version in versions:
-            s = self.get_services_processor(version)
-            service = s.create_service('OOZIE')
-            cluster_spec = mock.Mock()
-            cluster_spec.configurations = {
-                'oozie-site': {
-                    'oozie.base.url': 'hdfs://not_expected.com:21000'
-                }
-            }
-            instance_mock = mock.Mock()
-            instance_mock.management_ip = '127.0.0.1'
-            cluster_spec.determine_component_hosts = mock.Mock(
-                return_value=[instance_mock])
-            url_info = {}
-            url_info = service.register_service_urls(cluster_spec, url_info,
-                                                     mock.Mock())
-            self.assertEqual('http://127.0.0.1:21000',
-                             url_info['JobFlow']['Oozie'])
-
-    def test_create_ganglia_service(self):
-        for version in versions:
-            s = self.get_services_processor(version)
-            service = s.create_service('GANGLIA')
-            self.assertEqual('GANGLIA', service.name)
-            expected_configs = set(['global', 'core-site'])
-            self.assertEqual(expected_configs,
-                             expected_configs & service.configurations)
-            self.assertFalse(service.is_mandatory())
-
-    def test_create_ambari_service(self):
-        for version in versions:
-            s = self.get_services_processor(version)
-            service = s.create_service('AMBARI')
-            self.assertEqual('AMBARI', service.name)
-            expected_configs = set(['global', 'core-site'])
-            self.assertEqual(expected_configs,
-                             expected_configs & service.configurations)
-            self.assertTrue(service.is_mandatory())
-
-    @mock.patch("sahara.utils.openstack.nova.get_instance_info",
-                hdp_test_base.get_instance_info)
-    @mock.patch(
-        'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
-        '_get_swift_properties',
-        return_value=[])
-    def test_hdp2_create_sqoop_service(self, patched):
-        s = self.get_services_processor('2.0.6')
-        service = s.create_service('SQOOP')
-        self.assertEqual('SQOOP', service.name)
-        expected_configs = set(['global', 'core-site'])
-        self.assertEqual(expected_configs,
-                         expected_configs & service.configurations)
-        self.assertFalse(service.is_mandatory())
-
-        # ensure that hdfs and mr clients are added implicitly
-        master_host = hdp_test_base.TestServer(
-            'master.novalocal', 'master', '11111', 3,
-            '111.11.1111', '222.11.1111')
-        master_ng = hdp_test_base.TestNodeGroup(
-            'master', [master_host], ["NAMENODE", "RESOURCEMANAGER",
-                                      "HISTORYSERVER", "SECONDARY_NAMENODE",
-                                      "NODEMANAGER", "DATANODE",
-                                      "AMBARI_SERVER", "ZOOKEEPER_SERVER"])
-        sqoop_host = hdp_test_base.TestServer(
-            'sqoop.novalocal', 'sqoop', '11111', 3,
-            '111.11.1111', '222.11.1111')
-        sqoop_ng = hdp_test_base.TestNodeGroup(
-            'sqoop', [sqoop_host], ["SQOOP"])
-        cluster = hdp_test_base.TestCluster([master_ng, sqoop_ng])
-
-        cluster_spec = hdp_test_base.create_clusterspec(hdp_version='2.0.6')
-        cluster_spec.create_operational_config(cluster, [])
-
-        components = cluster_spec.get_node_groups_containing_component(
-            'SQOOP')[0].components
-        self.assertIn('HDFS_CLIENT', components)
-        self.assertIn('MAPREDUCE2_CLIENT', components)
-
-    @mock.patch("sahara.utils.openstack.nova.get_instance_info",
-                hdp_test_base.get_instance_info)
-    @mock.patch(
-        'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
-        '_get_swift_properties',
-        return_value=[])
-    def test_create_hbase_service(self, patched):
-        s = self.get_services_processor()
-        service = s.create_service('HBASE')
-        self.assertEqual('HBASE', service.name)
-        expected_configs = set(['global', 'core-site', 'hbase-site'])
-        self.assertEqual(expected_configs,
-                         expected_configs & service.configurations)
-        self.assertFalse(service.is_mandatory())
-
-        cluster = self._create_hbase_cluster()
-
-        cluster_spec = hdp_test_base.create_clusterspec()
-        cluster_spec.create_operational_config(cluster, [])
-
-        components = cluster_spec.get_node_groups_containing_component(
-            'HBASE_MASTER')[0].components
-        self.assertIn('HDFS_CLIENT', components)
-
-    @mock.patch("sahara.utils.openstack.nova.get_instance_info",
-                hdp_test_base.get_instance_info)
-    @mock.patch(
-        'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
-        '_get_swift_properties',
-        return_value=[])
-    def test_create_hdp2_hbase_service(self, patched):
-        for version in versions:
-            s = self.get_services_processor(version=version)
-            service = s.create_service('HBASE')
-            self.assertEqual('HBASE', service.name)
-            expected_configs = set(['global', 'core-site', 'hbase-site'])
-            self.assertEqual(expected_configs,
-                             expected_configs & service.configurations)
-            self.assertFalse(service.is_mandatory())
-
-            cluster = self._create_hbase_cluster()
-
-            cluster_spec = hdp_test_base.create_clusterspec(
-                hdp_version=version)
-            cluster_spec.create_operational_config(cluster, [])
-
-            components = cluster_spec.get_node_groups_containing_component(
-                'HBASE_MASTER')[0].components
-            self.assertIn('HDFS_CLIENT', components)
-
-    def test_create_yarn_service(self):
-        s = self.get_services_processor(version='2.0.6')
-        service = s.create_service('YARN')
-        self.assertEqual('YARN', service.name)
-        expected_configs = set(['global', 'core-site', 'yarn-site'])
-        self.assertEqual(expected_configs,
-                         expected_configs & service.configurations)
-        self.assertTrue(service.is_mandatory())
-
-    @mock.patch("sahara.utils.openstack.nova.get_instance_info",
-                hdp_test_base.get_instance_info)
-    @mock.patch(
-        'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
-        '_get_swift_properties',
-        return_value=[])
-    def test_hbase_properties(self, patched):
-        for version in versions:
-            cluster = self._create_hbase_cluster()
-
-            cluster_spec = hdp_test_base.create_clusterspec(
-                hdp_version=version)
-            cluster_spec.create_operational_config(cluster, [])
-            s = self.get_services_processor(version=version)
-            service = s.create_service('HBASE')
-
-            ui_handlers = {}
-            service.register_user_input_handlers(ui_handlers)
-            ui_handlers['hbase-site/hbase.rootdir'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.rootdir'),
-                    "hdfs://%NN_HOST%:99/some/other/dir"),
-                cluster_spec.configurations)
-            self.assertEqual(
-                "hdfs://%NN_HOST%:99/some/other/dir",
-                cluster_spec.configurations['hbase-site']['hbase.rootdir'])
-            self.assertEqual(
-                "/some/other/dir",
-                cluster_spec.configurations['global']['hbase_hdfs_root_dir'])
-
-            self.assertRaises(
-                e.InvalidDataException,
-                ui_handlers['hbase-site/hbase.rootdir'],
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.rootdir'),
-                    "badprotocol://%NN_HOST%:99/some/other/dir"),
-                cluster_spec.configurations)
-
-            ui_handlers['hbase-site/hbase.tmp.dir'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.tmp.dir'),
-                    "/some/dir"),
-                cluster_spec.configurations)
-            self.assertEqual(
-                "/some/dir",
-                cluster_spec.configurations['hbase-site']['hbase.tmp.dir'])
-            self.assertEqual(
-                "/some/dir",
-                cluster_spec.configurations['global']['hbase_tmp_dir'])
-            ui_handlers[
-                'hbase-site/hbase.regionserver.global.memstore.upperLimit'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.regionserver.global.'
-                        'memstore.upperLimit'),
-                    "111"),
-                cluster_spec.configurations)
-            self.assertEqual(
-                "111",
-                cluster_spec.configurations['hbase-site'][
-                    'hbase.regionserver.global.memstore.upperLimit'])
-            self.assertEqual(
-                "111",
-                cluster_spec.configurations['global'][
-                    'regionserver_memstore_upperlimit'])
-            ui_handlers[
-                'hbase-site/hbase.hstore.blockingStoreFiles'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '',
-                        'hbase-site/hbase.hstore.blockingStoreFiles'),
-                    "112"),
-                cluster_spec.configurations)
-            self.assertEqual("112", cluster_spec.configurations['hbase-site'][
-                'hbase.hstore.blockingStoreFiles'])
-            self.assertEqual("112", cluster_spec.configurations['global'][
-                'hstore_blockingstorefiles'])
-            ui_handlers[
-                'hbase-site/hbase.hstore.compactionThreshold'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '',
-                        'hbase-site/hbase.hstore.compactionThreshold'),
-                    "113"),
-                cluster_spec.configurations)
-            self.assertEqual("113", cluster_spec.configurations['hbase-site'][
-                'hbase.hstore.compactionThreshold'])
-            self.assertEqual("113", cluster_spec.configurations['global'][
-                'hstore_compactionthreshold'])
-            ui_handlers[
-                'hbase-site/hfile.block.cache.size'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hfile.block.cache.size'),
-                    "114"),
-                cluster_spec.configurations)
-            self.assertEqual("114", cluster_spec.configurations['hbase-site'][
-                'hfile.block.cache.size'])
-            self.assertEqual("114", cluster_spec.configurations['global'][
-                'hfile_blockcache_size'])
-            ui_handlers[
-                'hbase-site/hbase.hregion.max.filesize'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.hregion.max.filesize'),
-                    "115"),
-                cluster_spec.configurations)
-            self.assertEqual("115", cluster_spec.configurations['hbase-site'][
-                'hbase.hregion.max.filesize'])
-            self.assertEqual("115", cluster_spec.configurations['global'][
-                'hstorefile_maxsize'])
-            ui_handlers[
-                'hbase-site/hbase.regionserver.handler.count'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '',
-                        'hbase-site/hbase.regionserver.handler.count'),
-                    "116"),
-                cluster_spec.configurations)
-            self.assertEqual("116", cluster_spec.configurations['hbase-site'][
-                'hbase.regionserver.handler.count'])
-            self.assertEqual("116", cluster_spec.configurations['global'][
-                'regionserver_handlers'])
-            ui_handlers[
-                'hbase-site/hbase.hregion.majorcompaction'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '',
-                        'hbase-site/hbase.hregion.majorcompaction'),
-                    "117"),
-                cluster_spec.configurations)
-            self.assertEqual("117", cluster_spec.configurations['hbase-site'][
-                'hbase.hregion.majorcompaction'])
-            self.assertEqual("117", cluster_spec.configurations['global'][
-                'hregion_majorcompaction'])
-            ui_handlers[
-                'hbase-site/hbase.regionserver.global.memstore.lowerLimit'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.regionserver.global.'
-                        'memstore.lowerLimit'),
-                    "118"),
-                cluster_spec.configurations)
-            self.assertEqual("118", cluster_spec.configurations['hbase-site'][
-                'hbase.regionserver.global.memstore.lowerLimit'])
-            self.assertEqual("118", cluster_spec.configurations['global'][
-                'regionserver_memstore_lowerlimit'])
-            ui_handlers[
-                'hbase-site/hbase.hregion.memstore.block.multiplier'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.hregion.memstore.block.'
-                        'multiplier'),
-                    "119"),
-                cluster_spec.configurations)
-            self.assertEqual("119", cluster_spec.configurations['hbase-site'][
-                'hbase.hregion.memstore.block.multiplier'])
-            self.assertEqual("119", cluster_spec.configurations['global'][
-                'hregion_blockmultiplier'])
-            ui_handlers[
-                'hbase-site/hbase.hregion.memstore.mslab.enabled'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.hregion.memstore.mslab.'
-                        'enabled'),
-                    "false"),
-                cluster_spec.configurations)
-            self.assertEqual("false", cluster_spec.configurations['hbase-site']
-                ['hbase.hregion.memstore.mslab.enabled'])
-            self.assertEqual("false", cluster_spec.configurations['global'][
-                'regionserver_memstore_lab'])
-            ui_handlers[
-                'hbase-site/hbase.hregion.memstore.flush.size'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.hregion.memstore.flush.'
-                        'size'),
-                    "120"),
-                cluster_spec.configurations)
-            self.assertEqual("120", cluster_spec.configurations['hbase-site'][
-                'hbase.hregion.memstore.flush.size'])
-            if version == '1.3.2':
-                self.assertEqual("120", cluster_spec.configurations['global'][
-                    'hregion_memstoreflushsize'])
-            ui_handlers[
-                'hbase-site/hbase.client.scanner.caching'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/hbase.client.scanner.caching'),
-                    "121"),
-                cluster_spec.configurations)
-            self.assertEqual("121", cluster_spec.configurations['hbase-site'][
-                'hbase.client.scanner.caching'])
-            self.assertEqual("121", cluster_spec.configurations['global'][
-                'client_scannercaching'])
-            ui_handlers[
-                'hbase-site/zookeeper.session.timeout'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/zookeeper.session.timeout'),
-                    "122"),
-                cluster_spec.configurations)
-            self.assertEqual("122", cluster_spec.configurations['hbase-site'][
-                'zookeeper.session.timeout'])
-            self.assertEqual("122", cluster_spec.configurations['global'][
-                'zookeeper_sessiontimeout'])
-            ui_handlers[
-                'hbase-site/hbase.client.keyvalue.maxsize'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '',
-                        'hbase-site/hbase.client.keyvalue.maxsize'),
-                    "123"),
-                cluster_spec.configurations)
-            self.assertEqual("123", cluster_spec.configurations['hbase-site'][
-                'hbase.client.keyvalue.maxsize'])
-            self.assertEqual("123", cluster_spec.configurations['global'][
-                'hfile_max_keyvalue_size'])
-            ui_handlers[
-                'hdfs-site/dfs.support.append'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hdfs-site/dfs.support.append'),
-                    "false"),
-                cluster_spec.configurations)
-            self.assertEqual("false", cluster_spec.configurations['hbase-site']
-                ['dfs.support.append'])
-            self.assertEqual("false", cluster_spec.configurations['hdfs-site'][
-                'dfs.support.append'])
-            self.assertEqual("false", cluster_spec.configurations['global'][
-                'hdfs_support_append'])
-            ui_handlers[
-                'hbase-site/dfs.client.read.shortcircuit'](
-                hdp_test_base.TestUserInput(
-                    hdp_test_base.TestUserInputConfig(
-                        '', '', 'hbase-site/dfs.client.read.shortcircuit'),
-                    "false"),
-                cluster_spec.configurations)
-            self.assertEqual("false", cluster_spec.configurations['hbase-site']
-                ['dfs.client.read.shortcircuit'])
-            self.assertEqual("false", cluster_spec.configurations['global'][
-                'hdfs_enable_shortcircuit_read'])
-
-    @mock.patch("sahara.utils.openstack.nova.get_instance_info",
-                hdp_test_base.get_instance_info)
-    @mock.patch(
-        'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
-        '_get_swift_properties',
-        return_value=[])
-    def test_hbase_validation(self, patched):
-        master_host = hdp_test_base.TestServer(
-            'master.novalocal', 'master', '11111', 3,
-            '111.11.1111', '222.11.1111')
-        master_ng = hdp_test_base.TestNodeGroup(
-            'master', [master_host], ["NAMENODE",
-                                      'RESOURCEMANAGER', 'YARN_CLIENT',
-                                      'NODEMANAGER',
-                                      "SECONDARY_NAMENODE",
-                                      "DATANODE",
-                                      "AMBARI_SERVER",
-                                      'HISTORYSERVER', 'MAPREDUCE2_CLIENT',
-                                      'ZOOKEEPER_SERVER', 'ZOOKEEPER_CLIENT'])
-        hbase_host = hdp_test_base.TestServer(
-            'hbase.novalocal', 'hbase', '11111', 3,
-            '111.11.1111', '222.11.1111')
-
-        hbase_ng = hdp_test_base.TestNodeGroup(
-            'hbase', [hbase_host], ["HBASE_MASTER"])
-
-        hbase_ng2 = hdp_test_base.TestNodeGroup(
-            'hbase2', [hbase_host], ["HBASE_MASTER"])
-
-        hbase_client_host = hdp_test_base.TestServer(
-            'hbase-client.novalocal', 'hbase-client', '11111', 3,
-            '111.11.1111', '222.11.1111')
-
-        hbase_client_ng = hdp_test_base.TestNodeGroup(
-            'hbase-client', [hbase_client_host], ["HBASE_CLIENT"])
-
-        hbase_slave_host = hdp_test_base.TestServer(
-            'hbase-rs.novalocal', 'hbase-rs', '11111', 3,
-            '111.11.1111', '222.11.1111')
-
-        hbase_slave_ng = hdp_test_base.TestNodeGroup(
-            'hbase-rs', [hbase_slave_host], ["HBASE_REGIONSERVER"])
-
-        cluster = hdp_test_base.TestCluster([master_ng, hbase_client_ng])
-        cluster_spec = hdp_test_base.create_clusterspec()
-
-        # validation should fail due to lack of hbase master
-        self.assertRaises(
-            ex.InvalidComponentCountException,
-            cluster_spec.create_operational_config, cluster, [])
-
-        cluster = hdp_test_base.TestCluster(
-            [master_ng, hbase_client_ng, hbase_slave_ng])
-        cluster_spec = hdp_test_base.create_clusterspec()
-
-        # validation should fail due to lack of hbase master
-
-        self.assertRaises(
-            ex.InvalidComponentCountException,
-            cluster_spec.create_operational_config, cluster, [])
-
-        cluster = hdp_test_base.TestCluster(
-            [master_ng, hbase_client_ng, hbase_ng])
-        cluster_spec = hdp_test_base.create_clusterspec()
-
-        # validation should succeed with hbase master included
-        cluster_spec.create_operational_config(cluster, [])
-
-        cluster = hdp_test_base.TestCluster(
-            [master_ng, hbase_client_ng, hbase_ng, hbase_ng2])
-        cluster_spec = hdp_test_base.create_clusterspec()
-
-        # validation should fail with multiple hbase master components
-        self.assertRaises(
-            ex.InvalidComponentCountException,
-            cluster_spec.create_operational_config, cluster, [])
-
-    @mock.patch("sahara.utils.openstack.nova.get_instance_info",
-                hdp_test_base.get_instance_info)
-    @mock.patch(
-        'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
-        '_get_swift_properties',
-        return_value=[])
-    def test_hdp2_hbase_validation(self, patched):
-        master_host = hdp_test_base.TestServer(
-            'master.novalocal', 'master', '11111', 3,
-            '111.11.1111', '222.11.1111')
-        master_ng = hdp_test_base.TestNodeGroup(
-            'master', [master_host], ["NAMENODE", "RESOURCEMANAGER",
-                                      "SECONDARY_NAMENODE", "HISTORYSERVER",
-                                      "NODEMANAGER", "DATANODE",
-                                      "AMBARI_SERVER", "ZOOKEEPER_SERVER"])
-        hbase_host = hdp_test_base.TestServer(
-            'hbase.novalocal', 'hbase', '11111', 3,
-            '111.11.1111', '222.11.1111')
-
-        hbase_ng = hdp_test_base.TestNodeGroup(
-            'hbase', [hbase_host], ["HBASE_MASTER"])
-
-        hbase_ng2 = hdp_test_base.TestNodeGroup(
-            'hbase2', [hbase_host], ["HBASE_MASTER"])
-
-        hbase_client_host = hdp_test_base.TestServer(
-            'hbase-client.novalocal', 'hbase-client', '11111', 3,
-            '111.11.1111', '222.11.1111')
-
-        hbase_client_ng = hdp_test_base.TestNodeGroup(
-            'hbase-client', [hbase_client_host], ["HBASE_CLIENT"])
-
-        hbase_slave_host = hdp_test_base.TestServer(
-            'hbase-rs.novalocal', 'hbase-rs', '11111', 3,
-            '111.11.1111', '222.11.1111')
-
-        hbase_slave_ng = hdp_test_base.TestNodeGroup(
-            'hbase-rs', [hbase_slave_host], ["HBASE_REGIONSERVER"])
-
-        cluster = hdp_test_base.TestCluster([master_ng, hbase_client_ng])
-        cluster_spec = hdp_test_base.create_clusterspec(hdp_version='2.0.6')
-
-        # validation should fail due to lack of hbase master
-        self.assertRaises(
-            ex.InvalidComponentCountException,
-            cluster_spec.create_operational_config, cluster, [])
-
-        cluster = hdp_test_base.TestCluster(
-            [master_ng, hbase_client_ng, hbase_slave_ng])
-        cluster_spec = hdp_test_base.create_clusterspec(hdp_version='2.0.6')
-
-        # validation should fail due to lack of hbase master
-
-        self.assertRaises(
-            ex.InvalidComponentCountException,
-            cluster_spec.create_operational_config, cluster, [])
-
-        cluster = hdp_test_base.TestCluster(
-            [master_ng, hbase_client_ng, hbase_ng])
-        cluster_spec = hdp_test_base.create_clusterspec(hdp_version='2.0.6')
-
-        # validation should succeed with hbase master included
-        cluster_spec.create_operational_config(cluster, [])
-
-        cluster = hdp_test_base.TestCluster(
-            [master_ng, hbase_client_ng, hbase_ng, hbase_ng2])
-        cluster_spec = hdp_test_base.create_clusterspec(hdp_version='2.0.6')
-
-        # validation should fail with multiple hbase master components
-        self.assertRaises(
-            ex.InvalidComponentCountException,
-            cluster_spec.create_operational_config, cluster, [])
-
-    @mock.patch("sahara.utils.openstack.nova.get_instance_info",
-                hdp_test_base.get_instance_info)
-    @mock.patch(
-        'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
-        '_get_swift_properties',
-        return_value=[])
-    def test_hbase_service_urls(self, patched):
-        for version in versions:
-            cluster = self._create_hbase_cluster()
-            cluster_spec = hdp_test_base.create_clusterspec(
-                hdp_version=version)
-            cluster_spec.create_operational_config(cluster, [])
-            s = self.get_services_processor(version=version)
-            service = s.create_service('HBASE')
-
-            url_info = {}
-            service.register_service_urls(cluster_spec, url_info, mock.Mock())
-            self.assertEqual(1, len(url_info))
-            self.assertEqual(6, len(url_info['HBase']))
-            self.assertEqual('http://222.22.2222:60010/master-status',
-                             url_info['HBase']['Web UI'])
-            self.assertEqual('http://222.22.2222:60010/logs',
-                             url_info['HBase']['Logs'])
-            self.assertEqual('http://222.22.2222:60010/zk.jsp',
-                             url_info['HBase']['Zookeeper Info'])
-            self.assertEqual('http://222.22.2222:60010/jmx',
-                             url_info['HBase']['JMX'])
-            self.assertEqual('http://222.22.2222:60010/dump',
-                             url_info['HBase']['Debug Dump'])
-            self.assertEqual('http://222.22.2222:60010/stacks',
-                             url_info['HBase']['Thread Stacks'])
-
-    @mock.patch("sahara.utils.openstack.nova.get_instance_info",
-                hdp_test_base.get_instance_info)
-    @mock.patch(
-        'sahara.plugins.hdp.versions.version_2_0_6.services.HdfsService.'
-        '_get_swift_properties',
-        return_value=[])
-    def test_hbase_replace_tokens(self, patched):
-        for version in versions:
-            cluster = self._create_hbase_cluster()
-            cluster_spec = hdp_test_base.create_clusterspec(
-                hdp_version=version)
-            cluster_spec.create_operational_config(cluster, [])
-            s = self.get_services_processor(version=version)
-            service = s.create_service('HBASE')
-            service.finalize_configuration(cluster_spec)
-
-            self.assertEqual("hdfs://master.novalocal:8020/apps/hbase/data",
-                             cluster_spec.configurations['hbase-site'][
-                                 'hbase.rootdir'])
-            self.assertEqual(set(['zk.novalocal', 'master.novalocal']),
-                             set(cluster_spec.configurations['hbase-site'][
-                                 'hbase.zookeeper.quorum'].split(',')))
-
-    def test_get_storage_paths(self):
-        for version in versions:
-            s = self.get_services_processor(version=version)
-            service = s.create_service('AMBARI')
-            server1 = hdp_test_base.TestServer(
-                'host1', 'test-master', '11111', 3, '1.1.1.1', '2.2.2.2')
-            server2 = hdp_test_base.TestServer(
-                'host2', 'test-slave', '11111', 3, '3.3.3.3', '4.4.4.4')
-            server3 = hdp_test_base.TestServer(
-                'host3', 'another-test', '11111', 3, '6.6.6.6', '5.5.5.5')
-            ng1 = hdp_test_base.TestNodeGroup('ng1', [server1], None)
-            ng2 = hdp_test_base.TestNodeGroup('ng2', [server2], None)
-            ng3 = hdp_test_base.TestNodeGroup('ng3', [server3], None)
-
-            server1.storage_path = ['/volume/disk1']
-            server2.storage_path = ['/mnt']
-
-            paths = service._get_common_paths([ng1, ng2])
-            self.assertEqual([], paths)
-
-            server1.storage_path = ['/volume/disk1', '/volume/disk2']
-            server2.storage_path = ['/mnt']
-            server3.storage_path = ['/volume/disk1']
-
-            paths = service._get_common_paths([ng1, ng2, ng3])
-            self.assertEqual([], paths)
-
-            server1.storage_path = ['/volume/disk1', '/volume/disk2']
-            server2.storage_path = ['/volume/disk1']
-            server3.storage_path = ['/volume/disk1']
-
-            paths = service._get_common_paths([ng1, ng2, ng3])
-            self.assertEqual(['/volume/disk1'], paths)
-
-    def _create_hbase_cluster(self):
-        master_host = hdp_test_base.TestServer(
-            'master.novalocal', 'master', '11111', 3,
-            '111.11.1111', '222.11.1111')
-        master_ng = hdp_test_base.TestNodeGroup(
-            'master', [master_host], ["NAMENODE", "RESOURCEMANAGER",
-                                      "SECONDARY_NAMENODE", "NODEMANAGER",
-                                      "DATANODE", "AMBARI_SERVER",
-                                      "HISTORYSERVER", "ZOOKEEPER_SERVER"])
-        extra_zk_host = hdp_test_base.TestServer(
-            'zk.novalocal', 'zk', '11112', 3,
-            '111.11.1112', '222.11.1112')
-        extra_zk_ng = hdp_test_base.TestNodeGroup(
-            'zk', [extra_zk_host], ['ZOOKEEPER_SERVER'])
-        hbase_host = hdp_test_base.TestServer(
-            'hbase.novalocal', 'hbase', '11111', 3,
-            '222.22.2222', '222.11.1111')
-        hbase_ng = hdp_test_base.TestNodeGroup(
-            'hbase', [hbase_host], ["HBASE_MASTER"])
-        return hdp_test_base.TestCluster([master_ng, extra_zk_ng, hbase_ng])
diff --git a/sahara/tests/unit/plugins/hdp/test_versionmanagerfactory.py b/sahara/tests/unit/plugins/hdp/test_versionmanagerfactory.py
deleted file mode 100644
index 702f3db698..0000000000
--- a/sahara/tests/unit/plugins/hdp/test_versionmanagerfactory.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2013 Hortonworks, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from sahara.plugins.hdp.versions import versionhandlerfactory
-from sahara.tests.unit import base
-
-
-class VersionManagerFactoryTest(base.SaharaTestCase):
-
-    def test_get_versions(self):
-        factory = versionhandlerfactory.VersionHandlerFactory.get_instance()
-        versions = factory.get_versions()
-
-        self.assertEqual(1, len(versions))
-        self.assertIn('2.0.6', versions)
-
-    def test_get_version_handlers(self):
-        factory = versionhandlerfactory.VersionHandlerFactory.get_instance()
-        versions = factory.get_versions()
-        for version in versions:
-            handler = factory.get_version_handler(version)
-            self.assertIsNotNone(handler)
-            self.assertEqual(version, handler.get_version())
diff --git a/sahara/tests/unit/plugins/hdp/versions/__init__.py b/sahara/tests/unit/plugins/hdp/versions/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sahara/tests/unit/plugins/hdp/versions/version_2_0_6/__init__.py b/sahara/tests/unit/plugins/hdp/versions/version_2_0_6/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sahara/tests/unit/plugins/hdp/versions/version_2_0_6/test_edp_engine.py b/sahara/tests/unit/plugins/hdp/versions/version_2_0_6/test_edp_engine.py
deleted file mode 100644
index cca1ba3301..0000000000
--- a/sahara/tests/unit/plugins/hdp/versions/version_2_0_6/test_edp_engine.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from sahara.plugins.hdp.versions.version_2_0_6 import edp_engine
-from sahara.tests.unit import base as sahara_base
-from sahara.utils import edp
-
-
-class HDP2ConfigHintsTest(sahara_base.SaharaTestCase):
-    @mock.patch(
-        'sahara.plugins.hdp.confighints_helper.get_possible_hive_config_from',
-        return_value={})
-    def test_get_possible_job_config_hive(self,
-                                          get_possible_hive_config_from):
-        expected_config = {'job_config': {}}
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_HIVE)
-        get_possible_hive_config_from.assert_called_once_with(
-            'plugins/hdp/versions/version_2_0_6/resources/'
-            'ambari-config-resource.json')
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch('sahara.plugins.hdp.edp_engine.EdpOozieEngine')
-    def test_get_possible_job_config_java(self, BaseHDPEdpOozieEngine):
-        expected_config = {'job_config': {}}
-        BaseHDPEdpOozieEngine.get_possible_job_config.return_value = (
-            expected_config)
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_JAVA)
-        BaseHDPEdpOozieEngine.get_possible_job_config.assert_called_once_with(
-            edp.JOB_TYPE_JAVA)
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch(
-        'sahara.plugins.hdp.confighints_helper.'
-        'get_possible_mapreduce_config_from',
-        return_value={})
-    def test_get_possible_job_config_mapreduce(
-            self, get_possible_mapreduce_config_from):
-        expected_config = {'job_config': {}}
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_MAPREDUCE)
-        get_possible_mapreduce_config_from.assert_called_once_with(
-            'plugins/hdp/versions/version_2_0_6/resources/'
-            'ambari-config-resource.json')
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch(
-        'sahara.plugins.hdp.confighints_helper.'
-        'get_possible_mapreduce_config_from',
-        return_value={})
-    def test_get_possible_job_config_mapreduce_streaming(
-            self, get_possible_mapreduce_config_from):
-        expected_config = {'job_config': {}}
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_MAPREDUCE_STREAMING)
-        get_possible_mapreduce_config_from.assert_called_once_with(
-            'plugins/hdp/versions/version_2_0_6/resources/'
-            'ambari-config-resource.json')
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch(
-        'sahara.plugins.hdp.confighints_helper.get_possible_pig_config_from',
-        return_value={})
-    def test_get_possible_job_config_pig(self,
-                                         get_possible_pig_config_from):
-        expected_config = {'job_config': {}}
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_PIG)
-        get_possible_pig_config_from.assert_called_once_with(
-            'plugins/hdp/versions/version_2_0_6/resources/'
-            'ambari-config-resource.json')
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch('sahara.plugins.hdp.edp_engine.EdpOozieEngine')
-    def test_get_possible_job_config_shell(self, BaseHDPEdpOozieEngine):
-        expected_config = {'job_config': {}}
-        BaseHDPEdpOozieEngine.get_possible_job_config.return_value = (
-            expected_config)
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_SHELL)
-        BaseHDPEdpOozieEngine.get_possible_job_config.assert_called_once_with(
-            edp.JOB_TYPE_SHELL)
-        self.assertEqual(expected_config, actual_config)
diff --git a/setup.cfg b/setup.cfg
index 4568ee5e18..f999b5464c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -41,7 +41,6 @@ wsgi_scripts =
 sahara.cluster.plugins =
     vanilla = sahara.plugins.vanilla.plugin:VanillaProvider
-    hdp = sahara.plugins.hdp.ambariplugin:AmbariPlugin
     ambari = sahara.plugins.ambari.plugin:AmbariPluginProvider
    mapr = sahara.plugins.mapr.plugin:MapRPlugin
    cdh = sahara.plugins.cdh.plugin:CDHPluginProvider