diff --git a/sahara/plugins/cdh/cloudera_utils.py b/sahara/plugins/cdh/cloudera_utils.py index 6914e551..a5aaf288 100644 --- a/sahara/plugins/cdh/cloudera_utils.py +++ b/sahara/plugins/cdh/cloudera_utils.py @@ -39,6 +39,12 @@ HUE_SERVICE_NAME = 'hue01' SPARK_SERVICE_NAME = 'spark_on_yarn01' ZOOKEEPER_SERVICE_NAME = 'zookeeper01' HBASE_SERVICE_NAME = 'hbase01' +FLUME_SERVICE_NAME = 'flume01' +SENTRY_SERVICE_NAME = 'sentry01' +SOLR_SERVICE_NAME = 'solr01' +SQOOP_SERVICE_NAME = 'sqoop01' +KS_INDEXER_SERVICE_NAME = 'ks_indexer01' +IMPALA_SERVICE_NAME = 'impala01' def have_cm_api_libs(): @@ -115,6 +121,18 @@ def get_service(process, cluster=None, instance=None): return cm_cluster.get_service(ZOOKEEPER_SERVICE_NAME) elif process in ['MASTER', 'REGIONSERVER']: return cm_cluster.get_service(HBASE_SERVICE_NAME) + elif process in ['AGENT']: + return cm_cluster.get_service(FLUME_SERVICE_NAME) + elif process in ['SQOOP_SERVER']: + return cm_cluster.get_service(SQOOP_SERVICE_NAME) + elif process in ['SENTRY_SERVER']: + return cm_cluster.get_service(SENTRY_SERVICE_NAME) + elif process in ['SOLR_SERVER']: + return cm_cluster.get_service(SOLR_SERVICE_NAME) + elif process in ['HBASE_INDEXER']: + return cm_cluster.get_service(KS_INDEXER_SERVICE_NAME) + elif process in ['CATALOGSERVER', 'STATESTORE', 'IMPALAD', 'LLAMA']: + return cm_cluster.get_service(IMPALA_SERVICE_NAME) else: raise ValueError( _("Process %(process)s is not supported by CDH plugin") % @@ -161,24 +179,32 @@ def first_run(cluster): def get_role_name(instance, service): # NOTE: role name must match regexp "[_A-Za-z][-_A-Za-z0-9]{0,63}" shortcuts = { + 'AGENT': 'A', 'ALERTPUBLISHER': 'AP', + 'CATALOGSERVER': 'ICS', 'DATANODE': 'DN', 'EVENTSERVER': 'ES', + 'HBASE_INDEXER': 'LHBI', 'HIVEMETASTORE': 'HVM', 'HIVESERVER2': 'HVS', 'HOSTMONITOR': 'HM', + 'IMPALAD': 'ID', 'JOBHISTORY': 'JS', + 'MASTER': 'M', 'NAMENODE': 'NN', 'NODEMANAGER': 'NM', 'OOZIE_SERVER': 'OS', + 'REGIONSERVER': 'RS', 'RESOURCEMANAGER': 'RM', 
'SECONDARYNAMENODE': 'SNN', - 'SERVICEMONITOR': 'SM', - 'WEBHCAT': 'WHC', - 'SPARK_YARN_HISTORY_SERVER': 'SHS', + 'SENTRY_SERVER': 'SS', 'SERVER': 'S', - 'MASTER': 'M', - 'REGIONSERVER': 'RS' + 'SERVICEMONITOR': 'SM', + 'SOLR_SERVER': 'SS', + 'SPARK_YARN_HISTORY_SERVER': 'SHS', + 'SQOOP_SERVER': 'S2S', + 'STATESTORE': 'ISS', + 'WEBHCAT': 'WHC' } return '%s_%s' % (shortcuts.get(service, service), instance.hostname().replace('-', '_')) @@ -200,6 +226,14 @@ def create_mgmt_service(cluster): cm.hosts_start_roles([hostname]) +@cloudera_cmd +def restart_mgmt_service(cluster): + api = get_api_client(cluster) + cm = api.get_cloudera_manager() + mgmt_service = cm.get_service() + yield mgmt_service.restart() + + @cloudera_cmd def start_service(service): yield service.start() diff --git a/sahara/plugins/cdh/config_helper.py b/sahara/plugins/cdh/config_helper.py index 2bbfb4f2..0be9b857 100644 --- a/sahara/plugins/cdh/config_helper.py +++ b/sahara/plugins/cdh/config_helper.py @@ -107,11 +107,34 @@ hue_service_confs = _load_json(path_to_config + 'hue-service.json') hue_role_confs = _load_json(path_to_config + 'hue-hue.json') spark_service_confs = _load_json(path_to_config + 'spark-service.json') spark_role_confs = _load_json(path_to_config + 'spark-history.json') -zookeeper_server_confs = _load_json(path_to_config + 'zookeeper-server.json') zookeeper_service_confs = _load_json(path_to_config + 'zookeeper-service.json') +zookeeper_server_confs = _load_json(path_to_config + 'zookeeper-server.json') hbase_confs = _load_json(path_to_config + 'hbase-service.json') master_confs = _load_json(path_to_config + 'hbase-master.json') regionserver_confs = _load_json(path_to_config + 'hbase-regionserver.json') +flume_service_confs = _load_json(path_to_config + 'flume-service.json') +flume_agent_confs = _load_json(path_to_config + 'flume-agent.json') +sentry_service_confs = _load_json(path_to_config + 'sentry-service.json') +sentry_server_confs = _load_json(path_to_config + + 
'sentry-sentry_server.json') +solr_service_confs = _load_json(path_to_config + 'solr-service.json') +solr_server_confs = _load_json(path_to_config + 'solr-solr_server.json') +sqoop_service_confs = _load_json(path_to_config + 'sqoop-service.json') +sqoop_server_confs = _load_json(path_to_config + + 'sqoop-sqoop_server.json') +ks_indexer_service_confs = _load_json(path_to_config + + 'ks_indexer-service.json') +ks_indexer_role_confs = _load_json(path_to_config + + 'ks_indexer-hbase_indexer.json') +impala_service_confs = _load_json(path_to_config + 'impala-service.json') +impala_catalogserver_confs = _load_json(path_to_config + + 'impala-catalogserver.json') +impala_impalad_confs = _load_json(path_to_config + + 'impala-impalad.json') +impala_llama_confs = _load_json(path_to_config + + 'impala-llama.json') +impala_statestore_confs = _load_json(path_to_config + + 'impala-statestore.json') priority_one_confs = _load_json(path_to_config + 'priority-one-confs.json') @@ -160,6 +183,20 @@ def _get_ng_plugin_configs(): cfg += _init_configs(hbase_confs, 'HBASE', 'cluster') cfg += _init_configs(master_confs, 'MASTER', 'node') cfg += _init_configs(regionserver_confs, 'REGIONSERVER', 'node') + cfg += _init_configs(flume_service_confs, 'FLUME', 'cluster') + cfg += _init_configs(flume_agent_confs, 'FLUME', 'node') + cfg += _init_configs(sentry_service_confs, 'SENTRY', 'cluster') + cfg += _init_configs(sentry_server_confs, 'SENTRY', 'node') + cfg += _init_configs(solr_service_confs, 'SOLR', 'cluster') + cfg += _init_configs(solr_server_confs, 'SOLR', 'node') + cfg += _init_configs(sqoop_service_confs, 'SQOOP', 'cluster') + cfg += _init_configs(sqoop_server_confs, 'SQOOP', 'node') + cfg += _init_configs(ks_indexer_service_confs, 'KS_INDEXER', 'cluster') + cfg += _init_configs(ks_indexer_role_confs, 'KS_INDEXER', 'node') + cfg += _init_configs(impala_service_confs, 'IMPALA', 'cluster') + cfg += _init_configs(impala_catalogserver_confs, 'CATALOGSERVER', 'node') + cfg += 
_init_configs(impala_impalad_confs, 'IMPALAD', 'node') + cfg += _init_configs(impala_statestore_confs, 'STATESTORE', 'node') return cfg diff --git a/sahara/plugins/cdh/db_helper.py b/sahara/plugins/cdh/db_helper.py index 784be12e..841a9d61 100644 --- a/sahara/plugins/cdh/db_helper.py +++ b/sahara/plugins/cdh/db_helper.py @@ -51,3 +51,32 @@ def create_hive_database(cluster, remote): '-h localhost -p 7432 -d scm -f %s') % script_name remote.execute_command(psql_cmd) remote.execute_command('rm %s' % script_name) + + +def get_sentry_db_password(cluster): + ctx = context.ctx() + cluster = conductor.cluster_get(ctx, cluster.id) + passwd = cluster.extra.get('sentry_db_password') if cluster.extra else None + if passwd: + return passwd + + passwd = six.text_type(uuid.uuid4()) + extra = cluster.extra.to_dict() if cluster.extra else {} + extra['sentry_db_password'] = passwd + cluster = conductor.cluster_update(ctx, cluster, {'extra': extra}) + return passwd + + +def create_sentry_database(cluster, remote): + db_password = get_sentry_db_password(cluster) + create_db_script = files.get_file_text( + 'plugins/cdh/resources/create_sentry_db.sql') + create_db_script = create_db_script % db_password + script_name = 'create_sentry_db.sql' + remote.write_file_to(script_name, create_db_script) + + psql_cmd = ('PGPASSWORD=$(sudo head -1 /var/lib/cloudera-scm-server-db' + '/data/generated_password.txt) psql -U cloudera-scm ' + '-h localhost -p 7432 -d scm -f %s') % script_name + remote.execute_command(psql_cmd) + remote.execute_command('rm %s' % script_name) diff --git a/sahara/plugins/cdh/deploy.py b/sahara/plugins/cdh/deploy.py index fbc52302..74049f60 100644 --- a/sahara/plugins/cdh/deploy.py +++ b/sahara/plugins/cdh/deploy.py @@ -28,6 +28,7 @@ from sahara.plugins.cdh import commands as cmd from sahara.plugins.cdh import config_helper as c_helper from sahara.plugins.cdh import db_helper from sahara.plugins.cdh import utils as pu +from sahara.plugins.cdh import validation as v from 
sahara.plugins import exceptions as ex from sahara.plugins import utils as gu from sahara.swift import swift_helper @@ -44,6 +45,12 @@ HUE_SERVICE_TYPE = 'HUE' SPARK_SERVICE_TYPE = 'SPARK_ON_YARN' ZOOKEEPER_SERVICE_TYPE = 'ZOOKEEPER' HBASE_SERVICE_TYPE = 'HBASE' +FLUME_SERVICE_TYPE = 'FLUME' +SENTRY_SERVICE_TYPE = 'SENTRY' +SOLR_SERVICE_TYPE = 'SOLR' +SQOOP_SERVICE_TYPE = 'SQOOP' +KS_INDEXER_SERVICE_TYPE = 'KS_INDEXER' +IMPALA_SERVICE_TYPE = 'IMPALA' PATH_TO_CORE_SITE_XML = '/etc/hadoop/conf/core-site.xml' HADOOP_LIB_DIR = '/usr/lib/hadoop-mapreduce' @@ -53,6 +60,7 @@ PACKAGES = [ 'cloudera-manager-daemons', 'cloudera-manager-server', 'cloudera-manager-server-db-2', + 'flume-ng', 'hadoop-hdfs-datanode', 'hadoop-hdfs-namenode', 'hadoop-hdfs-secondarynamenode', @@ -60,16 +68,26 @@ PACKAGES = [ 'hadoop-mapreduce-historyserver', 'hadoop-yarn-nodemanager', 'hadoop-yarn-resourcemanager', + 'hbase', + 'hbase-solr', + 'hive-hcatalog', 'hive-metastore', 'hive-server2', + 'hive-webhcat-server', 'hue', + 'impala', + 'impala-server', + 'impala-state-store', + 'impala-catalog', 'ntp', 'oozie', 'oracle-j2sdk1.7', + 'sentry', + 'solr-server', 'spark-history-server', + 'sqoop2', 'unzip', - 'zookeeper', - 'hbase' + 'zookeeper' ] LOG = logging.getLogger(__name__) @@ -94,25 +112,106 @@ def _get_configs(service, cluster=None, node_group=None): def get_hadoop_dirs(mount_points, suffix): return ','.join([x + suffix for x in mount_points]) - all_confs = { - 'OOZIE': { - 'mapreduce_yarn_service': cu.YARN_SERVICE_NAME - }, - 'YARN': { - 'hdfs_service': cu.HDFS_SERVICE_NAME - }, - 'HUE': { - 'hive_service': cu.HIVE_SERVICE_NAME, - 'oozie_service': cu.OOZIE_SERVICE_NAME - }, - 'SPARK_ON_YARN': { - 'yarn_service': cu.YARN_SERVICE_NAME - }, - 'HBASE': { - 'hdfs_service': cu.HDFS_SERVICE_NAME, - 'zookeeper_service': cu.ZOOKEEPER_SERVICE_NAME + all_confs = {} + if cluster: + zk_count = v._get_inst_count(cluster, 'SERVER') + hbm_count = v._get_inst_count(cluster, 'MASTER') + ss_count = 
v._get_inst_count(cluster, 'SENTRY_SERVER') + ks_count = v._get_inst_count(cluster, 'HBASE_INDEXER') + imp_count = v._get_inst_count(cluster, 'CATALOGSERVER') + all_confs = { + 'HDFS': { + 'zookeeper_service': + cu.ZOOKEEPER_SERVICE_NAME if zk_count else '', + 'dfs_block_local_path_access_user': + 'impala' if imp_count else '' + }, + 'HIVE': { + 'mapreduce_yarn_service': cu.YARN_SERVICE_NAME, + 'zookeeper_service': + cu.ZOOKEEPER_SERVICE_NAME if zk_count else '' + }, + 'OOZIE': { + 'mapreduce_yarn_service': cu.YARN_SERVICE_NAME, + 'zookeeper_service': + cu.ZOOKEEPER_SERVICE_NAME if zk_count else '' + }, + 'YARN': { + 'hdfs_service': cu.HDFS_SERVICE_NAME, + 'zookeeper_service': + cu.ZOOKEEPER_SERVICE_NAME if zk_count else '' + }, + 'HUE': { + 'hive_service': cu.HIVE_SERVICE_NAME, + 'oozie_service': cu.OOZIE_SERVICE_NAME, + 'sentry_service': cu.SENTRY_SERVICE_NAME if ss_count else '', + 'zookeeper_service': + cu.ZOOKEEPER_SERVICE_NAME if zk_count else '' + }, + 'SPARK_ON_YARN': { + 'yarn_service': cu.YARN_SERVICE_NAME + }, + 'HBASE': { + 'hdfs_service': cu.HDFS_SERVICE_NAME, + 'zookeeper_service': cu.ZOOKEEPER_SERVICE_NAME, + 'hbase_enable_indexing': 'true' if ks_count else 'false', + 'hbase_enable_replication': 'true' if ks_count else 'false' + }, + 'FLUME': { + 'hdfs_service': cu.HDFS_SERVICE_NAME, + 'hbase_service': cu.HBASE_SERVICE_NAME if hbm_count else '' + }, + 'SENTRY': { + 'hdfs_service': cu.HDFS_SERVICE_NAME + }, + 'SOLR': { + 'hdfs_service': cu.HDFS_SERVICE_NAME, + 'zookeeper_service': cu.ZOOKEEPER_SERVICE_NAME + }, + 'SQOOP': { + 'mapreduce_yarn_service': cu.YARN_SERVICE_NAME + }, + 'KS_INDEXER': { + 'hbase_service': cu.HBASE_SERVICE_NAME, + 'solr_service': cu.SOLR_SERVICE_NAME + }, + 'IMPALA': { + 'hdfs_service': cu.HDFS_SERVICE_NAME, + 'hbase_service': cu.HBASE_SERVICE_NAME if hbm_count else '', + 'hive_service': cu.HIVE_SERVICE_NAME + } } - } + hive_confs = { + 'HIVE': { + 'hive_metastore_database_type': 'postgresql', + 'hive_metastore_database_host': 
+ pu.get_manager(cluster).internal_ip, + 'hive_metastore_database_port': '7432', + 'hive_metastore_database_password': + db_helper.get_hive_db_password(cluster) + } + } + hue_confs = { + 'HUE': { + 'hue_webhdfs': cu.get_role_name(pu.get_namenode(cluster), + 'NAMENODE') + } + } + sentry_confs = { + 'SENTRY': { + 'sentry_server_database_type': 'postgresql', + 'sentry_server_database_host': + pu.get_manager(cluster).internal_ip, + 'sentry_server_database_port': '7432', + 'sentry_server_database_password': + db_helper.get_sentry_db_password(cluster) + } + } + + all_confs = _merge_dicts(all_confs, hue_confs) + all_confs = _merge_dicts(all_confs, hive_confs) + all_confs = _merge_dicts(all_confs, sentry_confs) + all_confs = _merge_dicts(all_confs, cluster.cluster_configs) if node_group: paths = node_group.storage_paths() @@ -125,11 +224,16 @@ def _get_configs(service, cluster=None, node_group=None): 'fs_checkpoint_dir_list': get_hadoop_dirs(paths, '/fs/snn') }, 'DATANODE': { - 'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn') + 'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn'), + 'dfs_datanode_data_dir_perm': 755, + 'dfs_datanode_handler_count': 30 }, 'NODEMANAGER': { 'yarn_nodemanager_local_dirs': get_hadoop_dirs(paths, '/yarn/local') + }, + 'SERVER': { + 'maxSessionTimeout': 60000 } } @@ -137,29 +241,6 @@ def _get_configs(service, cluster=None, node_group=None): all_confs = _merge_dicts(all_confs, ng_user_confs) all_confs = _merge_dicts(all_confs, ng_default_confs) - if cluster: - hive_confs = { - 'HIVE': { - 'hive_metastore_database_type': 'postgresql', - 'hive_metastore_database_host': - pu.get_manager(cluster).internal_ip, - 'hive_metastore_database_port': '7432', - 'hive_metastore_database_password': - db_helper.get_hive_db_password(cluster), - 'mapreduce_yarn_service': cu.YARN_SERVICE_NAME - } - } - hue_confs = { - 'HUE': { - 'hue_webhdfs': cu.get_role_name(pu.get_namenode(cluster), - 'NAMENODE') - } - } - - all_confs = _merge_dicts(all_confs, hue_confs) - 
all_confs = _merge_dicts(all_confs, hive_confs) - all_confs = _merge_dicts(all_confs, cluster.cluster_configs) - return all_confs.get(service, {}) @@ -370,6 +451,20 @@ def _create_services(cluster): cm_cluster.create_service(cu.SPARK_SERVICE_NAME, SPARK_SERVICE_TYPE) if pu.get_hbase_master(cluster): cm_cluster.create_service(cu.HBASE_SERVICE_NAME, HBASE_SERVICE_TYPE) + if len(pu.get_flumes(cluster)) > 0: + cm_cluster.create_service(cu.FLUME_SERVICE_NAME, FLUME_SERVICE_TYPE) + if pu.get_sentry(cluster): + cm_cluster.create_service(cu.SENTRY_SERVICE_NAME, SENTRY_SERVICE_TYPE) + if len(pu.get_solrs(cluster)) > 0: + cm_cluster.create_service(cu.SOLR_SERVICE_NAME, + SOLR_SERVICE_TYPE) + if pu.get_sqoop(cluster): + cm_cluster.create_service(cu.SQOOP_SERVICE_NAME, SQOOP_SERVICE_TYPE) + if len(pu.get_hbase_indexers(cluster)) > 0: + cm_cluster.create_service(cu.KS_INDEXER_SERVICE_NAME, + KS_INDEXER_SERVICE_TYPE) + if pu.get_catalogserver(cluster): + cm_cluster.create_service(cu.IMPALA_SERVICE_NAME, IMPALA_SERVICE_TYPE) def _configure_services(cluster): @@ -400,10 +495,38 @@ def _configure_services(cluster): if pu.get_spark_historyserver(cluster): spark = cm_cluster.get_service(cu.SPARK_SERVICE_NAME) spark.update_config(_get_configs(SPARK_SERVICE_TYPE, cluster=cluster)) + if pu.get_hbase_master(cluster): hbase = cm_cluster.get_service(cu.HBASE_SERVICE_NAME) hbase.update_config(_get_configs(HBASE_SERVICE_TYPE, cluster=cluster)) + if len(pu.get_flumes(cluster)) > 0: + flume = cm_cluster.get_service(cu.FLUME_SERVICE_NAME) + flume.update_config(_get_configs(FLUME_SERVICE_TYPE, cluster=cluster)) + + if pu.get_sentry(cluster): + sentry = cm_cluster.get_service(cu.SENTRY_SERVICE_NAME) + sentry.update_config(_get_configs(SENTRY_SERVICE_TYPE, + cluster=cluster)) + + if len(pu.get_solrs(cluster)) > 0: + solr = cm_cluster.get_service(cu.SOLR_SERVICE_NAME) + solr.update_config(_get_configs(SOLR_SERVICE_TYPE, cluster=cluster)) + + if pu.get_sqoop(cluster): + sqoop = 
cm_cluster.get_service(cu.SQOOP_SERVICE_NAME) + sqoop.update_config(_get_configs(SQOOP_SERVICE_TYPE, cluster=cluster)) + + if len(pu.get_hbase_indexers(cluster)) > 0: + ks_indexer = cm_cluster.get_service(cu.KS_INDEXER_SERVICE_NAME) + ks_indexer.update_config(_get_configs(KS_INDEXER_SERVICE_TYPE, + cluster=cluster)) + + if pu.get_catalogserver(cluster): + impala = cm_cluster.get_service(cu.IMPALA_SERVICE_NAME) + impala.update_config(_get_configs(IMPALA_SERVICE_TYPE, + cluster=cluster)) + def _configure_instances(instances): for inst in instances: @@ -467,6 +590,12 @@ def _configure_hive(cluster): db_helper.create_hive_database(cluster, r) +def _configure_sentry(cluster): + manager = pu.get_manager(cluster) + with manager.remote() as r: + db_helper.create_sentry_database(cluster, r) + + def _install_extjs(cluster): extjs_remote_location = c_helper.get_extjs_lib_url(cluster) extjs_vm_location_dir = '/var/lib/oozie' @@ -512,6 +641,9 @@ def start_cluster(cluster): if pu.get_hive_metastore(cluster): _configure_hive(cluster) + if pu.get_sentry(cluster): + _configure_sentry(cluster) + cu.first_run(cluster) if pu.get_hive_metastore(cluster): @@ -519,6 +651,13 @@ def start_cluster(cluster): _restore_deploy_cc(cluster) + if pu.get_flumes(cluster): + cm_cluster = cu.get_cloudera_cluster(cluster) + flume = cm_cluster.get_service(cu.FLUME_SERVICE_NAME) + cu.start_service(flume) + + cu.restart_mgmt_service(cluster) + def get_open_ports(node_group): ports = [9000] # for CM agent @@ -539,7 +678,15 @@ def get_open_ports(node_group): 'SPARK_YARN_HISTORY_SERVER': [18088], 'SERVER': [2181, 3181, 4181, 9010], 'MASTER': [60000], - 'REGIONSERVER': [60020] + 'REGIONSERVER': [60020], + 'AGENT': [41414], + 'SENTRY_SERVER': [8038], + 'SOLR_SERVER': [8983, 8984], + 'SQOOP_SERVER': [8005, 12000], + 'HBASE_INDEXER': [], + 'CATALOGSERVER': [25020, 26000], + 'STATESTORE': [25010, 24000], + 'IMPALAD': [21050, 21000, 23000, 25000, 28000, 22000] } for process in node_group.node_processes: diff --git 
a/sahara/plugins/cdh/plugin.py b/sahara/plugins/cdh/plugin.py index 8a8a87d9..37db3f7d 100644 --- a/sahara/plugins/cdh/plugin.py +++ b/sahara/plugins/cdh/plugin.py @@ -40,7 +40,7 @@ class CDHPluginProvider(p.ProvisioningPluginBase): return ['5', '5.2.0'] def get_node_processes(self, hadoop_version): - return { + processes = { "CLOUDERA": ['MANAGER'], "HDFS": [], "NAMENODE": ['NAMENODE'], @@ -60,8 +60,19 @@ class CDHPluginProvider(p.ProvisioningPluginBase): "ZOOKEEPER": ['SERVER'], "HBASE": [], "MASTER": ['MASTER'], - "REGIONSERVER": ['REGIONSERVER'] + "REGIONSERVER": ['REGIONSERVER'], + "FLUME": ['AGENT'], + "IMPALA": [], + "CATALOGSERVER": ['CATALOGSERVER'], + "STATESTORE": ['STATESTORE'], + "IMPALAD": ['IMPALAD'], + "KS_INDEXER": ['HBASE_INDEXER'], + "SOLR": ['SOLR_SERVER'], + "SQOOP": ['SQOOP_SERVER'] } + if hadoop_version == '5.2.0': + processes["SENTRY"] = ['SENTRY_SERVER'] + return processes def get_configs(self, hadoop_version): return c_helper.get_plugin_configs() diff --git a/sahara/plugins/cdh/resources/cdh_config.py b/sahara/plugins/cdh/resources/cdh_config.py index 1ded4554..0db60615 100644 --- a/sahara/plugins/cdh/resources/cdh_config.py +++ b/sahara/plugins/cdh/resources/cdh_config.py @@ -33,6 +33,12 @@ hue_service_name = 'hue01' spark_service_name = 'spark_on_yarn01' zookeeper_service_name = 'zookeeper01' hbase_service_name = 'hbase01' +flume_service_name = 'flume01' +sqoop_service_name = 'sqoop01' +solr_service_name = 'solr01' +ks_indexer_service_name = 'ks_indexer01' +impala_service_name = 'impala01' +sentry_service_name = 'sentry01' def get_cm_api(): @@ -48,7 +54,7 @@ def process_service(service, service_name): for role_cfgs in service.get_all_role_config_groups(): role_cm_cfg = role_cfgs.get_config(view='full') role_cfg = parse_config(role_cm_cfg) - role_name = role_cfgs.displayName.split(' ')[0].lower() + role_name = role_cfgs.roleType write_cfg(role_cfg, '%s-%s.json' % (service_name, role_name)) service_cm_cfg = 
service.get_config(view='full')[0] @@ -106,5 +112,24 @@ def main(): hbase = cluster.get_service(hbase_service_name) process_service(hbase, 'hbase') + flume = cluster.get_service(flume_service_name) + process_service(flume, 'flume') + + sqoop = cluster.get_service(sqoop_service_name) + process_service(sqoop, 'sqoop') + + solr = cluster.get_service(solr_service_name) + process_service(solr, 'solr') + + ks_indexer = cluster.get_service(ks_indexer_service_name) + process_service(ks_indexer, 'ks_indexer') + + impala = cluster.get_service(impala_service_name) + process_service(impala, 'impala') + + sentry = cluster.get_service(sentry_service_name) + process_service(sentry, 'sentry') + + if __name__ == '__main__': main() diff --git a/sahara/plugins/cdh/resources/create_sentry_db.sql b/sahara/plugins/cdh/resources/create_sentry_db.sql new file mode 100644 index 00000000..30cb6ab3 --- /dev/null +++ b/sahara/plugins/cdh/resources/create_sentry_db.sql @@ -0,0 +1,4 @@ +CREATE ROLE sentry LOGIN PASSWORD '%s'; +CREATE DATABASE sentry OWNER sentry encoding 'UTF8'; +GRANT ALL PRIVILEGES ON DATABASE sentry TO sentry; +COMMIT; diff --git a/sahara/plugins/cdh/resources/flume-agent.json b/sahara/plugins/cdh/resources/flume-agent.json new file mode 100644 index 00000000..33116564 --- /dev/null +++ b/sahara/plugins/cdh/resources/flume-agent.json @@ -0,0 +1,260 @@ +[ + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", + "name": "log_directory_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Used to select an agent configuration to use from flume.conf. 
Multiple agents may share the same agent name, in which case they will be assigned the same agent configuration.", + "display_name": "Agent Name", + "name": "agent_name", + "value": "tier1" + }, + { + "desc": "
This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.
Each rule has some or all of the following fields:
Example:{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}
This rule will send events to Cloudera Manager for every StringIndexOutOfBoundsException, up to a maximum of 10 every minute.
", + "display_name": "Rules to Extract Events from Log Files", + "name": "log_event_whitelist", + "value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n" + }, + { + "desc": "Text that goes into morphlines.conf file used by the Flume-NG Solr sink. The text goes verbatim into the config file except that $ZK_HOST is replaced by the ZooKeeper quorum of the Solr service.", + "display_name": "Morphlines File", + "name": "agent_morphlines_conf_file", + "value": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Application configuration file in HOCON format (Human-Optimized Config Object Notation). 
\n# HOCON syntax is defined at http://github.com/typesafehub/config/blob/master/HOCON.md\n# and also used by Akka (http://www.akka.io) and Play (http://www.playframework.org/).\n# For more examples see http://doc.akka.io/docs/akka/2.1.2/general/configuration.html\n\n# morphline.conf example file\n# this is a comment\n\n# Specify server locations in a SOLR_LOCATOR variable; used later in variable substitutions:\nSOLR_LOCATOR : {\n # Name of solr collection\n collection : collection1\n \n # ZooKeeper ensemble\n zkHost : \"$ZK_HOST\"\n \n # Relative or absolute path to a directory containing conf/solrconfig.xml and conf/schema.xml\n # If this path is uncommented it takes precedence over the configuration stored in ZooKeeper. \n # solrHomeDir : \"example/solr/collection1\"\n \n # The maximum number of documents to send to Solr per network batch (throughput knob)\n # batchSize : 100\n}\n\n# Specify an array of one or more morphlines, each of which defines an ETL \n# transformation chain. A morphline consists of one or more (potentially \n# nested) commands. A morphline is a way to consume records (e.g. Flume events, \n# HDFS files or blocks), turn them into a stream of records, and pipe the stream \n# of records through a set of easily configurable transformations on it's way to \n# Solr (or a MapReduceIndexerTool RecordWriter that feeds via a Reducer into Solr).\nmorphlines : [\n {\n # Name used to identify a morphline. E.g. 
used if there are multiple morphlines in a \n # morphline config file\n id : morphline1 \n \n # Import all morphline commands in these java packages and their subpackages.\n # Other commands that may be present on the classpath are not visible to this morphline.\n importCommands : [\"org.kitesdk.**\", \"org.apache.solr.**\"]\n \n commands : [ \n { \n # Parse Avro container file and emit a record for each avro object\n readAvroContainer {\n # Optionally, require the input record to match one of these MIME types:\n # supportedMimeTypes : [avro/binary]\n \n # Optionally, use a custom Avro schema in JSON format inline:\n # schemaString : \"\"\"$KERBEROS_PRINCIPAL
\" and \"$KERBEROS_KEYTAB
\", which will be replaced by the principal name and the keytab path respectively.",
+ "display_name": "Configuration File",
+ "name": "agent_config_file",
+ "value": "# Please paste flume.conf here. Example:\n\n# Sources, channels, and sinks are defined per\n# agent name, in this case 'tier1'.\ntier1.sources = source1\ntier1.channels = channel1\ntier1.sinks = sink1\n\n# For each source, channel, and sink, set\n# standard properties.\ntier1.sources.source1.type = netcat\ntier1.sources.source1.bind = 127.0.0.1\ntier1.sources.source1.port = 9999\ntier1.sources.source1.channels = channel1\ntier1.channels.channel1.type = memory\ntier1.sinks.sink1.type = logger\ntier1.sinks.sink1.channel = channel1\n\n# Other properties are specific to each type of\n# source, channel, or sink. In this case, we\n# specify the capacity of the memory channel.\ntier1.channels.channel1.capacity = 100\n"
+ },
+ {
+ "desc": "Whether or not periodic stacks collection is enabled.",
+ "display_name": "Stacks Collection Enabled",
+ "name": "stacks_collection_enabled",
+ "value": "false"
+ },
+ {
+ "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
+ "display_name": "Automatically Restart Process",
+ "name": "process_auto_restart",
+ "value": "true"
+ },
+ {
+ "desc": "The maximum number of rolled log files to keep for Agent logs. Typically used by log4j.",
+ "display_name": "Agent Maximum Log File Backups",
+ "name": "max_log_backup_index",
+ "value": "10"
+ },
+ {
+ "desc": "The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Role Triggers", + "name": "role_triggers", + "value": "[]" + }, + { + "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Soft Limit", + "name": "rm_memory_soft_limit", + "value": "-1" + }, + { + "desc": "Enables the health test that the Agent's process state is consistent with the role configuration", + "display_name": "Agent Process Health Test", + "name": "flume_agent_scm_health_enabled", + "value": "true" + }, + { + "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Hard Limit", + "name": "rm_memory_hard_limit", + "value": "-1" + }, + { + "desc": "The period to review when computing unexpected exits.", + "display_name": "Unexpected Exits Monitoring Period", + "name": "unexpected_exits_window", + "value": "5" + }, + { + "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.", + "display_name": "File Descriptor Monitoring Thresholds", + "name": "flume_agent_fd_thresholds", + "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" + }, + { + "desc": "The amount of stacks data that will be retained. 
After the retention limit is hit, the oldest data will be deleted.", + "display_name": "Stacks Collection Data Retention", + "name": "stacks_collection_data_retention", + "value": "104857600" + }, + { + "desc": "The port on which the Flume web server listens for requests.", + "display_name": "HTTP Port", + "name": "agent_http_port", + "value": "41414" + }, + { + "desc": "For advanced use only, a string to be inserted into log4j.properties for this role only.", + "display_name": "Agent Logging Advanced Configuration Snippet (Safety Valve)", + "name": "log4j_safety_valve", + "value": null + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", + "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", + "name": "log_directory_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.", + "display_name": "Java Heap Size of Agent in Bytes", + "name": "agent_java_heapsize", + "value": "1073741824" + }, + { + "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", + "display_name": "Maximum Process File Descriptors", + "name": "rlimit_fds", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Health Alerts for this Role", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "Disables import of ZooKeeper configuration from the HBase classpath. This prevents zoo.cfg from overriding hbase-site.xml for Zookeeper quorum information. 
This option is only supported on CDH 4.4 or later deployments.", + "display_name": "HBase sink prefer hbase-site.xml over Zookeeper config", + "name": "agent_disable_zoo_cfg", + "value": "true" + }, + { + "desc": "The maximum size, in megabytes, per log file for Agent logs. Typically used by log4j.", + "display_name": "Agent Max Log Size", + "name": "max_log_size", + "value": "200" + }, + { + "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.", + "display_name": "Kill When Out of Memory", + "name": "oom_sigkill_enabled", + "value": "true" + }, + { + "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.", + "display_name": "Dump Heap When Out of Memory", + "name": "oom_heap_dump_enabled", + "value": "false" + }, + { + "desc": "Directory where Flume Agent will place its log files.", + "display_name": "Flume Agent Log Directory", + "name": "flume_agent_log_dir", + "value": "/var/log/flume-ng" + }, + { + "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", + "display_name": "Unexpected Exits Thresholds", + "name": "unexpected_exits_thresholds", + "value": "{\"critical\":\"any\",\"warning\":\"never\"}" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "Text that goes verbatim into custom-mimetypes.xml file used by the Flume-NG Solr sink.", + "display_name": "Custom Mime-types File", + "name": "agent_custom_mimetypes_file", + "value": "\n\n\nThe configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. 
Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific service. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated. For example, here is a JSON formatted trigger that fires if there are more than 10 DataNodes with more than 500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Service Triggers", + "name": "service_triggers", + "value": "[]" + }, + { + "desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.", + "display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)", + "name": "smon_derived_configs_safety_valve", + "value": null + }, + { + "desc": "Name of the HDFS service that this Flume service instance depends on", + "display_name": "HDFS Service", + "name": "hdfs_service", + "value": null + }, + { + "desc": "The health test thresholds of the overall Agent health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Agents falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Agents falls below the critical threshold.", + "display_name": "Healthy Agent Monitoring Thresholds", + "name": "flume_agents_healthy_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"95.0\"}" + }, + { + "desc": "When set, each role identifies important log events and forwards them to Cloudera Manager.", + "display_name": "Enable Log Event Capture", + "name": "catch_events", + "value": "true" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/impala-catalogserver.json b/sahara/plugins/cdh/resources/impala-catalogserver.json new file mode 100644 index 00000000..5f9f283b --- /dev/null +++ b/sahara/plugins/cdh/resources/impala-catalogserver.json @@ -0,0 +1,230 @@ +[ + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. 
This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", + "name": "log_directory_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "The amount of time allowed after this role is started that failures of health checks that rely on communication with this role will be tolerated.", + "display_name": "Health Check Startup Tolerance", + "name": "catalogserver_startup_tolerance", + "value": "5" + }, + { + "desc": "Port where Catalog Server debug web server runs.", + "display_name": "Catalog Server HTTP Server Port", + "name": "catalogserver_webserver_port", + "value": "25020" + }, + { + "desc": "The health test thresholds on the duration of the metrics request to the web server.", + "display_name": "Web Metric Collection Duration", + "name": "catalogserver_web_metric_collection_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}" + }, + { + "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.", + "display_name": "File Descriptor Monitoring Thresholds", + "name": "catalogserver_fd_thresholds", + "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" + }, + { + "desc": "Port where Catalog Server is exported.", + "display_name": "Catalog Server Service Port", + "name": "catalog_service_port", + "value": "26000" + }, + { + "desc": "Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; 1 means buffer WARNING only, ...)", + "display_name": "Catalog Server Log Buffer Level", + "name": "logbuflevel", + "value": "0" + }, + { + "desc": "The location of the debug web server's SSL certificate file, in .pem format. 
If empty, webserver SSL support is not enabled.", + "display_name": "SSL Certificate File Location", + "name": "webserver_certificate_file", + "value": null + }, + { + "desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.", + "display_name": "Web Metric Collection", + "name": "catalogserver_web_metric_collection_enabled", + "value": "true" + }, + { + "desc": "The amount of time to wait for the Catalog Server to fully start up and connect to the StateStore before enforcing the connectivity check.", + "display_name": "Catalog Server Connectivity Tolerance at Startup", + "name": "catalogserver_connectivity_tolerance", + "value": "180" + }, + { + "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", + "display_name": "Automatically Restart Process", + "name": "process_auto_restart", + "value": "true" + }, + { + "desc": "The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated. For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Role Triggers", + "name": "role_triggers", + "value": "[]" + }, + { + "desc": "Timeout for requests to the Hive Metastore Server from Catalog Server. Consider increasing this if you have tables with a lot of metadata and see timeout errors.", + "display_name": "Catalog Server Hive Metastore Connection Timeout", + "name": "hive_metastore_timeout", + "value": "3600" + }, + { + "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Soft Limit", + "name": "rm_memory_soft_limit", + "value": "-1" + }, + { + "desc": "Username for Catalog Server web server authentication.", + "display_name": "Catalog Server Web Server Username", + "name": "webserver_htpassword_user", + "value": null + }, + { + "desc": "Password for Catalog Server web server authentication.", + "display_name": "Catalog Server Web Server User Password", + "name": "webserver_htpassword_password", + "value": null + }, + { + "desc": "Enables the health test that the Impala Catalog Server's process state is consistent with the role configuration", + "display_name": "Impala Catalog Server Process Health Test", + "name": "catalogserver_scm_health_enabled", + "value": "true" + }, + { + "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. 
By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Hard Limit", + "name": "rm_memory_hard_limit", + "value": "-1" + }, + { + "desc": "For advanced use only, key-value pairs (one on each line) to be added (verbatim) to Catalog Server command line flags. Key names should begin with a hyphen(-). For example: -log_filename=foo.log", + "display_name": "Catalog Server Command Line Argument Advanced Configuration Snippet (Safety Valve)", + "name": "catalogd_cmd_args_safety_valve", + "value": null + }, + { + "desc": "The period to review when computing unexpected exits.", + "display_name": "Unexpected Exits Monitoring Period", + "name": "unexpected_exits_window", + "value": "5" + }, + { + "desc": "For advanced use only, a string to be inserted into hdfs-site.xml for this role only.", + "display_name": "Catalog Server HDFS Advanced Configuration Snippet (Safety Valve)", + "name": "catalogd_hdfs_site_conf_safety_valve", + "value": null + }, + { + "desc": "Directory where Catalog Server will place its log files.", + "display_name": "Catalog Server Log Directory", + "name": "log_dir", + "value": "/var/log/catalogd" + }, + { + "desc": "For advanced use only, a string to be inserted into hbase-site.xml for this role only.", + "display_name": "Catalog Server HBase Advanced Configuration Snippet (Safety Valve)", + "name": "catalogd_hbase_conf_safety_valve", + "value": null + }, + { + "desc": "Enable/Disable Catalog Server web server. 
This web server contains useful information about Catalog Server daemon.", + "display_name": "Enable Catalog Server Web Server", + "name": "catalogd_enable_webserver", + "value": "true" + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", + "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", + "name": "log_directory_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "Enables the health test that verifies the Catalog Server is connected to the StateStore", + "display_name": "Catalog Server Connectivity Health Test", + "name": "catalogserver_connectivity_health_enabled", + "value": "true" + }, + { + "desc": "When computing the overall Impala Catalog Server health, consider the host's health.", + "display_name": "Impala Catalog Server Host Health Test", + "name": "catalogserver_host_health_enabled", + "value": "true" + }, + { + "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", + "display_name": "Maximum Process File Descriptors", + "name": "rlimit_fds", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Health Alerts for this Role", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "The maximum size, in megabytes, per log file for Impala Catalog Server logs. 
Typically used by log4j.", + "display_name": "Impala Catalog Server Max Log Size", + "name": "max_log_size", + "value": "200" + }, + { + "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", + "display_name": "Unexpected Exits Thresholds", + "name": "unexpected_exits_thresholds", + "value": "{\"critical\":\"any\",\"warning\":\"never\"}" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "For advanced use only, a string to be inserted into hive-site.xml for this role only.", + "display_name": "Catalog Server Hive Advanced Configuration Snippet (Safety Valve)", + "name": "catalogd_hive_conf_safety_valve", + "value": null + }, + { + "desc": "Verbose logging level for the GLog logger. These messages are always logged at 'INFO' log level, so this setting has no effect if Logging Threshold is set to 'WARN' or above.", + "display_name": "Catalog Server Verbose Log Level", + "name": "log_verbose_level", + "value": "1" + }, + { + "desc": "The health test thresholds on the resident size of the process.", + "display_name": "Resident Set Size Thresholds", + "name": "process_resident_set_size_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup I/O Weight", + "name": "rm_io_weight", + "value": "500" + }, + { + "desc": "Number of CPU shares to assign to this role. 
The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup CPU Shares", + "name": "rm_cpu_shares", + "value": "1024" + }, + { + "desc": "The minimum log level for Impala Catalog Server logs", + "display_name": "Impala Catalog Server Logging Threshold", + "name": "log_threshold", + "value": "INFO" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/impala-impalad.json b/sahara/plugins/cdh/resources/impala-impalad.json new file mode 100644 index 00000000..cc4f75cb --- /dev/null +++ b/sahara/plugins/cdh/resources/impala-impalad.json @@ -0,0 +1,356 @@ +[ + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", + "name": "log_directory_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Configures the health check thresholds for monitoring free space on the filesystems that contain Impala's scratch directories. Specified as a percentage of the capacity on the filesystem. 
This setting is not used if a Scratch Directories Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Impala Scratch Directories Free Space Monitoring Percentage Thresholds", + "name": "impalad_scratch_directories_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "User-defined function (UDF) libraries are copied from HDFS into this local directory.", + "display_name": "Local UDF Library Dir", + "name": "local_library_dir", + "value": "/var/lib/impala/udfs" + }, + { + "desc": "Maximum number of query results a client may request to be cached on a per-query basis to support restarting fetches. This option guards against unreasonably large result caches requested by clients. Requests exceeding this maximum will be rejected.", + "display_name": "Result Cache Maximum Size", + "name": "impalad_result_cache_max_size", + "value": "100000" + }, + { + "desc": "The timeout used by the Cloudera Manager Agent's query monitor when communicating with the Impala Daemon web server, specified in seconds.", + "display_name": "Query Monitoring Timeout", + "name": "executing_queries_timeout_seconds", + "value": "5.0" + }, + { + "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup I/O Weight", + "name": "rm_io_weight", + "value": "500" + }, + { + "desc": "Directories where Impala Daemon will write data such as spilling information to disk to free up memory. 
This can potentially be large amounts of data.", + "display_name": "Impala Daemon Scratch Directories", + "name": "scratch_dirs", + "value": null + }, + { + "desc": "Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; 1 means buffer WARNING only, ...)", + "display_name": "Impala Daemon Log Buffer Level", + "name": "logbuflevel", + "value": "0" + }, + { + "desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.", + "display_name": "Web Metric Collection", + "name": "impalad_web_metric_collection_enabled", + "value": "true" + }, + { + "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup CPU Shares", + "name": "rm_cpu_shares", + "value": "1024" + }, + { + "desc": "For advanced use only, key-value pairs (one on each line) to be added (verbatim) to Impala Daemon command-line flags. Key names should begin with a hyphen(-). For example: -log_filename=foo.log", + "display_name": "Impala Daemon Command Line Argument Advanced Configuration Snippet (Safety Valve)", + "name": "impalad_cmd_args_safety_valve", + "value": null + }, + { + "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. 
By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Hard Limit", + "name": "rm_memory_hard_limit", + "value": "-1" + }, + { + "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", + "display_name": "Automatically Restart Process", + "name": "process_auto_restart", + "value": "true" + }, + { + "desc": "Enable or disable the Impala Daemon web server. This web server contains useful information about Impala Daemon.", + "display_name": "Enable Impala Daemon Web Server", + "name": "impalad_enable_webserver", + "value": "true" + }, + { + "desc": "Port on which HiveServer2 client requests are served by Impala Daemons.", + "display_name": "Impala Daemon HiveServer2 Port", + "name": "hs2_port", + "value": "21050" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Soft Limit", + "name": "rm_memory_soft_limit", + "value": "-1" + }, + { + "desc": "Username for Impala Daemon webserver authentication.", + "display_name": "Impala Daemon Web Server Username", + "name": "webserver_htpassword_user", + "value": null + }, + { + "desc": "Enables audit event generation by Impala daemons. 
The audit log file will be placed in the directory specified by 'Impala Daemon Audit Log Directory' parameter.", + "display_name": "Enable Impala Audit Event Generation", + "name": "enable_audit_event_log", + "value": "false" + }, + { + "desc": "When computing the overall Impala Daemon health, consider the host's health.", + "display_name": "Impala Daemon Host Health Test", + "name": "impalad_host_health_enabled", + "value": "true" + }, + { + "desc": "The directory in which Impala Daemon audit event log files are written. If \"Impala Audit Event Generation\" property is enabled, Impala will generate its audit logs in this directory.", + "display_name": "Impala Daemon Audit Log Directory", + "name": "audit_event_log_dir", + "value": "/var/log/impalad/audit" + }, + { + "desc": "The period to review when computing unexpected exits.", + "display_name": "Unexpected Exits Monitoring Period", + "name": "unexpected_exits_window", + "value": "5" + }, + { + "desc": "An XML string to use verbatim for the contents of fair-scheduler.xml for Impala Daemons. 
This configuration only has effect on Impala versions 1.3 or greater.", + "display_name": "Impala Daemon Fair Scheduler Advanced Configuration", + "name": "impalad_fair_scheduler_safety_valve", + "value": null + }, + { + "desc": "A list of key-value pairs of additional query options to pass to the Impala Daemon command line, separated by ','.", + "display_name": "Impala Daemon Query Options Advanced Configuration Snippet (Safety Valve)", + "name": "default_query_options", + "value": "" + }, + { + "desc": "Port on which Beeswax client requests are served by Impala Daemons.", + "display_name": "Impala Daemon Beeswax Port", + "name": "beeswax_port", + "value": "21000" + }, + { + "desc": "Port where StateStoreSubscriberService is running.", + "display_name": "StateStoreSubscriber Service Port", + "name": "state_store_subscriber_port", + "value": "23000" + }, + { + "desc": "For advanced use only, a string to be inserted into log4j.properties for this role only.", + "display_name": "Impala Daemon Logging Advanced Configuration Snippet (Safety Valve)", + "name": "log4j_safety_valve", + "value": null + }, + { + "desc": "Enables the health check that determines if the Impala Daemon is ready to process queries.", + "display_name": "Impala Daemon Ready Status Health Check", + "name": "impalad_ready_status_check_enabled", + "value": "true" + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", + "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", + "name": "log_directory_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "An XML snippet to append to llama-site.xml for Impala Daemons. 
This configuration only has effect on Impala versions 1.3 or greater.", + "display_name": "Impala Daemon Llama Site Advanced Configuration", + "name": "impala_llama_site_conf_safety_valve", + "value": null + }, + { + "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", + "display_name": "Maximum Process File Descriptors", + "name": "rlimit_fds", + "value": null + }, + { + "desc": "Address of the load balancer used for Impala Daemons. Should be specified in host:port format.", + "display_name": "Impala Daemons Load Balancer", + "name": "impalad_load_balancer", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Health Alerts for this Role", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "Directory where Impala Daemon will place its log files.", + "display_name": "Impala Daemon Log Directory", + "name": "log_dir", + "value": "/var/log/impalad" + }, + { + "desc": "For advanced use only, a string to be inserted into hbase-site.xml for this role only.", + "display_name": "Impala Daemon HBase Advanced Configuration Snippet (Safety Valve)", + "name": "impalad_hbase_conf_safety_valve", + "value": null + }, + { + "desc": "The maximum size, in megabytes, per log file for Impala Daemon logs. 
Typically used by log4j.", + "display_name": "Impala Daemon Max Log Size", + "name": "max_log_size", + "value": "200" + }, + { + "desc": "The health test thresholds on the duration of the metrics request to the web server.", + "display_name": "Web Metric Collection Duration", + "name": "impalad_web_metric_collection_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}" + }, + { + "desc": "The polling period of the Impala query monitor in the Cloudera Manager Agent, specified in seconds. If set to zero, query monitoring is disabled.", + "display_name": "Query Monitoring Period", + "name": "query_monitoring_period_seconds", + "value": "1.0" + }, + { + "desc": "For advanced use only, a string to be inserted into hive-site.xml for this role only.", + "display_name": "Impala Daemon Hive Advanced Configuration Snippet (Safety Valve)", + "name": "impala_hive_conf_safety_valve", + "value": null + }, + { + "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", + "display_name": "Unexpected Exits Thresholds", + "name": "unexpected_exits_thresholds", + "value": "{\"critical\":\"any\",\"warning\":\"never\"}" + }, + { + "desc": "The amount of time at Impala Daemon startup allowed for the Impala Daemon to start accepting new queries for processing.", + "display_name": "Impala Daemon Ready Status Startup Tolerance", + "name": "impalad_ready_status_check_startup_tolerance", + "value": "180" + }, + { + "desc": "The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated. For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Role Triggers", + "name": "role_triggers", + "value": "[]" + }, + { + "desc": "Enables the health test that verifies the Impala Daemon is connected to the StateStore", + "display_name": "Impala Daemon Connectivity Health Test", + "name": "impalad_connectivity_health_enabled", + "value": "true" + }, + { + "desc": "Port where Impala debug web server runs.", + "display_name": "Impala Daemon HTTP Server Port", + "name": "impalad_webserver_port", + "value": "25000" + }, + { + "desc": "Verbose logging level for the GLog logger. These messages are always logged at 'INFO' log level, so this setting has no effect if Logging Threshold is set to 'WARN' or above. ", + "display_name": "Impala Daemon Verbose Log Level", + "name": "log_verbose_level", + "value": "1" + }, + { + "desc": "The health test thresholds on the resident size of the process.", + "display_name": "Resident Set Size Thresholds", + "name": "process_resident_set_size_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Enables the health test that the Impala Daemon's process state is consistent with the role configuration", + "display_name": "Impala Daemon Process Health Test", + "name": "impalad_scm_health_enabled", + "value": "true" + }, + { + "desc": "The maximum size (in queries) of the Impala Daemon audit event log file before a new one is created.", + "display_name": "Impala Daemon Maximum Audit Log File Size", + "name": "max_audit_event_log_file_size", + "value": "5000" + }, + { + "desc": "Timeout for requests to the Hive Metastore Server from Impala. Consider increasing this if you have tables with a lot of metadata and see timeout errors.", + "display_name": "Impala Daemon Hive Metastore Connection Timeout", + "name": "hive_metastore_timeout", + "value": "3600" + }, + { + "desc": "Memory limit in bytes for Impala Daemon, enforced by the daemon itself. If reached, queries running on the Impala Daemon may be killed. 
Leave it blank to let Impala pick its own limit. Use a value of -1 B to specify no limit.", + "display_name": "Impala Daemon Memory Limit", + "name": "impalad_memory_limit", + "value": null + }, + { + "desc": "Password for Impala Daemon webserver authentication.", + "display_name": "Impala Daemon Web Server User Password", + "name": "webserver_htpassword_password", + "value": null + }, + { + "desc": "The amount of time to wait for the Impala Daemon to fully start up and connect to the StateStore before enforcing the connectivity check.", + "display_name": "Impala Daemon Connectivity Tolerance at Startup", + "name": "impalad_connectivity_tolerance", + "value": "180" + }, + { + "desc": "Port where Llama notification callback should be started", + "display_name": "Llama Callback Port", + "name": "llama_callback_port", + "value": "28000" + }, + { + "desc": "The location of the debug webserver's SSL certificate file, in .pem format. If empty, webserver SSL support is not enabled.", + "display_name": "SSL Certificate File Location", + "name": "webserver_certificate_file", + "value": null + }, + { + "desc": "The health test thresholds of the number of file descriptors used. 
Specified as a percentage of file descriptor limit.", + "display_name": "File Descriptor Monitoring Thresholds", + "name": "impalad_fd_thresholds", + "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystems that contain Impala's scratch directories.", + "display_name": "Impala Scratch Directories Free Space Monitoring Absolute Thresholds", + "name": "impalad_scratch_directories_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "For advanced use only, a string to be inserted into hdfs-site.xml for this role only.", + "display_name": "Impala Daemon HDFS Advanced Configuration Snippet (Safety Valve)", + "name": "impala_hdfs_site_conf_safety_valve", + "value": null + }, + { + "desc": "Port on which ImpalaBackendService is exported.", + "display_name": "Impala Daemon Backend Port", + "name": "be_port", + "value": "22000" + }, + { + "desc": "The minimum log level for Impala Daemon logs", + "display_name": "Impala Daemon Logging Threshold", + "name": "log_threshold", + "value": "INFO" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/impala-llama.json b/sahara/plugins/cdh/resources/impala-llama.json new file mode 100644 index 00000000..9aaa241c --- /dev/null +++ b/sahara/plugins/cdh/resources/impala-llama.json @@ -0,0 +1,338 @@ +[ + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. 
This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", + "name": "log_directory_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Directory where Llama will place its log files.", + "display_name": "Llama Log Directory", + "name": "llama_log_dir", + "value": "/var/log/impala-llama" + }, + { + "desc": "Whether Llama should cache allocated resources on release.", + "display_name": "Enable Resource Caching", + "name": "llama_am_cache_enabled", + "value": "true" + }, + { + "desc": "Whether or not periodic stacks collection is enabled.", + "display_name": "Stacks Collection Enabled", + "name": "stacks_collection_enabled", + "value": "false" + }, + { + "desc": "The group that the Llama processes should run as.", + "display_name": "Llama System Group", + "name": "process_groupname", + "value": "llama" + }, + { + "desc": "Queues Llama ApplicationMaster should connect to at start up.", + "display_name": "Core Queues", + "name": "llama_am_core_queues", + "value": "" + }, + { + "desc": "Maximum amount of time the backed off reservations will be in 'backed off' state. The actual amount time is a random value between the minimum and the maximum.", + "display_name": "Anti-Deadlock Maximum Delay", + "name": "llama_am_gang_anti_deadlock_max_delay_ms", + "value": "30000" + }, + { + "desc": "For advanced use only, a string to be inserted into llama-site.xml for this role only.", + "display_name": "Impala Llama ApplicationMaster Advanced Configuration Snippet (Safety Valve) for llama-site.xml", + "name": "llama_config_valve", + "value": null + }, + { + "desc": "ACL for Impala ApplicationMaster clients. The ACL is a comma-separated list of user and group names. The user and group list is separated by a blank. For e.g. \"alice,bob users,wheel\". 
A special value of \"*\" means all users are allowed. These take effect only if security is enabled.", + "display_name": "Client ACLs", + "name": "llama_am_server_thrift_client_acl", + "value": "*" + }, + { + "desc": "When computing the overall Impala Llama ApplicationMaster health, consider the host's health.", + "display_name": "Impala Llama ApplicationMaster Host Health Test", + "name": "llama_host_health_enabled", + "value": "true" + }, + { + "desc": "Port on which the Llama ApplicationMaster listens to HTTP requests.", + "display_name": "Llama HTTP Port", + "name": "llama_http_port", + "value": "15001" + }, + { + "desc": "Minimum number of threads used by the Llama ApplicationMaster auxiliary service uses for serving client requests.", + "display_name": "Thrift Server Minimum Threads", + "name": "llama_am_server_thrift_server_min_threads", + "value": "10" + }, + { + "desc": "The method that will be used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that expose an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected that HTTP endpoint is periodically scraped.", + "display_name": "Stacks Collection Method", + "name": "stacks_collection_method", + "value": "jstack" + }, + { + "desc": "Enter an XML string that will be inserted verbatim into the Fair Scheduler allocations file. Overrides the configuration set using the Pools configuration UI. 
This configuration only has effect on Impala versions 1.3 or greater.", + "display_name": "Fair Scheduler XML Advanced Configuration Snippet (Safety Valve)", + "name": "llama_fair_scheduler_safety_valve", + "value": null + }, + { + "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", + "display_name": "Automatically Restart Process", + "name": "process_auto_restart", + "value": "true" + }, + { + "desc": "The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Role Triggers", + "name": "role_triggers", + "value": "[]" + }, + { + "desc": "Client notification retry interval, in milliseconds.", + "display_name": "Client Notification Retry Interval", + "name": "llama_am_server_thrift_client_notifier_retry_interval_ms", + "value": "5000" + }, + { + "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Soft Limit", + "name": "rm_memory_soft_limit", + "value": "-1" + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", + "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", + "name": "log_directory_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "Timeout policy for resources being cached.", + "display_name": "Resource Caching Idle Timeout", + "name": "llama_am_cache_eviction_timeout_policy_idle_timeout_ms", + "value": "30000" + }, + { + "desc": "Maximum size in bytes for the Java Process heap memory. 
Passed to Java -Xmx.", + "display_name": "Java Heap Size of Impala Llama ApplicationMaster in Bytes", + "name": "llama_java_heapsize", + "value": "268435456" + }, + { + "desc": "Interval of time without any new allocation that will trigger the Impala ApplicationMaster anti-deadlock logic.", + "display_name": "Anti-Deadlock No Allocation Limit Interval", + "name": "llama_am_gang_anti_deadlock_no_allocation_limit_ms", + "value": "30000" + }, + { + "desc": "The period to review when computing unexpected exits.", + "display_name": "Unexpected Exits Monitoring Period", + "name": "unexpected_exits_window", + "value": "5" + }, + { + "desc": "Maximum number of threads used by the Llama ApplicationMaster auxiliary service uses for serving client requests.", + "display_name": "Thrift Server Maximum Threads", + "name": "llama_am_server_thrift_server_max_threads", + "value": "50" + }, + { + "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup I/O Weight", + "name": "rm_io_weight", + "value": "500" + }, + { + "desc": "The amount of stacks data that will be retained. After the retention limit is hit, the oldest data will be deleted.", + "display_name": "Stacks Collection Data Retention", + "name": "stacks_collection_data_retention", + "value": "104857600" + }, + { + "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. 
By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Hard Limit", + "name": "rm_memory_hard_limit", + "value": "-1" + }, + { + "desc": "Time in milliseconds after which Llama will discard its AM for a queue that has been empty of reservations. Does not apply to queues specified with the Core Queues property.", + "display_name": "Queue Expiration Age", + "name": "llama_am_queue_expire_ms", + "value": "300000" + }, + { + "desc": "For advanced use only, a string to be inserted into log4j.properties for this role only.", + "display_name": "Impala Llama ApplicationMaster Logging Advanced Configuration Snippet (Safety Valve)", + "name": "log4j_safety_valve", + "value": null + }, + { + "desc": "Minimum amount of time the backed off reservations will be in 'backed off' state. The actual amount time is a random value between the minimum and the maximum.", + "display_name": "Anti-Deadlock Minimum Delay", + "name": "llama_am_gang_anti_deadlock_min_delay_ms", + "value": "10000" + }, + { + "desc": "Port on which the Llama ApplicationMaster listens to administrative requests on its administrative Thrift interface.", + "display_name": "Llama Thrift Admin Port", + "name": "llama_am_server_thrift_admin_address", + "value": "15002" + }, + { + "desc": "The health test thresholds of the number of file descriptors used. 
Specified as a percentage of file descriptor limit.", + "display_name": "File Descriptor Monitoring Thresholds", + "name": "llama_fd_thresholds", + "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" + }, + { + "desc": "If enabled, the Impala Llama ApplicationMaster binds to the wildcard address (\"0.0.0.0\") on all of its ports.", + "display_name": "Bind Impala Llama ApplicationMaster to Wildcard Address", + "name": "llama_bind_wildcard", + "value": "false" + }, + { + "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", + "display_name": "Maximum Process File Descriptors", + "name": "rlimit_fds", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Health Alerts for this Role", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "The maximum size, in megabytes, per log file for Impala Llama ApplicationMaster logs. Typically used by log4j.", + "display_name": "Impala Llama ApplicationMaster Max Log Size", + "name": "max_log_size", + "value": "200" + }, + { + "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.", + "display_name": "Kill When Out of Memory", + "name": "oom_sigkill_enabled", + "value": "true" + }, + { + "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.", + "display_name": "Dump Heap When Out of Memory", + "name": "oom_heap_dump_enabled", + "value": "false" + }, + { + "desc": "These arguments will be passed as part of the Java command line. 
Commonly, garbage collection flags or extra debugging flags would be passed here.", + "display_name": "Java Configuration Options for Llama Server", + "name": "llama_java_opts", + "value": "" + }, + { + "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", + "display_name": "Unexpected Exits Thresholds", + "name": "unexpected_exits_thresholds", + "value": "{\"critical\":\"any\",\"warning\":\"never\"}" + }, + { + "desc": "Llama ApplicationMaster heartbeat interval, in milliseconds. On each heartbeat the ApplicationMaster submits new reservations to YARN ResourceManager and gets updates from it.", + "display_name": "AM Heartbeat Interval", + "name": "llama_am_server_thrift_client_notifier_heartbeat_ms", + "value": "5000" + }, + { + "desc": "The user that the Llama process should run as.", + "display_name": "Llama System User", + "name": "process_username", + "value": "llama" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "Percentage of resources that will be backed off by the Impala ApplicationMaster anti-deadlock logic. Random reservations will be backed off until the percentage of backed off resources reaches this percentage.", + "display_name": "Anti-Deadlock Backoff Percentage", + "name": "llama_am_gang_anti_deadlock_backoff_percent", + "value": "30" + }, + { + "desc": "Maximum number of retries for a client notification. After the maximum number of client notification retries has been reached without success the client is considered lost and all its reservations are released. 
A successful client notification resets the retries count.", + "display_name": "Maximum Client Notification Retries", + "name": "llama_am_server_thrift_client_notifier_max_retries", + "value": "5" + }, + { + "desc": "Port on which the Llama ApplicationMaster serves its Thrift interface.", + "display_name": "Llama Thrift Port", + "name": "llama_port", + "value": "15000" + }, + { + "desc": "Whether to break resource requests into smaller requests of standard size before the resource cache. The sizes are taken from Yarn settings Container Memory Increment and Container Virtual CPU Cores Increment.", + "display_name": "Enable Resource Cache Normalization", + "name": "llama_am_resource_normalizing_enabled", + "value": "true" + }, + { + "desc": "The maximum number of rolled log files to keep for Impala Llama ApplicationMaster logs. Typically used by log4j.", + "display_name": "Impala Llama ApplicationMaster Maximum Log File Backups", + "name": "max_log_backup_index", + "value": "10" + }, + { + "desc": "Socket time, in milliseconds, used Llama ApplicationMaster auxiliary service for all its server and client Thrift connections.", + "display_name": "Thrift Transport Timeout", + "name": "llama_am_server_thrift_transport_timeout_ms", + "value": "60000" + }, + { + "desc": "Enables the health test that the Impala Llama ApplicationMaster's process state is consistent with the role configuration", + "display_name": "Impala Llama ApplicationMaster Process Health Test", + "name": "llama_scm_health_enabled", + "value": "true" + }, + { + "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. 
Defaults to 1024 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup CPU Shares", + "name": "rm_cpu_shares", + "value": "1024" + }, + { + "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it doesn't exist. However, if this directory already exists, role user must have write access to this directory. If this directory is shared amongst multiple roles, it should have 1777 permissions. Note that the heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.", + "display_name": "Heap Dump Directory", + "name": "oom_heap_dump_dir", + "value": "/tmp" + }, + { + "desc": "ACL for Impala ApplicationMaster admins. The ACL is a comma-separated list of user and group names. The user and group list is separated by a blank. For e.g. \"alice,bob users,wheel\". A special value of \"*\" means all users are allowed. These take effect only if security is enabled.", + "display_name": "Administrative Interface ACLs", + "name": "llama_am_server_thrift_admin_acl", + "value": "*" + }, + { + "desc": "The frequency with which stacks will be collected.", + "display_name": "Stacks Collection Frequency", + "name": "stacks_collection_frequency", + "value": "5.0" + }, + { + "desc": "The directory in which stacks logs will be placed. 
If not set, stacks will be logged into a stacks subdirectory of the role's log directory.", + "display_name": "Stacks Collection Directory", + "name": "stacks_collection_directory", + "value": null + }, + { + "desc": "The minimum log level for Impala Llama ApplicationMaster logs", + "display_name": "Impala Llama ApplicationMaster Logging Threshold", + "name": "log_threshold", + "value": "INFO" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/impala-service.json new file mode 100644 index 00000000..a67e02df --- /dev/null +++ b/sahara/plugins/cdh/resources/impala-service.json @@ -0,0 +1,272 @@ +[ + { + "desc": "Name of the HBase service that this Impala service instance depends on", + "display_name": "HBase Service", + "name": "hbase_service", + "value": null + }, + { + "desc": "JSON representation of all the configurations that the Fair Scheduler can take on across all schedules. Typically edited using the Pools configuration UI. 
This configuration only has effect on Impala versions 1.3 or greater.", + "display_name": "Fair Scheduler Allocations", + "name": "impala_scheduled_allocations", + "value": "{\"defaultMinSharePreemptionTimeout\":null,\"defaultQueueSchedulingPolicy\":null,\"fairSharePreemptionTimeout\":null,\"queueMaxAppsDefault\":null,\"queuePlacementRules\":null,\"queues\":[{\"aclAdministerApps\":null,\"aclSubmitApps\":null,\"minSharePreemptionTimeout\":null,\"name\":\"root\",\"queues\":[{\"aclAdministerApps\":null,\"aclSubmitApps\":null,\"minSharePreemptionTimeout\":null,\"name\":\"default\",\"queues\":[],\"schedulablePropertiesList\":[{\"impalaMaxMemory\":null,\"impalaMaxQueuedQueries\":null,\"impalaMaxRunningQueries\":null,\"maxResources\":null,\"maxRunningApps\":null,\"minResources\":null,\"scheduleName\":\"default\",\"weight\":null}],\"schedulingPolicy\":null}],\"schedulablePropertiesList\":[{\"impalaMaxMemory\":null,\"impalaMaxQueuedQueries\":null,\"impalaMaxRunningQueries\":null,\"maxResources\":null,\"maxRunningApps\":null,\"minResources\":null,\"scheduleName\":\"default\",\"weight\":null}],\"schedulingPolicy\":null}],\"userMaxAppsDefault\":null,\"users\":[]}" + }, + { + "desc": "For advanced use only, a string to be inserted into the client configuration for navigator.client.properties.", + "display_name": "Impala Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties", + "name": "navigator_client_config_safety_valve", + "value": null + }, + { + "desc": "\nConfigures the rules for event tracking and coalescing. This feature is\nused to define equivalency between different audit events. When\nevents match, according to a set of configurable parameters, only one\nentry in the audit list is generated for all the matching events.\n
\n\n\nTracking works by keeping a reference to events when they first appear,\nand comparing other incoming events against the \"tracked\" events according\nto the rules defined here.\n
\n\nEvent trackers are defined in a JSON object like the following:
\n\n\n{\n \"timeToLive\" : [integer],\n \"fields\" : [\n {\n \"type\" : [string],\n \"name\" : [string]\n }\n ]\n}\n\n\n
\nWhere:\n
\n\n\nEach field has an evaluator type associated with it. The evaluator defines\nhow the field data is to be compared. The following evaluators are\navailable:\n
\n\n\nThe following is the list of fields that can be used to compare Impala events:\n
\n\nEvent filters are defined in a JSON object like the following:
\n\n\n{\n \"defaultAction\" : (\"accept\", \"discard\"),\n \"rules\" : [\n {\n \"action\" : (\"accept\", \"discard\"),\n \"fields\" : [\n {\n \"name\" : \"fieldName\",\n \"match\" : \"regex\"\n }\n ]\n }\n ]\n}\n\n\n
\nA filter has a default action and a list of rules, in order of precedence.\nEach rule defines an action, and a list of fields to match against the\naudit event.\n
\n\n\nA rule is \"accepted\" if all the listed field entries match the audit\nevent. At that point, the action declared by the rule is taken.\n
\n\n\nIf no rules match the event, the default action is taken. Actions\ndefault to \"accept\" if not defined in the JSON object.\n
\n\n\nThe following is the list of fields that can be filtered for Impala events:\n
\n\nThe configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific service. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger that fires if there are more than 10 DataNodes with more than 500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Service Triggers", + "name": "service_triggers", + "value": "[]" + }, + { + "desc": "Controls which queries admin users can see in the queries list view", + "display_name": "Admin Users Query List Visibility Settings", + "name": "admin_query_list_settings", + "value": "ALL" + }, + { + "desc": "Enable support for encrypted client-server communication using Secure Socket Layer (SSL) for Impala client services. This is only applicable to non-Kerberos environments.", + "display_name": "Enable SSL for Impala Client Services", + "name": "client_services_ssl_enabled", + "value": "false" + }, + { + "desc": "The minimum number of assignments that must occur during the test time period before the threshold values will be checked. Until this number of assignments have been observed in the test time period the health test will be disabled.", + "display_name": "Assignment Locality Minimum Assignments", + "name": "impala_assignment_locality_minimum", + "value": "10" + }, + { + "desc": "When computing the overall IMPALA health, consider Impala StateStore's health", + "display_name": "Impala StateStore Role Health Test", + "name": "impala_statestore_health_enabled", + "value": "true" + }, + { + "desc": "For advanced use only, key-value pairs (one on each line) to be added (verbatim) to Impala Daemon command-line flags. Applies to all roles in this service. Key names should begin with a hyphen(-). For example: -log_filename=foo.log", + "display_name": "Impala Command Line Argument Advanced Configuration Snippet (Safety Valve)", + "name": "impala_cmd_args_safety_valve", + "value": null + }, + { + "desc": "Timeout in milliseconds for all HBase RPCs made by Impala. 
Overrides configuration in HBase service.", + "display_name": "HBase RPC Timeout", + "name": "hbase_rpc_timeout", + "value": "3000" + }, + { + "desc": "Name of the Hive service that this Impala service instance depends on", + "display_name": "Hive Service", + "name": "hive_service", + "value": null + }, + { + "desc": "Enable collection of audit events from the service's roles.", + "display_name": "Enable Collection", + "name": "navigator_audit_enabled", + "value": "true" + }, + { + "desc": "A list specifying the rules to run to determine which Fair Scheduler configuration to use. Typically edited using the Rules configuration UI. This configuration only has effect on Impala versions 1.3 or greater.", + "display_name": "Fair Scheduler Configuration Rules", + "name": "impala_schedule_rules", + "value": "[]" + }, + { + "desc": "If true, LDAP-based authentication for users is enabled.", + "display_name": "Enable LDAP Authentication", + "name": "enable_ldap_auth", + "value": "false" + }, + { + "desc": "Enable HDFS short circuit read. This allows a client co-located with the DataNode to read HDFS file blocks directly. 
This gives a performance boost to distributed clients that are aware of locality.", + "display_name": "Enable HDFS Short Circuit Read", + "name": "dfs_client_read_shortcircuit", + "value": "true" + }, + { + "desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.", + "display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)", + "name": "smon_derived_configs_safety_valve", + "value": null + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/impala-statestore.json b/sahara/plugins/cdh/resources/impala-statestore.json new file mode 100644 index 00000000..1f1c2a38 --- /dev/null +++ b/sahara/plugins/cdh/resources/impala-statestore.json @@ -0,0 +1,206 @@ +[ + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", + "name": "log_directory_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.", + "display_name": "Web Metric Collection", + "name": "statestore_web_metric_collection_enabled", + "value": "true" + }, + { + "desc": "The amount of time allowed after this role is started that failures of health checks that rely on communication with this role will be tolerated.", + "display_name": "Health Check Startup Tolerance", + "name": "statestore_startup_tolerance", + "value": "5" + }, + { + "desc": "For advanced use only, key-value pairs (one on each line) to be added (verbatim) to StateStore command line flags.", + 
"display_name": "Statestore Command Line Argument Advanced Configuration Snippet (Safety Valve)", + "name": "statestore_cmd_args_safety_valve", + "value": null + }, + { + "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.", + "display_name": "File Descriptor Monitoring Thresholds", + "name": "statestore_fd_thresholds", + "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" + }, + { + "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup I/O Weight", + "name": "rm_io_weight", + "value": "500" + }, + { + "desc": "Enables the health test that the Impala StateStore's process state is consistent with the role configuration", + "display_name": "Impala StateStore Process Health Test", + "name": "statestore_scm_health_enabled", + "value": "true" + }, + { + "desc": "Port where StateStoreService is exported.", + "display_name": "StateStore Service Port", + "name": "state_store_port", + "value": "24000" + }, + { + "desc": "When computing the overall Impala StateStore health, consider the host's health.", + "display_name": "Impala StateStore Host Health Test", + "name": "statestore_host_health_enabled", + "value": "true" + }, + { + "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", + "display_name": "Automatically Restart Process", + "name": "process_auto_restart", + "value": "true" + }, + { + "desc": "The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Role Triggers", + "name": "role_triggers", + "value": "[]" + }, + { + "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Soft Limit", + "name": "rm_memory_soft_limit", + "value": "-1" + }, + { + "desc": "Username for Statestore webserver authentication.", + "display_name": "Statestore Web Server Username", + "name": "webserver_htpassword_user", + "value": null + }, + { + "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. 
Defaults to 1024 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup CPU Shares", + "name": "rm_cpu_shares", + "value": "1024" + }, + { + "desc": "Maximum number of tasks allowed to be pending at the thread manager underlying the State Store Thrift server (0 allows infinitely many pending tasks)", + "display_name": "Maximum StateStore Pending Tasks", + "name": "state_store_pending_task_count_max", + "value": "0" + }, + { + "desc": "Directory where StateStore will place its log files.", + "display_name": "StateStore Log Directory", + "name": "log_dir", + "value": "/var/log/statestore" + }, + { + "desc": "The period to review when computing unexpected exits.", + "display_name": "Unexpected Exits Monitoring Period", + "name": "unexpected_exits_window", + "value": "5" + }, + { + "desc": "Port where StateStore debug web server runs.", + "display_name": "StateStore HTTP Server Port", + "name": "statestore_webserver_port", + "value": "25010" + }, + { + "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. 
By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Hard Limit", + "name": "rm_memory_hard_limit", + "value": "-1" + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", + "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", + "name": "log_directory_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", + "display_name": "Maximum Process File Descriptors", + "name": "rlimit_fds", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Health Alerts for this Role", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "The maximum size, in megabytes, per log file for Impala StateStore logs. 
Typically used by log4j.", + "display_name": "Impala StateStore Max Log Size", + "name": "max_log_size", + "value": "200" + }, + { + "desc": "Number of worker threads for the thread manager underlying the State Store Thrift server.", + "display_name": "StateStore Worker Threads", + "name": "state_store_num_server_worker_threads", + "value": "4" + }, + { + "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", + "display_name": "Unexpected Exits Thresholds", + "name": "unexpected_exits_thresholds", + "value": "{\"critical\":\"any\",\"warning\":\"never\"}" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "Verbose logging level for the GLog logger. These messages are always logged at 'INFO' log level, so this setting has no effect if Logging Threshold is set to 'WARN' or above. ", + "display_name": "StateStore Verbose Log Level", + "name": "log_verbose_level", + "value": "1" + }, + { + "desc": "The health test thresholds on the resident size of the process.", + "display_name": "Resident Set Size Thresholds", + "name": "process_resident_set_size_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; 1 means buffer WARNING only, ...)", + "display_name": "StateStore Log Buffer Level", + "name": "logbuflevel", + "value": "0" + }, + { + "desc": "The location of the debug webserver's SSL certificate file, in .pem format. If empty, webserver SSL support is not enabled.", + "display_name": "SSL Certificate File Location", + "name": "webserver_certificate_file", + "value": null + }, + { + "desc": "Enable/Disable StateStore web server. 
This web server contains useful information about StateStore daemon.", + "display_name": "Enable StateStore Web Server", + "name": "statestore_enable_webserver", + "value": "true" + }, + { + "desc": "Password for Statestore webserver authentication.", + "display_name": "Statestore Web Server User Password", + "name": "webserver_htpassword_password", + "value": null + }, + { + "desc": "The minimum log level for Impala StateStore logs", + "display_name": "Impala StateStore Logging Threshold", + "name": "log_threshold", + "value": "INFO" + }, + { + "desc": "The health test thresholds on the duration of the metrics request to the web server.", + "display_name": "Web Metric Collection Duration", + "name": "statestore_web_metric_collection_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/ks_indexer-hbase_indexer.json b/sahara/plugins/cdh/resources/ks_indexer-hbase_indexer.json new file mode 100644 index 00000000..0dfa6d62 --- /dev/null +++ b/sahara/plugins/cdh/resources/ks_indexer-hbase_indexer.json @@ -0,0 +1,206 @@ +[ + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", + "name": "log_directory_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Whether or not periodic stacks collection is enabled.", + "display_name": "Stacks Collection Enabled", + "name": "stacks_collection_enabled", + "value": "false" + }, + { + "desc": "The health test thresholds of the number of file descriptors used. 
Specified as a percentage of file descriptor limit.", + "display_name": "File Descriptor Monitoring Thresholds", + "name": "hbase_indexer_fd_thresholds", + "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" + }, + { + "desc": "The method that will be used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that expose an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected that HTTP endpoint is periodically scraped.", + "display_name": "Stacks Collection Method", + "name": "stacks_collection_method", + "value": "jstack" + }, + { + "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", + "display_name": "Automatically Restart Process", + "name": "process_auto_restart", + "value": "false" + }, + { + "desc": "The maximum number of rolled log files to keep for Lily HBase Indexer logs. Typically used by log4j.", + "display_name": "Lily HBase Indexer Maximum Log File Backups", + "name": "max_log_backup_index", + "value": "10" + }, + { + "desc": "The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Role Triggers", + "name": "role_triggers", + "value": "[]" + }, + { + "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Soft Limit", + "name": "rm_memory_soft_limit", + "value": "-1" + }, + { + "desc": "The group that the HBase Indexer process should run as.", + "display_name": "System Group", + "name": "hbase_indexer_process_groupname", + "value": "hbase" + }, + { + "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.", + "display_name": "Java Configuration Options for Lily HBase Indexer", + "name": "hbase_indexer_java_opts", + "value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-CMSConcurrentMTEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled" + }, + { + "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. 
By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Hard Limit", + "name": "rm_memory_hard_limit", + "value": "-1" + }, + { + "desc": "The period to review when computing unexpected exits.", + "display_name": "Unexpected Exits Monitoring Period", + "name": "unexpected_exits_window", + "value": "5" + }, + { + "desc": "The amount of stacks data that will be retained. After the retention limit is hit, the oldest data will be deleted.", + "display_name": "Stacks Collection Data Retention", + "name": "stacks_collection_data_retention", + "value": "104857600" + }, + { + "desc": "For advanced use only, a string to be inserted into hbase-indexer-site.xml for this role only.", + "display_name": "Lily HBase Indexer Advanced Configuration Snippet (Safety Valve) for hbase-indexer-site.xml", + "name": "hbase_indexer_config_safety_valve", + "value": null + }, + { + "desc": "For advanced use only, a string to be inserted into log4j.properties for this role only.", + "display_name": "Lily HBase Indexer Logging Advanced Configuration Snippet (Safety Valve)", + "name": "log4j_safety_valve", + "value": null + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", + "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", + "name": "log_directory_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", + "display_name": "Maximum Process File Descriptors", + "name": "rlimit_fds", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Health Alerts for this 
Role", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "The maximum size, in megabytes, per log file for Lily HBase Indexer logs. Typically used by log4j.", + "display_name": "Lily HBase Indexer Max Log Size", + "name": "max_log_size", + "value": "200" + }, + { + "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.", + "display_name": "Kill When Out of Memory", + "name": "oom_sigkill_enabled", + "value": "true" + }, + { + "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.", + "display_name": "Dump Heap When Out of Memory", + "name": "oom_heap_dump_enabled", + "value": "false" + }, + { + "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", + "display_name": "Unexpected Exits Thresholds", + "name": "unexpected_exits_thresholds", + "value": "{\"critical\":\"any\",\"warning\":\"never\"}" + }, + { + "desc": "Directory where HBase Indexer will place its log files.", + "display_name": "HBase Indexer Log Directory", + "name": "hbase_indexer_log_dir", + "value": "/var/log/hbase-solr" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "When computing the overall Lily HBase Indexer health, consider the host's health.", + "display_name": "Lily HBase Indexer Host Health Test", + "name": "hbase_indexer_host_health_enabled", + "value": "true" + }, + { + "desc": "The directory in which stacks logs will be placed. 
If not set, stacks will be logged into a stackssubdirectory of the role's log directory.", + "display_name": "Stacks Collection Directory", + "name": "stacks_collection_directory", + "value": null + }, + { + "desc": "Enables the health test that the Lily HBase Indexer's process state is consistent with the role configuration", + "display_name": "Lily HBase Indexer Process Health Test", + "name": "hbase_indexer_scm_health_enabled", + "value": "true" + }, + { + "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup I/O Weight", + "name": "rm_io_weight", + "value": "500" + }, + { + "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup CPU Shares", + "name": "rm_cpu_shares", + "value": "1024" + }, + { + "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it doesn't exist. However, if this directory already exists, role user must have write access to this directory. If this directory is shared amongst multiple roles, it should have 1777 permissions. Note that the heap dump files are created with 600 permissions and are owned by the role user. 
The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.", + "display_name": "Heap Dump Directory", + "name": "oom_heap_dump_dir", + "value": "/tmp" + }, + { + "desc": "The frequency with which stacks will be collected.", + "display_name": "Stacks Collection Frequency", + "name": "stacks_collection_frequency", + "value": "5.0" + }, + { + "desc": "The minimum log level for Lily HBase Indexer logs", + "display_name": "Lily HBase Indexer Logging Threshold", + "name": "log_threshold", + "value": "INFO" + }, + { + "desc": "The user that the HBase Indexer process should run as.", + "display_name": "System User", + "name": "hbase_indexer_process_username", + "value": "hbase" + }, + { + "desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.", + "display_name": "Java Heap Size of Lily HBase Indexer in Bytes", + "name": "hbase_indexer_java_heapsize", + "value": "1073741824" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/ks_indexer-service.json b/sahara/plugins/cdh/resources/ks_indexer-service.json new file mode 100644 index 00000000..ff96caa5 --- /dev/null +++ b/sahara/plugins/cdh/resources/ks_indexer-service.json @@ -0,0 +1,68 @@ +[ + { + "desc": "Name of the Solr service that this Key-Value Store Indexer service instance depends on", + "display_name": "Solr Service", + "name": "solr_service", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Service Level Health Alerts", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "Name of the HBase service that this Key-Value Store Indexer service instance depends on", + "display_name": "HBase Service", + "name": "hbase_service", + "value": null + }, + { + "desc": "For advanced use only, key-value pairs 
(one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.", + "display_name": "Key-Value Indexer Service Environment Advanced Configuration Snippet (Safety Valve)", + "name": "ks_indexer_env_safety_valve", + "value": null + }, + { + "desc": "Text that goes verbatim into custom-mimetypes.xml file used by HBase Indexers.", + "display_name": "Custom Mime-types File", + "name": "custom_mimetypes_file", + "value": "\n\n\nThe configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific service. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger that fires if there are more than 10 DataNodes with more than 500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Service Triggers", + "name": "service_triggers", + "value": "[]" + }, + { + "desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.", + "display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)", + "name": "smon_derived_configs_safety_valve", + "value": null + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/sentry-sentry_server.json b/sahara/plugins/cdh/resources/sentry-sentry_server.json new file mode 100644 index 00000000..bae08eb4 --- /dev/null +++ b/sahara/plugins/cdh/resources/sentry-sentry_server.json @@ -0,0 +1,200 @@ +[ + { + "desc": "When computing the overall Sentry Server health, consider the host's health.", + "display_name": "Sentry Server Host Health Test", + "name": "sentry_server_host_health_enabled", + "value": "true" + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", + "name": "log_directory_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Whether or not periodic stacks collection is enabled.", + "display_name": "Stacks Collection Enabled", + "name": "stacks_collection_enabled", + "value": "false" + }, + { + "desc": "The method that will be used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that expose an HTTP server endpoint exposing the current stacks traces of all threads. 
When the servlet method is selected that HTTP endpoint is periodically scraped.", + "display_name": "Stacks Collection Method", + "name": "stacks_collection_method", + "value": "jstack" + }, + { + "desc": "The maximum size, in megabytes, per log file for Sentry Server logs. Typically used by log4j.", + "display_name": "Sentry Server Max Log Size", + "name": "max_log_size", + "value": "200" + }, + { + "desc": "The maximum number of rolled log files to keep for Sentry Server logs. Typically used by log4j.", + "display_name": "Sentry Server Maximum Log File Backups", + "name": "max_log_backup_index", + "value": "10" + }, + { + "desc": "The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Role Triggers", + "name": "role_triggers", + "value": "[]" + }, + { + "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Soft Limit", + "name": "rm_memory_soft_limit", + "value": "-1" + }, + { + "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.", + "display_name": "Java Configuration Options for Sentry Server", + "name": "sentry_server_java_opts", + "value": "" + }, + { + "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Hard Limit", + "name": "rm_memory_hard_limit", + "value": "-1" + }, + { + "desc": "The period to review when computing unexpected exits.", + "display_name": "Unexpected Exits Monitoring Period", + "name": "unexpected_exits_window", + "value": "5" + }, + { + "desc": "Directory where Sentry Server will place its log files.", + "display_name": "Sentry Server Log Directory", + "name": "sentry_server_log_dir", + "value": "/var/log/sentry" + }, + { + "desc": "The amount of stacks data that will be retained. 
After the retention limit is hit, the oldest data will be deleted.", + "display_name": "Stacks Collection Data Retention", + "name": "stacks_collection_data_retention", + "value": "104857600" + }, + { + "desc": "For advanced use only, a string to be inserted into log4j.properties for this role only.", + "display_name": "Sentry Server Logging Advanced Configuration Snippet (Safety Valve)", + "name": "log4j_safety_valve", + "value": null + }, + { + "desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.", + "display_name": "Java Heap Size of Sentry Server in Bytes", + "name": "sentry_server_java_heapsize", + "value": "1073741824" + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", + "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", + "name": "log_directory_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", + "display_name": "Maximum Process File Descriptors", + "name": "rlimit_fds", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Health Alerts for this Role", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", + "display_name": "Automatically Restart Process", + "name": "process_auto_restart", + "value": "false" + }, + { + "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.", + "display_name": "Kill When Out of Memory", + "name": "oom_sigkill_enabled", + 
"value": "true" + }, + { + "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.", + "display_name": "Dump Heap When Out of Memory", + "name": "oom_heap_dump_enabled", + "value": "false" + }, + { + "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", + "display_name": "Unexpected Exits Thresholds", + "name": "unexpected_exits_thresholds", + "value": "{\"critical\":\"any\",\"warning\":\"never\"}" + }, + { + "desc": "RPC port number of Sentry Server.", + "display_name": "Sentry Server RPC Port", + "name": "sentry_service_server_rpc_port", + "value": "8038" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "The directory in which stacks logs will be placed. If not set, stacks will be logged into a stackssubdirectory of the role's log directory.", + "display_name": "Stacks Collection Directory", + "name": "stacks_collection_directory", + "value": null + }, + { + "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup I/O Weight", + "name": "rm_io_weight", + "value": "500" + }, + { + "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.", + "display_name": "File Descriptor Monitoring Thresholds", + "name": "sentry_server_fd_thresholds", + "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" + }, + { + "desc": "Number of CPU shares to assign to this role. 
The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup CPU Shares", + "name": "rm_cpu_shares", + "value": "1024" + }, + { + "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it doesn't exist. However, if this directory already exists, role user must have write access to this directory. If this directory is shared amongst multiple roles, it should have 1777 permissions. Note that the heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.", + "display_name": "Heap Dump Directory", + "name": "oom_heap_dump_dir", + "value": "/tmp" + }, + { + "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. 
Applies to configurations of all roles in this service except client configuration.", + "display_name": "Sentry Service Environment Advanced Configuration Snippet (Safety Valve)", + "name": "sentry_env_safety_valve", + "value": null + }, + { + "desc": "The frequency with which stacks will be collected.", + "display_name": "Stacks Collection Frequency", + "name": "stacks_collection_frequency", + "value": "5.0" + }, + { + "desc": "The minimum log level for Sentry Server logs", + "display_name": "Sentry Server Logging Threshold", + "name": "log_threshold", + "value": "INFO" + }, + { + "desc": "Enables the health test that the Sentry Server's process state is consistent with the role configuration", + "display_name": "Sentry Server Process Health Test", + "name": "sentry_server_scm_health_enabled", + "value": "true" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/sentry-service.json b/sahara/plugins/cdh/resources/sentry-service.json new file mode 100644 index 00000000..9acaa934 --- /dev/null +++ b/sahara/plugins/cdh/resources/sentry-service.json @@ -0,0 +1,140 @@ +[ + { + "desc": "Password for Sentry Server database.", + "display_name": "Sentry Server Database Password", + "name": "sentry_server_database_password", + "value": "" + }, + { + "desc": "\nConfigures the rules for event tracking and coalescing. This feature is\nused to define equivalency between different audit events. When\nevents match, according to a set of configurable parameters, only one\nentry in the audit list is generated for all the matching events.\n
\n\n\nTracking works by keeping a reference to events when they first appear,\nand comparing other incoming events against the \"tracked\" events according\nto the rules defined here.\n
\n\nEvent trackers are defined in a JSON object like the following:
\n\n\n{\n \"timeToLive\" : [integer],\n \"fields\" : [\n {\n \"type\" : [string],\n \"name\" : [string]\n }\n ]\n}\n\n\n
\nWhere:\n
\n\n\nEach field has an evaluator type associated with it. The evaluator defines\nhow the field data is to be compared. The following evaluators are\navailable:\n
\n\n\nThe following is the list of fields that can be used to compare Sentry events:\n
\n\nThe configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific service. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger that fires if there are more than 10 DataNodes with more than 500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Service Triggers", + "name": "service_triggers", + "value": "[]" + }, + { + "desc": "When computing the overall SENTRY health, consider Sentry Server's health", + "display_name": "Sentry Server Role Health Test", + "name": "sentry_sentry_server_health_enabled", + "value": "true" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "Port number of Sentry Server database.", + "display_name": "Sentry Server Database Port", + "name": "sentry_server_database_port", + "value": "3306" + }, + { + "desc": "If an end user is in one of these admin groups, that user has administrative privileges on the Sentry Server.", + "display_name": "Admin Groups", + "name": "sentry_service_admin_group", + "value": "hive,impala,hue" + }, + { + "desc": "For advanced use only, a string to be inserted into the client configuration for navigator.client.properties.", + "display_name": "Sentry Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties", + "name": "navigator_client_config_safety_valve", + "value": null + }, + { + "desc": "Enable collection of audit events from the service's roles.", + "display_name": "Enable Collection", + "name": "navigator_audit_enabled", + "value": "true" + }, + { + "desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.", + "display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)", + "name": "smon_derived_configs_safety_valve", + "value": null + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/solr-gateway.json b/sahara/plugins/cdh/resources/solr-gateway.json new file mode 100644 index 00000000..54cbc7ea --- /dev/null +++ b/sahara/plugins/cdh/resources/solr-gateway.json @@ -0,0 
+1,20 @@ +[ + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "The directory where the client configs will be deployed", + "display_name": "Deploy Directory", + "name": "client_config_root_dir", + "value": "/etc/solr" + }, + { + "desc": "The priority level that the client configuration will have in the Alternatives system on the hosts. Higher priority levels will cause Alternatives to prefer this configuration over any others.", + "display_name": "Alternatives Priority", + "name": "client_config_priority", + "value": "90" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/solr-service.json b/sahara/plugins/cdh/resources/solr-service.json new file mode 100644 index 00000000..ec66d5b7 --- /dev/null +++ b/sahara/plugins/cdh/resources/solr-service.json @@ -0,0 +1,110 @@ +[ + { + "desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Service Level Health Alerts", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "The user that this service's processes should run as.", + "display_name": "System User", + "name": "process_username", + "value": "solr" + }, + { + "desc": "The group that this service's processes should run as.", + "display_name": "System Group", + "name": "process_groupname", + "value": "solr" + }, + { + "desc": "The class to use in Sentry authorization for user to group mapping. Sentry authorization may be configured to use either Hadoop groups or local groups defined in the policy file. 
When configured with Hadoop groups, Sentry will ask the HDFS Namenode for group mapping for a given user to determine authorization access.", + "display_name": "Sentry User to Group Mapping Class", + "name": "solr_sentry_provider", + "value": "org.apache.sentry.provider.file.HadoopGroupResourceAuthorizationProvider" + }, + { + "desc": "Name of the ZooKeeper service that this Search service instance depends on", + "display_name": "ZooKeeper Service", + "name": "zookeeper_service", + "value": null + }, + { + "desc": "Choose the authentication mechanism used by Solr.", + "display_name": "Solr Secure Authentication", + "name": "solr_security_authentication", + "value": "simple" + }, + { + "desc": "ZooKeeper znode used to store information about this Solr service.", + "display_name": "ZooKeeper Znode", + "name": "zookeeper_znode", + "value": "/solr" + }, + { + "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.", + "display_name": "Solr Service Environment Advanced Configuration Snippet (Safety Valve)", + "name": "solr_env_safety_valve", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "HDFS directory used for storage by this Solr service.", + "display_name": "HDFS Data Directory", + "name": "hdfs_data_dir", + "value": "/solr" + }, + { + "desc": "If Solr does not respond on its web URL within this time interval, the Catalina process is killed.", + "display_name": "Solrd Watchdog Timeout", + "name": "solrd_watchdog_timeout", + "value": "30" + }, + { + "desc": "Use Sentry to enable role-based, fine-grained authorization. 
Sentry is supported only on Search 1.1 or later and CDH 5 or later deployments and requires authentication to be turned on for Solr..", + "display_name": "Enable Sentry Authorization", + "name": "solr_sentry_enabled", + "value": "false" + }, + { + "desc": "HDFS path to the global policy file for Sentry authorization. This should be a relative path (and not a full HDFS URL). The global policy file must be in Sentry policy file format.", + "display_name": "Sentry Global Policy File", + "name": "sentry_solr_provider_resource", + "value": "/user/solr/sentry/sentry-provider.ini" + }, + { + "desc": "For advanced use only, a string to be inserted into sentry-site.xml. Applies to configurations of all roles in this service except client configuration.", + "display_name": "Solr Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml", + "name": "solr_sentry_safety_valve", + "value": null + }, + { + "desc": "The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific service. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger that fires if there are more than 10 DataNodes with more than 500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Service Triggers", + "name": "service_triggers", + "value": "[]" + }, + { + "desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.", + "display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)", + "name": "smon_derived_configs_safety_valve", + "value": null + }, + { + "desc": "Name of the HDFS service that this Search service instance depends on", + "display_name": "HDFS Service", + "name": "hdfs_service", + "value": null + }, + { + "desc": "The health test thresholds of the overall Solr Server health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Solr Servers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Solr Servers falls below the critical threshold.", + "display_name": "Healthy Solr Server Monitoring Thresholds", + "name": "solr_solr_servers_healthy_thresholds", + "value": "{\"critical\":\"90.0\",\"warning\":\"95.0\"}" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/solr-solr_server.json b/sahara/plugins/cdh/resources/solr-solr_server.json new file mode 100644 index 00000000..0f9ebab5 --- /dev/null +++ b/sahara/plugins/cdh/resources/solr-solr_server.json @@ -0,0 +1,272 @@ +[ + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", + "name": "log_directory_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "The method that will be used to collect stacks. 
The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that expose an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected that HTTP endpoint is periodically scraped.", + "display_name": "Stacks Collection Method", + "name": "stacks_collection_method", + "value": "jstack" + }, + { + "desc": "Whether or not periodic stacks collection is enabled.", + "display_name": "Stacks Collection Enabled", + "name": "stacks_collection_enabled", + "value": "false" + }, + { + "desc": "Enable caching of HDFS blocks in Solr. There is one block cache per Solr collection. configured to use off-heap memory, Maximum Off-Heap Memory must be set high enough to account for all block caches.", + "display_name": "HDFS Block Cache", + "name": "solr_hdfs_blockcache_enabled", + "value": "true" + }, + { + "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", + "display_name": "Unexpected Exits Thresholds", + "name": "unexpected_exits_thresholds", + "value": "{\"critical\":\"any\",\"warning\":\"never\"}" + }, + { + "desc": "Number of blocks per cache slab. The size of the cache is 8 KB (the block size) times the number of blocks per slab times the number of slabs.", + "display_name": "HDFS Block Cache Blocks per Slab", + "name": "solr_hdfs_blockcache_blocksperbank", + "value": "16384" + }, + { + "desc": "The health test thresholds on the duration of the metrics request to the web server.", + "display_name": "Web Metric Collection Duration", + "name": "solr_server_web_metric_collection_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}" + }, + { + "desc": "Maximum size in bytes for the Java Process heap memory. 
Passed to Java -Xmx.", + "display_name": "Java Heap Size of Solr Server in Bytes", + "name": "solr_java_heapsize", + "value": "1073741824" + }, + { + "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", + "display_name": "Automatically Restart Process", + "name": "process_auto_restart", + "value": "false" + }, + { + "desc": "The maximum number of rolled log files to keep for Solr Server logs. Typically used by log4j.", + "display_name": "Solr Server Maximum Log File Backups", + "name": "max_log_backup_index", + "value": "10" + }, + { + "desc": "The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Role Triggers", + "name": "role_triggers", + "value": "[]" + }, + { + "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Soft Limit", + "name": "rm_memory_soft_limit", + "value": "-1" + }, + { + "desc": "Enables the health test that the Solr Server's process state is consistent with the role configuration", + "display_name": "Solr Server Process Health Test", + "name": "solr_server_scm_health_enabled", + "value": "true" + }, + { + "desc": "The health test thresholds on the duration of the Solr Server API request.", + "display_name": "Solr Server API Liveness Request Duration", + "name": "solr_core_status_collection_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}" + }, + { + "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. 
By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Hard Limit", + "name": "rm_memory_hard_limit", + "value": "-1" + }, + { + "desc": "The period to review when computing unexpected exits.", + "display_name": "Unexpected Exits Monitoring Period", + "name": "unexpected_exits_window", + "value": "5" + }, + { + "desc": "When computing the overall Solr Server health, consider the host's health.", + "display_name": "Solr Server Host Health Test", + "name": "solr_server_host_health_enabled", + "value": "true" + }, + { + "desc": "The amount of stacks data that will be retained. After the retention limit is hit, the oldest data will be deleted.", + "display_name": "Stacks Collection Data Retention", + "name": "stacks_collection_data_retention", + "value": "104857600" + }, + { + "desc": "For advanced use only, a string to be inserted into log4j.properties for this role only.", + "display_name": "Solr Server Logging Advanced Configuration Snippet (Safety Valve)", + "name": "log4j_safety_valve", + "value": null + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", + "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", + "name": "log_directory_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather status of Solr Cores from the Solr Server with a simple API request.", + "display_name": "Solr Server API Liveness", + "name": "solr_core_status_collection_health_enabled", + "value": "true" + }, + { + "desc": "Use off-heap memory when caching HDFS blocks in Solr.", + "display_name": "HDFS Block Cache Off-Heap Memory", + "name": "solr_hdfs_blockcache_direct_memory_allocation", + "value": "true" + }, + { + "desc": "If configured, overrides the process soft and 
hard rlimits (also called ulimits) for file descriptors to the configured value.", + "display_name": "Maximum Process File Descriptors", + "name": "rlimit_fds", + "value": null + }, + { + "desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.", + "display_name": "Garbage Collection Duration Thresholds", + "name": "solr_server_gc_duration_thresholds", + "value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}" + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Health Alerts for this Role", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "The maximum size, in megabytes, per log file for Solr Server logs. Typically used by log4j.", + "display_name": "Solr Server Max Log Size", + "name": "max_log_size", + "value": "200" + }, + { + "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.", + "display_name": "Kill When Out of Memory", + "name": "oom_sigkill_enabled", + "value": "true" + }, + { + "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.", + "display_name": "Dump Heap When Out of Memory", + "name": "oom_heap_dump_enabled", + "value": "false" + }, + { + "desc": "Admin port of the Solr Server.", + "display_name": "Solr Admin Port", + "name": "solr_admin_port", + "value": "8984" + }, + { + "desc": "Directory on local file system where Solr Server keeps the configurations for collections.", + "display_name": "Solr Data Directory", + "name": "solr_data_dir", + "value": "/var/lib/solr" + }, + { + "desc": "Maximum amount of off-heap memory in bytes that may be allocated by the Java process. Passed to Java -XX:MaxDirectMemorySize. 
If unset, defaults to the size of the heap.", + "display_name": "Java Direct Memory Size of Solr Server in Bytes", + "name": "solr_java_direct_memory_size", + "value": "1073741824" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "The directory in which stacks logs will be placed. If not set, stacks will be logged into a stackssubdirectory of the role's log directory.", + "display_name": "Stacks Collection Directory", + "name": "stacks_collection_directory", + "value": null + }, + { + "desc": "The period to review when computing the moving average of garbage collection time.", + "display_name": "Garbage Collection Duration Monitoring Period", + "name": "solr_server_gc_duration_window", + "value": "5" + }, + { + "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup I/O Weight", + "name": "rm_io_weight", + "value": "500" + }, + { + "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup CPU Shares", + "name": "rm_cpu_shares", + "value": "1024" + }, + { + "desc": "The health test thresholds of the number of file descriptors used. 
Specified as a percentage of file descriptor limit.", + "display_name": "File Descriptor Monitoring Thresholds", + "name": "solr_server_fd_thresholds", + "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" + }, + { + "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it doesn't exist. However, if this directory already exists, role user must have write access to this directory. If this directory is shared amongst multiple roles, it should have 1777 permissions. Note that the heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.", + "display_name": "Heap Dump Directory", + "name": "oom_heap_dump_dir", + "value": "/tmp" + }, + { + "desc": "Number of slabs per block cache. The size of the cache is 8 KB (the block size) times the number of blocks per slab times the number of slabs.", + "display_name": "HDFS Block Cache Number of Slabs", + "name": "solr_hdfs_blockcache_slab_count", + "value": "1" + }, + { + "desc": "The frequency with which stacks will be collected.", + "display_name": "Stacks Collection Frequency", + "name": "stacks_collection_frequency", + "value": "5.0" + }, + { + "desc": "Directory where Solr Server will place its log files.", + "display_name": "Solr Server Log Directory", + "name": "solr_log_dir", + "value": "/var/log/solr" + }, + { + "desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.", + "display_name": "Web Metric Collection", + "name": "solr_server_web_metric_collection_enabled", + "value": "true" + }, + { + "desc": "These arguments will be passed as part of the Java command line. 
Commonly, garbage collection flags or extra debugging flags would be passed here.", + "display_name": "Java Configuration Options for Solr Server", + "name": "solr_java_opts", + "value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-CMSConcurrentMTEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled" + }, + { + "desc": "The minimum log level for Solr Server logs", + "display_name": "Solr Server Logging Threshold", + "name": "log_threshold", + "value": "INFO" + }, + { + "desc": "HTTP port of Solr Server.", + "display_name": "Solr HTTP Port", + "name": "solr_http_port", + "value": "8983" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/sqoop-service.json b/sahara/plugins/cdh/resources/sqoop-service.json new file mode 100644 index 00000000..8076dc2b --- /dev/null +++ b/sahara/plugins/cdh/resources/sqoop-service.json @@ -0,0 +1,56 @@ +[ + { + "desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Service Level Health Alerts", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "When computing the overall SQOOP health, consider Sqoop 2 Server's health", + "display_name": "Sqoop 2 Server Role Health Test", + "name": "sqoop_sqoop_server_health_enabled", + "value": "true" + }, + { + "desc": "The group that this service's processes should run as.", + "display_name": "System Group", + "name": "process_groupname", + "value": "sqoop" + }, + { + "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. 
Applies to configurations of all roles in this service except client configuration.", + "display_name": "Sqoop 2 Service Environment Advanced Configuration Snippet (Safety Valve)", + "name": "sqoop_env_safety_valve", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "The user that this service's processes should run as.", + "display_name": "System User", + "name": "process_username", + "value": "sqoop2" + }, + { + "desc": "The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific service. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger that fires if there are more than 10 DataNodes with more than 500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Service Triggers", + "name": "service_triggers", + "value": "[]" + }, + { + "desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.", + "display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)", + "name": "smon_derived_configs_safety_valve", + "value": null + }, + { + "desc": "MapReduce jobs are run against this service.", + "display_name": "MapReduce Service", + "name": "mapreduce_yarn_service", + "value": null + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/resources/sqoop-sqoop_server.json b/sahara/plugins/cdh/resources/sqoop-sqoop_server.json new file mode 100644 index 00000000..939c364d --- /dev/null +++ b/sahara/plugins/cdh/resources/sqoop-sqoop_server.json @@ -0,0 +1,218 @@ +[ + { + "desc": "Directory where Sqoop 2 Server will place its log files.", + "display_name": "Sqoop 2 Server Log Directory", + "name": "sqoop_log_dir", + "value": "/var/log/sqoop2" + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", + "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", + "name": "log_directory_free_space_percentage_thresholds", + "value": "{\"critical\":\"never\",\"warning\":\"never\"}" + }, + { + "desc": "Whether or not periodic stacks collection is enabled.", + "display_name": "Stacks Collection Enabled", + "name": "stacks_collection_enabled", + "value": "false" + }, + { + "desc": "Admin port of Sqoop 2 Server. 
(Note: This configuration only applies from CDH 4.3 onwards.)", + "display_name": "Sqoop 2 Admin Port", + "name": "sqoop_admin_port", + "value": "8005" + }, + { + "desc": "Maximum number of clients allowed to connect to the Sqoop 2 Server.", + "display_name": "Maximum Client Connections", + "name": "max_client_connections", + "value": "10" + }, + { + "desc": "The method that will be used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that expose an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected that HTTP endpoint is periodically scraped.", + "display_name": "Stacks Collection Method", + "name": "stacks_collection_method", + "value": "jstack" + }, + { + "desc": "For advanced use only, a string to be inserted into sqoop.properties for this role only.", + "display_name": "Sqoop 2 Server Advanced Configuration Snippet (Safety Valve) for sqoop.properties", + "name": "sqoop_config_safety_valve", + "value": null + }, + { + "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", + "display_name": "Automatically Restart Process", + "name": "process_auto_restart", + "value": "false" + }, + { + "desc": "The maximum number of rolled log files to keep for Sqoop 2 Server logs. Typically used by log4j.", + "display_name": "Sqoop 2 Server Maximum Log File Backups", + "name": "max_log_backup_index", + "value": "10" + }, + { + "desc": "The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated. For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", + "display_name": "Role Triggers", + "name": "role_triggers", + "value": "[]" + }, + { + "desc": "HTTP port of Sqoop 2 Server. (Note: This configuration only applies from CDH 4.3 onwards.)", + "display_name": "Sqoop 2 HTTP Port", + "name": "sqoop_http_port", + "value": "12000" + }, + { + "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Soft Limit", + "name": "rm_memory_soft_limit", + "value": "-1" + }, + { + "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.", + "display_name": "File Descriptor Monitoring Thresholds", + "name": "sqoop_server_fd_thresholds", + "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" + }, + { + "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", + "display_name": "Cgroup Memory Hard Limit", + "name": "rm_memory_hard_limit", + "value": "-1" + }, + { + "desc": "The period to review when computing unexpected exits.", + "display_name": "Unexpected Exits Monitoring Period", + "name": "unexpected_exits_window", + "value": "5" + }, + { + "desc": "Maximum size in bytes for the Java Process heap memory. 
Passed to Java -Xmx.", + "display_name": "Java Heap Size of Sqoop 2 Server in Bytes", + "name": "sqoop_java_heapsize", + "value": "1073741824" + }, + { + "desc": "For advanced use only, a string to be inserted into log4j.properties for this role only.", + "display_name": "Sqoop 2 Server Logging Advanced Configuration Snippet (Safety Valve)", + "name": "log4j_safety_valve", + "value": null + }, + { + "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", + "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", + "name": "log_directory_free_space_absolute_thresholds", + "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" + }, + { + "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", + "display_name": "Maximum Process File Descriptors", + "name": "rlimit_fds", + "value": null + }, + { + "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", + "display_name": "Enable Health Alerts for this Role", + "name": "enable_alerts", + "value": "true" + }, + { + "desc": "The maximum size, in megabytes, per log file for Sqoop 2 Server logs. 
Typically used by log4j.", + "display_name": "Sqoop 2 Server Max Log Size", + "name": "max_log_size", + "value": "200" + }, + { + "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.", + "display_name": "Kill When Out of Memory", + "name": "oom_sigkill_enabled", + "value": "true" + }, + { + "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.", + "display_name": "Dump Heap When Out of Memory", + "name": "oom_heap_dump_enabled", + "value": "false" + }, + { + "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", + "display_name": "Unexpected Exits Thresholds", + "name": "unexpected_exits_thresholds", + "value": "{\"critical\":\"any\",\"warning\":\"never\"}" + }, + { + "desc": "Directory where the Sqoop 2 Server will place its metastore data.", + "display_name": "Sqoop 2 Server Metastore Directory", + "name": "sqoop_metastore_data_dir", + "value": "/var/lib/sqoop2" + }, + { + "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", + "display_name": "Enable Configuration Change Alerts", + "name": "enable_config_alerts", + "value": "false" + }, + { + "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.", + "display_name": "Java Configuration Options for Sqoop 2 Server", + "name": "sqoop_java_opts", + "value": "" + }, + { + "desc": "The directory in which stacks logs will be placed. 
If not set, stacks will be logged into a stacks subdirectory of the role's log directory.", + "display_name": "Stacks Collection Directory", + "name": "stacks_collection_directory", + "value": null + }, + { + "desc": "When computing the overall Sqoop 2 Server health, consider the host's health.", + "display_name": "Sqoop 2 Server Host Health Test", + "name": "sqoop_server_host_health_enabled", + "value": "true" + }, + { + "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup I/O Weight", + "name": "rm_io_weight", + "value": "500" + }, + { + "desc": "Enables the health test that the Sqoop 2 Server's process state is consistent with the role configuration", + "display_name": "Sqoop 2 Server Process Health Test", + "name": "sqoop_server_scm_health_enabled", + "value": "true" + }, + { + "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.", + "display_name": "Cgroup CPU Shares", + "name": "rm_cpu_shares", + "value": "1024" + }, + { + "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError is thrown. This directory is automatically created if it doesn't exist. However, if this directory already exists, role user must have write access to this directory. If this directory is shared amongst multiple roles, it should have 1777 permissions. Note that the heap dump files are created with 600 permissions and are owned by the role user. 
The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.", + "display_name": "Heap Dump Directory", + "name": "oom_heap_dump_dir", + "value": "/tmp" + }, + { + "desc": "The amount of stacks data that will be retained. After the retention limit is hit, the oldest data will be deleted.", + "display_name": "Stacks Collection Data Retention", + "name": "stacks_collection_data_retention", + "value": "104857600" + }, + { + "desc": "The frequency with which stacks will be collected.", + "display_name": "Stacks Collection Frequency", + "name": "stacks_collection_frequency", + "value": "5.0" + }, + { + "desc": "The minimum log level for Sqoop 2 Server logs", + "display_name": "Sqoop 2 Server Logging Threshold", + "name": "log_threshold", + "value": "INFO" + } +] \ No newline at end of file diff --git a/sahara/plugins/cdh/utils.py b/sahara/plugins/cdh/utils.py index d145231c..99f13587 100644 --- a/sahara/plugins/cdh/utils.py +++ b/sahara/plugins/cdh/utils.py @@ -73,6 +73,38 @@ def get_hbase_master(cluster): return u.get_instance(cluster, 'MASTER') +def get_flumes(cluster): + return u.get_instances(cluster, 'AGENT') + + +def get_sentry(cluster): + return u.get_instance(cluster, 'SENTRY_SERVER') + + +def get_solrs(cluster): + return u.get_instances(cluster, 'SOLR_SERVER') + + +def get_sqoop(cluster): + return u.get_instance(cluster, 'SQOOP_SERVER') + + +def get_hbase_indexers(cluster): + return u.get_instances(cluster, 'HBASE_INDEXER') + + +def get_catalogserver(cluster): + return u.get_instance(cluster, 'CATALOGSERVER') + + +def get_statestore(cluster): + return u.get_instance(cluster, 'STATESTORE') + + +def get_impalads(cluster): + return u.get_instances(cluster, 'IMPALAD') + + def convert_process_configs(configs): p_dict = { "CLOUDERA": ['MANAGER'], @@ -90,7 +122,15 @@ def convert_process_configs(configs): "SPARK_ON_YARN": ['SPARK_YARN_HISTORY_SERVER'], "ZOOKEEPER": ['SERVER'], "MASTER": ['MASTER'], - 
"REGIONSERVER": ['REGIONSERVER'] + "REGIONSERVER": ['REGIONSERVER'], + "FLUME": ['AGENT'], + "CATALOGSERVER": ['CATALOGSERVER'], + "STATESTORE": ['STATESTORE'], + "IMPALAD": ['IMPALAD'], + "KS_INDEXER": ['HBASE_INDEXER'], + "SENTRY": ['SENTRY_SERVER'], + "SOLR": ['SOLR_SERVER'], + "SQOOP": ['SQOOP_SERVER'] } if isinstance(configs, res.Resource): configs = configs.to_dict() diff --git a/sahara/plugins/cdh/validation.py b/sahara/plugins/cdh/validation.py index 43c6cdc5..bdae36e3 100644 --- a/sahara/plugins/cdh/validation.py +++ b/sahara/plugins/cdh/validation.py @@ -139,6 +139,86 @@ def validate_cluster_creating(cluster): raise ex.InvalidComponentCountException('MASTER', _('at least 1'), hbm_count) + a_count = _get_inst_count(cluster, 'AGENT') + if a_count >= 1: + if dn_count < 1: + raise ex.RequiredServiceMissingException( + 'DATANODE', required_by='FLUME_AGENT') + + ss1_count = _get_inst_count(cluster, 'SENTRY_SERVER') + if ss1_count not in [0, 1]: + raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'), + ss1_count) + if ss1_count == 1: + if dn_count < 1: + raise ex.RequiredServiceMissingException( + 'DATANODE', required_by='SENTRY_SERVER') + if zk_count < 1: + raise ex.RequiredServiceMissingException( + 'ZOOKEEPER', required_by='SENTRY_SERVER') + + ss2_count = _get_inst_count(cluster, 'SOLR_SERVER') + if ss2_count >= 1: + if dn_count < 1: + raise ex.RequiredServiceMissingException( + 'DATANODE', required_by='SOLR_SERVER') + if zk_count < 1: + raise ex.RequiredServiceMissingException( + 'ZOOKEEPER', required_by='SOLR_SERVER') + + s2s_count = _get_inst_count(cluster, 'SQOOP_SERVER') + if s2s_count not in [0, 1]: + raise ex.InvalidComponentCountException('SQOOP_SERVER', _('0 or 1'), + s2s_count) + if s2s_count == 1: + if dn_count < 1: + raise ex.RequiredServiceMissingException( + 'DATANODE', required_by='SQOOP_SERVER') + if nm_count < 1: + raise ex.RequiredServiceMissingException( + 'NODEMANAGER', required_by='SQOOP_SERVER') + if hs_count != 1: + 
raise ex.RequiredServiceMissingException( + 'JOBHISTORY', required_by='SQOOP_SERVER') + + lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER') + if lhbi_count >= 1: + if dn_count < 1: + raise ex.RequiredServiceMissingException( + 'DATANODE', required_by='HBASE_INDEXER') + if zk_count < 1: + raise ex.RequiredServiceMissingException( + 'ZOOKEEPER', required_by='HBASE_INDEXER') + if ss2_count < 1: + raise ex.RequiredServiceMissingException( + 'SOLR_SERVER', required_by='HBASE_INDEXER') + if hbm_count < 1: + raise ex.RequiredServiceMissingException( + 'HBASE_MASTER', required_by='HBASE_INDEXER') + + ics_count = _get_inst_count(cluster, 'CATALOGSERVER') + iss_count = _get_inst_count(cluster, 'STATESTORE') + id_count = _get_inst_count(cluster, 'IMPALAD') + if ics_count not in [0, 1]: + raise ex.InvalidComponentCountException('CATALOGSERVER', _('0 or 1'), + ics_count) + if iss_count not in [0, 1]: + raise ex.InvalidComponentCountException('STATESTORE', _('0 or 1'), + iss_count) + if ics_count == 1: + if iss_count != 1: + raise ex.RequiredServiceMissingException( + 'STATESTORE', required_by='IMPALA') + if id_count < 1: + raise ex.RequiredServiceMissingException( + 'IMPALAD', required_by='IMPALA') + if dn_count < 1: + raise ex.RequiredServiceMissingException( + 'DATANODE', required_by='IMPALA') + if hms_count < 1: + raise ex.RequiredServiceMissingException( + 'HIVEMETASTORE', required_by='IMPALA') + def validate_additional_ng_scaling(cluster, additional): rm = cu.get_resourcemanager(cluster)