[ { "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", "name": "log_directory_free_space_percentage_thresholds", "value": "{\"critical\":\"never\",\"warning\":\"never\"}" }, { "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.", "display_name": "File Descriptor Monitoring Thresholds", "name": "resourcemanager_fd_thresholds", "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" }, { "desc": "

This file contains the rules that govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether to send an event for that message.
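
For instance, a minimal configuration with the same structure as the first rule in this role's default value (shown here only as an illustration) would be:

{\"version\": \"0\", \"rules\": [{\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\": \"FATAL\"}]}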

Each rule has some or all of the following fields:

- alert - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" causes alerts to be generated. If not specified, the default is \"false\".
- rate (mandatory) - the maximum number of log messages matching this rule that can be sent as events in one period. If more than rate matching messages are received during the period, the extra messages are ignored. A rate of 0 suppresses matching messages entirely; a negative rate means unlimited.
- periodminutes - the length, in minutes, of the period over which rate is applied. If not specified, the default is one minute.
- threshold - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning-level messages or higher.
- content - match only those messages whose contents match this regular expression.
- exceptiontype - match only those messages that are part of an exception message. The exception type must match this regular expression.

Example: {\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}

This rule will send events to Cloudera Manager for every StringIndexOutOfBoundsException, up to a maximum of 10 every minute.
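
A further, purely hypothetical example (not one of this role's default rules) that promotes matching messages to alerts:

{\"alert\": true, \"rate\": 5, \"periodminutes\": 2, \"threshold\": \"ERROR\", \"content\": \".*disk failure.*\"}

This rule would mark as alerts up to 5 events every 2 minutes for messages at ERROR level or above whose contents match the regular expression.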

", "display_name": "Rules to Extract Events from Log Files", "name": "log_event_whitelist", "value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n" }, { "desc": "An XML string that will be inserted verbatim into the Fair Scheduler allocations file. For CDH5, overrides the configuration set using the Pools configuration UI. For CDH4, this is the only way to configure the Fair Scheduler for YARN.", "display_name": "Fair Scheduler XML Advanced Configuration Snippet (Safety Valve)", "name": "resourcemanager_fair_scheduler_configuration", "value": null }, { "desc": "Whether or not periodic stacks collection is enabled.", "display_name": "Stacks Collection Enabled", "name": "stacks_collection_enabled", "value": "false" }, { "desc": "For advanced use only, a string to be inserted into nodes_allow.txt for this role only.", "display_name": "ResourceManager Advanced Configuration Snippet (Safety Valve) for nodes_allow.txt", "name": "rm_hosts_allow_safety_valve", "value": null }, { "desc": "The maximum number of application attempts. This is a global setting for all ApplicationMasters. Each ApplicationMaster can specify its individual maximum through the API, but if the individual maximum is more than the global maximum, the ResourceManager overrides it.", "display_name": "ApplicationMaster Maximum Attempts", "name": "yarn_resourcemanager_am_max_retries", "value": "2" }, { "desc": "For advanced use only, a string to be inserted into nodes_exclude.txt for this role only.", "display_name": "ResourceManager Advanced Configuration Snippet (Safety Valve) for nodes_exclude.txt", "name": "rm_hosts_exclude_safety_valve", "value": null }, { "desc": "The health test thresholds on the duration of the metrics request to the web server.", "display_name": "Web Metric Collection Duration", "name": "resourcemanager_web_metric_collection_thresholds", "value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}" }, { "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.", "display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds", "name": "heap_dump_directory_free_space_percentage_thresholds", "value": "{\"critical\":\"never\",\"warning\":\"never\"}" }, { "desc": "The maximum number of completed applications that the ResourceManager keeps.", "display_name": "Max Completed Applications", "name": "yarn_resourcemanager_max_completed_applications", "value": "10000" }, { "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. 
By default processes not managed by Cloudera Manager will have no limit.", "display_name": "Cgroup Memory Hard Limit", "name": "rm_memory_hard_limit", "value": "-1" }, { "desc": "For advanced use only, a string to be inserted into yarn-site.xml for this role only.", "display_name": "ResourceManager Advanced Configuration Snippet (Safety Valve) for yarn-site.xml", "name": "resourcemanager_config_safety_valve", "value": null }, { "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.", "display_name": "Java Configuration Options for ResourceManager", "name": "resource_manager_java_opts", "value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-CMSConcurrentMTEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled" }, { "desc": "Enables the health test that the ResourceManager's process state is consistent with the role configuration.", "display_name": "ResourceManager Process Health Test", "name": "resourcemanager_scm_health_enabled", "value": "true" }, { "desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stack traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.", "display_name": "Stacks Collection Method", "name": "stacks_collection_method", "value": "jstack" }, { "desc": "Directory where ResourceManager will place its log files.", "display_name": "ResourceManager Log Directory", "name": "resource_manager_log_dir", "value": "/var/log/hadoop-yarn" }, { "desc": "The smallest amount of physical memory, in MiB, that can be requested for a container. If using the Capacity or FIFO scheduler (or any scheduler, prior to CDH 5), memory requests will be rounded up to the nearest multiple of this number.", "display_name": "Container Memory Minimum", "name": "yarn_scheduler_minimum_allocation_mb", "value": "1024" }, { "desc": "The number of threads used to handle applications manager requests.", "display_name": "Client Thread Count", "name": "yarn_resourcemanager_client_thread_count", "value": "50" }, { "desc": "The periodic interval that the ResourceManager will check whether containers are still alive.", "display_name": "Container Monitor Interval", "name": "yarn_resourcemanager_container_liveness_monitor_interval_ms", "value": "600000" }, { "desc": "The HTTP port of the ResourceManager web application.", "display_name": "ResourceManager Web Application HTTP Port", "name": "resourcemanager_webserver_port", "value": "8088" }, { "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", "display_name": "Automatically Restart Process", "name": "process_auto_restart", "value": "false" }, { "desc": "The expiry interval to wait until a NodeManager is considered dead.", "display_name": "NodeManager Monitor Expiry", "name": "yarn_nm_liveness_monitor_expiry_interval_ms", "value": "600000" }, { "desc": "The largest amount of physical memory, in MiB, that can be requested for a container.", "display_name": "Container Memory Maximum", "name": "yarn_scheduler_maximum_allocation_mb", "value": "65536" }, { "desc": "The largest number of virtual CPU cores that can be requested for a container. 
This parameter has no effect prior to CDH 4.4.", "display_name": "Container Virtual CPU Cores Maximum", "name": "yarn_scheduler_maximum_allocation_vcores", "value": "32" }, { "desc": "The health test thresholds on the swap memory usage of the process.", "display_name": "Process Swap Memory Thresholds", "name": "process_swap_memory_thresholds", "value": "{\"critical\":\"never\",\"warning\":\"any\"}" }, { "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous and page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", "display_name": "Cgroup Memory Soft Limit", "name": "rm_memory_soft_limit", "value": "-1" }, { "desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.", "display_name": "Web Metric Collection", "name": "resourcemanager_web_metric_collection_enabled", "value": "true" }, { "desc": "Number of threads to handle resource tracker calls.", "display_name": "Resource Tracker Thread Count", "name": "yarn_resourcemanager_resource_tracker_client_thread_count", "value": "50" }, { "desc": "If using the Fair Scheduler, virtual core requests will be rounded up to the nearest multiple of this number. This parameter has no effect prior to CDH 5.", "display_name": "Container Virtual CPU Cores Increment", "name": "yarn_scheduler_increment_allocation_vcores", "value": "1" }, { "desc": "The address of the scheduler interface in the ResourceManager.", "display_name": "Scheduler Address", "name": "yarn_resourcemanager_scheduler_address", "value": "8030" }, { "desc": "For advanced use only, a string to be inserted into mapred-site.xml for this role only.", "display_name": "ResourceManager Advanced Configuration Snippet (Safety Valve) for mapred-site.xml", "name": "resourcemanager_mapred_safety_valve", "value": null }, { "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", "name": "log_directory_free_space_absolute_thresholds", "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" }, { "desc": "The HTTPS port of the ResourceManager web application.", "display_name": "ResourceManager Web Application HTTPS Port (SSL)", "name": "resourcemanager_webserver_https_port", "value": "8090" }, { "desc": "The period to review when computing the moving average of garbage collection time.", "display_name": "Garbage Collection Duration Monitoring Period", "name": "resourcemanager_gc_duration_window", "value": "5" }, { "desc": "For applications that request containers on particular racks, the number of scheduling opportunities since the last container assignment to wait before accepting a placement on another rack. Expressed as a float between 0 and 1, which, as a fraction of the cluster size, is the number of scheduling opportunities to pass up. If not set, this means don't pass up any scheduling opportunities. Requires Fair Scheduler continuous scheduling to be disabled. 
If continuous scheduling is enabled, yarn.scheduler.fair.locality-delay-rack-ms should be used instead.", "display_name": "Fair Scheduler Rack Locality Threshold", "name": "resourcemanager_fair_scheduler_locality_threshold_rack", "value": null }, { "desc": "The period to review when computing unexpected exits.", "display_name": "Unexpected Exits Monitoring Period", "name": "unexpected_exits_window", "value": "5" }, { "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", "display_name": "Cgroup I/O Weight", "name": "rm_io_weight", "value": "500" }, { "desc": "Advanced Configuration Snippet (Safety Valve) for Hadoop Metrics2. Properties will be inserted into hadoop-metrics2.properties.", "display_name": "Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)", "name": "hadoop_metrics2_safety_valve", "value": null }, { "desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.", "display_name": "Stacks Collection Data Retention", "name": "stacks_collection_data_retention", "value": "104857600" }, { "desc": "The address of the applications manager interface in the ResourceManager.", "display_name": "ResourceManager Address", "name": "yarn_resourcemanager_address", "value": "8032" }, { "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.", "display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds", "name": "heap_dump_directory_free_space_absolute_thresholds", "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" }, { "desc": "For advanced use only, a string to be inserted into log4j.properties for this role only.", "display_name": "ResourceManager Logging Advanced Configuration Snippet (Safety Valve)", "name": "log4j_safety_valve", "value": null }, { "desc": "Enables multiple Fair Scheduler container assignments in one heartbeat, which improves cluster throughput when there are many small tasks to run.", "display_name": "Fair Scheduler Assign Multiple Tasks", "name": "resourcemanager_fair_scheduler_assign_multiple", "value": "false" }, { "desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.", "display_name": "Garbage Collection Duration Thresholds", "name": "resourcemanager_gc_duration_thresholds", "value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}" }, { "desc": "When enabled, any applications that were running on the cluster when the ResourceManager died will be recovered when the ResourceManager next starts. Note: If RM-HA is enabled, then this configuration is always enabled.", "display_name": "Enable ResourceManager Recovery", "name": "yarn_resourcemanager_recovery_enabled", "value": "false" }, { "desc": "The periodic interval that the ResourceManager will check whether ApplicationMasters are still alive.", "display_name": "ApplicationMaster Monitor Interval", "name": "yarn_resourcemanager_amliveliness_monitor_interval_ms", "value": "1000" }, { "desc": "For applications that request containers on particular nodes, the minimum time in milliseconds the Fair Scheduler waits before accepting a placement on another node. Requires Fair Scheduler continuous scheduling to be enabled. 
If continuous scheduling is disabled, yarn.scheduler.fair.locality.threshold.node should be used instead.", "display_name": "Fair Scheduler Node Locality Delay", "name": "yarn_scheduler_fair_locality_delay_node_ms", "value": "2000" }, { "desc": "Number of threads used to handle the ResourceManager admin interface.", "display_name": "Admin Client Thread Count", "name": "yarn_resourcemanager_admin_client_thread_count", "value": "1" }, { "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", "display_name": "Maximum Process File Descriptors", "name": "rlimit_fds", "value": null }, { "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold.", "display_name": "Enable Health Alerts for this Role", "name": "enable_alerts", "value": "true" }, { "desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.", "display_name": "Java Heap Size of ResourceManager in Bytes", "name": "resource_manager_java_heapsize", "value": "1073741824" }, { "desc": "The maximum size, in megabytes, per log file for ResourceManager logs. Typically used by log4j or logback.", "display_name": "ResourceManager Max Log Size", "name": "max_log_size", "value": "200" }, { "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.", "display_name": "Kill When Out of Memory", "name": "oom_sigkill_enabled", "value": "true" }, { "desc": "When enabled, if a pool's minimum share is not met for some period of time, the Fair Scheduler preempts applications in other pools. Preemption guarantees that production applications are not starved while also allowing the cluster to be used for experimental and research applications. To minimize wasted computation, the Fair Scheduler preempts the most recently launched applications.", "display_name": "Fair Scheduler Preemption", "name": "resourcemanager_fair_scheduler_preemption", "value": "false" }, { "desc": "When set, generates a heap dump file when java.lang.OutOfMemoryError is thrown.", "display_name": "Dump Heap When Out of Memory", "name": "oom_heap_dump_enabled", "value": "false" }, { "desc": "The smallest number of virtual CPU cores that can be requested for a container. If using the Capacity or FIFO scheduler (or any scheduler, prior to CDH 5), virtual core requests will be rounded up to the nearest multiple of this number. This parameter has no effect prior to CDH 4.4.", "display_name": "Container Virtual CPU Cores Minimum", "name": "yarn_scheduler_minimum_allocation_vcores", "value": "1" }, { "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", "display_name": "Unexpected Exits Thresholds", "name": "unexpected_exits_thresholds", "value": "{\"critical\":\"any\",\"warning\":\"never\"}" }, { "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.", "display_name": "ResourceManager Environment Advanced Configuration Snippet (Safety Valve)", "name": "RESOURCEMANAGER_role_env_safety_valve", "value": null }, { "desc": "If using the Fair Scheduler, memory requests will be rounded up to the nearest multiple of this number. 
This parameter has no effect prior to CDH 5.", "display_name": "Container Memory Increment", "name": "yarn_scheduler_increment_allocation_mb", "value": "512" }, { "desc": "

The configured triggers for this role. This is a JSON-formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.

Each trigger has all of the following fields:

- triggerName (mandatory) - the name of the trigger. This value must be unique for the specific role.
- triggerExpression (mandatory) - a tsquery expression representing the trigger.
- streamThreshold (optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, meaning that any stream returned causes the condition to fire.
- enabled (optional) - whether the trigger is evaluated. By default set to \"true\".

For example, the following JSON-formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors open:

[{\"triggerName\": \"sample-trigger\",\n  \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n  \"streamThreshold\": 0, \"enabled\": \"true\"}]

See the trigger rules documentation for more details on how to write triggers using tsquery.

The JSON format is evolving and may change in the future; as a result, backward compatibility is not guaranteed between releases at this time.

", "display_name": "Role Triggers", "name": "role_triggers", "value": "[]" }, { "desc": "If enabled, the ResourceManager binds to the wildcard address (\"0.0.0.0\") on all of its ports.", "display_name": "Bind ResourceManager to Wildcard Address", "name": "yarn_rm_bind_wildcard", "value": "false" }, { "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", "display_name": "Enable Configuration Change Alerts", "name": "enable_config_alerts", "value": "false" }, { "desc": "For applications that request containers on particular racks, the minimum time in milliseconds the Fair Scheduler waits before accepting a placement on another rack. Requires Fair Scheduler continuous scheduling to be enabled. If continuous scheduling is disabled, yarn.scheduler.fair.locality.threshold.rack should be used instead.", "display_name": "Fair Scheduler Rack Locality Delay", "name": "yarn_scheduler_fair_locality_delay_rack_ms", "value": "4000" }, { "desc": "Enable continuous scheduling in the Fair Scheduler. When enabled, scheduling decisions are decoupled from NodeManager heartbeats, leading to faster resource allocations.", "display_name": "Enable Fair Scheduler Continuous Scheduling", "name": "yarn_scheduler_fair_continuous_scheduling_enabled", "value": "true" }, { "desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a stacks subdirectory of the role's log directory.", "display_name": "Stacks Collection Directory", "name": "stacks_collection_directory", "value": null }, { "desc": "When computing the overall ResourceManager health, consider the host's health.", "display_name": "ResourceManager Host Health Test", "name": "resourcemanager_host_health_enabled", "value": "true" }, { "desc": "The number of threads used to handle requests through the scheduler interface.", "display_name": "Scheduler Thread Count", "name": "yarn_resourcemanager_scheduler_client_thread_count", "value": "50" }, { "desc": "The maximum number of rolled log files to keep for ResourceManager logs. Typically used by log4j or logback.", "display_name": "ResourceManager Maximum Log File Backups", "name": "max_log_backup_index", "value": "10" }, { "desc": "When enabled, the Fair Scheduler will assign shares to individual apps based on their size, rather than providing an equal share to all apps regardless of size.", "display_name": "Fair Scheduler Size-Based Weight", "name": "resourcemanager_fair_scheduler_size_based_weight", "value": "false" }, { "desc": "For applications that request containers on particular nodes, the number of scheduling opportunities since the last container assignment to wait before accepting a placement on another node. Expressed as a float between 0 and 1, which, as a fraction of the cluster size, is the number of scheduling opportunities to pass up. If not set, this means don't pass up any scheduling opportunities. Requires Fair Scheduler continuous scheduling to be disabled. If continuous scheduling is enabled, yarn.scheduler.fair.locality-delay-node-ms should be used instead.", "display_name": "Fair Scheduler Node Locality Threshold", "name": "resourcemanager_fair_scheduler_locality_threshold_node", "value": null }, { "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. 
Defaults to 1024 for processes not managed by Cloudera Manager.", "display_name": "Cgroup CPU Shares", "name": "rm_cpu_shares", "value": "1024" }, { "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError is thrown. This directory is automatically created if it does not exist. If this directory already exists, the role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.", "display_name": "Heap Dump Directory", "name": "oom_heap_dump_dir", "value": "/tmp" }, { "desc": "When enabled, ResourceManager has proxy user privileges.", "display_name": "Enable ResourceManager Proxy User Privileges", "name": "yarn_resourcemanager_proxy_user_privileges_enabled", "value": "true" }, { "desc": "The frequency with which stacks are collected.", "display_name": "Stacks Collection Frequency", "name": "stacks_collection_frequency", "value": "5.0" }, { "desc": "When set to true, the Fair Scheduler uses the username as the default pool name, in the event that a pool name is not specified. When set to false, all applications are run in a shared pool, called default.", "display_name": "Fair Scheduler User As Default Queue", "name": "resourcemanager_fair_scheduler_user_as_default_queue", "value": "true" }, { "desc": "The address of the resource tracker interface in the ResourceManager.", "display_name": "Resource Tracker Address", "name": "yarn_resourcemanager_resource_tracker_address", "value": "8031" }, { "desc": "The expiry interval to wait until an ApplicationMaster is considered dead.", "display_name": "ApplicationMaster Monitor Expiry", "name": "yarn_am_liveness_monitor_expiry_interval_ms", "value": "600000" }, { "desc": "The class to use as the resource scheduler. FairScheduler is only supported in CDH 4.2.1 and later.", "display_name": "Scheduler Class", "name": "yarn_resourcemanager_scheduler_class", "value": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler" }, { "desc": "The minimum log level for ResourceManager logs.", "display_name": "ResourceManager Logging Threshold", "name": "log_threshold", "value": "INFO" }, { "desc": "The amount of time allowed after this role is started that failures of health checks that rely on communication with this role will be tolerated.", "display_name": "Health Check Startup Tolerance", "name": "resourcemanager_startup_tolerance_minutes", "value": "5" }, { "desc": "The address of the admin interface in the ResourceManager.", "display_name": "Administration Address", "name": "yarn_resourcemanager_admin_address", "value": "8033" }, { "desc": "The periodic interval that the ResourceManager will check whether NodeManagers are still alive.", "display_name": "NodeManager Monitor Interval", "name": "yarn_resourcemanager_nm_liveness_monitor_interval_ms", "value": "1000" }, { "desc": "Enter an XML string that represents the Capacity Scheduler configuration.", "display_name": "Capacity Scheduler Configuration", "name": "resourcemanager_capacity_scheduler_configuration", "value": "<?xml version=\"1.0\"?>\n<configuration>\n  <property>\n    <name>yarn.scheduler.capacity.root.queues</name>\n    <value>default</value>\n  </property>\n  <property>\n    <name>yarn.scheduler.capacity.root.capacity</name>\n    <value>100</value>\n  </property>\n  <property>\n    <name>yarn.scheduler.capacity.root.default.capacity</name>\n    <value>100</value>\n  </property>\n</configuration>\n" } ]