[ { "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.", "display_name": "Log Directory Free Space Monitoring Percentage Thresholds", "name": "log_directory_free_space_percentage_thresholds", "value": "{\"critical\":\"never\",\"warning\":\"never\"}" }, { "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.", "display_name": "File Descriptor Monitoring Thresholds", "name": "httpfs_fd_thresholds", "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}" }, { "desc": "Whether or not periodic stacks collection is enabled.", "display_name": "Stacks Collection Enabled", "name": "stacks_collection_enabled", "value": "false" }, { "desc": "The user that the HttpFS server process should run as.", "display_name": "System User", "name": "httpfs_process_username", "value": "httpfs" }, { "desc": "The group that the HttpFS server process should run as.", "display_name": "System Group", "name": "httpfs_process_groupname", "value": "httpfs" }, { "desc": "When computing the overall HttpFS health, consider the host's health.", "display_name": "HttpFS Host Health Test", "name": "httpfs_host_health_enabled", "value": "true" }, { "desc": "The method that will be used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected that HTTP endpoint is periodically scraped.", "display_name": "Stacks Collection Method", "name": "stacks_collection_method", "value": "jstack" }, { "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.", "display_name": "Automatically Restart Process", "name": "process_auto_restart", "value": "false" }, { "desc": "The maximum number of rolled log files to keep for HttpFS logs. Typically used by log4j.", "display_name": "HttpFS Maximum Log File Backups", "name": "max_log_backup_index", "value": "10" }, { "desc": "
The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.
Each trigger has all of the following fields:
triggerName
(mandatory) - the name of the trigger. This value must be unique for the specific role. triggerExpression
(mandatory) - a tsquery expression representing the trigger. streamThreshold
(optional) - the maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. enabled
(optional) - by default set to 'true'. If set to 'false' the trigger will not be evaluated.For example, here is a JSON formatted trigger configured for a DataNode that fires if the DataNode has more than 1500 file-descriptors opened:
[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]
Consult the trigger rules documentation for more details on how to write triggers using tsquery.
The JSON format is evolving and may change in the future and as a result backward compatibility is not guaranteed between releases at this time.
", "display_name": "Role Triggers", "name": "role_triggers", "value": "[]" }, { "desc": "Enables the health test that the HttpFS's process state is consistent with the role configuration", "display_name": "HttpFS Process Health Test", "name": "httpfs_scm_health_enabled", "value": "true" }, { "desc": "Password of the keystore used by HttpFS role for SSL.", "display_name": "HttpFS Keystore Password", "name": "httpfs_https_keystore_password", "value": null }, { "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", "display_name": "Cgroup Memory Soft Limit", "name": "rm_memory_soft_limit", "value": "-1" }, { "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.", "display_name": "Cgroup Memory Hard Limit", "name": "rm_memory_hard_limit", "value": "-1" }, { "desc": "The period to review when computing unexpected exits.", "display_name": "Unexpected Exits Monitoring Period", "name": "unexpected_exits_window", "value": "5" }, { "desc": "The amount of stacks data that will be retained. After the retention limit is reached, the oldest data will be deleted.", "display_name": "Stacks Collection Data Retention", "name": "stacks_collection_data_retention", "value": "104857600" }, { "desc": "Directory where HttpFS will place its log files.", "display_name": "HttpFS Log Directory", "name": "httpfs_log_dir", "value": "/var/log/hadoop-httpfs" }, { "desc": "For advanced use only, a string to be inserted into log4j.properties for this role only.", "display_name": "HttpFS Logging Advanced Configuration Snippet (Safety Valve)", "name": "log4j_safety_valve", "value": null }, { "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.", "display_name": "Log Directory Free Space Monitoring Absolute Thresholds", "name": "log_directory_free_space_absolute_thresholds", "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}" }, { "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.", "display_name": "Maximum Process File Descriptors", "name": "rlimit_fds", "value": null }, { "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold", "display_name": "Enable Health Alerts for this Role", "name": "enable_alerts", "value": "true" }, { "desc": "For advanced use only, a string to be inserted into httpfs-site.xml for this role only.", "display_name": "HttpFS Advanced Configuration Snippet (Safety Valve) for httpfs-site.xml", "name": "httpfs_config_safety_valve", "value": null }, { "desc": "The maximum size, in megabytes, per log file for HttpFS logs. 
Typically used by log4j.", "display_name": "HttpFS Max Log Size", "name": "max_log_size", "value": "200" }, { "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.", "display_name": "Kill When Out of Memory", "name": "oom_sigkill_enabled", "value": "true" }, { "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.", "display_name": "Java Configuration Options for HttpFS", "name": "httpfs_java_opts", "value": "" }, { "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.", "display_name": "Dump Heap When Out of Memory", "name": "oom_heap_dump_enabled", "value": "false" }, { "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.", "display_name": "Unexpected Exits Thresholds", "name": "unexpected_exits_thresholds", "value": "{\"critical\":\"any\",\"warning\":\"never\"}" }, { "desc": "The secret to use for signing client authentication tokens.", "display_name": "Signature Secret", "name": "hdfs_httpfs_signature_secret", "value": "hadoop httpfs secret" }, { "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.", "display_name": "Enable Configuration Change Alerts", "name": "enable_config_alerts", "value": "false" }, { "desc": "Location of the keystore file used by HttpFS role for SSL.", "display_name": "HttpFS Keystore File", "name": "httpfs_https_keystore_file", "value": "/var/run/hadoop-httpfs/.keystore" }, { "desc": "The directory in which stacks logs will be placed. If not set, stacks will be logged into a stacks subdirectory of the role's log directory.", "display_name": "Stacks Collection Directory", "name": "stacks_collection_directory", "value": null }, { "desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.", "display_name": "Java Heap Size of HttpFS in Bytes", "name": "httpfs_java_heapsize", "value": "268435456" }, { "desc": "The port for the administration interface.", "display_name": "Administration Port", "name": "hdfs_httpfs_admin_port", "value": "14001" }, { "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.", "display_name": "Cgroup I/O Weight", "name": "rm_io_weight", "value": "500" }, { "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.", "display_name": "Cgroup CPU Shares", "name": "rm_cpu_shares", "value": "1024" }, { "desc": "The HTTP port where the REST interface to HDFS is available.", "display_name": "HTTP Port", "name": "hdfs_httpfs_http_port", "value": "14000" }, { "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it doesn't exist. However, if this directory already exists, role user must have write access to this directory. If this directory is shared amongst multiple roles, it should have 1777 permissions. 
Note that the heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.", "display_name": "Heap Dump Directory", "name": "oom_heap_dump_dir", "value": "/tmp" }, { "desc": "The frequency with which stacks will be collected.", "display_name": "Stacks Collection Frequency", "name": "stacks_collection_frequency", "value": "5.0" }, { "desc": "The minimum log level for HttpFS logs", "display_name": "HttpFS Logging Threshold", "name": "log_threshold", "value": "INFO" }, { "desc": "Use SSL for HttpFS.", "display_name": "Use SSL", "name": "httpfs_use_ssl", "value": "false" } ]