Merge "Add Kafka to CDH 5.5 and CDH 5.7"

Jenkins 2016-08-19 16:44:25 +00:00 committed by Gerrit Code Review
commit cdab2c2bfb
19 changed files with 2746 additions and 10 deletions

View File

@@ -0,0 +1,3 @@
---
features:
  - Kafka was added in CDH 5.5 and CDH 5.7
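For context, the new role reaches a cluster through a node group that lists KAFKA_BROKER among its processes. A minimal sketch as a Python dict, with illustrative name and count values (the node_processes key follows the usual Sahara template layout; only the KAFKA_BROKER process name is taken from this change):

    node_group = {
        'name': 'kafka-workers',      # hypothetical
        'count': 3,                   # hypothetical
        'node_processes': ['KAFKA_BROKER'],
    }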

View File

@@ -39,6 +39,7 @@ SQOOP_SERVICE_TYPE = 'SQOOP'
KS_INDEXER_SERVICE_TYPE = 'KS_INDEXER'
IMPALA_SERVICE_TYPE = 'IMPALA'
KMS_SERVICE_TYPE = 'KMS'
KAFKA_SERVICE_TYPE = 'KAFKA'

c_helper = config_helper.ConfigHelperV550()
@@ -53,6 +54,7 @@ class ClouderaUtilsV550(cu.ClouderaUtils):
    KMS_SERVICE_NAME = 'kms01'
    CM_API_VERSION = 8
    NAME_SERVICE = 'nameservice01'
    KAFKA_SERVICE_NAME = 'kafka01'

    def __init__(self):
        cu.ClouderaUtils.__init__(self)
@@ -86,6 +88,8 @@ class ClouderaUtilsV550(cu.ClouderaUtils):
            return cm_cluster.get_service(self.HDFS_SERVICE_NAME)
        elif role in ['YARN_STANDBYRM']:
            return cm_cluster.get_service(self.YARN_SERVICE_NAME)
        elif role in ['KAFKA_BROKER']:
            return cm_cluster.get_service(self.KAFKA_SERVICE_NAME)
        else:
            return super(ClouderaUtilsV550, self).get_service_by_role(
                role, cluster, instance)
@@ -141,6 +145,9 @@ class ClouderaUtilsV550(cu.ClouderaUtils):
        if self.pu.get_kms(cluster):
            cm_cluster.create_service(self.KMS_SERVICE_NAME,
                                      KMS_SERVICE_TYPE)
        if len(self.pu.get_kafka_brokers(cluster)) > 0:
            cm_cluster.create_service(self.KAFKA_SERVICE_NAME,
                                      KAFKA_SERVICE_TYPE)

    def await_agents(self, cluster, instances):
        self._await_agents(cluster, instances, c_helper.AWAIT_AGENTS_TIMEOUT)
@@ -222,6 +229,11 @@ class ClouderaUtilsV550(cu.ClouderaUtils):
            kms.update_config(self._get_configs(KMS_SERVICE_TYPE,
                                                cluster=cluster))
        if len(self.pu.get_kafka_brokers(cluster)) > 0:
            kafka = cm_cluster.get_service(self.KAFKA_SERVICE_NAME)
            kafka.update_config(self._get_configs(KAFKA_SERVICE_TYPE,
                                                  cluster=cluster))

    def _get_configs(self, service, cluster=None, instance=None):
        def get_hadoop_dirs(mount_points, suffix):
            return ','.join([x + suffix for x in mount_points])
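For illustration, the nested get_hadoop_dirs helper above simply suffixes each mount point and joins the results with commas; with hypothetical mount points:

    >>> get_hadoop_dirs(['/volumes/disk1', '/volumes/disk2'], '/fs/kafka')
    '/volumes/disk1/fs/kafka,/volumes/disk2/fs/kafka'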
@@ -364,10 +376,15 @@ class ClouderaUtilsV550(cu.ClouderaUtils):
                    self.pu.db_helper.get_sentry_db_password(cluster)
            }
        }
        kafka_confs = {
            'KAFKA': {
                'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
            }
        }
        all_confs = s_cfg.merge_configs(all_confs, hue_confs)
        all_confs = s_cfg.merge_configs(all_confs, hive_confs)
        all_confs = s_cfg.merge_configs(all_confs, sentry_confs)
        all_confs = s_cfg.merge_configs(all_confs, kafka_confs)
        all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)

        if instance:
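The KAFKA block above only pins the new service to the cluster's ZooKeeper; per-role defaults come from the JSON resources added later in this change, and cluster.cluster_configs is merged last, so user-supplied values win. A rough sketch of the effect, assuming s_cfg.merge_configs performs a recursive dict merge (its exact semantics are not shown in this change):

    defaults = {'KAFKA': {'zookeeper_service': 'zookeeper01'}}
    user = {'KAFKA': {'log.retention.hours': '24'}}
    # merging combines the per-service dicts key by key:
    # {'KAFKA': {'zookeeper_service': 'zookeeper01',
    #            'log.retention.hours': '24'}}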

View File

@@ -236,6 +236,13 @@ class ConfigHelperV550(c_h.ConfigHelper):
        self.kms_kms_confs = self._load_and_init_configs(
            'kms-kms.json', 'KMS', 'node')
        self.kafka_service = self._load_and_init_configs(
            'kafka-service.json', 'KAFKA', 'cluster')
        self.kafka_kafka_broker = self._load_and_init_configs(
            'kafka-kafka_broker.json', 'KAFKA', 'node')
        self.kafka_kafka_mirror_maker = self._load_and_init_configs(
            'kafka-kafka_mirror_maker.json', 'KAFKA', 'node')

    def get_required_anti_affinity(self, cluster):
        return self._get_config_value(cluster, self.REQUIRE_ANTI_AFFINITY)
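Each of the three Kafka JSON files registered here is a flat list of {desc, display_name, name, value} records; the full files appear later in this change. A minimal sketch of reading one of them directly, outside the plugin machinery:

    import json

    # Load the broker defaults shipped in this commit into a name -> value map.
    with open('kafka-kafka_broker.json') as f:
        defaults = {c['name']: c['value'] for c in json.load(f)}

    defaults['port']      # '9092'
    defaults['log.dirs']  # '/var/local/kafka/data'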

View File

@@ -47,6 +47,8 @@ PACKAGES = [
    'impala-state-store',
    'impala-catalog',
    'impala-shell',
    'kafka',
    'kafka-server',
    'keytrustee-keyprovider',
    'oozie',
    'oracle-j2sdk1.7',

View File

@@ -42,6 +42,7 @@ class PluginUtilsV550(pu.AbstractPluginUtils):
            'IMPALAD': 'ID',
            'JOBHISTORY': 'JS',
            'JOURNALNODE': 'JN',
            'KAFKA_BROKER': 'KB',
            'KMS': 'KMS',
            'MASTER': 'M',
            'NAMENODE': 'NN',
@@ -97,6 +98,9 @@ class PluginUtilsV550(pu.AbstractPluginUtils):
    def get_stdb_rm(self, cluster):
        return u.get_instance(cluster, 'YARN_STANDBYRM')

    def get_kafka_brokers(self, cluster):
        return u.get_instances(cluster, 'KAFKA_BROKER')

    def convert_process_configs(self, configs):
        p_dict = {
            "CLOUDERA": ['MANAGER'],
@@ -124,9 +128,10 @@ class PluginUtilsV550(pu.AbstractPluginUtils):
            "SOLR": ['SOLR_SERVER'],
            "SQOOP": ['SQOOP_SERVER'],
            "KMS": ['KMS'],
            'YARN_GATEWAY': ['YARN_GATEWAY'],
            'HDFS_GATEWAY': ['HDFS_GATEWAY'],
            "JOURNALNODE": ['JOURNALNODE']
            "YARN_GATEWAY": ['YARN_GATEWAY'],
            "HDFS_GATEWAY": ['HDFS_GATEWAY'],
            "JOURNALNODE": ['JOURNALNODE'],
            "KAFKA": ['KAFKA_BROKER']
        }
        if isinstance(configs, res.Resource):
            configs = configs.to_dict()
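The body of convert_process_configs is not shown in this hunk, but p_dict maps a service name to the processes it owns, so the new "KAFKA": ['KAFKA_BROKER'] entry routes broker process configs to the KAFKA service. A hypothetical inverse lookup illustrates the shape of the mapping (this helper is not part of the change):

    def service_for_process(process, p_dict):
        # e.g. service_for_process('KAFKA_BROKER', p_dict) == 'KAFKA'
        for service, processes in p_dict.items():
            if process in processes:
                return service
        return None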

View File

@@ -0,0 +1,476 @@
[
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "The port to give out to producers, consumers, and other brokers to use in establishing connections. This only needs to be set if this port is different from the port the server should bind to.",
"display_name": "Advertised Port",
"name": "advertised.port",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Additional Broker Java Options parameter.",
"display_name": "Suppress Parameter Validation: Additional Broker Java Options",
"name": "role_config_suppression_broker_java_opts",
"value": "false"
},
{
"desc": "If set, this is the hostname given out to producers, consumers, and other brokers to use in establishing connections. Never set this property at the group level; it should always be overriden on instance level.",
"display_name": "Advertised Host",
"name": "advertised.host.name",
"value": null
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HTTP Metric Report Host parameter.",
"display_name": "Suppress Parameter Validation: HTTP Metric Report Host",
"name": "role_config_suppression_kafka.http.metrics.host",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_kafka_kafka_broker_scm_health",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_ssl_server_keystore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Certificate Trust Store Password",
"name": "role_config_suppression_ssl_client_truststore_password",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Kafka Broker might connect to. This is used when Kafka Broker is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Kafka Broker TLS/SSL Certificate Trust Store File",
"name": "ssl_client_truststore_location",
"value": ""
},
{
"desc": "Port the HTTP metric reporter listens on.",
"display_name": "HTTP Metric Report Port",
"name": "kafka.http.metrics.port",
"value": "24042"
},
{
"desc": "Kafka broker port.",
"display_name": "TCP Port",
"name": "port",
"value": "9092"
},
{
"desc": "The log for a topic partition is stored as a directory of segment files. This setting controls the size to which a segment file can grow before a new segment is rolled over in the log. This value should be larger than message.max.bytes.",
"display_name": "Segment File Size",
"name": "log.segment.bytes",
"value": "1073741824"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>kafka-monitoring.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties",
"name": "kafka-monitoring.properties_role_safety_valve",
"value": null
},
{
"desc": "The frequency, in milliseconds, that the log cleaner checks whether any log segment is eligible for deletion, per retention policies.",
"display_name": "Data Retention Check Interval",
"name": "log.retention.check.interval.ms",
"value": "300000"
},
{
"desc": "The maximum number of rolled log files to keep for Kafka Broker logs. Typically used by log4j or logback.",
"display_name": "Kafka Broker Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Advertised Host parameter.",
"display_name": "Suppress Parameter Validation: Advertised Host",
"name": "role_config_suppression_advertised.host.name",
"value": "false"
},
{
"desc": "The minimum log level for Kafka Broker logs",
"display_name": "Kafka Broker Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "When computing the overall Kafka Broker health, consider the host's health.",
"display_name": "Kafka Broker Host Health Test",
"name": "kafka_broker_host_health_enabled",
"value": "true"
},
{
"desc": "Maximum size for the Java process heap memory. Passed to Java -Xmx. Measured in megabytes. Kafka does not generally require setting large heap sizes. It is better to let the file system cache utilize the available memory.",
"display_name": "Java Heap Size of Broker",
"name": "broker_max_heap_size",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_kafka_kafka_broker_swap_memory_usage",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "The number of I/O threads that the server uses for executing requests. You should have at least as many threads as you have disks.",
"display_name": "Number of I/O Threads",
"name": "num.io.threads",
"value": "8"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_ssl_server_keystore_location",
"value": "false"
},
{
"desc": "Protocol to be used for inter-broker communication.",
"display_name": "Inter Broker Protocol",
"name": "security.inter.broker.protocol",
"value": "PLAINTEXT"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties",
"name": "role_config_suppression_ssl.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>kafka.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties",
"name": "kafka.properties_role_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore Key Password",
"name": "role_config_suppression_ssl_server_keystore_keypassword",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out (in hours). Secondary to the log.retention.ms property. The special value of -1 is interpreted as unlimited. This property is deprecated in Kafka 1.4.0. Use log.retention.ms.",
"display_name": "Data Retention Hours",
"name": "log.retention.hours",
"value": "168"
},
{
"desc": "Kafka broker secure port.",
"display_name": "TLS/SSL Port",
"name": "ssl_port",
"value": "9093"
},
{
"desc": "The amount of data to retain in the log for each topic-partition. This is the limit per partition: multiply by the number of partitions to get the total data retained for the topic. The special value of -1 is interpreted as unlimited. If both log.retention.ms and log.retention.bytes are set, a segment is deleted when either limit is exceeded.",
"display_name": "Data Retention Size",
"name": "log.retention.bytes",
"value": "-1"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Host the HTTP metric reporter binds to.",
"display_name": "HTTP Metric Report Host",
"name": "kafka.http.metrics.host",
"value": "0.0.0.0"
},
{
"desc": "The maximum size, in megabytes, per log file for Kafka Broker logs. Typically used by log4j or logback.",
"display_name": "Kafka Broker Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties",
"name": "role_config_suppression_kafka-monitoring.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "kafka_broker_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Kafka Broker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_BROKER_role_env_safety_valve",
"value": null
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Port for JMX.",
"display_name": "JMX Port",
"name": "jmx_port",
"value": "9393"
},
{
"desc": "The log directory for log files of the role Kafka Broker.",
"display_name": "Kafka Broker Log Directory",
"name": "log_dir",
"value": "/var/log/kafka"
},
{
"desc": "The maximum time before a new log segment is rolled out. This property is used in Cloudera Kafka 1.4.0 and later in place of log.roll.hours.",
"display_name": "Data Log Roll Time",
"name": "log.roll.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties",
"name": "role_config_suppression_kafka.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The password that protects the private key contained in the JKS keystore used when Kafka Broker is acting as a TLS/SSL server.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore Key Password",
"name": "ssl_server_keystore_keypassword",
"value": ""
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Kafka Broker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_kafka_kafka_broker_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Data Directories parameter.",
"display_name": "Suppress Parameter Validation: Data Directories",
"name": "role_config_suppression_log.dirs",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out. If both log.retention.ms and log.retention.bytes are set, a segment is deleted when either limit is exceeded. The special value of -1 is interpreted as unlimited. This property is used in Kafka 1.4.0 and later in place of log.retention.hours.",
"display_name": "Data Retention Time",
"name": "log.retention.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Certificate Trust Store File",
"name": "role_config_suppression_ssl_client_truststore_location",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out (in hours). This property is deprecated in Cloudera Kafka 1.4.0; use log.roll.ms.",
"display_name": "Data Log Roll Hours",
"name": "log.roll.hours",
"value": "168"
},
{
"desc": "These arguments are passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags are passed here.",
"display_name": "Additional Broker Java Options",
"name": "broker_java_opts",
"value": "-server -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Kafka Broker is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore File Location",
"name": "ssl_server_keystore_location",
"value": ""
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_kafka_kafka_broker_host_health",
"value": "false"
},
{
"desc": "A list of one or more directories in which Kafka data is stored. Each new partition created is placed in the directory that currently has the fewest partitions. Each directory should be on its own separate drive.",
"display_name": "Data Directories",
"name": "log.dirs",
"value": "/var/local/kafka/data"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Segment File Size parameter.",
"display_name": "Suppress Parameter Validation: Segment File Size",
"name": "role_config_suppression_log.segment.bytes",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Broker ID parameter.",
"display_name": "Suppress Parameter Validation: Broker ID",
"name": "role_config_suppression_broker.id",
"value": "false"
},
{
"desc": "The password for the Kafka Broker JKS keystore file.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore File Password",
"name": "ssl_server_keystore_password",
"value": ""
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_kafka_kafka_broker_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_kafka_broker_role_env_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties",
"name": "ssl.properties_role_safety_valve",
"value": null
},
{
"desc": "Encrypt communication between clients and Kafka Broker using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Kafka Broker",
"name": "ssl_enabled",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "ID uniquely identifying each broker. Never set this property at the group level; it should always be overridden on instance level.",
"display_name": "Broker ID",
"name": "broker.id",
"value": null
},
{
"desc": "Maximum number of connections allowed from each IP address.",
"display_name": "Maximum Connections per IP Address",
"name": "max.connections.per.ip",
"value": null
},
{
"desc": "Enables the health test that the Kafka Broker's process state is consistent with the role configuration",
"display_name": "Kafka Broker Process Health Test",
"name": "kafka_broker_scm_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Number of I/O Threads parameter.",
"display_name": "Suppress Parameter Validation: Number of I/O Threads",
"name": "role_config_suppression_num.io.threads",
"value": "false"
},
{
"desc": "The password for the Kafka Broker TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Kafka Broker TLS/SSL Certificate Trust Store Password",
"name": "ssl_client_truststore_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Heap Size of Broker parameter.",
"display_name": "Suppress Parameter Validation: Java Heap Size of Broker",
"name": "role_config_suppression_broker_max_heap_size",
"value": "false"
},
{
"desc": "Client authentication mode for SSL connections. Default is none, could be set to \"required\", i.e., client authentication is required or to \"requested\", i.e., client authentication is requested and client without certificates can still connect.",
"display_name": "SSL Client Authentication",
"name": "ssl.client.auth",
"value": "none"
},
{
"desc": "Authenticate a SASL connection with zookeeper, if Kerberos authentication is enabled. It also allows a broker to set SASL ACL on zookeeper nodes which locks these nodes down so that only kafka broker can modify.",
"display_name": "Authenticate Zookeeper Connection",
"name": "authenticate.zookeeper.connection",
"value": "true"
}
]
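A quick sanity check of the size and retention defaults defined above (values copied from the JSON; the arithmetic is only illustrative):

    segment_bytes = 1073741824    # log.segment.bytes: 1 GiB per segment
    retention_hours = 168         # log.retention.hours: 7 days
    check_interval_ms = 300000    # log.retention.check.interval.ms: 5 minutes

    assert segment_bytes == 1024 ** 3
    assert retention_hours // 24 == 7
    assert check_interval_ms // 60000 == 5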

View File

@@ -0,0 +1,482 @@
[
{
"desc": "The maximum size, in megabytes, per log file for Kafka MirrorMaker logs. Typically used by log4j or logback.",
"display_name": "Kafka MirrorMaker Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Stop the entire mirror maker when a send failure occurs.",
"display_name": "Abort on Send Failure",
"name": "abort.on.send.failure",
"value": "true"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Server JKS Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Server JKS Keystore Key Password",
"name": "role_config_suppression_ssl_server_keystore_keypassword",
"value": "false"
},
{
"desc": "Maximum number of bytes that can be buffered between producer and consumer. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Queue Size",
"name": "queue.byte.size",
"value": "100000000"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_ssl_server_keystore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Certificate Trust Store Password",
"name": "role_config_suppression_ssl_client_truststore_password",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_kafka_kafka_mirror_maker_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_kafka_mirror_maker_role_env_safety_valve",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Only required if source Kafka cluster requires client authentication.",
"display_name": "Source Kafka Cluster's Client Auth",
"name": "source.ssl.client.auth",
"value": "false"
},
{
"desc": "When computing the overall Kafka MirrorMaker health, consider the host's health.",
"display_name": "Kafka MirrorMaker Host Health Test",
"name": "kafka_mirror_maker_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_client.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_client.properties",
"name": "role_config_suppression_ssl_client.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for Kafka MirrorMaker logs. Typically used by log4j or logback.",
"display_name": "Kafka MirrorMaker Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Topic Blacklist parameter.",
"display_name": "Suppress Parameter Validation: Topic Blacklist",
"name": "role_config_suppression_blacklist",
"value": "false"
},
{
"desc": "The minimum log level for Kafka MirrorMaker logs",
"display_name": "Kafka MirrorMaker Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Offset commit interval in milliseconds.",
"display_name": "Offset Commit Interval",
"name": "offset.commit.interval.ms",
"value": "60000"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>mirror_maker_producers.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_producers.properties",
"name": "mirror_maker_producers.properties_role_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_kafka_kafka_mirror_maker_host_health",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "kafka_mirror_maker_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_server.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_server.properties",
"name": "role_config_suppression_ssl_server.properties_role_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "The password for the Kafka MirrorMaker JKS keystore file.",
"display_name": "Kafka MirrorMaker TLS/SSL Server JKS Keystore File Password",
"name": "ssl_server_keystore_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Destination Broker List parameter.",
"display_name": "Suppress Parameter Validation: Destination Broker List",
"name": "role_config_suppression_bootstrap.servers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_ssl_server_keystore_location",
"value": "false"
},
{
"desc": "Number of producer instances. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Number of Producers",
"name": "num.producers",
"value": "1"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl_client.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_client.properties",
"name": "ssl_client.properties_role_safety_valve",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Kafka MirrorMaker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_MIRROR_MAKER_role_env_safety_valve",
"value": null
},
{
"desc": "Name of the consumer group used by MirrorMaker. When multiple role instances are configured with the same topics and same group ID, the role instances load-balance replication for the topics. When multiple role instances are configured with the same topics but different group ID, each role instance replicates all the events for those topics - this can be used to replicate the source cluster into multiple destination clusters.",
"display_name": "Consumer Group ID",
"name": "group.id",
"value": "cloudera_mirrormaker"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_kafka_kafka_mirror_maker_swap_memory_usage",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Consumer Group ID parameter.",
"display_name": "Suppress Parameter Validation: Consumer Group ID",
"name": "role_config_suppression_group.id",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Consumer Rebalance Listener Arguments parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Consumer Rebalance Listener Arguments",
"name": "role_config_suppression_consumer.rebalance.listener.args",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Kafka MirrorMaker might connect to. This is used when Kafka MirrorMaker is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Kafka MirrorMaker TLS/SSL Certificate Trust Store File",
"name": "ssl_client_truststore_location",
"value": ""
},
{
"desc": "Enables the health test that the Kafka MirrorMaker's process state is consistent with the role configuration",
"display_name": "Kafka MirrorMaker Process Health Test",
"name": "kafka_mirror_maker_scm_health_enabled",
"value": "true"
},
{
"desc": "Number of messages that are buffered between producer and consumer. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Message Queue Size",
"name": "queue.size",
"value": "10000"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "A consumer rebalance listener of type ConsumerRebalanceListener to be invoked when MirrorMaker's consumer rebalances.",
"display_name": "MirrorMaker Consumer Rebalance Listener",
"name": "consumer.rebalance.listener",
"value": ""
},
{
"desc": "Run with MirrorMaker settings that eliminate potential loss of data. This impacts performance, but is highly recommended. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Avoid Data Loss",
"name": "no.data.loss",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_producers.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_producers.properties",
"name": "role_config_suppression_mirror_maker_producers.properties_role_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>mirror_maker_consumers.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_consumers.properties",
"name": "mirror_maker_consumers.properties_role_safety_valve",
"value": null
},
{
"desc": "Protocol to be used for communication with source kafka cluster.",
"display_name": "Source Kafka Cluster's Security Protocol",
"name": "source.security.protocol",
"value": "PLAINTEXT"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Consumer Rebalance Listener parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Consumer Rebalance Listener",
"name": "role_config_suppression_consumer.rebalance.listener",
"value": "false"
},
{
"desc": "Protocol to be used for communication with destination kafka cluster.",
"display_name": "Destination Kafka Cluster's Security Protocol",
"name": "destination.security.protocol",
"value": "PLAINTEXT"
},
{
"desc": "Regular expression that represents a set of topics to mirror. Note that whitelist and blacklist parameters are mutually exclusive. If both are defined, only the whilelist is used.",
"display_name": "Topic Whitelist",
"name": "whitelist",
"value": ""
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_kafka_kafka_mirror_maker_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Topic Whitelist parameter.",
"display_name": "Suppress Parameter Validation: Topic Whitelist",
"name": "role_config_suppression_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Certificate Trust Store File",
"name": "role_config_suppression_ssl_client_truststore_location",
"value": "false"
},
{
"desc": "Port for JMX.",
"display_name": "JMX Port",
"name": "jmx_port",
"value": "9394"
},
{
"desc": "Arguments used by MirrorMaker message handler.",
"display_name": "MirrorMaker Message Handler Arguments",
"name": "message.handler.args",
"value": ""
},
{
"desc": "List of brokers on destination cluster. This should be more than one, for high availability, but there's no need to list all brokers.",
"display_name": "Destination Broker List",
"name": "bootstrap.servers",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Source Broker List parameter.",
"display_name": "Suppress Parameter Validation: Source Broker List",
"name": "role_config_suppression_source.bootstrap.servers",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_kafka_kafka_mirror_maker_file_descriptor",
"value": "false"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Kafka MirrorMaker is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Kafka MirrorMaker TLS/SSL Server JKS Keystore File Location",
"name": "ssl_server_keystore_location",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Message Handler parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Message Handler",
"name": "role_config_suppression_message.handler",
"value": "false"
},
{
"desc": "Only required if destination Kafka cluster requires client authentication.",
"display_name": "Destination Kafka Cluster's Client Auth",
"name": "destination.ssl.client.auth",
"value": "false"
},
{
"desc": "A MirrorMaker message handler of type MirrorMakerMessageHandler that will process every record in-between producer and consumer.",
"display_name": "MirrorMaker Message Handler",
"name": "message.handler",
"value": ""
},
{
"desc": "List of brokers on source cluster. This should be more than one, for high availability, but there's no need to list all brokers.",
"display_name": "Source Broker List",
"name": "source.bootstrap.servers",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_consumers.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_consumers.properties",
"name": "role_config_suppression_mirror_maker_consumers.properties_role_safety_valve",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl_server.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_server.properties",
"name": "ssl_server.properties_role_safety_valve",
"value": null
},
{
"desc": "Number of consumer threads.",
"display_name": "Number of Consumer Threads",
"name": "num.streams",
"value": "1"
},
{
"desc": "The password that protects the private key contained in the JKS keystore used when Kafka MirrorMaker is acting as a TLS/SSL server.",
"display_name": "Kafka MirrorMaker TLS/SSL Server JKS Keystore Key Password",
"name": "ssl_server_keystore_keypassword",
"value": ""
},
{
"desc": "Encrypt communication between clients and Kafka MirrorMaker using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Kafka MirrorMaker",
"name": "ssl_enabled",
"value": "false"
},
{
"desc": "Regular expression that represents a set of topics to avoid mirroring. Note that whitelist and blacklist parameters are mutually exclusive. If both are defined, only the whilelist is used. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Topic Blacklist",
"name": "blacklist",
"value": ""
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Arguments used by MirrorMaker consumer rebalance listener.",
"display_name": "MirrorMaker Consumer Rebalance Listener Arguments",
"name": "consumer.rebalance.listener.args",
"value": ""
},
{
"desc": "The log directory for log files of the role Kafka MirrorMaker.",
"display_name": "Kafka MirrorMaker Log Directory",
"name": "log_dir",
"value": "/var/log/kafka"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Message Handler Arguments parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Message Handler Arguments",
"name": "role_config_suppression_message.handler.args",
"value": "false"
},
{
"desc": "The password for the Kafka MirrorMaker TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Kafka MirrorMaker TLS/SSL Certificate Trust Store Password",
"name": "ssl_client_truststore_password",
"value": ""
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
}
]
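The MirrorMaker role mixes source-cluster and destination-cluster settings in one flat list, distinguished only by the source. and destination. name prefixes. A small sketch of splitting them apart after loading the file (same record shape as the broker file above):

    import json

    # Build a name -> value map, then partition by prefix.
    with open('kafka-kafka_mirror_maker.json') as f:
        mm = {c['name']: c['value'] for c in json.load(f)}

    source = {k: v for k, v in mm.items() if k.startswith('source.')}
    dest = {k: v for k, v in mm.items() if k.startswith('destination.')}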

View File

@@ -0,0 +1,374 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Controlled Shutdown Maximum Attempts parameter.",
"display_name": "Suppress Parameter Validation: Controlled Shutdown Maximum Attempts",
"name": "service_config_suppression_controlled.shutdown.max.retries",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_kafka_service_env_safety_valve",
"value": "false"
},
{
"desc": "The default number of partitions for automatically created topics.",
"display_name": "Default Number of Partitions",
"name": "num.partitions",
"value": "1"
},
{
"desc": "The amount of time to retain delete messages for log compacted topics. Once a consumer has seen an original message you need to ensure it also sees the delete message. If you removed the delete message too quickly, this might not happen. As a result there is a configurable delete retention time.",
"display_name": "Log Compaction Delete Record Retention Time",
"name": "log.cleaner.delete.retention.ms",
"value": "604800000"
},
{
"desc": "Enables auto creation of topics on the server. If this is set to true, then attempts to produce, consume, or fetch metadata for a non-existent topic automatically create the topic with the default replication factor and number of partitions.",
"display_name": "Topic Auto Creation",
"name": "auto.create.topics.enable",
"value": "true"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Number of threads used to replicate messages from leaders. Increasing this value increases the degree of I/O parallelism in the follower broker.",
"display_name": "Number of Replica Fetchers",
"name": "num.replica.fetchers",
"value": "1"
},
{
"desc": "Enables Kafka monitoring.",
"display_name": "Enable Kafka Monitoring (Note: Requires Kafka-1.3.0 parcel or higher)",
"name": "monitoring.enabled",
"value": "true"
},
{
"desc": "If automatic leader rebalancing is enabled, the controller tries to balance leadership for partitions among the brokers by periodically returning leadership for each partition to the preferred replica, if it is available.",
"display_name": "Enable Automatic Leader Rebalancing",
"name": "auto.leader.rebalance.enable",
"value": "true"
},
{
"desc": "Number of unsuccessful controlled shutdown attempts before executing an unclean shutdown. For example, the default value of 3 means that the system will attempt a controlled shutdown 3 times before executing an unclean shutdown.",
"display_name": "Controlled Shutdown Maximum Attempts",
"name": "controlled.shutdown.max.retries",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "The number of partitions for the offset commit topic. Since changing this after deployment is currently unsupported, we recommend using a higher setting for production (for example, 100-200).",
"display_name": "Offset Commit Topic Number of Partitions",
"name": "offsets.topic.num.partitions",
"value": "50"
},
{
"desc": "If a follower has not sent any fetch requests, nor has it consumed up to the leader's log end offset during this time, the leader removes the follower from the ISR set.",
"display_name": "Allowed Replica Time Lag",
"name": "replica.lag.time.max.ms",
"value": "10000"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "kafka"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Replica Maximum Fetch Size parameter.",
"display_name": "Suppress Parameter Validation: Replica Maximum Fetch Size",
"name": "service_config_suppression_replica.fetch.max.bytes",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "The replication factor for the offset commit topic. A higher setting is recommended in order to ensure higher availability (for example, 3 or 4) . If the offsets topic is created when there are fewer brokers than the replication factor, then the offsets topic is created with fewer replicas.",
"display_name": "Offset Commit Topic Replication Factor",
"name": "offsets.topic.replication.factor",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the Kafka Broker Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Kafka Broker Count Validator",
"name": "service_config_suppression_kafka_broker_count_validator",
"value": "false"
},
{
"desc": "Controls how frequently the log cleaner will attempt to clean the log. This ratio bounds the maximum space wasted in the log by duplicates. For example, at 0.5 at most 50% of the log could be duplicates. A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log.",
"display_name": "Log Cleaner Clean Ratio",
"name": "log.cleaner.min.cleanable.ratio",
"value": "0.5"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "kafka"
},
{
"desc": "Enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so might result in data loss.",
"display_name": "Enable Unclean Leader Election",
"name": "unclean.leader.election.enable",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default Replication Factor parameter.",
"display_name": "Suppress Parameter Validation: Default Replication Factor",
"name": "service_config_suppression_default.replication.factor",
"value": "false"
},
{
"desc": "Enables controlled shutdown of the broker. If enabled, the broker moves all leaders on it to other brokers before shutting itself down. This reduces the unavailability window during shutdown.",
"display_name": "Enable Controlled Shutdown",
"name": "controlled.shutdown.enable",
"value": "true"
},
{
"desc": "The frequency with which to check for leader imbalance.",
"display_name": "Leader Imbalance Check Interval",
"name": "leader.imbalance.check.interval.seconds",
"value": "300"
},
{
"desc": "The maximum number of bytes to fetch for each partition in fetch requests replicas send to the leader. This value should be larger than message.max.bytes.",
"display_name": "Replica Maximum Fetch Size",
"name": "replica.fetch.max.bytes",
"value": "1048576"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Session Timeout parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Session Timeout",
"name": "service_config_suppression_zookeeper.session.timeout.ms",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Number of Replica Fetchers parameter.",
"display_name": "Suppress Parameter Validation: Number of Replica Fetchers",
"name": "service_config_suppression_num.replica.fetchers",
"value": "false"
},
{
"desc": "Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second. Only respected by Kafka 2.0 or later.",
"display_name": "Default Producer Quota",
"name": "quota.producer.default",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Kafka Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_service_env_safety_valve",
"value": null
},
{
"desc": "The maximum size of a message that the server can receive. It is important that this property be in sync with the maximum fetch size the consumers use, or else an unruly producer could publish messages too large for consumers to consume.",
"display_name": "Maximum Message Size",
"name": "message.max.bytes",
"value": "1000000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Leader Imbalance Allowed Per Broker parameter.",
"display_name": "Suppress Parameter Validation: Leader Imbalance Allowed Per Broker",
"name": "service_config_suppression_leader.imbalance.per.broker.percentage",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Enables the log cleaner to compact topics with cleanup.policy=compact on this cluster.",
"display_name": "Enable Log Compaction",
"name": "log.cleaner.enable",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Minimum Number of Replicas in ISR parameter.",
"display_name": "Suppress Parameter Validation: Minimum Number of Replicas in ISR",
"name": "service_config_suppression_min.insync.replicas",
"value": "false"
},
{
"desc": "ZNode in ZooKeeper that should be used as a root for this Kafka cluster.",
"display_name": "ZooKeeper Root",
"name": "zookeeper.chroot",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the Kafka MirrorMaker Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Kafka MirrorMaker Count Validator",
"name": "service_config_suppression_kafka_mirror_maker_count_validator",
"value": "false"
},
{
"desc": "The number of messages written to a log partition before triggering an fsync on the log. Setting this lower syncs data to disk more often, but has a major impact on performance. We recommend use of replication for durability rather than depending on single-server fsync; however, this setting can be used to be extra certain. If used in conjunction with log.flush.interval.ms, the log is flushed when either criteria is met.",
"display_name": "Log Flush Message Interval",
"name": "log.flush.interval.messages",
"value": null
},
{
"desc": "The number of background threads to use for log cleaning.",
"display_name": "Number of Log Cleaner Threads",
"name": "log.cleaner.threads",
"value": "1"
},
{
"desc": "Enable Kerberos authentication for this KAFKA service.",
"display_name": "Enable Kerberos Authentication",
"name": "kerberos.auth.enable",
"value": "false"
},
{
"desc": "List of metric reporter class names. HTTP reporter is included by default.",
"display_name": "List of Metric Reporters",
"name": "kafka.metrics.reporters",
"value": "nl.techop.kafka.KafkaHttpMetricsReporter"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Root parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Root",
"name": "service_config_suppression_zookeeper.chroot",
"value": "false"
},
{
"desc": "The frequency, in ms, with which the log flusher checks whether any log is eligible to be flushed to disk.",
"display_name": "Log Flush Scheduler Interval",
"name": "log.flush.scheduler.interval.ms",
"value": null
},
{
"desc": "The minimum number of replicas in the in-sync replica needed to satisfy a produce request where required.acks=-1 (that is, all).",
"display_name": "Minimum Number of Replicas in ISR",
"name": "min.insync.replicas",
"value": "1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Offset Commit Topic Replication Factor parameter.",
"display_name": "Suppress Parameter Validation: Offset Commit Topic Replication Factor",
"name": "service_config_suppression_offsets.topic.replication.factor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Leader Imbalance Check Interval parameter.",
"display_name": "Suppress Parameter Validation: Leader Imbalance Check Interval",
"name": "service_config_suppression_leader.imbalance.check.interval.seconds",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "If the server fails to send a heartbeat to ZooKeeper within this period of time, it is considered dead. If set too low, ZooKeeper might falsely consider a server dead; if set too high, ZooKeeper might take too long to recognize a dead server.",
"display_name": "ZooKeeper Session Timeout",
"name": "zookeeper.session.timeout.ms",
"value": "6000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Maximum Message Size parameter.",
"display_name": "Suppress Parameter Validation: Maximum Message Size",
"name": "service_config_suppression_message.max.bytes",
"value": "false"
},
{
"desc": "Enables topic deletion using admin tools. When delete topic is disabled, deleting topics through the admin tools has no effect.",
"display_name": "Enable Delete Topic",
"name": "delete.topic.enable",
"value": "true"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "If a replica falls more than this number of messages behind the leader, the leader removes the follower from the ISR and treats it as dead. This property is deprecated in Kafka 1.4.0; higher versions use only replica.lag.time.max.ms.",
"display_name": "Allowed Replica Message Lag",
"name": "replica.lag.max.messages",
"value": "4000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default Number of Partitions parameter.",
"display_name": "Suppress Parameter Validation: Default Number of Partitions",
"name": "service_config_suppression_num.partitions",
"value": "false"
},
{
"desc": "Name of the ZooKeeper service that this Kafka service instance depends on",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "The total memory used for log deduplication across all cleaner threads. This memory is statically allocated and will not cause GC problems.",
"display_name": "Log Cleaner Deduplication Buffer Size",
"name": "log.cleaner.dedupe.buffer.size",
"value": "134217728"
},
{
"desc": "Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second. Only respected by Kafka 2.0 or later.",
"display_name": "Default Consumer Quota",
"name": "quota.consumer.default",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Offset Commit Topic Number of Partitions parameter.",
"display_name": "Suppress Parameter Validation: Offset Commit Topic Number of Partitions",
"name": "service_config_suppression_offsets.topic.num.partitions",
"value": "false"
},
{
"desc": "The default replication factor for automatically created topics.",
"display_name": "Default Replication Factor",
"name": "default.replication.factor",
"value": "1"
},
{
"desc": "The maximum time between fsync calls on the log. If used in conjuction with log.flush.interval.messages, the log is flushed when either criteria is met.",
"display_name": "Log Flush Time Interval",
"name": "log.flush.interval.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the List of Metric Reporters parameter.",
"display_name": "Suppress Parameter Validation: List of Metric Reporters",
"name": "service_config_suppression_kafka.metrics.reporters",
"value": "false"
},
{
"desc": "The percentage of leader imbalance allowed per broker. The controller rebalances leadership if this ratio goes above the configured value per broker.",
"display_name": "Leader Imbalance Allowed Per Broker",
"name": "leader.imbalance.per.broker.percentage",
"value": "10"
}
]
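Every entry in this service-level file is a flat record of desc, display_name, name, and value. A minimal sketch of collapsing such a file into a name-to-default map, assuming only the layout above (service_defaults and the file path are illustrative, not part of the plugin):

import json

def service_defaults(path):
    # Collapse [{desc, display_name, name, value}, ...] into
    # {name: value}, skipping entries with a null default.
    with open(path) as f:
        entries = json.load(f)
    return {e['name']: e['value']
            for e in entries if e['value'] is not None}

defaults = service_defaults('kafka-service.json')
# e.g. defaults['offsets.topic.replication.factor'] == '3'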

View File

@ -60,6 +60,7 @@ class VersionHandler(avm.BaseVersionHandler):
"SQOOP": ['SQOOP_SERVER'],
"SENTRY": ['SENTRY_SERVER'],
"KMS": ['KMS'],
"KAFKA": ['KAFKA_BROKER'],
"YARN_GATEWAY": [],
"RESOURCEMANAGER": [],

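The map above ties each CM service to the node processes it may own, which is what lets the version handler reject unknown processes in a node group. A rough sketch of that kind of check, under the assumption that validation walks this mapping (SERVICE_ROLES and unknown_processes are illustrative names, not the plugin's API):

# Excerpt of the service -> roles map, as extended by this change.
SERVICE_ROLES = {
    "KAFKA": ['KAFKA_BROKER'],
    "KMS": ['KMS'],
}

def unknown_processes(node_processes):
    # Flag any requested process that no service claims.
    known = {r for roles in SERVICE_ROLES.values() for r in roles}
    return [p for p in node_processes if p not in known]

assert unknown_processes(['KAFKA_BROKER']) == []
assert unknown_processes(['KAFKA_CONSUMER']) == ['KAFKA_CONSUMER']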
View File

@ -39,6 +39,7 @@ SQOOP_SERVICE_TYPE = 'SQOOP'
KS_INDEXER_SERVICE_TYPE = 'KS_INDEXER'
IMPALA_SERVICE_TYPE = 'IMPALA'
KMS_SERVICE_TYPE = 'KMS'
KAFKA_SERVICE_TYPE = 'KAFKA'
c_helper = config_helper.ConfigHelperV570()
@ -51,6 +52,7 @@ class ClouderaUtilsV570(cu.ClouderaUtils):
IMPALA_SERVICE_NAME = 'impala01'
SENTRY_SERVICE_NAME = 'sentry01'
KMS_SERVICE_NAME = 'kms01'
KAFKA_SERVICE_NAME = 'kafka01'
CM_API_VERSION = 8
NAME_SERVICE = 'nameservice01'
@ -86,6 +88,8 @@ class ClouderaUtilsV570(cu.ClouderaUtils):
return cm_cluster.get_service(self.HDFS_SERVICE_NAME)
elif role in ['YARN_STANDBYRM']:
return cm_cluster.get_service(self.YARN_SERVICE_NAME)
elif role in ['KAFKA_BROKER']:
return cm_cluster.get_service(self.KAFKA_SERVICE_NAME)
else:
return super(ClouderaUtilsV570, self).get_service_by_role(
role, cluster, instance)
@ -141,6 +145,9 @@ class ClouderaUtilsV570(cu.ClouderaUtils):
if self.pu.get_kms(cluster):
cm_cluster.create_service(self.KMS_SERVICE_NAME,
KMS_SERVICE_TYPE)
if len(self.pu.get_kafka_brokers(cluster)) > 0:
cm_cluster.create_service(self.KAFKA_SERVICE_NAME,
KAFKA_SERVICE_TYPE)
def await_agents(self, cluster, instances):
self._await_agents(cluster, instances, c_helper.AWAIT_AGENTS_TIMEOUT)
@ -221,6 +228,10 @@ class ClouderaUtilsV570(cu.ClouderaUtils):
kms = cm_cluster.get_service(self.KMS_SERVICE_NAME)
kms.update_config(self._get_configs(KMS_SERVICE_TYPE,
cluster=cluster))
if len(self.pu.get_kafka_brokers(cluster)) > 0:
kafka = cm_cluster.get_service(self.KAFKA_SERVICE_NAME)
kafka.update_config(self._get_configs(KAFKA_SERVICE_TYPE,
cluster=cluster))
def _get_configs(self, service, cluster=None, instance=None):
def get_hadoop_dirs(mount_points, suffix):
@ -364,10 +375,15 @@ class ClouderaUtilsV570(cu.ClouderaUtils):
self.pu.db_helper.get_sentry_db_password(cluster)
}
}
kafka_confs = {
'KAFKA': {
'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
}
}
all_confs = s_cfg.merge_configs(all_confs, hue_confs)
all_confs = s_cfg.merge_configs(all_confs, hive_confs)
all_confs = s_cfg.merge_configs(all_confs, sentry_confs)
all_confs = s_cfg.merge_configs(all_confs, kafka_confs)
all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)
if instance:

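The kafka_confs block rides the same s_cfg.merge_configs chain as the Hue, Hive, and Sentry configs, with cluster.cluster_configs merged last so user-supplied values win. A sketch of those semantics, assuming merge_configs is a recursive dict merge where later arguments take precedence (this is not sahara's actual implementation, and the values are illustrative):

def merge_configs(base, extra):
    # Recursive dict merge; scalar values from `extra` win on conflict.
    merged = dict(base)
    for key, value in extra.items():
        if isinstance(merged.get(key), dict) and isinstance(value, dict):
            merged[key] = merge_configs(merged[key], value)
        else:
            merged[key] = value
    return merged

confs = merge_configs(
    {'KAFKA': {'zookeeper_service': 'zookeeper01'}},
    {'KAFKA': {'log.retention.hours': '168'}})
assert confs == {'KAFKA': {'zookeeper_service': 'zookeeper01',
                           'log.retention.hours': '168'}}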
View File

@ -236,6 +236,13 @@ class ConfigHelperV570(c_h.ConfigHelper):
self.kms_kms_confs = self._load_and_init_configs(
'kms-kms.json', 'KMS', 'node')
self.kafka_service = self._load_and_init_configs(
'kafka-service.json', 'KAFKA', 'cluster')
self.kafka_kafka_broker = self._load_and_init_configs(
'kafka-kafka_broker.json', 'KAFKA', 'node')
self.kafka_kafka_mirror_maker = self._load_and_init_configs(
'kafka-kafka_mirror_maker.json', 'KAFKA', 'node')
def get_required_anti_affinity(self, cluster):
return self._get_config_value(cluster, self.REQUIRE_ANTI_AFFINITY)

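_load_and_init_configs is called once per scope here: the service-wide file is registered at cluster scope, and the broker and MirrorMaker files at node scope. A hypothetical stand-in showing only the shape of that registration (the real helper wraps entries in sahara Config objects; this sketch merely tags each raw entry):

def load_and_init_configs(entries, target, scope):
    # Tag every raw JSON entry with the service it applies to and the
    # scope ('cluster' or 'node') at which it is configurable.
    return [dict(entry, applicable_target=target, scope=scope)
            for entry in entries]

kafka_service = load_and_init_configs(
    [{'name': 'zookeeper.chroot', 'value': ''}], 'KAFKA', 'cluster')
assert kafka_service[0]['scope'] == 'cluster'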
View File

@ -47,6 +47,8 @@ PACKAGES = [
'impala-state-store',
'impala-catalog',
'impala-shell',
'kafka',
'kafka-server',
'keytrustee-keyprovider',
'oozie',
'oracle-j2sdk1.7',

View File

@ -42,6 +42,7 @@ class PluginUtilsV570(pu.AbstractPluginUtils):
'IMPALAD': 'ID',
'JOBHISTORY': 'JS',
'JOURNALNODE': 'JN',
'KAFKA_BROKER': 'KB',
'KMS': 'KMS',
'MASTER': 'M',
'NAMENODE': 'NN',
@ -97,6 +98,9 @@ class PluginUtilsV570(pu.AbstractPluginUtils):
def get_stdb_rm(self, cluster):
return u.get_instance(cluster, 'YARN_STANDBYRM')
def get_kafka_brokers(self, cluster):
return u.get_instances(cluster, 'KAFKA_BROKER')
def convert_process_configs(self, configs):
p_dict = {
"CLOUDERA": ['MANAGER'],
@ -124,9 +128,10 @@ class PluginUtilsV570(pu.AbstractPluginUtils):
"SOLR": ['SOLR_SERVER'],
"SQOOP": ['SQOOP_SERVER'],
"KMS": ['KMS'],
'YARN_GATEWAY': ['YARN_GATEWAY'],
'HDFS_GATEWAY': ['HDFS_GATEWAY'],
"JOURNALNODE": ['JOURNALNODE']
"YARN_GATEWAY": ['YARN_GATEWAY'],
"HDFS_GATEWAY": ['HDFS_GATEWAY'],
"JOURNALNODE": ['JOURNALNODE'],
"KAFKA": ['KAFKA_BROKER']
}
if isinstance(configs, res.Resource):
configs = configs.to_dict()

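convert_process_configs relies on p_dict to fold per-process configs back under the CM service that owns the process; the "KAFKA": ['KAFKA_BROKER'] entry added above is what routes broker configs to the Kafka service. A sketch of that regrouping (group_by_service is an illustrative name, not the plugin's API):

# Excerpt of p_dict, as extended by this change.
P_DICT = {
    "KAFKA": ['KAFKA_BROKER'],
    "JOURNALNODE": ['JOURNALNODE'],
}

def group_by_service(configs):
    # Regroup {process: {key: value}} under the owning service name.
    grouped = {}
    for service, processes in P_DICT.items():
        for proc in processes:
            if proc in configs:
                grouped.setdefault(service, {}).update(configs[proc])
    return grouped

result = group_by_service({'KAFKA_BROKER': {'port': '9092'}})
assert result == {'KAFKA': {'port': '9092'}}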
View File

@ -0,0 +1,476 @@
[
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "The port to give out to producers, consumers, and other brokers to use in establishing connections. This only needs to be set if this port is different from the port the server should bind to.",
"display_name": "Advertised Port",
"name": "advertised.port",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Additional Broker Java Options parameter.",
"display_name": "Suppress Parameter Validation: Additional Broker Java Options",
"name": "role_config_suppression_broker_java_opts",
"value": "false"
},
{
"desc": "If set, this is the hostname given out to producers, consumers, and other brokers to use in establishing connections. Never set this property at the group level; it should always be overriden on instance level.",
"display_name": "Advertised Host",
"name": "advertised.host.name",
"value": null
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HTTP Metric Report Host parameter.",
"display_name": "Suppress Parameter Validation: HTTP Metric Report Host",
"name": "role_config_suppression_kafka.http.metrics.host",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_kafka_kafka_broker_scm_health",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_ssl_server_keystore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Certificate Trust Store Password",
"name": "role_config_suppression_ssl_client_truststore_password",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Kafka Broker might connect to. This is used when Kafka Broker is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Kafka Broker TLS/SSL Certificate Trust Store File",
"name": "ssl_client_truststore_location",
"value": ""
},
{
"desc": "Port the HTTP metric reporter listens on.",
"display_name": "HTTP Metric Report Port",
"name": "kafka.http.metrics.port",
"value": "24042"
},
{
"desc": "Kafka broker port.",
"display_name": "TCP Port",
"name": "port",
"value": "9092"
},
{
"desc": "The log for a topic partition is stored as a directory of segment files. This setting controls the size to which a segment file can grow before a new segment is rolled over in the log. This value should be larger than message.max.bytes.",
"display_name": "Segment File Size",
"name": "log.segment.bytes",
"value": "1073741824"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>kafka-monitoring.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties",
"name": "kafka-monitoring.properties_role_safety_valve",
"value": null
},
{
"desc": "The frequency, in milliseconds, that the log cleaner checks whether any log segment is eligible for deletion, per retention policies.",
"display_name": "Data Retention Check Interval",
"name": "log.retention.check.interval.ms",
"value": "300000"
},
{
"desc": "The maximum number of rolled log files to keep for Kafka Broker logs. Typically used by log4j or logback.",
"display_name": "Kafka Broker Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Advertised Host parameter.",
"display_name": "Suppress Parameter Validation: Advertised Host",
"name": "role_config_suppression_advertised.host.name",
"value": "false"
},
{
"desc": "The minimum log level for Kafka Broker logs",
"display_name": "Kafka Broker Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "When computing the overall Kafka Broker health, consider the host's health.",
"display_name": "Kafka Broker Host Health Test",
"name": "kafka_broker_host_health_enabled",
"value": "true"
},
{
"desc": "Maximum size for the Java process heap memory. Passed to Java -Xmx. Measured in megabytes. Kafka does not generally require setting large heap sizes. It is better to let the file system cache utilize the available memory.",
"display_name": "Java Heap Size of Broker",
"name": "broker_max_heap_size",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_kafka_kafka_broker_swap_memory_usage",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "The number of I/O threads that the server uses for executing requests. You should have at least as many threads as you have disks.",
"display_name": "Number of I/O Threads",
"name": "num.io.threads",
"value": "8"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_ssl_server_keystore_location",
"value": "false"
},
{
"desc": "Protocol to be used for inter-broker communication.",
"display_name": "Inter Broker Protocol",
"name": "security.inter.broker.protocol",
"value": "PLAINTEXT"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties",
"name": "role_config_suppression_ssl.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>kafka.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties",
"name": "kafka.properties_role_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore Key Password",
"name": "role_config_suppression_ssl_server_keystore_keypassword",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out (in hours). Secondary to the log.retention.ms property. The special value of -1 is interpreted as unlimited. This property is deprecated in Kafka 1.4.0. Use log.retention.ms.",
"display_name": "Data Retention Hours",
"name": "log.retention.hours",
"value": "168"
},
{
"desc": "Kafka broker secure port.",
"display_name": "TLS/SSL Port",
"name": "ssl_port",
"value": "9093"
},
{
"desc": "The amount of data to retain in the log for each topic-partition. This is the limit per partition: multiply by the number of partitions to get the total data retained for the topic. The special value of -1 is interpreted as unlimited. If both log.retention.ms and log.retention.bytes are set, a segment is deleted when either limit is exceeded.",
"display_name": "Data Retention Size",
"name": "log.retention.bytes",
"value": "-1"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Host the HTTP metric reporter binds to.",
"display_name": "HTTP Metric Report Host",
"name": "kafka.http.metrics.host",
"value": "0.0.0.0"
},
{
"desc": "The maximum size, in megabytes, per log file for Kafka Broker logs. Typically used by log4j or logback.",
"display_name": "Kafka Broker Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties",
"name": "role_config_suppression_kafka-monitoring.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "kafka_broker_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Kafka Broker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_BROKER_role_env_safety_valve",
"value": null
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Port for JMX.",
"display_name": "JMX Port",
"name": "jmx_port",
"value": "9393"
},
{
"desc": "The log directory for log files of the role Kafka Broker.",
"display_name": "Kafka Broker Log Directory",
"name": "log_dir",
"value": "/var/log/kafka"
},
{
"desc": "The maximum time before a new log segment is rolled out. This property is used in Cloudera Kafka 1.4.0 and later in place of log.roll.hours.",
"display_name": "Data Log Roll Time",
"name": "log.roll.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties",
"name": "role_config_suppression_kafka.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The password that protects the private key contained in the JKS keystore used when Kafka Broker is acting as a TLS/SSL server.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore Key Password",
"name": "ssl_server_keystore_keypassword",
"value": ""
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Kafka Broker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_kafka_kafka_broker_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Data Directories parameter.",
"display_name": "Suppress Parameter Validation: Data Directories",
"name": "role_config_suppression_log.dirs",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out. If both log.retention.ms and log.retention.bytes are set, a segment is deleted when either limit is exceeded. The special value of -1 is interpreted as unlimited. This property is used in Kafka 1.4.0 and later in place of log.retention.hours.",
"display_name": "Data Retention Time",
"name": "log.retention.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Certificate Trust Store File",
"name": "role_config_suppression_ssl_client_truststore_location",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out (in hours). This property is deprecated in Cloudera Kafka 1.4.0; use log.roll.ms.",
"display_name": "Data Log Roll Hours",
"name": "log.roll.hours",
"value": "168"
},
{
"desc": "These arguments are passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags are passed here.",
"display_name": "Additional Broker Java Options",
"name": "broker_java_opts",
"value": "-server -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Kafka Broker is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore File Location",
"name": "ssl_server_keystore_location",
"value": ""
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_kafka_kafka_broker_host_health",
"value": "false"
},
{
"desc": "A list of one or more directories in which Kafka data is stored. Each new partition created is placed in the directory that currently has the fewest partitions. Each directory should be on its own separate drive.",
"display_name": "Data Directories",
"name": "log.dirs",
"value": "/var/local/kafka/data"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Segment File Size parameter.",
"display_name": "Suppress Parameter Validation: Segment File Size",
"name": "role_config_suppression_log.segment.bytes",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Broker ID parameter.",
"display_name": "Suppress Parameter Validation: Broker ID",
"name": "role_config_suppression_broker.id",
"value": "false"
},
{
"desc": "The password for the Kafka Broker JKS keystore file.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore File Password",
"name": "ssl_server_keystore_password",
"value": ""
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_kafka_kafka_broker_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_kafka_broker_role_env_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties",
"name": "ssl.properties_role_safety_valve",
"value": null
},
{
"desc": "Encrypt communication between clients and Kafka Broker using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Kafka Broker",
"name": "ssl_enabled",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "ID uniquely identifying each broker. Never set this property at the group level; it should always be overridden on instance level.",
"display_name": "Broker ID",
"name": "broker.id",
"value": null
},
{
"desc": "Maximum number of connections allowed from each IP address.",
"display_name": "Maximum Connections per IP Address",
"name": "max.connections.per.ip",
"value": null
},
{
"desc": "Enables the health test that the Kafka Broker's process state is consistent with the role configuration",
"display_name": "Kafka Broker Process Health Test",
"name": "kafka_broker_scm_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Number of I/O Threads parameter.",
"display_name": "Suppress Parameter Validation: Number of I/O Threads",
"name": "role_config_suppression_num.io.threads",
"value": "false"
},
{
"desc": "The password for the Kafka Broker TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Kafka Broker TLS/SSL Certificate Trust Store Password",
"name": "ssl_client_truststore_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Heap Size of Broker parameter.",
"display_name": "Suppress Parameter Validation: Java Heap Size of Broker",
"name": "role_config_suppression_broker_max_heap_size",
"value": "false"
},
{
"desc": "Client authentication mode for SSL connections. Default is none, could be set to \"required\", i.e., client authentication is required or to \"requested\", i.e., client authentication is requested and client without certificates can still connect.",
"display_name": "SSL Client Authentication",
"name": "ssl.client.auth",
"value": "none"
},
{
"desc": "Authenticate a SASL connection with zookeeper, if Kerberos authentication is enabled. It also allows a broker to set SASL ACL on zookeeper nodes which locks these nodes down so that only kafka broker can modify.",
"display_name": "Authenticate Zookeeper Connection",
"name": "authenticate.zookeeper.connection",
"value": "true"
}
]
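Rendering these broker role defaults into the key=value lines kafka.properties expects is mechanical; the values below are taken from the file above, except broker.id, whose default is null and must always be set per instance (the '0' is purely illustrative):

broker_defaults = {
    'broker.id': '0',  # null in the defaults; always set per instance
    'port': '9092',
    'log.dirs': '/var/local/kafka/data',
    'log.retention.hours': '168',
    'num.io.threads': '8',
}

def to_properties(conf):
    # Emit sorted key=value lines, the format kafka.properties expects.
    return '\n'.join('%s=%s' % item for item in sorted(conf.items()))

print(to_properties(broker_defaults))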

View File

@ -0,0 +1,482 @@
[
{
"desc": "The maximum size, in megabytes, per log file for Kafka MirrorMaker logs. Typically used by log4j or logback.",
"display_name": "Kafka MirrorMaker Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Stop the entire mirror maker when a send failure occurs.",
"display_name": "Abort on Send Failure",
"name": "abort.on.send.failure",
"value": "true"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Server JKS Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Server JKS Keystore Key Password",
"name": "role_config_suppression_ssl_server_keystore_keypassword",
"value": "false"
},
{
"desc": "Maximum number of bytes that can be buffered between producer and consumer. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Queue Size",
"name": "queue.byte.size",
"value": "100000000"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_ssl_server_keystore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Certificate Trust Store Password",
"name": "role_config_suppression_ssl_client_truststore_password",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_kafka_kafka_mirror_maker_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_kafka_mirror_maker_role_env_safety_valve",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Only required if source Kafka cluster requires client authentication.",
"display_name": "Source Kafka Cluster's Client Auth",
"name": "source.ssl.client.auth",
"value": "false"
},
{
"desc": "When computing the overall Kafka MirrorMaker health, consider the host's health.",
"display_name": "Kafka MirrorMaker Host Health Test",
"name": "kafka_mirror_maker_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_client.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_client.properties",
"name": "role_config_suppression_ssl_client.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for Kafka MirrorMaker logs. Typically used by log4j or logback.",
"display_name": "Kafka MirrorMaker Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Topic Blacklist parameter.",
"display_name": "Suppress Parameter Validation: Topic Blacklist",
"name": "role_config_suppression_blacklist",
"value": "false"
},
{
"desc": "The minimum log level for Kafka MirrorMaker logs",
"display_name": "Kafka MirrorMaker Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Offset commit interval in milliseconds.",
"display_name": "Offset Commit Interval",
"name": "offset.commit.interval.ms",
"value": "60000"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>mirror_maker_producers.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_producers.properties",
"name": "mirror_maker_producers.properties_role_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_kafka_kafka_mirror_maker_host_health",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "kafka_mirror_maker_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_server.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_server.properties",
"name": "role_config_suppression_ssl_server.properties_role_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "The password for the Kafka MirrorMaker JKS keystore file.",
"display_name": "Kafka MirrorMaker TLS/SSL Server JKS Keystore File Password",
"name": "ssl_server_keystore_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Destination Broker List parameter.",
"display_name": "Suppress Parameter Validation: Destination Broker List",
"name": "role_config_suppression_bootstrap.servers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_ssl_server_keystore_location",
"value": "false"
},
{
"desc": "Number of producer instances. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Number of Producers",
"name": "num.producers",
"value": "1"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl_client.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_client.properties",
"name": "ssl_client.properties_role_safety_valve",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Kafka MirrorMaker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_MIRROR_MAKER_role_env_safety_valve",
"value": null
},
{
"desc": "Name of the consumer group used by MirrorMaker. When multiple role instances are configured with the same topics and same group ID, the role instances load-balance replication for the topics. When multiple role instances are configured with the same topics but different group ID, each role instance replicates all the events for those topics - this can be used to replicate the source cluster into multiple destination clusters.",
"display_name": "Consumer Group ID",
"name": "group.id",
"value": "cloudera_mirrormaker"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_kafka_kafka_mirror_maker_swap_memory_usage",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Consumer Group ID parameter.",
"display_name": "Suppress Parameter Validation: Consumer Group ID",
"name": "role_config_suppression_group.id",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Consumer Rebalance Listener Arguments parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Consumer Rebalance Listener Arguments",
"name": "role_config_suppression_consumer.rebalance.listener.args",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Kafka MirrorMaker might connect to. This is used when Kafka MirrorMaker is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Kafka MirrorMaker TLS/SSL Certificate Trust Store File",
"name": "ssl_client_truststore_location",
"value": ""
},
{
"desc": "Enables the health test that the Kafka MirrorMaker's process state is consistent with the role configuration",
"display_name": "Kafka MirrorMaker Process Health Test",
"name": "kafka_mirror_maker_scm_health_enabled",
"value": "true"
},
{
"desc": "Number of messages that are buffered between producer and consumer. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Message Queue Size",
"name": "queue.size",
"value": "10000"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "A consumer rebalance listener of type ConsumerRebalanceListener to be invoked when MirrorMaker's consumer rebalances.",
"display_name": "MirrorMaker Consumer Rebalance Listener",
"name": "consumer.rebalance.listener",
"value": ""
},
{
"desc": "Run with MirrorMaker settings that eliminate potential loss of data. This impacts performance, but is highly recommended. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Avoid Data Loss",
"name": "no.data.loss",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_producers.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_producers.properties",
"name": "role_config_suppression_mirror_maker_producers.properties_role_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>mirror_maker_consumers.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_consumers.properties",
"name": "mirror_maker_consumers.properties_role_safety_valve",
"value": null
},
{
"desc": "Protocol to be used for communication with source kafka cluster.",
"display_name": "Source Kafka Cluster's Security Protocol",
"name": "source.security.protocol",
"value": "PLAINTEXT"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Consumer Rebalance Listener parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Consumer Rebalance Listener",
"name": "role_config_suppression_consumer.rebalance.listener",
"value": "false"
},
{
"desc": "Protocol to be used for communication with destination kafka cluster.",
"display_name": "Destination Kafka Cluster's Security Protocol",
"name": "destination.security.protocol",
"value": "PLAINTEXT"
},
{
"desc": "Regular expression that represents a set of topics to mirror. Note that whitelist and blacklist parameters are mutually exclusive. If both are defined, only the whilelist is used.",
"display_name": "Topic Whitelist",
"name": "whitelist",
"value": ""
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_kafka_kafka_mirror_maker_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Topic Whitelist parameter.",
"display_name": "Suppress Parameter Validation: Topic Whitelist",
"name": "role_config_suppression_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Certificate Trust Store File",
"name": "role_config_suppression_ssl_client_truststore_location",
"value": "false"
},
{
"desc": "Port for JMX.",
"display_name": "JMX Port",
"name": "jmx_port",
"value": "9394"
},
{
"desc": "Arguments used by MirrorMaker message handler.",
"display_name": "MirrorMaker Message Handler Arguments",
"name": "message.handler.args",
"value": ""
},
{
"desc": "List of brokers on destination cluster. This should be more than one, for high availability, but there's no need to list all brokers.",
"display_name": "Destination Broker List",
"name": "bootstrap.servers",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Source Broker List parameter.",
"display_name": "Suppress Parameter Validation: Source Broker List",
"name": "role_config_suppression_source.bootstrap.servers",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_kafka_kafka_mirror_maker_file_descriptor",
"value": "false"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Kafka MirrorMaker is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Kafka MirrorMaker TLS/SSL Server JKS Keystore File Location",
"name": "ssl_server_keystore_location",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Message Handler parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Message Handler",
"name": "role_config_suppression_message.handler",
"value": "false"
},
{
"desc": "Only required if destination Kafka cluster requires client authentication.",
"display_name": "Destination Kafka Cluster's Client Auth",
"name": "destination.ssl.client.auth",
"value": "false"
},
{
"desc": "A MirrorMaker message handler of type MirrorMakerMessageHandler that will process every record in-between producer and consumer.",
"display_name": "MirrorMaker Message Handler",
"name": "message.handler",
"value": ""
},
{
"desc": "List of brokers on source cluster. This should be more than one, for high availability, but there's no need to list all brokers.",
"display_name": "Source Broker List",
"name": "source.bootstrap.servers",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_consumers.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_consumers.properties",
"name": "role_config_suppression_mirror_maker_consumers.properties_role_safety_valve",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl_server.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_server.properties",
"name": "ssl_server.properties_role_safety_valve",
"value": null
},
{
"desc": "Number of consumer threads.",
"display_name": "Number of Consumer Threads",
"name": "num.streams",
"value": "1"
},
{
"desc": "The password that protects the private key contained in the JKS keystore used when Kafka MirrorMaker is acting as a TLS/SSL server.",
"display_name": "Kafka MirrorMaker TLS/SSL Server JKS Keystore Key Password",
"name": "ssl_server_keystore_keypassword",
"value": ""
},
{
"desc": "Encrypt communication between clients and Kafka MirrorMaker using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Kafka MirrorMaker",
"name": "ssl_enabled",
"value": "false"
},
{
"desc": "Regular expression that represents a set of topics to avoid mirroring. Note that whitelist and blacklist parameters are mutually exclusive. If both are defined, only the whilelist is used. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Topic Blacklist",
"name": "blacklist",
"value": ""
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Arguments used by MirrorMaker consumer rebalance listener.",
"display_name": "MirrorMaker Consumer Rebalance Listener Arguments",
"name": "consumer.rebalance.listener.args",
"value": ""
},
{
"desc": "The log directory for log files of the role Kafka MirrorMaker.",
"display_name": "Kafka MirrorMaker Log Directory",
"name": "log_dir",
"value": "/var/log/kafka"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Message Handler Arguments parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Message Handler Arguments",
"name": "role_config_suppression_message.handler.args",
"value": "false"
},
{
"desc": "The password for the Kafka MirrorMaker TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Kafka MirrorMaker TLS/SSL Certificate Trust Store Password",
"name": "ssl_client_truststore_password",
"value": ""
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
}
]
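
Each descriptor above pairs a config key (name) with its default (value); a null value means the parameter has no default and, as with bootstrap.servers, must be supplied by the operator. A minimal sketch of reducing such a file to a plain defaults dict — assuming kafka-kafka_mirror_maker.json is on disk, with load_defaults as an illustrative helper rather than the plugin's API:

import json

def load_defaults(path):
    # Reduce a config descriptor file to a {name: default value} mapping.
    with open(path) as f:
        return {entry['name']: entry['value'] for entry in json.load(f)}

defaults = load_defaults('kafka-kafka_mirror_maker.json')
assert defaults['log_dir'] == '/var/log/kafka'
assert defaults['bootstrap.servers'] is None  # no default; the operator must set it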

View File

@ -0,0 +1,374 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Controlled Shutdown Maximum Attempts parameter.",
"display_name": "Suppress Parameter Validation: Controlled Shutdown Maximum Attempts",
"name": "service_config_suppression_controlled.shutdown.max.retries",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_kafka_service_env_safety_valve",
"value": "false"
},
{
"desc": "The default number of partitions for automatically created topics.",
"display_name": "Default Number of Partitions",
"name": "num.partitions",
"value": "1"
},
{
"desc": "The amount of time to retain delete messages for log compacted topics. Once a consumer has seen an original message you need to ensure it also sees the delete message. If you removed the delete message too quickly, this might not happen. As a result there is a configurable delete retention time.",
"display_name": "Log Compaction Delete Record Retention Time",
"name": "log.cleaner.delete.retention.ms",
"value": "604800000"
},
{
"desc": "Enables auto creation of topics on the server. If this is set to true, then attempts to produce, consume, or fetch metadata for a non-existent topic automatically create the topic with the default replication factor and number of partitions.",
"display_name": "Topic Auto Creation",
"name": "auto.create.topics.enable",
"value": "true"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Number of threads used to replicate messages from leaders. Increasing this value increases the degree of I/O parallelism in the follower broker.",
"display_name": "Number of Replica Fetchers",
"name": "num.replica.fetchers",
"value": "1"
},
{
"desc": "Enables Kafka monitoring.",
"display_name": "Enable Kafka Monitoring (Note: Requires Kafka-1.3.0 parcel or higher)",
"name": "monitoring.enabled",
"value": "true"
},
{
"desc": "If automatic leader rebalancing is enabled, the controller tries to balance leadership for partitions among the brokers by periodically returning leadership for each partition to the preferred replica, if it is available.",
"display_name": "Enable Automatic Leader Rebalancing",
"name": "auto.leader.rebalance.enable",
"value": "true"
},
{
"desc": "Number of unsuccessful controlled shutdown attempts before executing an unclean shutdown. For example, the default value of 3 means that the system will attempt a controlled shutdown 3 times before executing an unclean shutdown.",
"display_name": "Controlled Shutdown Maximum Attempts",
"name": "controlled.shutdown.max.retries",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "The number of partitions for the offset commit topic. Since changing this after deployment is currently unsupported, we recommend using a higher setting for production (for example, 100-200).",
"display_name": "Offset Commit Topic Number of Partitions",
"name": "offsets.topic.num.partitions",
"value": "50"
},
{
"desc": "If a follower has not sent any fetch requests, nor has it consumed up to the leader's log end offset during this time, the leader removes the follower from the ISR set.",
"display_name": "Allowed Replica Time Lag",
"name": "replica.lag.time.max.ms",
"value": "10000"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "kafka"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Replica Maximum Fetch Size parameter.",
"display_name": "Suppress Parameter Validation: Replica Maximum Fetch Size",
"name": "service_config_suppression_replica.fetch.max.bytes",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "The replication factor for the offset commit topic. A higher setting is recommended in order to ensure higher availability (for example, 3 or 4) . If the offsets topic is created when there are fewer brokers than the replication factor, then the offsets topic is created with fewer replicas.",
"display_name": "Offset Commit Topic Replication Factor",
"name": "offsets.topic.replication.factor",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the Kafka Broker Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Kafka Broker Count Validator",
"name": "service_config_suppression_kafka_broker_count_validator",
"value": "false"
},
{
"desc": "Controls how frequently the log cleaner will attempt to clean the log. This ratio bounds the maximum space wasted in the log by duplicates. For example, at 0.5 at most 50% of the log could be duplicates. A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log.",
"display_name": "Log Cleaner Clean Ratio",
"name": "log.cleaner.min.cleanable.ratio",
"value": "0.5"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "kafka"
},
{
"desc": "Enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so might result in data loss.",
"display_name": "Enable Unclean Leader Election",
"name": "unclean.leader.election.enable",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default Replication Factor parameter.",
"display_name": "Suppress Parameter Validation: Default Replication Factor",
"name": "service_config_suppression_default.replication.factor",
"value": "false"
},
{
"desc": "Enables controlled shutdown of the broker. If enabled, the broker moves all leaders on it to other brokers before shutting itself down. This reduces the unavailability window during shutdown.",
"display_name": "Enable Controlled Shutdown",
"name": "controlled.shutdown.enable",
"value": "true"
},
{
"desc": "The frequency with which to check for leader imbalance.",
"display_name": "Leader Imbalance Check Interval",
"name": "leader.imbalance.check.interval.seconds",
"value": "300"
},
{
"desc": "The maximum number of bytes to fetch for each partition in fetch requests replicas send to the leader. This value should be larger than message.max.bytes.",
"display_name": "Replica Maximum Fetch Size",
"name": "replica.fetch.max.bytes",
"value": "1048576"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Session Timeout parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Session Timeout",
"name": "service_config_suppression_zookeeper.session.timeout.ms",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Number of Replica Fetchers parameter.",
"display_name": "Suppress Parameter Validation: Number of Replica Fetchers",
"name": "service_config_suppression_num.replica.fetchers",
"value": "false"
},
{
"desc": "Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second. Only respected by Kafka 2.0 or later.",
"display_name": "Default Producer Quota",
"name": "quota.producer.default",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Kafka Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_service_env_safety_valve",
"value": null
},
{
"desc": "The maximum size of a message that the server can receive. It is important that this property be in sync with the maximum fetch size the consumers use, or else an unruly producer could publish messages too large for consumers to consume.",
"display_name": "Maximum Message Size",
"name": "message.max.bytes",
"value": "1000000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Leader Imbalance Allowed Per Broker parameter.",
"display_name": "Suppress Parameter Validation: Leader Imbalance Allowed Per Broker",
"name": "service_config_suppression_leader.imbalance.per.broker.percentage",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Enables the log cleaner to compact topics with cleanup.policy=compact on this cluster.",
"display_name": "Enable Log Compaction",
"name": "log.cleaner.enable",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Minimum Number of Replicas in ISR parameter.",
"display_name": "Suppress Parameter Validation: Minimum Number of Replicas in ISR",
"name": "service_config_suppression_min.insync.replicas",
"value": "false"
},
{
"desc": "ZNode in ZooKeeper that should be used as a root for this Kafka cluster.",
"display_name": "ZooKeeper Root",
"name": "zookeeper.chroot",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the Kafka MirrorMaker Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Kafka MirrorMaker Count Validator",
"name": "service_config_suppression_kafka_mirror_maker_count_validator",
"value": "false"
},
{
"desc": "The number of messages written to a log partition before triggering an fsync on the log. Setting this lower syncs data to disk more often, but has a major impact on performance. We recommend use of replication for durability rather than depending on single-server fsync; however, this setting can be used to be extra certain. If used in conjunction with log.flush.interval.ms, the log is flushed when either criteria is met.",
"display_name": "Log Flush Message Interval",
"name": "log.flush.interval.messages",
"value": null
},
{
"desc": "The number of background threads to use for log cleaning.",
"display_name": "Number of Log Cleaner Threads",
"name": "log.cleaner.threads",
"value": "1"
},
{
"desc": "Enable Kerberos authentication for this KAFKA service.",
"display_name": "Enable Kerberos Authentication",
"name": "kerberos.auth.enable",
"value": "false"
},
{
"desc": "List of metric reporter class names. HTTP reporter is included by default.",
"display_name": "List of Metric Reporters",
"name": "kafka.metrics.reporters",
"value": "nl.techop.kafka.KafkaHttpMetricsReporter"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Root parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Root",
"name": "service_config_suppression_zookeeper.chroot",
"value": "false"
},
{
"desc": "The frequency, in ms, with which the log flusher checks whether any log is eligible to be flushed to disk.",
"display_name": "Log Flush Scheduler Interval",
"name": "log.flush.scheduler.interval.ms",
"value": null
},
{
"desc": "The minimum number of replicas in the in-sync replica needed to satisfy a produce request where required.acks=-1 (that is, all).",
"display_name": "Minimum Number of Replicas in ISR",
"name": "min.insync.replicas",
"value": "1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Offset Commit Topic Replication Factor parameter.",
"display_name": "Suppress Parameter Validation: Offset Commit Topic Replication Factor",
"name": "service_config_suppression_offsets.topic.replication.factor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Leader Imbalance Check Interval parameter.",
"display_name": "Suppress Parameter Validation: Leader Imbalance Check Interval",
"name": "service_config_suppression_leader.imbalance.check.interval.seconds",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "If the server fails to send a heartbeat to ZooKeeper within this period of time, it is considered dead. If set too low, ZooKeeper might falsely consider a server dead; if set too high, ZooKeeper might take too long to recognize a dead server.",
"display_name": "ZooKeeper Session Timeout",
"name": "zookeeper.session.timeout.ms",
"value": "6000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Maximum Message Size parameter.",
"display_name": "Suppress Parameter Validation: Maximum Message Size",
"name": "service_config_suppression_message.max.bytes",
"value": "false"
},
{
"desc": "Enables topic deletion using admin tools. When delete topic is disabled, deleting topics through the admin tools has no effect.",
"display_name": "Enable Delete Topic",
"name": "delete.topic.enable",
"value": "true"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "If a replica falls more than this number of messages behind the leader, the leader removes the follower from the ISR and treats it as dead. This property is deprecated in Kafka 1.4.0; higher versions use only replica.lag.time.max.ms.",
"display_name": "Allowed Replica Message Lag",
"name": "replica.lag.max.messages",
"value": "4000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default Number of Partitions parameter.",
"display_name": "Suppress Parameter Validation: Default Number of Partitions",
"name": "service_config_suppression_num.partitions",
"value": "false"
},
{
"desc": "Name of the ZooKeeper service that this Kafka service instance depends on",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "The total memory used for log deduplication across all cleaner threads. This memory is statically allocated and will not cause GC problems.",
"display_name": "Log Cleaner Deduplication Buffer Size",
"name": "log.cleaner.dedupe.buffer.size",
"value": "134217728"
},
{
"desc": "Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second. Only respected by Kafka 2.0 or later.",
"display_name": "Default Consumer Quota",
"name": "quota.consumer.default",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Offset Commit Topic Number of Partitions parameter.",
"display_name": "Suppress Parameter Validation: Offset Commit Topic Number of Partitions",
"name": "service_config_suppression_offsets.topic.num.partitions",
"value": "false"
},
{
"desc": "The default replication factor for automatically created topics.",
"display_name": "Default Replication Factor",
"name": "default.replication.factor",
"value": "1"
},
{
"desc": "The maximum time between fsync calls on the log. If used in conjuction with log.flush.interval.messages, the log is flushed when either criteria is met.",
"display_name": "Log Flush Time Interval",
"name": "log.flush.interval.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the List of Metric Reporters parameter.",
"display_name": "Suppress Parameter Validation: List of Metric Reporters",
"name": "service_config_suppression_kafka.metrics.reporters",
"value": "false"
},
{
"desc": "The percentage of leader imbalance allowed per broker. The controller rebalances leadership if this ratio goes above the configured value per broker.",
"display_name": "Leader Imbalance Allowed Per Broker",
"name": "leader.imbalance.per.broker.percentage",
"value": "10"
}
]
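
The service-level defaults above leave zookeeper_service null; the plugin fills that dependency at configure time and layers user-supplied cluster configs over these defaults. A hedged sketch of that precedence, with merge_configs as an illustrative stand-in rather than the plugin's own helper:

def merge_configs(defaults, overrides):
    # Later values win; None overrides do not clobber an existing default.
    merged = dict(defaults)
    merged.update({k: v for k, v in overrides.items() if v is not None})
    return merged

service_conf = merge_configs(
    {'zookeeper_service': None, 'num.partitions': '1'},
    {'zookeeper_service': 'zookeeper01'})  # dependency injected at configure time
assert service_conf == {'zookeeper_service': 'zookeeper01', 'num.partitions': '1'}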

View File

@ -60,6 +60,7 @@ class VersionHandler(avm.BaseVersionHandler):
"SQOOP": ['SQOOP_SERVER'],
"SENTRY": ['SENTRY_SERVER'],
"KMS": ['KMS'],
"KAFKA": ['KAFKA_BROKER'],
"YARN_GATEWAY": [],
"RESOURCEMANAGER": [],

View File

@ -65,7 +65,10 @@ json_files = [
'impala-impalad.json',
'impala-statestore.json',
'kms-service.json',
'kms-kms.json'
'kms-kms.json',
'kafka-kafka_broker.json',
'kafka-kafka_mirror_maker.json',
'kafka-service.json'
]

View File

@ -65,7 +65,10 @@ json_files = [
'impala-impalad.json',
'impala-statestore.json',
'kms-service.json',
'kms-kms.json'
'kms-kms.json',
'kafka-kafka_broker.json',
'kafka-kafka_mirror_maker.json',
'kafka-service.json'
]
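
Both the CDH 5.5 and CDH 5.7 config helpers register the same three new Kafka resource files. A quick hedged sanity check that each listed file exists and parses as a JSON list of descriptors (check_resources is a hypothetical helper, not part of the plugin):

import json
import os

json_files = [
    'kafka-kafka_broker.json',
    'kafka-kafka_mirror_maker.json',
    'kafka-service.json',
]

def check_resources(resource_dir):
    # Fail fast if a resource file is missing or is not a JSON list.
    for name in json_files:
        with open(os.path.join(resource_dir, name)) as f:
            assert isinstance(json.load(f), list), name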