1a48e06b54
It looks like 6 hours is too infrequent and is enough time for the disk to fill up when we're busy. Instead, purge old snapshots every 2 hours, which looks like it should give us plenty of headroom with our current usage pattern. Change-Id: Ieb92d052e633e9326c41367442f036cc333c40f2
37 lines
1.5 KiB
Django/Jinja
37 lines
1.5 KiB
Django/Jinja
# ZooKeeper server configuration (zoo.cfg), rendered as a Jinja2 template
# (Ansible: uses groups[] / hostvars[] and the regex_replace filter).

# Directory where in-memory database snapshots are stored.
dataDir=/data
# Directory for transaction logs, kept separate from snapshots.
dataLogDir=/datalog
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# When enabled, ZooKeeper auto purge feature retains the autopurge.
# snapRetainCount most recent snapshots and the corresponding
# transaction logs in the dataDir and dataLogDir respectively and
# deletes the rest. Defaults to 3. Minimum value is 3.
autopurge.snapRetainCount=3
# The frequency in hours to look for and purge old snapshots,
# defaults to 0 (disabled). The number of retained snapshots can
# be separately controlled through snapRetainCount and
# defaults to the minimum value of 3. This will quickly fill the
# disk in production if not enabled. Works on ZK >=3.4.
# NOTE: 2h (not the earlier 6h) so the disk does not fill up
# between purges under heavy usage.
autopurge.purgeInterval=2
# Limit of concurrent connections a single client (by IP) may open.
maxClientCnxns=60
standaloneEnabled=true
# AdminServer (embedded Jetty) for HTTP commands.
admin.enableServer=true
# Four-letter-word commands allowed over the client port.
4lw.commands.whitelist=srvr, stat, dump, mntr
# Plaintext client port; TLS clients use secureClientPort below.
clientPort=2181
secureClientPort=2281
ssl.keyStore.location=/tls/keys/keystore.pem
ssl.trustStore.location=/tls/certs/cacert.pem
# One server.N line per ensemble member; N is the numeric id parsed
# from the hostname (e.g. zk3.open....org -> 3), address is its public v4.
{% for host in groups['zookeeper'] %}
server.{{ host | regex_replace('^zk(\\d+)\\.open.*\\.org$', '\\1') | int }}={{ (hostvars[host].public_v4) }}:2888:3888
{% endfor %}
# Encrypt quorum (server-to-server) traffic; Netty factory is required for TLS.
sslQuorum=true
serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
ssl.quorum.keyStore.location=/tls/keys/keystore.pem
ssl.quorum.trustStore.location=/tls/certs/cacert.pem