Compress ZKObject data

To save space in ZooKeeper, compress the data of ZKObjects. This can
reduce the amount of data stored in some cases by a factor of 15x
(e.g. for some job znodes).

For data that is not yet compressed, the ZKObject will fall back to
loading the stored JSON data directly.

Change-Id: Ibb59d3dfc1db0537ff6d28705832f0717d45b632
Simon Westphahl 2022-01-07 11:00:24 +01:00 committed by James E. Blair
parent 488c99dab3
commit b9f35466a8
2 changed files with 21 additions and 3 deletions
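
The core idea, sketched standalone below, is to compress on write and to
attempt decompression on read, treating a zlib failure as the signal that
the stored bytes predate this change. The helper names here are illustrative
only; the actual change lives in ZKObject's load and save paths as shown in
the diff below.

import zlib

def pack(data: bytes) -> bytes:
    # New writes always store zlib-compressed bytes.
    return zlib.compress(data)

def unpack(stored: bytes) -> bytes:
    try:
        return zlib.decompress(stored)
    except zlib.error:
        # Fallback for old znodes that still hold uncompressed JSON.
        return stored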

@@ -4,3 +4,13 @@
     - run-dstat
     - role: ensure-zookeeper
       zookeeper_use_tls: true
+  tasks:
+    # This is needed to run opendev unit test jobs under bionic/focal,
+    # but may not be necessary in later releases
+    - name: Increase open file limit
+      become: True
+      lineinfile:
+        path: /etc/security/limits.conf
+        line: '* soft nofile 4096'
+    - name: Reset SSH connection
+      meta: reset_connection

@@ -15,6 +15,7 @@
 import json
 import time
 import contextlib
+import zlib
 
 from kazoo.exceptions import (
     KazooException, NodeExistsError, NoNodeError, ZookeeperError)
@@ -195,7 +196,12 @@ class ZKObject:
         path = self.getPath()
         while context.sessionIsValid():
             try:
-                data, zstat = context.client.get(path)
+                compressed_data, zstat = context.client.get(path)
+                try:
+                    data = zlib.decompress(compressed_data)
+                except zlib.error:
+                    # Fallback for old, uncompressed data
+                    data = compressed_data
                 self._set(**self.deserialize(data, context))
                 self._set(_zstat=zstat)
                 return
@@ -225,11 +231,13 @@
         path = self.getPath()
         while context.sessionIsValid():
             try:
+                compressed_data = zlib.compress(data)
                 if create:
                     real_path, zstat = context.client.create(
-                        path, data, makepath=True, include_data=True)
+                        path, compressed_data, makepath=True,
+                        include_data=True)
                 else:
-                    zstat = context.client.set(path, data,
+                    zstat = context.client.set(path, compressed_data,
                                                version=self._zstat.version)
                 self._set(_zstat=zstat)
                 return
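
As a rough, hypothetical illustration of the savings the commit message
describes (the ~15x figure depends entirely on how repetitive the stored
JSON is; the payload below is made up, not real znode data):

import json
import zlib

# Made-up, highly repetitive JSON, loosely resembling a job znode.
payload = json.dumps(
    {"jobs": [{"name": f"job-{i}", "voting": True} for i in range(500)]}
).encode("utf-8")

compressed = zlib.compress(payload)
print(f"{len(payload)} -> {len(compressed)} bytes "
      f"({len(payload) / len(compressed):.1f}x smaller)")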