Compress sharded ZK data
To save space in ZooKeeper and reduce network traffic, compress any data that we might shard (i.e., the larger data blobs).

Change-Id: I9865949e6eb1b0faa2a0d6460e26de4b43ee8f4d
parent 118d45b1f2
commit 6b60a954f7
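The change relies on zlib's lossless round trip: bytes compressed before being written to a ZNode come back byte-for-byte after decompression on read. A minimal standalone sketch of that guarantee (the payload below is made up for illustration and is not Zuul's data format):

    import zlib

    # Illustrative payload only; real shards carry serialized Zuul state.
    blob = b"example ZooKeeper blob " * 1000

    compressed = zlib.compress(blob)         # what the write path stores
    restored = zlib.decompress(compressed)   # what the read path returns

    assert restored == blob                  # lossless round trip
    print(len(blob), "->", len(compressed))  # repetitive data shrinks substantially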
@@ -14,6 +14,7 @@
 
 import io
 from contextlib import suppress
+import zlib
 
 from kazoo.exceptions import NoNodeError
 from kazoo.client import TransactionRequest
@@ -53,7 +54,7 @@ class RawShardIO(io.RawIOBase):
 
     def _getData(self, path):
         data, _ = self.client.get(path)
-        return data
+        return zlib.decompress(data)
 
     def readall(self):
         read_buffer = io.BytesIO()
@@ -66,6 +67,8 @@ class RawShardIO(io.RawIOBase):
         byte_count = len(shard_data)
         # Only write one key at a time and defer writing the rest to the caller
         shard_bytes = bytes(shard_data[0:NODE_BYTE_SIZE_LIMIT])
+        shard_bytes = zlib.compress(shard_bytes)
+        assert(len(shard_bytes) < NODE_BYTE_SIZE_LIMIT)
         kw = dict(sequence=True)
         if not self.is_transaction:
             kw['makepath'] = True
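Because each shard is compressed individually before the size check, a reader has to decompress shard-by-shard and then concatenate, which is what the _getData() change above does per node; the shards cannot be joined first and decompressed in one pass. A minimal sketch of that read path, assuming a connected kazoo client (ShardedReader and the node layout are hypothetical stand-ins, not Zuul's RawShardIO):

    import io
    import zlib

    class ShardedReader:
        # Hypothetical illustration of the decompress-per-shard pattern.
        def __init__(self, client, root):
            self.client = client  # a connected kazoo.client.KazooClient
            self.root = root      # parent ZNode holding sequence children

        def readall(self):
            buf = io.BytesIO()
            # Shards are sequence nodes, so sorting restores write order.
            for child in sorted(self.client.get_children(self.root)):
                data, _ = self.client.get(f"{self.root}/{child}")
                # Each shard was compressed on its own, so each must be
                # decompressed on its own before concatenation.
                buf.write(zlib.decompress(data))
            return buf.getvalue()

Compressing the already size-limited slice keeps each ZNode under NODE_BYTE_SIZE_LIMIT whenever the data is compressible, which is what the new assert guards.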