Don't use transactions when sharding large events

The size of a ZooKeeper transaction is limited by the
`jute.maxbuffer` setting. This is the same setting that defines the
maximum allowed size of a single znode.

This means that it doesn't make sense to use ZooKeeper transactions
in combination with sharding: sharding splits data that is too large
for a single znode into smaller pieces, but a transaction bundles all
of those pieces back into one request that is subject to the very
same size limit.
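
To illustrate, here is a minimal sketch using the kazoo client
library; the helper names, `CHUNK_SIZE`, and the `/seq-` child naming
are illustrative assumptions, not Zuul's sharding module. A plain
`create()` per shard keeps each request under `jute.maxbuffer`
(default 0xfffff bytes, just under 1 MiB), whereas packing the shards
into a transaction produces one multi request roughly the size of the
whole payload:

```python
# Sketch only: write_sharded / write_sharded_tx are hypothetical
# helpers, not Zuul code. jute.maxbuffer applies per request, to a
# transaction as a whole just as to a single znode.
from kazoo.client import KazooClient

CHUNK_SIZE = 512 * 1024  # keep each shard well under jute.maxbuffer

def write_sharded(client: KazooClient, root: str, payload: bytes):
    # One create() per shard: each request stays under the limit,
    # so an arbitrarily large payload can be stored.
    client.ensure_path(root)
    for off in range(0, len(payload), CHUNK_SIZE):
        client.create(f"{root}/seq-", payload[off:off + CHUNK_SIZE],
                      sequence=True)

def write_sharded_tx(client: KazooClient, root: str, payload: bytes):
    # Broken by design: the transaction is serialized into a single
    # multi request, so its total size is roughly len(payload) again
    # and the server rejects it once that exceeds jute.maxbuffer.
    tr = client.transaction()
    tr.create(root)
    for off in range(0, len(payload), CHUNK_SIZE):
        tr.create(f"{root}/seq-", payload[off:off + CHUNK_SIZE],
                  sequence=True)
    return tr.commit()
```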

Change-Id: Ie5c46e06c91d5e5c2288481a6d7b5d081db124d6
Simon Westphahl 2021-08-02 10:16:02 +02:00
parent f8f7509ff2
commit 3a67c47b2b
1 changed file with 4 additions and 11 deletions

```diff
@@ -244,25 +244,18 @@ class ZooKeeperEventQueue(ZooKeeperSimpleBase, Iterable):
             # Get a unique data node
             data_id = str(uuid.uuid4())
             data_root = f'{EVENT_DATA_ROOT}/{data_id}'
-            data_path = f'{data_root}/seq'
             side_channel_data = json.dumps(data['event_data']).encode("utf-8")
             data = data.copy()
             del data['event_data']
             data['event_data_path'] = data_root
             encoded_data = json.dumps(data).encode("utf-8")
-            tr = self.kazoo_client.transaction()
-            tr.create(data_root)
-            with sharding.BufferedShardWriter(tr, data_path) as stream:
+            with sharding.BufferedShardWriter(
+                    self.kazoo_client, data_root) as stream:
                 stream.write(side_channel_data)
-            tr.create(event_path, encoded_data, sequence=True)
-            resp = self.client.commitTransaction(tr)
-            return resp[-1]
-        else:
-            return self.kazoo_client.create(
-                event_path, encoded_data, sequence=True)
+        return self.kazoo_client.create(
+            event_path, encoded_data, sequence=True)
 
     def _iterEvents(self):
         try:
```
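
With the transaction gone, the shard writer talks to the ZooKeeper
client directly. A buffered shard writer along these lines would
flush one `create()` per shard whenever its buffer reaches the znode
size limit; this is a hypothetical sketch, and the size constant and
`/seq-` child naming are assumptions, not necessarily how Zuul's
`sharding.BufferedShardWriter` is implemented:

```python
# Hypothetical sketch of a buffered shard writer; names and the exact
# size limit are assumptions, not Zuul's implementation.
import io

NODE_BYTE_SIZE_LIMIT = 1020 * 1024  # stay safely under jute.maxbuffer

class BufferedShardWriter(io.RawIOBase):
    def __init__(self, client, path):
        self.client = client      # a connected KazooClient
        self.path = path          # parent znode for the shards
        self.buffer = b""

    def writable(self):
        return True

    def write(self, data):
        self.buffer += data
        # Flush full shards as soon as the buffer allows it.
        while len(self.buffer) >= NODE_BYTE_SIZE_LIMIT:
            self._flush(self.buffer[:NODE_BYTE_SIZE_LIMIT])
            self.buffer = self.buffer[NODE_BYTE_SIZE_LIMIT:]
        return len(data)

    def _flush(self, chunk):
        # Each shard is a separate sequence znode and a separate
        # request, so only the shard size counts against the limit.
        self.client.create(f"{self.path}/seq-", chunk,
                           sequence=True, makepath=True)

    def close(self):
        # Flush whatever remains when the with-block exits.
        if self.buffer:
            self._flush(self.buffer)
            self.buffer = b""
        super().close()
```

A consequence of dropping the transaction, visible in the diff, is
that the shard nodes and the event node are no longer created
atomically, so a crash in between could leave an orphaned data node
behind. Bundling them into one transaction was never a viable
alternative, though, since the combined request would run into
`jute.maxbuffer` exactly as described above.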