Auto-adjusting consumer fetch size
Related to #42. Adds a new ConsumerFetchSizeTooSmall exception that is raised when `_decode_message_set_iter` gets a BufferUnderflowError but has not yet yielded a message. In that event, SimpleConsumer increases the fetch size by a factor of 1.5 and continues the fetching loop while _not_ increasing the offset (it simply retries the request with a larger fetch size). Once the consumer fetch size has been increased, it remains increased while SimpleConsumer fetches from that partition.
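For illustration, here is a minimal sketch of the retry behaviour described above. This is not the actual SimpleConsumer code; the fetch_messages callable, the starting fetch_size, and the return convention are assumptions made purely for the example.

# Stand-in for kafka.util.ConsumerFetchSizeTooSmall so the sketch runs on its own.
class ConsumerFetchSizeTooSmall(Exception):
    """Raised when a fetch response is too small to hold even one whole message."""


def fetch_at_offset(fetch_messages, offset, fetch_size=4096):
    """Fetch messages at `offset`, retrying the same offset with a 1.5x larger
    fetch size whenever ConsumerFetchSizeTooSmall is raised.

    `fetch_messages(offset, fetch_size)` is a hypothetical callable that issues
    the fetch request, decodes the response, and raises
    ConsumerFetchSizeTooSmall when not even one message fits in the response.
    """
    while True:
        try:
            messages = list(fetch_messages(offset, fetch_size))
            # Return the (possibly enlarged) fetch_size so the caller can keep
            # using it for subsequent fetches from this partition.
            return messages, fetch_size
        except ConsumerFetchSizeTooSmall:
            # Do not advance the offset; just retry with a bigger buffer.
            fetch_size = int(fetch_size * 1.5)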
@@ -13,7 +13,7 @@ from kafka.common import (
 from kafka.util import (
     read_short_string, read_int_string, relative_unpack,
     write_short_string, write_int_string, group_by_topic_and_partition,
-    BufferUnderflowError, ChecksumError
+    BufferUnderflowError, ChecksumError, ConsumerFetchSizeTooSmall
 )

 log = logging.getLogger("kafka")
@@ -110,17 +110,21 @@ class KafkaProtocol(object):
         recurse easily.
         """
         cur = 0
+        read_message = False
         while cur < len(data):
             try:
                 ((offset, ), cur) = relative_unpack('>q', data, cur)
                 (msg, cur) = read_int_string(data, cur)
-                for (offset, message) in KafkaProtocol._decode_message(msg,
-                                                                       offset):
+                for (offset, message) in KafkaProtocol._decode_message(msg, offset):
+                    read_message = True
                     yield OffsetAndMessage(offset, message)
-
             except BufferUnderflowError:
-                # If we get a partial read of a message, stop
-                raise StopIteration()
+                if read_message is False:
+                    # If we get a partial read of a message, but haven't yielded anything,
+                    # there's a problem
+                    raise ConsumerFetchSizeTooSmall()
+                else:
+                    raise StopIteration()

     @classmethod
     def _decode_message(cls, data, offset):
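In effect, the read_message flag lets `_decode_message_set_iter` distinguish a normal partial trailing message (at least one message was yielded, so iteration simply stops) from a buffer too small to hold even one message (nothing was yielded, so ConsumerFetchSizeTooSmall is raised). A hedged caller-side sketch follows; the handle(offset_and_message) callback and the import paths are assumptions for the example.

# Assumed import paths; adjust to wherever KafkaProtocol and the exception
# actually live in this tree.
from kafka.protocol import KafkaProtocol
from kafka.util import ConsumerFetchSizeTooSmall


def consume_message_set(data, handle):
    """Decode a raw MessageSet buffer with KafkaProtocol._decode_message_set_iter.

    Returns True if the caller should retry the same offset with a larger
    fetch size, False if the buffer was consumed normally.
    """
    try:
        for offset_and_message in KafkaProtocol._decode_message_set_iter(data):
            handle(offset_and_message)
    except ConsumerFetchSizeTooSmall:
        # Not even one message fit in this response; grow the fetch size
        # and re-issue the request at the same offset.
        return True
    return False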