Handle PartialMessage / ConsumerFetchSizeTooSmall in SimpleConsumer
@@ -32,6 +32,7 @@ from ..common import (
     UnknownTopicOrPartitionError, NotLeaderForPartitionError,
     OffsetOutOfRangeError, FailedPayloadsError, check_error
 )
+from kafka.protocol.message import PartialMessage
 
 
 log = logging.getLogger(__name__)
@@ -407,32 +408,34 @@ class SimpleConsumer(Consumer):
 
                 partition = resp.partition
                 buffer_size = partitions[partition]
-                try:
-                    for message in resp.messages:
-                        if message.offset < self.fetch_offsets[partition]:
-                            log.debug('Skipping message %s because its offset is less than the consumer offset',
-                                      message)
-                            continue
-                        # Put the message in our queue
-                        self.queue.put((partition, message))
-                        self.fetch_offsets[partition] = message.offset + 1
-                except ConsumerFetchSizeTooSmall:
+
+                # Check for partial message
+                if resp.messages and isinstance(resp.messages[-1].message, PartialMessage):
+
+                    # If buffer is at max and all we got was a partial message
+                    # raise ConsumerFetchSizeTooSmall
                     if (self.max_buffer_size is not None and
-                            buffer_size == self.max_buffer_size):
-                        log.error('Max fetch size %d too small',
-                                  self.max_buffer_size)
-                        raise
+                            buffer_size == self.max_buffer_size and
+                            len(resp.messages) == 1):
+
+                        log.error('Max fetch size %d too small', self.max_buffer_size)
+                        raise ConsumerFetchSizeTooSmall()
+
                     if self.max_buffer_size is None:
                         buffer_size *= 2
                     else:
-                        buffer_size = min(buffer_size * 2,
-                                          self.max_buffer_size)
+                        buffer_size = min(buffer_size * 2, self.max_buffer_size)
                     log.warning('Fetch size too small, increase to %d (2x) '
                                 'and retry', buffer_size)
                     retry_partitions[partition] = buffer_size
-                except ConsumerNoMoreData as e:
-                    log.debug('Iteration was ended by %r', e)
-                except StopIteration:
-                    # Stop iterating through this partition
-                    log.debug('Done iterating over partition %s', partition)
+                    resp.messages.pop()
+
+                for message in resp.messages:
+                    if message.offset < self.fetch_offsets[partition]:
+                        log.debug('Skipping message %s because its offset is less than the consumer offset',
+                                  message)
+                        continue
+                    # Put the message in our queue
+                    self.queue.put((partition, message))
+                    self.fetch_offsets[partition] = message.offset + 1
             partitions = retry_partitions
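
As a standalone illustration (not part of the commit), here is a minimal sketch of the buffer-growth rule the new branch implements. ConsumerFetchSizeTooSmall is stubbed locally and next_buffer_size is a hypothetical helper name; only the double-up-to-the-cap / give-up-when-already-at-the-cap behaviour mirrors the diff above.

# Minimal sketch (illustrative, not from kafka-python) of the fetch-buffer
# growth rule in the diff above. ConsumerFetchSizeTooSmall is stubbed here;
# next_buffer_size is a hypothetical helper name.

class ConsumerFetchSizeTooSmall(Exception):
    pass


def next_buffer_size(buffer_size, max_buffer_size, num_messages, got_partial):
    """Return the fetch size to retry with, or None if no retry is needed.

    Mirrors the new branch: only grow when the response ended in a partial
    message; if the buffer is already at its cap and the partial message was
    the only thing returned, raise instead of retrying forever.
    """
    if not got_partial:
        return None

    if (max_buffer_size is not None and
            buffer_size == max_buffer_size and
            num_messages == 1):
        raise ConsumerFetchSizeTooSmall()

    if max_buffer_size is None:
        return buffer_size * 2
    return min(buffer_size * 2, max_buffer_size)


assert next_buffer_size(4096, 32768, num_messages=3, got_partial=True) == 8192
assert next_buffer_size(20000, 32768, num_messages=2, got_partial=True) == 32768
assert next_buffer_size(4096, 32768, num_messages=5, got_partial=False) is None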