Fix typos

Jeff Widman 2016-11-08 01:29:45 -08:00
parent 77591afa78
commit cd74c1fe40
12 changed files with 18 additions and 18 deletions


@@ -123,7 +123,7 @@ Consumers
Producers
* KAFKA-3388: Fix expiration of batches sitting in the accumulator (dpkp PR 699)
* KAFKA-3197: when max.in.flight.request.per.connection = 1, attempt to guarantee ordering (dpkp PR 698)
-* Dont use soon-to-be-reserved keyword await as function name (FutureProduceResult) (dpkp PR 697)
+* Don't use soon-to-be-reserved keyword await as function name (FutureProduceResult) (dpkp PR 697)
Clients
* Fix socket leaks in KafkaClient (dpkp PR 696)
@@ -241,7 +241,7 @@ Documentation
* Migrate load_example.py to KafkaProducer / KafkaConsumer
Internals
-* Dont override system rcvbuf or sndbuf unless configured explicitly (dpkp PR 557)
+* Don't override system rcvbuf or sndbuf unless configured explicitly (dpkp PR 557)
* Some attributes may not exist in __del__ if we failed assertions
* Break up some circular references and close client wake pipes on __del__ (aisch PR 554)
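
A side note on the KAFKA-3197 entry above, since it concerns the one producer
setting that controls ordering: with retries enabled and more than one request
in flight, a failed-and-retried batch can land behind a newer one. A minimal
sketch, not part of this commit (broker address and topic name are assumptions):

from kafka import KafkaProducer

producer = KafkaProducer(
    bootstrap_servers='localhost:9092',
    retries=5,
    max_in_flight_requests_per_connection=1,  # cap in-flight requests at 1
)
producer.send('my-topic', b'first')
producer.send('my-topic', b'second')  # lands after b'first' with the cap above
producer.flush()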


@@ -9,7 +9,7 @@ if [ -z "$SCALA_VERSION" ]; then
fi
# On travis CI, empty KAFKA_VERSION means skip integration tests
-# so we dont try to get binaries
+# so we don't try to get binaries
# Otherwise it means test all official releases, so we get all of them!
if [ -z "$KAFKA_VERSION" -a -z "$TRAVIS" ]; then
KAFKA_VERSION=$OFFICIAL_RELEASES


@@ -148,7 +148,7 @@ Producers
---------
* KAFKA-3388: Fix expiration of batches sitting in the accumulator (dpkp PR 699)
* KAFKA-3197: when max.in.flight.request.per.connection = 1, attempt to guarantee ordering (dpkp PR 698)
-* Dont use soon-to-be-reserved keyword await as function name (FutureProduceResult) (dpkp PR 697)
+* Don't use soon-to-be-reserved keyword await as function name (FutureProduceResult) (dpkp PR 697)
Clients
-------
@@ -292,7 +292,7 @@ Documentation
Internals
---------
-* Dont override system rcvbuf or sndbuf unless configured explicitly (dpkp PR 557)
+* Don't override system rcvbuf or sndbuf unless configured explicitly (dpkp PR 557)
* Some attributes may not exist in __del__ if we failed assertions
* Break up some circular references and close client wake pipes on __del__ (aisch PR 554)


@@ -20,7 +20,7 @@ KafkaConsumer
message.offset, message.key,
message.value))
-# consume earliest available messages, dont commit offsets
+# consume earliest available messages, don't commit offsets
KafkaConsumer(auto_offset_reset='earliest', enable_auto_commit=False)
# consume json messages
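
Expanded into a runnable form for context (hypothetical topic and broker names;
not part of the diff): pairing auto_offset_reset='earliest' with
enable_auto_commit=False means every run replays the topic from the oldest
retained message.

from kafka import KafkaConsumer

consumer = KafkaConsumer(
    'my-topic',                          # assumed topic name
    bootstrap_servers='localhost:9092',  # assumed broker address
    auto_offset_reset='earliest',        # start at the oldest available message
    enable_auto_commit=False,            # never commit offsets in the background
)
for message in consumer:
    print('%s:%d:%d: key=%s value=%s' % (message.topic, message.partition,
                                         message.offset, message.key,
                                         message.value))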


@@ -576,7 +576,7 @@ class SimpleClient(object):
if leader in self.brokers:
self.topics_to_brokers[topic_part] = self.brokers[leader]
-# If Unknown Broker, fake BrokerMetadata so we dont lose the id
+# If Unknown Broker, fake BrokerMetadata so we don't lose the id
# (not sure how this could happen. server could be in bad state)
else:
self.topics_to_brokers[topic_part] = BrokerMetadata(


@@ -362,7 +362,7 @@ class KafkaClient(object):
return
def is_disconnected(self, node_id):
"""Check whether the node connection has been disconnected failed.
"""Check whether the node connection has been disconnected or failed.
A disconnected node has either been closed or has failed. Connection
failures are usually transient and can be resumed in the next ready()
@@ -497,7 +497,7 @@ class KafkaClient(object):
else:
task_future.success(result)
-# If we got a future that is already done, dont block in _poll
+# If we got a future that is already done, don't block in _poll
if future and future.is_done:
timeout = 0
else:
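
The second hunk above encodes a small rule in KafkaClient.poll(): a future that
has already completed should not cost a full network-poll timeout. A generic
restatement of the pattern (function name assumed; this is not kafka-python's
actual internals):

def effective_timeout(future, default_timeout_ms):
    # A future resolved before we poll means the caller already has its
    # answer; poll with timeout 0 so we return immediately after servicing
    # any sockets that happen to be ready.
    if future is not None and future.is_done:
        return 0
    return default_timeout_ms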


@@ -140,7 +140,7 @@ class BrokerConnection(object):
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version is None
-state_chance_callback (callable): function to be called when the
+state_change_callback (callable): function to be called when the
connection state changes from CONNECTING to CONNECTED etc.
metrics (kafka.metrics.Metrics): Optionally provide a metrics
instance for capturing network IO stats. Default: None.
@@ -291,7 +291,7 @@ class BrokerConnection(object):
' Disconnecting.', self, ret)
self.close()
-# Connection timedout
+# Connection timed out
elif time.time() > request_timeout + self.last_attempt:
log.error('Connection attempt to %s timed out', self)
self.close() # error=TimeoutError ?
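
On the state_change_callback documented in the first hunk above: it is a hook
fired as the connection moves between states (CONNECTING, CONNECTED, and so
on). A hedged sketch of a handler; the callback's exact signature has varied
across kafka-python versions, so the single-argument form below is an
assumption:

import logging

log = logging.getLogger('conn-watcher')

def on_state_change(conn):
    # BrokerConnection exposes its current ConnectionStates value as
    # conn.state; logging transitions is a cheap way to debug flapping.
    log.info('connection %s is now %s', conn, conn.state)

# Passed at construction time, roughly:
#   BrokerConnection(host, port, afi, state_change_callback=on_state_change, ...)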


@@ -84,7 +84,7 @@ class KafkaConsumer(six.Iterator):
auto_offset_reset (str): A policy for resetting offsets on
OffsetOutOfRange errors: 'earliest' will move to the oldest
available message, 'latest' will move to the most recent. Any
-ofther value will raise the exception. Default: 'latest'.
+other value will raise the exception. Default: 'latest'.
enable_auto_commit (bool): If true the consumer's offset will be
periodically committed in the background. Default: True.
auto_commit_interval_ms (int): milliseconds between automatic
@@ -194,7 +194,7 @@ class KafkaConsumer(six.Iterator):
sasl_plain_username (str): username for sasl PLAIN authentication.
Default: None
sasl_plain_password (str): password for sasl PLAIN authentication.
-Defualt: None
+Default: None
Note:
Configuration parameters are described in more detail at
@@ -596,7 +596,7 @@ class KafkaConsumer(six.Iterator):
one greater than the newest available message.
Highwater offsets are returned in FetchResponse messages, so will
-not be available if not FetchRequests have been sent for this partition
+not be available if no FetchRequests have been sent for this partition
yet.
Arguments:
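
The highwater behavior clarified in the last hunk is easy to observe: the value
stays None until a FetchResponse arrives for the partition, then becomes one
greater than the newest available offset. A short sketch (broker, topic, and
partition are assumptions):

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
tp = TopicPartition('my-topic', 0)
consumer.assign([tp])

print(consumer.highwater(tp))   # None: no FetchRequests sent yet
consumer.poll(timeout_ms=1000)  # poll() issues FetchRequests
print(consumer.highwater(tp))   # now one past the newest message, if any arrived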


@@ -104,7 +104,7 @@ def _send_upstream(queue, client, codec, batch_time, batch_size,
msgset = defaultdict(list)
# Merging messages will require a bit more work to manage correctly
-# for now, dont look for new batches if we have old ones to retry
+# for now, don't look for new batches if we have old ones to retry
if request_tries:
count = 0
log.debug('Skipping new batch collection to handle retries')
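
The comment fixed above states _send_upstream's batching policy: while any
batch is awaiting retry, no new batch is collected, since merging old and new
messages correctly would take more bookkeeping. A standalone sketch of that
rule (names are assumptions, not the module's real internals):

def choose_work(request_tries, pending_messages, batch_size):
    # Batches awaiting retry take priority; collecting new work here would
    # require merge logic the module deliberately avoids.
    if request_tries:
        return list(request_tries)
    # No retries pending: gather up to batch_size new messages.
    return pending_messages[:batch_size]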


@@ -70,7 +70,7 @@ class TestFailover(KafkaIntegrationTestCase):
# kill leader for partition
self._kill_leader(topic, partition)
-# expect failure, but dont wait more than 60 secs to recover
+# expect failure, but don't wait more than 60 secs to recover
recovered = False
started = time.time()
timeout = 60


@@ -65,6 +65,6 @@ def test_murmur2_java_compatibility():
def test_murmur2_not_ascii():
-# Verify no regression of murmur2() bug encoding py2 bytes that dont ascii encode
+# Verify no regression of murmur2() bug encoding py2 bytes that don't ascii encode
murmur2(b'\xa4')
murmur2(b'\x81' * 1000)


@@ -31,7 +31,7 @@ def test_end_to_end(kafka_broker, compression):
# LZ4 requires 0.8.2
if version() < (0, 8, 2):
return
-# LZ4 python libs dont work on python2.6
+# LZ4 python libs don't work on python2.6
elif sys.version_info < (2, 7):
return