Diffstat (limited to 'kafka')
 kafka/client.py         |  2 +-
 kafka/client_async.py   |  4 ++--
 kafka/conn.py           |  4 ++--
 kafka/consumer/group.py |  6 +++---
 kafka/producer/base.py  |  2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/kafka/client.py b/kafka/client.py
index 247905a..3de563c 100644
--- a/kafka/client.py
+++ b/kafka/client.py
@@ -576,7 +576,7 @@ class SimpleClient(object):
if leader in self.brokers:
self.topics_to_brokers[topic_part] = self.brokers[leader]
- # If Unknown Broker, fake BrokerMetadata so we dont lose the id
+ # If Unknown Broker, fake BrokerMetadata so we don't lose the id
# (not sure how this could happen. server could be in bad state)
else:
self.topics_to_brokers[topic_part] = BrokerMetadata(
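For context, the fallback this comment describes amounts to keeping the leader id alive with empty host/port metadata rather than dropping the mapping. A minimal sketch, assuming BrokerMetadata is the (nodeId, host, port) namedtuple this era of the client uses; the helper name is hypothetical:

    from collections import namedtuple

    # Same shape as kafka.common.BrokerMetadata in this version of the client
    BrokerMetadata = namedtuple('BrokerMetadata', ['nodeId', 'host', 'port'])

    def leader_metadata(brokers, leader):
        # Known broker: use its real metadata
        if leader in brokers:
            return brokers[leader]
        # Unknown broker: fake the metadata so we don't lose the id
        return BrokerMetadata(leader, None, None)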
diff --git a/kafka/client_async.py b/kafka/client_async.py
index 0849c7b..03a2f00 100644
--- a/kafka/client_async.py
+++ b/kafka/client_async.py
@@ -362,7 +362,7 @@ class KafkaClient(object):
return
def is_disconnected(self, node_id):
- """Check whether the node connection has been disconnected failed.
+ """Check whether the node connection has been disconnected or failed.
A disconnected node has either been closed or has failed. Connection
failures are usually transient and can be resumed in the next ready()
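The docstring's point about transient failures suggests a simple recovery pattern. A hedged sketch, assuming a KafkaClient instance `client` and a broker `node_id` already known from cluster metadata:

    # Re-offer a failed node to ready(), which re-initiates the connection;
    # poll() drives the I/O that completes the handshake. Reconnect backoff
    # may keep ready() returning False for a while, so this can loop.
    while client.is_disconnected(node_id):
        client.ready(node_id)
        client.poll(timeout_ms=100)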
@@ -497,7 +497,7 @@ class KafkaClient(object):
else:
task_future.success(result)
- # If we got a future that is already done, dont block in _poll
+ # If we got a future that is already done, don't block in _poll
if future and future.is_done:
timeout = 0
else:
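The corrected comment documents a small latency optimization: a future that completed before the poll means there is nothing to wait for. A condensed, hypothetical restatement of that decision:

    def poll_timeout_ms(future, default_timeout_ms):
        # A future that is already done means there is nothing to block
        # on; return 0 so _poll comes back immediately.
        if future is not None and future.is_done:
            return 0
        # Otherwise block up to the caller's budget; the real client
        # derives this from pending task delays and request timeouts.
        return default_timeout_ms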
diff --git a/kafka/conn.py b/kafka/conn.py
index 6af0d8f..9f5b3f0 100644
--- a/kafka/conn.py
+++ b/kafka/conn.py
@@ -140,7 +140,7 @@ class BrokerConnection(object):
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version is None
- state_chance_callback (callable): function to be called when the
+ state_change_callback (callable): function to be called when the
connection state changes from CONNECTING to CONNECTED etc.
metrics (kafka.metrics.Metrics): Optionally provide a metrics
instance for capturing network IO stats. Default: None.
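A hedged usage sketch for the two options documented above. The (host, port, afi, **configs) constructor shape matches this era of kafka-python, but the callback signature has varied across versions, so treat this as illustrative:

    import socket
    from kafka.conn import BrokerConnection

    def on_state_change(conn):
        # Invoked as the connection moves CONNECTING -> CONNECTED, etc.
        print('connection state changed:', conn)

    conn = BrokerConnection(
        'localhost', 9092, socket.AF_INET,   # placeholder broker address
        api_version=None,                    # probe the broker's api version
        api_version_auto_timeout_ms=2000,    # raise if probing exceeds 2s
        state_change_callback=on_state_change,
    )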
@@ -291,7 +291,7 @@ class BrokerConnection(object):
' Disconnecting.', self, ret)
self.close()
- # Connection timedout
+ # Connection timed out
elif time.time() > request_timeout + self.last_attempt:
log.error('Connection attempt to %s timed out', self)
self.close() # error=TimeoutError ?
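The corrected comment marks the timeout branch; the check itself compares wall-clock time against the start of the connect attempt. In isolation, with request_timeout in seconds (the client derives it from request_timeout_ms / 1000.0):

    import time

    def connect_attempt_timed_out(last_attempt, request_timeout):
        # last_attempt is the time.time() stamp of the current connect try
        return time.time() > last_attempt + request_timeout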
diff --git a/kafka/consumer/group.py b/kafka/consumer/group.py
index 3ab68a7..5550d54 100644
--- a/kafka/consumer/group.py
+++ b/kafka/consumer/group.py
@@ -84,7 +84,7 @@ class KafkaConsumer(six.Iterator):
auto_offset_reset (str): A policy for resetting offsets on
OffsetOutOfRange errors: 'earliest' will move to the oldest
available message, 'latest' will move to the most recent. Any
- ofther value will raise the exception. Default: 'latest'.
+ other value will raise the exception. Default: 'latest'.
enable_auto_commit (bool): If true the consumer's offset will be
periodically committed in the background. Default: True.
auto_commit_interval_ms (int): milliseconds between automatic
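A hedged configuration example for the options documented in this hunk; the topic name and broker address are placeholders:

    from kafka import KafkaConsumer

    consumer = KafkaConsumer(
        'my-topic',
        bootstrap_servers='localhost:9092',
        auto_offset_reset='earliest',    # oldest message on OffsetOutOfRange
        enable_auto_commit=True,         # commit offsets in the background
        auto_commit_interval_ms=5000,
    )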
@@ -194,7 +194,7 @@ class KafkaConsumer(six.Iterator):
sasl_plain_username (str): username for sasl PLAIN authentication.
Default: None
sasl_plain_password (str): password for sasl PLAIN authentication.
- Defualt: None
+ Default: None
Note:
Configuration parameters are described in more detail at
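For the SASL options above, a hypothetical PLAIN setup; credentials and the address are placeholders, and SASL_PLAINTEXT assumes the broker is configured to accept it:

    from kafka import KafkaConsumer

    consumer = KafkaConsumer(
        bootstrap_servers='localhost:9092',
        security_protocol='SASL_PLAINTEXT',
        sasl_mechanism='PLAIN',
        sasl_plain_username='alice',
        sasl_plain_password='secret',
    )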
@@ -596,7 +596,7 @@ class KafkaConsumer(six.Iterator):
one greater than the newest available message.
Highwater offsets are returned in FetchResponse messages, so will
- not be available if not FetchRequests have been sent for this partition
+ not be available if no FetchRequests have been sent for this partition
yet.
Arguments:
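The highwater docstring implies the value is only populated once a fetch has completed for the partition. A minimal sketch; topic and partition are placeholders, and the value can still be None if the poll's fetch has not finished:

    from kafka import KafkaConsumer, TopicPartition

    tp = TopicPartition('my-topic', 0)
    consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
    consumer.assign([tp])
    assert consumer.highwater(tp) is None   # no FetchRequests sent yet
    consumer.poll(timeout_ms=1000)          # responses carry highwater marks
    print(consumer.highwater(tp))           # one past the newest offset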
diff --git a/kafka/producer/base.py b/kafka/producer/base.py
index 8471818..30b6fd7 100644
--- a/kafka/producer/base.py
+++ b/kafka/producer/base.py
@@ -104,7 +104,7 @@ def _send_upstream(queue, client, codec, batch_time, batch_size,
msgset = defaultdict(list)
# Merging messages will require a bit more work to manage correctly
- # for now, dont look for new batches if we have old ones to retry
+ # for now, don't look for new batches if we have old ones to retry
if request_tries:
count = 0
log.debug('Skipping new batch collection to handle retries')
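The corrected comment in _send_upstream gates batch collection behind the retry backlog. A self-contained paraphrase of that loop shape (Python 3); only request_tries and msgset mirror names from the hunk, the rest is hypothetical:

    from collections import defaultdict
    from queue import Empty

    def collect_batches(queue, request_tries, batch_size):
        msgset = defaultdict(list)
        # Don't look for new batches if we have old ones to retry
        if request_tries:
            return msgset
        for _ in range(batch_size):
            try:
                topic_partition, msg = queue.get_nowait()
            except Empty:
                break
            msgset[topic_partition].append(msg)
        return msgset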