author    | Jeff Widman <jeff@jeffwidman.com> | 2016-11-08 01:29:45 -0800
committer | Jeff Widman <jeff@jeffwidman.com> | 2016-11-14 03:16:41 -0800
commit    | cd74c1fe4053385151175b5100cb0a0d43bc31f6 (patch)
tree      | bc430da90685195db6532311f37781ac734479e5
parent    | 77591afa789a4752f4d385228bea980f448f6a08 (diff)
download  | kafka-python-cd74c1fe4053385151175b5100cb0a0d43bc31f6.tar.gz
Fix typos
-rw-r--r-- | CHANGES.md                        | 4
-rwxr-xr-x | build_integration.sh              | 2
-rw-r--r-- | docs/changelog.rst                | 4
-rw-r--r-- | docs/usage.rst                    | 2
-rw-r--r-- | kafka/client.py                   | 2
-rw-r--r-- | kafka/client_async.py             | 4
-rw-r--r-- | kafka/conn.py                     | 4
-rw-r--r-- | kafka/consumer/group.py           | 6
-rw-r--r-- | kafka/producer/base.py            | 2
-rw-r--r-- | test/test_failover_integration.py | 2
-rw-r--r-- | test/test_partitioner.py          | 2
-rw-r--r-- | test/test_producer.py             | 2
12 files changed, 18 insertions, 18 deletions
diff --git a/CHANGES.md b/CHANGES.md
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -123,7 +123,7 @@ Consumers
 Producers
 * KAFKA-3388: Fix expiration of batches sitting in the accumulator (dpkp PR 699)
 * KAFKA-3197: when max.in.flight.request.per.connection = 1, attempt to guarantee ordering (dpkp PR 698)
-* Dont use soon-to-be-reserved keyword await as function name (FutureProduceResult) (dpkp PR 697)
+* Don't use soon-to-be-reserved keyword await as function name (FutureProduceResult) (dpkp PR 697)
 
 Clients
 * Fix socket leaks in KafkaClient (dpkp PR 696)
@@ -241,7 +241,7 @@ Documentation
 * Migrate load_example.py to KafkaProducer / KafkaConsumer
 
 Internals
-* Dont override system rcvbuf or sndbuf unless configured explicitly (dpkp PR 557)
+* Don't override system rcvbuf or sndbuf unless configured explicitly (dpkp PR 557)
 * Some attributes may not exist in __del__ if we failed assertions
 * Break up some circular references and close client wake pipes on __del__ (aisch PR 554)
diff --git a/build_integration.sh b/build_integration.sh
index fabf656..04299a8 100755
--- a/build_integration.sh
+++ b/build_integration.sh
@@ -9,7 +9,7 @@ if [ -z "$SCALA_VERSION" ]; then
 fi
 
 # On travis CI, empty KAFKA_VERSION means skip integration tests
-# so we dont try to get binaries
+# so we don't try to get binaries
 # Otherwise it means test all official releases, so we get all of them!
 if [ -z "$KAFKA_VERSION" -a -z "$TRAVIS" ]; then
   KAFKA_VERSION=$OFFICIAL_RELEASES
diff --git a/docs/changelog.rst b/docs/changelog.rst
index c56a432..ab2a092 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -148,7 +148,7 @@ Producers
 ---------
 * KAFKA-3388: Fix expiration of batches sitting in the accumulator (dpkp PR 699)
 * KAFKA-3197: when max.in.flight.request.per.connection = 1, attempt to guarantee ordering (dpkp PR 698)
-* Dont use soon-to-be-reserved keyword await as function name (FutureProduceResult) (dpkp PR 697)
+* Don't use soon-to-be-reserved keyword await as function name (FutureProduceResult) (dpkp PR 697)
 
 Clients
 -------
@@ -292,7 +292,7 @@ Documentation
 
 Internals
 ---------
-* Dont override system rcvbuf or sndbuf unless configured explicitly (dpkp PR 557)
+* Don't override system rcvbuf or sndbuf unless configured explicitly (dpkp PR 557)
 * Some attributes may not exist in __del__ if we failed assertions
 * Break up some circular references and close client wake pipes on __del__ (aisch PR 554)
diff --git a/docs/usage.rst b/docs/usage.rst
index 0ee9894..22fe20d 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -20,7 +20,7 @@ KafkaConsumer
                                           message.offset, message.key,
                                           message.value))
 
-    # consume earliest available messages, dont commit offsets
+    # consume earliest available messages, don't commit offsets
     KafkaConsumer(auto_offset_reset='earliest', enable_auto_commit=False)
 
     # consume json messages
diff --git a/kafka/client.py b/kafka/client.py
index 247905a..3de563c 100644
--- a/kafka/client.py
+++ b/kafka/client.py
@@ -576,7 +576,7 @@ class SimpleClient(object):
                 if leader in self.brokers:
                     self.topics_to_brokers[topic_part] = self.brokers[leader]
 
-                # If Unknown Broker, fake BrokerMetadata so we dont lose the id
+                # If Unknown Broker, fake BrokerMetadata so we don't lose the id
                 # (not sure how this could happen. server could be in bad state)
                 else:
                     self.topics_to_brokers[topic_part] = BrokerMetadata(
diff --git a/kafka/client_async.py b/kafka/client_async.py
index 0849c7b..03a2f00 100644
--- a/kafka/client_async.py
+++ b/kafka/client_async.py
@@ -362,7 +362,7 @@ class KafkaClient(object):
         return
 
     def is_disconnected(self, node_id):
-        """Check whether the node connection has been disconnected failed.
+        """Check whether the node connection has been disconnected or failed.
 
         A disconnected node has either been closed or has failed. Connection
         failures are usually transient and can be resumed in the next ready()
@@ -497,7 +497,7 @@ class KafkaClient(object):
             else:
                 task_future.success(result)
 
-        # If we got a future that is already done, dont block in _poll
+        # If we got a future that is already done, don't block in _poll
         if future and future.is_done:
             timeout = 0
         else:
diff --git a/kafka/conn.py b/kafka/conn.py
index 6af0d8f..9f5b3f0 100644
--- a/kafka/conn.py
+++ b/kafka/conn.py
@@ -140,7 +140,7 @@ class BrokerConnection(object):
         api_version_auto_timeout_ms (int): number of milliseconds to throw a
             timeout exception from the constructor when checking the broker
             api version. Only applies if api_version is None
-        state_chance_callback (callable): function to be called when the
+        state_change_callback (callable): function to be called when the
             connection state changes from CONNECTING to CONNECTED etc.
         metrics (kafka.metrics.Metrics): Optionally provide a metrics
             instance for capturing network IO stats. Default: None.
@@ -291,7 +291,7 @@ class BrokerConnection(object):
                               ' Disconnecting.', self, ret)
                     self.close()
 
-            # Connection timedout
+            # Connection timed out
             elif time.time() > request_timeout + self.last_attempt:
                 log.error('Connection attempt to %s timed out', self)
                 self.close()  # error=TimeoutError ?
diff --git a/kafka/consumer/group.py b/kafka/consumer/group.py
index 3ab68a7..5550d54 100644
--- a/kafka/consumer/group.py
+++ b/kafka/consumer/group.py
@@ -84,7 +84,7 @@ class KafkaConsumer(six.Iterator):
         auto_offset_reset (str): A policy for resetting offsets on
             OffsetOutOfRange errors: 'earliest' will move to the oldest
             available message, 'latest' will move to the most recent. Any
-            ofther value will raise the exception. Default: 'latest'.
+            other value will raise the exception. Default: 'latest'.
         enable_auto_commit (bool): If true the consumer's offset will be
             periodically committed in the background. Default: True.
         auto_commit_interval_ms (int): milliseconds between automatic
@@ -194,7 +194,7 @@ class KafkaConsumer(six.Iterator):
         sasl_plain_username (str): username for sasl PLAIN authentication.
             Default: None
         sasl_plain_password (str): password for sasl PLAIN authentication.
-            Defualt: None
+            Default: None
 
     Note:
         Configuration parameters are described in more detail at
@@ -596,7 +596,7 @@ class KafkaConsumer(six.Iterator):
             one greater than the newest available message.
 
         Highwater offsets are returned in FetchResponse messages, so will
-        not be available if not FetchRequests have been sent for this partition
+        not be available if no FetchRequests have been sent for this partition
         yet.
 
         Arguments:
diff --git a/kafka/producer/base.py b/kafka/producer/base.py
index 8471818..30b6fd7 100644
--- a/kafka/producer/base.py
+++ b/kafka/producer/base.py
@@ -104,7 +104,7 @@ def _send_upstream(queue, client, codec, batch_time, batch_size,
         msgset = defaultdict(list)
 
         # Merging messages will require a bit more work to manage correctly
-        # for now, dont look for new batches if we have old ones to retry
+        # for now, don't look for new batches if we have old ones to retry
         if request_tries:
             count = 0
             log.debug('Skipping new batch collection to handle retries')
diff --git a/test/test_failover_integration.py b/test/test_failover_integration.py
index 9c2163c..2439b58 100644
--- a/test/test_failover_integration.py
+++ b/test/test_failover_integration.py
@@ -70,7 +70,7 @@ class TestFailover(KafkaIntegrationTestCase):
             # kill leader for partition
             self._kill_leader(topic, partition)
 
-            # expect failure, but dont wait more than 60 secs to recover
+            # expect failure, but don't wait more than 60 secs to recover
             recovered = False
             started = time.time()
             timeout = 60
diff --git a/test/test_partitioner.py b/test/test_partitioner.py
index 2b5fe62..47470e1 100644
--- a/test/test_partitioner.py
+++ b/test/test_partitioner.py
@@ -65,6 +65,6 @@ def test_murmur2_java_compatibility():
 
 def test_murmur2_not_ascii():
-    # Verify no regression of murmur2() bug encoding py2 bytes that dont ascii encode
+    # Verify no regression of murmur2() bug encoding py2 bytes that don't ascii encode
     murmur2(b'\xa4')
     murmur2(b'\x81' * 1000)
diff --git a/test/test_producer.py b/test/test_producer.py
index 125737b..136d85f 100644
--- a/test/test_producer.py
+++ b/test/test_producer.py
@@ -31,7 +31,7 @@ def test_end_to_end(kafka_broker, compression):
     # LZ4 requires 0.8.2
     if version() < (0, 8, 2):
         return
-    # LZ4 python libs dont work on python2.6
+    # LZ4 python libs don't work on python2.6
     elif sys.version_info < (2, 7):
         return
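
The docs/usage.rst hunk above fixes a comment inside a consumer example. For context, here is a minimal runnable sketch of that same pattern, assuming a hypothetical topic `my-topic` and a broker at `localhost:9092` (neither is part of this commit):

```python
import json

from kafka import KafkaConsumer

# Consume earliest available messages without committing offsets,
# deserializing JSON values as in the usage.rst example.
consumer = KafkaConsumer(
    'my-topic',                          # hypothetical topic name
    bootstrap_servers='localhost:9092',  # hypothetical broker address
    auto_offset_reset='earliest',        # oldest message on OffsetOutOfRange
    enable_auto_commit=False,            # no background offset commits
    value_deserializer=lambda m: json.loads(m.decode('utf-8')),
)

for message in consumer:
    print(message.topic, message.partition, message.offset, message.value)
```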
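The kafka/consumer/group.py docstring fix clarifies that highwater offsets only become available once a FetchResponse has been received for the partition. A sketch of what that means in practice, under the same hypothetical topic/broker assumptions; the None-before-fetch behavior here is inferred from that docstring:

```python
from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092')  # hypothetical broker
tp = TopicPartition('my-topic', 0)                            # hypothetical partition
consumer.assign([tp])

# No FetchRequests have been sent yet, so no highwater offset is known.
print(consumer.highwater(tp))   # expected: None

# poll() sends FetchRequests; the responses carry the highwater offset.
consumer.poll(timeout_ms=1000)
print(consumer.highwater(tp))   # broker-reported end offset once a fetch returns
```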