author     Zack Dever <zack.dever@rd.io>  2015-12-07 13:37:30 -0800
committer  Zack Dever <zack.dever@rd.io>  2015-12-07 13:37:30 -0800
commit     753d8dca136178a4c2ecb0cda8d4ec371805455f (patch)
tree       83225fb95731551cbb9c5a5aeb6fb08a3ec9f0ad
parent     efc3d4f466c0d6630c9fff09fb1b90035c5351d7 (diff)
parent     a678260d3622a0decd2d123ac0cfc445084eed60 (diff)
Merge branch 'master' into 0.9
-rw-r--r--  .travis.yml                                       20
-rw-r--r--  CHANGES.md                                        38
-rw-r--r--  MANIFEST.in                                        3
-rw-r--r--  README.rst                                        30
-rwxr-xr-x  build_integration.sh                               2
-rw-r--r--  docs/conf.py                                       8
-rw-r--r--  docs/index.rst                                    24
-rw-r--r--  kafka/client.py                                   10
-rw-r--r--  kafka/consumer/kafka.py                            4
-rw-r--r--  kafka/producer/base.py                            35
-rw-r--r--  kafka/version.py                                   2
-rw-r--r--  servers/0.8.2.2/resources/kafka.properties      124
-rw-r--r--  servers/0.8.2.2/resources/log4j.properties       24
-rw-r--r--  servers/0.8.2.2/resources/zookeeper.properties   21
-rw-r--r--  servers/0.9.0.0/resources/kafka.properties      124
-rw-r--r--  servers/0.9.0.0/resources/log4j.properties       24
-rw-r--r--  servers/0.9.0.0/resources/zookeeper.properties   21
-rw-r--r--  setup.py                                           1
-rw-r--r--  test/test_producer.py                             12
-rw-r--r--  test/test_producer_integration.py                  6
-rw-r--r--  tox.ini                                           12
-rwxr-xr-x  travis_selector.sh                                 2
22 files changed, 485 insertions, 62 deletions
diff --git a/.travis.yml b/.travis.yml
index 136c19f..1f0baa6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,6 +5,7 @@ python:
- 2.7
- 3.3
- 3.4
+ - 3.5
- pypy
env:
@@ -12,18 +13,27 @@ env:
- KAFKA_VERSION=0.8.0
- KAFKA_VERSION=0.8.1
- KAFKA_VERSION=0.8.1.1
- - KAFKA_VERSION=0.8.2.1
+ - KAFKA_VERSION=0.8.2.2
+ - KAFKA_VERSION=0.9.0.0
+
+sudo: false
+
+addons:
+ apt:
+ packages:
+ - libsnappy-dev
+
+cache:
+ directories:
+ - $HOME/.cache/pip
+ - servers/
before_install:
- - sudo apt-get install libsnappy-dev
- ./build_integration.sh
install:
- pip install tox coveralls
- pip install .
- # Deal with issue on Travis builders re: multiprocessing.Queue :(
- # See https://github.com/travis-ci/travis-cookbooks/issues/155
- - sudo rm -rf /dev/shm && sudo ln -s /run/shm /dev/shm
deploy:
provider: pypi
diff --git a/CHANGES.md b/CHANGES.md
index c94cbd5..b3c9094 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,41 @@
+# 0.9.5 (Dec 6, 2015)
+
+Consumers
+* Initial support for consumer coordinator: offsets only (toddpalino PR 420)
+* Allow blocking until some messages are received in SimpleConsumer (saaros PR 457)
+* Support subclass config changes in KafkaConsumer (zackdever PR 446)
+* Support retry semantics in MultiProcessConsumer (barricadeio PR 456)
+* Support partition_info in MultiProcessConsumer (scrapinghub PR 418)
+* Enable seek() to an absolute offset in SimpleConsumer (haosdent PR 412)
+* Add KafkaConsumer.close() (ucarion PR 426)
+
+Producers
+* Catch client.reinit() exceptions in async producer (dpkp)
+* Producer.stop() now blocks until async thread completes (dpkp PR 485)
+* Catch errors during load_metadata_for_topics in async producer (bschopman PR 467)
+* Add compression-level support for codecs that support it (trbs PR 454)
+* Fix translation of Java murmur2 code, fix byte encoding for Python 3 (chrischamberlin PR 439)
+* Only call stop() on not-stopped producer objects (docker-hub PR 435)
+* Allow null payload for deletion feature (scrapinghub PR 409)
+
+Clients
+* Use non-blocking io for broker aware requests (ecanzonieri PR 473)
+* Use debug logging level for metadata request (ecanzonieri PR 415)
+* Catch KafkaUnavailableError in _send_broker_aware_request (mutability PR 436)
+* Lower logging level on replica not available and commit (ecanzonieri PR 415)
+
+Documentation
+* Update docs and links wrt maintainer change (mumrah -> dpkp)
+
+Internals
+* Add py35 to tox testing
+* Update travis config to use container infrastructure
+* Add 0.8.2.2 and 0.9.0.0 resources for integration tests; update default official releases
+* new pylint disables for pylint 1.5.1 (zackdever PR 481)
+* Fix python3 / python2 comments re queue/Queue (dpkp)
+* Add Murmur2Partitioner to kafka __all__ imports (dpkp Issue 471)
+* Include LICENSE in PyPI sdist (koobs PR 441)
+
# 0.9.4 (June 11, 2015)
Consumers
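
Two of the 0.9.5 items above surface directly at the API level: Murmur2Partitioner is now importable from the top-level kafka package, and its hashing matches the Java client's murmur2 placement after the PR 439 fix. A minimal sketch, assuming the legacy partitioner interface of this era (constructor takes a partition list; partition() returns one of them); the key and partition ids are placeholders:

    from kafka import Murmur2Partitioner

    partitions = [0, 1, 2, 3]  # hypothetical partition ids for one topic
    partitioner = Murmur2Partitioner(partitions)
    # Keys hash with murmur2, matching the Java producer's placement, so
    # mixed Java/Python producers keep keyed messages on the same partitions.
    print(partitioner.partition(b'example-key', partitions))
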
diff --git a/MANIFEST.in b/MANIFEST.in
index 1731afa..01e6a4d 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,2 +1,5 @@
recursive-include kafka *.py
+include README.rst
include LICENSE
+include AUTHORS.md
+include CHANGES.md
diff --git a/README.rst b/README.rst
index c05a240..7eaf68d 100644
--- a/README.rst
+++ b/README.rst
@@ -12,42 +12,48 @@ Kafka Python client
:target: http://kafka-python.readthedocs.org/en/latest/
:alt: Full documentation available on ReadTheDocs
-`Full documentation available on ReadTheDocs`_
-
This module provides low-level protocol support for Apache Kafka as well as
high-level consumer and producer classes. Request batching is supported by the
protocol as well as broker-aware request routing. Gzip and Snappy compression
is also supported for message sets.
-http://kafka.apache.org/
+Coordinated Consumer Group support is under development - see Issue #38.
+
+Full documentation available on `Read the Docs <https://kafka-python.readthedocs.org/en/latest/>`_
On Freenode IRC at #kafka-python, as well as #apache-kafka
For general discussion of kafka-client design and implementation (not python specific),
see https://groups.google.com/forum/#!forum/kafka-clients
+For information about Apache Kafka generally, see https://kafka.apache.org/
+
License
----------
-Copyright 2015, David Arthur under Apache License, v2.0. See `LICENSE`
+Apache License, v2.0. See `LICENSE <https://github.com/dpkp/kafka-python/blob/master/LICENSE>`_
+Copyright 2015, David Arthur, Dana Powers, and Contributors
+(See `AUTHORS <https://github.com/dpkp/kafka-python/blob/master/AUTHORS.md>`_)
Status
----------
-The current stable version of this package is `0.9.4`_ and is compatible with:
+The current stable version of this package is
+`0.9.5 <https://github.com/dpkp/kafka-python/releases/tag/v0.9.5>`_
+and is compatible with:
Kafka broker versions
-- 0.8.2.1 [offset management currently ZK only -- does not support ConsumerCoordinator offset management APIs]
+- 0.9.0.0
+- 0.8.2.2
+- 0.8.2.1
- 0.8.1.1
- 0.8.1
- 0.8.0
Python versions
-- 2.6 (tested on 2.6.9)
-- 2.7 (tested on 2.7.9)
-- 3.3 (tested on 3.3.5)
+- 3.5 (tested on 3.5.0)
- 3.4 (tested on 3.4.2)
+- 3.3 (tested on 3.3.5)
+- 2.7 (tested on 2.7.9)
+- 2.6 (tested on 2.6.9)
- pypy (tested on pypy 2.5.0 / python 2.7.8)
-
-.. _Full documentation available on ReadTheDocs: http://kafka-python.readthedocs.org/en/latest/
-.. _0.9.4: https://github.com/dpkp/kafka-python/releases/tag/v0.9.4
diff --git a/build_integration.sh b/build_integration.sh
index 5395bb8..aa9c399 100755
--- a/build_integration.sh
+++ b/build_integration.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# Versions available for testing via binary distributions
-OFFICIAL_RELEASES="0.8.0 0.8.1 0.8.1.1 0.8.2.1"
+OFFICIAL_RELEASES="0.8.1.1 0.8.2.2 0.9.0.0"
# Useful configuration vars, with sensible defaults
if [ -z "$SCALA_VERSION" ]; then
diff --git a/docs/conf.py b/docs/conf.py
index 2979560..dc68fd4 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -49,7 +49,7 @@ master_doc = 'index'
# General information about the project.
project = u'kafka-python'
-copyright = u'2015, David Arthur'
+copyright = u'2015 - David Arthur, Dana Powers, and Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -203,7 +203,7 @@ latex_elements = {
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'kafka-python.tex', u'kafka-python Documentation',
- u'David Arthur', 'manual'),
+ u'Dana Powers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -233,7 +233,7 @@ latex_documents = [
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kafka-python', u'kafka-python Documentation',
- [u'David Arthur'], 1)
+ [u'Dana Powers'], 1)
]
# If true, show URL addresses after external links.
@@ -247,7 +247,7 @@ man_pages = [
# dir menu entry, description, category)
texinfo_documents = [
('index', 'kafka-python', u'kafka-python Documentation',
- u'David Arthur', 'kafka-python', 'One line description of project.',
+ u'Dana Powers', 'kafka-python', 'One line description of project.',
'Miscellaneous'),
]
diff --git a/docs/index.rst b/docs/index.rst
index b7e7ccd..fa77a8e 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -6,37 +6,45 @@ high-level consumer and producer classes. Request batching is supported by the
protocol as well as broker-aware request routing. Gzip and Snappy compression
is also supported for message sets.
-http://kafka.apache.org/
+Coordinated Consumer Group support is under development - see Issue #38.
On Freenode IRC at #kafka-python, as well as #apache-kafka
For general discussion of kafka-client design and implementation (not python specific),
see https://groups.google.com/forum/m/#!forum/kafka-clients
+For information about Apache Kafka generally, see https://kafka.apache.org/
+
Status
------
-The current stable version of this package is `0.9.4 <https://github.com/dpkp/kafka-python/releases/tag/v0.9.4>`_ and is compatible with:
+The current stable version of this package is `0.9.5 <https://github.com/dpkp/kafka-python/releases/tag/v0.9.5>`_ and is compatible with:
Kafka broker versions
-* 0.8.2.1 [offset management currently ZK only -- does not support ConsumerCoordinator offset management APIs]
+* 0.9.0.0
+* 0.8.2.2
+* 0.8.2.1
* 0.8.1.1
* 0.8.1
* 0.8.0
Python versions
-* 2.6 (tested on 2.6.9)
-* 2.7 (tested on 2.7.9)
-* 3.3 (tested on 3.3.5)
+* 3.5 (tested on 3.5.0)
* 3.4 (tested on 3.4.2)
+* 3.3 (tested on 3.3.5)
+* 2.7 (tested on 2.7.9)
+* 2.6 (tested on 2.6.9)
* pypy (tested on pypy 2.5.0 / python 2.7.8)
License
-------
-Copyright 2015, David Arthur under Apache License, v2.0. See `LICENSE <https://github.com/dpkp/kafka-python/blob/master/LICENSE>`_.
+Apache License, v2.0. See `LICENSE <https://github.com/dpkp/kafka-python/blob/master/LICENSE>`_.
+
+Copyright 2015, David Arthur, Dana Powers, and Contributors
+(See `AUTHORS <https://github.com/dpkp/kafka-python/blob/master/AUTHORS.md>`_).
Contents
@@ -45,9 +53,9 @@ Contents
.. toctree::
:maxdepth: 2
+ usage
install
tests
- usage
API reference </apidoc/modules>
Indices and tables
diff --git a/kafka/client.py b/kafka/client.py
index 810fa46..9018bb4 100644
--- a/kafka/client.py
+++ b/kafka/client.py
@@ -236,13 +236,13 @@ class KafkaClient(object):
responses[topic_partition] = None
continue
else:
- connections_by_socket[conn.get_connected_socket()] = (conn, broker)
+ connections_by_socket[conn.get_connected_socket()] = (conn, broker, requestId)
conn = None
while connections_by_socket:
sockets = connections_by_socket.keys()
rlist, _, _ = select.select(sockets, [], [], None)
- conn, broker = connections_by_socket.pop(rlist[0])
+ conn, broker, requestId = connections_by_socket.pop(rlist[0])
try:
response = conn.recv(requestId)
except ConnectionError as e:
@@ -607,11 +607,7 @@ class KafkaClient(object):
else:
decoder = KafkaProtocol.decode_produce_response
- try:
- resps = self._send_broker_aware_request(payloads, encoder, decoder)
- except Exception:
- if fail_on_error:
- raise
+ resps = self._send_broker_aware_request(payloads, encoder, decoder)
return [resp if not callback else callback(resp) for resp in resps
if resp is not None and
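
Both kafka/client.py hunks tighten error handling. The first fixes a latent bug in the select()-based request loop: requestId used to be read from a loop variable after the loop had moved on, so a response arriving on one socket could be decoded against another request's id; storing the id in the per-socket tuple keeps them paired. The second hunk stops swallowing exceptions that the fail_on_error flag was never meant to hide. A self-contained toy illustrating the first pattern (socketpair stands in for broker connections; names are illustrative, not the client's internals):

    import select
    import socket

    # Each pending "request" keeps its own (broker, request_id) next to its
    # socket, so whichever socket becomes readable first is decoded with the
    # id that produced it, not a stale value from an earlier iteration.
    connections_by_socket = {}
    writers = []
    for request_id in (1, 2, 3):
        reader, writer = socket.socketpair()
        connections_by_socket[reader] = ('broker-%d' % request_id, request_id)
        writers.append((writer, request_id))

    for writer, request_id in writers:
        writer.sendall(b'response-for-request-%d' % request_id)

    while connections_by_socket:
        readable, _, _ = select.select(list(connections_by_socket), [], [], None)
        sock = readable[0]
        broker, request_id = connections_by_socket.pop(sock)
        print(broker, request_id, sock.recv(100))
        sock.close()
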
diff --git a/kafka/consumer/kafka.py b/kafka/consumer/kafka.py
index 21b2bf6..3ef106c 100644
--- a/kafka/consumer/kafka.py
+++ b/kafka/consumer/kafka.py
@@ -269,6 +269,10 @@ class KafkaConsumer(object):
# Reset message iterator in case we were in the middle of one
self._reset_message_iterator()
+ def close(self):
+ """Close this consumer's underlying client."""
+ self._client.close()
+
def next(self):
"""Return the next available message
diff --git a/kafka/producer/base.py b/kafka/producer/base.py
index 8774c66..39b1f84 100644
--- a/kafka/producer/base.py
+++ b/kafka/producer/base.py
@@ -78,9 +78,17 @@ def _send_upstream(queue, client, codec, batch_time, batch_size,
retrying messages after stop_event is set, defaults to 30.
"""
request_tries = {}
- client.reinit()
- stop_at = None
+ while not stop_event.is_set():
+ try:
+ client.reinit()
+ except Exception as e:
+ log.warn('Async producer failed to connect to brokers; backoff for %s(ms) before retrying', retry_options.backoff_ms)
+ time.sleep(float(retry_options.backoff_ms) / 1000)
+ else:
+ break
+
+ stop_at = None
while not (stop_event.is_set() and queue.empty() and not request_tries):
# Handle stop_timeout
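
The hunk above keeps the async sender thread alive through broker outages at startup: instead of a single client.reinit() that could raise and kill the thread, it retries with the configured backoff until it connects or is told to stop. A distilled, self-contained version of that loop (names and the flaky connect callable are illustrative):

    import threading
    import time

    def connect_with_backoff(reinit, stop_event, backoff_ms=100):
        # Retry the connect callable until it succeeds or stop is requested;
        # each failure sleeps for the backoff instead of raising out.
        while not stop_event.is_set():
            try:
                reinit()
            except Exception:
                time.sleep(backoff_ms / 1000.0)
            else:
                return True
        return False

    attempts = []
    def flaky_reinit():
        attempts.append(1)
        if len(attempts) < 3:
            raise IOError('brokers not reachable yet')

    print(connect_with_backoff(flaky_reinit, threading.Event()))  # True, on try 3
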
@@ -407,17 +415,26 @@ class Producer(object):
raise
return resp
- def stop(self, timeout=1):
+ def stop(self, timeout=None):
"""
- Stop the producer. Optionally wait for the specified timeout before
- forcefully cleaning up.
+ Stop the producer (async mode). Blocks until async thread completes.
"""
+ if timeout is not None:
+ log.warning('timeout argument to stop() is deprecated - '
+ 'it will be removed in future release')
+
+ if not self.async:
+ log.warning('producer.stop() called, but producer is not async')
+ return
+
+ if self.stopped:
+ log.warning('producer.stop() called, but producer is already stopped')
+ return
+
if self.async:
self.queue.put((STOP_ASYNC_PRODUCER, None, None))
- self.thread.join(timeout)
-
- if self.thread.is_alive():
- self.thread_stop_event.set()
+ self.thread_stop_event.set()
+ self.thread.join()
if hasattr(self, '_cleanup_func'):
# Remove cleanup handler now that we've stopped
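
With this change, stop() on an async producer blocks until the sender thread drains its queue and exits; the timeout argument only logs a deprecation warning, and stopping twice is a logged no-op. A minimal sketch of the new contract, assuming the 0.9-era SimpleProducer API (the async keyword was valid then, before later Pythons reserved the word; broker address and topic are placeholders):

    from kafka import KafkaClient, SimpleProducer

    client = KafkaClient('localhost:9092')           # placeholder broker
    producer = SimpleProducer(client, async=True)
    producer.send_messages('my-topic', b'payload')   # hypothetical topic

    # Blocks until the background thread has flushed everything and joined;
    # producer.stop(timeout=5) still works but logs a deprecation warning.
    producer.stop()
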
diff --git a/kafka/version.py b/kafka/version.py
index cd64b48..9272695 100644
--- a/kafka/version.py
+++ b/kafka/version.py
@@ -1 +1 @@
-__version__ = '0.9.5-dev'
+__version__ = '0.9.5'
diff --git a/servers/0.8.2.2/resources/kafka.properties b/servers/0.8.2.2/resources/kafka.properties
new file mode 100644
index 0000000..685aed1
--- /dev/null
+++ b/servers/0.8.2.2/resources/kafka.properties
@@ -0,0 +1,124 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id={broker_id}
+
+############################# Socket Server Settings #############################
+
+# The port the socket server listens on
+port={port}
+
+# Hostname the broker will bind to. If not set, the server will bind to all interfaces
+host.name={host}
+
+# Hostname the broker will advertise to producers and consumers. If not set, it uses the
+# value for "host.name" if configured. Otherwise, it will use the value returned from
+# java.net.InetAddress.getCanonicalHostName().
+#advertised.host.name=<hostname routable by clients>
+
+# The port to publish to ZooKeeper for clients to use. If this is not set,
+# it will publish the same port that the broker binds to.
+#advertised.port=<port accessible by clients>
+
+# The number of threads handling network requests
+num.network.threads=2
+
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=1048576
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=1048576
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma-separated list of directories under which to store log files
+log.dirs={tmp_dir}/data
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions={partitions}
+default.replication.factor={replicas}
+
+## Short Replica Lag -- Drops failed brokers out of ISR
+replica.lag.time.max.ms=1000
+replica.socket.timeout.ms=1000
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=536870912
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=60000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma-separated list of host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect={zk_host}:{zk_port}/{zk_chroot}
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=1000000
+# We want to expire kafka broker sessions quickly when brokers die b/c we restart them quickly
+zookeeper.session.timeout.ms=500
diff --git a/servers/0.8.2.2/resources/log4j.properties b/servers/0.8.2.2/resources/log4j.properties
new file mode 100644
index 0000000..f863b3b
--- /dev/null
+++ b/servers/0.8.2.2/resources/log4j.properties
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=INFO, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.logger.kafka=DEBUG, stdout
+log4j.logger.org.I0Itec.zkclient.ZkClient=INFO, stdout
+log4j.logger.org.apache.zookeeper=INFO, stdout
diff --git a/servers/0.8.2.2/resources/zookeeper.properties b/servers/0.8.2.2/resources/zookeeper.properties
new file mode 100644
index 0000000..e3fd097
--- /dev/null
+++ b/servers/0.8.2.2/resources/zookeeper.properties
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# the directory where the snapshot is stored.
+dataDir={tmp_dir}
+# the port at which the clients will connect
+clientPort={port}
+clientPortAddress={host}
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
diff --git a/servers/0.9.0.0/resources/kafka.properties b/servers/0.9.0.0/resources/kafka.properties
new file mode 100644
index 0000000..685aed1
--- /dev/null
+++ b/servers/0.9.0.0/resources/kafka.properties
@@ -0,0 +1,124 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id={broker_id}
+
+############################# Socket Server Settings #############################
+
+# The port the socket server listens on
+port={port}
+
+# Hostname the broker will bind to. If not set, the server will bind to all interfaces
+host.name={host}
+
+# Hostname the broker will advertise to producers and consumers. If not set, it uses the
+# value for "host.name" if configured. Otherwise, it will use the value returned from
+# java.net.InetAddress.getCanonicalHostName().
+#advertised.host.name=<hostname routable by clients>
+
+# The port to publish to ZooKeeper for clients to use. If this is not set,
+# it will publish the same port that the broker binds to.
+#advertised.port=<port accessible by clients>
+
+# The number of threads handling network requests
+num.network.threads=2
+
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=1048576
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=1048576
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma-separated list of directories under which to store log files
+log.dirs={tmp_dir}/data
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions={partitions}
+default.replication.factor={replicas}
+
+## Short Replica Lag -- Drops failed brokers out of ISR
+replica.lag.time.max.ms=1000
+replica.socket.timeout.ms=1000
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=536870912
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=60000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma-separated list of host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect={zk_host}:{zk_port}/{zk_chroot}
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=1000000
+# We want to expire kafka broker sessions quickly when brokers die b/c we restart them quickly
+zookeeper.session.timeout.ms=500
diff --git a/servers/0.9.0.0/resources/log4j.properties b/servers/0.9.0.0/resources/log4j.properties
new file mode 100644
index 0000000..f863b3b
--- /dev/null
+++ b/servers/0.9.0.0/resources/log4j.properties
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=INFO, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.logger.kafka=DEBUG, stdout
+log4j.logger.org.I0Itec.zkclient.ZkClient=INFO, stdout
+log4j.logger.org.apache.zookeeper=INFO, stdout
diff --git a/servers/0.9.0.0/resources/zookeeper.properties b/servers/0.9.0.0/resources/zookeeper.properties
new file mode 100644
index 0000000..e3fd097
--- /dev/null
+++ b/servers/0.9.0.0/resources/zookeeper.properties
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# the directory where the snapshot is stored.
+dataDir={tmp_dir}
+# the port at which the clients will connect
+clientPort={port}
+clientPortAddress={host}
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
diff --git a/setup.py b/setup.py
index d50d28d..5fc2417 100644
--- a/setup.py
+++ b/setup.py
@@ -64,6 +64,7 @@ setup(
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
+ "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
diff --git a/test/test_producer.py b/test/test_producer.py
index 3c026e8..31282bf 100644
--- a/test/test_producer.py
+++ b/test/test_producer.py
@@ -111,19 +111,19 @@ class TestKafkaProducer(unittest.TestCase):
with self.assertRaises(FailedPayloadsError):
producer.send_messages('foobar', b'test message')
- def test_cleanup_stop_is_called_on_not_stopped_object(self):
+ def test_cleanup_is_not_called_on_stopped_producer(self):
producer = Producer(MagicMock(), async=True)
producer.stopped = True
- with patch('kafka.producer.base.Producer.stop') as base_stop:
+ with patch.object(producer, 'stop') as mocked_stop:
producer._cleanup_func(producer)
- self.assertEqual(base_stop.call_count, 0)
+ self.assertEqual(mocked_stop.call_count, 0)
- def test_cleanup_stop_is_not_called_on_stopped_object(self):
+ def test_cleanup_is_called_on_running_producer(self):
producer = Producer(MagicMock(), async=True)
producer.stopped = False
- with patch('kafka.producer.base.Producer.stop') as base_stop:
+ with patch.object(producer, 'stop') as mocked_stop:
producer._cleanup_func(producer)
- self.assertEqual(base_stop.call_count, 1)
+ self.assertEqual(mocked_stop.call_count, 1)
class TestKafkaProducerSendUpstream(unittest.TestCase):
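
The renamed tests also switch from patching the class (kafka.producer.base.Producer.stop) to patch.object(producer, 'stop'), which intercepts the exact bound method _cleanup_func will call on that one instance without affecting any other Producer. A standalone illustration of the difference, using only the mock library:

    try:
        from unittest.mock import patch   # Python 3.3+
    except ImportError:
        from mock import patch            # Python 2, per the tox deps above

    class Widget(object):
        def stop(self):
            return 'real stop'

    a, b = Widget(), Widget()

    # Instance-level patch: only `a` is intercepted, `b` keeps the real method.
    with patch.object(a, 'stop') as mocked:
        a.stop()
        print(mocked.call_count, b.stop())  # -> 1 real stop
    print(a.stop())                         # patch reverted: 'real stop'
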
diff --git a/test/test_producer_integration.py b/test/test_producer_integration.py
index 46b6851..c99ed63 100644
--- a/test/test_producer_integration.py
+++ b/test/test_producer_integration.py
@@ -204,13 +204,11 @@ class TestKafkaProducerIntegration(KafkaIntegrationTestCase):
resp = producer.send_messages(self.topic, self.msg("one"))
self.assertEqual(len(resp), 0)
- # wait for the server to report a new highwatermark
- while self.current_offset(self.topic, partition) == start_offset:
- time.sleep(0.1)
+ # flush messages
+ producer.stop()
self.assert_fetch_offset(partition, start_offset, [ self.msg("one") ])
- producer.stop()
@kafka_versions("all")
def test_batched_simple_producer__triggers_by_message(self):
diff --git a/tox.ini b/tox.ini
index a69dc99..1ee1e16 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,10 +1,8 @@
[tox]
-envlist = lint, py26, py27, pypy, py33, py34, docs
+envlist = lint, py26, py27, pypy, py33, py34, py35, docs
[testenv]
deps =
- six
- unittest2
nose
nose-timer
coverage
@@ -17,16 +15,20 @@ setenv =
PROJECT_ROOT = {toxinidir}
passenv = KAFKA_VERSION
-[testenv:py33]
+[testenv:py26]
deps =
+ six
+ unittest2
nose
nose-timer
coverage
mock
python-snappy
-[testenv:py34]
+[testenv:py27]
deps =
+ six
+ unittest2
nose
nose-timer
coverage
diff --git a/travis_selector.sh b/travis_selector.sh
index 7a2f45f..5700cbc 100755
--- a/travis_selector.sh
+++ b/travis_selector.sh
@@ -3,6 +3,8 @@
if [ $1 == "pypy" ]; then
echo "pypy"
+elif [ $1 == "3.5" ]; then
+ echo "py35"
elif [ $1 == "3.4" ]; then
echo "py34"
elif [ $1 == "3.3" ]; then