path: root/kafka/producer/simple.py
Diffstat (limited to 'kafka/producer/simple.py')
-rw-r--r--  kafka/producer/simple.py  |  54
1 file changed, 0 insertions(+), 54 deletions(-)
diff --git a/kafka/producer/simple.py b/kafka/producer/simple.py
deleted file mode 100644
index f334a49..0000000
--- a/kafka/producer/simple.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from __future__ import absolute_import
-
-from itertools import cycle
-import logging
-import random
-
-from kafka.vendor.six.moves import range
-
-from kafka.producer.base import Producer
-
-
-log = logging.getLogger(__name__)
-
-
-class SimpleProducer(Producer):
- """A simple, round-robin producer.
-
- See Producer class for Base Arguments
-
- Additional Arguments:
- random_start (bool, optional): randomize the initial partition which
- the first message block will be published to, otherwise
- if false, the first message block will always publish
- to partition 0 before cycling through each partition,
- defaults to True.
- """
- def __init__(self, *args, **kwargs):
- self.partition_cycles = {}
- self.random_start = kwargs.pop('random_start', True)
- super(SimpleProducer, self).__init__(*args, **kwargs)
-
- def _next_partition(self, topic):
- if topic not in self.partition_cycles:
- if not self.client.has_metadata_for_topic(topic):
- self.client.ensure_topic_exists(topic)
-
- self.partition_cycles[topic] = cycle(self.client.get_partition_ids_for_topic(topic))
-
- # Randomize the initial partition that is returned
- if self.random_start:
- num_partitions = len(self.client.get_partition_ids_for_topic(topic))
- for _ in range(random.randint(0, num_partitions-1)):
- next(self.partition_cycles[topic])
-
- return next(self.partition_cycles[topic])
-
- def send_messages(self, topic, *msg):
- partition = self._next_partition(topic)
- return super(SimpleProducer, self).send_messages(
- topic, partition, *msg
- )
-
- def __repr__(self):
- return '<SimpleProducer batch=%s>' % (self.async_send,)
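
For context, below is a minimal usage sketch of the producer deleted by this commit, assuming the legacy SimpleClient/SimpleProducer API as it existed before the removal; the broker address 'localhost:9092' and the topic 'my-topic' are placeholders, not anything taken from this change. Newer code would typically use kafka.KafkaProducer instead.

# Usage sketch of the (now removed) SimpleProducer -- assumes the legacy
# kafka-python API; broker address and topic name are placeholders.
from kafka import SimpleClient, SimpleProducer

client = SimpleClient('localhost:9092')

# random_start=True (the default) starts the round-robin cycle at a random
# partition; random_start=False always starts at partition 0.
producer = SimpleProducer(client, random_start=True)

# Messages must be bytes; each call publishes to the next partition in the cycle.
producer.send_messages('my-topic', b'first message', b'second message')

client.close()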