Diffstat (limited to 'test/test_consumer_integration.py')
 test/test_consumer_integration.py | 134 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 131 insertions(+), 3 deletions(-)
diff --git a/test/test_consumer_integration.py b/test/test_consumer_integration.py
index 193a570..4b5e78a 100644
--- a/test/test_consumer_integration.py
+++ b/test/test_consumer_integration.py
@@ -1,16 +1,23 @@
 import logging
 import os
+import time
 
 from six.moves import xrange
 import six
 from . import unittest
 
 from kafka import (
-    KafkaConsumer, MultiProcessConsumer, SimpleConsumer, create_message, create_gzip_message
+    KafkaConsumer, MultiProcessConsumer, SimpleConsumer, create_message,
+    create_gzip_message, KafkaProducer
 )
 from kafka.consumer.base import MAX_FETCH_BUFFER_SIZE_BYTES
-from kafka.errors import ConsumerFetchSizeTooSmall, OffsetOutOfRangeError
-from kafka.structs import ProduceRequestPayload, TopicPartition
+from kafka.errors import (
+    ConsumerFetchSizeTooSmall, OffsetOutOfRangeError, UnsupportedVersionError,
+    KafkaTimeoutError
+)
+from kafka.structs import (
+    ProduceRequestPayload, TopicPartition, OffsetAndTimestamp
+)
 
 from test.fixtures import ZookeeperFixture, KafkaFixture
 from test.testutil import (
@@ -88,6 +95,12 @@ class TestConsumerIntegration(KafkaIntegrationTestCase):
                                  **configs)
         return consumer
 
+    def kafka_producer(self, **configs):
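+        # Builds a KafkaProducer against the same test broker fixture that
+        # kafka_consumer() above connects to.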
+        brokers = '%s:%d' % (self.server.host, self.server.port)
+        producer = KafkaProducer(
+            bootstrap_servers=brokers, **configs)
+        return producer
+
     def test_simple_consumer(self):
         self.send_messages(0, range(0, 100))
         self.send_messages(1, range(100, 200))
@@ -624,3 +637,118 @@ class TestConsumerIntegration(KafkaIntegrationTestCase):
         fetched_msgs = [next(consumer) for i in range(10)]
         self.assertEqual(len(fetched_msgs), 10)
+
+    @kafka_versions('>=0.10.1')
+    def test_kafka_consumer_offsets_for_time(self):
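+        # Kafka message timestamps are milliseconds since the epoch. Pick
+        # three timestamps so that middle_time falls strictly between the
+        # two messages produced below.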
+        late_time = int(time.time()) * 1000
+        middle_time = late_time - 1000
+        early_time = late_time - 2000
+        tp = TopicPartition(self.topic, 0)
+
+        kafka_producer = self.kafka_producer()
+        early_msg = kafka_producer.send(
+            self.topic, partition=0, value=b"first",
+            timestamp_ms=early_time).get()
+        late_msg = kafka_producer.send(
+            self.topic, partition=0, value=b"last",
+            timestamp_ms=late_time).get()
+
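+        # For each partition, offsets_for_times() returns the offset of the
+        # earliest message whose timestamp is >= the requested timestamp.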
+        consumer = self.kafka_consumer()
+        offsets = consumer.offsets_for_times({tp: early_time})
+        self.assertEqual(len(offsets), 1)
+        self.assertEqual(offsets[tp].offset, early_msg.offset)
+        self.assertEqual(offsets[tp].timestamp, early_time)
+
+        offsets = consumer.offsets_for_times({tp: middle_time})
+        self.assertEqual(offsets[tp].offset, late_msg.offset)
+        self.assertEqual(offsets[tp].timestamp, late_time)
+
+        offsets = consumer.offsets_for_times({tp: late_time})
+        self.assertEqual(offsets[tp].offset, late_msg.offset)
+        self.assertEqual(offsets[tp].timestamp, late_time)
+
+        offsets = consumer.offsets_for_times({})
+        self.assertEqual(offsets, {})
+
+        # Out of bound timestamps check
+
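+        # Timestamp 0 precedes every message, so the first offset comes
+        # back; a timestamp later than any message maps to None.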
+        offsets = consumer.offsets_for_times({tp: 0})
+        self.assertEqual(offsets[tp].offset, early_msg.offset)
+        self.assertEqual(offsets[tp].timestamp, early_time)
+
+        offsets = consumer.offsets_for_times({tp: 9999999999999})
+        self.assertEqual(offsets[tp], None)
+
+        # Beginning/End offsets
+
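+        # end_offsets() reports the offset one past the last message, i.e.
+        # the offset the next produced message would be assigned.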
+        offsets = consumer.beginning_offsets([tp])
+        self.assertEqual(offsets, {
+            tp: early_msg.offset,
+        })
+        offsets = consumer.end_offsets([tp])
+        self.assertEqual(offsets, {
+            tp: late_msg.offset + 1
+        })
+
+    @kafka_versions('>=0.10.1')
+    def test_kafka_consumer_offsets_search_many_partitions(self):
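+        # Produce one message with the same timestamp to each of two
+        # partitions, then resolve both in a single offsets_for_times() call.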
+        tp0 = TopicPartition(self.topic, 0)
+        tp1 = TopicPartition(self.topic, 1)
+
+        kafka_producer = self.kafka_producer()
+        send_time = int(time.time() * 1000)
+        p0msg = kafka_producer.send(
+            self.topic, partition=0, value=b"XXX",
+            timestamp_ms=send_time).get()
+        p1msg = kafka_producer.send(
+            self.topic, partition=1, value=b"XXX",
+            timestamp_ms=send_time).get()
+
+        consumer = self.kafka_consumer()
+        offsets = consumer.offsets_for_times({
+            tp0: send_time,
+            tp1: send_time
+        })
+
+        self.assertEqual(offsets, {
+            tp0: OffsetAndTimestamp(p0msg.offset, send_time),
+            tp1: OffsetAndTimestamp(p1msg.offset, send_time)
+        })
+
+        offsets = consumer.beginning_offsets([tp0, tp1])
+        self.assertEqual(offsets, {
+            tp0: p0msg.offset,
+            tp1: p1msg.offset
+        })
+
+        offsets = consumer.end_offsets([tp0, tp1])
+        self.assertEqual(offsets, {
+            tp0: p0msg.offset + 1,
+            tp1: p1msg.offset + 1
+        })
+
+    @kafka_versions('<0.10.1')
+    def test_kafka_consumer_offsets_for_time_old(self):
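+        # Brokers before 0.10.1 do not implement the OffsetsForTimes API,
+        # so all three lookups should raise UnsupportedVersionError.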
+        consumer = self.kafka_consumer()
+        tp = TopicPartition(self.topic, 0)
+
+        with self.assertRaises(UnsupportedVersionError):
+            consumer.offsets_for_times({tp: int(time.time())})
+
+        with self.assertRaises(UnsupportedVersionError):
+            consumer.beginning_offsets([tp])
+
+        with self.assertRaises(UnsupportedVersionError):
+            consumer.end_offsets([tp])
+
+    @kafka_versions('>=0.10.1')
+    def test_kafka_consumer_offsets_for_times_errors(self):
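+        # Negative timestamps are rejected client-side with ValueError;
+        # querying a partition that does not exist cannot resolve a leader
+        # and eventually times out.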
+        consumer = self.kafka_consumer()
+        tp = TopicPartition(self.topic, 0)
+        bad_tp = TopicPartition(self.topic, 100)
+
+        with self.assertRaises(ValueError):
+            consumer.offsets_for_times({tp: -1})
+
+        with self.assertRaises(KafkaTimeoutError):
+            consumer.offsets_for_times({bad_tp: 0})