Diffstat (limited to 'test')
 test/test_client.py               | 20 ++++++++++----------
 test/test_failover_integration.py |  4 ++--
 test/test_producer.py             | 10 +++++-----
 test/test_protocol.py             |  2 +-
 test/test_util.py                 |  2 +-
 5 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/test/test_client.py b/test/test_client.py
index 00e888c..8c62eb9 100644
--- a/test/test_client.py
+++ b/test/test_client.py
@@ -9,7 +9,7 @@ from kafka import KafkaClient
 from kafka.common import (
     ProduceRequestPayload,
     BrokerMetadata,
-    TopicAndPartition, KafkaUnavailableError,
+    TopicPartition, KafkaUnavailableError,
     LeaderNotAvailableError, UnknownTopicOrPartitionError,
     KafkaTimeoutError, ConnectionError
 )
@@ -145,12 +145,12 @@ class TestKafkaClient(unittest.TestCase):
         # client loads metadata at init
         client = KafkaClient(hosts=['broker_1:4567'])
         self.assertDictEqual({
-            TopicAndPartition('topic_1', 0): brokers[1],
-            TopicAndPartition('topic_noleader', 0): None,
-            TopicAndPartition('topic_noleader', 1): None,
-            TopicAndPartition('topic_3', 0): brokers[0],
-            TopicAndPartition('topic_3', 1): brokers[1],
-            TopicAndPartition('topic_3', 2): brokers[0]},
+            TopicPartition('topic_1', 0): brokers[1],
+            TopicPartition('topic_noleader', 0): None,
+            TopicPartition('topic_noleader', 1): None,
+            TopicPartition('topic_3', 0): brokers[0],
+            TopicPartition('topic_3', 1): brokers[1],
+            TopicPartition('topic_3', 2): brokers[0]},
             client.topics_to_brokers)
 
         # if we ask for metadata explicitly, it should raise errors
@@ -260,7 +260,7 @@ class TestKafkaClient(unittest.TestCase):
         self.assertEqual(brokers[0], leader)
 
         self.assertDictEqual({
-            TopicAndPartition('topic_one_partition', 0): brokers[0]},
+            TopicPartition('topic_one_partition', 0): brokers[0]},
             client.topics_to_brokers)
 
     @patch('kafka.client.KafkaClient._get_conn')
@@ -312,8 +312,8 @@ class TestKafkaClient(unittest.TestCase):
         client = KafkaClient(hosts=['broker_1:4567'])
         self.assertDictEqual(
             {
-                TopicAndPartition('topic_noleader', 0): None,
-                TopicAndPartition('topic_noleader', 1): None
+                TopicPartition('topic_noleader', 0): None,
+                TopicPartition('topic_noleader', 1): None
             },
             client.topics_to_brokers)
 
diff --git a/test/test_failover_integration.py b/test/test_failover_integration.py
index 8c5efe2..04c9e2b 100644
--- a/test/test_failover_integration.py
+++ b/test/test_failover_integration.py
@@ -3,7 +3,7 @@ import os
 import time
 
 from kafka import KafkaClient, SimpleConsumer, KeyedProducer
-from kafka.common import TopicAndPartition, FailedPayloadsError, ConnectionError
+from kafka.common import TopicPartition, FailedPayloadsError, ConnectionError
 from kafka.producer.base import Producer
 
 from test.fixtures import ZookeeperFixture, KafkaFixture
@@ -202,7 +202,7 @@ class TestFailover(KafkaIntegrationTestCase):
                 break
 
     def _kill_leader(self, topic, partition):
-        leader = self.client.topics_to_brokers[TopicAndPartition(topic, partition)]
+        leader = self.client.topics_to_brokers[TopicPartition(topic, partition)]
         broker = self.brokers[leader.nodeId]
         broker.close()
         return broker
diff --git a/test/test_producer.py b/test/test_producer.py
index cbc1773..f62b97a 100644
--- a/test/test_producer.py
+++ b/test/test_producer.py
@@ -10,7 +10,7 @@ from . import unittest
 from kafka import KafkaClient, SimpleProducer, KeyedProducer
 from kafka.common import (
     AsyncProducerQueueFull, FailedPayloadsError, NotLeaderForPartitionError,
-    ProduceResponsePayload, RetryOptions, TopicAndPartition
+    ProduceResponsePayload, RetryOptions, TopicPartition
 )
 from kafka.producer.base import Producer, _send_upstream
 from kafka.protocol import CODEC_NONE
@@ -156,7 +156,7 @@ class TestKafkaProducerSendUpstream(unittest.TestCase):
 
         # lets create a queue and add 10 messages for 1 partition
         for i in range(10):
-            self.queue.put((TopicAndPartition("test", 0), "msg %i", "key %i"))
+            self.queue.put((TopicPartition("test", 0), "msg %i", "key %i"))
 
         self._run_process()
 
@@ -172,7 +172,7 @@ class TestKafkaProducerSendUpstream(unittest.TestCase):
         # lets create a queue and add 10 messages for 10 different partitions
         # to show how retries should work ideally
         for i in range(10):
-            self.queue.put((TopicAndPartition("test", i), "msg %i", "key %i"))
+            self.queue.put((TopicPartition("test", i), "msg %i", "key %i"))
 
         # Mock offsets counter for closure
         offsets = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
@@ -206,7 +206,7 @@ class TestKafkaProducerSendUpstream(unittest.TestCase):
         # lets create a queue and add 10 messages for 10 different partitions
         # to show how retries should work ideally
         for i in range(10):
-            self.queue.put((TopicAndPartition("test", i), "msg %i" % i, "key %i" % i))
+            self.queue.put((TopicPartition("test", i), "msg %i" % i, "key %i" % i))
 
         def send_side_effect(reqs, *args, **kwargs):
             return [FailedPayloadsError(req) for req in reqs]
@@ -226,7 +226,7 @@ class TestKafkaProducerSendUpstream(unittest.TestCase):
     def test_async_producer_not_leader(self):
 
         for i in range(10):
-            self.queue.put((TopicAndPartition("test", i), "msg %i", "key %i"))
+            self.queue.put((TopicPartition("test", i), "msg %i", "key %i"))
 
         # Mock offsets counter for closure
         offsets = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
diff --git a/test/test_protocol.py b/test/test_protocol.py
index 6c79829..4c5f379 100644
--- a/test/test_protocol.py
+++ b/test/test_protocol.py
@@ -12,7 +12,7 @@ from kafka.common import (
     OffsetResponsePayload, OffsetCommitResponsePayload, OffsetFetchResponsePayload,
     ProduceRequestPayload, FetchRequestPayload, Message, ChecksumError,
     ProduceResponsePayload, FetchResponsePayload, OffsetAndMessage,
-    BrokerMetadata, TopicMetadata, PartitionMetadata, TopicAndPartition,
+    BrokerMetadata, TopicMetadata, PartitionMetadata,
     KafkaUnavailableError, UnsupportedCodecError, ConsumerFetchSizeTooSmall,
     ProtocolError, ConsumerMetadataResponse
 )
diff --git a/test/test_util.py b/test/test_util.py
index ea3783e..7f0432b 100644
--- a/test/test_util.py
+++ b/test/test_util.py
@@ -104,7 +104,7 @@ class UtilTest(unittest.TestCase):
             kafka.util.relative_unpack('>hh', '\x00', 0)
 
     def test_group_by_topic_and_partition(self):
-        t = kafka.common.TopicAndPartition
+        t = kafka.common.TopicPartition
 
         l = [
             t("a", 1),
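
For context on what the renamed type looks like in use: throughout these tests, TopicPartition (like the TopicAndPartition it replaces) serves as a hashable (topic, partition) key, e.g. into client.topics_to_brokers. Below is a minimal sketch, assuming the type is a plain namedtuple as in kafka.common; the broker values are made-up placeholders, not the fixtures used above.

from collections import namedtuple

# Assumed shape of the type: a plain (topic, partition) namedtuple,
# matching how the tests above construct it, e.g. TopicPartition('topic_1', 0).
TopicPartition = namedtuple("TopicPartition", ["topic", "partition"])

# Namedtuples hash by value, so equal (topic, partition) pairs always
# find the same entry -- this is what lets the tests use them as dict keys.
topics_to_brokers = {
    TopicPartition("topic_1", 0): "broker_1",  # placeholder broker values
    TopicPartition("topic_1", 1): "broker_2",
}

leader = topics_to_brokers[TopicPartition("topic_1", 0)]
print(leader)  # -> broker_1

Since only the class name changes and the field layout stays the same, the diff is a pure identifier rename: every hunk swaps TopicAndPartition for TopicPartition one-for-one, and existing dict lookups and queue entries work unmodified.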