
Commit bc4cc43

Don't use kafka.common internally

This finishes the split from `kafka.common` to `kafka.errors`/`kafka.structs`.

1 parent 81cda59

16 files changed: 32 additions & 36 deletions
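
In practice, the convention this commit completes is that exception classes are imported from `kafka.errors` and the payload/metadata namedtuples from `kafka.structs`, with `kafka.common` left only as a backwards-compatibility shim for external callers. A minimal sketch of the post-split import style (the usage itself is illustrative, not from the commit):

    # Post-split import style exercised throughout this diff: errors from
    # kafka.errors, namedtuple structs from kafka.structs. The topic name
    # and usage below are illustrative only.
    from kafka.errors import KafkaError, UnknownTopicOrPartitionError
    from kafka.structs import TopicPartition, OffsetAndMetadata

    tp = TopicPartition(topic='example-topic', partition=0)
    meta = OffsetAndMetadata(offset=42, metadata='')

    try:
        raise UnknownTopicOrPartitionError(tp)
    except KafkaError as err:
        print('caught %s for %s at %s' % (type(err).__name__, tp, meta))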

kafka/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -25,8 +25,8 @@ def emit(self, record):
 from kafka.protocol import (
     create_message, create_gzip_message, create_snappy_message)
 from kafka.partitioner import RoundRobinPartitioner, HashedPartitioner, Murmur2Partitioner
-from kafka.structs import TopicPartition, OffsetAndMetadata
 from kafka.serializer import Serializer, Deserializer
+from kafka.structs import TopicPartition, OffsetAndMetadata
 
 # To be deprecated when KafkaProducer interface is released
 from kafka.client import SimpleClient

kafka/consumer/multiprocess.py

Lines changed: 2 additions & 2 deletions
@@ -8,7 +8,7 @@
 
 from kafka.vendor.six.moves import queue # pylint: disable=import-error
 
-from kafka.common import KafkaError
+from kafka.errors import KafkaError
 from kafka.consumer.base import (
     Consumer,
     AUTO_COMMIT_MSG_COUNT, AUTO_COMMIT_INTERVAL,
@@ -92,7 +92,7 @@ def _mp_consume(client, group, topic, message_queue, size, events, **consumer_op
 
         except KafkaError as e:
             # Retry with exponential backoff
-            log.error("Problem communicating with Kafka (%s), retrying in %d seconds..." % (e, interval))
+            log.exception("Problem communicating with Kafka, retrying in %d seconds...", interval)
             time.sleep(interval)
             interval = interval*2 if interval*2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS
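
Beyond the import fix, the second hunk also tightens the logging call: `log.exception()` records the active traceback automatically, and passing `interval` as an argument defers %-formatting until the record is actually emitted. A self-contained illustration of the difference (logger name and message are stand-ins):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger('demo')

    try:
        raise ValueError('simulated broker failure')
    except ValueError as e:
        # Old style: eager string interpolation, traceback lost.
        log.error("Problem communicating with Kafka (%s), retrying in %d seconds..." % (e, 5))
        # New style: lazy arguments, full traceback attached via exc_info.
        log.exception("Problem communicating with Kafka, retrying in %d seconds...", 5)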

kafka/consumer/simple.py

Lines changed: 3 additions & 3 deletions
@@ -24,13 +24,13 @@
     ITER_TIMEOUT_SECONDS,
     NO_MESSAGES_WAIT_TIME_SECONDS
 )
-from kafka.common import (
-    FetchRequestPayload, KafkaError, OffsetRequestPayload,
-    ConsumerFetchSizeTooSmall,
+from kafka.errors import (
+    KafkaError, ConsumerFetchSizeTooSmall,
     UnknownTopicOrPartitionError, NotLeaderForPartitionError,
     OffsetOutOfRangeError, FailedPayloadsError, check_error
 )
 from kafka.protocol.message import PartialMessage
+from kafka.structs import FetchRequestPayload, OffsetRequestPayload
 
 
 log = logging.getLogger(__name__)

kafka/coordinator/assignors/roundrobin.py

Lines changed: 1 addition & 1 deletion
@@ -7,8 +7,8 @@
 from kafka.vendor import six
 
 from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor
-from kafka.common import TopicPartition
 from kafka.coordinator.protocol import ConsumerProtocolMemberMetadata, ConsumerProtocolMemberAssignment
+from kafka.structs import TopicPartition
 
 log = logging.getLogger(__name__)

kafka/coordinator/consumer.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 from kafka.coordinator.assignors.range import RangePartitionAssignor
 from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor
 from kafka.coordinator.protocol import ConsumerProtocol
-from kafka import errors as Errors
+import kafka.errors as Errors
 from kafka.future import Future
 from kafka.metrics import AnonMeasurable
 from kafka.metrics.stats import Avg, Count, Max, Rate
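
`from kafka import errors as Errors` and `import kafka.errors as Errors` bind the same module object; this hunk (and the matching ones in the producer files below) simply standardizes on the absolute spelling used elsewhere in the codebase. A quick demonstration of the equivalence, using a stdlib package so it runs without kafka-python installed:

    # Both spellings bind the identical module object; os.path stands in
    # for kafka.errors purely so the check is self-contained.
    import os.path as p1
    from os import path as p2

    assert p1 is p2
    print('same module:', p1 is p2)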

kafka/producer/base.py

Lines changed: 2 additions & 2 deletions
@@ -14,13 +14,13 @@
 
 from kafka.vendor import six
 
-from kafka.structs import (
-    ProduceRequestPayload, ProduceResponsePayload, TopicPartition, RetryOptions)
 from kafka.errors import (
     kafka_errors, UnsupportedCodecError, FailedPayloadsError,
     RequestTimedOutError, AsyncProducerQueueFull, UnknownError,
     RETRY_ERROR_TYPES, RETRY_BACKOFF_ERROR_TYPES, RETRY_REFRESH_ERROR_TYPES)
 from kafka.protocol import CODEC_NONE, ALL_CODECS, create_message_set
+from kafka.structs import (
+    ProduceRequestPayload, ProduceResponsePayload, TopicPartition, RetryOptions)
 
 log = logging.getLogger('kafka.producer')

kafka/producer/kafka.py

Lines changed: 4 additions & 4 deletions
@@ -10,18 +10,18 @@
 
 from kafka.vendor import six
 
-from kafka import errors as Errors
+import kafka.errors as Errors
 from kafka.client_async import KafkaClient, selectors
 from kafka.codec import has_gzip, has_snappy, has_lz4
 from kafka.metrics import MetricConfig, Metrics
 from kafka.partitioner.default import DefaultPartitioner
+from kafka.producer.future import FutureRecordMetadata, FutureProduceResult
+from kafka.producer.record_accumulator import AtomicInteger, RecordAccumulator
+from kafka.producer.sender import Sender
 from kafka.record.default_records import DefaultRecordBatchBuilder
 from kafka.record.legacy_records import LegacyRecordBatchBuilder
 from kafka.serializer import Serializer
 from kafka.structs import TopicPartition
-from kafka.producer.future import FutureRecordMetadata, FutureProduceResult
-from kafka.producer.record_accumulator import AtomicInteger, RecordAccumulator
-from kafka.producer.sender import Sender
 
 
 log = logging.getLogger(__name__)

kafka/producer/record_accumulator.py

Lines changed: 2 additions & 2 deletions
@@ -6,12 +6,12 @@
 import threading
 import time
 
-from kafka import errors as Errors
+import kafka.errors as Errors
 from kafka.producer.buffer import SimpleBufferPool
 from kafka.producer.future import FutureRecordMetadata, FutureProduceResult
-from kafka.structs import TopicPartition
 from kafka.record.memory_records import MemoryRecordsBuilder
 from kafka.record.legacy_records import LegacyRecordBatchBuilder
+from kafka.structs import TopicPartition
 
 
 log = logging.getLogger(__name__)

kafka/protocol/legacy.py

Lines changed: 2 additions & 3 deletions
@@ -15,7 +15,6 @@
 
 from kafka.codec import gzip_encode, snappy_encode
 from kafka.errors import ProtocolError, UnsupportedCodecError
-from kafka.structs import ConsumerMetadataResponse
 from kafka.util import (
     crc32, read_short_string, relative_unpack,
     write_int_string, group_by_topic_and_partition)
@@ -322,7 +321,7 @@ def encode_consumer_metadata_request(cls, client_id, correlation_id, payloads):
     @classmethod
     def decode_consumer_metadata_response(cls, data):
         """
-        Decode bytes to a ConsumerMetadataResponse
+        Decode bytes to a kafka.structs.ConsumerMetadataResponse
 
         Arguments:
             data: bytes to decode
@@ -331,7 +330,7 @@ def decode_consumer_metadata_response(cls, data):
         (host, cur) = read_short_string(data, cur)
         ((port,), cur) = relative_unpack('>i', data, cur)
 
-        return ConsumerMetadataResponse(error, nodeId, host, port)
+        return kafka.structs.ConsumerMetadataResponse(error, nodeId, host, port)
 
     @classmethod
     def encode_offset_commit_request(cls, group, payloads):
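
Note that the decoder now references the struct through its module (`kafka.structs.ConsumerMetadataResponse`) rather than a top-level name import, so the attribute is resolved at call time; this is a common way to defuse import cycles between the protocol and struct modules, and it presumes `kafka.structs` has been imported by the time the decoder runs (kafka/__init__.py imports it, per the first hunk in this diff). A sketch of the call-time pattern, assuming kafka-python at this revision is importable:

    # Call-time attribute lookup, mirroring the hunk above; assumes
    # kafka-python at this revision is on the path.
    import kafka.structs

    def decode(error, node_id, host, port):
        # Resolved when called, not at import time, so kafka.protocol.legacy
        # and kafka.structs need not import each other eagerly.
        return kafka.structs.ConsumerMetadataResponse(error, node_id, host, port)

    print(decode(0, 1, 'localhost', 9092))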

kafka/structs.py

Lines changed: 0 additions & 4 deletions
@@ -93,7 +93,3 @@
 # Limit value: int >= 0, 0 means no retries
 RetryOptions = namedtuple("RetryOptions",
     ["limit", "backoff_ms", "retry_on_timeouts"])
-
-
-# Support legacy imports from kafka.common
-from kafka.errors import *
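
With the star re-export gone, `kafka.structs` no longer leaks every exception class, so code that had been reaching errors through `kafka.structs` (as test/testutil.py was, below) breaks unless it switches to `kafka.errors`. A quick sanity check of the post-commit layout, assuming kafka-python at this revision:

    import kafka.errors
    import kafka.structs

    # Exceptions now live only in kafka.errors; kafka.structs is reduced
    # to the namedtuple definitions.
    assert hasattr(kafka.errors, 'KafkaError')
    assert not hasattr(kafka.structs, 'KafkaError')
    print('kafka.structs no longer re-exports error classes')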

test/test_client_async.py

Lines changed: 1 addition & 2 deletions
@@ -13,14 +13,13 @@
 import pytest
 
 from kafka.client_async import KafkaClient, IdleConnectionManager
+from kafka.cluster import ClusterMetadata
 from kafka.conn import ConnectionStates
 import kafka.errors as Errors
 from kafka.future import Future
 from kafka.protocol.metadata import MetadataResponse, MetadataRequest
 from kafka.protocol.produce import ProduceRequest
 from kafka.structs import BrokerMetadata
-from kafka.cluster import ClusterMetadata
-from kafka.future import Future
 
 
 @pytest.fixture

test/test_conn.py

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 from kafka.protocol.metadata import MetadataRequest
 from kafka.protocol.produce import ProduceRequest
 
-import kafka.common as Errors
+import kafka.errors as Errors
 
 
 @pytest.fixture

test/test_coordinator.py

Lines changed: 3 additions & 3 deletions
@@ -5,7 +5,6 @@
 import pytest
 
 from kafka.client_async import KafkaClient
-from kafka.structs import TopicPartition, OffsetAndMetadata
 from kafka.consumer.subscription_state import (
     SubscriptionState, ConsumerRebalanceListener)
 from kafka.coordinator.assignors.range import RangePartitionAssignor
@@ -21,6 +20,7 @@
     OffsetCommitRequest, OffsetCommitResponse,
     OffsetFetchRequest, OffsetFetchResponse)
 from kafka.protocol.metadata import MetadataResponse
+from kafka.structs import TopicPartition, OffsetAndMetadata
 from kafka.util import WeakMethod
 
 
@@ -34,7 +34,7 @@ def coordinator(client):
 
 
 def test_init(client, coordinator):
-    # metadata update on init
+    # metadata update on init
     assert client.cluster._need_update is True
     assert WeakMethod(coordinator._handle_metadata_update) in client.cluster._listeners
 
@@ -542,7 +542,7 @@ def test_send_offset_fetch_request_success(patched_coord, partitions):
     response = OffsetFetchResponse[0]([('foobar', [(0, 123, b'', 0), (1, 234, b'', 0)])])
     _f.success(response)
     patched_coord._handle_offset_fetch_response.assert_called_with(
-        future, response)
+        future, response)

test/test_fetcher.py

Lines changed: 2 additions & 2 deletions
@@ -12,16 +12,16 @@
     CompletedFetch, ConsumerRecord, Fetcher, NoOffsetForPartitionError
 )
 from kafka.consumer.subscription_state import SubscriptionState
+from kafka.future import Future
 from kafka.metrics import Metrics
 from kafka.protocol.fetch import FetchRequest, FetchResponse
 from kafka.protocol.offset import OffsetResponse
-from kafka.structs import TopicPartition
-from kafka.future import Future
 from kafka.errors import (
     StaleMetadata, LeaderNotAvailableError, NotLeaderForPartitionError,
     UnknownTopicOrPartitionError, OffsetOutOfRangeError
 )
 from kafka.record.memory_records import MemoryRecordsBuilder, MemoryRecords
+from kafka.structs import TopicPartition
 
 
 @pytest.fixture

test/test_util.py

Lines changed: 1 addition & 1 deletion
@@ -5,8 +5,8 @@
 from . import unittest
 
 import kafka.errors
-import kafka.util
 import kafka.structs
+import kafka.util
 
 
 class UtilTest(unittest.TestCase):

test/testutil.py

Lines changed: 6 additions & 4 deletions
@@ -11,10 +11,12 @@
 from . import unittest
 
 from kafka import SimpleClient, create_message
-from kafka.errors import LeaderNotAvailableError, KafkaTimeoutError, InvalidTopicError
-from kafka.structs import OffsetRequestPayload, ProduceRequestPayload, \
-    NotLeaderForPartitionError, UnknownTopicOrPartitionError, \
-    FailedPayloadsError
+from kafka.errors import (
+    LeaderNotAvailableError, KafkaTimeoutError, InvalidTopicError,
+    NotLeaderForPartitionError, UnknownTopicOrPartitionError,
+    FailedPayloadsError
+)
+from kafka.structs import OffsetRequestPayload, ProduceRequestPayload
 from test.fixtures import random_string, version_str_to_list, version as kafka_version #pylint: disable=wrong-import-order
 
 def kafka_versions(*versions):
