From 3841edf5cdf775080df8ecfd1cd49d5a45fcbdce Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 17:14:34 -0800 Subject: [PATCH 01/18] six.add_metaclass --- kafka/consumer/subscription_state.py | 3 +-- kafka/metrics/compound_stat.py | 4 +--- kafka/metrics/measurable_stat.py | 4 +--- kafka/metrics/metrics_reporter.py | 5 +---- kafka/metrics/stat.py | 5 +---- kafka/metrics/stats/sampled_stat.py | 4 +--- kafka/producer/transaction_manager.py | 3 +-- kafka/protocol/abstract.py | 5 +---- kafka/protocol/api.py | 8 ++------ kafka/record/abc.py | 14 ++++---------- kafka/sasl/abc.py | 5 +---- 11 files changed, 15 insertions(+), 45 deletions(-) diff --git a/kafka/consumer/subscription_state.py b/kafka/consumer/subscription_state.py index f99f01615..ecadd17a1 100644 --- a/kafka/consumer/subscription_state.py +++ b/kafka/consumer/subscription_state.py @@ -509,8 +509,7 @@ def is_fetchable(self): return not self.paused and self.has_valid_position -@six.add_metaclass(abc.ABCMeta) -class ConsumerRebalanceListener(object): +class ConsumerRebalanceListener(object, metaclass=abc.ABCMeta): """ A callback interface that the user can implement to trigger custom actions when the set of partitions assigned to the consumer changes. diff --git a/kafka/metrics/compound_stat.py b/kafka/metrics/compound_stat.py index f5b482da2..73788b265 100644 --- a/kafka/metrics/compound_stat.py +++ b/kafka/metrics/compound_stat.py @@ -3,11 +3,9 @@ import abc from kafka.metrics.stat import AbstractStat -from kafka.vendor.six import add_metaclass -@add_metaclass(abc.ABCMeta) -class AbstractCompoundStat(AbstractStat): +class AbstractCompoundStat(AbstractStat, metaclass=abc.ABCMeta): """ A compound stat is a stat where a single measurement and associated data structure feeds many metrics. This is the example for a diff --git a/kafka/metrics/measurable_stat.py b/kafka/metrics/measurable_stat.py index 08222b144..3eec18ad9 100644 --- a/kafka/metrics/measurable_stat.py +++ b/kafka/metrics/measurable_stat.py @@ -4,11 +4,9 @@ from kafka.metrics.measurable import AbstractMeasurable from kafka.metrics.stat import AbstractStat -from kafka.vendor.six import add_metaclass -@add_metaclass(abc.ABCMeta) -class AbstractMeasurableStat(AbstractStat, AbstractMeasurable): +class AbstractMeasurableStat(AbstractStat, AbstractMeasurable, metaclass=abc.ABCMeta): """ An AbstractMeasurableStat is an AbstractStat that is also an AbstractMeasurable (i.e. can produce a single floating point value). diff --git a/kafka/metrics/metrics_reporter.py b/kafka/metrics/metrics_reporter.py index 8df2e9ea6..e38528c42 100644 --- a/kafka/metrics/metrics_reporter.py +++ b/kafka/metrics/metrics_reporter.py @@ -2,11 +2,8 @@ import abc -from kafka.vendor.six import add_metaclass - -@add_metaclass(abc.ABCMeta) -class AbstractMetricsReporter(object): +class AbstractMetricsReporter(object, metaclass=abc.ABCMeta): """ An abstract class to allow things to listen as new metrics are created so they can be reported. 
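For context on the pattern this patch applies throughout: six.add_metaclass was a class decorator that rebuilt the decorated class under the given metaclass, which Python 3 expresses directly with the metaclass keyword argument. A minimal sketch of the before/after, using a hypothetical Example class rather than one from the kafka codebase:

    import abc

    # Removed two-step form (Python 2 compatible):
    #   @six.add_metaclass(abc.ABCMeta)
    #   class Example(object):
    #       ...
    #
    # Python 3 form used by the new code; listing object explicitly is
    # redundant but harmless alongside the metaclass keyword:
    class Example(object, metaclass=abc.ABCMeta):
        @abc.abstractmethod
        def encode(self, value):
            """Subclasses must provide encode()."""

    # The abstract method still prevents direct instantiation:
    #   Example()  ->  TypeError: Can't instantiate abstract class Example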
diff --git a/kafka/metrics/stat.py b/kafka/metrics/stat.py index 8825d2783..8c76b5415 100644 --- a/kafka/metrics/stat.py +++ b/kafka/metrics/stat.py @@ -2,11 +2,8 @@ import abc -from kafka.vendor.six import add_metaclass - -@add_metaclass(abc.ABCMeta) -class AbstractStat(object): +class AbstractStat(object, metaclass=abc.ABCMeta): """ An AbstractStat is a quantity such as average, max, etc that is computed off the stream of updates to a sensor diff --git a/kafka/metrics/stats/sampled_stat.py b/kafka/metrics/stats/sampled_stat.py index fe8970dbf..8f978a8b7 100644 --- a/kafka/metrics/stats/sampled_stat.py +++ b/kafka/metrics/stats/sampled_stat.py @@ -3,11 +3,9 @@ import abc from kafka.metrics.measurable_stat import AbstractMeasurableStat -from kafka.vendor.six import add_metaclass -@add_metaclass(abc.ABCMeta) -class AbstractSampledStat(AbstractMeasurableStat): +class AbstractSampledStat(AbstractMeasurableStat, metaclass=abc.ABCMeta): """ An AbstractSampledStat records a single scalar value measured over one or more samples. Each sample is recorded over a configurable diff --git a/kafka/producer/transaction_manager.py b/kafka/producer/transaction_manager.py index a44d7d9b3..33df2df98 100644 --- a/kafka/producer/transaction_manager.py +++ b/kafka/producer/transaction_manager.py @@ -535,8 +535,7 @@ def exception(self): return self._error -@six.add_metaclass(abc.ABCMeta) -class TxnRequestHandler(object): +class TxnRequestHandler(object, metaclass=abc.ABCMeta): def __init__(self, transaction_manager, result=None): self.transaction_manager = transaction_manager self.retry_backoff_ms = transaction_manager.retry_backoff_ms diff --git a/kafka/protocol/abstract.py b/kafka/protocol/abstract.py index 7ce5fc18f..2adbc232d 100644 --- a/kafka/protocol/abstract.py +++ b/kafka/protocol/abstract.py @@ -2,11 +2,8 @@ import abc -from kafka.vendor.six import add_metaclass - -@add_metaclass(abc.ABCMeta) -class AbstractType(object): +class AbstractType(object, metaclass=abc.ABCMeta): @abc.abstractmethod def encode(cls, value): # pylint: disable=no-self-argument pass diff --git a/kafka/protocol/api.py b/kafka/protocol/api.py index 9cd5767c1..69e8be545 100644 --- a/kafka/protocol/api.py +++ b/kafka/protocol/api.py @@ -5,8 +5,6 @@ from kafka.protocol.struct import Struct from kafka.protocol.types import Int16, Int32, String, Schema, Array, TaggedFields -from kafka.vendor.six import add_metaclass - class RequestHeader(Struct): SCHEMA = Schema( @@ -51,8 +49,7 @@ class ResponseHeaderV2(Struct): ) -@add_metaclass(abc.ABCMeta) -class Request(Struct): +class Request(Struct, metaclass=abc.ABCMeta): FLEXIBLE_VERSION = False @abc.abstractproperty @@ -88,8 +85,7 @@ def build_header(self, correlation_id, client_id): return RequestHeader(self, correlation_id=correlation_id, client_id=client_id) -@add_metaclass(abc.ABCMeta) -class Response(Struct): +class Response(Struct, metaclass=abc.ABCMeta): FLEXIBLE_VERSION = False @abc.abstractproperty diff --git a/kafka/record/abc.py b/kafka/record/abc.py index c78f0da69..ac8f88f14 100644 --- a/kafka/record/abc.py +++ b/kafka/record/abc.py @@ -2,11 +2,8 @@ import abc -from kafka.vendor.six import add_metaclass - -@add_metaclass(abc.ABCMeta) -class ABCRecord(object): +class ABCRecord(object, metaclass=abc.ABCMeta): __slots__ = () @abc.abstractproperty @@ -57,8 +54,7 @@ def headers(self): """ -@add_metaclass(abc.ABCMeta) -class ABCRecordBatchBuilder(object): +class ABCRecordBatchBuilder(object, metaclass=abc.ABCMeta): __slots__ = () @abc.abstractmethod @@ -97,8 +93,7 @@ def build(self): """ 
-@add_metaclass(abc.ABCMeta) -class ABCRecordBatch(object): +class ABCRecordBatch(object, metaclass=abc.ABCMeta): """ For v2 encapsulates a RecordBatch, for v0/v1 a single (maybe compressed) message. """ @@ -126,8 +121,7 @@ def magic(self): """ -@add_metaclass(abc.ABCMeta) -class ABCRecords(object): +class ABCRecords(object, metaclass=abc.ABCMeta): __slots__ = () @abc.abstractmethod diff --git a/kafka/sasl/abc.py b/kafka/sasl/abc.py index 0577888a9..3c036558e 100644 --- a/kafka/sasl/abc.py +++ b/kafka/sasl/abc.py @@ -2,11 +2,8 @@ import abc -from kafka.vendor.six import add_metaclass - -@add_metaclass(abc.ABCMeta) -class SaslMechanism(object): +class SaslMechanism(object, metaclass=abc.ABCMeta): @abc.abstractmethod def __init__(self, **config): pass From 6327d38c751f956878b3ab02a21b44d12f4b11ac Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 17:15:39 -0800 Subject: [PATCH 02/18] six.moves --- kafka/benchmarks/producer_performance.py | 2 -- kafka/codec.py | 1 - kafka/sasl/msk.py | 1 - pylint.rc | 2 +- test/integration/conftest.py | 2 +- test/integration/fixtures.py | 3 +-- test/integration/test_consumer_integration.py | 1 - test/test_codec.py | 1 - 8 files changed, 3 insertions(+), 10 deletions(-) diff --git a/kafka/benchmarks/producer_performance.py b/kafka/benchmarks/producer_performance.py index 1a1092960..df26459a4 100644 --- a/kafka/benchmarks/producer_performance.py +++ b/kafka/benchmarks/producer_performance.py @@ -10,8 +10,6 @@ import time import traceback -from kafka.vendor.six.moves import range - from kafka import KafkaProducer diff --git a/kafka/codec.py b/kafka/codec.py index b73df060d..a1a1b22cc 100644 --- a/kafka/codec.py +++ b/kafka/codec.py @@ -6,7 +6,6 @@ import struct from kafka.vendor import six -from kafka.vendor.six.moves import range _XERIAL_V1_HEADER = (-126, b'S', b'N', b'A', b'P', b'P', b'Y', 0, 1, 1) _XERIAL_V1_FORMAT = 'bccccccBii' diff --git a/kafka/sasl/msk.py b/kafka/sasl/msk.py index 7ec03215d..0f271d2cf 100644 --- a/kafka/sasl/msk.py +++ b/kafka/sasl/msk.py @@ -16,7 +16,6 @@ from kafka.errors import KafkaConfigurationError from kafka.sasl.abc import SaslMechanism -from kafka.vendor.six.moves import urllib log = logging.getLogger(__name__) diff --git a/pylint.rc b/pylint.rc index 851275bcc..12be07f50 100644 --- a/pylint.rc +++ b/pylint.rc @@ -1,6 +1,6 @@ [TYPECHECK] ignored-classes=SyncManager,_socketobject -ignored-modules=kafka.vendor.six.moves +ignored-modules= generated-members=py.* [MESSAGES CONTROL] diff --git a/test/integration/conftest.py b/test/integration/conftest.py index 8af729296..9a5527259 100644 --- a/test/integration/conftest.py +++ b/test/integration/conftest.py @@ -1,11 +1,11 @@ from __future__ import absolute_import import os +from urllib.parse import urlparse import uuid import pytest -from kafka.vendor.six.moves.urllib.parse import urlparse # pylint: disable=E0611,F0401 from test.testutil import env_kafka_version, random_string from test.integration.fixtures import KafkaFixture, ZookeeperFixture diff --git a/test/integration/fixtures.py b/test/integration/fixtures.py index b9baf5223..d1534090b 100644 --- a/test/integration/fixtures.py +++ b/test/integration/fixtures.py @@ -8,11 +8,10 @@ import socket import subprocess import time +from urllib.parse import urlparse import uuid import py -from kafka.vendor.six.moves import range -from kafka.vendor.six.moves.urllib.parse import urlparse # pylint: disable=E0611,F0401 from kafka import errors, KafkaAdminClient, KafkaClient, KafkaConsumer, KafkaProducer from kafka.errors 
import InvalidReplicationFactorError, KafkaTimeoutError diff --git a/test/integration/test_consumer_integration.py b/test/integration/test_consumer_integration.py index 6060dc830..8f63619c7 100644 --- a/test/integration/test_consumer_integration.py +++ b/test/integration/test_consumer_integration.py @@ -6,7 +6,6 @@ except ImportError: from mock import patch, ANY import pytest -from kafka.vendor.six.moves import range import kafka.codec from kafka.errors import KafkaTimeoutError, UnsupportedCodecError, UnsupportedVersionError diff --git a/test/test_codec.py b/test/test_codec.py index 24159c253..cb5c4c843 100644 --- a/test/test_codec.py +++ b/test/test_codec.py @@ -4,7 +4,6 @@ import struct import pytest -from kafka.vendor.six.moves import range from kafka.codec import ( has_snappy, has_lz4, has_zstd, From 9d5cc3768805d72d1ba09eb83893139f3e53dfbc Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 17:21:12 -0800 Subject: [PATCH 03/18] six.PY2 / PY3 --- kafka/benchmarks/varint_speed.py | 23 +++++++---------------- kafka/client_async.py | 3 --- kafka/codec.py | 5 ----- kafka/conn.py | 20 ++------------------ kafka/partitioner/default.py | 7 ------- kafka/sasl/scram.py | 9 ++------- kafka/util.py | 24 ++++++++++-------------- test/test_conn.py | 7 ------- 8 files changed, 21 insertions(+), 77 deletions(-) diff --git a/kafka/benchmarks/varint_speed.py b/kafka/benchmarks/varint_speed.py index b2628a1b5..27e133b39 100644 --- a/kafka/benchmarks/varint_speed.py +++ b/kafka/benchmarks/varint_speed.py @@ -269,22 +269,13 @@ def size_of_varint_2(value): return 10 -if six.PY3: - def _read_byte(memview, pos): - """ Read a byte from memoryview as an integer - - Raises: - IndexError: if position is out of bounds - """ - return memview[pos] -else: - def _read_byte(memview, pos): - """ Read a byte from memoryview as an integer - - Raises: - IndexError: if position is out of bounds - """ - return ord(memview[pos]) +def _read_byte(memview, pos): + """ Read a byte from memoryview as an integer + + Raises: + IndexError: if position is out of bounds + """ + return memview[pos] def decode_varint_1(buffer, pos=0): diff --git a/kafka/client_async.py b/kafka/client_async.py index de20c218d..b1017e064 100644 --- a/kafka/client_async.py +++ b/kafka/client_async.py @@ -33,9 +33,6 @@ from kafka.vendor import socketpair # noqa: F401 from kafka.version import __version__ -if six.PY2: - ConnectionError = None - log = logging.getLogger('kafka.client') diff --git a/kafka/codec.py b/kafka/codec.py index a1a1b22cc..a47d93928 100644 --- a/kafka/codec.py +++ b/kafka/codec.py @@ -5,7 +5,6 @@ import platform import struct -from kafka.vendor import six _XERIAL_V1_HEADER = (-126, b'S', b'N', b'A', b'P', b'P', b'Y', 0, 1, 1) _XERIAL_V1_FORMAT = 'bccccccBii' @@ -148,10 +147,6 @@ def snappy_encode(payload, xerial_compatible=True, xerial_blocksize=32*1024): # buffer... likely a python-snappy bug, so just use a slice copy chunker = lambda payload, i, size: payload[i:size+i] - elif six.PY2: - # Sliced buffer avoids additional copies - # pylint: disable-msg=undefined-variable - chunker = lambda payload, i, size: buffer(payload, i, size) else: # snappy.compress does not like raw memoryviews, so we have to convert # tobytes, which is a copy... oh well. it's the thought that counts. 
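The deletions above lean on Python 3 bytes semantics: indexing a bytes or memoryview object yields an int directly, so the ord()-based and buffer()-based Python 2 branches were dead code. A quick illustration with arbitrary values:

    buf = memoryview(b'\x01\x7f\x80')
    assert buf[0] == 1                      # indexing gives an int, no ord() needed
    assert bytes(buf)[2] == 0x80
    assert bytes(buf[1:]) == b'\x7f\x80'    # slicing still yields a byte sequence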
diff --git a/kafka/conn.py b/kafka/conn.py index 9c79184aa..ecd52faca 100755 --- a/kafka/conn.py +++ b/kafka/conn.py @@ -17,8 +17,6 @@ import threading import time -from kafka.vendor import six - import kafka.errors as Errors from kafka.future import Future from kafka.metrics.stats import Avg, Count, Max, Rate @@ -40,11 +38,6 @@ from kafka.version import __version__ -if six.PY2: - ConnectionError = socket.error - TimeoutError = socket.error - BlockingIOError = Exception - log = logging.getLogger(__name__) DEFAULT_KAFKA_PORT = 9092 @@ -720,13 +713,9 @@ def _send_bytes(self, data): except (SSLWantReadError, SSLWantWriteError): break except (ConnectionError, TimeoutError) as e: - if six.PY2 and e.errno == errno.EWOULDBLOCK: - break raise except BlockingIOError: - if six.PY3: - break - raise + break return total_sent def _send_bytes_blocking(self, data): @@ -1183,17 +1172,12 @@ def _recv(self): except (SSLWantReadError, SSLWantWriteError): break except (ConnectionError, TimeoutError) as e: - if six.PY2 and e.errno == errno.EWOULDBLOCK: - break log.exception('%s: Error receiving network data' ' closing socket', self) err = Errors.KafkaConnectionError(e) break except BlockingIOError: - if six.PY3: - break - # For PY2 this is a catchall and should be re-raised - raise + break # Only process bytes if there was no connection exception if err is None: diff --git a/kafka/partitioner/default.py b/kafka/partitioner/default.py index d0914c682..6bd90eeb1 100644 --- a/kafka/partitioner/default.py +++ b/kafka/partitioner/default.py @@ -2,8 +2,6 @@ import random -from kafka.vendor import six - class DefaultPartitioner(object): """Default partitioner. @@ -43,11 +41,6 @@ def murmur2(data): Returns: MurmurHash2 of data """ - # Python2 bytes is really a str, causing the bitwise operations below to fail - # so convert to bytearray. - if six.PY2: - data = bytearray(bytes(data)) - length = len(data) seed = 0x9747b28c # 'm' and 'r' are mixing constants generated offline. 
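The murmur2 shim removed here existed because Python 2 str indexing returned one-character strings; on Python 3, bytes and bytearray both index to ints, so the hash body works on either input unchanged. A hypothetical check, not part of the patch:

    key = b'example-key'
    assert isinstance(key[0], int)                # b'e' indexes to 101
    assert list(bytearray(key)) == list(key)      # same int sequence either way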
diff --git a/kafka/sasl/scram.py b/kafka/sasl/scram.py index d8cd071a7..75cd6f6da 100644 --- a/kafka/sasl/scram.py +++ b/kafka/sasl/scram.py @@ -8,18 +8,13 @@ from kafka.sasl.abc import SaslMechanism -from kafka.vendor import six log = logging.getLogger(__name__) -if six.PY2: - def xor_bytes(left, right): - return bytearray(ord(lb) ^ ord(rb) for lb, rb in zip(left, right)) -else: - def xor_bytes(left, right): - return bytes(lb ^ rb for lb, rb in zip(left, right)) +def xor_bytes(left, right): + return bytes(lb ^ rb for lb, rb in zip(left, right)) class SaslMechanismScram(SaslMechanism): diff --git a/kafka/util.py b/kafka/util.py index 6bc4c7051..e6f7acf2a 100644 --- a/kafka/util.py +++ b/kafka/util.py @@ -10,20 +10,16 @@ from kafka.vendor import six -if six.PY3: - MAX_INT = 2 ** 31 - TO_SIGNED = 2 ** 32 - - def crc32(data): - crc = binascii.crc32(data) - # py2 and py3 behave a little differently - # CRC is encoded as a signed int in kafka protocol - # so we'll convert the py3 unsigned result to signed - if crc >= MAX_INT: - crc -= TO_SIGNED - return crc -else: - from binascii import crc32 # noqa: F401 +MAX_INT = 2 ** 31 +TO_SIGNED = 2 ** 32 + +def crc32(data): + crc = binascii.crc32(data) + # CRC is encoded as a signed int in kafka protocol + # so we'll convert the unsigned result to signed + if crc >= MAX_INT: + crc -= TO_SIGNED + return crc class Timer: diff --git a/test/test_conn.py b/test/test_conn.py index 3fa64066e..457d5e170 100644 --- a/test/test_conn.py +++ b/test/test_conn.py @@ -22,13 +22,6 @@ import kafka.errors as Errors -from kafka.vendor import six - -if six.PY2: - ConnectionError = socket.error - TimeoutError = socket.error - BlockingIOError = Exception - @pytest.fixture def dns_lookup(mocker): From ab7d120f405d8dea8d904b325ffb7e99bcfb4be3 Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 17:51:59 -0800 Subject: [PATCH 04/18] Convert remaining six usages to py3 equivalents --- kafka/admin/client.py | 3 +- kafka/benchmarks/varint_speed.py | 8 +-- kafka/client_async.py | 6 +-- kafka/cluster.py | 6 +-- kafka/consumer/fetcher.py | 28 +++++----- kafka/consumer/group.py | 10 ++-- kafka/consumer/subscription_state.py | 22 ++++---- kafka/coordinator/assignors/range.py | 6 +-- kafka/coordinator/assignors/roundrobin.py | 6 +-- .../assignors/sticky/partition_movements.py | 4 +- .../assignors/sticky/sticky_assignor.py | 43 ++++++++------- kafka/coordinator/base.py | 4 +- kafka/coordinator/consumer.py | 32 ++++++----- kafka/producer/kafka.py | 4 +- kafka/producer/sender.py | 14 +++-- kafka/producer/transaction_manager.py | 10 ++-- kafka/sasl/msk.py | 1 + kafka/util.py | 3 +- test/integration/test_consumer_group.py | 11 ++-- test/test_assignors.py | 53 +++++++++---------- test/test_sender.py | 2 - test/test_subscription_state.py | 5 +- 22 files changed, 125 insertions(+), 156 deletions(-) diff --git a/kafka/admin/client.py b/kafka/admin/client.py index f21ac97f9..c804ecae7 100644 --- a/kafka/admin/client.py +++ b/kafka/admin/client.py @@ -8,7 +8,6 @@ import time from . 
import ConfigResourceType -from kafka.vendor import six from kafka.admin.acl_resource import ACLOperation, ACLPermissionType, ACLFilter, ACL, ResourcePattern, ResourceType, \ ACLResourcePatternType, valid_acl_operations @@ -1418,7 +1417,7 @@ def _list_consumer_group_offsets_request(self, group_id, partitions=None): topics_partitions_dict = defaultdict(set) for topic, partition in partitions: topics_partitions_dict[topic].add(partition) - topics_partitions = list(six.iteritems(topics_partitions_dict)) + topics_partitions = list(topics_partitions_dict.items()) return OffsetFetchRequest[version](group_id, topics_partitions) def _list_consumer_group_offsets_process_response(self, response): diff --git a/kafka/benchmarks/varint_speed.py b/kafka/benchmarks/varint_speed.py index 27e133b39..506a9ecf5 100644 --- a/kafka/benchmarks/varint_speed.py +++ b/kafka/benchmarks/varint_speed.py @@ -1,7 +1,6 @@ #!/usr/bin/env python from __future__ import print_function import pyperf -from kafka.vendor import six test_data = [ @@ -114,7 +113,10 @@ def encode_varint_1(num): return buf[:i + 1] -def encode_varint_2(value, int2byte=six.int2byte): +def int2byte(i): + return bytes((i,)) + +def encode_varint_2(value): value = (value << 1) ^ (value >> 63) bits = value & 0x7f @@ -141,7 +143,7 @@ def encode_varint_3(value, buf): return value -def encode_varint_4(value, int2byte=six.int2byte): +def encode_varint_4(value): value = (value << 1) ^ (value >> 63) if value <= 0x7f: # 1 byte diff --git a/kafka/client_async.py b/kafka/client_async.py index b1017e064..b0acb92aa 100644 --- a/kafka/client_async.py +++ b/kafka/client_async.py @@ -16,8 +16,6 @@ # vendored backport module from kafka.vendor import selectors34 as selectors -from kafka.vendor import six - from kafka.cluster import ClusterMetadata from kafka.conn import BrokerConnection, ConnectionStates, get_ip_port_afi from kafka import errors as Errors @@ -784,7 +782,7 @@ def _poll(self, timeout): if conn not in processed and conn.connected() and conn._sock.pending(): self._pending_completion.extend(conn.recv()) - for conn in six.itervalues(self._conns): + for conn in self._conns.values(): if conn.requests_timed_out(): timed_out = conn.timed_out_ifrs() timeout_ms = (timed_out[0][2] - timed_out[0][1]) * 1000 @@ -923,7 +921,7 @@ def add_topic(self, topic): def _next_ifr_request_timeout_ms(self): if self._conns: - return min([conn.next_ifr_request_timeout_ms() for conn in six.itervalues(self._conns)]) + return min([conn.next_ifr_request_timeout_ms() for conn in self._conns.values()]) else: return float('inf') diff --git a/kafka/cluster.py b/kafka/cluster.py index 9e819246e..315b1d225 100644 --- a/kafka/cluster.py +++ b/kafka/cluster.py @@ -8,8 +8,6 @@ import threading import time -from kafka.vendor import six - from kafka import errors as Errors from kafka.conn import get_ip_port_afi from kafka.future import Future @@ -133,7 +131,7 @@ def available_partitions_for_topic(self, topic): if topic not in self._partitions: return None return set([partition for partition, metadata - in six.iteritems(self._partitions[topic]) + in self._partitions[topic].items() if metadata.leader != -1]) def leader_for_partition(self, partition): @@ -435,7 +433,7 @@ def collect_hosts(hosts, randomize=True): randomize the returned list. 
""" - if isinstance(hosts, six.string_types): + if isinstance(hosts, str): hosts = hosts.strip().split(',') result = [] diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py index 8e524790c..6e52dc80d 100644 --- a/kafka/consumer/fetcher.py +++ b/kafka/consumer/fetcher.py @@ -7,8 +7,6 @@ import sys import time -from kafka.vendor import six - import kafka.errors as Errors from kafka.future import Future from kafka.metrics.stats import Avg, Count, Max, Rate @@ -55,7 +53,7 @@ class RecordTooLargeError(Errors.KafkaError): pass -class Fetcher(six.Iterator): +class Fetcher(object): DEFAULT_CONFIG = { 'key_deserializer': None, 'value_deserializer': None, @@ -147,7 +145,7 @@ def send_fetches(self): List of Futures: each future resolves to a FetchResponse """ futures = [] - for node_id, (request, fetch_offsets) in six.iteritems(self._create_fetch_requests()): + for node_id, (request, fetch_offsets) in self._create_fetch_requests().items(): log.debug("Sending FetchRequest to node %s", node_id) self._nodes_with_pending_fetch_requests.add(node_id) future = self._client.send(node_id, request, wakeup=False) @@ -421,7 +419,7 @@ def _reset_offset_if_needed(self, partition, timestamp, offset): def _reset_offsets_async(self, timestamps): timestamps_by_node = self._group_list_offset_requests(timestamps) - for node_id, timestamps_and_epochs in six.iteritems(timestamps_by_node): + for node_id, timestamps_and_epochs in timestamps_by_node.items(): if not self._client.ready(node_id): continue partitions = set(timestamps_and_epochs.keys()) @@ -434,7 +432,7 @@ def on_success(timestamps_and_epochs, result): self._subscriptions.reset_failed(partitions_to_retry, time.time() + self.config['retry_backoff_ms'] / 1000) self._client.cluster.request_update() - for partition, offset in six.iteritems(fetched_offsets): + for partition, offset in fetched_offsets.items(): ts, _epoch = timestamps_and_epochs[partition] self._reset_offset_if_needed(partition, ts, offset.offset) @@ -484,7 +482,7 @@ def on_fail(err): if not list_offsets_future.is_done: list_offsets_future.failure(err) - for node_id, timestamps in six.iteritems(timestamps_by_node): + for node_id, timestamps in timestamps_by_node.items(): _f = self._send_list_offsets_request(node_id, timestamps) _f.add_callback(on_success, remaining_responses) _f.add_errback(on_fail) @@ -492,7 +490,7 @@ def on_fail(err): def _group_list_offset_requests(self, timestamps): timestamps_by_node = collections.defaultdict(dict) - for partition, timestamp in six.iteritems(timestamps): + for partition, timestamp in timestamps.items(): node_id = self._client.cluster.leader_for_partition(partition) if node_id is None: self._client.add_topic(partition.topic) @@ -512,7 +510,7 @@ def _send_list_offsets_request(self, node_id, timestamps_and_epochs): if self.config['isolation_level'] == 'read_committed' and version < 2: raise Errors.UnsupportedVersionError('read_committed isolation level requires ListOffsetsRequest >= v2') by_topic = collections.defaultdict(list) - for tp, (timestamp, leader_epoch) in six.iteritems(timestamps_and_epochs): + for tp, (timestamp, leader_epoch) in timestamps_and_epochs.items(): if version >= 4: data = (tp.partition, leader_epoch, timestamp) elif version >= 1: @@ -525,11 +523,11 @@ def _send_list_offsets_request(self, node_id, timestamps_and_epochs): request = ListOffsetsRequest[version]( -1, self._isolation_level, - list(six.iteritems(by_topic))) + list(by_topic.items())) else: request = ListOffsetsRequest[version]( -1, - list(six.iteritems(by_topic))) + 
list(by_topic.items())) # Client returns a future that only fails on network issues # so create a separate future and attach a callback to update it @@ -697,7 +695,7 @@ def _create_fetch_requests(self): partition, position.offset) requests = {} - for node_id, next_partitions in six.iteritems(fetchable): + for node_id, next_partitions in fetchable.items(): if version >= 7 and self.config['enable_incremental_fetch_sessions']: if node_id not in self._session_handlers: self._session_handlers[node_id] = FetchSessionHandler(node_id) @@ -741,7 +739,7 @@ def _create_fetch_requests(self): session.to_forget) fetch_offsets = {} - for tp, partition_data in six.iteritems(next_partitions): + for tp, partition_data in next_partitions.items(): if version <= 8: offset = partition_data[1] else: @@ -1283,7 +1281,7 @@ def to_send(self): # Return as list of [(topic, [(partition, ...), ...]), ...] # so it can be passed directly to encoder partition_data = collections.defaultdict(list) - for tp, partition_info in six.iteritems(self._to_send): + for tp, partition_info in self._to_send.items(): partition_data[tp.topic].append(partition_info) return list(partition_data.items()) @@ -1334,7 +1332,7 @@ def record(self, partition, num_bytes, num_records): if not self.unrecorded_partitions: self.sensors.bytes_fetched.record(self.fetch_metrics.total_bytes) self.sensors.records_fetched.record(self.fetch_metrics.total_records) - for topic, metrics in six.iteritems(self.topic_fetch_metrics): + for topic, metrics in self.topic_fetch_metrics.items(): self.sensors.record_topic_fetch_metrics(topic, metrics.total_bytes, metrics.total_records) diff --git a/kafka/consumer/group.py b/kafka/consumer/group.py index 49fa3e261..b2d79fb44 100644 --- a/kafka/consumer/group.py +++ b/kafka/consumer/group.py @@ -8,8 +8,6 @@ from kafka.errors import KafkaConfigurationError, UnsupportedVersionError -from kafka.vendor import six - from kafka.client_async import KafkaClient, selectors from kafka.consumer.fetcher import Fetcher from kafka.consumer.subscription_state import SubscriptionState @@ -25,7 +23,7 @@ log = logging.getLogger(__name__) -class KafkaConsumer(six.Iterator): +class KafkaConsumer(object): """Consume records from a Kafka cluster. The consumer will transparently handle the failure of servers in the Kafka @@ -1027,7 +1025,7 @@ def metrics(self, raw=False): return self._metrics.metrics.copy() metrics = {} - for k, v in six.iteritems(self._metrics.metrics.copy()): + for k, v in self._metrics.metrics.copy().items(): if k.group not in metrics: metrics[k.group] = {} if k.name not in metrics[k.group]: @@ -1072,7 +1070,7 @@ def offsets_for_times(self, timestamps): raise UnsupportedVersionError( "offsets_for_times API not supported for cluster version {}" .format(self.config['api_version'])) - for tp, ts in six.iteritems(timestamps): + for tp, ts in timestamps.items(): timestamps[tp] = int(ts) if ts < 0: raise ValueError( @@ -1183,7 +1181,7 @@ def _update_fetch_positions(self, timeout_ms=None): def _message_generator_v2(self): timeout_ms = 1000 * max(0, self._consumer_timeout - time.time()) record_map = self.poll(timeout_ms=timeout_ms, update_offsets=False) - for tp, records in six.iteritems(record_map): + for tp, records in record_map.items(): # Generators are stateful, and it is possible that the tp / records # here may become stale during iteration -- i.e., we seek to a # different offset, pause consumption, or lose assignment. 
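Replacing six.Iterator with object as the base for Fetcher (above) and KafkaConsumer is safe because that base class only aliased Python 2's next() to __next__(); under Python 3 a plain object subclass is a full iterator once it defines __iter__ and __next__. A toy stand-in for the consumer's iterator protocol:

    class CountDown(object):
        """Illustrative iterator; not part of kafka-python."""
        def __init__(self, n):
            self.n = n

        def __iter__(self):
            return self

        def __next__(self):
            if self.n <= 0:
                raise StopIteration
            self.n -= 1
            return self.n

    assert list(CountDown(3)) == [2, 1, 0]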
diff --git a/kafka/consumer/subscription_state.py b/kafka/consumer/subscription_state.py index ecadd17a1..7dc3345f9 100644 --- a/kafka/consumer/subscription_state.py +++ b/kafka/consumer/subscription_state.py @@ -18,8 +18,6 @@ import threading import time -from kafka.vendor import six - import kafka.errors as Errors from kafka.protocol.list_offsets import OffsetResetStrategy from kafka.structs import OffsetAndMetadata @@ -167,7 +165,7 @@ def change_subscription(self, topics): if not self.partitions_auto_assigned(): raise Errors.IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE) - if isinstance(topics, six.string_types): + if isinstance(topics, str): topics = [topics] if self.subscription == set(topics): @@ -256,13 +254,13 @@ def assign_from_subscribed(self, assignments): def _set_assignment(self, partition_states, randomize=False): """Batch partition assignment by topic (self.assignment is OrderedDict)""" self.assignment.clear() - topics = [tp.topic for tp in six.iterkeys(partition_states)] + topics = [tp.topic for tp in partition_states] if randomize: random.shuffle(topics) topic_partitions = OrderedDict({topic: [] for topic in topics}) - for tp in six.iterkeys(partition_states): + for tp in partition_states: topic_partitions[tp.topic].append(tp) - for topic in six.iterkeys(topic_partitions): + for topic in topic_partitions: for tp in topic_partitions[topic]: self.assignment[tp] = partition_states[tp] @@ -324,7 +322,7 @@ def paused_partitions(self): def fetchable_partitions(self): """Return ordered list of TopicPartitions that should be Fetched.""" fetchable = list() - for partition, state in six.iteritems(self.assignment): + for partition, state in self.assignment.items(): if state.is_fetchable(): fetchable.append(partition) return fetchable @@ -338,7 +336,7 @@ def partitions_auto_assigned(self): def all_consumed_offsets(self): """Returns consumed offsets as {TopicPartition: OffsetAndMetadata}""" all_consumed = {} - for partition, state in six.iteritems(self.assignment): + for partition, state in self.assignment.items(): if state.has_valid_position: all_consumed[partition] = state.position return all_consumed @@ -371,7 +369,7 @@ def is_offset_reset_needed(self, partition): @synchronized def has_all_fetch_positions(self): - for state in six.itervalues(self.assignment): + for state in self.assignment.values(): if not state.has_valid_position: return False return True @@ -379,7 +377,7 @@ def has_all_fetch_positions(self): @synchronized def missing_fetch_positions(self): missing = set() - for partition, state in six.iteritems(self.assignment): + for partition, state in self.assignment.items(): if state.is_missing_position(): missing.add(partition) return missing @@ -391,7 +389,7 @@ def has_valid_position(self, partition): @synchronized def reset_missing_positions(self): partitions_with_no_offsets = set() - for tp, state in six.iteritems(self.assignment): + for tp, state in self.assignment.items(): if state.is_missing_position(): if self._default_offset_reset_strategy == OffsetResetStrategy.NONE: partitions_with_no_offsets.add(tp) @@ -404,7 +402,7 @@ def reset_missing_positions(self): @synchronized def partitions_needing_reset(self): partitions = set() - for tp, state in six.iteritems(self.assignment): + for tp, state in self.assignment.items(): if state.awaiting_reset and state.is_reset_allowed(): partitions.add(tp) return partitions diff --git a/kafka/coordinator/assignors/range.py b/kafka/coordinator/assignors/range.py index d639d5b75..6da6c3c91 100644 --- 
a/kafka/coordinator/assignors/range.py +++ b/kafka/coordinator/assignors/range.py @@ -4,8 +4,6 @@ import itertools import logging -from kafka.vendor import six - from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor from kafka.coordinator.protocol import ConsumerProtocolMemberMetadata_v0, ConsumerProtocolMemberAssignment_v0 @@ -35,7 +33,7 @@ class RangePartitionAssignor(AbstractPartitionAssignor): @classmethod def assign(cls, cluster, group_subscriptions): consumers_per_topic = collections.defaultdict(list) - for member_id, subscription in six.iteritems(group_subscriptions): + for member_id, subscription in group_subscriptions.items(): for topic in subscription.topics: consumers_per_topic[topic].append((subscription.group_instance_id, member_id)) @@ -47,7 +45,7 @@ def assign(cls, cluster, group_subscriptions): grouped = {k: list(g) for k, g in itertools.groupby(consumers_per_topic[topic], key=lambda ids: ids[0] is not None)} consumers_per_topic[topic] = sorted(grouped.get(True, [])) + sorted(grouped.get(False, [])) # sorted static members first, then sorted dynamic - for topic, consumers_for_topic in six.iteritems(consumers_per_topic): + for topic, consumers_for_topic in consumers_per_topic.items(): partitions = cluster.partitions_for_topic(topic) if partitions is None: log.warning('No partition metadata for topic %s', topic) diff --git a/kafka/coordinator/assignors/roundrobin.py b/kafka/coordinator/assignors/roundrobin.py index 8d83972cc..05d319497 100644 --- a/kafka/coordinator/assignors/roundrobin.py +++ b/kafka/coordinator/assignors/roundrobin.py @@ -4,8 +4,6 @@ import itertools import logging -from kafka.vendor import six - from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor from kafka.coordinator.protocol import ConsumerProtocolMemberMetadata_v0, ConsumerProtocolMemberAssignment_v0 from kafka.structs import TopicPartition @@ -51,7 +49,7 @@ class RoundRobinPartitionAssignor(AbstractPartitionAssignor): @classmethod def assign(cls, cluster, group_subscriptions): all_topics = set() - for subscription in six.itervalues(group_subscriptions): + for subscription in group_subscriptions.values(): all_topics.update(subscription.topics) all_topic_partitions = [] @@ -68,7 +66,7 @@ def assign(cls, cluster, group_subscriptions): assignment = collections.defaultdict(lambda: collections.defaultdict(list)) # Sort static and dynamic members separately to maintain stable static assignments - ungrouped = [(subscription.group_instance_id, member_id) for member_id, subscription in six.iteritems(group_subscriptions)] + ungrouped = [(subscription.group_instance_id, member_id) for member_id, subscription in group_subscriptions.items()] grouped = {k: list(g) for k, g in itertools.groupby(ungrouped, key=lambda ids: ids[0] is not None)} member_list = sorted(grouped.get(True, [])) + sorted(grouped.get(False, [])) # sorted static members first, then sorted dynamic member_iter = itertools.cycle(member_list) diff --git a/kafka/coordinator/assignors/sticky/partition_movements.py b/kafka/coordinator/assignors/sticky/partition_movements.py index 8851e4cda..78f2eb22c 100644 --- a/kafka/coordinator/assignors/sticky/partition_movements.py +++ b/kafka/coordinator/assignors/sticky/partition_movements.py @@ -2,8 +2,6 @@ from collections import defaultdict, namedtuple from copy import deepcopy -from kafka.vendor import six - log = logging.getLogger(__name__) @@ -74,7 +72,7 @@ def get_partition_to_be_moved(self, partition, old_consumer, new_consumer): return 
next(iter(self.partition_movements_by_topic[partition.topic][reverse_pair])) def are_sticky(self): - for topic, movements in six.iteritems(self.partition_movements_by_topic): + for topic, movements in self.partition_movements_by_topic.items(): movement_pairs = set(movements.keys()) if self._has_cycles(movement_pairs): log.error( diff --git a/kafka/coordinator/assignors/sticky/sticky_assignor.py b/kafka/coordinator/assignors/sticky/sticky_assignor.py index 3166356fe..243c26709 100644 --- a/kafka/coordinator/assignors/sticky/sticky_assignor.py +++ b/kafka/coordinator/assignors/sticky/sticky_assignor.py @@ -10,7 +10,6 @@ from kafka.protocol.struct import Struct from kafka.protocol.types import String, Array, Int32 from kafka.structs import TopicPartition -from kafka.vendor import six log = logging.getLogger(__name__) @@ -110,7 +109,7 @@ def balance(self): # narrow down the reassignment scope to only those partitions that can actually be reassigned fixed_partitions = set() - for partition in six.iterkeys(self.partition_to_all_potential_consumers): + for partition in self.partition_to_all_potential_consumers: if not self._can_partition_participate_in_reassignment(partition): fixed_partitions.add(partition) for fixed_partition in fixed_partitions: @@ -119,7 +118,7 @@ def balance(self): # narrow down the reassignment scope to only those consumers that are subject to reassignment fixed_assignments = {} - for consumer in six.iterkeys(self.consumer_to_all_potential_partitions): + for consumer in self.consumer_to_all_potential_partitions: if not self._can_consumer_participate_in_reassignment(consumer): self._remove_consumer_from_current_subscriptions_and_maintain_order(consumer) fixed_assignments[consumer] = self.current_assignment[consumer] @@ -148,7 +147,7 @@ def balance(self): self.current_partition_consumer.update(prebalance_partition_consumers) # add the fixed assignments (those that could not change) back - for consumer, partitions in six.iteritems(fixed_assignments): + for consumer, partitions in fixed_assignments.items(): self.current_assignment[consumer] = partitions self._add_consumer_to_current_subscriptions_and_maintain_order(consumer) @@ -156,8 +155,8 @@ def get_final_assignment(self, member_id): assignment = defaultdict(list) for topic_partition in self.current_assignment[member_id]: assignment[topic_partition.topic].append(topic_partition.partition) - assignment = {k: sorted(v) for k, v in six.iteritems(assignment)} - return six.viewitems(assignment) + assignment = {k: sorted(v) for k, v in assignment.items()} + return assignment.items() def _initialize(self, cluster): self._init_current_assignments(self.members) @@ -170,7 +169,7 @@ def _initialize(self, cluster): for p in partitions: partition = TopicPartition(topic=topic, partition=p) self.partition_to_all_potential_consumers[partition] = [] - for consumer_id, member_metadata in six.iteritems(self.members): + for consumer_id, member_metadata in self.members.items(): self.consumer_to_all_potential_partitions[consumer_id] = [] for topic in member_metadata.subscription: if cluster.partitions_for_topic(topic) is None: @@ -190,7 +189,7 @@ def _init_current_assignments(self, members): # for each partition we create a map of its consumers by generation sorted_partition_consumers_by_generation = {} - for consumer, member_metadata in six.iteritems(members): + for consumer, member_metadata in members.items(): for partitions in member_metadata.partitions: if partitions in sorted_partition_consumers_by_generation: consumers = 
sorted_partition_consumers_by_generation[partitions] @@ -209,7 +208,7 @@ def _init_current_assignments(self, members): # previous_assignment holds the prior ConsumerGenerationPair (before current) of each partition # current and previous consumers are the last two consumers of each partition in the above sorted map - for partitions, consumers in six.iteritems(sorted_partition_consumers_by_generation): + for partitions, consumers in sorted_partition_consumers_by_generation.items(): generations = sorted(consumers.keys(), reverse=True) self.current_assignment[consumers[generations[0]]].append(partitions) # now update previous assignment if any @@ -220,7 +219,7 @@ def _init_current_assignments(self, members): self.is_fresh_assignment = len(self.current_assignment) == 0 - for consumer_id, partitions in six.iteritems(self.current_assignment): + for consumer_id, partitions in self.current_assignment.items(): for partition in partitions: self.current_partition_consumer[partition] = consumer_id @@ -230,14 +229,14 @@ def _are_subscriptions_identical(self): true, if both potential consumers of partitions and potential partitions that consumers can consume are the same """ - if not has_identical_list_elements(list(six.itervalues(self.partition_to_all_potential_consumers))): + if not has_identical_list_elements(list(self.partition_to_all_potential_consumers.values())): return False - return has_identical_list_elements(list(six.itervalues(self.consumer_to_all_potential_partitions))) + return has_identical_list_elements(list(self.consumer_to_all_potential_partitions.values())) def _populate_sorted_partitions(self): # set of topic partitions with their respective potential consumers all_partitions = set((tp, tuple(consumers)) - for tp, consumers in six.iteritems(self.partition_to_all_potential_consumers)) + for tp, consumers in self.partition_to_all_potential_consumers.items()) partitions_sorted_by_num_of_potential_consumers = sorted(all_partitions, key=partitions_comparator_key) self.sorted_partitions = [] @@ -246,7 +245,7 @@ def _populate_sorted_partitions(self): # then we just need to simply list partitions in a round robin fashion (from consumers with # most assigned partitions to those with least) assignments = deepcopy(self.current_assignment) - for consumer_id, partitions in six.iteritems(assignments): + for consumer_id, partitions in assignments.items(): to_remove = [] for partition in partitions: if partition not in self.partition_to_all_potential_consumers: @@ -255,7 +254,7 @@ def _populate_sorted_partitions(self): partitions.remove(partition) sorted_consumers = SortedSet( - iterable=[(consumer, tuple(partitions)) for consumer, partitions in six.iteritems(assignments)], + iterable=[(consumer, tuple(partitions)) for consumer, partitions in assignments.items()], key=subscriptions_comparator_key, ) # at this point, sorted_consumers contains an ascending-sorted list of consumers based on @@ -267,7 +266,7 @@ def _populate_sorted_partitions(self): remaining_partitions = assignments[consumer] # from partitions that had a different consumer before, # keep only those that are assigned to this consumer now - previous_partitions = set(six.iterkeys(self.previous_assignment)).intersection(set(remaining_partitions)) + previous_partitions = set(self.previous_assignment.keys()).intersection(set(remaining_partitions)) if previous_partitions: # if there is a partition of this consumer that was assigned to another consumer before # mark it as good options for reassignment @@ -292,7 +291,7 @@ def 
_populate_partitions_to_reassign(self): self.unassigned_partitions = deepcopy(self.sorted_partitions) assignments_to_remove = [] - for consumer_id, partitions in six.iteritems(self.current_assignment): + for consumer_id, partitions in self.current_assignment.items(): if consumer_id not in self.members: # if a consumer that existed before (and had some partition assignments) is now removed, # remove it from current_assignment @@ -325,7 +324,7 @@ def _populate_partitions_to_reassign(self): def _initialize_current_subscriptions(self): self.sorted_current_subscriptions = SortedSet( - iterable=[(consumer, tuple(partitions)) for consumer, partitions in six.iteritems(self.current_assignment)], + iterable=[(consumer, tuple(partitions)) for consumer, partitions in self.current_assignment.items()], key=subscriptions_comparator_key, ) @@ -352,7 +351,7 @@ def _is_balanced(self): # create a mapping from partitions to the consumer assigned to them all_assigned_partitions = {} - for consumer_id, consumer_partitions in six.iteritems(self.current_assignment): + for consumer_id, consumer_partitions in self.current_assignment.items(): for partition in consumer_partitions: if partition in all_assigned_partitions: log.error("{} is assigned to more than one consumer.".format(partition)) @@ -491,11 +490,11 @@ def _get_balance_score(assignment): """ score = 0 consumer_to_assignment = {} - for consumer_id, partitions in six.iteritems(assignment): + for consumer_id, partitions in assignment.items(): consumer_to_assignment[consumer_id] = len(partitions) consumers_to_explore = set(consumer_to_assignment.keys()) - for consumer_id in consumer_to_assignment.keys(): + for consumer_id in consumer_to_assignment: if consumer_id in consumers_to_explore: consumers_to_explore.remove(consumer_id) for other_consumer_id in consumers_to_explore: @@ -593,7 +592,7 @@ def assign(cls, cluster, members): dict: {member_id: MemberAssignment} """ members_metadata = {} - for consumer, member_metadata in six.iteritems(members): + for consumer, member_metadata in members.items(): members_metadata[consumer] = cls.parse_member_metadata(member_metadata) executor = StickyAssignmentExecutor(cluster, members_metadata) diff --git a/kafka/coordinator/base.py b/kafka/coordinator/base.py index e1d8d8336..b4bbb1f45 100644 --- a/kafka/coordinator/base.py +++ b/kafka/coordinator/base.py @@ -8,8 +8,6 @@ import warnings import weakref -from kafka.vendor import six - from kafka.coordinator.heartbeat import Heartbeat from kafka import errors as Errors from kafka.future import Future @@ -710,7 +708,7 @@ def _on_join_leader(self, response): group_assignment = self._perform_assignment(response.leader_id, response.group_protocol, members) - for member_id, assignment in six.iteritems(group_assignment): + for member_id, assignment in group_assignment.items(): if not isinstance(assignment, bytes): group_assignment[member_id] = assignment.encode() diff --git a/kafka/coordinator/consumer.py b/kafka/coordinator/consumer.py index a7aac4352..8860966f8 100644 --- a/kafka/coordinator/consumer.py +++ b/kafka/coordinator/consumer.py @@ -6,8 +6,6 @@ import logging import time -from kafka.vendor import six - from kafka.coordinator.base import BaseCoordinator, Generation from kafka.coordinator.assignors.range import RangePartitionAssignor from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor @@ -367,7 +365,7 @@ def _perform_assignment(self, leader_id, assignment_strategy, members): log.debug("Finished assignment for group %s: %s", self.group_id, 
assignments) group_assignment = {} - for member_id, assignment in six.iteritems(assignments): + for member_id, assignment in assignments.items(): group_assignment[member_id] = assignment return group_assignment @@ -421,7 +419,7 @@ def refresh_committed_offsets_if_needed(self, timeout_ms=None): offsets = self.fetch_committed_offsets(missing_fetch_positions, timeout_ms=timeout_ms) except Errors.KafkaTimeoutError: return False - for partition, offset in six.iteritems(offsets): + for partition, offset in offsets.items(): log.debug("Setting offset for partition %s to the committed offset %s", partition, offset.offset) self._subscription.seek(partition, offset.offset) return True @@ -640,7 +638,7 @@ def _send_offset_commit_request(self, offsets): # create the offset commit request offset_data = collections.defaultdict(dict) - for tp, offset in six.iteritems(offsets): + for tp, offset in offsets.items(): offset_data[tp.topic][tp.partition] = offset version = self._client.api_version(OffsetCommitRequest, max_version=7) @@ -675,8 +673,8 @@ def _send_offset_commit_request(self, offsets): partition, offset.offset, offset.metadata - ) for partition, offset in six.iteritems(partitions)] - ) for topic, partitions in six.iteritems(offset_data)] + ) for partition, offset in partitions.items()] + ) for topic, partitions in offset_data.items()] ) elif version == 1: request = OffsetCommitRequest[version]( @@ -691,8 +689,8 @@ def _send_offset_commit_request(self, offsets): offset.offset, -1, # timestamp, unused offset.metadata - ) for partition, offset in six.iteritems(partitions)] - ) for topic, partitions in six.iteritems(offset_data)] + ) for partition, offset in partitions.items()] + ) for topic, partitions in offset_data.items()] ) elif version <= 4: request = OffsetCommitRequest[version]( @@ -705,8 +703,8 @@ def _send_offset_commit_request(self, offsets): partition, offset.offset, offset.metadata - ) for partition, offset in six.iteritems(partitions)] - ) for topic, partitions in six.iteritems(offset_data)] + ) for partition, offset in partitions.items()] + ) for topic, partitions in offset_data.items()] ) elif version <= 5: request = OffsetCommitRequest[version]( @@ -718,8 +716,8 @@ def _send_offset_commit_request(self, offsets): partition, offset.offset, offset.metadata - ) for partition, offset in six.iteritems(partitions)] - ) for topic, partitions in six.iteritems(offset_data)] + ) for partition, offset in partitions.items()] + ) for topic, partitions in offset_data.items()] ) elif version <= 6: request = OffsetCommitRequest[version]( @@ -732,8 +730,8 @@ def _send_offset_commit_request(self, offsets): offset.offset, offset.leader_epoch, offset.metadata - ) for partition, offset in six.iteritems(partitions)] - ) for topic, partitions in six.iteritems(offset_data)] + ) for partition, offset in partitions.items()] + ) for topic, partitions in offset_data.items()] ) else: request = OffsetCommitRequest[version]( @@ -747,8 +745,8 @@ def _send_offset_commit_request(self, offsets): offset.offset, offset.leader_epoch, offset.metadata - ) for partition, offset in six.iteritems(partitions)] - ) for topic, partitions in six.iteritems(offset_data)] + ) for partition, offset in partitions.items()] + ) for topic, partitions in offset_data.items()] ) log.debug("Sending offset-commit request with %s for group %s to %s", diff --git a/kafka/producer/kafka.py b/kafka/producer/kafka.py index 005274c82..529df0176 100644 --- a/kafka/producer/kafka.py +++ b/kafka/producer/kafka.py @@ -8,8 +8,6 @@ import warnings import 
weakref -from kafka.vendor import six - import kafka.errors as Errors from kafka.client_async import KafkaClient, selectors from kafka.codec import has_gzip, has_snappy, has_lz4, has_zstd @@ -1012,7 +1010,7 @@ def metrics(self, raw=False): return self._metrics.metrics.copy() metrics = {} - for k, v in six.iteritems(self._metrics.metrics.copy()): + for k, v in self._metrics.metrics.copy().items(): if k.group not in metrics: metrics[k.group] = {} if k.name not in metrics[k.group]: diff --git a/kafka/producer/sender.py b/kafka/producer/sender.py index b7c38a4f5..029f600f8 100644 --- a/kafka/producer/sender.py +++ b/kafka/producer/sender.py @@ -7,8 +7,6 @@ import threading import time -from kafka.vendor import six - from kafka import errors as Errors from kafka.metrics.measurable import AnonMeasurable from kafka.metrics.stats import Avg, Max, Rate @@ -86,7 +84,7 @@ def _get_expired_inflight_batches(self, now=None): """Get the in-flight batches that has reached delivery timeout.""" expired_batches = [] to_remove = [] - for tp, queue in six.iteritems(self._in_flight_batches): + for tp, queue in self._in_flight_batches.items(): while queue: _created_at, batch = queue[0] if batch.has_reached_delivery_timeout(self._accumulator.delivery_timeout_ms): @@ -206,7 +204,7 @@ def _send_producer_data(self, now=None): batches_by_node = self._accumulator.drain( self._metadata, ready_nodes, self.config['max_request_size'], now=now) - for batch_list in six.itervalues(batches_by_node): + for batch_list in batches_by_node.values(): for batch in batch_list: item = (batch.created, batch) queue = self._in_flight_batches[batch.topic_partition] @@ -214,7 +212,7 @@ def _send_producer_data(self, now=None): if self.config['guarantee_message_order']: # Mute all the partitions drained - for batch_list in six.itervalues(batches_by_node): + for batch_list in batches_by_node.values(): for batch in batch_list: self._accumulator.muted.add(batch.topic_partition) @@ -272,7 +270,7 @@ def _send_producer_data(self, now=None): # metadata expiry time poll_timeout_ms = 0 - for node_id, request in six.iteritems(requests): + for node_id, request in requests.items(): batches = batches_by_node[node_id] log.debug('%s: Sending Produce Request: %r', str(self), request) (self._client.send(node_id, request, wakeup=False) @@ -584,7 +582,7 @@ def _create_produce_requests(self, collated): dict: {node_id: ProduceRequest} (version depends on client api_versions) """ requests = {} - for node_id, batches in six.iteritems(collated): + for node_id, batches in collated.items(): if batches: requests[node_id] = self._produce_request( node_id, self.config['acks'], @@ -608,7 +606,7 @@ def _produce_request(self, node_id, acks, timeout, batches): version = self._client.api_version(ProduceRequest, max_version=8) topic_partition_data = [ (topic, list(partition_info.items())) - for topic, partition_info in six.iteritems(produce_records_by_partition)] + for topic, partition_info in produce_records_by_partition.items()] transactional_id = self._transaction_manager.transactional_id if self._transaction_manager else None if version >= 3: return ProduceRequest[version]( diff --git a/kafka/producer/transaction_manager.py b/kafka/producer/transaction_manager.py index 33df2df98..5a84bd069 100644 --- a/kafka/producer/transaction_manager.py +++ b/kafka/producer/transaction_manager.py @@ -6,8 +6,6 @@ import logging import threading -from kafka.vendor import six - try: # enum in stdlib as of py3.4 from enum import IntEnum # pylint: disable=import-error @@ -686,7 +684,7 @@ 
def handle_response(self, response): for topic, partition_data in response.results for partition, error_code in partition_data} - for tp, error in six.iteritems(results): + for tp, error in results.items(): if error is Errors.NoError: continue elif error in (Errors.CoordinatorNotAvailableError, Errors.NotCoordinatorError): @@ -875,7 +873,7 @@ def handle_response(self, response): log.debug("Successfully added partition for consumer group %s to transaction", self.consumer_group_id) # note the result is not completed until the TxnOffsetCommit returns - for tp, offset in six.iteritems(self.offsets): + for tp, offset in self.offsets.items(): self.transaction_manager._pending_txn_offset_commits[tp] = offset handler = TxnOffsetCommitHandler(self.transaction_manager, self.consumer_group_id, self.transaction_manager._pending_txn_offset_commits, self._result) @@ -913,7 +911,7 @@ def _build_request(self): version = 0 topic_data = collections.defaultdict(list) - for tp, offset in six.iteritems(self.offsets): + for tp, offset in self.offsets.items(): if version >= 2: partition_data = (tp.partition, offset.offset, offset.leader_epoch, offset.metadata) else: @@ -947,7 +945,7 @@ def handle_response(self, response): for topic, partition_data in response.topics for partition, error_code in partition_data} - for tp, error in six.iteritems(errors): + for tp, error in errors.items(): if error is Errors.NoError: log.debug("Successfully added offsets for %s from consumer group %s to transaction.", tp, self.consumer_group_id) diff --git a/kafka/sasl/msk.py b/kafka/sasl/msk.py index 0f271d2cf..4d23729a4 100644 --- a/kafka/sasl/msk.py +++ b/kafka/sasl/msk.py @@ -6,6 +6,7 @@ import json import logging import string +import urllib # needed for AWS_MSK_IAM authentication: try: diff --git a/kafka/util.py b/kafka/util.py index e6f7acf2a..5ce4b50c3 100644 --- a/kafka/util.py +++ b/kafka/util.py @@ -7,7 +7,6 @@ import weakref from kafka.errors import KafkaTimeoutError -from kafka.vendor import six MAX_INT = 2 ** 31 @@ -72,7 +71,7 @@ def ensure_valid_topic_name(topic): # https://github.com/apache/kafka/blob/39eb31feaeebfb184d98cc5d94da9148c2319d81/clients/src/main/java/org/apache/kafka/common/internals/Topic.java if topic is None: raise TypeError('All topics must not be None') - if not isinstance(topic, six.string_types): + if not isinstance(topic, str): raise TypeError('All topics must be strings') if len(topic) == 0: raise ValueError('All topics must be non-empty strings') diff --git a/test/integration/test_consumer_group.py b/test/integration/test_consumer_group.py index eed570074..c0b4857a1 100644 --- a/test/integration/test_consumer_group.py +++ b/test/integration/test_consumer_group.py @@ -4,7 +4,6 @@ import time import pytest -from kafka.vendor import six from kafka.conn import ConnectionStates from kafka.consumer.group import KafkaConsumer @@ -60,7 +59,7 @@ def consumer_thread(i): api_version_auto_timeout_ms=5000, heartbeat_interval_ms=500) while not stop[i].is_set(): - for tp, records in six.iteritems(consumers[i].poll(timeout_ms=200)): + for tp, records in consumers[i].poll(timeout_ms=200).items(): messages[i][tp].extend(records) consumers[i].close(timeout_ms=500) consumers[i] = None @@ -84,7 +83,7 @@ def consumer_thread(i): time.sleep(1) continue - unassigned_consumers = {c for c, consumer in six.iteritems(consumers) if not consumer.assignment()} + unassigned_consumers = {c for c, consumer in consumers.items() if not consumer.assignment()} if unassigned_consumers: logging.info('Waiting for consumer assignments: 
%s', unassigned_consumers) time.sleep(1) @@ -95,14 +94,14 @@ def consumer_thread(i): # Verify all consumers are in the same generation # then log state and break while loop generations = set([consumer._coordinator._generation.generation_id - for consumer in six.itervalues(consumers)]) + for consumer in consumers.values()]) # New generation assignment is not complete until # coordinator.rejoining = False - rejoining = set([c for c, consumer in six.iteritems(consumers) if consumer._coordinator.rejoining]) + rejoining = set([c for c, consumer in consumers.items() if consumer._coordinator.rejoining]) if not rejoining and len(generations) == 1: - for c, consumer in six.iteritems(consumers): + for c, consumer in consumers.items(): logging.info("[%s] %s %s: %s", c, consumer._coordinator._generation.generation_id, consumer._coordinator._generation.member_id, diff --git a/test/test_assignors.py b/test/test_assignors.py index f2ecc0325..e79fae353 100644 --- a/test/test_assignors.py +++ b/test/test_assignors.py @@ -12,7 +12,6 @@ from kafka.coordinator.assignors.sticky.sticky_assignor import StickyPartitionAssignor from kafka.coordinator.protocol import ConsumerProtocolMemberAssignment_v0 from kafka.coordinator.subscription import Subscription -from kafka.vendor import six @pytest.fixture(autouse=True) @@ -111,7 +110,7 @@ def test_sticky_assignor1(mocker): del subscriptions['C1'] member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, sticky_assignment[member].partitions()) sticky_assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -154,7 +153,7 @@ def test_sticky_assignor2(mocker): 'C2': {'t0', 't1', 't2'}, } member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, []) sticky_assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -167,7 +166,7 @@ def test_sticky_assignor2(mocker): del subscriptions['C0'] member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, sticky_assignment[member].partitions()) sticky_assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -326,7 +325,7 @@ def test_sticky_add_remove_consumer_one_topic(mocker): 'C2': {'t'}, } member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata( topics, assignment[member].partitions() if member in assignment else [] ) @@ -338,7 +337,7 @@ def test_sticky_add_remove_consumer_one_topic(mocker): 'C2': {'t'}, } member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, assignment[member].partitions()) assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -367,7 +366,7 @@ def test_sticky_add_remove_topic_two_consumers(mocker): 'C2': {'t1', 't2'}, } member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, sticky_assignment[member].partitions()) sticky_assignment = 
StickyPartitionAssignor.assign(cluster, member_metadata) @@ -382,7 +381,7 @@ def test_sticky_add_remove_topic_two_consumers(mocker): 'C2': {'t2'}, } member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, sticky_assignment[member].partitions()) sticky_assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -413,7 +412,7 @@ def test_sticky_reassignment_after_one_consumer_leaves(mocker): del subscriptions['C10'] member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, assignment[member].partitions()) assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -435,7 +434,7 @@ def test_sticky_reassignment_after_one_consumer_added(mocker): subscriptions['C10'] = {'t'} member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata( topics, assignment[member].partitions() if member in assignment else [] ) @@ -452,7 +451,7 @@ def test_sticky_same_subscriptions(mocker): subscriptions = defaultdict(set) for i in range(1, 9): - for j in range(1, len(six.viewkeys(partitions)) + 1): + for j in range(1, len(partitions) + 1): subscriptions['C{}'.format(i)].add('t{}'.format(j)) member_metadata = make_member_metadata(subscriptions) @@ -462,7 +461,7 @@ def test_sticky_same_subscriptions(mocker): del subscriptions['C5'] member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, assignment[member].partitions()) assignment = StickyPartitionAssignor.assign(cluster, member_metadata) verify_validity_and_balance(subscriptions, assignment) @@ -488,7 +487,7 @@ def test_sticky_large_assignment_with_multiple_consumers_leaving(mocker): verify_validity_and_balance(subscriptions, assignment) member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, assignment[member].partitions()) for i in range(50): @@ -517,7 +516,7 @@ def test_new_subscription(mocker): subscriptions['C0'].add('t1') member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, []) assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -540,7 +539,7 @@ def test_move_existing_assignments(mocker): } member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, member_assignments[member]) assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -560,7 +559,7 @@ def test_stickiness(mocker): assignment = StickyPartitionAssignor.assign(cluster, member_metadata) verify_validity_and_balance(subscriptions, assignment) partitions_assigned = {} - for consumer, consumer_assignment in six.iteritems(assignment): + for consumer, consumer_assignment in assignment.items(): assert ( len(consumer_assignment.partitions()) <= 1 ), 'Consumer {} is assigned more topic partitions than expected.'.format(consumer) @@ -570,14 
+569,14 @@ def test_stickiness(mocker): # removing the potential group leader del subscriptions['C1'] member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, assignment[member].partitions()) assignment = StickyPartitionAssignor.assign(cluster, member_metadata) verify_validity_and_balance(subscriptions, assignment) assert StickyPartitionAssignor._latest_partition_movements.are_sticky() - for consumer, consumer_assignment in six.iteritems(assignment): + for consumer, consumer_assignment in assignment.items(): assert ( len(consumer_assignment.partitions()) <= 1 ), 'Consumer {} is assigned more topic partitions than expected.'.format(consumer) @@ -625,7 +624,7 @@ def test_no_exceptions_when_only_subscribed_topic_is_deleted(mocker): 'C': {}, } member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, sticky_assignment[member].partitions()) cluster = create_cluster(mocker, topics={}, topics_partitions={}) @@ -644,7 +643,7 @@ def test_conflicting_previous_assignments(mocker): 'C2': {'t'}, } member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): # assume both C1 and C2 have partition 1 assigned to them in generation 1 member_metadata[member] = StickyPartitionAssignor._metadata(topics, [TopicPartition('t', 0), TopicPartition('t', 0)], 1) @@ -676,7 +675,7 @@ def test_reassignment_with_random_subscriptions_and_changes(mocker, execution_nu subscriptions['C{}'.format(i)].update(topics_sample) member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, assignment[member].partitions()) assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -778,7 +777,7 @@ def test_assignment_with_conflicting_previous_generations(mocker, execution_numb 'C3': 2, } member_metadata = {} - for member in six.iterkeys(member_assignments): + for member in member_assignments: member_metadata[member] = StickyPartitionAssignor._metadata({'t'}, member_assignments[member], member_generations[member]) assignment = StickyPartitionAssignor.assign(cluster, member_metadata) @@ -788,7 +787,7 @@ def test_assignment_with_conflicting_previous_generations(mocker, execution_numb def make_member_metadata(subscriptions): member_metadata = {} - for member, topics in six.iteritems(subscriptions): + for member, topics in subscriptions.items(): member_metadata[member] = StickyPartitionAssignor._metadata(topics, []) return member_metadata @@ -813,9 +812,9 @@ def verify_validity_and_balance(subscriptions, assignment): :param subscriptions topic subscriptions of each consumer :param assignment: given assignment for balance check """ - assert six.viewkeys(subscriptions) == six.viewkeys(assignment) + assert subscriptions.keys() == assignment.keys() - consumers = sorted(six.viewkeys(assignment)) + consumers = sorted(assignment.keys()) for i in range(len(consumers)): consumer = consumers[i] partitions = assignment[consumer].partitions() @@ -846,7 +845,7 @@ def verify_validity_and_balance(subscriptions, assignment): assignments_by_topic = group_partitions_by_topic(partitions) other_assignments_by_topic = group_partitions_by_topic(other_partitions) if len(partitions) > 
len(other_partitions): - for topic in six.iterkeys(assignments_by_topic): + for topic in assignments_by_topic: assert topic not in other_assignments_by_topic, ( 'Error: Some partitions can be moved from {} ({} partitions) ' 'to {} ({} partitions) ' @@ -855,7 +854,7 @@ def verify_validity_and_balance(subscriptions, assignment): 'Assignments: {}'.format(consumer, len(partitions), other_consumer, len(other_partitions), subscriptions, assignment) ) if len(other_partitions) > len(partitions): - for topic in six.iterkeys(other_assignments_by_topic): + for topic in other_assignments_by_topic: assert topic not in assignments_by_topic, ( 'Error: Some partitions can be moved from {} ({} partitions) ' 'to {} ({} partitions) ' diff --git a/test/test_sender.py b/test/test_sender.py index 567f1b2ad..c2246abb6 100644 --- a/test/test_sender.py +++ b/test/test_sender.py @@ -11,8 +11,6 @@ except ImportError: from mock import call -from kafka.vendor import six - from kafka.client_async import KafkaClient from kafka.cluster import ClusterMetadata import kafka.errors as Errors diff --git a/test/test_subscription_state.py b/test/test_subscription_state.py index 773606525..11fdb3baf 100644 --- a/test/test_subscription_state.py +++ b/test/test_subscription_state.py @@ -4,7 +4,6 @@ from kafka import TopicPartition from kafka.consumer.subscription_state import SubscriptionState, TopicPartitionState -from kafka.vendor import six def test_type_error(): @@ -44,8 +43,8 @@ def test_assign_from_subscribed(): s.assign_from_subscribed([TopicPartition('foo', 0), TopicPartition('foo', 1)]) assert set(s.assignment.keys()) == set([TopicPartition('foo', 0), TopicPartition('foo', 1)]) - assert all([isinstance(tps, TopicPartitionState) for tps in six.itervalues(s.assignment)]) - assert all([not tps.has_valid_position for tps in six.itervalues(s.assignment)]) + assert all([isinstance(tps, TopicPartitionState) for tps in s.assignment.values()]) + assert all([not tps.has_valid_position for tps in s.assignment.values()]) def test_change_subscription_after_assignment(): From 5760dcb5b72d80339884a5c3cfaae8e4e995b7b7 Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 17:54:56 -0800 Subject: [PATCH 05/18] remove kafka.vendor.six --- kafka/vendor/six.py | 1004 ------------------------------------------- 1 file changed, 1004 deletions(-) delete mode 100644 kafka/vendor/six.py diff --git a/kafka/vendor/six.py b/kafka/vendor/six.py deleted file mode 100644 index 319821353..000000000 --- a/kafka/vendor/six.py +++ /dev/null @@ -1,1004 +0,0 @@ -# pylint: skip-file - -# Copyright (c) 2010-2020 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -"""Utilities for writing code that runs on Python 2 and 3""" - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson " -__version__ = "1.16.0" - - -# Useful for very coarse version differentiation. -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - - # Don't del it here, cause with gc disabled this "leaks" to garbage. - # Note: This is a kafka-python customization, details at: - # https://github.com/dpkp/kafka-python/pull/979#discussion_r100403389 - # del X - -if PY34: - from importlib.util import spec_from_loader -else: - spec_from_loader = None - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. 
- delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." + fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def find_spec(self, fullname, path, target=None): - if fullname in self.known_modules: - return spec_from_loader(fullname, self) - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. 
- - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - - def create_module(self, spec): - return self.load_module(spec.name) - - def exec_module(self, module): - pass - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("getoutput", "commands", "subprocess"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections", "IterableUserDict", "UserDict"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", 
"thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." + attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("splitvalue", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - 
MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), - MovedAttribute("parse_http_list", "urllib2", "urllib.request"), - MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), -] 
-for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - 
- Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack - del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - del io - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - _assertNotRegex = "assertNotRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" - _assertNotRegex = "assertNotRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - _assertNotRegex = "assertNotRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -def assertNotRegex(self, *args, **kwargs): - return getattr(self, 
_assertNotRegex)(*args, **kwargs) - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - try: - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - finally: - value = None - tb = None - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - try: - raise tp, value, tb - finally: - tb = None -""") - - -if sys.version_info[:2] > (3,): - exec_("""def raise_from(value, from_value): - try: - raise value from from_value - finally: - value = None -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. - if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - # This does exactly the same what the :func:`py3:functools.update_wrapper` - # function does on Python versions after 3.2. It sets the ``__wrapped__`` - # attribute on ``wrapper`` object and it doesn't raise an error if any of - # the attributes mentioned in ``assigned`` and ``updated`` are missing on - # ``wrapped`` object. 
- def _update_wrapper(wrapper, wrapped, - assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - for attr in assigned: - try: - value = getattr(wrapped, attr) - except AttributeError: - continue - else: - setattr(wrapper, attr, value) - for attr in updated: - getattr(wrapper, attr).update(getattr(wrapped, attr, {})) - wrapper.__wrapped__ = wrapped - return wrapper - _update_wrapper.__doc__ = functools.update_wrapper.__doc__ - - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - return functools.partial(_update_wrapper, wrapped=wrapped, - assigned=assigned, updated=updated) - wraps.__doc__ = functools.wraps.__doc__ - -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(type): - - def __new__(cls, name, this_bases, d): - if sys.version_info[:2] >= (3, 7): - # This version introduced PEP 560 that requires a bit - # of extra care (we mimic what is done by __build_class__). - resolved_bases = types.resolve_bases(bases) - if resolved_bases is not bases: - d['__orig_bases__'] = bases - else: - resolved_bases = bases - return meta(name, resolved_bases, d) - - @classmethod - def __prepare__(cls, name, this_bases): - return meta.__prepare__(name, bases) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - if hasattr(cls, '__qualname__'): - orig_vars['__qualname__'] = cls.__qualname__ - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def ensure_binary(s, encoding='utf-8', errors='strict'): - """Coerce **s** to six.binary_type. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> encoded to `bytes` - - `bytes` -> `bytes` - """ - if isinstance(s, binary_type): - return s - if isinstance(s, text_type): - return s.encode(encoding, errors) - raise TypeError("not expecting type '%s'" % type(s)) - - -def ensure_str(s, encoding='utf-8', errors='strict'): - """Coerce *s* to `str`. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - # Optimization: Fast return for the common case. - if type(s) is str: - return s - if PY2 and isinstance(s, text_type): - return s.encode(encoding, errors) - elif PY3 and isinstance(s, binary_type): - return s.decode(encoding, errors) - elif not isinstance(s, (text_type, binary_type)): - raise TypeError("not expecting type '%s'" % type(s)) - return s - - -def ensure_text(s, encoding='utf-8', errors='strict'): - """Coerce *s* to six.text_type. 
- - For Python 2: - - `unicode` -> `unicode` - - `str` -> `unicode` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - if isinstance(s, binary_type): - return s.decode(encoding, errors) - elif isinstance(s, text_type): - return s - else: - raise TypeError("not expecting type '%s'" % type(s)) - - -def python_2_unicode_compatible(klass): - """ - A class decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. 
-sys.meta_path.append(_importer) From 1ec857269582e585e6578acc2939094c86b25c0e Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 17:55:33 -0800 Subject: [PATCH 06/18] Drop kafka.vendor.enum34 --- kafka/admin/acl_resource.py | 7 +- kafka/admin/config_resource.py | 7 +- kafka/consumer/subscription_state.py | 7 +- kafka/producer/producer_batch.py | 8 +- kafka/producer/transaction_manager.py | 8 +- kafka/protocol/admin.py | 7 +- kafka/vendor/enum34.py | 841 -------------------------- 7 files changed, 6 insertions(+), 879 deletions(-) delete mode 100644 kafka/vendor/enum34.py diff --git a/kafka/admin/acl_resource.py b/kafka/admin/acl_resource.py index 8ae1e978d..9ab9a899f 100644 --- a/kafka/admin/acl_resource.py +++ b/kafka/admin/acl_resource.py @@ -1,11 +1,6 @@ from __future__ import absolute_import -# enum in stdlib as of py3.4 -try: - from enum import IntEnum # pylint: disable=import-error -except ImportError: - # vendored backport module - from kafka.vendor.enum34 import IntEnum +from enum import IntEnum from kafka.errors import IllegalArgumentError diff --git a/kafka/admin/config_resource.py b/kafka/admin/config_resource.py index 06754ba9f..2ffdbf0ff 100644 --- a/kafka/admin/config_resource.py +++ b/kafka/admin/config_resource.py @@ -1,11 +1,6 @@ from __future__ import absolute_import -# enum in stdlib as of py3.4 -try: - from enum import IntEnum # pylint: disable=import-error -except ImportError: - # vendored backport module - from kafka.vendor.enum34 import IntEnum +from enum import IntEnum class ConfigResourceType(IntEnum): diff --git a/kafka/consumer/subscription_state.py b/kafka/consumer/subscription_state.py index 7dc3345f9..bed76fda4 100644 --- a/kafka/consumer/subscription_state.py +++ b/kafka/consumer/subscription_state.py @@ -6,12 +6,7 @@ from collections.abc import Sequence except ImportError: from collections import Sequence -try: - # enum in stdlib as of py3.4 - from enum import IntEnum # pylint: disable=import-error -except ImportError: - # vendored backport module - from kafka.vendor.enum34 import IntEnum +from enum import IntEnum import logging import random import re diff --git a/kafka/producer/producer_batch.py b/kafka/producer/producer_batch.py index 198a3a0c5..7a082bac8 100644 --- a/kafka/producer/producer_batch.py +++ b/kafka/producer/producer_batch.py @@ -1,15 +1,9 @@ from __future__ import absolute_import, division +from enum import IntEnum import logging import time -try: - # enum in stdlib as of py3.4 - from enum import IntEnum # pylint: disable=import-error -except ImportError: - # vendored backport module - from kafka.vendor.enum34 import IntEnum - import kafka.errors as Errors from kafka.producer.future import FutureRecordMetadata, FutureProduceResult diff --git a/kafka/producer/transaction_manager.py b/kafka/producer/transaction_manager.py index 5a84bd069..b10344525 100644 --- a/kafka/producer/transaction_manager.py +++ b/kafka/producer/transaction_manager.py @@ -2,17 +2,11 @@ import abc import collections +from enum import IntEnum import heapq import logging import threading -try: - # enum in stdlib as of py3.4 - from enum import IntEnum # pylint: disable=import-error -except ImportError: - # vendored backport module - from kafka.vendor.enum34 import IntEnum - import kafka.errors as Errors from kafka.protocol.add_offsets_to_txn import AddOffsetsToTxnRequest from kafka.protocol.add_partitions_to_txn import AddPartitionsToTxnRequest diff --git a/kafka/protocol/admin.py b/kafka/protocol/admin.py index 32b75df4b..afc615770 100644 --- 
a/kafka/protocol/admin.py +++ b/kafka/protocol/admin.py @@ -1,11 +1,6 @@ from __future__ import absolute_import -# enum in stdlib as of py3.4 -try: - from enum import IntEnum # pylint: disable=import-error -except ImportError: - # vendored backport module - from kafka.vendor.enum34 import IntEnum +from enum import IntEnum from kafka.protocol.api import Request, Response from kafka.protocol.types import Array, Boolean, Bytes, Int8, Int16, Int32, Int64, Schema, String, Float64, CompactString, CompactArray, TaggedFields, BitField diff --git a/kafka/vendor/enum34.py b/kafka/vendor/enum34.py deleted file mode 100644 index 5f64bd2d8..000000000 --- a/kafka/vendor/enum34.py +++ /dev/null @@ -1,841 +0,0 @@ -# pylint: skip-file -# vendored from: -# https://bitbucket.org/stoneleaf/enum34/src/58c4cd7174ca35f164304c8a6f0a4d47b779c2a7/enum/__init__.py?at=1.1.6 - -"""Python Enumerations""" - -import sys as _sys - -__all__ = ['Enum', 'IntEnum', 'unique'] - -version = 1, 1, 6 - -pyver = float('%s.%s' % _sys.version_info[:2]) - -try: - any -except NameError: - def any(iterable): - for element in iterable: - if element: - return True - return False - -try: - from collections import OrderedDict -except ImportError: - OrderedDict = None - -try: - basestring -except NameError: - # In Python 2 basestring is the ancestor of both str and unicode - # in Python 3 it's just str, but was missing in 3.1 - basestring = str - -try: - unicode -except NameError: - # In Python 3 unicode no longer exists (it's just str) - unicode = str - -class _RouteClassAttributeToGetattr(object): - """Route attribute access on a class to __getattr__. - - This is a descriptor, used to define attributes that act differently when - accessed through an instance and through a class. Instance access remains - normal, but access to an attribute through a class will be routed to the - class's __getattr__ method; this is done by raising AttributeError. - - """ - def __init__(self, fget=None): - self.fget = fget - - def __get__(self, instance, ownerclass=None): - if instance is None: - raise AttributeError() - return self.fget(instance) - - def __set__(self, instance, value): - raise AttributeError("can't set attribute") - - def __delete__(self, instance): - raise AttributeError("can't delete attribute") - - -def _is_descriptor(obj): - """Returns True if obj is a descriptor, False otherwise.""" - return ( - hasattr(obj, '__get__') or - hasattr(obj, '__set__') or - hasattr(obj, '__delete__')) - - -def _is_dunder(name): - """Returns True if a __dunder__ name, False otherwise.""" - return (name[:2] == name[-2:] == '__' and - name[2:3] != '_' and - name[-3:-2] != '_' and - len(name) > 4) - - -def _is_sunder(name): - """Returns True if a _sunder_ name, False otherwise.""" - return (name[0] == name[-1] == '_' and - name[1:2] != '_' and - name[-2:-1] != '_' and - len(name) > 2) - - -def _make_class_unpicklable(cls): - """Make the given class un-picklable.""" - def _break_on_call_reduce(self, protocol=None): - raise TypeError('%r cannot be pickled' % self) - cls.__reduce_ex__ = _break_on_call_reduce - cls.__module__ = '' - - -class _EnumDict(dict): - """Track enum member order and ensure member names are not reused. - - EnumMeta will use the names found in self._member_names as the - enumeration member names. - - """ - def __init__(self): - super(_EnumDict, self).__init__() - self._member_names = [] - - def __setitem__(self, key, value): - """Changes anything not dundered or not a descriptor. 
- - If a descriptor is added with the same name as an enum member, the name - is removed from _member_names (this may leave a hole in the numerical - sequence of values). - - If an enum member name is used twice, an error is raised; duplicate - values are not checked for. - - Single underscore (sunder) names are reserved. - - Note: in 3.x __order__ is simply discarded as a not necessary piece - leftover from 2.x - - """ - if pyver >= 3.0 and key in ('_order_', '__order__'): - return - elif key == '__order__': - key = '_order_' - if _is_sunder(key): - if key != '_order_': - raise ValueError('_names_ are reserved for future Enum use') - elif _is_dunder(key): - pass - elif key in self._member_names: - # descriptor overwriting an enum? - raise TypeError('Attempted to reuse key: %r' % key) - elif not _is_descriptor(value): - if key in self: - # enum overwriting a descriptor? - raise TypeError('Key already defined as: %r' % self[key]) - self._member_names.append(key) - super(_EnumDict, self).__setitem__(key, value) - - -# Dummy value for Enum as EnumMeta explicity checks for it, but of course until -# EnumMeta finishes running the first time the Enum class doesn't exist. This -# is also why there are checks in EnumMeta like `if Enum is not None` -Enum = None - - -class EnumMeta(type): - """Metaclass for Enum""" - @classmethod - def __prepare__(metacls, cls, bases): - return _EnumDict() - - def __new__(metacls, cls, bases, classdict): - # an Enum class is final once enumeration items have been defined; it - # cannot be mixed with other types (int, float, etc.) if it has an - # inherited __new__ unless a new __new__ is defined (or the resulting - # class will fail). - if type(classdict) is dict: - original_dict = classdict - classdict = _EnumDict() - for k, v in original_dict.items(): - classdict[k] = v - - member_type, first_enum = metacls._get_mixins_(bases) - __new__, save_new, use_args = metacls._find_new_(classdict, member_type, - first_enum) - # save enum items into separate mapping so they don't get baked into - # the new class - members = dict((k, classdict[k]) for k in classdict._member_names) - for name in classdict._member_names: - del classdict[name] - - # py2 support for definition order - _order_ = classdict.get('_order_') - if _order_ is None: - if pyver < 3.0: - try: - _order_ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])] - except TypeError: - _order_ = [name for name in sorted(members.keys())] - else: - _order_ = classdict._member_names - else: - del classdict['_order_'] - if pyver < 3.0: - _order_ = _order_.replace(',', ' ').split() - aliases = [name for name in members if name not in _order_] - _order_ += aliases - - # check for illegal enum names (any others?) - invalid_names = set(members) & set(['mro']) - if invalid_names: - raise ValueError('Invalid enum member name(s): %s' % ( - ', '.join(invalid_names), )) - - # save attributes from super classes so we know if we can take - # the shortcut of storing members in the class dict - base_attributes = set([a for b in bases for a in b.__dict__]) - # create our new Enum type - enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict) - enum_class._member_names_ = [] # names in random order - if OrderedDict is not None: - enum_class._member_map_ = OrderedDict() - else: - enum_class._member_map_ = {} # name->value map - enum_class._member_type_ = member_type - - # Reverse value->name map for hashable values. 
- enum_class._value2member_map_ = {} - - # instantiate them, checking for duplicates as we go - # we instantiate first instead of checking for duplicates first in case - # a custom __new__ is doing something funky with the values -- such as - # auto-numbering ;) - if __new__ is None: - __new__ = enum_class.__new__ - for member_name in _order_: - value = members[member_name] - if not isinstance(value, tuple): - args = (value, ) - else: - args = value - if member_type is tuple: # special case for tuple enums - args = (args, ) # wrap it one more time - if not use_args or not args: - enum_member = __new__(enum_class) - if not hasattr(enum_member, '_value_'): - enum_member._value_ = value - else: - enum_member = __new__(enum_class, *args) - if not hasattr(enum_member, '_value_'): - enum_member._value_ = member_type(*args) - value = enum_member._value_ - enum_member._name_ = member_name - enum_member.__objclass__ = enum_class - enum_member.__init__(*args) - # If another member with the same value was already defined, the - # new member becomes an alias to the existing one. - for name, canonical_member in enum_class._member_map_.items(): - if canonical_member.value == enum_member._value_: - enum_member = canonical_member - break - else: - # Aliases don't appear in member names (only in __members__). - enum_class._member_names_.append(member_name) - # performance boost for any member that would not shadow - # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr) - if member_name not in base_attributes: - setattr(enum_class, member_name, enum_member) - # now add to _member_map_ - enum_class._member_map_[member_name] = enum_member - try: - # This may fail if value is not hashable. We can't add the value - # to the map, and by-value lookups for this value will be - # linear. - enum_class._value2member_map_[value] = enum_member - except TypeError: - pass - - - # If a custom type is mixed into the Enum, and it does not know how - # to pickle itself, pickle.dumps will succeed but pickle.loads will - # fail. Rather than have the error show up later and possibly far - # from the source, sabotage the pickle protocol for this class so - # that pickle.dumps also fails. - # - # However, if the new class implements its own __reduce_ex__, do not - # sabotage -- it's on them to make sure it works correctly. We use - # __reduce_ex__ instead of any of the others as it is preferred by - # pickle over __reduce__, and it handles all pickle protocols. 
- unpicklable = False - if '__reduce_ex__' not in classdict: - if member_type is not object: - methods = ('__getnewargs_ex__', '__getnewargs__', - '__reduce_ex__', '__reduce__') - if not any(m in member_type.__dict__ for m in methods): - _make_class_unpicklable(enum_class) - unpicklable = True - - - # double check that repr and friends are not the mixin's or various - # things break (such as pickle) - for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): - class_method = getattr(enum_class, name) - obj_method = getattr(member_type, name, None) - enum_method = getattr(first_enum, name, None) - if name not in classdict and class_method is not enum_method: - if name == '__reduce_ex__' and unpicklable: - continue - setattr(enum_class, name, enum_method) - - # method resolution and int's are not playing nice - # Python's less than 2.6 use __cmp__ - - if pyver < 2.6: - - if issubclass(enum_class, int): - setattr(enum_class, '__cmp__', getattr(int, '__cmp__')) - - elif pyver < 3.0: - - if issubclass(enum_class, int): - for method in ( - '__le__', - '__lt__', - '__gt__', - '__ge__', - '__eq__', - '__ne__', - '__hash__', - ): - setattr(enum_class, method, getattr(int, method)) - - # replace any other __new__ with our own (as long as Enum is not None, - # anyway) -- again, this is to support pickle - if Enum is not None: - # if the user defined their own __new__, save it before it gets - # clobbered in case they subclass later - if save_new: - setattr(enum_class, '__member_new__', enum_class.__dict__['__new__']) - setattr(enum_class, '__new__', Enum.__dict__['__new__']) - return enum_class - - def __bool__(cls): - """ - classes/types should always be True. - """ - return True - - def __call__(cls, value, names=None, module=None, type=None, start=1): - """Either returns an existing member, or creates a new enum class. - - This method is used both when an enum class is given a value to match - to an enumeration member (i.e. Color(3)) and for the functional API - (i.e. Color = Enum('Color', names='red green blue')). - - When used for the functional API: `module`, if set, will be stored in - the new class' __module__ attribute; `type`, if set, will be mixed in - as the first base class. - - Note: if `module` is not set this routine will attempt to discover the - calling module by walking the frame stack; if this is unsuccessful - the resulting class will not be pickleable. - - """ - if names is None: # simple value lookup - return cls.__new__(cls, value) - # otherwise, functional API: we're creating a new Enum type - return cls._create_(value, names, module=module, type=type, start=start) - - def __contains__(cls, member): - return isinstance(member, cls) and member.name in cls._member_map_ - - def __delattr__(cls, attr): - # nicer error message when someone tries to delete an attribute - # (see issue19025). - if attr in cls._member_map_: - raise AttributeError( - "%s: cannot delete Enum member." % cls.__name__) - super(EnumMeta, cls).__delattr__(attr) - - def __dir__(self): - return (['__class__', '__doc__', '__members__', '__module__'] + - self._member_names_) - - @property - def __members__(cls): - """Returns a mapping of member name->value. - - This mapping lists all enum members, including aliases. Note that this - is a copy of the internal mapping. 
- 
- """
- return cls._member_map_.copy()
- 
- def __getattr__(cls, name):
- """Return the enum member matching `name`
- 
- We use __getattr__ instead of descriptors or inserting into the enum
- class' __dict__ in order to support `name` and `value` being both
- properties for enum members (which live in the class' __dict__) and
- enum members themselves.
- 
- """
- if _is_dunder(name):
- raise AttributeError(name)
- try:
- return cls._member_map_[name]
- except KeyError:
- raise AttributeError(name)
- 
- def __getitem__(cls, name):
- return cls._member_map_[name]
- 
- def __iter__(cls):
- return (cls._member_map_[name] for name in cls._member_names_)
- 
- def __reversed__(cls):
- return (cls._member_map_[name] for name in reversed(cls._member_names_))
- 
- def __len__(cls):
- return len(cls._member_names_)
- 
- __nonzero__ = __bool__
- 
- def __repr__(cls):
- return "<enum %r>" % cls.__name__
- 
- def __setattr__(cls, name, value):
- """Block attempts to reassign Enum members.
- 
- A simple assignment to the class namespace only changes one of the
- several possible ways to get an Enum member from the Enum class,
- resulting in an inconsistent Enumeration.
- 
- """
- member_map = cls.__dict__.get('_member_map_', {})
- if name in member_map:
- raise AttributeError('Cannot reassign members.')
- super(EnumMeta, cls).__setattr__(name, value)
- 
- def _create_(cls, class_name, names=None, module=None, type=None, start=1):
- """Convenience method to create a new Enum class.
- 
- `names` can be:
- 
- * A string containing member names, separated either with spaces or
- commas. Values are auto-numbered from 1.
- * An iterable of member names. Values are auto-numbered from 1.
- * An iterable of (member name, value) pairs.
- * A mapping of member name -> value.
- 
- """
- if pyver < 3.0:
- # if class_name is unicode, attempt a conversion to ASCII
- if isinstance(class_name, unicode):
- try:
- class_name = class_name.encode('ascii')
- except UnicodeEncodeError:
- raise TypeError('%r is not representable in ASCII' % class_name)
- metacls = cls.__class__
- if type is None:
- bases = (cls, )
- else:
- bases = (type, cls)
- classdict = metacls.__prepare__(class_name, bases)
- _order_ = []
- 
- # special processing needed for names?
- if isinstance(names, basestring):
- names = names.replace(',', ' ').split()
- if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
- names = [(e, i+start) for (i, e) in enumerate(names)]
- 
- # Here, names is either an iterable of (name, value) or a mapping.
- item = None # in case names is empty
- for item in names:
- if isinstance(item, basestring):
- member_name, member_value = item, names[item]
- else:
- member_name, member_value = item
- classdict[member_name] = member_value
- _order_.append(member_name)
- # only set _order_ in classdict if name/value was not from a mapping
- if not isinstance(item, basestring):
- classdict['_order_'] = ' '.join(_order_)
- enum_class = metacls.__new__(metacls, class_name, bases, classdict)
- 
- # TODO: replace the frame hack if a blessed way to know the calling
- # module is ever developed
- if module is None:
- try:
- module = _sys._getframe(2).f_globals['__name__']
- except (AttributeError, ValueError):
- pass
- if module is None:
- _make_class_unpicklable(enum_class)
- else:
- enum_class.__module__ = module
- 
- return enum_class
- 
- @staticmethod
- def _get_mixins_(bases):
- """Returns the type for creating enum members, and the first inherited
- enum class.
- 
- bases: the tuple of bases that was given to __new__
- 
- """
- if not bases or Enum is None:
- return object, Enum
- 
- 
- # double check that we are not subclassing a class with existing
- # enumeration members; while we're at it, see if any other data
- # type has been mixed in so we can use the correct __new__
- member_type = first_enum = None
- for base in bases:
- if (base is not Enum and
- issubclass(base, Enum) and
- base._member_names_):
- raise TypeError("Cannot extend enumerations")
- # base is now the last base in bases
- if not issubclass(base, Enum):
- raise TypeError("new enumerations must be created as "
- "`ClassName([mixin_type,] enum_type)`")
- 
- # get correct mix-in type (either mix-in type of Enum subclass, or
- # first base if last base is Enum)
- if not issubclass(bases[0], Enum):
- member_type = bases[0] # first data type
- first_enum = bases[-1] # enum type
- else:
- for base in bases[0].__mro__:
- # most common: (IntEnum, int, Enum, object)
- # possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
- # <class 'int'>, <Enum 'Enum'>,
- # <class 'object'>)
- if issubclass(base, Enum):
- if first_enum is None:
- first_enum = base
- else:
- if member_type is None:
- member_type = base
- 
- return member_type, first_enum
- 
- if pyver < 3.0:
- @staticmethod
- def _find_new_(classdict, member_type, first_enum):
- """Returns the __new__ to be used for creating the enum members.
- 
- classdict: the class dictionary given to __new__
- member_type: the data type whose __new__ will be used by default
- first_enum: enumeration to check for an overriding __new__
- 
- """
- # now find the correct __new__, checking to see of one was defined
- # by the user; also check earlier enum classes in case a __new__ was
- # saved as __member_new__
- __new__ = classdict.get('__new__', None)
- if __new__:
- return None, True, True # __new__, save_new, use_args
- 
- N__new__ = getattr(None, '__new__')
- O__new__ = getattr(object, '__new__')
- if Enum is None:
- E__new__ = N__new__
- else:
- E__new__ = Enum.__dict__['__new__']
- # check all possibles for __member_new__ before falling back to
- # __new__
- for method in ('__member_new__', '__new__'):
- for possible in (member_type, first_enum):
- try:
- target = possible.__dict__[method]
- except (AttributeError, KeyError):
- target = getattr(possible, method, None)
- if target not in [
- None,
- N__new__,
- O__new__,
- E__new__,
- ]:
- if method == '__member_new__':
- classdict['__new__'] = target
- return None, False, True
- if isinstance(target, staticmethod):
- target = target.__get__(member_type)
- __new__ = target
- break
- if __new__ is not None:
- break
- else:
- __new__ = object.__new__
- 
- # if a non-object.__new__ is used then whatever value/tuple was
- # assigned to the enum member name will be passed to __new__ and to the
- # new enum member's __init__
- if __new__ is object.__new__:
- use_args = False
- else:
- use_args = True
- 
- return __new__, False, use_args
- else:
- @staticmethod
- def _find_new_(classdict, member_type, first_enum):
- """Returns the __new__ to be used for creating the enum members.
- 
- classdict: the class dictionary given to __new__
- member_type: the data type whose __new__ will be used by default
- first_enum: enumeration to check for an overriding __new__
- 
- """
- # now find the correct __new__, checking to see of one was defined
- # by the user; also check earlier enum classes in case a __new__ was
- # saved as __member_new__
- __new__ = classdict.get('__new__', None)
- 
- # should __new__ be saved as __member_new__ later?
- save_new = __new__ is not None - - if __new__ is None: - # check all possibles for __member_new__ before falling back to - # __new__ - for method in ('__member_new__', '__new__'): - for possible in (member_type, first_enum): - target = getattr(possible, method, None) - if target not in ( - None, - None.__new__, - object.__new__, - Enum.__new__, - ): - __new__ = target - break - if __new__ is not None: - break - else: - __new__ = object.__new__ - - # if a non-object.__new__ is used then whatever value/tuple was - # assigned to the enum member name will be passed to __new__ and to the - # new enum member's __init__ - if __new__ is object.__new__: - use_args = False - else: - use_args = True - - return __new__, save_new, use_args - - -######################################################## -# In order to support Python 2 and 3 with a single -# codebase we have to create the Enum methods separately -# and then use the `type(name, bases, dict)` method to -# create the class. -######################################################## -temp_enum_dict = {} -temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n" - -def __new__(cls, value): - # all enum instances are actually created during class construction - # without calling this method; this method is called by the metaclass' - # __call__ (i.e. Color(3) ), and by pickle - if type(value) is cls: - # For lookups like Color(Color.red) - value = value.value - #return value - # by-value search for a matching enum member - # see if it's in the reverse mapping (for hashable values) - try: - if value in cls._value2member_map_: - return cls._value2member_map_[value] - except TypeError: - # not there, now do long search -- O(n) behavior - for member in cls._member_map_.values(): - if member.value == value: - return member - raise ValueError("%s is not a valid %s" % (value, cls.__name__)) -temp_enum_dict['__new__'] = __new__ -del __new__ - -def __repr__(self): - return "<%s.%s: %r>" % ( - self.__class__.__name__, self._name_, self._value_) -temp_enum_dict['__repr__'] = __repr__ -del __repr__ - -def __str__(self): - return "%s.%s" % (self.__class__.__name__, self._name_) -temp_enum_dict['__str__'] = __str__ -del __str__ - -if pyver >= 3.0: - def __dir__(self): - added_behavior = [ - m - for cls in self.__class__.mro() - for m in cls.__dict__ - if m[0] != '_' and m not in self._member_map_ - ] - return (['__class__', '__doc__', '__module__', ] + added_behavior) - temp_enum_dict['__dir__'] = __dir__ - del __dir__ - -def __format__(self, format_spec): - # mixed-in Enums should use the mixed-in type's __format__, otherwise - # we can get strange results with the Enum name showing up instead of - # the value - - # pure Enum branch - if self._member_type_ is object: - cls = str - val = str(self) - # mix-in branch - else: - cls = self._member_type_ - val = self.value - return cls.__format__(val, format_spec) -temp_enum_dict['__format__'] = __format__ -del __format__ - - -#################################### -# Python's less than 2.6 use __cmp__ - -if pyver < 2.6: - - def __cmp__(self, other): - if type(other) is self.__class__: - if self is other: - return 0 - return -1 - return NotImplemented - raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__)) - temp_enum_dict['__cmp__'] = __cmp__ - del __cmp__ - -else: - - def __le__(self, other): - raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__)) - 
temp_enum_dict['__le__'] = __le__ - del __le__ - - def __lt__(self, other): - raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__)) - temp_enum_dict['__lt__'] = __lt__ - del __lt__ - - def __ge__(self, other): - raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__)) - temp_enum_dict['__ge__'] = __ge__ - del __ge__ - - def __gt__(self, other): - raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__)) - temp_enum_dict['__gt__'] = __gt__ - del __gt__ - - -def __eq__(self, other): - if type(other) is self.__class__: - return self is other - return NotImplemented -temp_enum_dict['__eq__'] = __eq__ -del __eq__ - -def __ne__(self, other): - if type(other) is self.__class__: - return self is not other - return NotImplemented -temp_enum_dict['__ne__'] = __ne__ -del __ne__ - -def __hash__(self): - return hash(self._name_) -temp_enum_dict['__hash__'] = __hash__ -del __hash__ - -def __reduce_ex__(self, proto): - return self.__class__, (self._value_, ) -temp_enum_dict['__reduce_ex__'] = __reduce_ex__ -del __reduce_ex__ - -# _RouteClassAttributeToGetattr is used to provide access to the `name` -# and `value` properties of enum members while keeping some measure of -# protection from modification, while still allowing for an enumeration -# to have members named `name` and `value`. This works because enumeration -# members are not set directly on the enum class -- __getattr__ is -# used to look them up. - -@_RouteClassAttributeToGetattr -def name(self): - return self._name_ -temp_enum_dict['name'] = name -del name - -@_RouteClassAttributeToGetattr -def value(self): - return self._value_ -temp_enum_dict['value'] = value -del value - -@classmethod -def _convert(cls, name, module, filter, source=None): - """ - Create a new Enum subclass that replaces a collection of global constants - """ - # convert all constants from source (or module) that pass filter() to - # a new Enum called name, and export the enum and its members back to - # module; - # also, replace the __reduce_ex__ method so unpickling works in - # previous Python versions - module_globals = vars(_sys.modules[module]) - if source: - source = vars(source) - else: - source = module_globals - members = dict((name, value) for name, value in source.items() if filter(name)) - cls = cls(name, members, module=module) - cls.__reduce_ex__ = _reduce_ex_by_name - module_globals.update(cls.__members__) - module_globals[name] = cls - return cls -temp_enum_dict['_convert'] = _convert -del _convert - -Enum = EnumMeta('Enum', (object, ), temp_enum_dict) -del temp_enum_dict - -# Enum has now been created -########################### - -class IntEnum(int, Enum): - """Enum where members are also (and must be) ints""" - -def _reduce_ex_by_name(self, proto): - return self.name - -def unique(enumeration): - """Class decorator that ensures only unique members exist in an enumeration.""" - duplicates = [] - for name, member in enumeration.__members__.items(): - if name != member.name: - duplicates.append((name, member.name)) - if duplicates: - duplicate_names = ', '.join( - ["%s -> %s" % (alias, name) for (alias, name) in duplicates] - ) - raise ValueError('duplicate names found in %r: %s' % - (enumeration, duplicate_names) - ) - return enumeration From ff9ab745445d11f5df78e102ba0768e9510e5def Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 17:57:00 -0800 Subject: [PATCH 07/18] Remove 
kafka.vendor.selectors34 --- kafka/client_async.py | 8 +- kafka/conn.py | 9 +- kafka/vendor/selectors34.py | 641 ------------------------------------ test/test_client_async.py | 8 +- 4 files changed, 3 insertions(+), 663 deletions(-) delete mode 100644 kafka/vendor/selectors34.py diff --git a/kafka/client_async.py b/kafka/client_async.py index b0acb92aa..e8654641a 100644 --- a/kafka/client_async.py +++ b/kafka/client_async.py @@ -4,18 +4,12 @@ import copy import logging import random +import selectors import socket import threading import time import weakref -# selectors in stdlib as of py3.4 -try: - import selectors # pylint: disable=import-error -except ImportError: - # vendored backport module - from kafka.vendor import selectors34 as selectors - from kafka.cluster import ClusterMetadata from kafka.conn import BrokerConnection, ConnectionStates, get_ip_port_afi from kafka import errors as Errors diff --git a/kafka/conn.py b/kafka/conn.py index ecd52faca..8558be66b 100755 --- a/kafka/conn.py +++ b/kafka/conn.py @@ -5,14 +5,7 @@ import io import logging from random import uniform - -# selectors in stdlib as of py3.4 -try: - import selectors # pylint: disable=import-error -except ImportError: - # vendored backport module - from kafka.vendor import selectors34 as selectors - +import selectors import socket import threading import time diff --git a/kafka/vendor/selectors34.py b/kafka/vendor/selectors34.py deleted file mode 100644 index 787490340..000000000 --- a/kafka/vendor/selectors34.py +++ /dev/null @@ -1,641 +0,0 @@ -# pylint: skip-file -# vendored from https://github.com/berkerpeksag/selectors34 -# at commit ff61b82168d2cc9c4922ae08e2a8bf94aab61ea2 (unreleased, ~1.2) -# -# Original author: Charles-Francois Natali (c.f.natali[at]gmail.com) -# Maintainer: Berker Peksag (berker.peksag[at]gmail.com) -# Also see https://pypi.python.org/pypi/selectors34 -"""Selectors module. - -This module allows high-level and efficient I/O multiplexing, built upon the -`select` module primitives. - -The following code adapted from trollius.selectors. -""" -from __future__ import absolute_import - -from abc import ABCMeta, abstractmethod -from collections import namedtuple -try: - from collections.abc import Mapping -except ImportError: - from collections import Mapping -from errno import EINTR -import math -import select -import sys - -from kafka.vendor import six - - -def _wrap_error(exc, mapping, key): - if key not in mapping: - return - new_err_cls = mapping[key] - new_err = new_err_cls(*exc.args) - - # raise a new exception with the original traceback - if hasattr(exc, '__traceback__'): - traceback = exc.__traceback__ - else: - traceback = sys.exc_info()[2] - six.reraise(new_err_cls, new_err, traceback) - - -# generic events, that must be mapped to implementation-specific ones -EVENT_READ = (1 << 0) -EVENT_WRITE = (1 << 1) - - -def _fileobj_to_fd(fileobj): - """Return a file descriptor from a file object. 
- - Parameters: - fileobj -- file object or file descriptor - - Returns: - corresponding file descriptor - - Raises: - ValueError if the object is invalid - """ - if isinstance(fileobj, six.integer_types): - fd = fileobj - else: - try: - fd = int(fileobj.fileno()) - except (AttributeError, TypeError, ValueError): - raise ValueError("Invalid file object: " - "{0!r}".format(fileobj)) - if fd < 0: - raise ValueError("Invalid file descriptor: {0}".format(fd)) - return fd - - -SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) -"""Object used to associate a file object to its backing file descriptor, -selected event mask and attached data.""" - - -class _SelectorMapping(Mapping): - """Mapping of file objects to selector keys.""" - - def __init__(self, selector): - self._selector = selector - - def __len__(self): - return len(self._selector._fd_to_key) - - def __getitem__(self, fileobj): - try: - fd = self._selector._fileobj_lookup(fileobj) - return self._selector._fd_to_key[fd] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - def __iter__(self): - return iter(self._selector._fd_to_key) - -# Using six.add_metaclass() decorator instead of six.with_metaclass() because -# the latter leaks temporary_class to garbage with gc disabled -@six.add_metaclass(ABCMeta) -class BaseSelector(object): - """Selector abstract base class. - - A selector supports registering file objects to be monitored for specific - I/O events. - - A file object is a file descriptor or any object with a `fileno()` method. - An arbitrary object can be attached to the file object, which can be used - for example to store context information, a callback, etc. - - A selector can use various implementations (select(), poll(), epoll()...) - depending on the platform. The default `Selector` class uses the most - efficient implementation on the current platform. - """ - - @abstractmethod - def register(self, fileobj, events, data=None): - """Register a file object. - - Parameters: - fileobj -- file object or file descriptor - events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) - data -- attached data - - Returns: - SelectorKey instance - - Raises: - ValueError if events is invalid - KeyError if fileobj is already registered - OSError if fileobj is closed or otherwise is unacceptable to - the underlying system call (if a system call is made) - - Note: - OSError may or may not be raised - """ - raise NotImplementedError - - @abstractmethod - def unregister(self, fileobj): - """Unregister a file object. - - Parameters: - fileobj -- file object or file descriptor - - Returns: - SelectorKey instance - - Raises: - KeyError if fileobj is not registered - - Note: - If fileobj is registered but has since been closed this does - *not* raise OSError (even if the wrapped syscall does) - """ - raise NotImplementedError - - def modify(self, fileobj, events, data=None): - """Change a registered file object monitored events or attached data. - - Parameters: - fileobj -- file object or file descriptor - events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) - data -- attached data - - Returns: - SelectorKey instance - - Raises: - Anything that unregister() or register() raises - """ - self.unregister(fileobj) - return self.register(fileobj, events, data) - - @abstractmethod - def select(self, timeout=None): - """Perform the actual selection, until some monitored file objects are - ready or a timeout expires. 
- - Parameters: - timeout -- if timeout > 0, this specifies the maximum wait time, in - seconds - if timeout <= 0, the select() call won't block, and will - report the currently ready file objects - if timeout is None, select() will block until a monitored - file object becomes ready - - Returns: - list of (key, events) for ready file objects - `events` is a bitwise mask of EVENT_READ|EVENT_WRITE - """ - raise NotImplementedError - - def close(self): - """Close the selector. - - This must be called to make sure that any underlying resource is freed. - """ - pass - - def get_key(self, fileobj): - """Return the key associated to a registered file object. - - Returns: - SelectorKey for this file object - """ - mapping = self.get_map() - if mapping is None: - raise RuntimeError('Selector is closed') - try: - return mapping[fileobj] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - @abstractmethod - def get_map(self): - """Return a mapping of file objects to selector keys.""" - raise NotImplementedError - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - -class _BaseSelectorImpl(BaseSelector): - """Base selector implementation.""" - - def __init__(self): - # this maps file descriptors to keys - self._fd_to_key = {} - # read-only mapping returned by get_map() - self._map = _SelectorMapping(self) - - def _fileobj_lookup(self, fileobj): - """Return a file descriptor from a file object. - - This wraps _fileobj_to_fd() to do an exhaustive search in case - the object is invalid but we still have it in our map. This - is used by unregister() so we can unregister an object that - was previously registered even if it is closed. It is also - used by _SelectorMapping. - """ - try: - return _fileobj_to_fd(fileobj) - except ValueError: - # Do an exhaustive search. - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - return key.fd - # Raise ValueError after all. - raise - - def register(self, fileobj, events, data=None): - if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): - raise ValueError("Invalid events: {0!r}".format(events)) - - key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) - - if key.fd in self._fd_to_key: - raise KeyError("{0!r} (FD {1}) is already registered" - .format(fileobj, key.fd)) - - self._fd_to_key[key.fd] = key - return key - - def unregister(self, fileobj): - try: - key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - return key - - def modify(self, fileobj, events, data=None): - # TODO: Subclasses can probably optimize this even further. - try: - key = self._fd_to_key[self._fileobj_lookup(fileobj)] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - if events != key.events: - self.unregister(fileobj) - key = self.register(fileobj, events, data) - elif data != key.data: - # Use a shortcut to update the data. - key = key._replace(data=data) - self._fd_to_key[key.fd] = key - return key - - def close(self): - self._fd_to_key.clear() - self._map = None - - def get_map(self): - return self._map - - def _key_from_fd(self, fd): - """Return the key associated to a given file descriptor. 
- - Parameters: - fd -- file descriptor - - Returns: - corresponding key, or None if not found - """ - try: - return self._fd_to_key[fd] - except KeyError: - return None - - -class SelectSelector(_BaseSelectorImpl): - """Select-based selector.""" - - def __init__(self): - super(SelectSelector, self).__init__() - self._readers = set() - self._writers = set() - - def register(self, fileobj, events, data=None): - key = super(SelectSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - self._readers.add(key.fd) - if events & EVENT_WRITE: - self._writers.add(key.fd) - return key - - def unregister(self, fileobj): - key = super(SelectSelector, self).unregister(fileobj) - self._readers.discard(key.fd) - self._writers.discard(key.fd) - return key - - if sys.platform == 'win32': - def _select(self, r, w, _, timeout=None): - r, w, x = select.select(r, w, w, timeout) - return r, w + x, [] - else: - _select = staticmethod(select.select) - - def select(self, timeout=None): - timeout = None if timeout is None else max(timeout, 0) - ready = [] - try: - r, w, _ = self._select(self._readers, self._writers, [], timeout) - except select.error as exc: - if exc.args[0] == EINTR: - return ready - else: - raise - r = set(r) - w = set(w) - for fd in r | w: - events = 0 - if fd in r: - events |= EVENT_READ - if fd in w: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - -if hasattr(select, 'poll'): - - class PollSelector(_BaseSelectorImpl): - """Poll-based selector.""" - - def __init__(self): - super(PollSelector, self).__init__() - self._poll = select.poll() - - def register(self, fileobj, events, data=None): - key = super(PollSelector, self).register(fileobj, events, data) - poll_events = 0 - if events & EVENT_READ: - poll_events |= select.POLLIN - if events & EVENT_WRITE: - poll_events |= select.POLLOUT - self._poll.register(key.fd, poll_events) - return key - - def unregister(self, fileobj): - key = super(PollSelector, self).unregister(fileobj) - self._poll.unregister(key.fd) - return key - - def select(self, timeout=None): - if timeout is None: - timeout = None - elif timeout <= 0: - timeout = 0 - else: - # poll() has a resolution of 1 millisecond, round away from - # zero to wait *at least* timeout seconds. 
- timeout = int(math.ceil(timeout * 1e3)) - ready = [] - try: - fd_event_list = self._poll.poll(timeout) - except select.error as exc: - if exc.args[0] == EINTR: - return ready - else: - raise - for fd, event in fd_event_list: - events = 0 - if event & ~select.POLLIN: - events |= EVENT_WRITE - if event & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - -if hasattr(select, 'epoll'): - - class EpollSelector(_BaseSelectorImpl): - """Epoll-based selector.""" - - def __init__(self): - super(EpollSelector, self).__init__() - self._epoll = select.epoll() - - def fileno(self): - return self._epoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(EpollSelector, self).register(fileobj, events, data) - epoll_events = 0 - if events & EVENT_READ: - epoll_events |= select.EPOLLIN - if events & EVENT_WRITE: - epoll_events |= select.EPOLLOUT - self._epoll.register(key.fd, epoll_events) - return key - - def unregister(self, fileobj): - key = super(EpollSelector, self).unregister(fileobj) - try: - self._epoll.unregister(key.fd) - except IOError: - # This can happen if the FD was closed since it - # was registered. - pass - return key - - def select(self, timeout=None): - if timeout is None: - timeout = -1 - elif timeout <= 0: - timeout = 0 - else: - # epoll_wait() has a resolution of 1 millisecond, round away - # from zero to wait *at least* timeout seconds. - timeout = math.ceil(timeout * 1e3) * 1e-3 - - # epoll_wait() expects `maxevents` to be greater than zero; - # we want to make sure that `select()` can be called when no - # FD is registered. - max_ev = max(len(self._fd_to_key), 1) - - ready = [] - try: - fd_event_list = self._epoll.poll(timeout, max_ev) - except IOError as exc: - if exc.errno == EINTR: - return ready - else: - raise - for fd, event in fd_event_list: - events = 0 - if event & ~select.EPOLLIN: - events |= EVENT_WRITE - if event & ~select.EPOLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._epoll.close() - super(EpollSelector, self).close() - - -if hasattr(select, 'devpoll'): - - class DevpollSelector(_BaseSelectorImpl): - """Solaris /dev/poll selector.""" - - def __init__(self): - super(DevpollSelector, self).__init__() - self._devpoll = select.devpoll() - - def fileno(self): - return self._devpoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(DevpollSelector, self).register(fileobj, events, data) - poll_events = 0 - if events & EVENT_READ: - poll_events |= select.POLLIN - if events & EVENT_WRITE: - poll_events |= select.POLLOUT - self._devpoll.register(key.fd, poll_events) - return key - - def unregister(self, fileobj): - key = super(DevpollSelector, self).unregister(fileobj) - self._devpoll.unregister(key.fd) - return key - - def select(self, timeout=None): - if timeout is None: - timeout = None - elif timeout <= 0: - timeout = 0 - else: - # devpoll() has a resolution of 1 millisecond, round away from - # zero to wait *at least* timeout seconds. 
- timeout = math.ceil(timeout * 1e3) - ready = [] - try: - fd_event_list = self._devpoll.poll(timeout) - except OSError as exc: - if exc.errno == EINTR: - return ready - else: - raise - for fd, event in fd_event_list: - events = 0 - if event & ~select.POLLIN: - events |= EVENT_WRITE - if event & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._devpoll.close() - super(DevpollSelector, self).close() - - -if hasattr(select, 'kqueue'): - - class KqueueSelector(_BaseSelectorImpl): - """Kqueue-based selector.""" - - def __init__(self): - super(KqueueSelector, self).__init__() - self._kqueue = select.kqueue() - - def fileno(self): - return self._kqueue.fileno() - - def register(self, fileobj, events, data=None): - key = super(KqueueSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - kev = select.kevent(key.fd, select.KQ_FILTER_READ, - select.KQ_EV_ADD) - self._kqueue.control([kev], 0, 0) - if events & EVENT_WRITE: - kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, - select.KQ_EV_ADD) - self._kqueue.control([kev], 0, 0) - return key - - def unregister(self, fileobj): - key = super(KqueueSelector, self).unregister(fileobj) - if key.events & EVENT_READ: - kev = select.kevent(key.fd, select.KQ_FILTER_READ, - select.KQ_EV_DELETE) - try: - self._kqueue.control([kev], 0, 0) - except OSError: - # This can happen if the FD was closed since it - # was registered. - pass - if key.events & EVENT_WRITE: - kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, - select.KQ_EV_DELETE) - try: - self._kqueue.control([kev], 0, 0) - except OSError: - # See comment above. - pass - return key - - def select(self, timeout=None): - timeout = None if timeout is None else max(timeout, 0) - max_ev = len(self._fd_to_key) - ready = [] - try: - kev_list = self._kqueue.control(None, max_ev, timeout) - except OSError as exc: - if exc.errno == EINTR: - return ready - else: - raise - for kev in kev_list: - fd = kev.ident - flag = kev.filter - events = 0 - if flag == select.KQ_FILTER_READ: - events |= EVENT_READ - if flag == select.KQ_FILTER_WRITE: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._kqueue.close() - super(KqueueSelector, self).close() - - -# Choose the best implementation, roughly: -# epoll|kqueue|devpoll > poll > select. 
-# select() also can't accept a FD > FD_SETSIZE (usually around 1024) -if 'KqueueSelector' in globals(): - DefaultSelector = KqueueSelector -elif 'EpollSelector' in globals(): - DefaultSelector = EpollSelector -elif 'DevpollSelector' in globals(): - DefaultSelector = DevpollSelector -elif 'PollSelector' in globals(): - DefaultSelector = PollSelector -else: - DefaultSelector = SelectSelector diff --git a/test/test_client_async.py b/test/test_client_async.py index acc400f9c..84128e7d4 100644 --- a/test/test_client_async.py +++ b/test/test_client_async.py @@ -1,12 +1,6 @@ from __future__ import absolute_import, division -# selectors in stdlib as of py3.4 -try: - import selectors # pylint: disable=import-error -except ImportError: - # vendored backport module - import kafka.vendor.selectors34 as selectors - +import selectors import socket import time From 39611df7d3fb5237600fcd38ac5f50bf15c0d380 Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 17:57:43 -0800 Subject: [PATCH 08/18] Remove kafka.vendor.socketpair --- kafka/client_async.py | 3 -- kafka/vendor/socketpair.py | 75 -------------------------------------- 2 files changed, 78 deletions(-) delete mode 100644 kafka/vendor/socketpair.py diff --git a/kafka/client_async.py b/kafka/client_async.py index e8654641a..9b1ec0ea3 100644 --- a/kafka/client_async.py +++ b/kafka/client_async.py @@ -20,9 +20,6 @@ from kafka.protocol.broker_api_versions import BROKER_API_VERSIONS from kafka.protocol.metadata import MetadataRequest from kafka.util import Dict, Timer, WeakMethod, ensure_valid_topic_name -# Although this looks unused, it actually monkey-patches socket.socketpair() -# and should be left in as long as we're using socket.socketpair() in this file -from kafka.vendor import socketpair # noqa: F401 from kafka.version import __version__ diff --git a/kafka/vendor/socketpair.py b/kafka/vendor/socketpair.py deleted file mode 100644 index 54d908767..000000000 --- a/kafka/vendor/socketpair.py +++ /dev/null @@ -1,75 +0,0 @@ -# pylint: skip-file -# vendored from https://github.com/mhils/backports.socketpair -from __future__ import absolute_import - -import sys -import socket -import errno - -_LOCALHOST = '127.0.0.1' -_LOCALHOST_V6 = '::1' - -if not hasattr(socket, "socketpair"): - # Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. - def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): - if family == socket.AF_INET: - host = _LOCALHOST - elif family == socket.AF_INET6: - host = _LOCALHOST_V6 - else: - raise ValueError("Only AF_INET and AF_INET6 socket address families " - "are supported") - if type != socket.SOCK_STREAM: - raise ValueError("Only SOCK_STREAM socket type is supported") - if proto != 0: - raise ValueError("Only protocol zero is supported") - - # We create a connected TCP socket. Note the trick with - # setblocking(False) that prevents us from having to create a thread. 
- lsock = socket.socket(family, type, proto) - try: - lsock.bind((host, 0)) - lsock.listen(min(socket.SOMAXCONN, 128)) - # On IPv6, ignore flow_info and scope_id - addr, port = lsock.getsockname()[:2] - csock = socket.socket(family, type, proto) - try: - csock.setblocking(False) - if sys.version_info >= (3, 0): - try: - csock.connect((addr, port)) - except (BlockingIOError, InterruptedError): - pass - else: - try: - csock.connect((addr, port)) - except socket.error as e: - if e.errno != errno.WSAEWOULDBLOCK: - raise - csock.setblocking(True) - ssock, _ = lsock.accept() - except Exception: - csock.close() - raise - finally: - lsock.close() - - # Authenticating avoids using a connection from something else - # able to connect to {host}:{port} instead of us. - # We expect only AF_INET and AF_INET6 families. - try: - if ( - ssock.getsockname() != csock.getpeername() - or csock.getsockname() != ssock.getpeername() - ): - raise ConnectionError("Unexpected peer connection") - except: - # getsockname() and getpeername() can fail - # if either socket isn't connected. - ssock.close() - csock.close() - raise - - return (ssock, csock) - - socket.socketpair = socketpair From 0aef6ccc10ecf70f629b87c113d14a7e67d662e4 Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 18:09:46 -0800 Subject: [PATCH 09/18] Remove py2 logging.NullHandler shim --- kafka/__init__.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/kafka/__init__.py b/kafka/__init__.py index 41a014072..f3ecadc48 100644 --- a/kafka/__init__.py +++ b/kafka/__init__.py @@ -8,14 +8,8 @@ # Set default logging handler to avoid "No handler found" warnings. import logging -try: # Python 2.7+ - from logging import NullHandler -except ImportError: - class NullHandler(logging.Handler): - def emit(self, record): - pass -logging.getLogger(__name__).addHandler(NullHandler()) +logging.getLogger(__name__).addHandler(logging.NullHandler()) from kafka.admin import KafkaAdminClient From 5f26083f02b38f3b243de11176c914dec5cfda2b Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 18:10:01 -0800 Subject: [PATCH 10/18] Remove py2 OrderedDict shim --- kafka/client_async.py | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/kafka/client_async.py b/kafka/client_async.py index 9b1ec0ea3..102a84caa 100644 --- a/kafka/client_async.py +++ b/kafka/client_async.py @@ -1180,14 +1180,6 @@ def send_and_receive(self, node_id, request): return future.value -# OrderedDict requires python2.7+ -try: - from collections import OrderedDict -except ImportError: - # If we dont have OrderedDict, we'll fallback to dict with O(n) priority reads - OrderedDict = dict - - class IdleConnectionManager(object): def __init__(self, connections_max_idle_ms): if connections_max_idle_ms > 0: @@ -1196,7 +1188,7 @@ def __init__(self, connections_max_idle_ms): self.connections_max_idle = float('inf') self.next_idle_close_check_time = None self.update_next_idle_close_check_time(time.time()) - self.lru_connections = OrderedDict() + self.lru_connections = collections.OrderedDict() def update(self, conn_id): # order should reflect last-update @@ -1234,13 +1226,7 @@ def poll_expired_connection(self): oldest_conn_id = None oldest_ts = None - if OrderedDict is dict: - for conn_id, ts in self.lru_connections.items(): - if oldest_conn_id is None or ts < oldest_ts: - oldest_conn_id = conn_id - oldest_ts = ts - else: - (oldest_conn_id, oldest_ts) = next(iter(self.lru_connections.items())) + (oldest_conn_id, oldest_ts) = 
next(iter(self.lru_connections.items())) self.update_next_idle_close_check_time(oldest_ts) From fc7927aebf773a49a4f1c0bd7a34865af99197cd Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 18:10:31 -0800 Subject: [PATCH 11/18] Remove old py2 collections.Sequence import --- kafka/consumer/subscription_state.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/kafka/consumer/subscription_state.py b/kafka/consumer/subscription_state.py index bed76fda4..0a8be676e 100644 --- a/kafka/consumer/subscription_state.py +++ b/kafka/consumer/subscription_state.py @@ -2,10 +2,7 @@ import abc from collections import OrderedDict -try: - from collections.abc import Sequence -except ImportError: - from collections import Sequence +from collections.abc import Sequence from enum import IntEnum import logging import random From e7a33a81abdb74f1b222f79ec12038e0fbf51470 Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 18:11:08 -0800 Subject: [PATCH 12/18] Remove py2 urlparse import shim --- kafka/socks5_wrapper.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/kafka/socks5_wrapper.py b/kafka/socks5_wrapper.py index 6715f2093..e8f3c9682 100755 --- a/kafka/socks5_wrapper.py +++ b/kafka/socks5_wrapper.py @@ -1,13 +1,9 @@ -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse - import errno import logging import random import socket import struct +from urllib.parse import urlparse log = logging.getLogger(__name__) From 8afb1fdbe95b4169851217116666e88ec5c477c3 Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 18:11:48 -0800 Subject: [PATCH 13/18] Remove py2 unittest/mock shim --- requirements-dev.txt | 1 - test/integration/test_consumer_integration.py | 5 +---- test/record/test_default_records.py | 8 ++++---- test/record/test_legacy_records.py | 8 ++++---- test/sasl/test_gssapi.py | 5 +---- test/sasl/test_msk.py | 6 +----- test/test_conn.py | 5 +---- test/test_sender.py | 5 +---- 8 files changed, 13 insertions(+), 30 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 8de5e28d4..840bdb7f3 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,7 +3,6 @@ crc32c docker-py flake8 lz4 -mock; python_version < '3.3' py pylint pyperf diff --git a/test/integration/test_consumer_integration.py b/test/integration/test_consumer_integration.py index 8f63619c7..c7b725383 100644 --- a/test/integration/test_consumer_integration.py +++ b/test/integration/test_consumer_integration.py @@ -1,10 +1,7 @@ import logging import time +from unittest.mock import patch, ANY -try: - from unittest.mock import patch, ANY -except ImportError: - from mock import patch, ANY import pytest import kafka.codec diff --git a/test/record/test_default_records.py b/test/record/test_default_records.py index 540705d50..5fb708066 100644 --- a/test/record/test_default_records.py +++ b/test/record/test_default_records.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals + +from unittest.mock import patch + import pytest -try: - from unittest.mock import patch -except ImportError: - from mock import patch + import kafka.codec from kafka.record.default_records import ( DefaultRecordBatch, DefaultRecordBatchBuilder diff --git a/test/record/test_legacy_records.py b/test/record/test_legacy_records.py index c692d35a1..e4a8685b6 100644 --- a/test/record/test_legacy_records.py +++ b/test/record/test_legacy_records.py @@ -1,9 +1,9 @@ from __future__ import unicode_literals + 
+from unittest.mock import patch + import pytest -try: - from unittest.mock import patch -except ImportError: - from mock import patch + from kafka.record.legacy_records import ( LegacyRecordBatch, LegacyRecordBatchBuilder ) diff --git a/test/sasl/test_gssapi.py b/test/sasl/test_gssapi.py index aa1d86b9e..2939cc196 100644 --- a/test/sasl/test_gssapi.py +++ b/test/sasl/test_gssapi.py @@ -1,9 +1,6 @@ from __future__ import absolute_import -try: - from unittest import mock -except ImportError: - import mock +from unittest import mock from kafka.sasl import get_sasl_mechanism import kafka.sasl.gssapi diff --git a/test/sasl/test_msk.py b/test/sasl/test_msk.py index f3cc46ce8..ec51eb4c6 100644 --- a/test/sasl/test_msk.py +++ b/test/sasl/test_msk.py @@ -1,14 +1,10 @@ import datetime import json import sys +from unittest import mock from kafka.sasl.msk import AwsMskIamClient, SaslMechanismAwsMskIam -try: - from unittest import mock -except ImportError: - import mock - def client_factory(token=None): if sys.version_info >= (3, 3): diff --git a/test/test_conn.py b/test/test_conn.py index 457d5e170..73a32397c 100644 --- a/test/test_conn.py +++ b/test/test_conn.py @@ -3,11 +3,8 @@ from errno import EALREADY, EINPROGRESS, EISCONN, ECONNRESET import socket +from unittest import mock -try: - from unittest import mock -except ImportError: - import mock import pytest from kafka.conn import BrokerConnection, ConnectionStates diff --git a/test/test_sender.py b/test/test_sender.py index c2246abb6..4e52fb5d5 100644 --- a/test/test_sender.py +++ b/test/test_sender.py @@ -4,12 +4,9 @@ import collections import io import time +from unittest.mock import call import pytest -try: - from unittest.mock import call -except ImportError: - from mock import call from kafka.client_async import KafkaClient from kafka.cluster import ClusterMetadata From 5d3e6faac1ecfa94cc0f120e08dc8e0bd69c5ec2 Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 18:18:56 -0800 Subject: [PATCH 14/18] Drop __future__ imports --- kafka/__init__.py | 2 -- kafka/admin/__init__.py | 2 -- kafka/admin/__main__.py | 2 -- kafka/admin/acl_resource.py | 2 -- kafka/admin/client.py | 2 -- kafka/admin/config_resource.py | 2 -- kafka/admin/new_partitions.py | 3 --- kafka/admin/new_topic.py | 3 --- kafka/benchmarks/consumer_performance.py | 2 -- kafka/benchmarks/load_example.py | 1 - kafka/benchmarks/producer_performance.py | 2 -- kafka/benchmarks/record_batch_compose.py | 1 - kafka/benchmarks/record_batch_read.py | 1 - kafka/benchmarks/varint_speed.py | 1 - kafka/cli/admin/__init__.py | 2 -- kafka/cli/admin/cluster/__init__.py | 2 -- kafka/cli/admin/cluster/describe.py | 3 --- kafka/cli/admin/configs/__init__.py | 2 -- kafka/cli/admin/configs/describe.py | 2 -- kafka/cli/admin/consumer_groups/__init__.py | 2 -- kafka/cli/admin/consumer_groups/delete.py | 3 --- kafka/cli/admin/consumer_groups/describe.py | 3 --- kafka/cli/admin/consumer_groups/list.py | 3 --- kafka/cli/admin/consumer_groups/list_offsets.py | 3 --- kafka/cli/admin/log_dirs/__init__.py | 2 -- kafka/cli/admin/log_dirs/describe.py | 3 --- kafka/cli/admin/topics/__init__.py | 2 -- kafka/cli/admin/topics/create.py | 2 -- kafka/cli/admin/topics/delete.py | 3 --- kafka/cli/admin/topics/describe.py | 3 --- kafka/cli/admin/topics/list.py | 3 --- kafka/cli/consumer/__init__.py | 2 -- kafka/cli/producer/__init__.py | 2 -- kafka/client_async.py | 2 -- kafka/cluster.py | 2 -- kafka/codec.py | 2 -- kafka/conn.py | 2 -- kafka/consumer/__init__.py | 2 -- kafka/consumer/__main__.py | 2 -- 
kafka/consumer/fetcher.py | 2 -- kafka/consumer/group.py | 2 -- kafka/consumer/subscription_state.py | 2 -- kafka/coordinator/assignors/abstract.py | 2 -- kafka/coordinator/assignors/range.py | 2 -- kafka/coordinator/assignors/roundrobin.py | 2 -- kafka/coordinator/base.py | 2 -- kafka/coordinator/consumer.py | 2 -- kafka/coordinator/heartbeat.py | 2 -- kafka/coordinator/protocol.py | 2 -- kafka/coordinator/subscription.py | 3 --- kafka/errors.py | 2 -- kafka/future.py | 2 -- kafka/metrics/__init__.py | 2 -- kafka/metrics/compound_stat.py | 2 -- kafka/metrics/dict_reporter.py | 2 -- kafka/metrics/kafka_metric.py | 2 -- kafka/metrics/measurable.py | 2 -- kafka/metrics/measurable_stat.py | 2 -- kafka/metrics/metric_config.py | 2 -- kafka/metrics/metric_name.py | 2 -- kafka/metrics/metrics.py | 2 -- kafka/metrics/metrics_reporter.py | 2 -- kafka/metrics/quota.py | 3 --- kafka/metrics/stat.py | 2 -- kafka/metrics/stats/__init__.py | 2 -- kafka/metrics/stats/avg.py | 2 -- kafka/metrics/stats/count.py | 2 -- kafka/metrics/stats/histogram.py | 2 -- kafka/metrics/stats/max_stat.py | 2 -- kafka/metrics/stats/min_stat.py | 2 -- kafka/metrics/stats/percentile.py | 3 --- kafka/metrics/stats/percentiles.py | 2 -- kafka/metrics/stats/rate.py | 2 -- kafka/metrics/stats/sampled_stat.py | 2 -- kafka/metrics/stats/sensor.py | 2 -- kafka/metrics/stats/total.py | 2 -- kafka/partitioner/__init__.py | 2 -- kafka/partitioner/default.py | 2 -- kafka/producer/__init__.py | 2 -- kafka/producer/__main__.py | 2 -- kafka/producer/future.py | 2 -- kafka/producer/kafka.py | 2 -- kafka/producer/producer_batch.py | 2 -- kafka/producer/record_accumulator.py | 2 -- kafka/producer/sender.py | 2 -- kafka/producer/transaction_manager.py | 2 -- kafka/protocol/__init__.py | 3 --- kafka/protocol/abstract.py | 2 -- kafka/protocol/add_offsets_to_txn.py | 2 -- kafka/protocol/add_partitions_to_txn.py | 2 -- kafka/protocol/admin.py | 2 -- kafka/protocol/api.py | 2 -- kafka/protocol/api_versions.py | 2 -- kafka/protocol/commit.py | 2 -- kafka/protocol/end_txn.py | 2 -- kafka/protocol/fetch.py | 2 -- kafka/protocol/find_coordinator.py | 2 -- kafka/protocol/group.py | 2 -- kafka/protocol/init_producer_id.py | 2 -- kafka/protocol/list_offsets.py | 2 -- kafka/protocol/message.py | 2 -- kafka/protocol/metadata.py | 2 -- kafka/protocol/offset_for_leader_epoch.py | 2 -- kafka/protocol/parser.py | 2 -- kafka/protocol/produce.py | 2 -- kafka/protocol/sasl_authenticate.py | 2 -- kafka/protocol/sasl_handshake.py | 2 -- kafka/protocol/struct.py | 2 -- kafka/protocol/txn_offset_commit.py | 2 -- kafka/protocol/types.py | 2 -- kafka/record/abc.py | 2 -- kafka/record/memory_records.py | 1 - kafka/sasl/__init__.py | 2 -- kafka/sasl/abc.py | 2 -- kafka/sasl/gssapi.py | 2 -- kafka/sasl/msk.py | 2 -- kafka/sasl/oauth.py | 2 -- kafka/sasl/plain.py | 2 -- kafka/sasl/scram.py | 2 -- kafka/sasl/sspi.py | 2 -- kafka/serializer/__init__.py | 2 -- kafka/serializer/abstract.py | 2 -- kafka/structs.py | 1 - kafka/util.py | 2 -- test/__init__.py | 2 -- test/conftest.py | 2 -- test/integration/conftest.py | 2 -- test/integration/fixtures.py | 2 -- test/integration/test_producer_integration.py | 2 -- test/record/test_default_records.py | 1 - test/record/test_legacy_records.py | 2 -- test/record/test_records.py | 1 - test/sasl/test_gssapi.py | 2 -- test/service.py | 2 -- test/test_assignors.py | 1 - test/test_client_async.py | 2 -- test/test_cluster.py | 1 - test/test_codec.py | 2 -- test/test_conn.py | 1 - test/test_consumer.py | 2 -- test/test_coordinator.py | 1 - 
test/test_fetcher.py | 1 - test/test_partitioner.py | 2 -- test/test_producer.py | 2 -- test/test_producer_batch.py | 1 - test/test_record_accumulator.py | 1 - test/test_sender.py | 1 - test/test_subscription_state.py | 2 -- test/test_util.py | 1 - test/testutil.py | 2 -- 150 files changed, 298 deletions(-) diff --git a/kafka/__init__.py b/kafka/__init__.py index f3ecadc48..e394188f8 100644 --- a/kafka/__init__.py +++ b/kafka/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - __title__ = 'kafka' from kafka.version import __version__ __author__ = 'Dana Powers' diff --git a/kafka/admin/__init__.py b/kafka/admin/__init__.py index c240fc6d0..c67fb9e6a 100644 --- a/kafka/admin/__init__.py +++ b/kafka/admin/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.admin.config_resource import ConfigResource, ConfigResourceType from kafka.admin.client import KafkaAdminClient from kafka.admin.acl_resource import (ACL, ACLFilter, ResourcePattern, ResourcePatternFilter, ACLOperation, diff --git a/kafka/admin/__main__.py b/kafka/admin/__main__.py index 776063869..74dc8b33b 100644 --- a/kafka/admin/__main__.py +++ b/kafka/admin/__main__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import sys from kafka.cli.admin import run_cli diff --git a/kafka/admin/acl_resource.py b/kafka/admin/acl_resource.py index 9ab9a899f..2ac4b4f43 100644 --- a/kafka/admin/acl_resource.py +++ b/kafka/admin/acl_resource.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from enum import IntEnum from kafka.errors import IllegalArgumentError diff --git a/kafka/admin/client.py b/kafka/admin/client.py index c804ecae7..144309174 100644 --- a/kafka/admin/client.py +++ b/kafka/admin/client.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - from collections import defaultdict import copy import itertools diff --git a/kafka/admin/config_resource.py b/kafka/admin/config_resource.py index 2ffdbf0ff..347ff915f 100644 --- a/kafka/admin/config_resource.py +++ b/kafka/admin/config_resource.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from enum import IntEnum diff --git a/kafka/admin/new_partitions.py b/kafka/admin/new_partitions.py index 429b2e190..bc3b8172e 100644 --- a/kafka/admin/new_partitions.py +++ b/kafka/admin/new_partitions.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class NewPartitions(object): """A class for new partition creation on existing topics. Note that the length of new_assignments, if specified, must be the difference between the new total number of partitions and the existing number of partitions. 
diff --git a/kafka/admin/new_topic.py b/kafka/admin/new_topic.py index e43c52226..931e73448 100644 --- a/kafka/admin/new_topic.py +++ b/kafka/admin/new_topic.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class NewTopic(object): """ A class for new topic creation Arguments: diff --git a/kafka/benchmarks/consumer_performance.py b/kafka/benchmarks/consumer_performance.py index c35a164c2..e76264145 100644 --- a/kafka/benchmarks/consumer_performance.py +++ b/kafka/benchmarks/consumer_performance.py @@ -1,8 +1,6 @@ #!/usr/bin/env python # Adapted from https://github.com/mrafayaleem/kafka-jython -from __future__ import absolute_import, print_function - import argparse import pprint import sys diff --git a/kafka/benchmarks/load_example.py b/kafka/benchmarks/load_example.py index 29796a74c..6f24e9b37 100644 --- a/kafka/benchmarks/load_example.py +++ b/kafka/benchmarks/load_example.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -from __future__ import print_function import argparse import logging diff --git a/kafka/benchmarks/producer_performance.py b/kafka/benchmarks/producer_performance.py index df26459a4..df7c51f29 100644 --- a/kafka/benchmarks/producer_performance.py +++ b/kafka/benchmarks/producer_performance.py @@ -1,8 +1,6 @@ #!/usr/bin/env python # Adapted from https://github.com/mrafayaleem/kafka-jython -from __future__ import absolute_import, print_function - import argparse import pprint import sys diff --git a/kafka/benchmarks/record_batch_compose.py b/kafka/benchmarks/record_batch_compose.py index 5b07fd59a..b7d05e37f 100644 --- a/kafka/benchmarks/record_batch_compose.py +++ b/kafka/benchmarks/record_batch_compose.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -from __future__ import print_function import hashlib import itertools import os diff --git a/kafka/benchmarks/record_batch_read.py b/kafka/benchmarks/record_batch_read.py index 2ef32298d..ab5ae5227 100644 --- a/kafka/benchmarks/record_batch_read.py +++ b/kafka/benchmarks/record_batch_read.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -from __future__ import print_function import hashlib import itertools import os diff --git a/kafka/benchmarks/varint_speed.py b/kafka/benchmarks/varint_speed.py index 506a9ecf5..de57b6198 100644 --- a/kafka/benchmarks/varint_speed.py +++ b/kafka/benchmarks/varint_speed.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -from __future__ import print_function import pyperf diff --git a/kafka/cli/admin/__init__.py b/kafka/cli/admin/__init__.py index d16aab802..dd20ea6de 100644 --- a/kafka/cli/admin/__init__.py +++ b/kafka/cli/admin/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import argparse import json import logging diff --git a/kafka/cli/admin/cluster/__init__.py b/kafka/cli/admin/cluster/__init__.py index 735228565..7876c0afa 100644 --- a/kafka/cli/admin/cluster/__init__.py +++ b/kafka/cli/admin/cluster/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import sys from .describe import DescribeCluster diff --git a/kafka/cli/admin/cluster/describe.py b/kafka/cli/admin/cluster/describe.py index 6a2ff06e5..40cc4c0ac 100644 --- a/kafka/cli/admin/cluster/describe.py +++ b/kafka/cli/admin/cluster/describe.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class DescribeCluster: @classmethod diff --git a/kafka/cli/admin/configs/__init__.py b/kafka/cli/admin/configs/__init__.py index 7ec6d1042..75b555f40 100644 --- a/kafka/cli/admin/configs/__init__.py +++ b/kafka/cli/admin/configs/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import 
sys from .describe import DescribeConfigs diff --git a/kafka/cli/admin/configs/describe.py b/kafka/cli/admin/configs/describe.py index 3ff366667..0f32a744f 100644 --- a/kafka/cli/admin/configs/describe.py +++ b/kafka/cli/admin/configs/describe.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.admin.config_resource import ConfigResource diff --git a/kafka/cli/admin/consumer_groups/__init__.py b/kafka/cli/admin/consumer_groups/__init__.py index cfb1bdb4f..dea4513cb 100644 --- a/kafka/cli/admin/consumer_groups/__init__.py +++ b/kafka/cli/admin/consumer_groups/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import sys from .delete import DeleteConsumerGroups diff --git a/kafka/cli/admin/consumer_groups/delete.py b/kafka/cli/admin/consumer_groups/delete.py index 5724ae551..40fc593ac 100644 --- a/kafka/cli/admin/consumer_groups/delete.py +++ b/kafka/cli/admin/consumer_groups/delete.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class DeleteConsumerGroups: @classmethod diff --git a/kafka/cli/admin/consumer_groups/describe.py b/kafka/cli/admin/consumer_groups/describe.py index 02298e9a4..e1f3393e2 100644 --- a/kafka/cli/admin/consumer_groups/describe.py +++ b/kafka/cli/admin/consumer_groups/describe.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class DescribeConsumerGroups: @classmethod diff --git a/kafka/cli/admin/consumer_groups/list.py b/kafka/cli/admin/consumer_groups/list.py index 6c02f3bee..dfeb954f3 100644 --- a/kafka/cli/admin/consumer_groups/list.py +++ b/kafka/cli/admin/consumer_groups/list.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class ListConsumerGroups: @classmethod diff --git a/kafka/cli/admin/consumer_groups/list_offsets.py b/kafka/cli/admin/consumer_groups/list_offsets.py index 7c05c5aae..ff8f33acb 100644 --- a/kafka/cli/admin/consumer_groups/list_offsets.py +++ b/kafka/cli/admin/consumer_groups/list_offsets.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class ListConsumerGroupOffsets: @classmethod diff --git a/kafka/cli/admin/log_dirs/__init__.py b/kafka/cli/admin/log_dirs/__init__.py index 3a4a94351..46f5a254f 100644 --- a/kafka/cli/admin/log_dirs/__init__.py +++ b/kafka/cli/admin/log_dirs/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import sys from .describe import DescribeLogDirs diff --git a/kafka/cli/admin/log_dirs/describe.py b/kafka/cli/admin/log_dirs/describe.py index a0c301f82..6c3c27bea 100644 --- a/kafka/cli/admin/log_dirs/describe.py +++ b/kafka/cli/admin/log_dirs/describe.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class DescribeLogDirs: @classmethod diff --git a/kafka/cli/admin/topics/__init__.py b/kafka/cli/admin/topics/__init__.py index dbf569445..bcb1973af 100644 --- a/kafka/cli/admin/topics/__init__.py +++ b/kafka/cli/admin/topics/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import sys from .create import CreateTopic diff --git a/kafka/cli/admin/topics/create.py b/kafka/cli/admin/topics/create.py index d033f6b30..0844b411b 100644 --- a/kafka/cli/admin/topics/create.py +++ b/kafka/cli/admin/topics/create.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.admin.new_topic import NewTopic diff --git a/kafka/cli/admin/topics/delete.py b/kafka/cli/admin/topics/delete.py index a88400ef2..70a1e749b 100644 --- a/kafka/cli/admin/topics/delete.py +++ b/kafka/cli/admin/topics/delete.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class DeleteTopic: 
@classmethod diff --git a/kafka/cli/admin/topics/describe.py b/kafka/cli/admin/topics/describe.py index 2e96871d6..b053524b4 100644 --- a/kafka/cli/admin/topics/describe.py +++ b/kafka/cli/admin/topics/describe.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class DescribeTopics: @classmethod diff --git a/kafka/cli/admin/topics/list.py b/kafka/cli/admin/topics/list.py index 2dbf3828e..2c5d48ccc 100644 --- a/kafka/cli/admin/topics/list.py +++ b/kafka/cli/admin/topics/list.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class ListTopics: @classmethod diff --git a/kafka/cli/consumer/__init__.py b/kafka/cli/consumer/__init__.py index 32be19665..89a6fb0f4 100644 --- a/kafka/cli/consumer/__init__.py +++ b/kafka/cli/consumer/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function - import argparse import logging diff --git a/kafka/cli/producer/__init__.py b/kafka/cli/producer/__init__.py index 01d94b666..38d94c30f 100644 --- a/kafka/cli/producer/__init__.py +++ b/kafka/cli/producer/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function - import argparse import logging import sys diff --git a/kafka/client_async.py b/kafka/client_async.py index 102a84caa..734efb65b 100644 --- a/kafka/client_async.py +++ b/kafka/client_async.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import collections import copy import logging diff --git a/kafka/cluster.py b/kafka/cluster.py index 315b1d225..09832fde2 100644 --- a/kafka/cluster.py +++ b/kafka/cluster.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import collections import copy import logging diff --git a/kafka/codec.py b/kafka/codec.py index a47d93928..7177a646f 100644 --- a/kafka/codec.py +++ b/kafka/codec.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import gzip import io import platform diff --git a/kafka/conn.py b/kafka/conn.py index 8558be66b..a213a4c53 100755 --- a/kafka/conn.py +++ b/kafka/conn.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import copy import errno import io diff --git a/kafka/consumer/__init__.py b/kafka/consumer/__init__.py index e09bcc1b8..5341d5648 100644 --- a/kafka/consumer/__init__.py +++ b/kafka/consumer/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.consumer.group import KafkaConsumer __all__ = [ diff --git a/kafka/consumer/__main__.py b/kafka/consumer/__main__.py index 0356a1aae..c2de706aa 100644 --- a/kafka/consumer/__main__.py +++ b/kafka/consumer/__main__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import sys from kafka.cli.consumer import run_cli diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py index 6e52dc80d..7d67fb825 100644 --- a/kafka/consumer/fetcher.py +++ b/kafka/consumer/fetcher.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import collections import copy import itertools diff --git a/kafka/consumer/group.py b/kafka/consumer/group.py index b2d79fb44..dd6986154 100644 --- a/kafka/consumer/group.py +++ b/kafka/consumer/group.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import copy import logging import re diff --git a/kafka/consumer/subscription_state.py b/kafka/consumer/subscription_state.py index 0a8be676e..578e70a07 100644 --- a/kafka/consumer/subscription_state.py +++ b/kafka/consumer/subscription_state.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc from collections import OrderedDict from collections.abc 
import Sequence diff --git a/kafka/coordinator/assignors/abstract.py b/kafka/coordinator/assignors/abstract.py index a6fe970d2..3cdc2cace 100644 --- a/kafka/coordinator/assignors/abstract.py +++ b/kafka/coordinator/assignors/abstract.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc import logging diff --git a/kafka/coordinator/assignors/range.py b/kafka/coordinator/assignors/range.py index 6da6c3c91..307ae0b76 100644 --- a/kafka/coordinator/assignors/range.py +++ b/kafka/coordinator/assignors/range.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import collections import itertools import logging diff --git a/kafka/coordinator/assignors/roundrobin.py b/kafka/coordinator/assignors/roundrobin.py index 05d319497..f73a10679 100644 --- a/kafka/coordinator/assignors/roundrobin.py +++ b/kafka/coordinator/assignors/roundrobin.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import collections import itertools import logging diff --git a/kafka/coordinator/base.py b/kafka/coordinator/base.py index b4bbb1f45..12e7ac40b 100644 --- a/kafka/coordinator/base.py +++ b/kafka/coordinator/base.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import abc import copy import logging diff --git a/kafka/coordinator/consumer.py b/kafka/coordinator/consumer.py index 8860966f8..0bce86aaf 100644 --- a/kafka/coordinator/consumer.py +++ b/kafka/coordinator/consumer.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import collections import copy import functools diff --git a/kafka/coordinator/heartbeat.py b/kafka/coordinator/heartbeat.py index edc9f4a36..fb40c302b 100644 --- a/kafka/coordinator/heartbeat.py +++ b/kafka/coordinator/heartbeat.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import copy import logging import time diff --git a/kafka/coordinator/protocol.py b/kafka/coordinator/protocol.py index bfa1c4695..51957a468 100644 --- a/kafka/coordinator/protocol.py +++ b/kafka/coordinator/protocol.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.struct import Struct from kafka.protocol.types import Array, Bytes, Int16, Int32, Schema, String from kafka.structs import TopicPartition diff --git a/kafka/coordinator/subscription.py b/kafka/coordinator/subscription.py index ca49c1bc0..b5c47994d 100644 --- a/kafka/coordinator/subscription.py +++ b/kafka/coordinator/subscription.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class Subscription(object): __slots__ = ('_metadata', '_group_instance_id') def __init__(self, metadata, group_instance_id): diff --git a/kafka/errors.py b/kafka/errors.py index dffa35f35..96fdcb223 100644 --- a/kafka/errors.py +++ b/kafka/errors.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import inspect import sys diff --git a/kafka/future.py b/kafka/future.py index 2af061ee7..5d53c2192 100644 --- a/kafka/future.py +++ b/kafka/future.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import functools import logging import threading diff --git a/kafka/metrics/__init__.py b/kafka/metrics/__init__.py index 2a62d6334..22427e967 100644 --- a/kafka/metrics/__init__.py +++ b/kafka/metrics/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.metrics.compound_stat import NamedMeasurable from kafka.metrics.dict_reporter import DictReporter from kafka.metrics.kafka_metric import KafkaMetric diff --git a/kafka/metrics/compound_stat.py b/kafka/metrics/compound_stat.py index 73788b265..656e1122c 100644 --- 
a/kafka/metrics/compound_stat.py +++ b/kafka/metrics/compound_stat.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc from kafka.metrics.stat import AbstractStat diff --git a/kafka/metrics/dict_reporter.py b/kafka/metrics/dict_reporter.py index 0b98fe1e4..49af60476 100644 --- a/kafka/metrics/dict_reporter.py +++ b/kafka/metrics/dict_reporter.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import logging import threading diff --git a/kafka/metrics/kafka_metric.py b/kafka/metrics/kafka_metric.py index fef684850..97799245b 100644 --- a/kafka/metrics/kafka_metric.py +++ b/kafka/metrics/kafka_metric.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import time diff --git a/kafka/metrics/measurable.py b/kafka/metrics/measurable.py index b06d4d789..ef096f31d 100644 --- a/kafka/metrics/measurable.py +++ b/kafka/metrics/measurable.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc diff --git a/kafka/metrics/measurable_stat.py b/kafka/metrics/measurable_stat.py index 3eec18ad9..f16798c5a 100644 --- a/kafka/metrics/measurable_stat.py +++ b/kafka/metrics/measurable_stat.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc from kafka.metrics.measurable import AbstractMeasurable diff --git a/kafka/metrics/metric_config.py b/kafka/metrics/metric_config.py index 7e5ead1fe..008dfa6d8 100644 --- a/kafka/metrics/metric_config.py +++ b/kafka/metrics/metric_config.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import sys diff --git a/kafka/metrics/metric_name.py b/kafka/metrics/metric_name.py index b8ab2a3ad..db351829c 100644 --- a/kafka/metrics/metric_name.py +++ b/kafka/metrics/metric_name.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import copy diff --git a/kafka/metrics/metrics.py b/kafka/metrics/metrics.py index 41a37db58..f00833837 100644 --- a/kafka/metrics/metrics.py +++ b/kafka/metrics/metrics.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import logging import sys import time diff --git a/kafka/metrics/metrics_reporter.py b/kafka/metrics/metrics_reporter.py index e38528c42..fd7de8d5e 100644 --- a/kafka/metrics/metrics_reporter.py +++ b/kafka/metrics/metrics_reporter.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc diff --git a/kafka/metrics/quota.py b/kafka/metrics/quota.py index 36a30c44e..2cf6e9089 100644 --- a/kafka/metrics/quota.py +++ b/kafka/metrics/quota.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class Quota(object): """An upper or lower bound for metrics""" __slots__ = ('_bound', '_upper') diff --git a/kafka/metrics/stat.py b/kafka/metrics/stat.py index 8c76b5415..2588493a7 100644 --- a/kafka/metrics/stat.py +++ b/kafka/metrics/stat.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc diff --git a/kafka/metrics/stats/__init__.py b/kafka/metrics/stats/__init__.py index a3d535dfd..04afb61ae 100644 --- a/kafka/metrics/stats/__init__.py +++ b/kafka/metrics/stats/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.metrics.stats.avg import Avg from kafka.metrics.stats.count import Count from kafka.metrics.stats.histogram import Histogram diff --git a/kafka/metrics/stats/avg.py b/kafka/metrics/stats/avg.py index 906d95573..c7e7dc485 100644 --- a/kafka/metrics/stats/avg.py +++ b/kafka/metrics/stats/avg.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.metrics.stats.sampled_stat import AbstractSampledStat diff --git a/kafka/metrics/stats/count.py 
b/kafka/metrics/stats/count.py index 6cd6d2abe..060bd2d99 100644 --- a/kafka/metrics/stats/count.py +++ b/kafka/metrics/stats/count.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.metrics.stats.sampled_stat import AbstractSampledStat diff --git a/kafka/metrics/stats/histogram.py b/kafka/metrics/stats/histogram.py index 2c8afbfb3..019373184 100644 --- a/kafka/metrics/stats/histogram.py +++ b/kafka/metrics/stats/histogram.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import math diff --git a/kafka/metrics/stats/max_stat.py b/kafka/metrics/stats/max_stat.py index 9c5eeb6fd..5e0382b3b 100644 --- a/kafka/metrics/stats/max_stat.py +++ b/kafka/metrics/stats/max_stat.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.metrics.stats.sampled_stat import AbstractSampledStat diff --git a/kafka/metrics/stats/min_stat.py b/kafka/metrics/stats/min_stat.py index 6bebe57e0..a728fa2eb 100644 --- a/kafka/metrics/stats/min_stat.py +++ b/kafka/metrics/stats/min_stat.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import sys from kafka.metrics.stats.sampled_stat import AbstractSampledStat diff --git a/kafka/metrics/stats/percentile.py b/kafka/metrics/stats/percentile.py index 75e64ce5e..17cbb1fc1 100644 --- a/kafka/metrics/stats/percentile.py +++ b/kafka/metrics/stats/percentile.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - class Percentile(object): __slots__ = ('_metric_name', '_percentile') diff --git a/kafka/metrics/stats/percentiles.py b/kafka/metrics/stats/percentiles.py index 2cb2d84de..574557a59 100644 --- a/kafka/metrics/stats/percentiles.py +++ b/kafka/metrics/stats/percentiles.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.metrics import AnonMeasurable, NamedMeasurable from kafka.metrics.compound_stat import AbstractCompoundStat from kafka.metrics.stats import Histogram diff --git a/kafka/metrics/stats/rate.py b/kafka/metrics/stats/rate.py index 4d0ba0f27..6005bdb47 100644 --- a/kafka/metrics/stats/rate.py +++ b/kafka/metrics/stats/rate.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.metrics.measurable_stat import AbstractMeasurableStat from kafka.metrics.stats.sampled_stat import AbstractSampledStat diff --git a/kafka/metrics/stats/sampled_stat.py b/kafka/metrics/stats/sampled_stat.py index 8f978a8b7..b3bbfb046 100644 --- a/kafka/metrics/stats/sampled_stat.py +++ b/kafka/metrics/stats/sampled_stat.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc from kafka.metrics.measurable_stat import AbstractMeasurableStat diff --git a/kafka/metrics/stats/sensor.py b/kafka/metrics/stats/sensor.py index 9f7ac45f5..b539618ab 100644 --- a/kafka/metrics/stats/sensor.py +++ b/kafka/metrics/stats/sensor.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import threading import time diff --git a/kafka/metrics/stats/total.py b/kafka/metrics/stats/total.py index a78e99733..d43ceee18 100644 --- a/kafka/metrics/stats/total.py +++ b/kafka/metrics/stats/total.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.metrics.measurable_stat import AbstractMeasurableStat diff --git a/kafka/partitioner/__init__.py b/kafka/partitioner/__init__.py index 21a3bbb66..eed1dca69 100644 --- a/kafka/partitioner/__init__.py +++ b/kafka/partitioner/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.partitioner.default import DefaultPartitioner, murmur2 diff --git a/kafka/partitioner/default.py b/kafka/partitioner/default.py index 
6bd90eeb1..8004020c0 100644 --- a/kafka/partitioner/default.py +++ b/kafka/partitioner/default.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import random diff --git a/kafka/producer/__init__.py b/kafka/producer/__init__.py index 576c772a0..869dbb3dc 100644 --- a/kafka/producer/__init__.py +++ b/kafka/producer/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.producer.kafka import KafkaProducer __all__ = [ diff --git a/kafka/producer/__main__.py b/kafka/producer/__main__.py index e5fd1b1d1..7b3215fc8 100644 --- a/kafka/producer/__main__.py +++ b/kafka/producer/__main__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import sys from kafka.cli.producer import run_cli diff --git a/kafka/producer/future.py b/kafka/producer/future.py index 13392a96e..50fabbb3c 100644 --- a/kafka/producer/future.py +++ b/kafka/producer/future.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import collections import threading diff --git a/kafka/producer/kafka.py b/kafka/producer/kafka.py index 529df0176..5b4badde3 100644 --- a/kafka/producer/kafka.py +++ b/kafka/producer/kafka.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import atexit import copy import logging diff --git a/kafka/producer/producer_batch.py b/kafka/producer/producer_batch.py index 7a082bac8..71dea4fbd 100644 --- a/kafka/producer/producer_batch.py +++ b/kafka/producer/producer_batch.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - from enum import IntEnum import logging import time diff --git a/kafka/producer/record_accumulator.py b/kafka/producer/record_accumulator.py index 1add95a3b..e779b1668 100644 --- a/kafka/producer/record_accumulator.py +++ b/kafka/producer/record_accumulator.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import collections import copy import logging diff --git a/kafka/producer/sender.py b/kafka/producer/sender.py index 029f600f8..7df76ea24 100644 --- a/kafka/producer/sender.py +++ b/kafka/producer/sender.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import collections import copy import heapq diff --git a/kafka/producer/transaction_manager.py b/kafka/producer/transaction_manager.py index b10344525..6c7a5f5b4 100644 --- a/kafka/producer/transaction_manager.py +++ b/kafka/producer/transaction_manager.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import abc import collections from enum import IntEnum diff --git a/kafka/protocol/__init__.py b/kafka/protocol/__init__.py index 025447f99..ff9c68306 100644 --- a/kafka/protocol/__init__.py +++ b/kafka/protocol/__init__.py @@ -1,6 +1,3 @@ -from __future__ import absolute_import - - API_KEYS = { 0: 'Produce', 1: 'Fetch', diff --git a/kafka/protocol/abstract.py b/kafka/protocol/abstract.py index 2adbc232d..e0d89433b 100644 --- a/kafka/protocol/abstract.py +++ b/kafka/protocol/abstract.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc diff --git a/kafka/protocol/add_offsets_to_txn.py b/kafka/protocol/add_offsets_to_txn.py index fa2509330..bc6805b83 100644 --- a/kafka/protocol/add_offsets_to_txn.py +++ b/kafka/protocol/add_offsets_to_txn.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Int16, Int32, Int64, Schema, String diff --git a/kafka/protocol/add_partitions_to_txn.py b/kafka/protocol/add_partitions_to_txn.py index fdf28f4ae..b6a95ceaf 100644 --- 
a/kafka/protocol/add_partitions_to_txn.py +++ b/kafka/protocol/add_partitions_to_txn.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Array, Int16, Int32, Int64, Schema, String diff --git a/kafka/protocol/admin.py b/kafka/protocol/admin.py index afc615770..56ef656e6 100644 --- a/kafka/protocol/admin.py +++ b/kafka/protocol/admin.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from enum import IntEnum from kafka.protocol.api import Request, Response diff --git a/kafka/protocol/api.py b/kafka/protocol/api.py index 69e8be545..c7a477cac 100644 --- a/kafka/protocol/api.py +++ b/kafka/protocol/api.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc from kafka.protocol.struct import Struct diff --git a/kafka/protocol/api_versions.py b/kafka/protocol/api_versions.py index e7cedd954..7638bf46e 100644 --- a/kafka/protocol/api_versions.py +++ b/kafka/protocol/api_versions.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from io import BytesIO from kafka.protocol.api import Request, Response diff --git a/kafka/protocol/commit.py b/kafka/protocol/commit.py index 4cbc43afd..74141184b 100644 --- a/kafka/protocol/commit.py +++ b/kafka/protocol/commit.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Array, Int16, Int32, Int64, Schema, String diff --git a/kafka/protocol/end_txn.py b/kafka/protocol/end_txn.py index 96d6cc514..5a4089fa6 100644 --- a/kafka/protocol/end_txn.py +++ b/kafka/protocol/end_txn.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Boolean, Int16, Int32, Int64, Schema, String diff --git a/kafka/protocol/fetch.py b/kafka/protocol/fetch.py index 036a37eb8..cc8652774 100644 --- a/kafka/protocol/fetch.py +++ b/kafka/protocol/fetch.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import collections from kafka.protocol.api import Request, Response diff --git a/kafka/protocol/find_coordinator.py b/kafka/protocol/find_coordinator.py index be5b45ded..caad15bfd 100644 --- a/kafka/protocol/find_coordinator.py +++ b/kafka/protocol/find_coordinator.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Int8, Int16, Int32, Schema, String diff --git a/kafka/protocol/group.py b/kafka/protocol/group.py index 383f3cd2a..5d35ab219 100644 --- a/kafka/protocol/group.py +++ b/kafka/protocol/group.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import collections from kafka.protocol.api import Request, Response diff --git a/kafka/protocol/init_producer_id.py b/kafka/protocol/init_producer_id.py index 8426fe00b..102ea4770 100644 --- a/kafka/protocol/init_producer_id.py +++ b/kafka/protocol/init_producer_id.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Int16, Int32, Int64, Schema, String diff --git a/kafka/protocol/list_offsets.py b/kafka/protocol/list_offsets.py index 2e36dd660..99f85f12e 100644 --- a/kafka/protocol/list_offsets.py +++ b/kafka/protocol/list_offsets.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Array, Int8, Int16, Int32, Int64, Schema, String diff --git 
a/kafka/protocol/message.py b/kafka/protocol/message.py index 4c5c031b8..03d1098c0 100644 --- a/kafka/protocol/message.py +++ b/kafka/protocol/message.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import io import time diff --git a/kafka/protocol/metadata.py b/kafka/protocol/metadata.py index eb632371c..715b48595 100644 --- a/kafka/protocol/metadata.py +++ b/kafka/protocol/metadata.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Array, Boolean, Int16, Int32, Schema, String, BitField diff --git a/kafka/protocol/offset_for_leader_epoch.py b/kafka/protocol/offset_for_leader_epoch.py index 8465588a3..64c9d1751 100644 --- a/kafka/protocol/offset_for_leader_epoch.py +++ b/kafka/protocol/offset_for_leader_epoch.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Array, CompactArray, CompactString, Int16, Int32, Int64, Schema, String, TaggedFields diff --git a/kafka/protocol/parser.py b/kafka/protocol/parser.py index 8683a952a..84d9e8718 100644 --- a/kafka/protocol/parser.py +++ b/kafka/protocol/parser.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import collections import logging diff --git a/kafka/protocol/produce.py b/kafka/protocol/produce.py index 94edd0f80..4f07ca002 100644 --- a/kafka/protocol/produce.py +++ b/kafka/protocol/produce.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Int16, Int32, Int64, String, Array, Schema, Bytes diff --git a/kafka/protocol/sasl_authenticate.py b/kafka/protocol/sasl_authenticate.py index a2b9b1988..0be8b54a4 100644 --- a/kafka/protocol/sasl_authenticate.py +++ b/kafka/protocol/sasl_authenticate.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Bytes, Int16, Int64, Schema, String diff --git a/kafka/protocol/sasl_handshake.py b/kafka/protocol/sasl_handshake.py index e91c856ca..fa792cf67 100644 --- a/kafka/protocol/sasl_handshake.py +++ b/kafka/protocol/sasl_handshake.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Array, Int16, Schema, String diff --git a/kafka/protocol/struct.py b/kafka/protocol/struct.py index d4adb8832..b482326fa 100644 --- a/kafka/protocol/struct.py +++ b/kafka/protocol/struct.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from io import BytesIO from kafka.protocol.abstract import AbstractType diff --git a/kafka/protocol/txn_offset_commit.py b/kafka/protocol/txn_offset_commit.py index df1b1bd1e..cea96e9f8 100644 --- a/kafka/protocol/txn_offset_commit.py +++ b/kafka/protocol/txn_offset_commit.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from kafka.protocol.api import Request, Response from kafka.protocol.types import Array, Int16, Int32, Int64, Schema, String diff --git a/kafka/protocol/types.py b/kafka/protocol/types.py index 8949ce471..812c5e74d 100644 --- a/kafka/protocol/types.py +++ b/kafka/protocol/types.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import struct from struct import error diff --git a/kafka/record/abc.py b/kafka/record/abc.py index ac8f88f14..908ad30b7 100644 --- a/kafka/record/abc.py +++ b/kafka/record/abc.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc diff 
--git a/kafka/record/memory_records.py b/kafka/record/memory_records.py index 9df733059..d203dd8c7 100644 --- a/kafka/record/memory_records.py +++ b/kafka/record/memory_records.py @@ -18,7 +18,6 @@ # # So we can iterate over batches just by knowing offsets of Length. Magic is # used to construct the correct class for Batch itself. -from __future__ import division import struct diff --git a/kafka/sasl/__init__.py b/kafka/sasl/__init__.py index 90f05e733..047f3529b 100644 --- a/kafka/sasl/__init__.py +++ b/kafka/sasl/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import platform from kafka.sasl.gssapi import SaslMechanismGSSAPI diff --git a/kafka/sasl/abc.py b/kafka/sasl/abc.py index 3c036558e..30a58d888 100644 --- a/kafka/sasl/abc.py +++ b/kafka/sasl/abc.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc diff --git a/kafka/sasl/gssapi.py b/kafka/sasl/gssapi.py index 4785b1b75..9b10efbe7 100644 --- a/kafka/sasl/gssapi.py +++ b/kafka/sasl/gssapi.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import struct # needed for SASL_GSSAPI authentication: diff --git a/kafka/sasl/msk.py b/kafka/sasl/msk.py index 4d23729a4..10896ce1f 100644 --- a/kafka/sasl/msk.py +++ b/kafka/sasl/msk.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import datetime import hashlib import hmac diff --git a/kafka/sasl/oauth.py b/kafka/sasl/oauth.py index f1e959cb6..b4951744b 100644 --- a/kafka/sasl/oauth.py +++ b/kafka/sasl/oauth.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc import logging diff --git a/kafka/sasl/plain.py b/kafka/sasl/plain.py index 81443f5fe..2af75acfb 100644 --- a/kafka/sasl/plain.py +++ b/kafka/sasl/plain.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import logging from kafka.sasl.abc import SaslMechanism diff --git a/kafka/sasl/scram.py b/kafka/sasl/scram.py index 75cd6f6da..420d88327 100644 --- a/kafka/sasl/scram.py +++ b/kafka/sasl/scram.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import base64 import hashlib import hmac diff --git a/kafka/sasl/sspi.py b/kafka/sasl/sspi.py index f4c95d037..2aa686479 100644 --- a/kafka/sasl/sspi.py +++ b/kafka/sasl/sspi.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import logging # Windows-only diff --git a/kafka/serializer/__init__.py b/kafka/serializer/__init__.py index 90cd93ab2..168277519 100644 --- a/kafka/serializer/__init__.py +++ b/kafka/serializer/__init__.py @@ -1,3 +1 @@ -from __future__ import absolute_import - from kafka.serializer.abstract import Serializer, Deserializer diff --git a/kafka/serializer/abstract.py b/kafka/serializer/abstract.py index 18ad8d69c..b656b5cef 100644 --- a/kafka/serializer/abstract.py +++ b/kafka/serializer/abstract.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import abc diff --git a/kafka/structs.py b/kafka/structs.py index 16ba0daac..eb8255e6a 100644 --- a/kafka/structs.py +++ b/kafka/structs.py @@ -1,5 +1,4 @@ """ Other useful structs """ -from __future__ import absolute_import from collections import namedtuple diff --git a/kafka/util.py b/kafka/util.py index 5ce4b50c3..29482bce1 100644 --- a/kafka/util.py +++ b/kafka/util.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import binascii import functools import re diff --git a/test/__init__.py b/test/__init__.py index 329277dc6..7a0230c28 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - # Set default logging handler to avoid "No handler 
found" warnings. import logging logging.basicConfig(level=logging.INFO) diff --git a/test/conftest.py b/test/conftest.py index b65593a86..263fc0f3c 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import pytest diff --git a/test/integration/conftest.py b/test/integration/conftest.py index 9a5527259..e4307d01c 100644 --- a/test/integration/conftest.py +++ b/test/integration/conftest.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import os from urllib.parse import urlparse import uuid diff --git a/test/integration/fixtures.py b/test/integration/fixtures.py index d1534090b..0b660e624 100644 --- a/test/integration/fixtures.py +++ b/test/integration/fixtures.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import atexit import base64 import logging diff --git a/test/integration/test_producer_integration.py b/test/integration/test_producer_integration.py index 037a82834..c7e6ad4c9 100644 --- a/test/integration/test_producer_integration.py +++ b/test/integration/test_producer_integration.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from contextlib import contextmanager import platform import time diff --git a/test/record/test_default_records.py b/test/record/test_default_records.py index 5fb708066..17f0de5ba 100644 --- a/test/record/test_default_records.py +++ b/test/record/test_default_records.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals from unittest.mock import patch diff --git a/test/record/test_legacy_records.py b/test/record/test_legacy_records.py index e4a8685b6..454a3360b 100644 --- a/test/record/test_legacy_records.py +++ b/test/record/test_legacy_records.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - from unittest.mock import patch import pytest diff --git a/test/record/test_records.py b/test/record/test_records.py index 65010d88f..81874d145 100644 --- a/test/record/test_records.py +++ b/test/record/test_records.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals import pytest from kafka.record import MemoryRecords, MemoryRecordsBuilder from kafka.errors import CorruptRecordError diff --git a/test/sasl/test_gssapi.py b/test/sasl/test_gssapi.py index 2939cc196..cdebf2812 100644 --- a/test/sasl/test_gssapi.py +++ b/test/sasl/test_gssapi.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from unittest import mock from kafka.sasl import get_sasl_mechanism diff --git a/test/service.py b/test/service.py index a53fab8da..408351544 100644 --- a/test/service.py +++ b/test/service.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import logging import os import re diff --git a/test/test_assignors.py b/test/test_assignors.py index e79fae353..1056a7c1a 100644 --- a/test/test_assignors.py +++ b/test/test_assignors.py @@ -1,5 +1,4 @@ # pylint: skip-file -from __future__ import absolute_import from collections import defaultdict from random import randint, sample diff --git a/test/test_client_async.py b/test/test_client_async.py index 84128e7d4..b4811d346 100644 --- a/test/test_client_async.py +++ b/test/test_client_async.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division - import selectors import socket import time diff --git a/test/test_cluster.py b/test/test_cluster.py index 730b27cb6..d1dfb9353 100644 --- a/test/test_cluster.py +++ b/test/test_cluster.py @@ -1,5 +1,4 @@ # pylint: skip-file -from __future__ import absolute_import import socket diff --git 
a/test/test_codec.py b/test/test_codec.py index cb5c4c843..90c53a3fb 100644 --- a/test/test_codec.py +++ b/test/test_codec.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import platform import struct diff --git a/test/test_conn.py b/test/test_conn.py index 73a32397c..d8db22966 100644 --- a/test/test_conn.py +++ b/test/test_conn.py @@ -1,5 +1,4 @@ # pylint: skip-file -from __future__ import absolute_import from errno import EALREADY, EINPROGRESS, EISCONN, ECONNRESET import socket diff --git a/test/test_consumer.py b/test/test_consumer.py index 0d9477729..60d101d69 100644 --- a/test/test_consumer.py +++ b/test/test_consumer.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import pytest from kafka import KafkaConsumer, TopicPartition diff --git a/test/test_coordinator.py b/test/test_coordinator.py index b7db5ad19..3032a4973 100644 --- a/test/test_coordinator.py +++ b/test/test_coordinator.py @@ -1,5 +1,4 @@ # pylint: skip-file -from __future__ import absolute_import import time import pytest diff --git a/test/test_fetcher.py b/test/test_fetcher.py index 4794563ed..71c726bc8 100644 --- a/test/test_fetcher.py +++ b/test/test_fetcher.py @@ -1,5 +1,4 @@ # pylint: skip-file -from __future__ import absolute_import import logging import pytest diff --git a/test/test_partitioner.py b/test/test_partitioner.py index 853fbf69e..c347fe02c 100644 --- a/test/test_partitioner.py +++ b/test/test_partitioner.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import pytest from kafka.partitioner import DefaultPartitioner, murmur2 diff --git a/test/test_producer.py b/test/test_producer.py index e79c682a7..970932733 100644 --- a/test/test_producer.py +++ b/test/test_producer.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import gc import platform import threading diff --git a/test/test_producer_batch.py b/test/test_producer_batch.py index 7d959cbe9..e47e10a65 100644 --- a/test/test_producer_batch.py +++ b/test/test_producer_batch.py @@ -1,5 +1,4 @@ # pylint: skip-file -from __future__ import absolute_import, division import pytest diff --git a/test/test_record_accumulator.py b/test/test_record_accumulator.py index 0f61c21cf..aaea6da58 100644 --- a/test/test_record_accumulator.py +++ b/test/test_record_accumulator.py @@ -1,5 +1,4 @@ # pylint: skip-file -from __future__ import absolute_import, division import pytest diff --git a/test/test_sender.py b/test/test_sender.py index 4e52fb5d5..72b8f9a56 100644 --- a/test/test_sender.py +++ b/test/test_sender.py @@ -1,5 +1,4 @@ # pylint: skip-file -from __future__ import absolute_import import collections import io diff --git a/test/test_subscription_state.py b/test/test_subscription_state.py index 11fdb3baf..1e8de7a3b 100644 --- a/test/test_subscription_state.py +++ b/test/test_subscription_state.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import pytest from kafka import TopicPartition diff --git a/test/test_util.py b/test/test_util.py index f9e8a2b51..5db5fc8e0 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -1,5 +1,4 @@ # pylint: skip-file -from __future__ import absolute_import import pytest diff --git a/test/testutil.py b/test/testutil.py index b5dab1c02..1e1f8e198 100644 --- a/test/testutil.py +++ b/test/testutil.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import os import random import re From 4c77a19391f90203c9152ac2d635ff748127ef02 Mon Sep 17 00:00:00 2001 From: Dana Powers Date: Thu, 20 Nov 2025 20:07:07 -0800 Subject: [PATCH 15/18] Remove more py2 workarounds --- 
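(Editor's note, not part of the patch: patch 15 relies on Python 3's extended
iterable unpacking, which lets a loop discard trailing tuple fields directly
instead of slicing each tuple via map(lambda e: e[:2], ...). A minimal
standalone sketch, using made-up tuples rather than real broker responses:

    >>> topic_error_tuples = [('topic-a', 0, None), ('topic-b', 41, 'not controller')]
    >>> for topic, error_code, *_ in topic_error_tuples:
    ...     print(topic, error_code)
    topic-a 0
    topic-b 41

The same idiom appears in both response parsers touched below.)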
 kafka/admin/client.py | 12 ++----------
 kafka/cli/producer/__init__.py | 7 +------
 kafka/consumer/fetcher.py | 3 ---
 kafka/producer/kafka.py | 14 +-------------
 4 files changed, 4 insertions(+), 32 deletions(-)

diff --git a/kafka/admin/client.py b/kafka/admin/client.py
index 144309174..3f3112443 100644
--- a/kafka/admin/client.py
+++ b/kafka/admin/client.py
@@ -417,11 +417,7 @@ def _send_request_to_controller(self, request):
         raise RuntimeError("This should never happen, please file a bug with full stacktrace if encountered")
 
     def _parse_topic_request_response(self, topic_error_tuples, request, response, tries):
-        # Also small py2/py3 compatibility -- py3 can ignore extra values
-        # during unpack via: for x, y, *rest in list_of_values. py2 cannot.
-        # So for now we have to map across the list and explicitly drop any
-        # extra values (usually the error_message)
-        for topic, error_code in map(lambda e: e[:2], topic_error_tuples):
+        for topic, error_code, *_ in topic_error_tuples:
             error_type = Errors.for_code(error_code)
             if tries and error_type is Errors.NotControllerError:
                 # No need to inspect the rest of the errors for
@@ -436,12 +432,8 @@ def _parse_topic_request_response(self, topic_error_tuples, request, response, t
         return True
 
     def _parse_topic_partition_request_response(self, request, response, tries):
-        # Also small py2/py3 compatibility -- py3 can ignore extra values
-        # during unpack via: for x, y, *rest in list_of_values. py2 cannot.
-        # So for now we have to map across the list and explicitly drop any
-        # extra values (usually the error_message)
         for topic, partition_results in response.replication_election_results:
-            for partition_id, error_code in map(lambda e: e[:2], partition_results):
+            for partition_id, error_code, *_ in partition_results:
                 error_type = Errors.for_code(error_code)
                 if tries and error_type is Errors.NotControllerError:
                     # No need to inspect the rest of the errors for
diff --git a/kafka/cli/producer/__init__.py b/kafka/cli/producer/__init__.py
index 38d94c30f..d9210317d 100644
--- a/kafka/cli/producer/__init__.py
+++ b/kafka/cli/producer/__init__.py
@@ -65,15 +65,10 @@ def log_result(res_or_err):
         else:
             logger.info("Message produced: %s", res_or_err)
 
-    try:
-        input_py23 = raw_input
-    except NameError:
-        input_py23 = input
-
     try:
         while True:
             try:
-                value = input_py23()
+                value = input()
             except EOFError:
                 value = sys.stdin.read().rstrip('\n')
             if not value:
diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py
index 7d67fb825..8c2a41ba0 100644
--- a/kafka/consumer/fetcher.py
+++ b/kafka/consumer/fetcher.py
@@ -949,9 +949,6 @@ def _maybe_skip_record(self, record):
     def __bool__(self):
         return self.record_iterator is not None
 
-    # py2
-    __nonzero__ = __bool__
-
     def drain(self):
         if self.record_iterator is not None:
             self.record_iterator = None
diff --git a/kafka/producer/kafka.py b/kafka/producer/kafka.py
index 5b4badde3..dcbccbaca 100644
--- a/kafka/producer/kafka.py
+++ b/kafka/producer/kafka.py
@@ -579,19 +579,7 @@ def wrapper():
 
     def _unregister_cleanup(self):
         if getattr(self, '_cleanup', None):
-            if hasattr(atexit, 'unregister'):
-                atexit.unregister(self._cleanup) # pylint: disable=no-member
-
-            # py2 requires removing from private attribute...
-            else:
-
-                # ValueError on list.remove() if the exithandler no longer exists
-                # but that is fine here
-                try:
-                    atexit._exithandlers.remove( # pylint: disable=no-member
-                        (self._cleanup, (), {}))
-                except ValueError:
-                    pass
+            atexit.unregister(self._cleanup)
         self._cleanup = None
 
     def __del__(self):

From 9d27775145c14413402110fb21d1dd9b5d3722e3 Mon Sep 17 00:00:00 2001
From: Dana Powers
Date: Fri, 21 Nov 2025 09:28:16 -0800
Subject: [PATCH 16/18] Drop py2/py3.4/py3.5; add py3.14 to pyproject.toml

---
 pyproject.toml | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index d575a8959..312c45656 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,11 +14,7 @@ classifiers = [
     "Intended Audience :: Developers",
     "License :: OSI Approved :: Apache Software License",
     "Programming Language :: Python",
-    "Programming Language :: Python :: 2",
-    "Programming Language :: Python :: 2.7",
     "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3.4",
-    "Programming Language :: Python :: 3.5",
     "Programming Language :: Python :: 3.6",
     "Programming Language :: Python :: 3.7",
     "Programming Language :: Python :: 3.8",
@@ -27,6 +23,7 @@ classifiers = [
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
     "Programming Language :: Python :: 3.13",
+    "Programming Language :: Python :: 3.14",
     "Programming Language :: Python :: Implementation :: CPython",
     "Programming Language :: Python :: Implementation :: PyPy",
     "Topic :: Software Development :: Libraries :: Python Modules",
@@ -38,7 +35,7 @@ crc32c = ["crc32c"]
 lz4 = ["lz4"]
 snappy = ["python-snappy"]
 zstd = ["zstandard"]
-testing = ["pytest", "mock; python_version < '3.3'", "pytest-mock", "pytest-timeout"]
+testing = ["pytest", "pytest-mock", "pytest-timeout"]
 benchmarks = ["pyperf"]
 
 [tool.setuptools]

From d6131733919594a934b1b05b5f9bcdb072b31fd4 Mon Sep 17 00:00:00 2001
From: Dana Powers
Date: Fri, 21 Nov 2025 09:28:51 -0800
Subject: [PATCH 17/18] Update gh workflow to py3.14

---
 .github/workflows/python-package.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 79dcebc22..21acc4a32 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -35,7 +35,7 @@ jobs:
           - "3.9.0"
          - "4.0.0"
         python:
-          - "3.13"
+          - "3.14"
         include:
          #- python: "pypy3.9"
          #  kafka: "2.6.0"
@@ -50,6 +50,8 @@ jobs:
             kafka: "4.0.0"
           - python: "3.12"
             kafka: "4.0.0"
+          - python: "3.13"
+            kafka: "4.0.0"
 
     steps:
       - uses: actions/checkout@v6

From 7b3b73e2ef424d9ba31e079503e9d74f6b82f6bb Mon Sep 17 00:00:00 2001
From: Dana Powers
Date: Fri, 21 Nov 2025 10:37:14 -0800
Subject: [PATCH 18/18] Doc updates for compatibility

---
 docs/compatibility.rst | 3 ++-
 kafka/admin/client.py | 3 +--
 kafka/consumer/group.py | 3 +--
 kafka/producer/kafka.py | 13 ++++++-------
 4 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/docs/compatibility.rst b/docs/compatibility.rst
index a3bd706f2..58457f17d 100644
--- a/docs/compatibility.rst
+++ b/docs/compatibility.rst
@@ -16,6 +16,7 @@ Although kafka-python is tested and expected to work on recent broker versions,
 not all features are supported. Please see github open issues for feature
 tracking. PRs welcome!
 
-kafka-python is tested on python 2.7, and 3.8-3.13.
+kafka-python is tested on python 3.8-3.14.
+python 2.7 was supported through kafka-python release 2.3.
 
 Builds and tests via Github Actions Workflows. See https://github.com/dpkp/kafka-python/actions
diff --git a/kafka/admin/client.py b/kafka/admin/client.py
index 3f3112443..6bdac5613 100644
--- a/kafka/admin/client.py
+++ b/kafka/admin/client.py
@@ -119,8 +119,7 @@ class KafkaAdminClient(object):
         ssl_crlfile (str): Optional filename containing the CRL to check for
             certificate expiration. By default, no CRL check is done. When
             providing a file, only the leaf certificate will be checked against
-            this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
-            Default: None.
+            this CRL. Default: None.
         api_version (tuple): Specify which Kafka API version to use. If set
             to None, KafkaClient will attempt to infer the broker version by
             probing various APIs. Example: (0, 10, 2). Default: None
diff --git a/kafka/consumer/group.py b/kafka/consumer/group.py
index dd6986154..7c8b1cb2f 100644
--- a/kafka/consumer/group.py
+++ b/kafka/consumer/group.py
@@ -201,8 +201,7 @@ class KafkaConsumer(object):
         ssl_crlfile (str): Optional filename containing the CRL to check for
             certificate expiration. By default, no CRL check is done. When
             providing a file, only the leaf certificate will be checked against
-            this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
-            Default: None.
+            this CRL. Default: None.
         ssl_ciphers (str): optionally set the available ciphers for ssl
             connections. It should be a string in the OpenSSL cipher list
             format. If no cipher can be selected (because compile-time options
diff --git a/kafka/producer/kafka.py b/kafka/producer/kafka.py
index dcbccbaca..a937138a6 100644
--- a/kafka/producer/kafka.py
+++ b/kafka/producer/kafka.py
@@ -292,21 +292,20 @@ class KafkaProducer(object):
             will be ignored. Default: None.
         ssl_check_hostname (bool): flag to configure whether ssl handshake
             should verify that the certificate matches the brokers hostname.
-            default: true.
+            Default: True.
         ssl_cafile (str): optional filename of ca file to use in certificate
-            verification. default: none.
+            verification. Default: None.
         ssl_certfile (str): optional filename of file in pem format containing
             the client certificate, as well as any ca certificates needed to
-            establish the certificate's authenticity. default: none.
+            establish the certificate's authenticity. Default: None.
         ssl_keyfile (str): optional filename containing the client private key.
-            default: none.
+            Default: None.
         ssl_password (str): optional password to be used when loading the
-            certificate chain. default: none.
+            certificate chain. Default: None.
         ssl_crlfile (str): optional filename containing the CRL to check for
             certificate expiration. By default, no CRL check is done. When
             providing a file, only the leaf certificate will be checked against
-            this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
-            default: none.
+            this CRL. Default: None.
         ssl_ciphers (str): optionally set the available ciphers for ssl
             connections. It should be a string in the OpenSSL cipher list
             format. If no cipher can be selected (because compile-time options