Skip to content

Commit d54aaf6

Browse files
carsonip authored and jeffwidman committed
Fix slots usage and use more slots
Use empty __slots__ for ABC classes; otherwise, classes that inherit from them will still have a __dict__. Also use __slots__ for more classes.
1 parent bb1c13e commit d54aaf6

File tree

4 files changed

+26
-0
lines changed

4 files changed

+26
-0
lines changed

kafka/record/abc.py

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -4,6 +4,7 @@
44

55
class ABCRecord(object):
66
__metaclass__ = abc.ABCMeta
7+
__slots__ = ()
78

89
@abc.abstractproperty
910
def offset(self):
@@ -45,6 +46,7 @@ def headers(self):
4546

4647
class ABCRecordBatchBuilder(object):
4748
__metaclass__ = abc.ABCMeta
49+
__slots__ = ()
4850

4951
@abc.abstractmethod
5052
def append(self, offset, timestamp, key, value, headers=None):
@@ -87,6 +89,7 @@ class ABCRecordBatch(object):
8789
compressed) message.
8890
"""
8991
__metaclass__ = abc.ABCMeta
92+
__slots__ = ()
9093

9194
@abc.abstractmethod
9295
def __iter__(self):
@@ -97,6 +100,7 @@ def __iter__(self):
97100

98101
class ABCRecords(object):
99102
__metaclass__ = abc.ABCMeta
103+
__slots__ = ()
100104

101105
@abc.abstractmethod
102106
def __init__(self, buffer):

kafka/record/default_records.py

Lines changed: 10 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -70,6 +70,8 @@
7070

7171
class DefaultRecordBase(object):
7272

73+
__slots__ = ()
74+
7375
HEADER_STRUCT = struct.Struct(
7476
">q" # BaseOffset => Int64
7577
"i" # Length => Int32
@@ -116,6 +118,9 @@ def _assert_has_codec(self, compression_type):
116118

117119
class DefaultRecordBatch(DefaultRecordBase, ABCRecordBatch):
118120

121+
__slots__ = ("_buffer", "_header_data", "_pos", "_num_records",
122+
"_next_record_index", "_decompressed")
123+
119124
def __init__(self, buffer):
120125
self._buffer = bytearray(buffer)
121126
self._header_data = self.HEADER_STRUCT.unpack_from(self._buffer)
@@ -358,6 +363,11 @@ class DefaultRecordBatchBuilder(DefaultRecordBase, ABCRecordBatchBuilder):
358363
# 5 bytes length + 10 bytes timestamp + 5 bytes offset + 1 byte attributes
359364
MAX_RECORD_OVERHEAD = 21
360365

366+
__slots__ = ("_magic", "_compression_type", "_batch_size", "_is_transactional",
367+
"_producer_id", "_producer_epoch", "_base_sequence",
368+
"_first_timestamp", "_max_timestamp", "_last_offset", "_num_records",
369+
"_buffer")
370+
361371
def __init__(
362372
self, magic, compression_type, is_transactional,
363373
producer_id, producer_epoch, base_sequence, batch_size):

kafka/record/legacy_records.py

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -57,6 +57,8 @@
5757

5858
class LegacyRecordBase(object):
5959

60+
__slots__ = ()
61+
6062
HEADER_STRUCT_V0 = struct.Struct(
6163
">q" # BaseOffset => Int64
6264
"i" # Length => Int32
@@ -127,6 +129,9 @@ def _assert_has_codec(self, compression_type):
127129

128130
class LegacyRecordBatch(ABCRecordBatch, LegacyRecordBase):
129131

132+
__slots__ = ("_buffer", "_magic", "_offset", "_crc", "_timestamp",
133+
"_attributes", "_decompressed")
134+
130135
def __init__(self, buffer, magic):
131136
self._buffer = memoryview(buffer)
132137
self._magic = magic
@@ -336,6 +341,8 @@ def __repr__(self):
336341

337342
class LegacyRecordBatchBuilder(ABCRecordBatchBuilder, LegacyRecordBase):
338343

344+
__slots__ = ("_magic", "_compression_type", "_batch_size", "_buffer")
345+
339346
def __init__(self, magic, compression_type, batch_size):
340347
self._magic = magic
341348
self._compression_type = compression_type

kafka/record/memory_records.py

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -37,6 +37,8 @@ class MemoryRecords(ABCRecords):
3737
# Minimum space requirements for Record V0
3838
MIN_SLICE = LOG_OVERHEAD + LegacyRecordBatch.RECORD_OVERHEAD_V0
3939

40+
__slots__ = ("_buffer", "_pos", "_next_slice", "_remaining_bytes")
41+
4042
def __init__(self, bytes_data):
4143
self._buffer = bytes_data
4244
self._pos = 0
@@ -110,6 +112,9 @@ def next_batch(self, _min_slice=MIN_SLICE,
110112

111113
class MemoryRecordsBuilder(object):
112114

115+
__slots__ = ("_builder", "_batch_size", "_buffer", "_next_offset", "_closed",
116+
"_bytes_written")
117+
113118
def __init__(self, magic, compression_type, batch_size):
114119
assert magic in [0, 1, 2], "Not supported magic"
115120
assert compression_type in [0, 1, 2, 3], "Not valid compression type"

0 commit comments

Comments (0)