Skip to content

Commit 580520b

Browse files
committed
Minor Exception cleanup
1 parent a699f6a commit 580520b

File tree

7 files changed

+10
-10
lines changed

7 files changed: +10 −10 lines changed

kafka/conn.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@
4747
SSLWantReadError = ssl.SSLWantReadError
4848
SSLWantWriteError = ssl.SSLWantWriteError
4949
SSLZeroReturnError = ssl.SSLZeroReturnError
50-
except:
50+
except AttributeError:
5151
# support older ssl libraries
5252
log.warning('Old SSL module detected.'
5353
' SSL error handling may not operate cleanly.'

kafka/consumer/fetcher.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -478,8 +478,8 @@ def _unpack_message_set(self, tp, records):
478478
# caught by the generator. We want all exceptions to be raised
479479
# back to the user. See Issue 545
480480
except StopIteration as e:
481-
log.exception('StopIteration raised unpacking messageset: %s', e)
482-
raise Exception('StopIteration raised unpacking messageset')
481+
log.exception('StopIteration raised unpacking messageset')
482+
raise RuntimeError('StopIteration raised unpacking messageset')
483483

484484
def __iter__(self): # pylint: disable=non-iterator-returned
485485
return self

kafka/metrics/metric_name.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -50,9 +50,9 @@ def __init__(self, name, group, description=None, tags=None):
5050
tags (dict, optional): Additional key/val attributes of the metric.
5151
"""
5252
if not (name and group):
53-
raise Exception('name and group must be non-empty.')
53+
raise ValueError('name and group must be non-empty.')
5454
if tags is not None and not isinstance(tags, dict):
55-
raise Exception('tags must be a dict if present.')
55+
raise ValueError('tags must be a dict if present.')
5656

5757
self._name = name
5858
self._group = group

kafka/protocol/types.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ def repr(self, value):
148148
field_val = value[i]
149149
key_vals.append('%s=%s' % (self.names[i], self.fields[i].repr(field_val)))
150150
return '(' + ', '.join(key_vals) + ')'
151-
except:
151+
except Exception:
152152
return repr(value)
153153

154154

test/fixtures.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,7 @@ def open(self):
162162
time.sleep(backoff)
163163
tries += 1
164164
else:
165-
raise Exception('Failed to start Zookeeper before max_timeout')
165+
raise RuntimeError('Failed to start Zookeeper before max_timeout')
166166
self.out("Done!")
167167
atexit.register(self.close)
168168

@@ -302,7 +302,7 @@ def open(self):
302302
time.sleep(backoff)
303303
tries += 1
304304
else:
305-
raise Exception('Failed to start KafkaInstance before max_timeout')
305+
raise RuntimeError('Failed to start KafkaInstance before max_timeout')
306306
self.out("Done!")
307307
self.running = True
308308
atexit.register(self.close)

test/test_failover_integration.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -197,7 +197,7 @@ def _send_random_messages(self, producer, topic, partition, n):
197197
while True:
198198
try:
199199
producer.send_messages(topic, partition, msg.encode('utf-8'))
200-
except:
200+
except Exception:
201201
log.exception('failure in _send_random_messages - retrying')
202202
continue
203203
else:

test/testutil.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,7 @@ def tearDown(self):
113113
def current_offset(self, topic, partition):
114114
try:
115115
offsets, = self.client.send_offset_request([OffsetRequestPayload(topic, partition, -1, 1)])
116-
except:
116+
except Exception:
117117
# XXX: We've seen some UnknownErrors here and can't debug w/o server logs
118118
self.zk.child.dump_logs()
119119
self.server.child.dump_logs()

0 commit comments

Comments (0)