
Commit 4e72089
Author: David Noble

Changes:
* Gzipped recordings are working well enough
* Smarter chunked- and csv-file differencing is in place (including corrections to two recordings)

Verified: 100% test pass on OS X

1 parent 02995a0 commit 4e72089
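Note: the gzipped recordings named in this commit are pickled test fixtures; the test diff below reloads them with gzip.open plus pickle.load. A minimal sketch of that round trip (the file name and payload here are hypothetical, not taken from the repository):

    # Hypothetical sketch of the recording round trip used by the tests:
    # fixtures are pickled and gzipped, then reloaded with gzip.open + pickle.load.
    import gzip
    import pickle

    test_data = {'record_count': 3}  # illustrative payload, not a real recording

    with gzip.open('example.input.gz', 'wb') as ofile:
        pickle.dump(test_data, ofile)

    with gzip.open('example.input.gz', 'rb') as ifile:
        assert pickle.load(ifile) == test_data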

9 files changed: +14714 -18300 lines
Lines changed: 2 additions & 2 deletions

@@ -1,3 +1,3 @@
 
-generates_timeorder,__mv_generates_timeorder,generating,__mv_generating,retainsevents,__mv_retainsevents,streaming,__mv_streaming
-0,,1,,0,,1,
+generates_timeorder,__mv_generates_timeorder,generating,__mv_generating,local,__mv_local,retainsevents,__mv_retainsevents,streaming,__mv_streaming
+0,,1,,0,,0,,1,
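The corrected header adds the local/__mv_local pair. Note the column convention at work here: every field appears to be paired with a __mv_<field> companion column carrying Splunk's multivalue encoding. A hedged sketch of assembling such a header row (the field list comes from the recording above; the helper itself is illustrative, not the SDK's implementation):

    # Illustrative only: interleave each field with its '__mv_' companion column,
    # matching the corrected header shown above.
    fields = ['generates_timeorder', 'generating', 'local', 'retainsevents', 'streaming']
    header = []
    for name in fields:
        header.extend((name, '__mv_' + name))
    print(','.join(header))
    # -> generates_timeorder,__mv_generates_timeorder,generating,__mv_generating,
    #    local,__mv_local,retainsevents,__mv_retainsevents,streaming,__mv_streaming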

tests/searchcommands/recordings/scpv1/Splunk-6.3/pypygeneratetext.execute.output

Lines changed: 1 addition & 1 deletion

@@ -1,5 +1,5 @@
 
-_time,__mv__time,event_no,__mv_event_no,_raw,__mv__raw
+_time,__mv__time,_serial,__mv__serial,_raw,__mv__raw
 1434910895.01,,1,,1. Hello World!,
 1434910895.01,,2,,2. Hello World!,
 1434910895.01,,3,,3. Hello World!,

tests/searchcommands/recordings/scpv2/Splunk-6.3/TestInternals.test_record_writer_with_recordings.1443140714.71.output

Lines changed: 0 additions & 18253 deletions
This file was deleted.

tests/searchcommands/recordings/scpv2/Splunk-6.3/TestInternals.test_record_writer_with_recordings.1443154424.42.output

Lines changed: 14490 additions & 0 deletions
Large diffs are not rendered by default.

tests/searchcommands/recordings/scpv2/Splunk-6.3/pypygeneratetext.output

Lines changed: 2 additions & 2 deletions

@@ -1,7 +1,7 @@
 chunked 1.0,38,0
 {"generating":true,"type":"streaming"}
-chunked 1.0,17,37842
-{"finished":true}_time,__mv__time,event_no,__mv_event_no,_raw,__mv__raw
+chunked 1.0,17,39840
+{"finished":true}_time,__mv__time,_serial,__mv__serial,_raw,__mv__raw
 1433261371.25,,1,,1. Hello World!,
 1433261371.25,,2,,2. Hello World!,
 1433261371.25,,3,,3. Hello World!,
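Each "chunked 1.0,<metadata_length>,<body_length>" line above is a frame header giving the byte lengths of the JSON metadata and the body that follow it, so any change to the body changes the recorded length (here 37842 to 39840). A minimal, self-contained parser for one frame, mirroring the _load_chunks helper added in test_internals_v2.py below (the sample frame is fabricated for illustration):

    # Fabricated sample frame; the lengths are real byte counts: the JSON
    # metadata is 17 bytes and the body is 21 bytes, as declared in the header.
    import json
    import re

    try:
        from cStringIO import StringIO  # Python 2, as in the tests
    except ImportError:
        from io import StringIO         # Python 3 fallback

    frame = StringIO('chunked 1.0,17,21\n{"finished":true}_serial,__mv__serial\n')

    header = re.match(r'chunked 1\.0,(?P<metadata_length>\d+),(?P<body_length>\d+)\n',
                      frame.readline())
    metadata = json.loads(frame.read(int(header.group('metadata_length'))))
    body = frame.read(int(header.group('body_length')))

    assert metadata == {'finished': True} and body == '_serial,__mv__serial\n'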

tests/searchcommands/test_internals_v2.py

Lines changed: 70 additions & 7 deletions
@@ -19,7 +19,7 @@
 
 from splunklib.searchcommands.internals import MetadataDecoder, MetadataEncoder, Recorder, RecordWriterV2
 from splunklib.searchcommands import SearchMetric
-from collections import deque, OrderedDict
+from collections import deque, namedtuple, OrderedDict
 from cStringIO import StringIO
 from functools import wraps
 from glob import iglob
@@ -63,7 +63,7 @@ def random_dict():
     # contain utf-8 encoded byte strings or--better still--unicode strings. This is because the json package
     # converts all bytes strings to unicode strings before serializing them.
 
-    return {'a': random_float(), 'b': random_unicode(), '福 酒吧': {'fu': random_float(), 'bar': random_float()}}
+    return OrderedDict((('a', random_float()), ('b', random_unicode()), ('福 酒吧', OrderedDict((('fu', random_float()), ('bar', random_float()))))))
 
 
 def random_float():
@@ -260,8 +260,8 @@ def test_record_writer_with_recordings(self):
 
         for input_file in iglob(base_path + '*.input.gz'):
 
-            with gzip.open(input_file, 'rb') as f:
-                test_data = pickle.load(f)
+            with gzip.open(input_file, 'rb') as ifile:
+                test_data = pickle.load(ifile)
 
             writer = RecordWriterV2(StringIO(), maxresultrows=10)  # small for the purposes of this unit test
             write_record = writer.write_record
@@ -282,13 +282,76 @@ def test_record_writer_with_recordings(self):
 
             writer.flush(finished=True)
 
-            with io.open(os.path.splitext(os.path.splitext(input_file)[0])[0] + '.output', 'rb') as f:
-                expected = f.read()
+            # Read expected data
 
-            self.assertMultiLineEqual(writer._ofile.getvalue(), expected)
+            expected_path = os.path.splitext(os.path.splitext(input_file)[0])[0] + '.output'
+
+            with io.open(expected_path, 'rb') as ifile:
+                expected = ifile.read()
+
+            expected = self._load_chunks(StringIO(expected))
+
+            # Read observed data
+
+            ifile = writer._ofile
+            ifile.seek(0)
+
+            observed = self._load_chunks(ifile)
+
+            # Write observed data (as an aid to diagnostics)
+
+            observed_path = expected_path + '.observed'
+            observed_value = ifile.getvalue()
+
+            with io.open(observed_path, 'wb') as ifile:
+                ifile.write(observed_value)
+
+            self._compare_chunks(observed, expected)
 
         return
 
+    def _compare_chunks(self, chunks_1, chunks_2):
+        self.assertEqual(len(chunks_1), len(chunks_2))
+        n = 0
+        for chunk_1, chunk_2 in izip(chunks_1, chunks_2):
+            self.assertDictEqual(
+                chunk_1.metadata, chunk_2.metadata,
+                'Chunk {0}: metadata error: "{1}" != "{2}"'.format(n, chunk_1.metadata, chunk_2.metadata))
+            self.assertMultiLineEqual(chunk_1.body, chunk_2.body, 'Chunk {0}: data error'.format(n))
+            n += 1
+        return
+
+    def _load_chunks(self, ifile):
+        import re
+
+        pattern = re.compile(r'chunked 1.0,(?P<metadata_length>\d+),(?P<body_length>\d+)\n')
+        decoder = json.JSONDecoder()
+
+        chunks = []
+
+        while True:
+
+            line = ifile.readline()
+
+            if len(line) == 0:
+                break
+
+            match = pattern.match(line)
+            self.assertIsNotNone(match)
+
+            metadata_length = int(match.group('metadata_length'))
+            metadata = ifile.read(metadata_length)
+            metadata = decoder.decode(metadata)
+
+            body_length = int(match.group('body_length'))
+            body = ifile.read(body_length) if body_length > 0 else ''
+
+            chunks.append(TestInternals._Chunk(metadata, body))
+
+        return chunks
+
+    _Chunk = namedtuple('Chunk', (b'metadata', b'body'))
+
 _dictionary = {
     'a': 1,
     'b': 2,
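One note on the random_dict change in this diff: switching from a plain dict to OrderedDict presumably pins the key order of the serialized JSON, so recorded output compares reproducibly from run to run. A small standalone demonstration of the effect (not repository code):

    # OrderedDict fixes the serialization order, so two runs that build the same
    # entries in the same order always produce byte-identical JSON.
    import json
    from collections import OrderedDict

    d = OrderedDict((('a', 1.0), ('b', u'x')))
    assert json.dumps(d) == '{"a": 1.0, "b": "x"}'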
