
Commit 237882c

new tests added
1 parent 7655248 commit 237882c

18 files changed: +725 -290 lines changed

tests/__init__.py

Lines changed: 1 addition & 2 deletions
@@ -19,6 +19,7 @@ def load_tests(loader, tests, pattern):
     suite.addTests(loader.loadTestsFromModule(restore_test))
     suite.addTests(loader.loadTestsFromModule(validate_test))
     suite.addTests(loader.loadTestsFromModule(retention_test))
+    suite.addTests(loader.loadTestsFromModule(ptrack))
     suite.addTests(loader.loadTestsFromModule(ptrack_clean))
     suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
     suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
@@ -34,14 +35,12 @@ def load_tests(loader, tests, pattern):
     suite.addTests(loader.loadTestsFromModule(false_positive))
     suite.addTests(loader.loadTestsFromModule(compression))
     suite.addTests(loader.loadTestsFromModule(page))
-    suite.addTests(loader.loadTestsFromModule(ptrack))
     suite.addTests(loader.loadTestsFromModule(archive))

     return suite

 # ToDo:
 # archive:
-#     discrepancy of instance`s PGDATA and node`s PGDATA should lead to archive-push refusal to work
 #     discrepancy of instance`s SYSTEMID and node`s SYSTEMID should lead to archive-push refusal to work
 # replica:
 #     backup should exit with correct error message if some master* option is missing
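
Note: tests/__init__.py relies on the standard unittest load_tests protocol, so the suite runs in exactly the order the modules are added above; this change moves the ptrack module up so it runs right after retention_test. A minimal sketch of how the suite gets picked up (the runner invocation and verbosity below are assumptions, not part of this commit):

import unittest
import tests  # the package whose __init__.py defines load_tests()

# loadTestsFromModule() honours load_tests(), so the resulting suite
# preserves the module ordering defined in tests/__init__.py.
suite = unittest.defaultTestLoader.loadTestsFromModule(tests)
unittest.TextTestRunner(verbosity=2).run(suite)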

tests/archive.py

Lines changed: 4 additions & 4 deletions
@@ -229,7 +229,7 @@ def test_arhive_push_file_exists(self):
         # Clean after yourself
         self.del_test_dir(module_name, fname)

-    @unittest.expectedFailure
+    #@unittest.expectedFailure
     def test_replica_archive(self):
         """make node withput archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
         fname = self.id().split('.')[3]
@@ -256,7 +256,7 @@ def test_replica_archive(self):

         # Settings for Replica
         self.restore_node(backup_dir, 'master', replica)
-        self.set_replica(master, replica)
+        self.set_replica(master, replica, synchronous=True)
         self.set_archiving(backup_dir, 'replica', replica, replica=True)
         replica.start({"-t": "600"})

@@ -280,7 +280,7 @@ def test_replica_archive(self):
         node.cleanup()
         self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
         node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
-        node.start({"-t": "600"})
+        node.start()
         # CHECK DATA CORRECTNESS
         after = node.safe_psql("postgres", "SELECT * FROM t_heap")
         self.assertEqual(before, after)
@@ -299,7 +299,7 @@ def test_replica_archive(self):
         node.cleanup()
         self.restore_node(backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
         node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
-        node.start({"-t": "600"})
+        node.start()
         # CHECK DATA CORRECTNESS
         after = node.safe_psql("postgres", "SELECT * FROM t_heap")
         self.assertEqual(before, after)
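
The switch to self.set_replica(master, replica, synchronous=True) means test_replica_archive now relies on synchronous replication instead of a timed start. Based on the set_replica() change in tests/helpers/ptrack_helpers.py below, the master's postgresql.auto.conf would end up with roughly these settings (a sketch; the standby name comes from the helper's default replica_name='replica'):

synchronous_standby_names='replica'
synchronous_commit='remote_apply'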

tests/expected/option_help.out

Lines changed: 5 additions & 5 deletions
@@ -5,7 +5,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.

  pg_probackup version

- pg_probackup init -B backup-path
+ pg_probackup init -B backup-path [-l]

  pg_probackup set-config -B backup-dir --instance=instance_name
                  [--log-level=log-level]
@@ -26,7 +26,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
  pg_probackup show-config -B backup-dir --instance=instance_name

  pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
-                 [-C] [--stream [-S slot-name]] [--backup-pg-log]
+                 [-C] [-l] [--stream [-S slot-name]] [--backup-pg-log]
                  [-j num-threads] [--archive-timeout=archive-timeout]
                  [--compress-algorithm=compress-algorithm]
                  [--compress-level=compress-level]
@@ -37,20 +37,20 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
                  [--replica-timeout=timeout]

  pg_probackup restore -B backup-dir --instance=instance_name
-                 [-D pgdata-dir] [-i backup-id] [--progress]
+                 [-D pgdata-dir] [-l] [-i backup-id] [--progress]
                  [--time=time|--xid=xid [--inclusive=boolean]]
                  [--timeline=timeline] [-T OLDDIR=NEWDIR]

  pg_probackup validate -B backup-dir [--instance=instance_name]
-                 [-i backup-id] [--progress]
+                 [-i backup-id] [-l] [--progress]
                  [--time=time|--xid=xid [--inclusive=boolean]]
                  [--timeline=timeline]

  pg_probackup show -B backup-dir
                  [--instance=instance_name [-i backup-id]]

  pg_probackup delete -B backup-dir --instance=instance_name
-                 [--wal] [-i backup-id | --expired]
+                 [--wal] [-i backup-id | --expired] [-l]

  pg_probackup add-instance -B backup-dir -D pgdata-dir
                  --instance=instance_name

tests/false_positive.py

Lines changed: 0 additions & 108 deletions
@@ -10,114 +10,6 @@

 class FalsePositive(ProbackupTest, unittest.TestCase):

-    # @unittest.skip("skip")
-    # @unittest.expectedFailure
-    def test_pgpro561(self):
-        """
-        make node with archiving, make stream backup, restore it to node1,
-        check that archiving is not successful on node1
-        """
-        fname = self.id().split('.')[3]
-        node1 = self.make_simple_node(base_dir="{0}/{1}/node1".format(module_name, fname),
-            set_replication=True,
-            initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
-            )
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node1', node1)
-        self.set_archiving(backup_dir, 'node1', node1)
-        node1.start()
-
-        backup_id = self.backup_node(backup_dir, 'node1', node1, options=["--stream"])
-
-        node2 = self.make_simple_node(base_dir="{0}/{1}/node2".format(module_name, fname))
-        node2.cleanup()
-
-        node1.psql(
-            "postgres",
-            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
-
-        self.backup_node(backup_dir, 'node1', node1, backup_type='page', options=["--stream"])
-        self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)
-        node2.append_conf('postgresql.auto.conf', 'port = {0}'.format(node2.port))
-        node2.start({"-t": "600"})
-
-        timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
-        timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]
-        self.assertEqual(timeline_node1, timeline_node2, "Timelines on Master and Node1 should be equal. This is unexpected")
-
-        archive_command_node1 = node1.safe_psql("postgres", "show archive_command")
-        archive_command_node2 = node2.safe_psql("postgres", "show archive_command")
-        self.assertEqual(archive_command_node1, archive_command_node2, "Archive command on Master and Node should be equal. This is unexpected")
-
-        result = node2.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
-        # self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip()))
-        if result == "":
-            self.assertEqual(1, 0, 'Error is expected due to Master and Node1 having the common archive and archive_command')
-
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
-
-    # @unittest.skip("skip")
-    def pgpro688(self):
-        """make node with archiving, make backup, get Recovery Time, validate to Recovery Time. Waiting PGPRO-688. RESOLVED"""
-        fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
-            set_replication=True,
-            initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
-            )
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.start()
-
-        backup_id = self.backup_node(backup_dir, 'node', node)
-        recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
-
-        # Uncommenting this section will make this test True Positive
-        #node.safe_psql("postgres", "select pg_create_restore_point('123')")
-        #node.safe_psql("postgres", "select txid_current()")
-        #node.safe_psql("postgres", "select pg_switch_xlog()")
-        ####
-
-        #try:
-        self.validate_pb(backup_dir, 'node', options=["--time='{0}'".format(recovery_time)])
-        # we should die here because exception is what we expect to happen
-        # self.assertEqual(1, 0, "Expecting Error because it should not be possible safely validate 'Recovery Time' without wal record with timestamp.\n Output: {0} \n CMD: {1}".format(
-        #     repr(self.output), self.cmd))
-        # except ProbackupException as e:
-        #     self.assertTrue('WARNING: recovery can be done up to time {0}'.format(recovery_time) in e.message,
-        #         '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
-
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
-
-    # @unittest.skip("skip")
-    def pgpro702_688(self):
-        """make node without archiving, make stream backup, get Recovery Time, validate to Recovery Time"""
-        fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
-            set_replication=True,
-            initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
-            )
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        node.start()
-
-        backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"])
-        recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
-
-        self.assertIn(six.b("INFO: backup validation completed successfully on"),
-            self.validate_pb(backup_dir, 'node', node, options=["--time='{0}'".format(recovery_time)]))
-
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
-
     # @unittest.skip("skip")
     @unittest.expectedFailure
     def test_validate_wal_lost_segment(self):

tests/helpers/ptrack_helpers.py

Lines changed: 42 additions & 18 deletions
@@ -175,6 +175,7 @@ def make_simple_node(
         # Allow replication in pg_hba.conf
         if set_replication:
             node.set_replication_conf()
+            node.append_conf("postgresql.auto.conf", "max_wal_senders = 10")
         return node

     def create_tblspace_in_node(self, node, tblspc_name, cfs=False):
@@ -295,27 +296,17 @@ def check_ptrack_sanity(self, idx_dict):
                 idx_dict['type'], idx_dict))

     def check_ptrack_recovery(self, idx_dict):
-        success = True
         size = idx_dict['size']
         for PageNum in range(size):
             if idx_dict['ptrack'][PageNum] != 1:
-                if self.verbose:
-                    print('Recovery for Page Number {0} of Type {1} was conducted, but ptrack value is {2}. THIS IS BAD'.format(
-                        PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
-                    print(idx_dict)
-                success = False
-        self.assertEqual(success, True)
+                self.assertTrue(False, 'Recovery for Page Number {0} of Type {1} was conducted, but ptrack value is {2}. THIS IS BAD\n IDX_DICT: {3}'.format(
+                    PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum], idx_dict))

     def check_ptrack_clean(self, idx_dict, size):
-        success = True
         for PageNum in range(size):
             if idx_dict['ptrack'][PageNum] != 0:
-                if self.verbose:
-                    print('Ptrack for Page Number {0} of Type {1} should be clean, but ptrack value is {2}. THIS IS BAD'.format(
-                        PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
-                    print(idx_dict)
-                success = False
-        self.assertEqual(success, True, '')
+                self.assertTrue(False, 'Ptrack for Page Number {0} of Type {1} should be clean, but ptrack value is {2}.\n THIS IS BAD\n IDX_DICT: {3}'.format(
+                    PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum], idx_dict))

     def run_pb(self, command):
         try:
@@ -365,7 +356,7 @@ def del_instance(self, backup_dir, instance):
     def clean_pb(self, backup_dir):
         shutil.rmtree(backup_dir, ignore_errors=True)

-    def backup_node(self, backup_dir, instance, node=False, data_dir=False, backup_type="full", options=[]):
+    def backup_node(self, backup_dir, instance, node, data_dir=False, backup_type="full", options=[]):
         if not node and not data_dir:
             print('You must provide ether node or data_dir for backup')
             exit(1)
@@ -379,7 +370,7 @@ def backup_node(self, backup_dir, instance, node=False, data_dir=False, backup_t
         cmd_list = [
             "backup",
             "-B", backup_dir,
-            "-D", pgdata,
+            # "-D", pgdata,
            "-p", "%i" % node.port,
            "-d", "postgres",
            "--instance={0}".format(instance)
@@ -560,8 +551,8 @@ def set_replica(self, master, replica, replica_name='replica', synchronous=False
             "primary_conninfo = 'user={0} port={1} application_name={2} sslmode=prefer sslcompression=1'".format(
                 self.user, master.port, replica_name))
         if synchronous:
-            master.append_conf('postgresql.auto.conf', 'synchronous_standby_names="{0}"'.format(replica_name))
-            master.append_conf('postgresql.auto.conf', 'synchronous_commit="remote_apply"')
+            master.append_conf('postgresql.auto.conf', "synchronous_standby_names='{0}'".format(replica_name))
+            master.append_conf('postgresql.auto.conf', "synchronous_commit='remote_apply'")
         master.reload()

     def wrong_wal_clean(self, node, wal_size):
@@ -604,3 +595,36 @@ def del_test_dir(self, module_name, fname):
             os.rmdir(os.path.join(self.tmp_path, module_name))
         except:
             pass
+
+    def pgdata_content(self, directory):
+        """ return dict with directory content"""
+        dirs_to_ignore = ['pg_xlog', 'pg_wal', 'pg_log', 'pg_stat_tmp', 'pg_subtrans', 'pg_notify']
+        files_to_ignore = ['postmaster.pid', 'postmaster.opts']
+        suffixes_to_ignore = ('_ptrack', '_vm', '_fsm')
+        directory_dict = {}
+        directory_dict['pgdata'] = directory
+        directory_dict['files'] = {}
+        for root, dirs, files in os.walk(directory, followlinks=True):
+            dirs[:] = [d for d in dirs if d not in dirs_to_ignore]
+            for file in files:
+                if file in files_to_ignore or file.endswith(suffixes_to_ignore):
+                    continue
+                file = os.path.join(root,file)
+                file_relpath = os.path.relpath(file, directory)
+                directory_dict['files'][file_relpath] = hashlib.md5(open(file, 'rb').read()).hexdigest()
+        return directory_dict
+
+    def compare_pgdata(self, original_pgdata, restored_pgdata):
+        """ return dict with directory content"""
+        fail = False
+        error_message = ''
+        for file in original_pgdata['files']:
+            if file in restored_pgdata['files']:
+                if original_pgdata['files'][file] != restored_pgdata['files'][file]:
+                    error_message += '\nChecksumm mismatch.\n File_old: {0}\n File_new: {1}'.format(
+                        os.path.join(original_pgdata['pgdata'], file), os.path.join(restored_pgdata['pgdata'], file))
+                    fail = True
+            else:
+                error_message += '\nFile dissappearance. File: {0}/{1}'.format(restored_pgdata['pgdata'], file)
+                fail = True
+        self.assertFalse(fail, error_message)
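
The new pgdata_content()/compare_pgdata() helpers let a test diff two data directories by per-file MD5, ignoring WAL, logs and ptrack/vm/fsm forks. A hypothetical usage sketch inside a test method (the node, backup_dir and instance names are illustrative, not from this commit):

        # Snapshot PGDATA, take a backup, wipe the node and restore it.
        pgdata_before = self.pgdata_content(node.data_dir)

        self.backup_node(backup_dir, 'node', node)
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        # compare_pgdata() fails the test with a list of checksum
        # mismatches and missing files if the directories diverge.
        pgdata_after = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata_before, pgdata_after)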

tests/pgpro589.py

Lines changed: 1 addition & 1 deletion
@@ -87,7 +87,7 @@ def test_pgpro589(self):
                 repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertTrue(
-                'INFO: wait for LSN' in e.message
+                'INFO: wait for WAL segment' in e.message
                 and 'ERROR: switched WAL segment' in e.message
                 and 'could not be archived' in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
