
Commit 442cef1

PGPRO-688 tests added
1 parent 6db02b6 commit 442cef1

5 files changed: 174 additions, 33 deletions

tests/__init__.py (2 additions, 1 deletion)

@@ -5,11 +5,12 @@
     retention_test, ptrack_clean, ptrack_cluster, \
     ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \
     ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
-    ptrack_vacuum_full, ptrack_vacuum_truncate
+    ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro668
 
 
 def load_tests(loader, tests, pattern):
     suite = unittest.TestSuite()
+    suite.addTests(loader.loadTestsFromModule(pgpro668))
     suite.addTests(loader.loadTestsFromModule(init_test))
     suite.addTests(loader.loadTestsFromModule(option_test))
     suite.addTests(loader.loadTestsFromModule(show_test))
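The added lines register the new pgpro668 module with the package's load_tests() hook, so its tests run together with the rest of the suite. A minimal runner sketch (the entry-point script below is assumed, not part of this commit):

    # run_tests.py (hypothetical): unittest resolves the 'tests' package,
    # which triggers tests/__init__.py:load_tests() and therefore also
    # loads the pgpro668 module added in this commit.
    import unittest

    if __name__ == '__main__':
        suite = unittest.defaultTestLoader.loadTestsFromName('tests')
        unittest.TextTestRunner(verbosity=2).run(suite)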

tests/backup_test.py (7 additions, 7 deletions)

@@ -114,10 +114,13 @@ def test_page_backup_without_full(self):
     # @unittest.skip("123")
     def test_ptrack_threads(self):
         """ptrack multi thread backup mode"""
-        node = self.make_bnode(
-            base_dir="tmp_dirs/backup/ptrack_threads_4",
-            options={"ptrack_enable": "on", 'max_wal_senders': '2'}
-        )
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', "ptrack_enable": "on", 'max_wal_senders': '2'}
+            )
         node.start()
         self.assertEqual(self.init_pb(node), six.b(""))
 
@@ -143,8 +146,6 @@ def test_ptrack_threads_stream(self):
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
             )
-        # node.append_conf("pg_hba.conf", "local replication all trust")
-        # node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
         node.start()
         self.assertEqual(self.init_pb(node), six.b(""))
 
@@ -165,5 +166,4 @@ def test_ptrack_threads_stream(self):
             ))
 
         self.assertEqual(self.show_pb(node)[1]['Status'], six.b("OK"))
-
         node.stop()
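These tests now build their node through make_simple_node() instead of the make_bnode() helper that this commit comments out in ptrack_helpers.py (see below). The body of make_simple_node() is not shown in this diff; the sketch that follows is only a rough reconstruction from the removed make_bnode() code and from the keyword arguments used at the call sites, so the set_archiving, set_replication, initdb_params, and pg_options parameters are assumptions about its signature:

    # Hypothetical sketch, not the actual helper from ptrack_helpers.py.
    def make_simple_node(self, base_dir=None, set_replication=False,
                         set_archiving=False, initdb_params=[], pg_options={}):
        real_base_dir = os.path.join(self.dir_path, base_dir)
        shutil.rmtree(real_base_dir, ignore_errors=True)

        node = get_new_node('test', base_dir=real_base_dir)
        # initdb flags such as --data-checksums; whether testgres' init()
        # accepts them directly like this is an assumption.
        node.init(initdb_params=initdb_params)

        # Per-test GUCs, e.g. wal_level, ptrack_enable, max_wal_senders.
        for key, value in six.iteritems(pg_options):
            node.append_conf("postgresql.auto.conf", "%s = %s" % (key, value))

        if set_archiving:
            node.append_conf("postgresql.auto.conf", "archive_mode = on")
            node.append_conf(
                "postgresql.auto.conf",
                """archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node))
            )
        if set_replication:
            node.append_conf("pg_hba.conf", "local replication all trust")

        return node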

tests/pgpro668.py (new file, 112 additions)

@@ -0,0 +1,112 @@
+import unittest
+import os
+import six
+from .ptrack_helpers import ProbackupTest, ProbackupException
+from datetime import datetime, timedelta
+from testgres import stop_all
+import subprocess
+from sys import exit
+
+
+class SomeTest(ProbackupTest, unittest.TestCase):
+
+    def __init__(self, *args, **kwargs):
+        super(SomeTest, self).__init__(*args, **kwargs)
+
+    # @classmethod
+    # def tearDownClass(cls):
+    #     stop_all()
+
+    def test_archive_node_backup_stream_restore_to_recovery_time(self):
+        """
+        make node with archiving, make stream backup,
+        get Recovery Time, try to make pitr to Recovery Time
+        """
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
+            )
+        node.start()
+
+        self.assertEqual(self.init_pb(node), six.b(""))
+        id = self.backup_pb(node, backup_type='full', options=["--stream"])
+        recovery_time = self.show_pb(node, id=id)['recovery-time']
+
+        node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
+        node.cleanup()
+
+        self.restore_pb(node, options=['--time="{0}"'.format(recovery_time)])
+        node.start({"-t": "600"})
+        self.assertEqual(True, node.status())
+
+    def test_validate_to_recovery_time(self):
+        """
+        make node with archiving, make stream backup,
+        get Recovery Time, validate to Recovery Time
+        Should fail. Waiting PGPRO-688
+        """
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
+            )
+        node.start()
+
+        self.assertEqual(self.init_pb(node), six.b(""))
+        id = self.backup_pb(node, backup_type='full', options=["--stream"])
+        recovery_time = self.show_pb(node, id=id)['recovery-time']
+
+        # Optional
+        #node.psql("postgres", "select pg_create_restore_point('123')")
+        #node.psql("postgres", "select txid_current()")
+        #node.psql("postgres", "select pg_switch_xlog()")
+        self.assertIn(six.b("INFO: backup validation completed successfully on"),
+            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)]))
+        ####
+
+        node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
+        node.cleanup()
+
+        self.restore_pb(node, options=['--time="{0}"'.format(recovery_time)])
+        node.start({"-t": "600"})
+        self.assertEqual(True, node.status())
+
+    def test_archive_node_backup_stream_additional_commit_pitr(self):
+        """
+        make node with archiving, make stream backup, create table t_heap,
+        try to make pitr to Recovery Time, check that t_heap do not exists
+        """
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
+            )
+        node.start()
+
+        self.assertEqual(self.init_pb(node), six.b(""))
+        id = self.backup_pb(node, backup_type='full', options=["--stream"])
+        node.psql("postgres", "create table t_heap(a int)")
+        node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
+        node.cleanup()
+        recovery_time = self.show_pb(node, id=id)['recovery-time']
+        self.restore_pb(node,
+            options=["-j", "4", '--time="{0}"'.format(recovery_time)]
+            )
+        node.start({"-t": "600"})
+        res = node.psql("postgres", 'select * from t_heap')
+        self.assertEqual(True, 'does not exist' in res[2])
+
+
+# Need test for validate time with autonomous backup without archiving
+# We need to forbid validation of autonomous backup by time or xid
+# if archiving is not set
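The closing comment asks for one more test: validating an autonomous (stream-only) backup by time or xid when archiving is not configured should be forbidden. A rough sketch of such a test, reusing the helpers already used above; the expectation that validate_pb() raises ProbackupException is an assumption, since the desired behaviour is exactly what PGPRO-688 is about:

    # Hypothetical test for the TODO above; the expected failure mode is assumed.
    def test_validate_autonomous_backup_by_time_without_archiving(self):
        """
        make node without archiving, make stream backup,
        try to validate it to Recovery Time: should be forbidden
        """
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        node.start()

        self.assertEqual(self.init_pb(node), six.b(""))
        id = self.backup_pb(node, backup_type='full', options=["--stream"])
        recovery_time = self.show_pb(node, id=id)['recovery-time']

        # Without a WAL archive there is nothing to replay up to --time,
        # so validation by time (or xid) should be rejected.
        try:
            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)])
            self.assertEqual(1, 0, 'Validation by time must fail for an autonomous backup')
        except ProbackupException:
            pass

        node.stop()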

tests/ptrack_helpers.py (22 additions, 22 deletions)

@@ -152,25 +152,25 @@ def arcwal_dir(self, node):
     def backup_dir(self, node):
         return os.path.abspath("%s/backup" % node.base_dir)
 
-    def make_bnode(self, base_dir=None, allows_streaming=False, options={}):
-        real_base_dir = os.path.join(self.dir_path, base_dir)
-        shutil.rmtree(real_base_dir, ignore_errors=True)
-
-        node = get_new_node('test', base_dir=real_base_dir)
-        node.init(allows_streaming=allows_streaming)
-
-        if not allows_streaming:
-            node.append_conf("postgresql.auto.conf", "wal_level = hot_standby")
-        node.append_conf("postgresql.auto.conf", "archive_mode = on")
-        node.append_conf(
-            "postgresql.auto.conf",
-            """archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node))
-        )
-
-        for key, value in six.iteritems(options):
-            node.append_conf("postgresql.conf", "%s = %s" % (key, value))
-
-        return node
+    # def make_bnode(self, base_dir=None, allows_streaming=False, options={}):
+    #     real_base_dir = os.path.join(self.dir_path, base_dir)
+    #     shutil.rmtree(real_base_dir, ignore_errors=True)
+    #
+    #     node = get_new_node('test', base_dir=real_base_dir)
+    #     node.init(allows_streaming=allows_streaming)
+    #
+    #     if not allows_streaming:
+    #         node.append_conf("postgresql.auto.conf", "wal_level = hot_standby")
+    #     node.append_conf("postgresql.auto.conf", "archive_mode = on")
+    #     node.append_conf(
+    #         "postgresql.auto.conf",
+    #         """archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node))
+    #     )
+    #
+    #     for key, value in six.iteritems(options):
+    #         node.append_conf("postgresql.conf", "%s = %s" % (key, value))
+    #
+    #     return node
 
     # def print_started(self, fname):
     #     print
 
@@ -318,7 +318,7 @@ def check_ptrack_clean(self, idx_dict, size):
 
     def run_pb(self, command):
         try:
-            # print [self.probackup_path] + command
+            print [self.probackup_path] + command
             output = subprocess.check_output(
                 [self.probackup_path] + command,
                 stderr=subprocess.STDOUT,
 
@@ -417,12 +417,12 @@ def show_pb(self, node, id=None, options=[], as_text=False):
         body = body[::-1]
         # split string in list with string for every header element
         header_split = re.split(" +", header)
-        # CRUNCH, remove last item, because it empty, like that ''
+        # CRUNCH, remove last item, because it`s empty, like that ''
         header_split.pop()
         for backup_record in body:
             # split string in list with string for every backup record element
             backup_record_split = re.split(" +", backup_record)
-            # CRUNCH, remove last item, because it empty, like that ''
+            # CRUNCH, remove last item, because it`s empty, like that ''
            backup_record_split.pop()
         if len(header_split) != len(backup_record_split):
             print warning.format(
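The two comment fixes in show_pb() describe the same quirk: when the text being split ends with whitespace, re.split(" +", ...) returns a trailing empty string, which is why both lists are immediately pop()'ed. A quick illustration (the header line here is made up):

    import re

    header_split = re.split(" +", "ID  Mode  Status ")
    print header_split    # ['ID', 'Mode', 'Status', '']
    header_split.pop()    # drop the trailing empty element, as show_pb() does
    print header_split    # ['ID', 'Mode', 'Status']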

tests/validate_test.py (31 additions, 3 deletions)

@@ -5,6 +5,7 @@
 from datetime import datetime, timedelta
 from testgres import stop_all
 import subprocess
+from sys import exit
 
 
 class ValidateTest(ProbackupTest, unittest.TestCase):
 
@@ -19,6 +20,34 @@ def __init__(self, *args, **kwargs):
         # except:
         #     pass
 
+    # @unittest.skip("123")
+    def test_validate_time(self):
+        """recovery to latest from full backup"""
+        fname = self.id().split('.')[3]
+        print '\n {0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        node.start()
+
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"]
+            )
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        self.assertEqual(self.init_pb(node), six.b(""))
+        id = self.backup_pb(node)
+        recovery_time = self.show_pb(node, id=id)['recovery-time']
+
+        self.assertIn(six.b("INFO: backup validation completed successfully on"),
+            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)]))
+        node.stop()
+
     # @unittest.skip("123")
     def test_validate_wal_1(self):
         """recovery to latest from full backup"""
 
@@ -176,6 +205,7 @@ def test_validate_wal_lost_segment_1(self):
             )
         node.stop()
 
+    # @unittest.skip("123")
     def test_validate_wal_lost_segment_2(self):
         """Loose segment located between backups """
         fname = self.id().split('.')[3]
 
@@ -224,12 +254,10 @@ def test_validate_wal_lost_segment_2(self):
             )
         self.delete_pb(node, id=self.show_pb(node)[1]['ID'])
 
-
         ##### Hole Smokes, Batman! We just lost a wal segment and know nothing about it
         ##### We need archive-push ASAP
         self.backup_pb(node, backup_type='full')
-        self.assertEqual(False,
-            'validation completed successfully' in self.validate_pb(node))
+        self.assertEqual(False, 'validation completed successfully' in self.validate_pb(node))
         ########
 
         node.stop()
