
Commit b1e849a

validate rework, self.cmd and self.output added
1 parent 777ab09 commit b1e849a

14 files changed, +230 -92 lines

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -29,6 +29,7 @@
 /tests/__pycache__/
 /tests/tmp_dirs/
 /tests/*pyc
+/helpers/*pyc
 
 # Extra files
 /datapagemap.c

tests/helpers/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+__all__ = ['ptrack_helpers', 'expected_errors']
+#from . import *

tests/ptrack_helpers.py renamed to tests/helpers/ptrack_helpers.py

Lines changed: 41 additions & 24 deletions
@@ -136,7 +136,8 @@ def __init__(self, *args, **kwargs):
         self.test_env["LC_MESSAGES"] = "C"
         self.test_env["LC_TIME"] = "C"
 
-        self.dir_path = os.path.dirname(os.path.realpath(__file__))
+        self.helpers_path = os.path.dirname(os.path.realpath(__file__))
+        self.dir_path = os.path.abspath(os.path.join(self.helpers_path, os.pardir))
         try:
             os.makedirs(os.path.join(self.dir_path, "tmp_dirs"))
         except:
@@ -213,12 +214,16 @@ def get_md5_per_page_for_fork(self, file, size):
         os.close(file)
         return md5_per_page
 
-    def get_ptrack_bits_per_page_for_fork(self, file, size):
+    def get_ptrack_bits_per_page_for_fork(self, node, file, size):
+        if self.get_pgpro_edition(node) == 'enterprise':
+            header_size = 48
+        else:
+            header_size = 24
         ptrack_bits_for_fork = []
         byte_size = os.path.getsize(file + '_ptrack')
-        byte_size_minus_header = byte_size - 24
+        byte_size_minus_header = byte_size - header_size
         file = os.open(file + '_ptrack', os.O_RDONLY)
-        os.lseek(file, 24, 0)
+        os.lseek(file, header_size, 0)
         lot_of_bytes = os.read(file, byte_size_minus_header)
         for byte in lot_of_bytes:
             byte_inverted = bin(ord(byte))[2:].rjust(8, '0')[::-1]
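
The enterprise edition keeps a larger header in the ptrack fork (48 bytes instead of 24), so the helper now skips a variable-sized header before decoding the bit map. As a rough standalone illustration of that decoding (not the helper itself; the sample bytes below are invented), every byte past the header expands into eight per-page bits, least significant bit first:

    def decode_ptrack_bits(raw, enterprise=False):
        # Skip the fork header: 48 bytes on the enterprise edition, 24 otherwise.
        header_size = 48 if enterprise else 24
        bits = []
        for byte in bytearray(raw[header_size:]):  # bytearray() yields ints on py2 and py3
            # e.g. 0x05 -> '00000101' -> reversed '10100000' -> pages 0 and 2 are marked
            bits.extend(int(b) for b in bin(byte)[2:].rjust(8, '0')[::-1])
        return bits

    # invented sample: an all-zero 24-byte header followed by one data byte
    print(decode_ptrack_bits(b'\x00' * 24 + b'\x05'))  # [1, 0, 1, 0, 0, 0, 0, 0]
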
@@ -295,7 +300,7 @@ def check_ptrack_clean(self, idx_dict, size):
 
     def run_pb(self, command, async=False):
         try:
-            #print [self.probackup_path] + command
+            self.cmd = [' '.join(map(str,[self.probackup_path] + command))]
             if async is True:
                 return subprocess.Popen(
                     [self.probackup_path] + command,
@@ -304,23 +309,21 @@ def run_pb(self, command, async=False):
                     env=self.test_env
                 )
             else:
-                output = subprocess.check_output(
+                self.output = subprocess.check_output(
                     [self.probackup_path] + command,
                     stderr=subprocess.STDOUT,
                     env=self.test_env
                 )
                 if command[0] == 'backup':
-                    if '-q' in command or '--quiet' in command:
-                        return None
-                    elif '-v' in command or '--verbose' in command:
-                        return output
-                    else:
-                        # return backup ID
-                        for line in output.splitlines():
-                            if 'INFO: Backup' and 'completed' in line:
-                                return line.split()[2]
+                    # return backup ID
+                    for line in self.output.splitlines():
+                        if 'INFO: Backup' and 'completed' in line:
+                            return line.split()[2]
+                            # backup_id = line.split()[2]
+                            # return {'cmd': cmd, 'output': output, 'backup_id': backup_id}
                 else:
-                    return output
+                    return self.output
+                    # return {'cmd': cmd, 'output': output}
         except subprocess.CalledProcessError as e:
             raise ProbackupException(e.output, e.cmd)
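
run_pb() now keeps the executed command line in self.cmd and the raw output in self.output, and for backup commands it always returns the backup ID parsed from the log instead of branching on --quiet/--verbose. A minimal standalone sketch of that parsing (the log line and ID below are made up for the example; here both substrings are checked explicitly):

    def extract_backup_id(output):
        # look for a line of the form "INFO: Backup <id> ... completed"
        for line in output.splitlines():
            if 'INFO: Backup' in line and 'completed' in line:
                return line.split()[2]
        return None

    print(extract_backup_id("INFO: Backup QWERTY1 completed"))  # QWERTY1
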

@@ -481,25 +484,34 @@ def get_recovery_conf(self, node):
             out_dict[key.strip()] = value.strip(" '").replace("'\n", "")
         return out_dict
 
-    def set_archiving_conf(self, node, archive_dir):
+    def set_archiving_conf(self, node, archive_dir=False, replica=False):
+        if not archive_dir:
+            archive_dir = self.arcwal_dir(node)
+
+        if replica:
+            archive_mode = 'always'
+            node.append_conf('postgresql.auto.conf', 'hot_standby = on')
+        else:
+            archive_mode = 'on'
+
         node.append_conf(
             "postgresql.auto.conf",
             "wal_level = archive"
         )
         node.append_conf(
             "postgresql.auto.conf",
-            "archive_mode = on"
+            "archive_mode = {0}".format(archive_mode)
         )
         if os.name == 'posix':
             node.append_conf(
                 "postgresql.auto.conf",
                 "archive_command = 'test ! -f {0}/%f && cp %p {0}/%f'".format(archive_dir)
             )
-        elif os.name == 'nt':
-            node.append_conf(
-                "postgresql.auto.conf",
-                "archive_command = 'copy %p {0}\\%f'".format(archive_dir)
-            )
+        #elif os.name == 'nt':
+        #    node.append_conf(
+        #        "postgresql.auto.conf",
+        #        "archive_command = 'copy %p {0}\\%f'".format(archive_dir)
+        #        )
 
     def wrong_wal_clean(self, node, wal_size):
         wals_dir = os.path.join(self.backup_dir(node), "wal")
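
With the new replica flag, set_archiving_conf() switches archive_mode to 'always' and enables hot_standby, which is what allows a standby to keep archiving WAL. A small sketch, independent of the helper, of the settings that end up appended (the archive directory below is a placeholder):

    def archiving_conf_lines(archive_dir, replica=False):
        lines = ['wal_level = archive']
        if replica:
            # a standby only archives when archive_mode is 'always'
            lines.append('hot_standby = on')
            lines.append('archive_mode = always')
        else:
            lines.append('archive_mode = on')
        lines.append("archive_command = 'test ! -f {0}/%f && cp %p {0}/%f'".format(archive_dir))
        return lines

    print('\n'.join(archiving_conf_lines('/tmp/arcwal', replica=True)))
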
@@ -517,4 +529,9 @@ def guc_wal_block_size(self, node):
         var = node.execute("postgres", "select setting from pg_settings where name = 'wal_block_size'")
         return int(var[0][0])
 
-    # def ptrack_node(self, ptrack_enable=False, wal_level='minimal', max_wal_senders='2', allow_replication=True)
+    def get_pgpro_edition(self, node):
+        if node.execute("postgres", "select exists(select 1 from pg_proc where proname = 'pgpro_edition')")[0][0]:
+            var = node.execute("postgres", "select pgpro_edition()")
+            return str(var[0][0])
+        else:
+            return False
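
get_pgpro_edition() probes pg_proc for pgpro_edition() before calling it, so the helper also works on vanilla PostgreSQL where the function does not exist. The same two queries in a standalone form (the DB-API style cursor here is an assumption, not part of the test suite):

    EDITION_PROBE = "select exists(select 1 from pg_proc where proname = 'pgpro_edition')"
    EDITION_QUERY = "select pgpro_edition()"

    def pgpro_edition(cursor):
        # returns e.g. 'enterprise' on Postgres Pro, False on vanilla PostgreSQL
        cursor.execute(EDITION_PROBE)
        if cursor.fetchone()[0]:
            cursor.execute(EDITION_QUERY)
            return str(cursor.fetchone()[0])
        return False
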

tests/ptrack_clean.py

Lines changed: 3 additions & 3 deletions
@@ -45,7 +45,7 @@ def test_ptrack_clean(self):
             idx_ptrack[i]['path'] = self.get_fork_path(node, i)
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
 
         # Update everything, vacuum it and make PTRACK BACKUP
@@ -62,7 +62,7 @@ def test_ptrack_clean(self):
             idx_ptrack[i]['path'] = self.get_fork_path(node, i)
             # # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             # check that ptrack bits are cleaned
             self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
 
@@ -81,7 +81,7 @@ def test_ptrack_clean(self):
             idx_ptrack[i]['path'] = self.get_fork_path(node, i)
             # # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             # check that ptrack bits are cleaned
             self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

tests/ptrack_cluster.py

Lines changed: 5 additions & 5 deletions
@@ -63,7 +63,7 @@ def test_ptrack_cluster_btree(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
             self.check_ptrack_sanity(idx_ptrack[i])
@@ -120,7 +120,7 @@ def test_ptrack_cluster_spgist(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
             self.check_ptrack_sanity(idx_ptrack[i])
@@ -177,7 +177,7 @@ def test_ptrack_cluster_brin(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
             self.check_ptrack_sanity(idx_ptrack[i])
@@ -234,7 +234,7 @@ def test_ptrack_cluster_gist(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
             self.check_ptrack_sanity(idx_ptrack[i])
@@ -291,7 +291,7 @@ def test_ptrack_cluster_gin(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
             self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_move_to_tablespace.py

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ def test_ptrack_recovery(self):
             idx_ptrack[i]['path'] = self.get_fork_path(node, i)
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             # check that ptrack has correct bits after recovery
             self.check_ptrack_recovery(idx_ptrack[i])

tests/ptrack_recovery.py

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ def test_ptrack_recovery(self):
         for i in idx_ptrack:
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             # check that ptrack has correct bits after recovery
             self.check_ptrack_recovery(idx_ptrack[i])

tests/ptrack_vacuum.py

Lines changed: 2 additions & 2 deletions
@@ -51,7 +51,7 @@ def test_ptrack_vacuum(self):
         self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
         for i in idx_ptrack:
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
             self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
 
         # Delete some rows, vacuum it and make checkpoint
@@ -69,7 +69,7 @@ def test_ptrack_vacuum(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
             self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_vacuum_bits_frozen.py

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ def test_ptrack_vacuum_bits_frozen(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
             self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_vacuum_bits_visibility.py

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ def test_ptrack_vacuum_bits_visibility(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
             self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_vacuum_full.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ def test_ptrack_vacuum_full(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity, the most important part
             self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_vacuum_truncate.py

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ def test_ptrack_vacuum_truncate(self):
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
+                node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
             self.check_ptrack_sanity(idx_ptrack[i])

tests/replica.py

Lines changed: 65 additions & 6 deletions
@@ -13,16 +13,15 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super(ReplicaTest, self).__init__(*args, **kwargs)
 
-    @classmethod
-    def tearDownClass(cls):
-        stop_all()
+    # @classmethod
+    # def tearDownClass(cls):
+    #     stop_all()
 
     # @unittest.skip("skip")
     # @unittest.expectedFailure
-    def test_make_simple_replica(self):
+    def test_replica_stream_full_backup(self):
         """
-        make node with archiving, make stream backup,
-        get Recovery Time, try to make pitr to Recovery Time
+        make full stream backup from replica
         """
         fname = self.id().split('.')[3]
         master = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/master".format(fname),
@@ -55,9 +54,69 @@ def test_make_simple_replica(self):
         slave.append_conf('recovery.conf',
             "primary_conninfo = 'user=gsmol port={0} sslmode=prefer sslcompression=1'".format(master.port))
         slave.start({"-t": "600"})
+        # Replica Ready
 
+        # Check replica
         after = slave.execute("postgres", "SELECT * FROM t_heap")
         self.assertEqual(before, after)
 
+        # master.execute("postgres", "checkpoint")
+        master.execute("postgres", "create table t1(a int)")
+
+        # Make backup from replica
         self.assertEqual(self.init_pb(slave), six.b(""))
         self.backup_pb(slave, backup_type='full', options=['--stream'])
+
+    @unittest.skip("skip")
+    def test_replica_archive_full_backup(self):
+        """
+        make full archive backup from replica
+        """
+        fname = self.id().split('.')[3]
+        master = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/master".format(fname),
+            set_archiving=True,
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
+            )
+        master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
+        master.start()
+
+        slave = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/slave".format(fname))
+        slave_port = slave.port
+        slave.cleanup()
+
+        self.assertEqual(self.init_pb(master), six.b(""))
+        self.backup_pb(master, backup_type='full', options=['--stream'])
+
+        master.psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
+
+        before = master.execute("postgres", "SELECT * FROM t_heap")
+
+        id = self.backup_pb(master, backup_type='page', options=['--stream'])
+        self.restore_pb(backup_dir=self.backup_dir(master), data_dir=slave.data_dir)
+
+        # Settings for Replica
+        slave.append_conf('postgresql.auto.conf', 'port = {0}'.format(slave.port))
+        slave.append_conf('postgresql.auto.conf', 'hot_standby = on')
+        # Set Archiving for replica
+        self.set_archiving_conf(slave, replica=True)
+
+        slave.append_conf('recovery.conf', "standby_mode = 'on'")
+        slave.append_conf('recovery.conf',
+            "primary_conninfo = 'user=gsmol port={0} sslmode=prefer sslcompression=1'".format(master.port))
+        slave.start({"-t": "600"})
+        # Replica Started
+
+        # master.execute("postgres", "checkpoint")
+
+        # Check replica
+        after = slave.execute("postgres", "SELECT * FROM t_heap")
+        self.assertEqual(before, after)
+
+        # Make backup from replica
+        self.assertEqual(self.init_pb(slave), six.b(""))
+        self.backup_pb(slave, backup_type='full', options=['--archive-timeout=30'])
+        self.validate_pb(slave)
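
Both replica tests bootstrap the standby the same way: restore a backup of the master into the slave's data directory, then point it at the master via recovery.conf. A standalone sketch of the recovery.conf contents used above (user and settings are taken from the diff; the port value is a placeholder for master.port):

    master_port = 5432  # placeholder; the tests use master.port
    recovery_conf = (
        "standby_mode = 'on'\n"
        "primary_conninfo = 'user=gsmol port={0} sslmode=prefer sslcompression=1'\n"
    ).format(master_port)
    print(recovery_conf)
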
