@@ -46,6 +46,7 @@ def test_pgpro434_1(self):
         self.add_instance(backup_dir, 'node', node)
 
         # Make backup
+        sleep(5)
         self.backup_node(backup_dir, 'node', node)
         node.cleanup()
 
@@ -59,8 +60,9 @@ def test_pgpro434_1(self):
         self.del_test_dir(module_name, fname)
 
     # @unittest.skip("skip")
+    @unittest.expectedFailure
     def test_pgpro434_2(self):
-        """Check that timelines are correct"""
+        """Check that timelines are correct. WAITING PGPRO-1053 for --immediate. replace time"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -81,15 +83,19 @@ def test_pgpro434_2(self):
         recovery_time = self.show_pb(backup_dir, 'node', backup_id)["recovery-time"]
         node.safe_psql(
             "postgres",
-            "insert into t_heap select 100501 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
+            "insert into t_heap select 100501 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,1) i")
 
         # SECOND TIMELINE
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Second timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
+        self.assertFalse(node.execute("postgres", "select exists(select 1 from t_heap where id = 100501)")[0][0],
+            'data after restore not equal to original data')
         node.safe_psql(
             "postgres",
             "insert into t_heap select 2 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(100,200) i")
@@ -103,6 +109,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('third timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -120,6 +128,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Fourth timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -128,6 +138,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Fifth timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -136,6 +148,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Sixth timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -147,7 +161,7 @@ def test_pgpro434_2(self):
             'data after restore not equal to original data')
 
         # Clean after yourself
-        # self.del_test_dir(module_name, fname)
+        self.del_test_dir(module_name, fname)
 
     # @unittest.skip("skip")
     def test_pgpro434_3(self):
@@ -219,7 +233,7 @@ def test_arhive_push_file_exists(self):
 
         os.remove(file)
         sleep(5)
-        node.safe_psql('postgres', 'select pg_switch_xlog()')
+        node.safe_psql('postgres', 'select pg_switch_wal()')
 
         with open(log_file, 'r') as f:
             log_content = f.read()
@@ -229,9 +243,10 @@ def test_arhive_push_file_exists(self):
         # Clean after yourself
         self.del_test_dir(module_name, fname)
 
-    #@unittest.expectedFailure
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
     def test_replica_archive(self):
-        """make node withput archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
+        """make node without archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
@@ -240,6 +255,7 @@ def test_replica_archive(self):
             pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
             )
         self.init_pb(backup_dir)
+        # ADD INSTANCE 'MASTER'
         self.add_instance(backup_dir, 'master', master)
         # force more frequent wal switch
         master.start()
@@ -258,7 +274,7 @@ def test_replica_archive(self):
         self.restore_node(backup_dir, 'master', replica)
         self.set_replica(master, replica, synchronous=True)
         self.set_archiving(backup_dir, 'replica', replica, replica=True)
-        replica.start({"-t": "600"})
+        replica.start()
 
         # Check data correctness on replica
         after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -269,6 +285,7 @@ def test_replica_archive(self):
             "postgres",
             "insert into t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(256,512) i")
         before = master.safe_psql("postgres", "SELECT * FROM t_heap")
+        # ADD INSTANCE 'REPLICA'
         self.add_instance(backup_dir, 'replica', replica)
         backup_id = self.backup_node(backup_dir, 'replica', replica, options=['--archive-timeout=30',
             '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
@@ -306,3 +323,62 @@ def test_replica_archive(self):
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_master_and_replica_concurrent_archiving(self):
+        """make node 'master' with archiving, take archive backup and turn it into replica, set replica with archiving, make archive backup from replica, make archive backup from master"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
+            )
+        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
+        replica.cleanup()
+
+        self.init_pb(backup_dir)
+        # ADD INSTANCE 'MASTER'
+        self.add_instance(backup_dir, 'master', master)
+        self.set_archiving(backup_dir, 'master', master)
+        master.start()
+
+        master.psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
+
+        # TAKE FULL ARCHIVE BACKUP FROM MASTER
+        self.backup_node(backup_dir, 'master', master)
+        # GET LOGICAL CONTENT FROM MASTER
+        before = master.safe_psql("postgres", "SELECT * FROM t_heap")
+        # GET PHYSICAL CONTENT FROM MASTER
+        pgdata_master = self.pgdata_content(master.data_dir)
+
+        # Settings for Replica
+        self.restore_node(backup_dir, 'master', replica)
+        # CHECK PHYSICAL CORRECTNESS on REPLICA
+        pgdata_replica = self.pgdata_content(replica.data_dir)
+        self.compare_pgdata(pgdata_master, pgdata_replica)
+
+        self.set_replica(master, replica, synchronous=True)
+        # ADD INSTANCE REPLICA
+        self.add_instance(backup_dir, 'replica', replica)
+        # SET ARCHIVING FOR REPLICA
+        self.set_archiving(backup_dir, 'replica', replica, replica=True)
+        replica.start()
+
+        # CHECK LOGICAL CORRECTNESS on REPLICA
+        after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
+        self.assertEqual(before, after)
+
+        # TAKE FULL ARCHIVE BACKUP FROM REPLICA
+        backup_id = self.backup_node(backup_dir, 'replica', replica, options=['--archive-timeout=30',
+            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
+        self.validate_pb(backup_dir, 'replica')
+        self.assertEqual('OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
+
+        # TAKE FULL ARCHIVE BACKUP FROM MASTER
+        backup_id = self.backup_node(backup_dir, 'master', master)
+        self.validate_pb(backup_dir, 'master')
+        self.assertEqual('OK', self.show_pb(backup_dir, 'master', backup_id)['status'])
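
Review note: the `while node.safe_psql(...) == 't\n': sleep(1)` loop added after each restore appears five times in test_pgpro434_2 and spins forever if a restored node never leaves recovery. A bounded helper along these lines could replace it; this is only a sketch, and `wait_until_promoted` is a hypothetical name, not part of this patch or the ProbackupTest API:

```python
from time import sleep

def wait_until_promoted(node, timeout=60):
    # Poll pg_is_in_recovery() once a second until the restored node
    # finishes recovery, failing loudly instead of hanging the suite.
    for _ in range(timeout):
        if node.safe_psql("postgres", "select pg_is_in_recovery()") != 't\n':
            return
        sleep(1)
    raise AssertionError('node still in recovery after {0}s'.format(timeout))
```

Each `node.start()` in test_pgpro434_2 would then be followed by a single `wait_until_promoted(node)` call.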
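
The `pg_switch_xlog()` to `pg_switch_wal()` change tracks the PostgreSQL 10 rename, so as written test_arhive_push_file_exists will now fail against 9.x servers. If the suite still needs to cover both, choosing the function by server version is one option; a sketch, assuming `safe_psql` returns the query output as text with a trailing newline (as the `'t\n'` comparisons elsewhere suggest):

```python
def switch_wal(node):
    # pg_switch_xlog() was renamed to pg_switch_wal() in PostgreSQL 10.
    version = int(node.safe_psql("postgres", "show server_version_num").strip())
    name = 'pg_switch_wal' if version >= 100000 else 'pg_switch_xlog'
    node.safe_psql('postgres', 'select {0}()'.format(name))
```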