 import unittest
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
 from datetime import datetime, timedelta
-import subprocess, time
+import subprocess
+import time
 
 
 module_name = 'page'
 
 
 class PageBackupTest(ProbackupTest, unittest.TestCase):
 
-    # # @unittest.skip("skip")
-    # # @unittest.expectedFailure
-    # def test_page_check_archive_enabled(self):
-    #     """make node, take page backup without enabled archive, should result in error"""
-    #     self.maxDiff = None
-    #     fname = self.id().split('.')[3]
-    #     backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-    #     node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
-    #         set_replication=True,
-    #         initdb_params=['--data-checksums'],
-    #         pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on'}
-    #         )
-    #
-    #     self.init_pb(backup_dir)
-    #     self.add_instance(backup_dir, 'node', node)
-    #     node.start()
-    #
-    #     try:
-    #         self.backup_node(backup_dir, 'node', node, backup_type='page', options=['--stream'])
-    #         # we should die here because exception is what we expect to happen
-    #         self.assertEqual(1, 0, "Expecting Error because archive_mode disabled.\n Output: {0} \n CMD: {1}".format(
-    #             repr(self.output), self.cmd))
-    #     except ProbackupException as e:
-    #         self.assertIn('ERROR: Archiving must be enabled for PAGE backup\n', e.message,
-    #             '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
-    #
-    #     # Clean after yourself
-    #     self.del_test_dir(module_name, fname)
+    # @unittest.skip("skip")
+    def test_page_vacuum_truncate(self):
+        """make node, create table, take full backup,
+        delete last 3 pages, vacuum relation,
+        take page backup, take second page backup,
+        restore last page backup and check data correctness"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'wal_level': 'replica',
+                'max_wal_senders': '2',
+                'checkpoint_timeout': '300s'
+            }
+            )
+        node_restored = self.make_simple_node(
+            base_dir="{0}/{1}/node_restored".format(module_name, fname),
+            )
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node_restored.cleanup()
+        node.start()
+        self.create_tblspace_in_node(node, 'somedata')
+
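+        # Create a sequence and a table in the 'somedata' tablespace, then vacuum it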
+        node.safe_psql(
+            "postgres",
+            "create sequence t_seq; "
+            "create table t_heap tablespace somedata as select i as id, "
+            "md5(i::text) as text, "
+            "md5(repeat(i::text,10))::tsvector as tsvector "
+            "from generate_series(0,1024) i;"
+            )
+        node.safe_psql(
+            "postgres",
+            "vacuum t_heap"
+            )
+
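+        # FULL BACKUP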
+        self.backup_node(backup_dir, 'node', node)
+
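+        # Delete the tail of t_heap and vacuum so its last pages are truncated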
+        node.safe_psql(
+            "postgres",
+            "delete from t_heap where ctid >= '(11,0)'"
+            )
+        node.safe_psql(
+            "postgres",
+            "vacuum t_heap"
+            )
+
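+        # Take two PAGE backups in a row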
+        self.backup_node(
+            backup_dir, 'node', node, backup_type='page',
+            options=['--log-level-file=verbose']
+            )
+
+        self.backup_node(
+            backup_dir, 'node', node, backup_type='page',
+            options=['--log-level-file=verbose']
+            )
+
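+        # Remember the source data directory for the physical comparison below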
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
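+        # Restore the latest PAGE backup, remapping the tablespace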
+        old_tablespace = self.get_tblspace_path(node, 'somedata')
+        new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')
+
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            options=["-j", "4", "-T", "{0}={1}".format(
+                old_tablespace, new_tablespace)]
+            )
+
+        # Physical comparison
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
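+        # Start the restored node on its own port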
+        node_restored.append_conf(
+            "postgresql.auto.conf", "port = {0}".format(node_restored.port))
+        node_restored.start()
+
+        # Clean after yourself
+        # self.del_test_dir(module_name, fname)
 
     # @unittest.skip("skip")
     def test_page_stream(self):
         """make archive node, take full and page stream backups, restore them and check data correctness"""
         self.maxDiff = None
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
             set_replication=True,
             initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on'}
+            pg_options={
+                'wal_level': 'replica',
+                'max_wal_senders': '2',
+                'checkpoint_timeout': '30s',
+                'ptrack_enable': 'on'}
             )
 
         self.init_pb(backup_dir)
@@ -59,33 +123,50 @@ def test_page_stream(self):
         # FULL BACKUP
         node.safe_psql(
             "postgres",
-            "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector "
+            "from generate_series(0,100) i")
+
         full_result = node.execute("postgres", "SELECT * FROM t_heap")
-        full_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream'])
+        full_backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='full', options=['--stream'])
 
-        #PAGE BACKUP
+        # PAGE BACKUP
         node.safe_psql(
             "postgres",
-            "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
+            "insert into t_heap select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector "
+            "from generate_series(100,200) i")
         page_result = node.execute("postgres", "SELECT * FROM t_heap")
-        page_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page', options=['--stream'])
+        page_backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='page', options=['--stream'])
 
         # Drop Node
         node.cleanup()
 
         # Check full backup
-        self.assertIn("INFO: Restore of backup {0} completed.".format(full_backup_id),
-            self.restore_node(backup_dir, 'node', node, backup_id=full_backup_id, options=["-j", "4"]),
-            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
+        self.assertIn(
+            "INFO: Restore of backup {0} completed.".format(full_backup_id),
+            self.restore_node(
+                backup_dir, 'node', node,
+                backup_id=full_backup_id, options=["-j", "4"]),
+            '\n Unexpected Error Message: {0}\n'
+            ' CMD: {1}'.format(repr(self.output), self.cmd))
         node.start()
         full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
         self.assertEqual(full_result, full_result_new)
         node.cleanup()
 
         # Check page backup
-        self.assertIn("INFO: Restore of backup {0} completed.".format(page_backup_id),
-            self.restore_node(backup_dir, 'node', node, backup_id=page_backup_id, options=["-j", "4"]),
-            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
+        self.assertIn(
+            "INFO: Restore of backup {0} completed.".format(page_backup_id),
+            self.restore_node(
+                backup_dir, 'node', node,
+                backup_id=page_backup_id, options=["-j", "4"]),
+            '\n Unexpected Error Message: {0}\n'
+            ' CMD: {1}'.format(repr(self.output), self.cmd))
         node.start()
         page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
         self.assertEqual(page_result, page_result_new)