@@ -2487,5 +2487,124 @@ def test_merge_correct_inheritance_1(self):
 
         self.del_test_dir(module_name, fname)
 
+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
+    def test_multi_timeline_merge(self):
2493+ """
2494+ Check that backup in PAGE mode choose
2495+ parent backup correctly:
2496+ t12 /---P-->
2497+ ...
2498+ t3 /---->
2499+ t2 /---->
2500+ t1 -F-----D->
2501+
2502+ P must have F as parent
2503+ """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        node.safe_psql("postgres", "create extension pageinspect")
+
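+        # fall back to amcheck_next on servers that do not ship
+        # a suitable amcheck extension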
+        try:
+            node.safe_psql(
+                "postgres",
+                "create extension amcheck")
+        except QueryException as e:
+            node.safe_psql(
+                "postgres",
+                "create extension amcheck_next")
+
+        node.pgbench_init(scale=20)
+        full_id = self.backup_node(backup_dir, 'node', node)
+
+        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
+        pgbench.wait()
+
+        self.backup_node(backup_dir, 'node', node, backup_type='delta')
+
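+        # restore only the FULL backup and promote the node,
+        # which switches it to a new timeline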
+        node.cleanup()
+        self.restore_node(
+            backup_dir, 'node', node, backup_id=full_id,
+            options=[
+                '--recovery-target=immediate',
+                '--recovery-target-action=promote'])
+
+        node.slow_start()
+
+        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
+        pgbench.wait()
+
+        # create timelines
+        for i in range(2, 7):
+            node.cleanup()
+            self.restore_node(
+                backup_dir, 'node', node,
+                options=[
+                    '--recovery-target=latest',
+                    '--recovery-target-action=promote',
+                    '--recovery-target-timeline={0}'.format(i)])
+            node.slow_start()
+
+            # at this point there are i+1 timelines
+            pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum'])
+            pgbench.wait()
+
+            # create backups at timelines 2, 4 and 6
+            if i % 2 == 0:
+                self.backup_node(backup_dir, 'node', node, backup_type='page')
+
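+        # the final PAGE backup is taken on the last timeline; its chain
+        # of parents reaches back across the timelines to the FULL backup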
+        page_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+        pgdata = self.pgdata_content(node.data_dir)
+
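+        # merge the PAGE backup with its ancestors into a single FULL backup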
+        self.merge_backup(backup_dir, 'node', page_id)
+
+        result = node.safe_psql(
+            "postgres", "select * from pgbench_accounts")
+
+        node_restored = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node_restored'))
+        node_restored.cleanup()
+
+        self.restore_node(backup_dir, 'node', node_restored)
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+
+        self.set_auto_conf(node_restored, {'port': node_restored.port})
+        node_restored.slow_start()
+
+        result_new = node_restored.safe_psql(
+            "postgres", "select * from pgbench_accounts")
+
+        self.assertEqual(result, result_new)
+
+        self.compare_pgdata(pgdata, pgdata_restored)
+
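+        # run amcheck-based validation against both the source and the restored node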
+        self.checkdb_node(
+            backup_dir,
+            'node',
+            options=[
+                '--amcheck',
+                '-d', 'postgres', '-p', str(node.port)])
+
+        self.checkdb_node(
+            backup_dir,
+            'node',
+            options=[
+                '--amcheck',
+                '-d', 'postgres', '-p', str(node_restored.port)])
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
+
 # 1. Need new test with corrupted FULL backup
 # 2. different compression levels