@@ -72,12 +72,10 @@ maybe_run_cluster_dependent_tests() ->
7272run_cluster_dependent_tests (SecondaryNode ) ->
7373 SecondaryNodeS = atom_to_list (SecondaryNode ),
7474
75- cover :stop (SecondaryNode ),
7675 ok = control_action (stop_app , []),
77- ok = control_action ( reset , [] ),
76+ ok = safe_reset ( ),
7877 ok = control_action (cluster , [SecondaryNodeS ]),
7978 ok = control_action (start_app , []),
80- cover :start (SecondaryNode ),
8179 ok = control_action (start_app , SecondaryNode , [], []),
8280
8381 io :format (" Running cluster dependent tests with node ~p~n " , [SecondaryNode ]),
@@ -908,7 +906,7 @@ test_cluster_management2(SecondaryNode) ->
908906 ok = assert_ram_node (),
909907
910908 % % join cluster as a ram node
911- ok = control_action ( reset , [] ),
909+ ok = safe_reset ( ),
912910 ok = control_action (force_cluster , [SecondaryNodeS , " invalid1@invalid" ]),
913911 ok = control_action (start_app , []),
914912 ok = control_action (stop_app , []),
@@ -965,29 +963,30 @@ test_cluster_management2(SecondaryNode) ->
965963 ok = assert_disc_node (),
966964
967965 % % turn a disk node into a ram node
968- ok = control_action (reset , []),
966+ % %
967+ % % can't use safe_reset here since for some reason nodes()==[] and
968+ % % yet w/o stopping coverage things break
969+ with_suspended_cover (
970+ [SecondaryNode ], fun () -> ok = control_action (reset , []) end ),
969971 ok = control_action (cluster , [SecondaryNodeS ]),
970972 ok = control_action (start_app , []),
971973 ok = control_action (stop_app , []),
972974 ok = assert_ram_node (),
973975
974976 % % NB: this will log an inconsistent_database error, which is harmless
975- % % Turning cover on / off is OK even if we're not in general using cover,
976- % % it just turns the engine on / off, doesn't actually log anything.
977- cover :stop ([SecondaryNode ]),
978- true = disconnect_node (SecondaryNode ),
979- pong = net_adm :ping (SecondaryNode ),
980- cover :start ([SecondaryNode ]),
977+ with_suspended_cover (
978+ [SecondaryNode ], fun () ->
979+ true = disconnect_node (SecondaryNode ),
980+ pong = net_adm :ping (SecondaryNode )
981+ end ),
981982
982983 % % leaving a cluster as a ram node
983- ok = control_action ( reset , [] ),
984+ ok = safe_reset ( ),
984985 % % ...and as a disk node
985986 ok = control_action (cluster , [SecondaryNodeS , NodeS ]),
986987 ok = control_action (start_app , []),
987988 ok = control_action (stop_app , []),
988- cover :stop (SecondaryNode ),
989- ok = control_action (reset , []),
990- cover :start (SecondaryNode ),
989+ ok = safe_reset (),
991990
992991 % % attempt to leave cluster when no other node is alive
993992 ok = control_action (cluster , [SecondaryNodeS , NodeS ]),
@@ -1002,22 +1001,39 @@ test_cluster_management2(SecondaryNode) ->
10021001 control_action (cluster , [SecondaryNodeS ]),
10031002
10041003 % % leave system clustered, with the secondary node as a ram node
1005- ok = control_action (force_reset , []),
1004+ with_suspended_cover (
1005+ [SecondaryNode ], fun () -> ok = control_action (force_reset , []) end ),
10061006 ok = control_action (start_app , []),
10071007 % % Yes, this is rather ugly. But since we're a clustered Mnesia
10081008 % % node and we're telling another clustered node to reset itself,
10091009 % % we will get disconnected half way through causing a
10101010 % % badrpc. This never happens in real life since rabbitmqctl is
1011- % % not a clustered Mnesia node.
1012- cover :stop (SecondaryNode ),
1013- {badrpc , nodedown } = control_action (force_reset , SecondaryNode , [], []),
1014- pong = net_adm :ping (SecondaryNode ),
1015- cover :start (SecondaryNode ),
1011+ % % not a clustered Mnesia node and is a hidden node.
1012+ with_suspended_cover (
1013+ [SecondaryNode ],
1014+ fun () ->
1015+ {badrpc , nodedown } =
1016+ control_action (force_reset , SecondaryNode , [], []),
1017+ pong = net_adm :ping (SecondaryNode )
1018+ end ),
10161019 ok = control_action (cluster , SecondaryNode , [NodeS ], []),
10171020 ok = control_action (start_app , SecondaryNode , [], []),
10181021
10191022 passed .
10201023
1024+ % % 'cover' does not cope at all well with nodes disconnecting, which
1025+ % % happens as part of reset. So we turn it off temporarily. That is ok
1026+ % % even if we're not in general using cover, it just turns the engine
1027+ % % on / off and doesn't log anything.
%% Reset this broker node with code-coverage suspended on every
%% currently connected node; see with_suspended_cover/2 for why
%% cover must be stopped around a reset.
safe_reset() ->
    with_suspended_cover(nodes(),
                         fun () -> control_action(reset, []) end).
1030+
%% Run Fun with the 'cover' engine stopped on Nodes, restarting it
%% afterwards and returning Fun's result. 'cover' does not cope well
%% with nodes disconnecting (which happens as part of reset), so we
%% suspend it around such operations. Stopping/starting cover is
%% harmless even when coverage is not in use — it only toggles the
%% engine and logs nothing.
%%
%% Fix: restart cover in an 'after' clause so that coverage is
%% restored even when Fun() raises; previously an exception left
%% cover stopped for every subsequent test.
with_suspended_cover(Nodes, Fun) ->
    cover:stop(Nodes),
    try
        Fun()
    after
        cover:start(Nodes)
    end.
1036+
10211037test_user_management () ->
10221038
10231039 % % lots if stuff that should fail
@@ -2388,10 +2404,10 @@ test_dropwhile(VQ0) ->
23882404 fun (N , Props ) -> Props # message_properties {expiry = N } end , VQ0 ),
23892405
23902406 % % drop the first 5 messages
2391- {undefined , VQ2 } = rabbit_variable_queue :dropwhile (
2392- fun (# message_properties { expiry = Expiry }) ->
2393- Expiry =< 5
2394- end , false , VQ1 ),
2407+ {_ , undefined , VQ2 } = rabbit_variable_queue :dropwhile (
2408+ fun (# message_properties { expiry = Expiry }) ->
2409+ Expiry =< 5
2410+ end , false , VQ1 ),
23952411
23962412 % % fetch five now
23972413 VQ3 = lists :foldl (fun (_N , VQN ) ->
@@ -2408,11 +2424,11 @@ test_dropwhile(VQ0) ->
%% NOTE(review): this span is a raw diff hunk, not plain source. Lines
%% whose fused numbers end in '-' show the old code (dropwhile/3
%% returning a 2-tuple) and lines ending in '+' the new code
%% (dropwhile/3 now returns a 3-tuple whose first element is ignored
%% here). The leading digits are the diff's own old/new line numbers,
%% not part of the Erlang code.
24082424test_dropwhile_varying_ram_duration (VQ0 ) ->
24092425 VQ1 = variable_queue_publish (false , 1 , VQ0 ),
24102426 VQ2 = rabbit_variable_queue :set_ram_duration_target (0 , VQ1 ),
2411- {undefined , VQ3 } = rabbit_variable_queue :dropwhile (
2412- fun (_ ) -> false end , false , VQ2 ),
2427+ {_ , undefined , VQ3 } = rabbit_variable_queue :dropwhile (
2428+ fun (_ ) -> false end , false , VQ2 ),
24132429 VQ4 = rabbit_variable_queue :set_ram_duration_target (infinity , VQ3 ),
24142430 VQ5 = variable_queue_publish (false , 1 , VQ4 ),
2415- {undefined , VQ6 } =
2431+ {_ , undefined , VQ6 } =
24162432 rabbit_variable_queue :dropwhile (fun (_ ) -> false end , false , VQ5 ),
24172433 VQ6 .
24182434
0 commit comments