
Commit 41d6940

Brett Hazen committed
Roll back regression test to work with new testing infrastructure
1 parent 957fc93 commit 41d6940

2 files changed: 22 additions, 19 deletions


regression_test_wrapper.sh

Lines changed: 2 additions & 1 deletion
@@ -8,7 +8,8 @@ if [ -z $1 ]; then
     exit 1
 fi
 
-ALL_BACKEND_TEST_CASES="always_pass_test,verify_riak_stats,verify_down,verify_staged_clustering,verify_leave,partition_repair,verify_build_cluster,riak_control_authentication,always_fail_test,basic_command_line,jmx_verify,verify_aae,verify_claimant,verify_object_limits,ensemble_interleave,ensemble_byzantine,gh_riak_core_155,pb_security,verify_search,verify_handoff,verify_capabilities,replication/repl_fs_stat_caching,replication/replication2,verify_handoff_mixed,verify_bitcask_tombstone2_upgrade,replication_object_reformat"
+REPL_TEST_CASES="replication,replication_object_reformat,replication2,repl_fs_stat_caching"
+ALL_BACKEND_TEST_CASES="always_pass_test,verify_riak_stats,verify_down,verify_staged_clustering,verify_leave,partition_repair,verify_build_cluster,riak_control_authentication,always_fail_test,basic_command_line,jmx_verify,verify_aae,verify_claimant,verify_object_limits,ensemble_interleave,ensemble_byzantine,gh_riak_core_155,pb_security,verify_search,verify_handoff,verify_capabilities,verify_handoff_mixed,verify_bitcask_tombstone2_upgrade,${REPL_TEST_CASES}"
 
 BITCASK_BACKEND_TEST_CASES="$ALL_BACKEND_TEST_CASES,loaded_upgrade"
 ELEVELDB_BACKEND_TEST_CASES="verify_2i_aae,loaded_upgrade"

tests/replication/replication.erl

Lines changed: 20 additions & 18 deletions
@@ -16,8 +16,10 @@ confirm() ->
              {diff_batch_size, 10}
             ]}
            ],
-    rt_config:set_advanced_conf(all, Conf),
-    [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]),
+    rt:set_advanced_conf(all, Conf),
+    [ANodes, BNodes] = rt:build_clusters([3, 3]),
+    rt:wait_for_cluster_service(ANodes, riak_repl),
+    rt:wait_for_cluster_service(BNodes, riak_repl),
     replication(ANodes, BNodes, false),
     pass.
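The hunk above moves the test setup back onto the rt helper module and adds explicit waits for the riak_repl service on both clusters before the replication checks run. A minimal sketch of the resulting confirm/0 flow follows; only the diff_batch_size entry of Conf is visible in this hunk, so the other riak_repl setting shown is an assumption for illustration.

%% Sketch of confirm/0 after this hunk. The fullsync_on_connect entry is
%% assumed (the test's own comments say fullsync_on_connect is disabled);
%% the rest of Conf is not shown in this diff.
confirm() ->
    Conf = [{riak_repl,
             [{fullsync_on_connect, false},
              {diff_batch_size, 10}]}],
    rt:set_advanced_conf(all, Conf),
    %% Build two three-node clusters and wait until riak_repl is up on both.
    [ANodes, BNodes] = rt:build_clusters([3, 3]),
    rt:wait_for_cluster_service(ANodes, riak_repl),
    rt:wait_for_cluster_service(BNodes, riak_repl),
    replication(ANodes, BNodes, false),
    pass.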

@@ -106,7 +108,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
     %% check that the keys we wrote initially aren't replicated yet, because
     %% we've disabled fullsync_on_connect
     lager:info("Check keys written before repl was connected are not present"),
-    Res2 = rt_systest:read(BFirst, 1, 100, TestBucket, 2),
+    Res2 = rt:systest_read(BFirst, 1, 100, TestBucket, 2),
     ?assertEqual(100, length(Res2)),
 
     start_and_wait_until_fullsync_complete(LeaderA),
@@ -143,7 +145,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
 
     rt:log_to_nodes(AllNodes, "Testing master failover: stopping ~p", [LeaderA]),
     lager:info("Testing master failover: stopping ~p", [LeaderA]),
-    rt_node:stop(LeaderA),
+    rt:stop(LeaderA),
     rt:wait_until_unpingable(LeaderA),
     wait_until_leader(ASecond),
 
@@ -166,7 +168,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
     LeaderB = rpc:call(BFirst, riak_repl_leader, leader_node, []),
 
     lager:info("Testing client failover: stopping ~p", [LeaderB]),
-    rt_node:stop(LeaderB),
+    rt:stop(LeaderB),
     rt:wait_until_unpingable(LeaderB),
     BSecond = hd(BNodes -- [LeaderB]),
     wait_until_leader(BSecond),
@@ -197,7 +199,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
     %%
 
     lager:info("Restarting down node ~p", [LeaderA]),
-    rt_node:start(LeaderA),
+    rt:start(LeaderA),
     rt:wait_until_pingable(LeaderA),
     rt:wait_until_no_pending_changes(ANodes),
     wait_until_leader_converge(ANodes),
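The failover hunks in this region follow a stop / wait-until-unpingable / start / wait-until-pingable pattern around the current leader. A hedged sketch of that pattern as a standalone helper follows; the name bounce_node is illustrative and not part of this commit, but every rt call in it appears in the diff.

%% Illustrative helper (not in the source); shows the restart pattern the
%% test applies to LeaderA and LeaderB via the rt module.
bounce_node(Node) ->
    rt:stop(Node),                    %% stop the node cleanly
    rt:wait_until_unpingable(Node),   %% wait for it to actually go down
    rt:start(Node),                   %% bring it back up
    rt:wait_until_pingable(Node).     %% wait until it answers pings again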
@@ -265,7 +267,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
     end,
 
     lager:info("Restarting down node ~p", [LeaderB]),
-    rt_node:start(LeaderB),
+    rt:start(LeaderB),
     rt:wait_until_pingable(LeaderB),
 
     case nodes_all_have_version(ANodes, "1.1.0") of
@@ -306,19 +308,19 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
                                                    FullsyncOnly, 2)),
 
                    lager:info("Check the fullsync only bucket didn't replicate the writes"),
-                    Res6 = rt_systest:read(BSecond, 1, 100, FullsyncOnly, 2),
+                    Res6 = rt:systest_read(BSecond, 1, 100, FullsyncOnly, 2),
                    ?assertEqual(100, length(Res6)),
 
                    lager:info("Check the realtime only bucket that was written to offline "
                               "isn't replicated"),
-                    Res7 = rt_systest:read(BSecond, 1, 100, RealtimeOnly, 2),
+                    Res7 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2),
                    ?assertEqual(100, length(Res7));
                _ ->
                    timer:sleep(1000)
            end,
 
            lager:info("Check the {repl, false} bucket didn't replicate"),
-            Res8 = rt_systest:read(BSecond, 1, 100, NoRepl, 2),
+            Res8 = rt:systest_read(BSecond, 1, 100, NoRepl, 2),
            ?assertEqual(100, length(Res8)),
 
            %% do a fullsync, make sure that fullsync_only is replicated, but
@@ -332,7 +334,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
                                            FullsyncOnly, 2)),
 
            lager:info("Check realtime only bucket didn't replicate"),
-            Res10 = rt_systest:read(BSecond, 1, 100, RealtimeOnly, 2),
+            Res10 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2),
            ?assertEqual(100, length(Res10)),
 
 
@@ -347,14 +349,14 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
                                                    RealtimeOnly, 2)),
 
                    lager:info("Check the older keys in the realtime bucket did not replicate"),
-                    Res12 = rt_systest:read(BSecond, 1, 100, RealtimeOnly, 2),
+                    Res12 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2),
                    ?assertEqual(100, length(Res12));
                _ ->
                    ok
            end,
 
            lager:info("Check {repl, false} bucket didn't replicate"),
-            Res13 = rt_systest:read(BSecond, 1, 100, NoRepl, 2),
+            Res13 = rt:systest_read(BSecond, 1, 100, NoRepl, 2),
            ?assertEqual(100, length(Res13));
        _ ->
            ok
@@ -517,7 +519,7 @@ start_and_wait_until_fullsync_complete(Node, Retries) ->
     lager:info("waiting for fullsync count to be ~p", [Count]),
 
     lager:info("Starting fullsync on ~p (~p)", [Node,
-        rt:node_version(rt:node_id(Node))]),
+        rtdev:node_version(rtdev:node_id(Node))]),
     rpc:call(Node, riak_repl_console, start_fullsync, [[]]),
 
     %% sleep because of the old bug where stats will crash if you call it too
@@ -685,20 +687,20 @@ wait_until_no_connection(Node) ->
 wait_for_reads(Node, Start, End, Bucket, R) ->
     rt:wait_until(Node,
         fun(_) ->
-            rt_systest:read(Node, Start, End, Bucket, R) == []
+            rt:systest_read(Node, Start, End, Bucket, R) == []
         end),
-    Reads = rt_systest:read(Node, Start, End, Bucket, R),
+    Reads = rt:systest_read(Node, Start, End, Bucket, R),
     lager:info("Reads: ~p", [Reads]),
     length(Reads).
 
 do_write(Node, Start, End, Bucket, W) ->
-    case rt_systest:write(Node, Start, End, Bucket, W) of
+    case rt:systest_write(Node, Start, End, Bucket, W) of
         [] ->
             [];
         Errors ->
             lager:warning("~p errors while writing: ~p",
                           [length(Errors), Errors]),
             timer:sleep(1000),
-            lists:flatten([rt_systest:write(Node, S, S, Bucket, W) ||
+            lists:flatten([rt:systest_write(Node, S, S, Bucket, W) ||
                           {S, _Error} <- Errors])
     end.
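For context on the rt:systest_read and rt:systest_write calls restored throughout this file: both return a list of per-key errors, so an empty list means every key succeeded, and the assertions above count errors (for example, 100 errors when none of the 100 keys have replicated yet). A hedged usage sketch of the two helpers from the last hunk follows; the function name, bucket, and key range are illustrative only and not part of this commit.

%% Illustrative only: write 100 keys to a node in cluster A, then block
%% until a node in cluster B can read them all.
write_then_verify_replicated(ANode, BNode, Bucket) ->
    [] = do_write(ANode, 1, 100, Bucket, 2),       %% no write errors (do_write retries failures)
    0 = wait_for_reads(BNode, 1, 100, Bucket, 2).  %% zero read errors once replication catches up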
