| 1 | +-module(verify_listkeys_eqcfsm). |
| 2 | +-compile(export_all). |
| 3 | + |
| 4 | +-include_lib("eqc/include/eqc.hrl"). |
| 5 | +-include_lib("eqc/include/eqc_fsm.hrl"). |
| 6 | +-include_lib("eunit/include/eunit.hrl"). |
| 7 | + |
| 8 | +-behaviour(riak_test). |
| 9 | +-export([confirm/0]). |
| 10 | + |
| 11 | +-define(NUM_TESTS, 5). |
| 12 | +-define(PREFIX, {x, x}). |
| 13 | +-define(DEVS(N), lists:concat(["dev", N, "@127.0.0.1"])). |
| 14 | +-define(DEV(N), list_to_atom(?DEVS(N))). |
| 15 | + |
| 16 | +-record(state, { |
| 17 | + bucket_type = undefined, |
| 18 | + bucket = undefined, |
| 19 | + nodes_up = [], |
| 20 | + nodes_down = [], |
| 21 | + cluster_nodes = [], |
| 22 | + num_keys = 0, |
| 23 | + key_filter = undefined |
| 24 | + }). |
| 25 | + |
| 26 | +%% ==================================================================== |
| 27 | +%% riak_test callback |
| 28 | +%% ==================================================================== |
%% riak_test entry point: run the FSM property ?NUM_TESTS times.
%% ?assert raises an eunit assertion error unless every run passes;
%% otherwise the riak_test harness receives 'pass'.
confirm() ->
    Property = eqc:numtests(?NUM_TESTS, ?MODULE:prop_test()),
    ?assert(eqc:quickcheck(Property)),
    pass.
| 32 | +%% ==================================================================== |
| 33 | +%% EQC generators |
| 34 | +%% ==================================================================== |
%% Generator: cluster size — uniformly one of 2, 3, 4 or 5 nodes.
g_num_nodes() ->
    oneof(lists:seq(2, 5)).
| 37 | + |
%% Generator: how many keys to preload, 10..1000 inclusive.
%% The lower bound of 10 is relied upon by g_key_filter/0, whose
%% filter matches keys "1".."10".
g_num_keys() ->
    choose(10, 1000).
| 40 | + |
%% Generator: a fresh v4 UUID string, used as the bucket name.
%% Binding on a throwaway bool forces druuid:v4_str/0 to be re-evaluated
%% per generated value rather than fixed at generator-construction time;
%% noshrink because shrinking a random UUID is meaningless.
g_uuid() ->
    noshrink(eqc_gen:bind(eqc_gen:bool(), fun(_) -> druuid:v4_str() end)).
| 43 | + |
%% Generator: one {BucketTypeName, NVal} pair from bucket_types/0.
g_bucket_type() ->
    oneof(bucket_types()).
| 46 | + |
%% Generator: either no key filter (weight 4) or a membership filter
%% (weight 2) that keeps only the keys <<"1">>..<<"10">>. Those keys
%% always exist because g_num_keys/0 never generates fewer than 10 keys.
g_key_filter() ->
    Matchable = [integer_to_binary(N) || N <- lists:seq(1, 10)],
    MemberFilter =
        fun(Key) ->
                lists:member(Key, Matchable)
        end,
    frequency([{4, none}, {2, MemberFilter}]).
| 58 | + |
| 59 | +%% ==================================================================== |
| 60 | +%% EQC Properties |
| 61 | +%% ==================================================================== |
%% Top-level property: generate a command sequence from this module's
%% eqc_fsm callbacks, run it, always tear the cluster down afterwards,
%% and require the overall run result to be ok. Commands are noshrunk
%% because each run builds a real cluster and shrinking would be very slow.
prop_test() ->
    ?FORALL(Cmds, noshrink(commands(?MODULE)),
            ?WHENFAIL(
              begin
                  _ = lager:error("*********************** FAILED!!!!"
                                  "*******************")
              end,
              ?TRAPEXIT(
                begin
                    lager:info("======================== Will run commands:"),
                    [lager:info(" Command : ~p~n", [Cmd]) || Cmd <- Cmds],
                    {H, S, Res} = run_commands(?MODULE, Cmds),
                    lager:info("======================== Ran commands"),
                    %% Cleanup runs on success AND failure; S is {StateName, StateData}.
                    clean_nodes(S),
                    %% Label each run with its state transitions for eqc statistics.
                    aggregate(zip(state_names(H),command_names(Cmds)),
                              equals(Res, ok))
                end))).
| 79 | + |
| 80 | +%% ==================================================================== |
| 81 | +%% EQC FSM state transitions |
| 82 | +%% ==================================================================== |
%% eqc_fsm callback: every command sequence starts by building a cluster.
initial_state() ->
    building_cluster.
| 85 | + |
%% From building_cluster the only transition is to preloading_data,
%% via setup_cluster/1 with a generated node count.
building_cluster(_S) ->
    [
        {preloading_data, {call, ?MODULE, setup_cluster, [g_num_nodes()]}}
    ].
| 90 | + |
%% From preloading_data we move to verifying_data by writing generated
%% keys into a random {type, uuid-named} bucket through the first node up.
preloading_data(S) ->
    [
        {verifying_data, {call, ?MODULE, preload_data, [g_bucket_type(), g_uuid(), hd(S#state.nodes_up),
                                                        g_num_keys(), g_key_filter()]}}
    ].
| 96 | + |
%% From verifying_data we list the keys back from every node and move to
%% teardown; the postcondition checks the listed keys against expectations.
verifying_data(S) ->
    [
        {tearing_down_nodes, {call, ?MODULE, verify, [S#state.bucket_type, S#state.bucket, S#state.nodes_up,
                                                      S#state.num_keys, S#state.key_filter]}}
    ].
| 102 | + |
%% From tearing_down_nodes the cluster is wiped and the FSM stops.
tearing_down_nodes(S) ->
    [
        {stopped, {call, ?MODULE, clean_nodes, [S#state.nodes_up]}}
    ].
| 107 | + |
%% Terminal state: no further transitions.
stopped(_S) ->
    [].
| 110 | + |
| 111 | +%% ==================================================================== |
| 112 | +%% EQC FSM State Data |
| 113 | +%% ==================================================================== |
%% eqc_fsm callback: start with an empty #state{} record.
initial_state_data() ->
    #state{}.
| 116 | + |
%% Record the node list once the cluster has been set up.
next_state_data(building_cluster, preloading_data, S, _, {call, _, setup_cluster, [NumNodes]}) ->
    S#state{ nodes_up = node_list(NumNodes) };
%% Remember what was preloaded (type, bucket, key count, filter) so
%% verifying_data/1 and postcondition/5 can check it later.
next_state_data(preloading_data, verifying_data, S, _, {call, _, preload_data,
                [{BucketType, _}, Bucket, _Nodes, NumKeys, KeyFilter]}) ->
    S#state{ bucket_type = BucketType, bucket = Bucket, num_keys = NumKeys, key_filter = KeyFilter };
%% All other transitions leave the state data unchanged.
next_state_data(_From, _To, S, _R, _C) ->
    S.
| 124 | + |
| 125 | +%% ==================================================================== |
| 126 | +%% EQC FSM preconditions |
| 127 | +%% ==================================================================== |
%% No command is ever filtered out: the transition tables above already
%% enforce the only legal ordering, so every generated call is allowed.
precondition(_From,_To,_S,{call,_,_,_}) ->
    true.
| 130 | + |
| 131 | +%% ==================================================================== |
| 132 | +%% EQC FSM postconditions |
| 133 | +%% ==================================================================== |
%% setup_cluster must return ok.
postcondition(_From, _To, _S, {call, _, setup_cluster, _}, Result) ->
    ok == Result;
%% verify returning an error tuple is always a failure; log why.
postcondition(_From, _To, _S, {call, _, verify, _}, {error, Reason}) ->
    lager:info("Error: ~p", [Reason]),
    false;
%% Otherwise verify returns one key list per node; every node's listing
%% must match the expected (filtered, sorted) key set exactly.
postcondition(_From, _To, S, {call, _, verify, _}, KeyLists) ->
    Expected = expected_keys(S#state.num_keys, S#state.key_filter),
    Outcomes = [assert_equal(Expected, KeyList) || KeyList <- KeyLists],
    lists:all(fun(Outcome) -> Outcome =:= true end, Outcomes);
%% All remaining commands are unchecked.
postcondition(_From, _To, _S, {call, _, _, _}, _Result) ->
    true.
| 145 | + |
| 146 | +%% ==================================================================== |
| 147 | +%% callback functions |
| 148 | +%% ==================================================================== |
%% clean_nodes/1 serves two roles: the FSM 'stopped'-transition command
%% (called with the nodes_up list) and the property-level cleanup in
%% prop_test/0 (called with the {StateName, StateData} pair produced by
%% run_commands/2).
%% If the FSM already reached 'stopped', teardown has run — do nothing.
clean_nodes({stopped, _S}) ->
    lager:info("Clean-up already completed.");
%% Any other final state: recurse on the recorded node list.
clean_nodes({_, S}) ->
    lager:info("Running clean_nodes with S:~p", [S]),
    clean_nodes(S#state.nodes_up);
%% No cluster was ever built (failure before setup_cluster).
clean_nodes([]) ->
    lager:info("clean_nodes: no cluster to clean");
%% Wipe each node's data dir in parallel, then tear the harness down.
clean_nodes(Nodes) ->
    lager:info("Running clean_nodes with Nodes:~p", [Nodes]),
    CleanupFun =
        fun(N) ->
            lager:info("Wiping out node ~p for good", [N]),
            rt:clean_data_dir(N)
        end,
    lager:info("======================== Taking all nodes down ~p", [Nodes]),
    rt:pmap(CleanupFun, Nodes),
    rt:teardown().
| 166 | + |
%% FSM command: write NumKeys objects into {BucketType, Bucket} through
%% Node. The key filter is generated alongside but only applied at
%% verification time, not while writing.
preload_data({BucketType, _}, Bucket, Node, NumKeys, _KeyFilter) ->
    lager:info("*******************[CMD] First node ~p", [Node]),
    lager:info("Writing to bucket ~p", [Bucket]),
    put_keys(Node, {BucketType, Bucket}, NumKeys).
| 171 | + |
%% FSM command: build an NumNodes-node cluster, wait for it to settle,
%% then create and activate every bucket type from bucket_types/0 with
%% its n_val. Returns ok (checked by postcondition/5).
setup_cluster(NumNodes) ->
    lager:info("Deploying cluster of size ~p", [NumNodes]),
    Nodes = rt:build_cluster(NumNodes),
    rt:wait_until_nodes_ready(Nodes),
    Node = hd(Nodes),
    %% Handoff must be finished before writes, or listings may be partial.
    rt:wait_until_transfers_complete(Nodes),
    [begin
         rt:create_and_activate_bucket_type(Node, BucketType, [{n_val, NVal}]),
         rt:wait_until_bucket_type_status(BucketType, active, Nodes)
     end || {BucketType, NVal} <- bucket_types()],
    ok.
| 183 | + |
%% FSM command: list, filter and sort the bucket's keys as seen from each
%% node; returns one sorted key list per node for postcondition checking.
verify(BucketType, Bucket, Nodes, _NumKeys, KeyFilter) ->
    ListFromNode =
        fun(Node) ->
                list_filter_sort(Node, {BucketType, Bucket}, KeyFilter)
        end,
    lists:map(ListFromNode, Nodes).
| 186 | + |
| 187 | +%% ==================================================================== |
| 188 | +%% Helpers |
| 189 | +%% ==================================================================== |
%% Compare the listed key set against the expected one, logging the
%% difference in BOTH directions before returning, so a failure report
%% shows missing keys as well as unexpected extras (the original only
%% logged Expected -- Actual, hiding spurious keys). Returns a boolean
%% for use in postcondition/5.
assert_equal(Expected, Actual) ->
    case Expected -- Actual of
        [] -> ok;
        Missing -> lager:info("Expected -- Actual: ~p", [Missing])
    end,
    case Actual -- Expected of
        [] -> ok;
        Extra -> lager:info("Actual -- Expected: ~p", [Extra])
    end,
    %% List equality already implies equal lengths, so the previous
    %% length/1 comparison was redundant; =:= avoids numeric coercion.
    Actual =:= Expected.
| 197 | + |
%% The bucket types exercised by this test, each paired with the n_val
%% it is created with in setup_cluster/1.
bucket_types() ->
    Names = [<<"n_val_one">>, <<"n_val_two">>, <<"n_val_three">>,
             <<"n_val_four">>, <<"n_val_five">>],
    lists:zip(Names, lists:seq(1, 5)).
| 204 | + |
%% The key set put_keys/3 wrote — integers 0..NumKeys-1 as binaries —
%% pushed through the same filter/sort pipeline applied to listed keys,
%% so it can be compared directly against a node's listing.
expected_keys(NumKeys, FilterFun) ->
    Generated = [integer_to_binary(K) || K <- lists:seq(0, NumKeys - 1)],
    sort_keys(filter_keys({ok, Generated}, FilterFun)).
| 209 | + |
%% Apply the optional key filter to a list_keys result.
%% With filter 'none' the keys pass through untouched; an {error, _}
%% result is propagated unchanged for sort_keys/1 to pass along.
filter_keys({ok, Keys}, none) ->
    Keys;
filter_keys({ok, Keys}, FilterFun) ->
    [Key || Key <- Keys, FilterFun(Key)];
filter_keys({error, _} = Error, _FilterFun) ->
    Error.
| 216 | + |
%% List the keys of Bucket as seen from Node, apply the optional filter,
%% and return them sorted and de-duplicated (or an {error, _} tuple,
%% which filter_keys/sort_keys pass through).
list_filter_sort(Node, Bucket, KeyFilter) ->
    %% Move client to state
    %% NOTE(review): a fresh client is connected per call; presumably cheap
    %% enough here, but caching it in FSM state (per the note above) would
    %% avoid repeated connects — confirm before changing.
    {ok, C} = riak:client_connect(Node),
    sort_keys(filter_keys(riak_client:list_keys(Bucket, C), KeyFilter)).
| 221 | + |
%% Node names dev1@127.0.0.1 .. devN@127.0.0.1 for a cluster of NumNodes.
node_list(NumNodes) ->
    [?DEV(Index) || Index <- lists:seq(1, NumNodes)].
| 225 | + |
%% Write Num objects into Bucket via a protobuf client on Node.
%% Keys and values are both the integers 0..Num-1 rendered as binaries
%% (matching expected_keys/2). The client is always stopped afterwards;
%% the stop itself is wrapped in catch so a dead socket can't mask the
%% original failure.
put_keys(Node, Bucket, Num) ->
    lager:info("*******************[CMD] Putting ~p keys into bucket ~p on node ~p", [Num, Bucket, Node]),
    Pid = rt:pbc(Node),
    try
        Keys = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)],
        Vals = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)],
        [riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Val)) || {Key, Val} <- lists:zip(Keys, Vals)]
    after
        catch(riakc_pb_socket:stop(Pid))
    end.
| 236 | + |
%% Sort and de-duplicate a key list; an {error, _} result from a failed
%% listing is passed through untouched. The error clause must come first,
%% since the list clause would otherwise match the tuple.
sort_keys({error, _} = Error) -> Error;
sort_keys(Keys) -> lists:usort(Keys).
| 241 | + |