|
43 | 43 | %% archive), but we don't want them to process so many inputs that |
44 | 44 | %% they consume their blocking queues before handing off. |
45 | 45 |
|
46 | | -%% Please Note: Under rare circumstances, this test may fail with a |
47 | | -%% "{badmatch,{error,[{vnode_down,noproc}]}}' error. This is not a |
48 | | -%% failure of this test but rather a side effect of a race condition |
49 | | -%% in riak_core_vnode_proxy. It manifests because the test attempts
50 | | -%% to send a command to a vnode that is in fact down, but monitoring
51 | | -%% only works by issuing a command and getting back a PID. In some
52 | | -%% instances, get_vnode_pid fails because the vnode shutdown message
53 | | -%% is queued up in the mailbox ahead of the monitor. Unfortunately, the
54 | | -%% fix would require a fundamental shift in the architecture of |
55 | | -%% riak_core, which at the time of this writing is not feasible for |
56 | | -%% this rare failure case. |
57 | 46 | -module(pipe_verify_handoff_blocking). |
58 | 47 |
|
59 | 48 | -export([ |
@@ -175,7 +164,11 @@ queue_filler(Node, Pipe, Inputs, Count) -> |
175 | 164 | {stop, Owner} -> Owner ! {done, Count} |
176 | 165 | after 0 -> |
177 | 166 | {{value, I}, Q} = queue:out(Inputs), |
178 | | - ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, I]), |
| 167 | + case rpc:call(Node, riak_pipe, queue_work, [Pipe, I]) of |
| 168 | + ok -> ok; |
| 170 | + {error,[{vnode_down,noproc}]} -> ok; %% OK if the vnode has moved to a different node; the next request should succeed
| 170 | + Other -> throw(Other) |
| 171 | + end, |
179 | 172 | queue_filler(Node, Pipe, queue:in(I, Q), Count+1) |
180 | 173 | end. |
181 | 174 |
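
The change above simply tolerates the transient {error,[{vnode_down,noproc}]} return, on the assumption that once the vnode has finished moving during handoff, subsequent queue_work requests will succeed. A minimal sketch of the same idea factored into a standalone helper is shown below; the module and function names and the single retry are illustrative assumptions, not part of riak_pipe's API.

-module(queue_work_tolerant).
-export([queue_work/3]).

%% Send one input to Pipe via an rpc to Node, treating a vnode_down
%% error as transient: the vnode is moving during handoff, so a single
%% retry normally reaches the new owner.
queue_work(Node, Pipe, Input) ->
    case rpc:call(Node, riak_pipe, queue_work, [Pipe, Input]) of
        ok ->
            ok;
        {error, [{vnode_down, noproc}]} ->
            %% the vnode moved between lookup and send; retry once
            rpc:call(Node, riak_pipe, queue_work, [Pipe, Input]);
        Other ->
            throw(Other)
    end.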
|
|