# frozen_string_literal: true

+ require 'json'
require 'testing_helper'

class TestAgainstClusterBroken < TestingWrapper
@@ -37,20 +38,36 @@ def teardown
      "ClusterDownError: #{@cluster_down_error_count} = "
  end

-  def test_a_replica_is_down
-    sacrifice = @controller.select_sacrifice_of_replica
-    do_test_a_node_is_down(sacrifice, number_of_keys: NUMBER_OF_KEYS)
+  def test_client_patience
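+    # Drive a single client through a replica outage, a primary outage, and recovery,
+    # asserting that reads and writes keep succeeding at every stage.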
+    prepare_test_data(number_of_keys: NUMBER_OF_KEYS)
+
+    # a replica
+    kill_a_node(@controller.select_sacrifice_of_replica)
+    wait_for_cluster_to_be_ready(wait_attempts: MAX_ATTEMPTS)
+    do_assertions(number_of_keys: NUMBER_OF_KEYS)
+    refute(@captured_commands.count('cluster', 'nodes').zero?, @captured_commands.to_a.map(&:command))
+
+    # a primary
+    kill_a_node(@controller.select_sacrifice_of_primary)
+    wait_for_cluster_to_be_ready(wait_attempts: MAX_ATTEMPTS)
+    do_assertions(number_of_keys: NUMBER_OF_KEYS)
    refute(@captured_commands.count('cluster', 'nodes').zero?, @captured_commands.to_a.map(&:command))
-  end

-  def test_a_primary_is_down
-    sacrifice = @controller.select_sacrifice_of_primary
-    do_test_a_node_is_down(sacrifice, number_of_keys: NUMBER_OF_KEYS)
+
+    # recovery
+    revive_dead_nodes
+    wait_for_cluster_to_be_ready(wait_attempts: MAX_ATTEMPTS)
+    do_assertions(number_of_keys: NUMBER_OF_KEYS)
    refute(@captured_commands.count('cluster', 'nodes').zero?, @captured_commands.to_a.map(&:command))
  end

  private

+  def prepare_test_data(number_of_keys:)
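+    # Seed keys both with single SETs and via pipelines, then wait for replicas to catch up.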
+    number_of_keys.times { |i| @client.call('SET', "pre-#{i}", i) }
+    number_of_keys.times { |i| @client.pipelined { |pi| pi.call('SET', "pre-pipelined-#{i}", i) } }
+    wait_for_replication
+  end
+
  def wait_for_replication
    client_side_timeout = TEST_TIMEOUT_SEC + 1.0
    server_side_timeout = (TEST_TIMEOUT_SEC * 1000).to_i
@@ -59,61 +76,45 @@ def wait_for_replication
    end
  end

-  def do_test_a_node_is_down(sacrifice, number_of_keys:)
-    prepare_test_data(number_of_keys: number_of_keys)
-
-    kill_a_node(sacrifice, kill_attempts: MAX_ATTEMPTS)
-    wait_for_cluster_to_be_ready(wait_attempts: MAX_ATTEMPTS)
-
-    assert_equal('PONG', @client.call('PING'), 'Case: PING')
-    do_assertions_without_pipelining(number_of_keys: number_of_keys)
-    do_assertions_with_pipelining(number_of_keys: number_of_keys)
-  end
+  def wait_for_cluster_to_be_ready(wait_attempts:)
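+    # Keep PINGing until the cluster answers again, counting NodeMightBeDown errors
+    # so teardown can report how often the client saw the cluster as down.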
+    loop do
+      raise MaxRetryExceeded if wait_attempts <= 0

-  def prepare_test_data(number_of_keys:)
-    number_of_keys.times { |i| @client.call('SET', "pre-#{i}", i) }
-    number_of_keys.times { |i| @client.pipelined { |pi| pi.call('SET', "pre-pipelined-#{i}", i) } }
-    wait_for_replication
+      wait_attempts -= 1
+      break if @client.call('PING') == 'PONG'
+    rescue ::RedisClient::Cluster::NodeMightBeDown
+      @cluster_down_error_count += 1
+    ensure
+      sleep WAIT_SEC
+    end
  end

-  def kill_a_node(sacrifice, kill_attempts:)
+  def kill_a_node(sacrifice)
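+    # Instead of issuing SHUTDOWN, pause the docker compose service that publishes
+    # the sacrifice's port, so revive_dead_nodes can bring it back later.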
    refute_nil(sacrifice, "#{sacrifice.config.host}:#{sacrifice.config.port}")

-    loop do
-      raise MaxRetryExceeded if kill_attempts <= 0
+    `docker compose ps --format json`.lines.map { |line| JSON.parse(line) }.each do |service|
+      published_ports = service.fetch('Publishers').map { |e| e.fetch('PublishedPort') }.uniq
+      next unless published_ports.include?(sacrifice.config.port)

-      kill_attempts -= 1
-      sacrifice.call('SHUTDOWN', 'NOSAVE')
-    rescue ::RedisClient::CommandError => e
-      raise unless e.message.include?('Errors trying to SHUTDOWN')
-    rescue ::RedisClient::ConnectionError
+      service_name = service.fetch('Service')
+      system("docker compose --progress quiet pause #{service_name}", exception: true)
      break
-    ensure
-      sleep WAIT_SEC
    end

    assert_raises(::RedisClient::ConnectionError) { sacrifice.call('PING') }
  end

-  def wait_for_cluster_to_be_ready(wait_attempts:)
-    loop do
-      raise MaxRetryExceeded if wait_attempts <= 0
-
-      wait_attempts -= 1
-      break if @client.call('PING') == 'PONG'
-    rescue ::RedisClient::Cluster::NodeMightBeDown
-      @cluster_down_error_count += 1
-    ensure
-      sleep WAIT_SEC
+  def revive_dead_nodes
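+    # Unpause every compose service that kill_a_node left paused.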
+    `docker compose ps --format json --status paused`.lines.map { |line| JSON.parse(line) }.each do |service|
+      service_name = service.fetch('Service')
+      system("docker compose --progress quiet unpause #{service_name}", exception: true)
    end
  end

-  def do_assertions_without_pipelining(number_of_keys:)
+  def do_assertions(number_of_keys:)
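+    # Read back the pre-seeded keys, write fresh keys, and replay the pipelined reads in one pass.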
    number_of_keys.times { |i| assert_equal(i.to_s, @client.call('GET', "pre-#{i}"), "Case: pre-#{i}: GET") }
    number_of_keys.times { |i| assert_equal('OK', @client.call('SET', "post-#{i}", i), "Case: post-#{i}: SET") }
-  end

-  def do_assertions_with_pipelining(number_of_keys:)
    want = Array.new(number_of_keys, &:to_s)
    got = @client.pipelined { |pi| number_of_keys.times { |i| pi.call('GET', "pre-pipelined-#{i}") } }
    assert_equal(want, got, 'Case: pre-pipelined: GET')