@@ -34,9 +34,14 @@ class MockMetadata(object):

    def __init__(self):
        self.hosts = {
-            DefaultEndPoint("192.168.1.0"): Host(DefaultEndPoint("192.168.1.0"), SimpleConvictionPolicy),
-            DefaultEndPoint("192.168.1.1"): Host(DefaultEndPoint("192.168.1.1"), SimpleConvictionPolicy),
-            DefaultEndPoint("192.168.1.2"): Host(DefaultEndPoint("192.168.1.2"), SimpleConvictionPolicy)
+            'uuid1': Host(endpoint=DefaultEndPoint("192.168.1.0"), conviction_policy_factory=SimpleConvictionPolicy, host_id='uuid1'),
+            'uuid2': Host(endpoint=DefaultEndPoint("192.168.1.1"), conviction_policy_factory=SimpleConvictionPolicy, host_id='uuid2'),
+            'uuid3': Host(endpoint=DefaultEndPoint("192.168.1.2"), conviction_policy_factory=SimpleConvictionPolicy, host_id='uuid3')
+        }
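+        # secondary index from endpoint to host_id, since hosts is now keyed by host_id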
+        self._host_id_by_endpoint = {
+            DefaultEndPoint("192.168.1.0"): 'uuid1',
+            DefaultEndPoint("192.168.1.1"): 'uuid2',
+            DefaultEndPoint("192.168.1.2"): 'uuid3',
        }
        for host in self.hosts.values():
            host.set_up()
@@ -45,6 +50,7 @@ def __init__(self):
        self.cluster_name = None
        self.partitioner = None
        self.token_map = {}
+        self.removed_hosts = []

    def get_host(self, endpoint_or_address, port=None):
        if not isinstance(endpoint_or_address, EndPoint):
@@ -53,7 +59,8 @@ def get_host(self, endpoint_or_address, port=None):
                        (port is None or host.broadcast_rpc_port is None or host.broadcast_rpc_port == port)):
                    return host
        else:
-            return self.hosts.get(endpoint_or_address)
+            host_id = self._host_id_by_endpoint.get(endpoint_or_address)
+            return self.hosts.get(host_id)

    def all_hosts(self):
        return self.hosts.values()
@@ -62,6 +69,26 @@ def rebuild_token_map(self, partitioner, token_map):
        self.partitioner = partitioner
        self.token_map = token_map

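+    # Return (host, created): the Host already registered under host.host_id,
+    # or the given host after registering it and indexing its endpoint.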
+    def add_or_return_host(self, host):
+        try:
+            return self.hosts[host.host_id], False
+        except KeyError:
+            self._host_id_by_endpoint[host.endpoint] = host.host_id
+            self.hosts[host.host_id] = host
+            return host, True
+
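+    # Re-register the host under its current endpoint and drop the mapping
+    # for the endpoint it moved away from.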
+    def update_host(self, host, old_endpoint):
+        host, created = self.add_or_return_host(host)
+        self._host_id_by_endpoint[host.endpoint] = host.host_id
+        self._host_id_by_endpoint.pop(old_endpoint, False)
+
+    def all_hosts_items(self):
+        return list(self.hosts.items())
+
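+    # Pop the host registered under host_id, record it for the tests to
+    # inspect, and report whether anything was actually removed.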
+    def remove_host_by_host_id(self, host_id):
+        removed = self.hosts.pop(host_id, False)
+        self.removed_hosts.append(removed)
+        return bool(removed)
+

class MockCluster(object):

@@ -76,20 +103,20 @@ class MockCluster(object):
    def __init__(self):
        self.metadata = MockMetadata()
        self.added_hosts = []
-        self.removed_hosts = []
        self.scheduler = Mock(spec=_Scheduler)
        self.executor = Mock(spec=ThreadPoolExecutor)
        self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(RoundRobinPolicy())
        self.endpoint_factory = DefaultEndPointFactory().configure(self)
        self.ssl_options = None

-    def add_host(self, endpoint, datacenter, rack, signal=False, refresh_nodes=True):
-        host = Host(endpoint, SimpleConvictionPolicy, datacenter, rack)
+    def add_host(self, endpoint, datacenter, rack, signal=False, refresh_nodes=True, host_id=None):
+        host = Host(endpoint, SimpleConvictionPolicy, datacenter, rack, host_id=host_id)
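+        # register with the shared metadata; a duplicate host_id yields the existing Host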
+        host, _ = self.metadata.add_or_return_host(host)
        self.added_hosts.append(host)
        return host, True

    def remove_host(self, host):
-        self.removed_hosts.append(host)
+        pass

    def on_up(self, host):
        pass
@@ -121,20 +148,20 @@ def __init__(self):
        self.endpoint = DefaultEndPoint("192.168.1.0")
        self.original_endpoint = self.endpoint
        self.local_results = [
-            ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"],
-            [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]
+            ["rpc_address", "schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"],
+            [["192.168.1.0", "a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]
        ]

        self.peer_results = [
            ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"],
-            [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid1"],
-             ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"]]
+            [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"],
+             ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"], "uuid3"]]
        ]

        self.peer_results_v2 = [
            ["native_address", "native_port", "peer", "peer_port", "schema_version", "data_center", "rack", "tokens", "host_id"],
-            [["192.168.1.1", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid1"],
-             ["192.168.1.2", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"]]
+            [["192.168.1.1", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"],
+             ["192.168.1.2", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", ["2", "102", "202"], "uuid3"]]
        ]
        self.wait_for_responses = Mock(return_value=_node_meta_results(self.local_results, self.peer_results))

@@ -154,15 +181,15 @@ def sleep(self, amount):
class ControlConnectionTest(unittest.TestCase):

    _matching_schema_preloaded_results = _node_meta_results(
-        local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"],
-                       [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]),
+        local_results=(["rpc_address", "schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"],
+                       [["192.168.1.0", "a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]),
        peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"],
                      [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"],
                       ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"], "uuid3"]]))

    _nonmatching_schema_preloaded_results = _node_meta_results(
-        local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"],
-                       [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]),
+        local_results=(["rpc_address", "schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"],
+                       [["192.168.1.0", "a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]),
        peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"],
                      [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"],
                       ["192.168.1.2", "10.0.0.2", "b", "dc1", "rack1", ["2", "102", "202"], "uuid3"]]))
@@ -240,10 +267,11 @@ def test_wait_for_schema_agreement_rpc_lookup(self):
        If the rpc_address is 0.0.0.0, the "peer" column should be used instead.
        """
        self.connection.peer_results[1].append(
-            ["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"]]
+            ["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"], "uuid6"]
        )
-        host = Host(DefaultEndPoint("0.0.0.0"), SimpleConvictionPolicy)
-        self.cluster.metadata.hosts[DefaultEndPoint("foobar")] = host
+        host = Host(DefaultEndPoint("0.0.0.0"), SimpleConvictionPolicy, host_id='uuid6')
+        self.cluster.metadata.hosts[host.host_id] = host
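+        # rpc_address is 0.0.0.0, so the host must be resolvable via its peer address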
+        self.cluster.metadata._host_id_by_endpoint[DefaultEndPoint(PEER_IP)] = host.host_id
        host.is_up = False

        # even though the new host has a different schema version, it's
@@ -285,7 +313,7 @@ def refresh_and_validate_added_hosts():
        del self.connection.peer_results[:]
        self.connection.peer_results.extend([
            ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"],
-            [["192.168.1.3", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], 'uuid5'],
+            [["192.168.1.3", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], 'uuid6'],
             # all others are invalid
             [None, None, "a", "dc1", "rack1", ["1", "101", "201"], 'uuid1'],
             ["192.168.1.7", "10.0.0.1", "a", None, "rack1", ["1", "101", "201"], 'uuid2'],
@@ -299,7 +327,7 @@ def refresh_and_validate_added_hosts():
        del self.connection.peer_results[:]
        self.connection.peer_results.extend([
            ["native_address", "native_port", "peer", "peer_port", "schema_version", "data_center", "rack", "tokens", "host_id"],
-            [["192.168.1.4", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid1"],
+            [["192.168.1.4", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid6"],
             # all others are invalid
             [None, 9042, None, 7040, "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"],
             ["192.168.1.5", 9042, "10.0.0.2", 7040, "a", None, "rack1", ["2", "102", "202"], "uuid2"],
@@ -336,29 +364,29 @@ def test_refresh_nodes_and_tokens_no_partitioner(self):
        Test handling of an unknown partitioner.
        """
        # set the partitioner column to None
-        self.connection.local_results[1][0][4] = None
+        self.connection.local_results[1][0][5] = None
        self.control_connection.refresh_node_list_and_token_map()
        meta = self.cluster.metadata
        self.assertEqual(meta.partitioner, None)
        self.assertEqual(meta.token_map, {})

    def test_refresh_nodes_and_tokens_add_host(self):
        self.connection.peer_results[1].append(
-            ["192.168.1.3", "10.0.0.3", "a", "dc1", "rack1", ["3", "103", "203"], "uuid3"]
+            ["192.168.1.3", "10.0.0.3", "a", "dc1", "rack1", ["3", "103", "203"], "uuid4"]
        )
        self.cluster.scheduler.schedule = lambda delay, f, *args, **kwargs: f(*args, **kwargs)
        self.control_connection.refresh_node_list_and_token_map()
        self.assertEqual(1, len(self.cluster.added_hosts))
        self.assertEqual(self.cluster.added_hosts[0].address, "192.168.1.3")
        self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1")
        self.assertEqual(self.cluster.added_hosts[0].rack, "rack1")
-        self.assertEqual(self.cluster.added_hosts[0].host_id, "uuid3")
+        self.assertEqual(self.cluster.added_hosts[0].host_id, "uuid4")

    def test_refresh_nodes_and_tokens_remove_host(self):
        del self.connection.peer_results[1][1]
        self.control_connection.refresh_node_list_and_token_map()
-        self.assertEqual(1, len(self.cluster.removed_hosts))
-        self.assertEqual(self.cluster.removed_hosts[0].address, "192.168.1.2")
+        self.assertEqual(1, len(self.cluster.metadata.removed_hosts))
+        self.assertEqual(self.cluster.metadata.removed_hosts[0].address, "192.168.1.2")

    def test_refresh_nodes_and_tokens_timeout(self):

@@ -423,7 +451,7 @@ def test_handle_status_change(self):
        }
        self.cluster.scheduler.reset_mock()
        self.control_connection._handle_status_change(event)
-        host = self.cluster.metadata.hosts[DefaultEndPoint('192.168.1.0')]
+        host = self.cluster.metadata.get_host(DefaultEndPoint('192.168.1.0'))
        self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.cluster.on_up, host)

        self.cluster.scheduler.schedule.reset_mock()
@@ -440,7 +468,7 @@ def test_handle_status_change(self):
            'address': ('192.168.1.0', 9000)
        }
        self.control_connection._handle_status_change(event)
-        host = self.cluster.metadata.hosts[DefaultEndPoint('192.168.1.0')]
+        host = self.cluster.metadata.get_host(DefaultEndPoint('192.168.1.0'))
        self.assertIs(host, self.cluster.down_host)

    def test_handle_schema_change(self):
@@ -516,7 +544,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_port(self):
        del self.connection.peer_results[:]
        self.connection.peer_results.extend(self.connection.peer_results_v2)
        self.connection.peer_results[1].append(
-            ["192.168.1.3", 555, "10.0.0.3", 666, "a", "dc1", "rack1", ["3", "103", "203"], "uuid3"]
+            ["192.168.1.3", 555, "10.0.0.3", 666, "a", "dc1", "rack1", ["3", "103", "203"], "uuid4"]
        )
        self.connection.wait_for_responses = Mock(return_value=_node_meta_results(
            self.connection.local_results, self.connection.peer_results))
@@ -536,7 +564,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_invalid_port(self):
        del self.connection.peer_results[:]
        self.connection.peer_results.extend(self.connection.peer_results_v2)
        self.connection.peer_results[1].append(
-            ["192.168.1.3", -1, "10.0.0.3", 0, "a", "dc1", "rack1", ["3", "103", "203"], "uuid3"]
+            ["192.168.1.3", -1, "10.0.0.3", 0, "a", "dc1", "rack1", ["3", "103", "203"], "uuid4"]
        )
        self.connection.wait_for_responses = Mock(return_value=_node_meta_results(
            self.connection.local_results, self.connection.peer_results))
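For reference, a minimal standalone sketch (not part of the diff) of how the reworked, host_id-keyed MockMetadata is expected to behave. It assumes a driver version where Host accepts a host_id keyword; the import paths are assumptions based on the driver's usual layout.

# Hypothetical usage sketch; not part of the change set above.
from cassandra.connection import DefaultEndPoint
from cassandra.policies import SimpleConvictionPolicy
from cassandra.pool import Host

from tests.unit.test_control_connection import MockMetadata  # assumed module path

meta = MockMetadata()

# A host with an unknown host_id is registered and its endpoint indexed.
new_host = Host(endpoint=DefaultEndPoint("192.168.1.3"),
                conviction_policy_factory=SimpleConvictionPolicy,
                host_id='uuid4')
host, created = meta.add_or_return_host(new_host)
assert created and host is new_host

# A host with an already-known host_id returns the existing Host instead.
duplicate = Host(endpoint=DefaultEndPoint("192.168.1.3"),
                 conviction_policy_factory=SimpleConvictionPolicy,
                 host_id='uuid4')
host, created = meta.add_or_return_host(duplicate)
assert not created and host is new_host

# Endpoint lookups go through the endpoint -> host_id index.
assert meta.get_host(DefaultEndPoint("192.168.1.3")) is new_host

# Removal is by host_id and is recorded for the tests to inspect.
meta.remove_host_by_host_id('uuid4')
assert meta.removed_hosts[-1] is new_host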