Skip to content

Commit 9c98bca

Browse files
committed
[RUBY-255] Tests for missing peer columns
1 parent 485e8c4 commit 9c98bca

File tree

2 files changed

+106
-9
lines changed

2 files changed

+106
-9
lines changed
Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
# encoding: utf-8
2+
3+
#--
4+
# Copyright 2013-2016 DataStax, Inc.
5+
#
6+
# Licensed under the Apache License, Version 2.0 (the "License");
7+
# you may not use this file except in compliance with the License.
8+
# You may obtain a copy of the License at
9+
#
10+
# http://www.apache.org/licenses/LICENSE-2.0
11+
#
12+
# Unless required by applicable law or agreed to in writing, software
13+
# distributed under the License is distributed on an "AS IS" BASIS,
14+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
# See the License for the specific language governing permissions and
16+
# limitations under the License.
17+
#++
18+
19+
require File.dirname(__FILE__) + '/integration_test_case.rb'
20+
21+
# Integration tests for the driver's control connection, verifying that it
# tolerates rows in system.peers that are missing required metadata columns.
class ControlConnectionTest < IntegrationTestCase
  def self.before_suite
    @@ccm_cluster = CCM.setup_cluster(1, 2)
  end

  def self.after_suite
    CCM.remove_cluster(@@ccm_cluster.name)
  end

  # Opens a session pinned to node1 (each node maintains its own system.peers
  # table, so edits must go through node1) and yields it. The cluster is
  # always closed, even if the block raises or an assertion fails — the
  # original code leaked the connection on assertion failure.
  #
  # @yieldparam session [Cassandra::Session] session connected to node1 only
  # @return the block's return value
  def with_node1_session
    allowed_ips = ['127.0.0.1']
    round_robin = Cassandra::LoadBalancing::Policies::RoundRobin.new
    whitelist = Cassandra::LoadBalancing::Policies::WhiteList.new(allowed_ips, round_robin)
    cluster = Cassandra.cluster(load_balancing_policy: whitelist)
    begin
      yield cluster.connect
    ensure
      cluster.close
    end
  end

  # Deletes the given column for node2 from node1's system.peers table and
  # returns the original value so it can be restored afterwards.
  #
  # @param info [String] name of the system.peers column to remove
  # @return the column's original value
  def remove_peer_info(info)
    with_node1_session do |session|
      value = session.execute("SELECT #{info} FROM system.peers WHERE peer = '127.0.0.2'").first[info]
      session.execute("DELETE #{info} FROM system.peers WHERE peer = '127.0.0.2'")
      result = session.execute("SELECT #{info} FROM system.peers WHERE peer = '127.0.0.2'").first
      assert_nil result[info]
      value
    end
  end

  # Writes the given column value for node2 back into node1's system.peers
  # table, undoing remove_peer_info.
  #
  # @param info [String] name of the system.peers column to restore
  # @param value the value previously returned by remove_peer_info
  def restore_peer_info(info, value)
    with_node1_session do |session|
      session.execute("UPDATE system.peers SET #{info}=? WHERE peer = '127.0.0.2'", arguments: [value])
      result = session.execute("SELECT #{info} FROM system.peers WHERE peer = '127.0.0.2'").first
      assert_equal value, result[info]
    end
  end

  # Test for null columns in peer
  #
  # test_missing_peer_columns tests that the control connection ignores any peers which have missing peer columns.
  # Using a simple 2-node cluster, it first removes one of the peer columns of node2 from node1's system.peers
  # table. It then uses node1 explicitly as the control connection and verifies that node2 has not been used as a host.
  # It finally restores the peer column in node1 so the next test case can continue.
  #
  # @since 2.1.7
  # @jira_ticket RUBY-255
  # @expected_result Node2 should not be used as a host
  #
  # @test_assumptions A 2-node Cassandra cluster.
  # @test_category control_connection
  #
  def test_missing_peer_columns
    peer_info = ['host_id', 'data_center', 'rack', 'rpc_address', 'tokens']

    peer_info.each do |info|
      # Pre-declare so the ensure clause can tell whether removal actually
      # happened; the original unconditionally called restore_peer_info and
      # would have written nil back if remove_peer_info raised early.
      original_value = nil
      begin
        original_value = remove_peer_info(info)
        cluster = Cassandra.cluster(hosts: ['127.0.0.1'])
        begin
          assert_equal ['127.0.0.1'], cluster.hosts.map { |h| h.ip.to_s }
        ensure
          cluster.close
        end
      ensure
        restore_peer_info(info, original_value) unless original_value.nil?
      end
    end
  end
end

support/ccm.rb

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -455,6 +455,7 @@ def start(jvm_arg=nil)
455455

456456
options[:load_balancing_policy] = SameOrderLoadBalancingPolicy.new
457457

458+
total_attempts = 1
458459
until @nodes.all?(&:up?) && @cluster && @cluster.hosts.select(&:up?).count == @nodes.size
459460
attempts = 1
460461

@@ -493,6 +494,11 @@ def start(jvm_arg=nil)
493494
$stderr.puts "not all hosts are up yet, retrying in 1s..."
494495
sleep(1)
495496
end
497+
498+
total_attempts += 1
499+
if total_attempts >= 20
500+
raise "Cluster hosts did not match node count. nodes:#{@nodes.size}, hosts:#{@cluster.hosts.select(&:up?).count}"
501+
end
496502
end
497503

498504
$stderr.puts "creating session"
@@ -871,6 +877,15 @@ def setup_cluster(no_dc = 1, no_nodes_per_dc = 3)
871877
@current_cluster
872878
end
873879

880+
# Removes the named CCM cluster entirely: tears it down via `ccm remove`,
# drops it from the in-memory clusters list, and clears @current_cluster if
# it pointed at the removed cluster.
#
# @param name [String] name of the cluster to remove
# @return [nil] always; a no-op when no cluster with that name exists
def remove_cluster(name)
  cluster = clusters.find { |c| c.name == name }
  return unless cluster
  ccm.exec('remove', cluster.name)
  clusters.delete(cluster)
  # Guard against @current_cluster being nil — the original compared
  # @current_cluster.name unconditionally and raised NoMethodError when no
  # cluster was current.
  @current_cluster = nil if @current_cluster && @current_cluster.name == name
  nil
end
888+
874889
private
875890

876891
def ccm
@@ -918,15 +933,6 @@ def switch_cluster(name)
918933
nil
919934
end
920935

921-
def remove_cluster(name)
922-
cluster = clusters.find {|c| c.name == name}
923-
return unless cluster
924-
ccm.exec('remove', cluster.name)
925-
clusters.delete(cluster)
926-
927-
nil
928-
end
929-
930936
def create_cluster(name, version, datacenters, nodes_per_datacenter)
931937
nodes = Array.new(datacenters, nodes_per_datacenter).join(':')
932938

0 commit comments

Comments
 (0)