
Commit 71e90de

Merge pull request #1591 from signalfx/clusterslots-ro
ClusterSlots Config and ReadOnly
2 parents: b3e0aa2 + 74fdad9

2 files changed (+67 −3)

cluster.go

Lines changed: 7 additions & 3 deletions
@@ -86,7 +86,7 @@ func (opt *ClusterOptions) init() {
 		opt.MaxRedirects = 3
 	}
 
-	if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+	if opt.RouteByLatency || opt.RouteRandomly {
 		opt.ReadOnly = true
 	}
 
@@ -153,9 +153,13 @@ func (opt *ClusterOptions) clientOptions() *Options {
 		IdleTimeout:        opt.IdleTimeout,
 		IdleCheckFrequency: disableIdleCheck,
 
-		readOnly: opt.ReadOnly,
-
 		TLSConfig: opt.TLSConfig,
+		// If ClusterSlots is populated, then we probably have an artificial
+		// cluster whose nodes are not in clustering mode (otherwise there isn't
+		// much use for ClusterSlots config). This means we cannot execute the
+		// READONLY command against that node -- setting readOnly to false in such
+		// situations in the options below will prevent that from happening.
+		readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
 	}
 }
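For context, a minimal sketch (not part of this commit) of how a caller might combine the ClusterSlots callback with ReadOnly after this change. The addresses, slot ranges, and the go-redis/v8 import path are assumptions for illustration; the point is that because ClusterSlots is non-nil, the per-node options are built with readOnly disabled, so the READONLY command is never sent to nodes that may not be running in cluster mode.

package main

import (
	"context"

	"github.com/go-redis/redis/v8" // assumed v8 import path
)

func main() {
	ctx := context.Background()

	client := redis.NewClusterClient(&redis.ClusterOptions{
		// ReadOnly still lets read-only commands be routed to the extra
		// node addresses, but with ClusterSlots set, clientOptions() now
		// computes readOnly = opt.ReadOnly && opt.ClusterSlots == nil,
		// i.e. false, so READONLY is not issued against these nodes.
		ReadOnly: true,
		ClusterSlots: func(ctx context.Context) ([]redis.ClusterSlot, error) {
			// Hypothetical static topology: two slot ranges, two nodes each.
			return []redis.ClusterSlot{{
				Start: 0,
				End:   8191,
				Nodes: []redis.ClusterNode{{Addr: ":7000"}, {Addr: ":7001"}},
			}, {
				Start: 8192,
				End:   16383,
				Nodes: []redis.ClusterNode{{Addr: ":7002"}, {Addr: ":7003"}},
			}}, nil
		},
	})
	defer client.Close()

	_ = client.Ping(ctx).Err()
}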

cluster_test.go

Lines changed: 60 additions & 0 deletions
@@ -1020,6 +1020,66 @@ var _ = Describe("ClusterClient", func() {
 
 		assertClusterClient()
 	})
+
+	Describe("ClusterClient with ClusterSlots with multiple nodes per slot", func() {
+		BeforeEach(func() {
+			failover = true
+
+			opt = redisClusterOptions()
+			opt.ReadOnly = true
+			opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) {
+				slots := []redis.ClusterSlot{{
+					Start: 0,
+					End:   4999,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":8220",
+					}, {
+						Addr: ":8223",
+					}},
+				}, {
+					Start: 5000,
+					End:   9999,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":8221",
+					}, {
+						Addr: ":8224",
+					}},
+				}, {
+					Start: 10000,
+					End:   16383,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":8222",
+					}, {
+						Addr: ":8225",
+					}},
+				}}
+				return slots, nil
+			}
+			client = cluster.newClusterClient(ctx, opt)
+
+			err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+				return master.FlushDB(ctx).Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error {
+				Eventually(func() int64 {
+					return client.DBSize(ctx).Val()
+				}, 30*time.Second).Should(Equal(int64(0)))
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			failover = false
+
+			err := client.Close()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		assertClusterClient()
+	})
 })
 
 var _ = Describe("ClusterClient without nodes", func() {
