4 files changed, +43 -22 lines changed
Changed paths include deploy/helm/listener-operator/crds and rust/operator-binary/src/csi_server.

@@ -39,4 +39,4 @@ walkdir = "2.5.0"
 [patch."https://github.com/stackabletech/operator-rs.git"]
 # stackable-operator = { path = "../operator-rs/crates/stackable-operator" }
-# stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" }
+stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "feat/listenerclass-stickiness" }

@@ -82,6 +82,21 @@
               - LoadBalancer
               - ClusterIP
               type: string
+            stickyNodePorts:
+              default: false
+              description: |-
+                Whether a Pod exposed using a NodePort should be pinned to a specific Kubernetes node.
+
+                By pinning the Pod to a specific (stable) Kubernetes node, stable addresses can be
+                provided using NodePorts. The stickiness is achieved by listener-operator setting the
+                `volume.kubernetes.io/selected-node` annotation on the Listener PVC.
+
+                However, this only works on setups with long-lived nodes. If your nodes are rotated on
+                a regular basis, the Pods previously running on a removed node will be stuck in Pending
+                until you delete the PVC that carries the stickiness.
+
+                Because of this, stickiness is not enabled by default, so that all environments are supported.
+              type: boolean
             required:
             - serviceType
             type: object
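
For reference, here is a hypothetical sketch of how the new schema field could be declared on the Rust side so that a generated CRD matches the snippet above. The struct, enum, and derive set are assumptions for illustration, not the operator's actual definitions:

    use schemars::JsonSchema;
    use serde::{Deserialize, Serialize};

    /// Assumed enum mirroring the `serviceType` values shown in the schema above.
    #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)]
    pub enum ServiceType {
        NodePort,
        LoadBalancer,
        ClusterIP,
    }

    /// Hypothetical spec struct: `sticky_node_ports` serializes as `stickyNodePorts`
    /// and falls back to `false` when omitted, matching `default: false` in the CRD.
    #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
    #[serde(rename_all = "camelCase")]
    pub struct ListenerClassSpec {
        pub service_type: ServiceType,
        #[serde(default)]
        pub sticky_node_ports: bool,
    }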

@@ -127,26 +127,32 @@ impl csi::v1::controller_server::Controller for ListenerOperatorController {
                 .within(&ns)
                 .erase(),
         })?;
+
+        // We only configure node stickiness when it is enabled and the Service is of type
+        // NodePort.
+        let accessible_topology = if listener_class.spec.sticky_node_ports
+            && listener_class.spec.service_type == listener::v1alpha1::ServiceType::NodePort
+        {
+            // Pick the top node (as selected by the CSI client) and "stick" to that,
+            // since we want clients to have a stable address to connect to.
+            request
+                .accessibility_requirements
+                .unwrap_or_default()
+                .preferred
+                .into_iter()
+                .take(1)
+                .collect()
+        } else {
+            Vec::new()
+        };
+
         Ok(Response::new(csi::v1::CreateVolumeResponse {
             volume: Some(csi::v1::Volume {
                 capacity_bytes: 0,
                 volume_id: request.name,
                 volume_context: raw_volume_context.into_iter().collect(),
                 content_source: None,
-                accessible_topology: match listener_class.spec.service_type {
-                    // Pick the top node (as selected by the CSI client) and "stick" to that
-                    // Since we want clients to have a stable address to connect to
-                    listener::v1alpha1::ServiceType::NodePort => request
-                        .accessibility_requirements
-                        .unwrap_or_default()
-                        .preferred
-                        .into_iter()
-                        .take(1)
-                        .collect(),
-                    // Load balancers and services of type ClusterIP have no relationship to any particular node, so don't try to be sticky
-                    listener::v1alpha1::ServiceType::LoadBalancer
-                    | listener::v1alpha1::ServiceType::ClusterIP => Vec::new(),
-                },
+                accessible_topology,
             }),
         }))
     }
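
To make the new branch easier to reason about in isolation, here is a minimal, self-contained sketch of the same decision logic. The `Topology` stand-in and the label key are assumptions; the real type is the prost-generated `csi::v1::Topology`:

    use std::collections::HashMap;

    // Stand-in for the prost-generated csi::v1::Topology (assumed to be a map of
    // topology label key/value segments).
    #[derive(Clone, Debug, Default, PartialEq)]
    struct Topology {
        segments: HashMap<String, String>,
    }

    // Keep the CSI client's top preferred node only when stickiness is enabled
    // AND the ListenerClass exposes the Pod via a NodePort; otherwise return an
    // empty topology so the volume is not pinned to any node.
    fn sticky_topology(sticky: bool, is_node_port: bool, preferred: Vec<Topology>) -> Vec<Topology> {
        if sticky && is_node_port {
            preferred.into_iter().take(1).collect()
        } else {
            Vec::new()
        }
    }

    fn main() {
        let node = Topology {
            // The label key here is illustrative only.
            segments: HashMap::from([("kubernetes.io/hostname".to_string(), "node-1".to_string())]),
        };
        assert_eq!(sticky_topology(true, true, vec![node.clone()]), vec![node]); // sticky NodePort pins
        assert!(sticky_topology(false, true, vec![]).is_empty()); // stickiness disabled
        assert!(sticky_topology(true, false, vec![]).is_empty()); // ClusterIP / LoadBalancer
        println!("stickiness decision behaves as expected");
    }

With this change, ClusterIP and LoadBalancer classes behave exactly as before, and NodePort classes only become sticky when `stickyNodePorts` is explicitly enabled.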