
Commit 3306fbd

cescgina authored and openshift-merge-bot[bot] committed
Watch cinder endpoint in WatcherDecisionEngine
Watch the cinder KeystoneEndpoint in the WatcherDecisionEngine to detect changes and trigger reconciliation when endpoints are modified. This ensures the decision engine is reconciled when Cinder is enabled or disabled, so that the storage collector is properly configured. The endpoint URL hashing logic detects when cached endpoint URLs need to be updated and triggers deployment restarts accordingly.

Assisted-By: Cursor (claude-4-sonnet)
Resolves: OSPRH-20349
1 parent f363102 commit 3306fbd

13 files changed, +289 −0 lines

controllers/watcher_common.go

Lines changed: 5 additions & 0 deletions
@@ -36,6 +36,8 @@ const (
 	tlsAPIPublicField      = ".spec.tls.api.public.secretName"
 	topologyField          = ".spec.topologyRef.Name"
 	memcachedInstanceField = ".spec.memcachedInstance"
+	// service label for cinder endpoint
+	endpointCinder = "cinder"
 )

 var (
@@ -66,6 +68,9 @@ var (
 		topologyField,
 		memcachedInstanceField,
 	}
+	endpointList = []string{
+		endpointCinder,
+	}
 )

 const (
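Only endpoints whose service label appears in endpointList trigger a reconcile of the decision engine. A minimal, hedged sketch of that gating check follows; the endpointNova entry and the local stringInSlice helper are illustrative stand-ins (the operator uses lib-common's util.StringInSlice and the common.AppSelector label key), and the real mapping function appears in the controller diff below.

package main

import "fmt"

// Stand-ins for the operator's constants. endpointNova is hypothetical and only
// illustrates that adding one more label to endpointList is enough to start
// watching another service's endpoints.
const (
	endpointCinder = "cinder"
	endpointNova   = "nova" // hypothetical
)

var endpointList = []string{endpointCinder, endpointNova}

// stringInSlice is a local stand-in for lib-common's util.StringInSlice.
func stringInSlice(s string, list []string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	// labels as they might appear on a KeystoneEndpoint object; the real code
	// reads the common.AppSelector key instead of the literal "service".
	labels := map[string]string{"service": "cinder"}
	if svc, ok := labels["service"]; ok && stringInSlice(svc, endpointList) {
		fmt.Printf("endpoint for %q is watched, enqueue reconciles\n", svc)
	}
}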

controllers/watcherdecisionengine_controller.go

Lines changed: 56 additions & 0 deletions
@@ -53,6 +53,7 @@ import (
 	k8s_errors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/ptr"
 )

 // WatcherDecisionEngineReconciler reconciles a WatcherDecisionEngine object
@@ -229,6 +230,25 @@ func (r *WatcherDecisionEngineReconciler) Reconcile(ctx context.Context, req ctr
 		return ctrl.Result{}, err
 	}

+	// hash the endpoint URLs of the services this depends on
+	// By adding the hash to the hash of hashes being added to the deployment
+	// allows it to get restarted, in case the endpoint changes and it requires
+	// the current cached ones to be updated.
+
+	endpointUrlsHash, err := keystonev1.GetHashforKeystoneEndpointUrlsForServices(
+		ctx,
+		helper,
+		instance.Namespace,
+		ptr.To(string(endpoint.EndpointInternal)),
+		endpointList,
+	)
+
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	configVars["endpointUrlsHash"] = env.SetValue(endpointUrlsHash)
+
 	Log.Info(fmt.Sprintf("[DecisionEngine] Getting input hash '%s'", instance.Name))
 	//
 	// create hash over all the different input resources to identify if any those changed
@@ -403,6 +423,9 @@ func (r *WatcherDecisionEngineReconciler) SetupWithManager(mgr ctrl.Manager) err
 		Watches(&keystonev1.KeystoneAPI{},
 			handler.EnqueueRequestsFromMapFunc(r.findObjectForSrc),
 			builder.WithPredicates(keystonev1.KeystoneAPIStatusChangedPredicate)).
+		Watches(&keystonev1.KeystoneEndpoint{},
+			handler.EnqueueRequestsFromMapFunc(r.findObjectsWithAppSelectorLabelInNamespace),
+			builder.WithPredicates(keystonev1.KeystoneEndpointStatusChangedPredicate)).
 		Complete(r)
 }

@@ -700,3 +723,36 @@ func getDecisionEngineServiceLabels() map[string]string {
 		common.AppSelector: WatcherDecisionEngineLabelPrefix,
 	}
 }
+
+func (r *WatcherDecisionEngineReconciler) findObjectsWithAppSelectorLabelInNamespace(ctx context.Context, src client.Object) []reconcile.Request {
+	requests := []reconcile.Request{}
+
+	l := log.FromContext(ctx).WithName("Controllers").WithName("WatcherDecisionEngine")
+
+	// if the endpoint has the service label and its in our endpointList, reconcile the CR in the namespace
+	if svc, ok := src.GetLabels()[common.AppSelector]; ok && util.StringInSlice(svc, endpointList) {
+		crList := &watcherv1beta1.WatcherDecisionEngineList{}
+		listOps := &client.ListOptions{
+			Namespace: src.GetNamespace(),
+		}
+		err := r.Client.List(ctx, crList, listOps)
+		if err != nil {
+			l.Error(err, fmt.Sprintf("listing %s for namespace: %s", crList.GroupVersionKind().Kind, src.GetNamespace()))
+			return requests
+		}
+
+		for _, item := range crList.Items {
+			l.Info(fmt.Sprintf("input source %s changed, reconcile: %s - %s", src.GetName(), item.GetName(), item.GetNamespace()))
+
+			requests = append(requests,
+				reconcile.Request{
+					NamespacedName: types.NamespacedName{
+						Name:      item.GetName(),
+						Namespace: item.GetNamespace(),
+					},
+				},
+			)
+		}
+	}
+	return requests
+}
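Conceptually, the new endpointUrlsHash works like the operator's other config hashes: the hash of the watched endpoint URLs is folded into the deployment's CONFIG_HASH environment variable, so a changed (or removed) Cinder endpoint changes the pod template and forces a rollout. Below is a minimal, self-contained sketch of that idea; hashEndpointURLs and the URL map are illustrative assumptions, not the operator's implementation (the real hash is computed by keystonev1.GetHashforKeystoneEndpointUrlsForServices and wired through configVars above).

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"sort"
)

// hashEndpointURLs is a hypothetical stand-in for the keystone-operator helper:
// it produces a stable hash over a service -> internal endpoint URL map, so the
// hash only changes when an endpoint URL (or the set of services) changes.
func hashEndpointURLs(urls map[string]string) (string, error) {
	// sort keys so the hash is independent of map iteration order
	keys := make([]string, 0, len(urls))
	for k := range urls {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	ordered := make([][2]string, 0, len(keys))
	for _, k := range keys {
		ordered = append(ordered, [2]string{k, urls[k]})
	}
	b, err := json.Marshal(ordered)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(b)), nil
}

func main() {
	before, _ := hashEndpointURLs(map[string]string{"cinder": "https://cinder-internal.openstack.svc:8776"})
	after, _ := hashEndpointURLs(map[string]string{"cinder": "https://cinder-test-internal"})
	// A different hash ends up in the pod template (the CONFIG_HASH env var),
	// so the StatefulSet controller rolls out new decision-engine pods.
	fmt.Println(before != after) // true
}

Disabling Cinder removes its endpoint from the set being hashed, which changes the hash in the same way and therefore also restarts the decision engine; that is what the functional and kuttl tests below verify.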

tests/functional/watcherdecisionengine_controller_test.go

Lines changed: 120 additions & 0 deletions
@@ -1133,4 +1133,124 @@ heartbeat_in_pthread=false`,
 		}, timeout, interval).Should(Succeed())
 	})
 })
+When("WatcherDecisionEngine is reconfigured", func() {
+	cinderEndpoint := types.NamespacedName{}
+	BeforeEach(func() {
+		secret := CreateInternalTopLevelSecret()
+		DeferCleanup(k8sClient.Delete, ctx, secret)
+		prometheusSecret := th.CreateSecret(
+			watcherTest.PrometheusSecretName,
+			map[string][]byte{
+				"host": []byte("prometheus.example.com"),
+				"port": []byte("9090"),
+			},
+		)
+		DeferCleanup(k8sClient.Delete, ctx, prometheusSecret)
+		DeferCleanup(
+			mariadb.DeleteDBService,
+			mariadb.CreateDBService(
+				watcherTest.WatcherDecisionEngine.Namespace,
+				"openstack",
+				corev1.ServiceSpec{
+					Ports: []corev1.ServicePort{{Port: 3306}},
+				},
+			),
+		)
+		mariadb.CreateMariaDBAccountAndSecret(
+			watcherTest.WatcherDatabaseAccount,
+			mariadbv1.MariaDBAccountSpec{
+				UserName: "watcher",
+			},
+		)
+		mariadb.CreateMariaDBDatabase(
+			watcherTest.WatcherDecisionEngine.Namespace,
+			"watcher",
+			mariadbv1.MariaDBDatabaseSpec{
+				Name: "watcher",
+			},
+		)
+		mariadb.SimulateMariaDBAccountCompleted(watcherTest.WatcherDatabaseAccount)
+		mariadb.SimulateMariaDBDatabaseCompleted(watcherTest.WatcherDatabaseName)
+
+		DeferCleanup(keystone.DeleteKeystoneAPI, keystone.CreateKeystoneAPI(watcherTest.WatcherDecisionEngine.Namespace))
+
+		memcachedSpec := memcachedv1.MemcachedSpec{
+			MemcachedSpecCore: memcachedv1.MemcachedSpecCore{
+				Replicas: ptr.To(int32(1)),
+			},
+		}
+		DeferCleanup(infra.DeleteMemcached, infra.CreateMemcached(watcherTest.WatcherDecisionEngine.Namespace, MemcachedInstance, memcachedSpec))
+		infra.SimulateMemcachedReady(watcherTest.MemcachedNamespace)
+
+		DeferCleanup(th.DeleteInstance, CreateWatcherDecisionEngine(watcherTest.WatcherDecisionEngine, GetDefaultWatcherDecisionEngineSpec()))
+		// create watcher applier and watcher api to later check that their observed generation has not changed
+		DeferCleanup(th.DeleteInstance, CreateWatcherApplier(watcherTest.WatcherApplier, GetDefaultWatcherApplierSpec()))
+		DeferCleanup(th.DeleteInstance, CreateWatcherAPI(watcherTest.WatcherAPI, GetDefaultWatcherAPISpec()))
+
+		logger.Info("Created cinder endpoint")
+		cinderEndpoint = types.NamespacedName{Name: "cinder", Namespace: watcherTest.WatcherDecisionEngine.Namespace}
+		DeferCleanup(keystone.DeleteKeystoneEndpoint, keystone.CreateKeystoneEndpoint(cinderEndpoint))
+		keystone.SimulateKeystoneEndpointReady(cinderEndpoint)
+
+		th.SimulateStatefulSetReplicaReady(watcherTest.WatcherDecisionEngineStatefulSet)
+		th.SimulateStatefulSetReplicaReady(watcherTest.WatcherApplierStatefulSet)
+		th.SimulateStatefulSetReplicaReady(watcherTest.WatcherAPIStatefulSet)
+
+		th.ExpectCondition(
+			watcherTest.WatcherDecisionEngine,
+			ConditionGetterFunc(WatcherDecisionEngineConditionGetter),
+			condition.ReadyCondition,
+			corev1.ConditionTrue,
+		)
+
+	})
+	It("updates the deployment if cinder public endpoint gets deleted", func() {
+		originalConfigHash := GetEnvVarValue(
+			th.GetStatefulSet(watcherTest.WatcherDecisionEngine).Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "")
+		Expect(originalConfigHash).NotTo(Equal(""))
+		decisionEngineObservedOrig := th.GetStatefulSet(watcherTest.WatcherDecisionEngine).Status.ObservedGeneration
+		applierObservedOrig := th.GetStatefulSet(watcherTest.WatcherApplier).Status.ObservedGeneration
+		apiObservedOrig := th.GetStatefulSet(watcherTest.WatcherAPI).Status.ObservedGeneration
+
+		keystone.DeleteKeystoneEndpoint(cinderEndpoint)
+		logger.Info("Deleted cinder endpoint")
+		// Assert that the CONFIG_HASH of the StateFulSet is changed due to this reconfiguration
+		Eventually(func(g Gomega) {
+			currentConfigHash := GetEnvVarValue(
+				th.GetStatefulSet(watcherTest.WatcherDecisionEngine).Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "")
+			g.Expect(originalConfigHash).NotTo(Equal(currentConfigHash))
+
+		}, timeout, interval).Should(Succeed())
+
+		// Simulate the StatefulSet replicas to be ready after deleting cinder
+		th.SimulateStatefulSetReplicaReady(watcherTest.WatcherDecisionEngineStatefulSet)
+		th.SimulateStatefulSetReplicaReady(watcherTest.WatcherApplierStatefulSet)
+		th.SimulateStatefulSetReplicaReady(watcherTest.WatcherAPIStatefulSet)
+
+		// check that the StatefulSet replica watcher-decision-engine is updated while watcher-applier and watcher-api are not updated
+		applierObservedNew := th.GetStatefulSet(watcherTest.WatcherApplier).Status.ObservedGeneration
+		Expect(applierObservedNew).Should(Equal(applierObservedOrig))
+		apiObservedNew := th.GetStatefulSet(watcherTest.WatcherAPI).Status.ObservedGeneration
+		Expect(apiObservedNew).Should(Equal(apiObservedOrig))
+		decisionEngineObservedNew := th.GetStatefulSet(watcherTest.WatcherDecisionEngine).Status.ObservedGeneration
+		Expect(decisionEngineObservedNew).Should(Equal(decisionEngineObservedOrig + 1))
+
+	})
+	It("updates the deployment if cinder internal endpoint is modified", func() {
+		originalConfigHash := GetEnvVarValue(
+			th.GetStatefulSet(watcherTest.WatcherDecisionEngine).Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "")
+		Expect(originalConfigHash).NotTo(Equal(""))

+		keystone.UpdateKeystoneEndpoint(cinderEndpoint, "internal", "https://cinder-test-internal")
+		logger.Info("Reconfigured")
+
+		// Assert that the CONFIG_HASH of the StateFulSet is changed due to this reconfiguration
+		Eventually(func(g Gomega) {
+			currentConfigHash := GetEnvVarValue(
+				th.GetStatefulSet(watcherTest.WatcherDecisionEngine).Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "")
+			g.Expect(originalConfigHash).NotTo(Equal(currentConfigHash))
+
+		}, timeout, interval).Should(Succeed())
+	})
+})
 })

tests/kuttl/test-suites/default/deps/kustomization.yaml

Lines changed: 1 addition & 0 deletions
@@ -21,6 +21,7 @@ secretGenerator:
     - NovaAPIDatabasePassword=password
     - NovaCell0DatabasePassword=password
    - NovaCell1DatabasePassword=password
+    - CinderPassword=password
    - MetadataSecret=42
  name: osp-secret
 generatorOptions:
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+../common/cleanup-watcher.yaml
Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+apiVersion: watcher.openstack.org/v1beta1
+kind: Watcher
+metadata:
+  finalizers:
+  - openstack.org/watcher
+  name: watcher-kuttl
+  namespace: watcher-kuttl-default
+status:
+  # we just want to assert that the watcher is ready in this test
+  apiServiceReadyCount: 1
+  applierServiceReadyCount: 1
+  decisionengineServiceReadyCount: 1
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+namespaced: true
+commands:
+  - script: |
+      set -euxo pipefail
+      # check that the decision engine correctly detects that there is no cinder service
+      [ "$(oc logs -n $NAMESPACE watcher-kuttl-decision-engine-0 | grep -c 'Block storage service is not enabled, skipping storage collector')" == 2 ]
Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+apiVersion: watcher.openstack.org/v1beta1
+kind: Watcher
+metadata:
+  name: watcher-kuttl
+  namespace: watcher-kuttl-default
+spec:
+  databaseInstance: "openstack"
+  apiServiceTemplate:
+    tls:
+      caBundleSecretName: "combined-ca-bundle"
Lines changed: 32 additions & 0 deletions
@@ -0,0 +1,32 @@
+apiVersion: watcher.openstack.org/v1beta1
+kind: Watcher
+metadata:
+  finalizers:
+  - openstack.org/watcher
+  name: watcher-kuttl
+  namespace: watcher-kuttl-default
+status:
+  # we just want to assert that the watcher is ready in this test
+  apiServiceReadyCount: 1
+  applierServiceReadyCount: 1
+  decisionengineServiceReadyCount: 1
+---
+apiVersion: keystone.openstack.org/v1beta1
+kind: KeystoneService
+metadata:
+  name: cinderv3
+---
+apiVersion: keystone.openstack.org/v1beta1
+kind: KeystoneEndpoint
+metadata:
+  name: cinderv3
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+namespaced: true
+commands:
+  - script: |
+      set -euxo pipefail
+      # check that the decision detects that there is a cinder service and
+      # does not log that storage collector is skipped
+      [ "$(oc logs -n $NAMESPACE watcher-kuttl-decision-engine-0 |grep -c 'Block storage service is not enabled, skipping storage collector')" == 0 ]
Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+apiVersion: core.openstack.org/v1beta1
+kind: OpenStackControlPlane
+metadata:
+  name: openstack
+spec:
+  cinder:
+    enabled: true
+    template:
+      databaseInstance: openstack
+      databaseAccount: cinder
+      secret: osp-secret
+      cinderAPI:
+        replicas: 1
Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+apiVersion: watcher.openstack.org/v1beta1
+kind: Watcher
+metadata:
+  finalizers:
+  - openstack.org/watcher
+  name: watcher-kuttl
+  namespace: watcher-kuttl-default
+status:
+  # we just want to assert that the watcher is ready in this test
+  apiServiceReadyCount: 1
+  applierServiceReadyCount: 1
+  decisionengineServiceReadyCount: 1
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+namespaced: true
+commands:
+  - script: |
+      set -euxo pipefail
+      # check that the decision engine correctly detects that there is no cinder service
+      [ "$(oc logs -n $NAMESPACE watcher-kuttl-decision-engine-0 |grep -c 'Block storage service is not enabled, skipping storage collector')" == 2 ]
