-
Notifications
You must be signed in to change notification settings - Fork 30
TemporalConnection update propagation #102
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 7 commits
aac9f1f
52af07c
d5a4cbc
57b4684
3bbc280
fdc6662
ee68d71
ce38023
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,66 @@ | ||
| --- | ||
| apiVersion: rbac.authorization.k8s.io/v1 | ||
| kind: ClusterRole | ||
| metadata: | ||
| name: manager-role | ||
| rules: | ||
| - apiGroups: | ||
| - "" | ||
| resources: | ||
| - secrets | ||
| verbs: | ||
| - get | ||
| - list | ||
| - watch | ||
| - apiGroups: | ||
| - apps | ||
| resources: | ||
| - deployments | ||
| verbs: | ||
| - create | ||
| - delete | ||
| - get | ||
| - list | ||
| - patch | ||
| - update | ||
| - watch | ||
| - apiGroups: | ||
| - apps | ||
| resources: | ||
| - deployments/scale | ||
| verbs: | ||
| - update | ||
| - apiGroups: | ||
| - temporal.io | ||
| resources: | ||
| - temporalconnections | ||
| verbs: | ||
| - get | ||
| - list | ||
| - watch | ||
| - apiGroups: | ||
| - temporal.io | ||
| resources: | ||
| - temporalworkerdeployments | ||
| verbs: | ||
| - create | ||
| - delete | ||
| - get | ||
| - list | ||
| - patch | ||
| - update | ||
| - watch | ||
| - apiGroups: | ||
| - temporal.io | ||
| resources: | ||
| - temporalworkerdeployments/finalizers | ||
| verbs: | ||
| - update | ||
| - apiGroups: | ||
| - temporal.io | ||
| resources: | ||
| - temporalworkerdeployments/status | ||
| verbs: | ||
| - get | ||
| - patch | ||
| - update | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,26 @@ | ||
| --- | ||
| apiVersion: admissionregistration.k8s.io/v1 | ||
| kind: MutatingWebhookConfiguration | ||
| metadata: | ||
| name: mutating-webhook-configuration | ||
| webhooks: | ||
| - admissionReviewVersions: | ||
| - v1 | ||
| clientConfig: | ||
| service: | ||
| name: webhook-service | ||
| namespace: system | ||
| path: /mutate-temporal-io-temporal-io-v1alpha1-temporalworkerdeployment | ||
| failurePolicy: Fail | ||
| name: mtemporalworker.kb.io | ||
| rules: | ||
| - apiGroups: | ||
| - temporal.io.temporal.io | ||
| apiVersions: | ||
| - v1alpha1 | ||
| operations: | ||
| - CREATE | ||
| - UPDATE | ||
| resources: | ||
| - temporalworkers | ||
| sideEffects: None |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -21,7 +21,9 @@ import ( | |
| ctrl "sigs.k8s.io/controller-runtime" | ||
| "sigs.k8s.io/controller-runtime/pkg/client" | ||
| "sigs.k8s.io/controller-runtime/pkg/controller" | ||
| "sigs.k8s.io/controller-runtime/pkg/handler" | ||
| "sigs.k8s.io/controller-runtime/pkg/log" | ||
| "sigs.k8s.io/controller-runtime/pkg/reconcile" | ||
| ) | ||
|
|
||
| var ( | ||
|
|
@@ -58,7 +60,6 @@ type TemporalWorkerDeploymentReconciler struct { | |
| // | ||
| // For more details, check Reconcile and its Result here: | ||
| // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.15.0/pkg/reconcile | ||
| // TODO(carlydf): Add watching of temporal connection custom resource (may have issue) | ||
| func (r *TemporalWorkerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { | ||
| // TODO(Shivam): Monitor if the time taken for a successful reconciliation loop is closing in on 5 minutes. If so, we | ||
| // may need to increase the timeout value. | ||
|
|
@@ -112,8 +113,9 @@ func (r *TemporalWorkerDeploymentReconciler) Reconcile(ctx context.Context, req | |
|
|
||
| // Get or update temporal client for connection | ||
| temporalClient, ok := r.TemporalClientPool.GetSDKClient(clientpool.ClientPoolKey{ | ||
| HostPort: temporalConnection.Spec.HostPort, | ||
| Namespace: workerDeploy.Spec.WorkerOptions.TemporalNamespace, | ||
| HostPort: temporalConnection.Spec.HostPort, | ||
| Namespace: workerDeploy.Spec.WorkerOptions.TemporalNamespace, | ||
| MutualTLSSecret: temporalConnection.Spec.MutualTLSSecret, | ||
|
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. @jlegrone - curious to hear your thoughts on this: I added the name of the mutual TLS secret to the client pool key. However, one area where this breaks is if someone just replaces the contents of an existing secret without changing the name. In that case, the controller does not really get a new client.
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Could you solve the problem of the content changing without the name changing by storing a hash of the name + contents instead of just the name? Or, if that's inefficient because it means you have to read the secret contents all the time just to generate the map key, we could do a different solution for the contents changing, where if an error occurs when calling the temporal APIs that suggests "wrong credentials", we could try reloading them once just in case they've changed. Then, do a namespace describe to make sure they work, and if they work, restart all the workers. I don't think this needs to be solved in this PR though, but if you don't solve the "changed content, not expired, same name" thing, do just create an issue about it.
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. thanks for responding to this; yes, I did think about the hashing approach. Having said that, I like the credential-reload fallback suggestion.
||
| }, temporalConnection.Spec.MutualTLSSecret != "") | ||
| if !ok { | ||
| c, err := r.TemporalClientPool.UpsertClient(ctx, clientpool.NewClientOptions{ | ||
|
|
@@ -212,9 +214,35 @@ func (r *TemporalWorkerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) | |
| return ctrl.NewControllerManagedBy(mgr). | ||
| For(&temporaliov1alpha1.TemporalWorkerDeployment{}). | ||
| Owns(&appsv1.Deployment{}). | ||
| Watches(&temporaliov1alpha1.TemporalConnection{}, handler.EnqueueRequestsFromMapFunc(r.findTWDsUsingConnection)). | ||
Shivs11 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| WithOptions(controller.Options{ | ||
| MaxConcurrentReconciles: 100, | ||
| RecoverPanic: &recoverPanic, | ||
| }). | ||
| Complete(r) | ||
| } | ||
|
|
||
| func (r *TemporalWorkerDeploymentReconciler) findTWDsUsingConnection(ctx context.Context, tc client.Object) []reconcile.Request { | ||
| var requests []reconcile.Request | ||
|
|
||
| // Find all TWDs in same namespace that reference this TC | ||
| var workers temporaliov1alpha1.TemporalWorkerDeploymentList | ||
Shivs11 marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| if err := r.List(ctx, &workers, client.InNamespace(tc.GetNamespace())); err != nil { | ||
| return requests | ||
| } | ||
|
|
||
| // Filter to ones using this connection | ||
| for _, worker := range workers.Items { | ||
| if worker.Spec.WorkerOptions.TemporalConnection == tc.GetName() { | ||
| // Add the TWD object as a reconcile request | ||
| requests = append(requests, reconcile.Request{ | ||
| NamespacedName: types.NamespacedName{ | ||
| Name: worker.Name, | ||
| Namespace: worker.Namespace, | ||
| }, | ||
| }) | ||
| } | ||
| } | ||
|
|
||
| return requests | ||
| } | ||
Uh oh!
There was an error while loading. Please reload this page.