Commit 6b4e056
pkg/helm: install resources in same namespace as CR (#2424)
This commit fixes a bug that causes all release resources to be created in the namespace that the operator is deployed in, not the namespace that the CR is deployed in.
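The gist of the fix, condensed from the diffs below into a single hypothetical helper (the function name is illustrative, and the import paths assume the Helm v3, apimachinery, and controller-runtime modules the operator already vendors):

```go
package release

import (
	"helm.sh/helm/v3/pkg/kube"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/manager"

	"github.com/operator-framework/operator-sdk/pkg/helm/client"
)

// newKubeClientForCR condenses the wiring this commit adds to
// managerFactory.NewManager: the CR's namespace is captured by the REST
// client getter and handed to Helm's kube client, so templated resources
// are created in the CR's namespace, not the operator's.
func newKubeClientForCR(mgr manager.Manager, cr *unstructured.Unstructured) (*kube.Client, error) {
	// Previously: client.NewRESTClientGetter(mgr) followed by kube.New(nil),
	// which left Helm to fall back to the client's default namespace
	// (in cluster, that is the operator pod's own namespace).
	rcg, err := client.NewRESTClientGetter(mgr, cr.GetNamespace())
	if err != nil {
		return nil, err
	}
	return kube.New(rcg), nil
}
```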
4 files changed (68 additions, 41 deletions)

CHANGELOG.md (2 additions, 0 deletions)

```diff
@@ -10,6 +10,8 @@
 
 ### Bug Fixes
 
+- Fixed a regression in the helm-operator that caused all releases to be deployed in the same namespace that the operator was deployed in, regardless of which namespace the CR was created in. Now release resources are created in the same namespace as the CR. ([#2414](https://github.com/operator-framework/operator-sdk/pull/2414))
+
 ## v0.14.0
 
 ### Added
```

hack/tests/e2e-helm.sh (38 additions, 28 deletions)

```diff
@@ -10,20 +10,24 @@ ROOTDIR="$(pwd)"
 TMPDIR="$(mktemp -d)"
 trap_add 'rm -rf $TMPDIR' EXIT
 
+test_namespace="test-e2e-helm"
+
 deploy_operator() {
-    kubectl create -f "$OPERATORDIR/deploy/service_account.yaml"
-    kubectl create -f "$OPERATORDIR/deploy/role.yaml"
-    kubectl create -f "$OPERATORDIR/deploy/role_binding.yaml"
     kubectl create -f "$OPERATORDIR/deploy/crds/helm.example.com_nginxes_crd.yaml"
-    kubectl create -f "$OPERATORDIR/deploy/operator.yaml"
+    kubectl create -f "$OPERATORDIR/deploy/service_account.yaml"
+    kubectl create -f "$OPERATORDIR/deploy/cluster_role.yaml"
+    kubectl create -f "$OPERATORDIR/deploy/cluster_role_binding.yaml"
+    kubectl create -f "$OPERATORDIR/deploy/cluster_operator.yaml"
+    kubectl create namespace ${test_namespace}
 }
 
 remove_operator() {
+    kubectl delete --ignore-not-found namespace ${test_namespace}
     kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/service_account.yaml"
-    kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/role.yaml"
-    kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/role_binding.yaml"
+    kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/cluster_role.yaml"
+    kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/cluster_role_binding.yaml"
     kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/crds/helm.example.com_nginxes_crd.yaml"
-    kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/operator.yaml"
+    kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/cluster_operator.yaml"
 }
 
 test_operator() {
@@ -46,68 +50,69 @@ test_operator() {
         exit 1
     fi
 
+
     # verify that the metrics endpoint exists
-    if ! timeout 1m bash -c -- "until kubectl run --attach --rm --restart=Never test-metrics --image=$metrics_test_image -- curl -sfo /dev/null http://nginx-operator-metrics:8383/metrics; do sleep 1; done";
+    if ! timeout 1m bash -c -- "until kubectl run --attach --rm --restart=Never test-metrics --image=${metrics_test_image} -- curl -sfo /dev/null http://nginx-operator-metrics:8383/metrics; do sleep 1; done";
     then
        echo "Failed to verify that metrics endpoint exists"
        kubectl logs deployment/nginx-operator
        exit 1
    fi
 
    # create CR
-    kubectl create -f deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml
-    trap_add 'kubectl delete --ignore-not-found -f ${OPERATORDIR}/deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml' EXIT
-    if ! timeout 1m bash -c -- 'until kubectl get nginxes.helm.example.com example-nginx -o jsonpath="{..status.deployedRelease.name}" | grep "example-nginx"; do sleep 1; done';
+    kubectl create --namespace=${test_namespace} -f deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml
+    trap_add "kubectl delete --namespace=${test_namespace} --ignore-not-found -f ${OPERATORDIR}/deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml" EXIT
+    if ! timeout 1m bash -c -- "until kubectl get --namespace=${test_namespace} nginxes.helm.example.com example-nginx -o jsonpath='{..status.deployedRelease.name}' | grep 'example-nginx'; do sleep 1; done";
    then
        kubectl logs deployment/nginx-operator
        exit 1
    fi
 
    # verify that the custom resource metrics endpoint exists
-    if ! timeout 1m bash -c -- "until kubectl run --attach --rm --restart=Never test-cr-metrics --image=$metrics_test_image -- curl -sfo /dev/null http://nginx-operator-metrics:8686/metrics; do sleep 1; done";
+    if ! timeout 1m bash -c -- "until kubectl run --attach --rm --restart=Never test-cr-metrics --image=${metrics_test_image} -- curl -sfo /dev/null http://nginx-operator-metrics:8686/metrics; do sleep 1; done";
    then
        echo "Failed to verify that custom resource metrics endpoint exists"
        kubectl logs deployment/nginx-operator
        exit 1
    fi
 
-    release_name=$(kubectl get nginxes.helm.example.com example-nginx -o jsonpath="{..status.deployedRelease.name}")
-    nginx_deployment=$(kubectl get deployment -l "app.kubernetes.io/instance=${release_name}" -o jsonpath="{..metadata.name}")
+    release_name=$(kubectl get --namespace=${test_namespace} nginxes.helm.example.com example-nginx -o jsonpath="{..status.deployedRelease.name}")
+    nginx_deployment=$(kubectl get --namespace=${test_namespace} deployment -l "app.kubernetes.io/instance=${release_name}" -o jsonpath="{..metadata.name}")
 
-    if ! timeout 1m kubectl rollout status deployment/${nginx_deployment};
+    if ! timeout 1m kubectl rollout --namespace=${test_namespace} status deployment/${nginx_deployment};
    then
-        kubectl describe pods -l "app.kubernetes.io/instance=${release_name}"
-        kubectl describe deployments ${nginx_deployment}
+        kubectl describe --namespace=${test_namespace} pods -l "app.kubernetes.io/instance=${release_name}"
+        kubectl describe --namespace=${test_namespace} deployments ${nginx_deployment}
        kubectl logs deployment/nginx-operator
        exit 1
    fi
 
-    nginx_service=$(kubectl get service -l "app.kubernetes.io/instance=${release_name}" -o jsonpath="{..metadata.name}")
-    kubectl get service ${nginx_service}
+    nginx_service=$(kubectl get --namespace=${test_namespace} service -l "app.kubernetes.io/instance=${release_name}" -o jsonpath="{..metadata.name}")
+    kubectl get --namespace=${test_namespace} service ${nginx_service}
 
    # scale deployment replicas to 2 and verify the
    # deployment automatically scales back down to 1.
-    kubectl scale deployment/${nginx_deployment} --replicas=2
-    if ! timeout 1m bash -c -- "until test \$(kubectl get deployment/${nginx_deployment} -o jsonpath='{..spec.replicas}') -eq 1; do sleep 1; done";
+    kubectl scale --namespace=${test_namespace} deployment/${nginx_deployment} --replicas=2
+    if ! timeout 1m bash -c -- "until test \$(kubectl get --namespace=${test_namespace} deployment/${nginx_deployment} -o jsonpath='{..spec.replicas}') -eq 1; do sleep 1; done";
    then
-        kubectl describe pods -l "app.kubernetes.io/instance=${release_name}"
-        kubectl describe deployments ${nginx_deployment}
+        kubectl describe --namespace=${test_namespace} pods -l "app.kubernetes.io/instance=${release_name}"
+        kubectl describe --namespace=${test_namespace} deployments ${nginx_deployment}
        kubectl logs deployment/nginx-operator
        exit 1
    fi
 
    # update CR to replicaCount=2 and verify the deployment
    # automatically scales up to 2 replicas.
-    kubectl patch nginxes.helm.example.com example-nginx -p '[{"op":"replace","path":"/spec/replicaCount","value":2}]' --type=json
-    if ! timeout 1m bash -c -- "until test \$(kubectl get deployment/${nginx_deployment} -o jsonpath='{..spec.replicas}') -eq 2; do sleep 1; done";
+    kubectl patch --namespace=${test_namespace} nginxes.helm.example.com example-nginx -p '[{"op":"replace","path":"/spec/replicaCount","value":2}]' --type=json
+    if ! timeout 1m bash -c -- "until test \$(kubectl get --namespace=${test_namespace} deployment/${nginx_deployment} -o jsonpath='{..spec.replicas}') -eq 2; do sleep 1; done";
    then
-        kubectl describe pods -l "app.kubernetes.io/instance=${release_name}"
-        kubectl describe deployments ${nginx_deployment}
+        kubectl describe --namespace=${test_namespace} pods -l "app.kubernetes.io/instance=${release_name}"
+        kubectl describe --namespace=${test_namespace} deployments ${nginx_deployment}
        kubectl logs deployment/nginx-operator
        exit 1
    fi
 
-    kubectl delete -f deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml --wait=true
+    kubectl delete --namespace=${test_namespace} -f deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml --wait=true
    kubectl logs deployment/nginx-operator | grep "Uninstalled release" | grep "${release_name}"
 }
 
@@ -131,6 +136,11 @@ operator-sdk build "$DEST_IMAGE"
 load_image_if_kind "$DEST_IMAGE"
 sed -i".bak" -E -e "s|REPLACE_IMAGE|$DEST_IMAGE|g" deploy/operator.yaml; rm -f deploy/operator.yaml.bak
 sed -i".bak" -E -e 's|Always|Never|g' deploy/operator.yaml; rm -f deploy/operator.yaml.bak
+
+kubectl create --dry-run -f "deploy/operator.yaml" -o json | jq '((.spec.template.spec.containers[] | select(.name == "nginx-operator").env[]) | select(.name == "WATCH_NAMESPACE")) |= {"name":"WATCH_NAMESPACE", "value":""}' | kubectl create --dry-run -f - -o yaml > deploy/cluster_operator.yaml
+kubectl create --dry-run -f "deploy/role.yaml" -o json | jq '.kind = "ClusterRole"' | kubectl create --dry-run -f - -o yaml > deploy/cluster_role.yaml
+kubectl create --dry-run -f "deploy/role_binding.yaml" -o json | jq '.subjects[0].namespace= "default"' | jq '.roleRef.kind= "ClusterRole"' | jq '.kind = "ClusterRoleBinding"' | kubectl create --dry-run -f - -o yaml > deploy/cluster_role_binding.yaml
+
 # kind has an issue with certain image registries (ex. redhat's), so use a
 # different test pod image.
 METRICS_TEST_IMAGE="fedora:latest"
```

pkg/helm/client/client.go (26 additions, 11 deletions)

```diff
@@ -28,25 +28,17 @@ import (
 	cached "k8s.io/client-go/discovery/cached"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 )
 
-// NewFromManager returns a Kubernetes client that can be used with
-// a Tiller server.
-func NewFromManager(mgr manager.Manager) (*kube.Client, error) {
-	c, err := NewRESTClientGetter(mgr)
-	if err != nil {
-		return nil, err
-	}
-	return kube.New(c), nil
-}
-
 var _ genericclioptions.RESTClientGetter = &restClientGetter{}
 
 type restClientGetter struct {
 	restConfig      *rest.Config
 	discoveryClient discovery.CachedDiscoveryInterface
 	restMapper      meta.RESTMapper
+	namespaceConfig clientcmd.ClientConfig
 }
 
 func (c *restClientGetter) ToRESTConfig() (*rest.Config, error) {
@@ -62,10 +54,32 @@ func (c *restClientGetter) ToRESTMapper() (meta.RESTMapper, error) {
 }
 
 func (c *restClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig {
+	return c.namespaceConfig
+}
+
+var _ clientcmd.ClientConfig = &namespaceClientConfig{}
+
+type namespaceClientConfig struct {
+	namespace string
+}
+
+func (c namespaceClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return clientcmdapi.Config{}, nil
+}
+
+func (c namespaceClientConfig) ClientConfig() (*rest.Config, error) {
+	return nil, nil
+}
+
+func (c namespaceClientConfig) Namespace() (string, bool, error) {
+	return c.namespace, false, nil
+}
+
+func (c namespaceClientConfig) ConfigAccess() clientcmd.ConfigAccess {
 	return nil
 }
 
-func NewRESTClientGetter(mgr manager.Manager) (genericclioptions.RESTClientGetter, error) {
+func NewRESTClientGetter(mgr manager.Manager, ns string) (genericclioptions.RESTClientGetter, error) {
 	cfg := mgr.GetConfig()
 	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
 	if err != nil {
@@ -78,6 +92,7 @@ func NewRESTClientGetter(mgr manager.Manager) (genericclioptions.RESTClientGette
 		restConfig:      cfg,
 		discoveryClient: cdc,
 		restMapper:      rm,
+		namespaceConfig: &namespaceClientConfig{ns},
 	}, nil
 }
```
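The new `namespaceClientConfig` stubs out every `clientcmd.ClientConfig` method except `Namespace()`, which reports the stored namespace (the accompanying `false` marks it as not flag-overridden). A minimal sketch of the consuming side, assuming a getter built by the new `NewRESTClientGetter`; the helper name is illustrative, not Helm source, while `genericclioptions` is the real k8s.io/cli-runtime package:

```go
package client

import "k8s.io/cli-runtime/pkg/genericclioptions"

// installNamespace mirrors the lookup a consumer such as Helm's kube client
// performs against the getter: resolve the target namespace through the raw
// kubeconfig loader. For a getter created with
// NewRESTClientGetter(mgr, cr.GetNamespace()), this returns the CR's
// namespace rather than the operator's.
func installNamespace(rcg genericclioptions.RESTClientGetter) (string, error) {
	ns, _, err := rcg.ToRawKubeConfigLoader().Namespace()
	if err != nil {
		return "", err
	}
	return ns, nil
}
```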

pkg/helm/release/manager_factory.go (2 additions, 2 deletions)

```diff
@@ -77,11 +77,11 @@ func (f managerFactory) NewManager(cr *unstructured.Unstructured, overrideValues
 
 	// Get the necessary clients and client getters. Use a client that injects the CR
 	// as an owner reference into all resources templated by the chart.
-	rcg, err := client.NewRESTClientGetter(f.mgr)
+	rcg, err := client.NewRESTClientGetter(f.mgr, cr.GetNamespace())
 	if err != nil {
 		return nil, fmt.Errorf("failed to get REST client getter from manager: %w", err)
 	}
-	kubeClient := kube.New(nil)
+	kubeClient := kube.New(rcg)
 	ownerRef := metav1.NewControllerRef(cr, cr.GroupVersionKind())
 	ownerRefClient := client.NewOwnerRefInjectingClient(*kubeClient, *ownerRef)
```
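Net effect: `kube.New` now receives a getter whose `Namespace()` reports the CR's namespace, so every resource templated by the chart is created alongside the CR. The e2e changes above exercise exactly this path: the operator runs cluster-scoped (`WATCH_NAMESPACE` set to `""`, its Role and RoleBinding promoted to the cluster-scoped kinds) while the CR lives in `test-e2e-helm`.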
