
Commit b4edc84

Merge pull request #3637 from Azure/ARO-20543-user-provided-pull-secret-new
Test to confirm an HCP cluster can accept and use user-provided pull secrets
2 parents 0b37186 + 983d381 commit b4edc84

14 files changed: +912 -0 lines changed

test/e2e/cluster_pullsecret.go

Lines changed: 367 additions & 0 deletions
@@ -0,0 +1,367 @@
// Copyright 2025 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package e2e

import (
    "context"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"
    "time"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/kubernetes"

    "github.com/Azure/ARO-HCP/test/util/framework"
    "github.com/Azure/ARO-HCP/test/util/labels"
    "github.com/Azure/ARO-HCP/test/util/verifiers"
)

var _ = Describe("Customer", func() {
    BeforeEach(func() {
        // per test initialization
    })

    It("should be able to create an HCP cluster and manage pull secrets",
        labels.RequireNothing,
        labels.Critical,
        labels.Positive,
        labels.AroRpApiCompatible,
        func(ctx context.Context) {
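            // The "additional-pull-secret" Secret in the hosted cluster's kube-system
            // namespace is the customer-facing input this test exercises: the steps below
            // expect its contents to be merged into the cluster-wide global pull secret
            // and then used for real image pulls.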
            const (
                customerClusterName    = "pullsecret-hcp-cluster"
                testPullSecretHost     = "host.example.com"
                testPullSecretPassword = "my_password"
                testPullSecretEmail    = "noreply@example.com"
                pullSecretName         = "additional-pull-secret"
                pullSecretNamespace    = "kube-system"
            )
            tc := framework.NewTestContext()

            By("checking pull secret file exists")
            pullSecretFilePath := filepath.Join(tc.PullSecretPath(), "pull-secret")
            if _, err := os.Stat(pullSecretFilePath); os.IsNotExist(err) {
                Skip(fmt.Sprintf("Pull secret file not found at %s, skipping test", pullSecretFilePath))
            }

            if tc.UsePooledIdentities() {
                err := tc.AssignIdentityContainers(ctx, 1, 60*time.Second)
                Expect(err).NotTo(HaveOccurred())
            }

            By("creating a resource group")
            resourceGroup, err := tc.NewResourceGroup(ctx, "pullsecret-test", tc.Location())
            Expect(err).NotTo(HaveOccurred())

            By("creating cluster parameters")
            clusterParams := framework.NewDefaultClusterParams()
            clusterParams.ClusterName = customerClusterName
            managedResourceGroupName := framework.SuffixName(*resourceGroup.Name, "-managed", 64)
            clusterParams.ManagedResourceGroupName = managedResourceGroupName

            By("creating customer resources")
            clusterParams, err = tc.CreateClusterCustomerResources(ctx,
                resourceGroup,
                clusterParams,
                map[string]interface{}{},
                TestArtifactsFS,
            )
            Expect(err).NotTo(HaveOccurred())

            By("Creating the cluster")
            err = tc.CreateHCPClusterFromParam(ctx,
                GinkgoLogr,
                *resourceGroup.Name,
                clusterParams,
                45*time.Minute,
            )
            Expect(err).NotTo(HaveOccurred())

            By("Creating the node pool")
            nodePoolParams := framework.NewDefaultNodePoolParams()
            nodePoolParams.NodePoolName = "np-1"
            nodePoolParams.ClusterName = customerClusterName
            nodePoolParams.Replicas = int32(2)
            err = tc.CreateNodePoolFromParam(ctx,
                *resourceGroup.Name,
                customerClusterName,
                nodePoolParams,
                15*time.Minute,
            )
            Expect(err).NotTo(HaveOccurred())

            By("getting credentials")
            adminRESTConfig, err := tc.GetAdminRESTConfigForHCPCluster(
                ctx,
                tc.Get20240610ClientFactoryOrDie(ctx).NewHcpOpenShiftClustersClient(),
                *resourceGroup.Name,
                customerClusterName,
                10*time.Minute,
            )
            Expect(err).NotTo(HaveOccurred())

            By("ensuring the cluster is viable")
            err = verifiers.VerifyHCPCluster(ctx, adminRESTConfig)
            Expect(err).NotTo(HaveOccurred())

            By("creating kubernetes client")
            kubeClient, err := kubernetes.NewForConfig(adminRESTConfig)
            Expect(err).NotTo(HaveOccurred())

            By("creating test pull secret")
            username := "test-user"
            auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + testPullSecretPassword))

            testPullSecret, err := framework.CreateTestDockerConfigSecret(
                testPullSecretHost,
                username,
                testPullSecretPassword,
                testPullSecretEmail,
                pullSecretName,
                pullSecretNamespace,
            )
            Expect(err).NotTo(HaveOccurred())

            By("creating the test pull secret in the cluster")
            _, err = kubeClient.CoreV1().Secrets(pullSecretNamespace).Create(ctx, testPullSecret, metav1.CreateOptions{})
            Expect(err).NotTo(HaveOccurred())
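
            // HCCO (the hosted cluster config operator) is expected to pick up the new
            // additional-pull-secret and merge it into the global pull secret; poll until
            // the merge becomes visible.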
By("waiting for HCCO to merge the additional pull secret with the global pull secret")
150+
verifier := verifiers.VerifyPullSecretMergedIntoGlobal(testPullSecretHost)
151+
Eventually(func() error {
152+
err := verifier.Verify(ctx, adminRESTConfig)
153+
if err != nil {
154+
GinkgoLogr.Info("Verifier check", "name", verifier.Name(), "status", "failed", "error", err.Error())
155+
}
156+
return err
157+
}, 5*time.Minute, 15*time.Second).Should(Succeed(), "additional pull secret should be merged into global-pull-secret by HCCO")
158+
159+
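
            // The global-pull-secret-syncer DaemonSet is the component expected to push the
            // merged secret out to every node, so check that it exists before asserting on
            // the merged auth data.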
By("verifying the DaemonSet for global pull secret synchronization is created")
160+
verifier = verifiers.VerifyGlobalPullSecretSyncer()
161+
Eventually(func() error {
162+
err := verifier.Verify(ctx, adminRESTConfig)
163+
if err != nil {
164+
GinkgoLogr.Info("Verifier check", "name", verifier.Name(), "status", "failed", "error", err.Error())
165+
}
166+
return err
167+
}, 1*time.Minute, 10*time.Second).Should(Succeed(), "global-pull-secret-syncer DaemonSet should be created")
168+
169+
By("verifying the pull secret was merged into the global pull secret")
170+
err = verifiers.VerifyPullSecretAuthData(
171+
"global-pull-secret",
172+
pullSecretNamespace,
173+
testPullSecretHost,
174+
auth,
175+
testPullSecretEmail,
176+
).Verify(ctx, adminRESTConfig)
177+
Expect(err).NotTo(HaveOccurred())
178+
179+
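
            // Second phase: take real registry.redhat.io credentials from the QE pull-secret
            // file and add them to the same additional-pull-secret after cluster creation,
            // proving that credentials added later also propagate and can pull images.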
By("reading pull-secret file from aro-hcp-qe-pull-secret directory")
180+
pullSecretFileData, err := os.ReadFile(pullSecretFilePath)
181+
Expect(err).NotTo(HaveOccurred(), "failed to read pull-secret file from %s", pullSecretFilePath)
182+
183+
By("parsing pull-secret file")
184+
var pullSecretConfig framework.DockerConfigJSON
185+
err = json.Unmarshal(pullSecretFileData, &pullSecretConfig)
186+
Expect(err).NotTo(HaveOccurred(), "failed to parse pull-secret file")
187+
188+
By("extracting registry.redhat.io credentials")
189+
const redhatRegistryHost = "registry.redhat.io"
190+
redhatRegistryAuth, ok := pullSecretConfig.Auths[redhatRegistryHost]
191+
Expect(ok).To(BeTrue(), "registry.redhat.io credentials not found in pull-secret file")
192+
193+
redhatRegistryAuthString := redhatRegistryAuth.Auth
194+
redhatRegistryEmail := redhatRegistryAuth.Email
195+
196+
By("updating additional-pull-secret to add registry.redhat.io credentials")
197+
// Get the current additional-pull-secret
198+
currentSecret, err := kubeClient.CoreV1().Secrets(pullSecretNamespace).Get(ctx, pullSecretName, metav1.GetOptions{})
199+
Expect(err).NotTo(HaveOccurred(), "failed to get existing additional-pull-secret")
200+
201+
// Parse the current dockerconfigjson
202+
var currentConfig framework.DockerConfigJSON
203+
err = json.Unmarshal(currentSecret.Data[corev1.DockerConfigJsonKey], &currentConfig)
204+
Expect(err).NotTo(HaveOccurred(), "failed to parse current pull secret")
205+
206+
// Add registry.redhat.io credentials to the existing auths
207+
currentConfig.Auths[redhatRegistryHost] = framework.RegistryAuth{
208+
Auth: redhatRegistryAuthString,
209+
Email: redhatRegistryEmail,
210+
}
211+
212+
// Marshal back to JSON
213+
updatedDockerConfigJSON, err := json.Marshal(currentConfig)
214+
Expect(err).NotTo(HaveOccurred())
215+
216+
// Update the secret
217+
currentSecret.Data[corev1.DockerConfigJsonKey] = updatedDockerConfigJSON
218+
_, err = kubeClient.CoreV1().Secrets(pullSecretNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{})
219+
Expect(err).NotTo(HaveOccurred())
220+
221+
By("waiting for HCCO to merge the updated pull secret (with registry.redhat.io) into global pull secret")
222+
verifier = verifiers.VerifyPullSecretMergedIntoGlobal(redhatRegistryHost)
223+
Eventually(func() error {
224+
err := verifier.Verify(ctx, adminRESTConfig)
225+
if err != nil {
226+
GinkgoLogr.Info("Verifier check", "name", verifier.Name(), "status", "failed", "error", err.Error())
227+
}
228+
return err
229+
}, 5*time.Minute, 15*time.Second).Should(Succeed(), "registry.redhat.io pull secret should be merged into global-pull-secret by HCCO")
230+
231+
By("verifying both test registries are now in the global pull secret")
232+
err = verifiers.VerifyPullSecretMergedIntoGlobal(testPullSecretHost).Verify(ctx, adminRESTConfig)
233+
Expect(err).NotTo(HaveOccurred(), "host.example.com should still be in global-pull-secret")
234+
235+
err = verifiers.VerifyPullSecretAuthData(
236+
"global-pull-secret",
237+
pullSecretNamespace,
238+
redhatRegistryHost,
239+
redhatRegistryAuthString,
240+
redhatRegistryEmail,
241+
).Verify(ctx, adminRESTConfig)
242+
Expect(err).NotTo(HaveOccurred())
243+
244+
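
            // Final phase: install the Node Feature Discovery (NFD) operator through OLM.
            // Its worker image is served from registry.redhat.io, so a successful deployment
            // exercises the freshly added credentials end to end.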
By("creating dynamic client for operator installation")
245+
dynamicClient, err := dynamic.NewForConfig(adminRESTConfig)
246+
Expect(err).NotTo(HaveOccurred())
247+
248+
By("creating namespace for NFD operator")
249+
const nfdNamespace = "openshift-nfd"
250+
_, err = kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
251+
ObjectMeta: metav1.ObjectMeta{
252+
Name: nfdNamespace,
253+
},
254+
}, metav1.CreateOptions{})
255+
Expect(err).NotTo(HaveOccurred())
256+
257+
By("creating OperatorGroup for NFD operator")
258+
operatorGroupGVR := schema.GroupVersionResource{
259+
Group: "operators.coreos.com",
260+
Version: "v1",
261+
Resource: "operatorgroups",
262+
}
263+
operatorGroup := &unstructured.Unstructured{
264+
Object: map[string]interface{}{
265+
"apiVersion": "operators.coreos.com/v1",
266+
"kind": "OperatorGroup",
267+
"metadata": map[string]interface{}{
268+
"name": "nfd-operator-group",
269+
"namespace": nfdNamespace,
270+
},
271+
"spec": map[string]interface{}{
272+
"targetNamespaces": []interface{}{nfdNamespace},
273+
},
274+
},
275+
}
276+
_, err = dynamicClient.Resource(operatorGroupGVR).Namespace(nfdNamespace).Create(ctx, operatorGroup, metav1.CreateOptions{})
277+
Expect(err).NotTo(HaveOccurred())
278+
279+
By("creating Subscription for NFD operator from redhat-operators catalog")
280+
subscriptionGVR := schema.GroupVersionResource{
281+
Group: "operators.coreos.com",
282+
Version: "v1alpha1",
283+
Resource: "subscriptions",
284+
}
285+
subscription := &unstructured.Unstructured{
286+
Object: map[string]interface{}{
287+
"apiVersion": "operators.coreos.com/v1alpha1",
288+
"kind": "Subscription",
289+
"metadata": map[string]interface{}{
290+
"name": "nfd",
291+
"namespace": nfdNamespace,
292+
},
293+
"spec": map[string]interface{}{
294+
"channel": "stable",
295+
"name": "nfd",
296+
"source": "redhat-operators",
297+
"sourceNamespace": "openshift-marketplace",
298+
"installPlanApproval": "Automatic",
299+
},
300+
},
301+
}
302+
_, err = dynamicClient.Resource(subscriptionGVR).Namespace(nfdNamespace).Create(ctx, subscription, metav1.CreateOptions{})
303+
Expect(err).NotTo(HaveOccurred())
304+
305+
By("waiting for NFD operator to be installed")
306+
verifier = verifiers.VerifyOperatorInstalled(nfdNamespace, "nfd")
307+
Eventually(func() error {
308+
err := verifier.Verify(ctx, adminRESTConfig)
309+
if err != nil {
310+
GinkgoLogr.Info("Verifier check", "name", verifier.Name(), "status", "failed", "error", err.Error())
311+
}
312+
return err
313+
}, 5*time.Minute, 15*time.Second).Should(Succeed(), "NFD operator should be installed successfully")
314+
315+
By("creating NodeFeatureDiscovery CR to deploy NFD worker")
316+
nfdGVR := schema.GroupVersionResource{
317+
Group: "nfd.openshift.io",
318+
Version: "v1",
319+
Resource: "nodefeaturediscoveries",
320+
}
321+
nfdCR := &unstructured.Unstructured{
322+
Object: map[string]interface{}{
323+
"apiVersion": "nfd.openshift.io/v1",
324+
"kind": "NodeFeatureDiscovery",
325+
"metadata": map[string]interface{}{
326+
"name": "nfd-instance",
327+
"namespace": nfdNamespace,
328+
},
329+
"spec": map[string]interface{}{
330+
"operand": map[string]interface{}{
331+
"image": "registry.redhat.io/openshift4/ose-node-feature-discovery:latest",
332+
},
333+
},
334+
},
335+
}
336+
_, err = dynamicClient.Resource(nfdGVR).Namespace(nfdNamespace).Create(ctx, nfdCR, metav1.CreateOptions{})
337+
Expect(err).NotTo(HaveOccurred())
338+
339+
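
            // A ready nfd-worker pod implies its image was pulled from registry.redhat.io
            // on at least one node; the explicit image-pull verifier afterwards confirms
            // the source registry.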
By("waiting for NFD worker DaemonSet to be created")
340+
Eventually(func() error {
341+
daemonSets, err := kubeClient.AppsV1().DaemonSets(nfdNamespace).List(ctx, metav1.ListOptions{})
342+
if err != nil {
343+
return err
344+
}
345+
for _, ds := range daemonSets.Items {
346+
if ds.Name == "nfd-worker" {
347+
if ds.Status.DesiredNumberScheduled > 0 && ds.Status.NumberReady > 0 {
348+
return nil
349+
}
350+
return fmt.Errorf("nfd-worker DaemonSet found but not ready: desired=%d, ready=%d",
351+
ds.Status.DesiredNumberScheduled, ds.Status.NumberReady)
352+
}
353+
}
354+
return fmt.Errorf("nfd-worker DaemonSet not found")
355+
}, 5*time.Minute, 15*time.Second).Should(Succeed(), "NFD worker DaemonSet should be created and have ready pods")
356+
357+
By("waiting for NFD worker pods to be created and verify images from registry.redhat.io can be pulled")
358+
verifier = verifiers.VerifyImagePulled(nfdNamespace, "registry.redhat.io", "ose-node-feature-discovery")
359+
Eventually(func() error {
360+
err := verifier.Verify(ctx, adminRESTConfig)
361+
if err != nil {
362+
GinkgoLogr.Info("Verifier check", "name", verifier.Name(), "status", "failed", "error", err.Error())
363+
}
364+
return err
365+
}, 5*time.Minute, 15*time.Second).Should(Succeed(), "NFD worker images from registry.redhat.io should be pulled successfully with the added pull secret")
366+
})
367+
})
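
For context, the sketch below shows roughly what the test's additional-pull-secret payload (.dockerconfigjson) looks like before and after the registry.redhat.io entry is added. It is a minimal, self-contained illustration assuming the standard Docker config format: the registryAuth and dockerConfigJSON types are hypothetical stand-ins that only mirror the fields the test reads (Auths, Auth, Email), not the actual framework.DockerConfigJSON definition, and the placeholder credential strings are not real.

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

// Illustrative stand-ins for the fields the test reads; not the framework's types.
type registryAuth struct {
    Auth  string `json:"auth"`
    Email string `json:"email,omitempty"`
}

type dockerConfigJSON struct {
    Auths map[string]registryAuth `json:"auths"`
}

func main() {
    // Initial content of additional-pull-secret, as the test builds it:
    // auth is base64("user:password") per the standard Docker config format.
    auth := base64.StdEncoding.EncodeToString([]byte("test-user:my_password"))
    cfg := dockerConfigJSON{Auths: map[string]registryAuth{
        "host.example.com": {Auth: auth, Email: "noreply@example.com"},
    }}

    // The later update simply adds another entry under "auths".
    cfg.Auths["registry.redhat.io"] = registryAuth{
        Auth:  "<auth value taken from the QE pull-secret file>",
        Email: "<email taken from the QE pull-secret file>",
    }

    out, _ := json.MarshalIndent(cfg, "", "  ")
    fmt.Println(string(out))
    // Prints a two-entry "auths" map, which is the shape HCCO is expected to
    // merge into the global pull secret.
}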

test/testdata/zz_fixture_TestMainListSuitesForEachSuite_dev_cd_check_paralleldev_cd_check_parallel.txt

Lines changed: 1 addition & 0 deletions
@@ -4,6 +4,7 @@ Customer should be able to create node pools with ARM64-based VMs
 Customer should be able to create an HCP cluster with custom autoscaling
 Customer should be able to create a HCP cluster without CNI
 Customer should be able to list HCP clusters without node pools at both subscription and resource group levels
+Customer should be able to create an HCP cluster and manage pull secrets
 Update HCPOpenShiftCluster Positive creates a cluster and updates tags with a PATCH request
 Customer should be able to create an HCP cluster with back-level version 4.19
 Customer should be able to create several HCP clusters in their customer resource group, but not in the same managed resource group

test/testdata/zz_fixture_TestMainListSuitesForEachSuite_integration_parallelintegration_parallel.txt

Lines changed: 1 addition & 0 deletions
@@ -8,6 +8,7 @@ Customer should be able to create a HCP cluster without CNI
 Customer should be able to create an HCP cluster and custom node pool osDisk size using bicep template
 Customer should be able to list HCP clusters without node pools at both subscription and resource group levels
 Customer should not be able to reuse subnets and NSGs between clusters
+Customer should be able to create an HCP cluster and manage pull secrets
 Customer should create an HCP cluster and validate TLS certificates
 Update HCPOpenShiftCluster Negative creates a cluster and fails to update its name with a PATCH request
 Update HCPOpenShiftCluster Positive creates a cluster and updates tags with a PATCH request

test/testdata/zz_fixture_TestMainListSuitesForEachSuite_prod_parallelprod_parallel.txt

Lines changed: 1 addition & 0 deletions
@@ -7,6 +7,7 @@ Customer should be able to create a HCP cluster without CNI
 Customer should be able to create an HCP cluster and custom node pool osDisk size using bicep template
 Customer should be able to list HCP clusters without node pools at both subscription and resource group levels
 Customer should not be able to reuse subnets and NSGs between clusters
+Customer should be able to create an HCP cluster and manage pull secrets
 Customer should create an HCP cluster and validate TLS certificates
 Update HCPOpenShiftCluster Negative creates a cluster and fails to update its name with a PATCH request
 Update HCPOpenShiftCluster Positive creates a cluster and updates tags with a PATCH request

test/testdata/zz_fixture_TestMainListSuitesForEachSuite_rp_api_compat_all_parallel_01rp_api_compat_all_parallel_development.txt

Lines changed: 1 addition & 0 deletions
@@ -6,6 +6,7 @@ Customer should not be able to create a 4.18 HCP cluster
 Customer should be able to create a HCP cluster without CNI
 Customer should be able to list HCP clusters without node pools at both subscription and resource group levels
 Customer should not be able to reuse subnets and NSGs between clusters
+Customer should be able to create an HCP cluster and manage pull secrets
 Update HCPOpenShiftCluster Positive creates a cluster and updates tags with a PATCH request
 Customer should be able to create an HCP cluster with back-level version 4.19
 Customer should be able to create several HCP clusters in their customer resource group, but not in the same managed resource group
