Skip to content

Commit b6de613

Browse files
Merge pull request #29530 from QiWang19/imgpolicy-sigstore-verification
OCPNODE-2596: Add SigstoreImageVerification e2e tests
2 parents 1e0a837 + 029f94e commit b6de613

File tree

10 files changed

+525
-0
lines changed

10 files changed

+525
-0
lines changed
Lines changed: 238 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,238 @@
1+
package imagepolicy
2+
3+
import (
4+
"context"
5+
"fmt"
6+
"path/filepath"
7+
"time"
8+
9+
g "github.com/onsi/ginkgo/v2"
10+
o "github.com/onsi/gomega"
11+
machineconfighelper "github.com/openshift/origin/test/extended/machine_config"
12+
exutil "github.com/openshift/origin/test/extended/util"
13+
kapiv1 "k8s.io/api/core/v1"
14+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
15+
"k8s.io/client-go/util/retry"
16+
e2e "k8s.io/kubernetes/test/e2e/framework"
17+
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
18+
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
19+
admissionapi "k8s.io/pod-security-admission/api"
20+
)
21+
22+
const (
	// Digest-pinned OCP release image used to exercise cluster-wide signature validation.
	testReleaseImageScope = "quay.io/openshift-release-dev/ocp-release@sha256:fbad931c725b2e5b937b295b58345334322bdabb0b67da1c800a53686d7397da"
	// Digest-pinned component image used by the per-namespace ImagePolicy tests.
	testReferenceImageScope = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4db234f37ae6712e2f7ed8d13f7fb49971c173d0e4f74613d0121672fa2e01f5"

	// MachineConfigs generated when the registries configuration changes.
	registriesWorkerPoolMachineConfig = "99-worker-generated-registries"
	registriesMasterPoolMachineConfig = "99-master-generated-registries"

	testPodName = "signature-validation-test-pod"
	workerPool  = "worker"
	masterPool  = "master"

	// Container status reason reported when image signature verification fails.
	// NOTE(review): "Faild" is a typo, but the identifier is exported; renaming
	// would break any external consumers, so it is kept as-is.
	SignatureValidationFaildReason = "SignatureValidationFailed"
)
32+
33+
var _ = g.Describe("[sig-imagepolicy][OCPFeatureGate:SigstoreImageVerification][Serial]", g.Ordered, func() {
34+
defer g.GinkgoRecover()
35+
var (
36+
oc = exutil.NewCLIWithoutNamespace("cluster-image-policy")
37+
tctx = context.Background()
38+
cli = exutil.NewCLIWithPodSecurityLevel("verifysigstore-e2e", admissionapi.LevelBaseline)
39+
clif = cli.KubeFramework()
40+
imgpolicyCli = exutil.NewCLIWithPodSecurityLevel("verifysigstore-imagepolicy-e2e", admissionapi.LevelBaseline)
41+
imgpolicyClif = imgpolicyCli.KubeFramework()
42+
imagePolicyBaseDir = exutil.FixturePath("testdata", "imagepolicy")
43+
invalidPublicKeyClusterImagePolicyFixture = filepath.Join(imagePolicyBaseDir, "invalid-public-key-cluster-image-policy.yaml")
44+
publiKeyRekorClusterImagePolicyFixture = filepath.Join(imagePolicyBaseDir, "public-key-rekor-cluster-image-policy.yaml")
45+
invalidPublicKeyImagePolicyFixture = filepath.Join(imagePolicyBaseDir, "invalid-public-key-image-policy.yaml")
46+
publiKeyRekorImagePolicyFixture = filepath.Join(imagePolicyBaseDir, "public-key-rekor-image-policy.yaml")
47+
)
48+
49+
g.BeforeAll(func() {
50+
if !exutil.IsTechPreviewNoUpgrade(tctx, oc.AdminConfigClient()) {
51+
g.Skip("skipping, this feature is only supported on TechPreviewNoUpgrade clusters")
52+
}
53+
54+
outStr, err := oc.Run("adm", "release", "info", testReleaseImageScope).Args("-o=go-template", "--template={{.digest}}").Output()
55+
if err != nil || outStr == "" {
56+
o.Expect(err).ToNot(o.HaveOccurred())
57+
e2eskipper.Skipf("can't validate %s release image for testing, consider updating the test", testReleaseImageScope)
58+
}
59+
})
60+
61+
g.It("Should fail clusterimagepolicy signature validation root of trust does not match the identity in the signature", func() {
62+
createClusterImagePolicy(oc, invalidPublicKeyClusterImagePolicyFixture)
63+
g.DeferCleanup(deleteClusterImagePolicy, oc, invalidPublicKeyClusterImagePolicyFixture)
64+
65+
pod, err := launchTestPod(tctx, clif, testPodName, testReleaseImageScope)
66+
o.Expect(err).NotTo(o.HaveOccurred())
67+
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)
68+
69+
err = waitForTestPodContainerToFailSignatureValidation(tctx, clif, pod)
70+
o.Expect(err).NotTo(o.HaveOccurred())
71+
})
72+
73+
g.It("Should fail clusterimagepolicy signature validation when scope in allowedRegistries list does not skip signature verification", func() {
74+
// Ensure allowedRegistries do not skip signature verification by adding testReleaseImageScope to the list
75+
allowedRegistries := []string{"quay.io", "registry.redhat.io", "image-registry.openshift-image-registry.svc:5000", testReleaseImageScope}
76+
updateImageConfig(oc, allowedRegistries)
77+
g.DeferCleanup(cleanupImageConfig, oc)
78+
79+
createClusterImagePolicy(oc, invalidPublicKeyClusterImagePolicyFixture)
80+
g.DeferCleanup(deleteClusterImagePolicy, oc, invalidPublicKeyClusterImagePolicyFixture)
81+
82+
pod, err := launchTestPod(tctx, clif, testPodName, testReleaseImageScope)
83+
o.Expect(err).NotTo(o.HaveOccurred())
84+
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)
85+
86+
err = waitForTestPodContainerToFailSignatureValidation(tctx, clif, pod)
87+
o.Expect(err).NotTo(o.HaveOccurred())
88+
})
89+
90+
g.It("Should pass clusterimagepolicy signature validation with signed image", func() {
91+
createClusterImagePolicy(oc, publiKeyRekorClusterImagePolicyFixture)
92+
g.DeferCleanup(deleteClusterImagePolicy, oc, publiKeyRekorClusterImagePolicyFixture)
93+
94+
pod, err := launchTestPod(tctx, clif, testPodName, testReleaseImageScope)
95+
o.Expect(err).NotTo(o.HaveOccurred())
96+
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)
97+
98+
err = e2epod.WaitForPodSuccessInNamespace(tctx, clif.ClientSet, pod.Name, pod.Namespace)
99+
o.Expect(err).NotTo(o.HaveOccurred())
100+
})
101+
102+
g.It("Should fail imagepolicy signature validation in different namespaces root of trust does not match the identity in the signature", func() {
103+
createImagePolicy(oc, invalidPublicKeyImagePolicyFixture, imgpolicyClif.Namespace.Name)
104+
g.DeferCleanup(deleteImagePolicy, oc, invalidPublicKeyImagePolicyFixture, imgpolicyClif.Namespace.Name)
105+
106+
createImagePolicy(oc, invalidPublicKeyImagePolicyFixture, clif.Namespace.Name)
107+
g.DeferCleanup(deleteImagePolicy, oc, invalidPublicKeyImagePolicyFixture, clif.Namespace.Name)
108+
109+
pod, err := launchTestPod(tctx, imgpolicyClif, testPodName, testReferenceImageScope)
110+
o.Expect(err).NotTo(o.HaveOccurred())
111+
g.DeferCleanup(deleteTestPod, tctx, imgpolicyClif, testPodName)
112+
113+
err = waitForTestPodContainerToFailSignatureValidation(tctx, imgpolicyClif, pod)
114+
o.Expect(err).NotTo(o.HaveOccurred())
115+
116+
pod, err = launchTestPod(tctx, clif, testPodName, testReferenceImageScope)
117+
o.Expect(err).NotTo(o.HaveOccurred())
118+
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)
119+
120+
err = waitForTestPodContainerToFailSignatureValidation(tctx, clif, pod)
121+
o.Expect(err).NotTo(o.HaveOccurred())
122+
})
123+
124+
g.It("Should pass imagepolicy signature validation with signed image in namespaces", func() {
125+
createImagePolicy(oc, publiKeyRekorImagePolicyFixture, clif.Namespace.Name)
126+
g.DeferCleanup(deleteImagePolicy, oc, publiKeyRekorImagePolicyFixture, clif.Namespace.Name)
127+
128+
createImagePolicy(oc, publiKeyRekorImagePolicyFixture, imgpolicyClif.Namespace.Name)
129+
g.DeferCleanup(deleteImagePolicy, oc, publiKeyRekorImagePolicyFixture, imgpolicyClif.Namespace.Name)
130+
131+
pod, err := launchTestPod(tctx, clif, testPodName, testReferenceImageScope)
132+
o.Expect(err).NotTo(o.HaveOccurred())
133+
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)
134+
135+
err = e2epod.WaitForPodSuccessInNamespace(tctx, clif.ClientSet, pod.Name, pod.Namespace)
136+
o.Expect(err).NotTo(o.HaveOccurred())
137+
138+
pod, err = launchTestPod(tctx, imgpolicyClif, testPodName, testReferenceImageScope)
139+
o.Expect(err).NotTo(o.HaveOccurred())
140+
g.DeferCleanup(deleteTestPod, tctx, imgpolicyClif, testPodName)
141+
142+
err = e2epod.WaitForPodSuccessInNamespace(tctx, imgpolicyClif.ClientSet, pod.Name, pod.Namespace)
143+
o.Expect(err).NotTo(o.HaveOccurred())
144+
})
145+
})
146+
147+
func createClusterImagePolicy(oc *exutil.CLI, fixture string) {
148+
err := oc.Run("create").Args("-f", fixture).Execute()
149+
o.Expect(err).NotTo(o.HaveOccurred())
150+
time.Sleep(10 * time.Second)
151+
machineconfighelper.WaitForConfigAndPoolComplete(oc, workerPool, registriesWorkerPoolMachineConfig)
152+
machineconfighelper.WaitForConfigAndPoolComplete(oc, masterPool, registriesMasterPoolMachineConfig)
153+
}
154+
155+
func deleteClusterImagePolicy(oc *exutil.CLI, fixture string) error {
156+
return oc.Run("delete").Args("-f", fixture).Execute()
157+
}
158+
159+
func createImagePolicy(oc *exutil.CLI, fixture string, namespace string) {
160+
err := oc.Run("create").Args("-f", fixture, "-n", namespace).Execute()
161+
o.Expect(err).NotTo(o.HaveOccurred())
162+
time.Sleep(10 * time.Second)
163+
machineconfighelper.WaitForConfigAndPoolComplete(oc, workerPool, registriesWorkerPoolMachineConfig)
164+
machineconfighelper.WaitForConfigAndPoolComplete(oc, masterPool, registriesMasterPoolMachineConfig)
165+
}
166+
167+
func deleteImagePolicy(oc *exutil.CLI, fixture string, namespace string) error {
168+
return oc.Run("delete").Args("-f", fixture, "-n", namespace).Execute()
169+
}
170+
171+
func updateImageConfig(oc *exutil.CLI, allowedRegistries []string) {
172+
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
173+
imageConfig, err := oc.AdminConfigClient().ConfigV1().Images().Get(
174+
context.Background(), "cluster", metav1.GetOptions{},
175+
)
176+
if err != nil {
177+
return err
178+
}
179+
imageConfig.Spec.RegistrySources.AllowedRegistries = allowedRegistries
180+
_, err = oc.AdminConfigClient().ConfigV1().Images().Update(
181+
context.Background(), imageConfig, metav1.UpdateOptions{},
182+
)
183+
return err
184+
})
185+
o.Expect(err).NotTo(o.HaveOccurred(), "error updating image config")
186+
time.Sleep(10 * time.Second)
187+
machineconfighelper.WaitForConfigAndPoolComplete(oc, workerPool, registriesWorkerPoolMachineConfig)
188+
machineconfighelper.WaitForConfigAndPoolComplete(oc, masterPool, registriesMasterPoolMachineConfig)
189+
}
190+
191+
func cleanupImageConfig(oc *exutil.CLI) error {
192+
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
193+
imageConfig, err := oc.AdminConfigClient().ConfigV1().Images().Get(
194+
context.Background(), "cluster", metav1.GetOptions{},
195+
)
196+
if err != nil {
197+
return err
198+
}
199+
imageConfig.Spec.RegistrySources.AllowedRegistries = []string{}
200+
_, err = oc.AdminConfigClient().ConfigV1().Images().Update(
201+
context.Background(), imageConfig, metav1.UpdateOptions{},
202+
)
203+
return err
204+
})
205+
}
206+
207+
func launchTestPod(ctx context.Context, f *e2e.Framework, podName, image string) (*kapiv1.Pod, error) {
208+
g.By(fmt.Sprintf("launching the pod: %s", podName))
209+
contName := fmt.Sprintf("%s-container", podName)
210+
pod := &kapiv1.Pod{
211+
TypeMeta: metav1.TypeMeta{
212+
Kind: "Pod",
213+
},
214+
ObjectMeta: metav1.ObjectMeta{
215+
Name: podName,
216+
},
217+
Spec: kapiv1.PodSpec{
218+
Containers: []kapiv1.Container{
219+
{
220+
Name: contName,
221+
Image: image,
222+
ImagePullPolicy: kapiv1.PullAlways,
223+
},
224+
},
225+
RestartPolicy: kapiv1.RestartPolicyNever,
226+
},
227+
}
228+
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
229+
return pod, err
230+
}
231+
232+
func deleteTestPod(ctx context.Context, f *e2e.Framework, podName string) error {
233+
return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
234+
}
235+
236+
func waitForTestPodContainerToFailSignatureValidation(ctx context.Context, f *e2e.Framework, pod *kapiv1.Pod) error {
237+
return e2epod.WaitForPodContainerToFail(ctx, f.ClientSet, pod.Namespace, pod.Name, 0, SignatureValidationFaildReason, e2e.PodStartShortTimeout)
238+
}

test/extended/include.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ import (
3232
_ "github.com/openshift/origin/test/extended/idling"
3333
_ "github.com/openshift/origin/test/extended/image_ecosystem"
3434
_ "github.com/openshift/origin/test/extended/imageapis"
35+
_ "github.com/openshift/origin/test/extended/imagepolicy"
3536
_ "github.com/openshift/origin/test/extended/images"
3637
_ "github.com/openshift/origin/test/extended/images/trigger"
3738
_ "github.com/openshift/origin/test/extended/kernel"

test/extended/machine_config/helpers.go

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -292,3 +292,82 @@ func WaitForOneMasterNodeToBeReady(oc *exutil.CLI) error {
292292
}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
293293
return nil
294294
}
295+
296+
// WaitForConfigAndPoolComplete is a helper function that gets a renderedConfig and waits for its pool to complete.
297+
// The return value is the final rendered config.
298+
func WaitForConfigAndPoolComplete(oc *exutil.CLI, pool, mcName string) string {
299+
config, err := WaitForRenderedConfig(oc, pool, mcName)
300+
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v: failed to render machine config %s from pool %s", err, mcName, pool))
301+
302+
err = WaitForPoolComplete(oc, pool, config)
303+
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v: pool %s did not update to config %s", err, pool, config))
304+
return config
305+
}
306+
307+
// WaitForRenderedConfig polls a MachineConfigPool until it has
308+
// included the given mcName in its config, and returns the new
309+
// rendered config name.
310+
func WaitForRenderedConfig(oc *exutil.CLI, pool, mcName string) (string, error) {
311+
return WaitForRenderedConfigs(oc, pool, mcName)
312+
}
313+
314+
// WaitForRenderedConfigs polls a MachineConfigPool until it has
315+
// included the given mcNames in its config, and returns the new
316+
// rendered config name.
317+
func WaitForRenderedConfigs(oc *exutil.CLI, pool string, mcNames ...string) (string, error) {
318+
var renderedConfig string
319+
machineConfigClient, err := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
320+
o.Expect(err).NotTo(o.HaveOccurred())
321+
found := make(map[string]bool)
322+
o.Eventually(func() bool {
323+
// Set up the list
324+
for _, name := range mcNames {
325+
found[name] = false
326+
}
327+
328+
// Update found based on the MCP
329+
mcp, err := machineConfigClient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{})
330+
if err != nil {
331+
return false
332+
}
333+
for _, mc := range mcp.Spec.Configuration.Source {
334+
if _, ok := found[mc.Name]; ok {
335+
found[mc.Name] = true
336+
}
337+
}
338+
339+
// If any are still false, then they weren't included in the MCP
340+
for _, nameFound := range found {
341+
if !nameFound {
342+
return false
343+
}
344+
}
345+
346+
// All the required names were found
347+
renderedConfig = mcp.Spec.Configuration.Name
348+
return true
349+
}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
350+
return renderedConfig, nil
351+
}
352+
353+
// WaitForPoolComplete polls a pool until it has completed an update to target
354+
func WaitForPoolComplete(oc *exutil.CLI, pool, target string) error {
355+
machineConfigClient, err := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
356+
o.Expect(err).NotTo(o.HaveOccurred())
357+
framework.Logf("Waiting for pool %s to complete %s", pool, target)
358+
o.Eventually(func() bool {
359+
mcp, err := machineConfigClient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{})
360+
if err != nil {
361+
framework.Logf("Failed to grab machineconfigpools, error :%v", err)
362+
return false
363+
}
364+
if mcp.Status.Configuration.Name != target {
365+
return false
366+
}
367+
if IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdated) {
368+
return true
369+
}
370+
return false
371+
}, 20*time.Minute, 10*time.Second).Should(o.BeTrue())
372+
return nil
373+
}

0 commit comments

Comments
 (0)