Commit 0912981
Merge pull request #29652 from QiWang19/imgsig-events
OCPNODE-2999: Add SigstoreImageVerification e2e tests
2 parents ce68c31 + f15f8c3 commit 0912981

File tree: 11 files changed, +582 -0 lines changed


pkg/monitortestlibrary/pathologicaleventlibrary/duplicated_event_patterns.go

Lines changed: 47 additions & 0 deletions
@@ -487,6 +487,12 @@ func NewUniversalPathologicalEventMatchers(kubeConfig *rest.Config, finalInterva
 	vsphereConfigurationTestsRollOutTooOftenMatcher := newVsphereConfigurationTestsRollOutTooOftenEventMatcher(finalIntervals)
 	registry.AddPathologicalEventMatcherOrDie(vsphereConfigurationTestsRollOutTooOftenMatcher)
 
+	deferringOperatorNodeUpdateTooOftenMatcher := newDeferringOperatorNodeUpdateTooOftenEventMatcher(finalIntervals)
+	registry.AddPathologicalEventMatcherOrDie(deferringOperatorNodeUpdateTooOftenMatcher)
+
+	crioReloadedTooOftenMatcher := newCrioReloadedTooOftenEventMatcher(finalIntervals)
+	registry.AddPathologicalEventMatcherOrDie(crioReloadedTooOftenMatcher)
+
 	return registry
 }

@@ -1061,3 +1067,44 @@ func newSingleNodeKubeAPIProgressingEventMatcher(finalIntervals monitorapi.Inter
 		allowIfWithinIntervals: ocpKubeAPIServerProgressingInterval,
 	}
 }
+
+func newDeferringOperatorNodeUpdateTooOftenEventMatcher(finalIntervals monitorapi.Intervals) EventMatcher {
+	// Collect the intervals of the imagepolicy signature validation e2e tests.
+	deferringOperatorNodeUpdateIntervals := finalIntervals.Filter(func(eventInterval monitorapi.Interval) bool {
+		return eventInterval.Source == monitorapi.SourceE2ETest &&
+			strings.Contains(eventInterval.Locator.Keys[monitorapi.LocatorE2ETestKey], "imagepolicy signature validation")
+	})
+	// Pad each interval by two minutes on both sides so events emitted just
+	// before or after the recorded test window are still allowed.
+	for i := range deferringOperatorNodeUpdateIntervals {
+		deferringOperatorNodeUpdateIntervals[i].To = deferringOperatorNodeUpdateIntervals[i].To.Add(time.Minute * 2)
+		deferringOperatorNodeUpdateIntervals[i].From = deferringOperatorNodeUpdateIntervals[i].From.Add(time.Minute * -2)
+	}
+
+	return &OverlapOtherIntervalsPathologicalEventMatcher{
+		delegate: &SimplePathologicalEventMatcher{
+			name:               "DeferringOperatorNodeUpdateTooOften",
+			messageReasonRegex: regexp.MustCompile(`^DeferringOperatorNodeUpdate$`),
+			jira:               "https://issues.redhat.com/browse/OCPBUGS-52260",
+		},
+		allowIfWithinIntervals: deferringOperatorNodeUpdateIntervals,
+	}
+}
+
+func newCrioReloadedTooOftenEventMatcher(finalIntervals monitorapi.Intervals) EventMatcher {
+	crioReloadedIntervals := finalIntervals.Filter(func(eventInterval monitorapi.Interval) bool {
+		return eventInterval.Source == monitorapi.SourceE2ETest &&
+			strings.Contains(eventInterval.Locator.Keys[monitorapi.LocatorE2ETestKey], "imagepolicy signature validation")
+	})
+	// Same two-minute padding as the matcher above.
+	for i := range crioReloadedIntervals {
+		crioReloadedIntervals[i].To = crioReloadedIntervals[i].To.Add(time.Minute * 2)
+		crioReloadedIntervals[i].From = crioReloadedIntervals[i].From.Add(time.Minute * -2)
+	}
+
+	return &OverlapOtherIntervalsPathologicalEventMatcher{
+		delegate: &SimplePathologicalEventMatcher{
+			name:               "CrioReloadedTooOften",
+			messageReasonRegex: regexp.MustCompile(`^ServiceReload$`),
+			messageHumanRegex:  regexp.MustCompile(`Service crio.service was reloaded.`),
+			jira:               "https://issues.redhat.com/browse/OCPBUGS-52260",
+		},
+		allowIfWithinIntervals: crioReloadedIntervals,
+	}
+}
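Both new matchers share one pattern: collect the intervals of the "imagepolicy signature validation" e2e tests, widen each window by two minutes on either side, and tolerate the otherwise-pathological event only if it overlaps one of those padded windows. A minimal, self-contained sketch of that overlap rule, using hypothetical window/allowedWithin stand-ins rather than the real monitorapi types:

package main

import (
	"fmt"
	"time"
)

// window is a stand-in for the time span of a monitorapi.Interval.
type window struct {
	from, to time.Time
}

// overlaps reports whether two windows intersect.
func (a window) overlaps(b window) bool {
	return a.from.Before(b.to) && b.from.Before(a.to)
}

// allowedWithin mimics the matchers' core decision: a repeating event is
// tolerated only when its window overlaps one of the padded test windows.
func allowedWithin(event window, allowed []window) bool {
	for _, w := range allowed {
		if event.overlaps(w) {
			return true
		}
	}
	return false
}

func main() {
	base := time.Now()
	test := window{from: base, to: base.Add(10 * time.Minute)}
	// Pad by two minutes on each side, as the new matchers do.
	padded := window{from: test.from.Add(-2 * time.Minute), to: test.to.Add(2 * time.Minute)}

	during := window{from: base.Add(5 * time.Minute), to: base.Add(6 * time.Minute)}
	later := window{from: base.Add(30 * time.Minute), to: base.Add(31 * time.Minute)}

	fmt.Println(allowedWithin(during, []window{padded})) // true: inside a padded window
	fmt.Println(allowedWithin(later, []window{padded}))  // false: still flagged as pathological
}

The padding absorbs events that land just outside the recorded test window, while events far away from any signature-validation test remain pathological.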
Lines changed: 248 additions & 0 deletions
@@ -0,0 +1,248 @@
package imagepolicy

import (
	"context"
	"fmt"
	"path/filepath"
	"time"

	g "github.com/onsi/ginkgo/v2"
	o "github.com/onsi/gomega"
	machineconfighelper "github.com/openshift/origin/test/extended/machine_config"
	exutil "github.com/openshift/origin/test/extended/util"
	kapiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
	e2e "k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	admissionapi "k8s.io/pod-security-admission/api"
)

const (
	testReleaseImageScope             = "quay.io/openshift-release-dev/ocp-release@sha256:fbad931c725b2e5b937b295b58345334322bdabb0b67da1c800a53686d7397da"
	testReferenceImageScope           = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4db234f37ae6712e2f7ed8d13f7fb49971c173d0e4f74613d0121672fa2e01f5"
	registriesWorkerPoolMachineConfig = "99-worker-generated-registries"
	registriesMasterPoolMachineConfig = "99-master-generated-registries"
	testPodName                       = "signature-validation-test-pod"
	workerPool                        = "worker"
	masterPool                        = "master"
	SignatureValidationFailedReason   = "SignatureValidationFailed"
)

var _ = g.Describe("[sig-imagepolicy][OCPFeatureGate:SigstoreImageVerification][Serial]", g.Ordered, func() {
	defer g.GinkgoRecover()
	var (
		oc                                        = exutil.NewCLIWithoutNamespace("cluster-image-policy")
		tctx                                      = context.Background()
		cli                                       = exutil.NewCLIWithPodSecurityLevel("verifysigstore-e2e", admissionapi.LevelBaseline)
		clif                                      = cli.KubeFramework()
		imgpolicyCli                              = exutil.NewCLIWithPodSecurityLevel("verifysigstore-imagepolicy-e2e", admissionapi.LevelBaseline)
		imgpolicyClif                             = imgpolicyCli.KubeFramework()
		imagePolicyBaseDir                        = exutil.FixturePath("testdata", "imagepolicy")
		invalidPublicKeyClusterImagePolicyFixture = filepath.Join(imagePolicyBaseDir, "invalid-public-key-cluster-image-policy.yaml")
		publicKeyRekorClusterImagePolicyFixture   = filepath.Join(imagePolicyBaseDir, "public-key-rekor-cluster-image-policy.yaml")
		invalidPublicKeyImagePolicyFixture        = filepath.Join(imagePolicyBaseDir, "invalid-public-key-image-policy.yaml")
		publicKeyRekorImagePolicyFixture          = filepath.Join(imagePolicyBaseDir, "public-key-rekor-image-policy.yaml")
	)

	g.BeforeAll(func() {
		if !exutil.IsTechPreviewNoUpgrade(tctx, oc.AdminConfigClient()) {
			g.Skip("skipping, this feature is only supported on TechPreviewNoUpgrade clusters")
		}

		outStr, err := oc.Run("adm", "release", "info", testReleaseImageScope).Args("-o=go-template", "--template={{.digest}}").Output()
		if err != nil || outStr == "" {
			o.Expect(err).ToNot(o.HaveOccurred())
			e2eskipper.Skipf("can't validate %s release image for testing, consider updating the test", testReleaseImageScope)
		}
	})

	g.It("Should fail clusterimagepolicy signature validation when the root of trust does not match the identity in the signature", func() {
		createClusterImagePolicy(oc, invalidPublicKeyClusterImagePolicyFixture)
		g.DeferCleanup(deleteClusterImagePolicy, oc, invalidPublicKeyClusterImagePolicyFixture)

		waitForPoolComplete(oc)

		pod, err := launchTestPod(tctx, clif, testPodName, testReleaseImageScope)
		o.Expect(err).NotTo(o.HaveOccurred())
		g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

		err = waitForTestPodContainerToFailSignatureValidation(tctx, clif, pod)
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.It("Should fail clusterimagepolicy signature validation when a scope in the allowedRegistries list does not skip signature verification", func() {
		// Ensure allowedRegistries do not skip signature verification by adding testReleaseImageScope to the list
		allowedRegistries := []string{"quay.io", "registry.redhat.io", "image-registry.openshift-image-registry.svc:5000", testReleaseImageScope}
		updateImageConfig(oc, allowedRegistries)
		g.DeferCleanup(cleanupImageConfig, oc)

		createClusterImagePolicy(oc, invalidPublicKeyClusterImagePolicyFixture)
		g.DeferCleanup(deleteClusterImagePolicy, oc, invalidPublicKeyClusterImagePolicyFixture)

		waitForPoolComplete(oc)

		pod, err := launchTestPod(tctx, clif, testPodName, testReleaseImageScope)
		o.Expect(err).NotTo(o.HaveOccurred())
		g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

		err = waitForTestPodContainerToFailSignatureValidation(tctx, clif, pod)
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.It("Should pass clusterimagepolicy signature validation with signed image", func() {
		createClusterImagePolicy(oc, publicKeyRekorClusterImagePolicyFixture)
		g.DeferCleanup(deleteClusterImagePolicy, oc, publicKeyRekorClusterImagePolicyFixture)

		waitForPoolComplete(oc)

		pod, err := launchTestPod(tctx, clif, testPodName, testReleaseImageScope)
		o.Expect(err).NotTo(o.HaveOccurred())
		g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

		err = e2epod.WaitForPodSuccessInNamespace(tctx, clif.ClientSet, pod.Name, pod.Namespace)
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.It("Should fail imagepolicy signature validation in different namespaces when the root of trust does not match the identity in the signature", func() {
		createImagePolicy(oc, invalidPublicKeyImagePolicyFixture, imgpolicyClif.Namespace.Name)
		g.DeferCleanup(deleteImagePolicy, oc, invalidPublicKeyImagePolicyFixture, imgpolicyClif.Namespace.Name)

		createImagePolicy(oc, invalidPublicKeyImagePolicyFixture, clif.Namespace.Name)
		g.DeferCleanup(deleteImagePolicy, oc, invalidPublicKeyImagePolicyFixture, clif.Namespace.Name)

		waitForPoolComplete(oc)

		pod, err := launchTestPod(tctx, imgpolicyClif, testPodName, testReferenceImageScope)
		o.Expect(err).NotTo(o.HaveOccurred())
		g.DeferCleanup(deleteTestPod, tctx, imgpolicyClif, testPodName)

		err = waitForTestPodContainerToFailSignatureValidation(tctx, imgpolicyClif, pod)
		o.Expect(err).NotTo(o.HaveOccurred())

		pod, err = launchTestPod(tctx, clif, testPodName, testReferenceImageScope)
		o.Expect(err).NotTo(o.HaveOccurred())
		g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

		err = waitForTestPodContainerToFailSignatureValidation(tctx, clif, pod)
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.It("Should pass imagepolicy signature validation with signed image in namespaces", func() {
		createImagePolicy(oc, publicKeyRekorImagePolicyFixture, clif.Namespace.Name)
		g.DeferCleanup(deleteImagePolicy, oc, publicKeyRekorImagePolicyFixture, clif.Namespace.Name)

		createImagePolicy(oc, publicKeyRekorImagePolicyFixture, imgpolicyClif.Namespace.Name)
		g.DeferCleanup(deleteImagePolicy, oc, publicKeyRekorImagePolicyFixture, imgpolicyClif.Namespace.Name)

		waitForPoolComplete(oc)

		pod, err := launchTestPod(tctx, clif, testPodName, testReferenceImageScope)
		o.Expect(err).NotTo(o.HaveOccurred())
		g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

		err = e2epod.WaitForPodSuccessInNamespace(tctx, clif.ClientSet, pod.Name, pod.Namespace)
		o.Expect(err).NotTo(o.HaveOccurred())

		pod, err = launchTestPod(tctx, imgpolicyClif, testPodName, testReferenceImageScope)
		o.Expect(err).NotTo(o.HaveOccurred())
		g.DeferCleanup(deleteTestPod, tctx, imgpolicyClif, testPodName)

		err = e2epod.WaitForPodSuccessInNamespace(tctx, imgpolicyClif.ClientSet, pod.Name, pod.Namespace)
		o.Expect(err).NotTo(o.HaveOccurred())
	})
})

func createClusterImagePolicy(oc *exutil.CLI, fixture string) {
	err := oc.Run("create").Args("-f", fixture).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
}

func deleteClusterImagePolicy(oc *exutil.CLI, fixture string) error {
	return oc.Run("delete").Args("-f", fixture).Execute()
}

func createImagePolicy(oc *exutil.CLI, fixture string, namespace string) {
	err := oc.Run("create").Args("-f", fixture, "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
}

func deleteImagePolicy(oc *exutil.CLI, fixture string, namespace string) error {
	return oc.Run("delete").Args("-f", fixture, "-n", namespace).Execute()
}

func waitForPoolComplete(oc *exutil.CLI) {
	// Brief pause to let the machine config operator observe the policy change
	// before polling for the regenerated registries config.
	time.Sleep(10 * time.Second)
	machineconfighelper.WaitForConfigAndPoolComplete(oc, workerPool, registriesWorkerPoolMachineConfig)
	machineconfighelper.WaitForConfigAndPoolComplete(oc, masterPool, registriesMasterPoolMachineConfig)
}

func updateImageConfig(oc *exutil.CLI, allowedRegistries []string) {
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		imageConfig, err := oc.AdminConfigClient().ConfigV1().Images().Get(
			context.Background(), "cluster", metav1.GetOptions{},
		)
		if err != nil {
			return err
		}
		imageConfig.Spec.RegistrySources.AllowedRegistries = allowedRegistries
		_, err = oc.AdminConfigClient().ConfigV1().Images().Update(
			context.Background(), imageConfig, metav1.UpdateOptions{},
		)
		return err
	})
	o.Expect(err).NotTo(o.HaveOccurred(), "error updating image config")
	time.Sleep(10 * time.Second)
	machineconfighelper.WaitForConfigAndPoolComplete(oc, workerPool, registriesWorkerPoolMachineConfig)
	machineconfighelper.WaitForConfigAndPoolComplete(oc, masterPool, registriesMasterPoolMachineConfig)
}

func cleanupImageConfig(oc *exutil.CLI) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		imageConfig, err := oc.AdminConfigClient().ConfigV1().Images().Get(
			context.Background(), "cluster", metav1.GetOptions{},
		)
		if err != nil {
			return err
		}
		imageConfig.Spec.RegistrySources.AllowedRegistries = []string{}
		_, err = oc.AdminConfigClient().ConfigV1().Images().Update(
			context.Background(), imageConfig, metav1.UpdateOptions{},
		)
		return err
	})
}

func launchTestPod(ctx context.Context, f *e2e.Framework, podName, image string) (*kapiv1.Pod, error) {
	g.By(fmt.Sprintf("launching the pod: %s", podName))
	contName := fmt.Sprintf("%s-container", podName)
	pod := &kapiv1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: kapiv1.PodSpec{
			Containers: []kapiv1.Container{
				{
					Name:            contName,
					Image:           image,
					ImagePullPolicy: kapiv1.PullAlways,
				},
			},
			RestartPolicy: kapiv1.RestartPolicyNever,
		},
	}
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
	return pod, err
}

func deleteTestPod(ctx context.Context, f *e2e.Framework, podName string) error {
	return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
}

func waitForTestPodContainerToFailSignatureValidation(ctx context.Context, f *e2e.Framework, pod *kapiv1.Pod) error {
	return e2epod.WaitForPodContainerToFail(ctx, f.ClientSet, pod.Namespace, pod.Name, 0, SignatureValidationFailedReason, e2e.PodStartShortTimeout)
}

test/extended/include.go

Lines changed: 1 addition & 0 deletions
@@ -33,6 +33,7 @@ import (
 	_ "github.com/openshift/origin/test/extended/idling"
 	_ "github.com/openshift/origin/test/extended/image_ecosystem"
 	_ "github.com/openshift/origin/test/extended/imageapis"
+	_ "github.com/openshift/origin/test/extended/imagepolicy"
 	_ "github.com/openshift/origin/test/extended/images"
 	_ "github.com/openshift/origin/test/extended/images/trigger"
 	_ "github.com/openshift/origin/test/extended/kernel"

test/extended/machine_config/helpers.go

Lines changed: 79 additions & 0 deletions
@@ -1217,3 +1217,82 @@ func getNodesForPool(ctx context.Context, oc *exutil.CLI, kubeClient *kubernetes
 	}
 	return nodes, nil
 }
+
+// WaitForConfigAndPoolComplete is a helper function that gets a renderedConfig
+// and waits for its pool to complete. The return value is the final rendered config.
+func WaitForConfigAndPoolComplete(oc *exutil.CLI, pool, mcName string) string {
+	config, err := WaitForRenderedConfig(oc, pool, mcName)
+	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v: failed to render machine config %s from pool %s", err, mcName, pool))
+
+	err = WaitForPoolComplete(oc, pool, config)
+	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v: pool %s did not update to config %s", err, pool, config))
+	return config
+}
+
+// WaitForRenderedConfig polls a MachineConfigPool until it has included the
+// given mcName in its config, and returns the new rendered config name.
+func WaitForRenderedConfig(oc *exutil.CLI, pool, mcName string) (string, error) {
+	return WaitForRenderedConfigs(oc, pool, mcName)
+}
+
+// WaitForRenderedConfigs polls a MachineConfigPool until it has included the
+// given mcNames in its config, and returns the new rendered config name.
+func WaitForRenderedConfigs(oc *exutil.CLI, pool string, mcNames ...string) (string, error) {
+	var renderedConfig string
+	machineConfigClient, err := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
+	o.Expect(err).NotTo(o.HaveOccurred())
+	found := make(map[string]bool)
+	o.Eventually(func() bool {
+		// Set up the list
+		for _, name := range mcNames {
+			found[name] = false
+		}
+
+		// Update found based on the MCP
+		mcp, err := machineConfigClient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{})
+		if err != nil {
+			return false
+		}
+		for _, mc := range mcp.Spec.Configuration.Source {
+			if _, ok := found[mc.Name]; ok {
+				found[mc.Name] = true
+			}
+		}
+
+		// If any are still false, then they weren't included in the MCP
+		for _, nameFound := range found {
+			if !nameFound {
+				return false
+			}
+		}
+
+		// All the required names were found
+		renderedConfig = mcp.Spec.Configuration.Name
+		return true
+	}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
+	return renderedConfig, nil
+}
+
+// WaitForPoolComplete polls a pool until it has completed an update to target
+func WaitForPoolComplete(oc *exutil.CLI, pool, target string) error {
+	machineConfigClient, err := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
+	o.Expect(err).NotTo(o.HaveOccurred())
+	framework.Logf("Waiting for pool %s to complete %s", pool, target)
+	o.Eventually(func() bool {
+		mcp, err := machineConfigClient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{})
+		if err != nil {
+			framework.Logf("Failed to grab machineconfigpools, error: %v", err)
+			return false
+		}
+		if mcp.Status.Configuration.Name != target {
+			return false
+		}
+		if IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdated) {
+			return true
+		}
+		return false
+	}, 20*time.Minute, 10*time.Second).Should(o.BeTrue())
+	return nil
+}
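The e2e file above drives these helpers in pairs, once per pool, after a (Cluster)ImagePolicy change forces the machine config operator to regenerate the registries configs. A hedged usage sketch of that call pattern, reusing the pool and MachineConfig names from this PR:

package example

import (
	machineconfighelper "github.com/openshift/origin/test/extended/machine_config"
	exutil "github.com/openshift/origin/test/extended/util"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForRegistriesRollout mirrors waitForPoolComplete in the new test file:
// block until both pools have rolled out their regenerated registries
// MachineConfig before launching any test pods.
func waitForRegistriesRollout(oc *exutil.CLI) {
	worker := machineconfighelper.WaitForConfigAndPoolComplete(oc, "worker", "99-worker-generated-registries")
	master := machineconfighelper.WaitForConfigAndPoolComplete(oc, "master", "99-master-generated-registries")
	framework.Logf("pools converged: worker=%s master=%s", worker, master)
}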
