Commit 2ba18d8

verify: Periodically write valid signatures to an on cluster config map
As a cluster verifies signatures for release digests, flush them to an on-cluster config map so that, even if the remote endpoint is down, the cluster can still verify those signatures.
1 parent abd6802 commit 2ba18d8
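
To make the persisted format concrete, here is a minimal sketch (not part of the commit; the digest and signature bytes are invented) of the key layout used by the store added below: each verified signature becomes a binaryData entry named "<digest>-<index>" in a signatures-managed config map in openshift-config-managed, labelled release.openshift.io/verification-signatures.

// Illustrative only: how verified signatures are flattened into config map keys.
package main

import "fmt"

func main() {
    // Signatures the verifier has accepted, keyed by release digest (hypothetical values).
    verified := map[string][][]byte{
        "sha256:exampledigest": {[]byte("signature-0"), []byte("signature-1")},
    }

    // The on-cluster store writes each signature under a "<digest>-<index>" key.
    binaryData := map[string][]byte{}
    for digest, sigs := range verified {
        for i, sig := range sigs {
            binaryData[fmt.Sprintf("%s-%d", digest, i)] = sig
        }
    }

    for key := range binaryData {
        fmt.Println(key) // sha256:exampledigest-0, sha256:exampledigest-1
    }
}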

5 files changed: +220 -9 lines changed

pkg/cvo/cvo.go

Lines changed: 25 additions & 7 deletions
@@ -46,6 +46,7 @@ import (
     "github.com/openshift/cluster-version-operator/pkg/payload/precondition"
     preconditioncv "github.com/openshift/cluster-version-operator/pkg/payload/precondition/clusterversion"
     "github.com/openshift/cluster-version-operator/pkg/verify"
+    "github.com/openshift/cluster-version-operator/pkg/verify/verifyconfigmap"
 )
 
 const (
@@ -136,6 +137,9 @@ type Operator struct {
     // verifier, if provided, will be used to check an update before it is executed.
     // Any error will prevent an update payload from being accessed.
     verifier verify.Interface
+    // signatureStore, if set, will be used to periodically persist signatures to
+    // the cluster as a config map
+    signatureStore *verify.StorePersister
 
     configSync ConfigSyncWorker
     // statusInterval is how often the configSync worker is allowed to retrigger
@@ -238,17 +242,21 @@ func (optr *Operator) InitializeFromPayload(restConfig *rest.Config, burstRestCo
     }
     // XXX: set this to the cincinnati version in preference
     if _, err := semver.Parse(update.ImageRef.Name); err != nil {
-        return fmt.Errorf("The local release contents name %q is not a valid semantic version - no current version will be reported: %v", update.ImageRef.Name, err)
+        return fmt.Errorf("the local release contents name %q is not a valid semantic version - no current version will be reported: %v", update.ImageRef.Name, err)
     }
 
     optr.releaseCreated = update.ImageRef.CreationTimestamp.Time
     optr.releaseVersion = update.ImageRef.Name
 
     // Wraps operator's HTTPClient method to allow releaseVerifier to create http client with up-to-date config.
     clientBuilder := &verifyClientBuilder{builder: optr.HTTPClient}
+    configClient, err := coreclientsetv1.NewForConfig(restConfig)
+    if err != nil {
+        return fmt.Errorf("unable to create a configuration client: %v", err)
+    }
 
     // attempt to load a verifier as defined in the payload
-    verifier, err := loadConfigMapVerifierDataFromUpdate(update, clientBuilder)
+    verifier, signatureStore, err := loadConfigMapVerifierDataFromUpdate(update, clientBuilder, configClient)
     if err != nil {
         return err
     }
@@ -259,6 +267,7 @@ func (optr *Operator) InitializeFromPayload(restConfig *rest.Config, burstRestCo
         verifier = verify.Reject
     }
     optr.verifier = verifier
+    optr.signatureStore = signatureStore
 
     // after the verifier has been loaded, initialize the sync worker with a payload retriever
     // which will consume the verifier
@@ -282,7 +291,7 @@ func (optr *Operator) InitializeFromPayload(restConfig *rest.Config, burstRestCo
 // It returns an error if the data is not valid, or no verifier if no config map is found. See the verify
 // package for more details on the algorithm for verification. If the annotation is set, a verifier or error
 // is always returned.
-func loadConfigMapVerifierDataFromUpdate(update *payload.Update, clientBuilder verify.ClientBuilder) (verify.Interface, error) {
+func loadConfigMapVerifierDataFromUpdate(update *payload.Update, clientBuilder verify.ClientBuilder, configMapClient coreclientsetv1.ConfigMapsGetter) (verify.Interface, *verify.StorePersister, error) {
     configMapGVK := corev1.SchemeGroupVersion.WithKind("ConfigMap")
     for _, manifest := range update.Manifests {
         if manifest.GVK != configMapGVK {
@@ -294,15 +303,21 @@ func loadConfigMapVerifierDataFromUpdate(update *payload.Update, clientBuilder v
         src := fmt.Sprintf("the config map %s/%s", manifest.Obj.GetNamespace(), manifest.Obj.GetName())
         data, _, err := unstructured.NestedStringMap(manifest.Obj.Object, "data")
         if err != nil {
-            return nil, errors.Wrapf(err, "%s is not valid: %v", src, err)
+            return nil, nil, errors.Wrapf(err, "%s is not valid: %v", src, err)
         }
         verifier, err := verify.NewFromConfigMap(src, data, clientBuilder)
         if err != nil {
-            return nil, err
+            return nil, nil, err
         }
-        return verifier, nil
+
+        // allow the verifier to consult the cluster for signature data, and also configure
+        // a process that writes signatures back to that store
+        signatureStore := verifyconfigmap.NewStore(configMapClient, nil)
+        verifier = verifier.WithStores(signatureStore)
+        persister := verify.NewSignatureStorePersister(signatureStore, verifier)
+        return verifier, persister, nil
     }
-    return nil, nil
+    return nil, nil, nil
 }
 
 // Run runs the cluster version operator until stopCh is completed. Workers is ignored for now.
@@ -339,6 +354,9 @@ func (optr *Operator) Run(ctx context.Context, workers int) {
             utilruntime.HandleError(fmt.Errorf("unable to perform final sync: %v", err))
         }
     }, time.Second, stopCh)
+    if optr.signatureStore != nil {
+        go optr.signatureStore.Run(ctx, optr.minimumUpdateCheckInterval*2)
+    }
 
     <-stopCh

pkg/cvo/cvo_test.go

Lines changed: 6 additions & 1 deletion
@@ -28,6 +28,7 @@ import (
     "k8s.io/apimachinery/pkg/watch"
     "k8s.io/client-go/discovery"
     "k8s.io/client-go/rest"
+    kfake "k8s.io/client-go/kubernetes/fake"
     ktesting "k8s.io/client-go/testing"
     "k8s.io/client-go/util/workqueue"
     "k8s.io/klog"
@@ -3417,13 +3418,17 @@ func Test_loadReleaseVerifierFromConfigMap(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            got, err := loadConfigMapVerifierDataFromUpdate(tt.update, verify.DefaultClient)
+            f := kfake.NewSimpleClientset()
+            got, store, err := loadConfigMapVerifierDataFromUpdate(tt.update, verify.DefaultClient, f.CoreV1())
             if (err != nil) != tt.wantErr {
                 t.Fatalf("loadReleaseVerifierFromPayload() error = %v, wantErr %v", err, tt.wantErr)
             }
             if (got != nil) != tt.want {
                 t.Fatal(got)
             }
+            if tt.want && store == nil {
+                t.Fatalf("expected valid store")
+            }
             if err != nil {
                 return
             }

pkg/verify/persist.go

Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
+package verify
+
+import (
+    "context"
+    "time"
+
+    "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/klog"
+)
+
+// SignatureSource provides a set of signatures by digest to save.
+type SignatureSource interface {
+    // Signatures returns a list of valid signatures for release digests.
+    Signatures() map[string][][]byte
+}
+
+// PersistentSignatureStore is a store that can save signatures for
+// later recovery.
+type PersistentSignatureStore interface {
+    // Store saves the provided signatures or return an error. If context
+    // reaches its deadline the store should be cancelled.
+    Store(ctx context.Context, signatures map[string][][]byte) error
+}
+
+// StorePersister saves signatures into store periodically.
+type StorePersister struct {
+    store      PersistentSignatureStore
+    signatures SignatureSource
+}
+
+// NewSignatureStorePersister creates an instance that can save signatures into the destination
+// store.
+func NewSignatureStorePersister(dst PersistentSignatureStore, src SignatureSource) *StorePersister {
+    return &StorePersister{
+        store:      dst,
+        signatures: src,
+    }
+}
+
+// Run flushes signatures to the provided store every interval or until the context is finished.
+// After context is done, it runs one more time to attempt to flush the current state. It does not
+// return until that last store completes.
+func (p *StorePersister) Run(ctx context.Context, interval time.Duration) {
+    wait.Until(func() {
+        if err := p.store.Store(ctx, p.signatures.Signatures()); err != nil {
+            klog.Warningf("Unable to save signatures: %v", err)
+        }
+    }, interval, ctx.Done())
+
+    if err := p.store.Store(context.Background(), p.signatures.Signatures()); err != nil {
+        klog.Warningf("Unable to save signatures during final flush: %v", err)
+    }
+}
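
A short usage sketch (not part of the commit) of how StorePersister drives a destination store from a signature source; the memorySource and printStore types are toy implementations invented for this example. In the operator itself, the release verifier acts as the SignatureSource and verifyconfigmap.Store is the destination.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/openshift/cluster-version-operator/pkg/verify"
)

// memorySource is a toy SignatureSource returning a fixed set of signatures.
type memorySource map[string][][]byte

func (m memorySource) Signatures() map[string][][]byte { return m }

// printStore is a toy PersistentSignatureStore that only reports what it was asked to save.
type printStore struct{}

func (printStore) Store(ctx context.Context, signatures map[string][][]byte) error {
    fmt.Println("flushing signatures for", len(signatures), "digest(s)")
    return nil
}

func main() {
    src := memorySource{"sha256:exampledigest": {[]byte("signature-0")}}
    persister := verify.NewSignatureStorePersister(printStore{}, src)

    // Flush once per second until the context expires; Run then performs one
    // final flush before returning.
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()
    persister.Run(ctx, time.Second)
}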

pkg/verify/verify.go

Lines changed: 1 addition & 1 deletion
@@ -80,7 +80,7 @@ func NewReleaseVerifier(verifiers map[string]openpgp.EntityList, locations []*ur
 }
 
 // WithStores copies the provided verifier and adds any provided stores to the list.
-func (v *ReleaseVerifier) WithStores(stores []SignatureStore) *ReleaseVerifier {
+func (v *ReleaseVerifier) WithStores(stores ...SignatureStore) *ReleaseVerifier {
     return &ReleaseVerifier{
         verifiers: v.verifiers,
         locations: v.locations,

pkg/verify/verifyconfigmap/store.go

Lines changed: 135 additions & 0 deletions
@@ -0,0 +1,135 @@
+package verifyconfigmap
+
+import (
+    "context"
+    "fmt"
+    "strings"
+    "sync"
+    "time"
+
+    "golang.org/x/time/rate"
+
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+    "k8s.io/client-go/util/retry"
+)
+
+// ReleaseLabelConfigMap is a label applied to a configmap inside the
+// openshift-config-managed namespace that indicates it contains signatures
+// for release image digests. Any binaryData key that starts with the digest
+// is added to the list of signatures checked.
+const ReleaseLabelConfigMap = "release.openshift.io/verification-signatures"
+
+// Store abstracts retrieving signatures from config maps on a cluster.
+type Store struct {
+    client corev1client.ConfigMapsGetter
+    ns     string
+
+    limiter *rate.Limiter
+    lock    sync.Mutex
+    last    []corev1.ConfigMap
+}
+
+// NewStore returns a store that can retrieve or persist signatures on a
+// cluster. If limiter is not specified it defaults to one call every 30 seconds.
+func NewStore(client corev1client.ConfigMapsGetter, limiter *rate.Limiter) *Store {
+    if limiter == nil {
+        limiter = rate.NewLimiter(rate.Every(30*time.Second), 1)
+    }
+    return &Store{
+        client:  client,
+        ns:      "openshift-config-managed",
+        limiter: limiter,
+    }
+}
+
+// String displays information about this source for human review.
+func (s *Store) String() string {
+    return fmt.Sprintf("config maps in %s with label %q", s.ns, ReleaseLabelConfigMap)
+}
+
+// rememberMostRecentConfigMaps stores a set of config maps containing
+// signatures.
+func (s *Store) rememberMostRecentConfigMaps(last []corev1.ConfigMap) {
+    s.lock.Lock()
+    defer s.lock.Unlock()
+    s.last = last
+}
+
+// mostRecentConfigMaps returns the last cached version of config maps
+// containing signatures.
+func (s *Store) mostRecentConfigMaps() []corev1.ConfigMap {
+    s.lock.Lock()
+    defer s.lock.Unlock()
+    return s.last
+}
+
+// DigestSignatures returns a list of signatures that match the request
+// digest out of config maps labelled with ReleaseLabelConfigMap in the
+// openshift-config-managed namespace.
+func (s *Store) DigestSignatures(ctx context.Context, digest string) ([][]byte, error) {
+    // avoid repeatedly reloading config maps
+    items := s.mostRecentConfigMaps()
+    r := s.limiter.Reserve()
+    if items == nil || r.OK() {
+        configMaps, err := s.client.ConfigMaps(s.ns).List(metav1.ListOptions{
+            LabelSelector: ReleaseLabelConfigMap,
+        })
+        if err != nil {
+            s.rememberMostRecentConfigMaps([]corev1.ConfigMap{})
+            return nil, err
+        }
+        items = configMaps.Items
+        s.rememberMostRecentConfigMaps(configMaps.Items)
+    }
+
+    var signatures [][]byte
+    for _, cm := range items {
+        for k, v := range cm.BinaryData {
+            if strings.HasPrefix(k, digest) {
+                signatures = append(signatures, v)
+            }
+        }
+    }
+    return signatures, nil
+}
+
+// Store attempts to persist the provided signatures into a form DigestSignatures will
+// retrieve.
+func (s *Store) Store(ctx context.Context, signaturesByDigest map[string][][]byte) error {
+    cm := &corev1.ConfigMap{
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: s.ns,
+            Name:      "signatures-managed",
+            Labels: map[string]string{
+                ReleaseLabelConfigMap: "",
+            },
+        },
+        BinaryData: make(map[string][]byte),
+    }
+    for digest, signatures := range signaturesByDigest {
+        for i := 0; i < len(signatures); i++ {
+            cm.BinaryData[fmt.Sprintf("%s-%d", digest, i)] = signatures[i]
+        }
+    }
+    return retry.OnError(
+        retry.DefaultRetry,
+        func(err error) bool { return errors.IsConflict(err) || errors.IsAlreadyExists(err) },
+        func() error {
+            existing, err := s.client.ConfigMaps(s.ns).Get(cm.Name, metav1.GetOptions{})
+            if errors.IsNotFound(err) {
+                _, err := s.client.ConfigMaps(s.ns).Create(cm)
+                return err
+            }
+            if err != nil {
+                return err
+            }
+            existing.Labels = cm.Labels
+            existing.BinaryData = cm.BinaryData
+            existing.Data = cm.Data
+            _, err = s.client.ConfigMaps(s.ns).Update(existing)
+            return err
+        },
+    )
+}
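
A round-trip sketch (not part of the commit) of the config map store using the fake clientset from k8s.io/client-go; the digest and signature bytes are invented, and the import path assumes this repository's module layout.

package main

import (
    "context"
    "fmt"

    "k8s.io/client-go/kubernetes/fake"

    "github.com/openshift/cluster-version-operator/pkg/verify/verifyconfigmap"
)

func main() {
    client := fake.NewSimpleClientset()
    store := verifyconfigmap.NewStore(client.CoreV1(), nil)

    // Persist two signatures for a hypothetical release digest; this creates (or
    // updates) the signatures-managed config map in openshift-config-managed.
    signatures := map[string][][]byte{
        "sha256:exampledigest": {[]byte("signature-0"), []byte("signature-1")},
    }
    if err := store.Store(context.Background(), signatures); err != nil {
        panic(err)
    }

    // Read them back the way the release verifier would when the remote
    // signature endpoint is unreachable.
    found, err := store.DigestSignatures(context.Background(), "sha256:exampledigest")
    if err != nil {
        panic(err)
    }
    fmt.Println(len(found)) // expected: 2
}

NewStore defaults to a limiter of one list call per 30 seconds when none is provided, and DigestSignatures caches the most recently listed config maps between refreshes.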
