Skip to content

Commit 28d8b77

Browse files
committed
cmd-sign: add support for signing OCI images
This adds a new `cosa sign --oci` command to sign OCI container images. This is part of the effort to move FCOS to a container-native build flow, where we now produce non-encapsulated container images. The new command works by sending a request to Robosignatory to sign the image manifest digest. Robosignatory returns a detached signature, which we then merge with the original payload to create a cleartext signed message that can be understood by containers/image. This is a short-term solution until we can move to Sigstore. Part of coreos/fedora-coreos-tracker#1969.
1 parent a190948 commit 28d8b77

File tree

1 file changed

+208
-0
lines changed

1 file changed

+208
-0
lines changed

src/cmd-sign

Lines changed: 208 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,13 @@
77

88
import argparse
99
import gi
10+
import json
1011
import os
1112
import shutil
1213
import subprocess
1314
import sys
1415
import tempfile
16+
import time
1517

1618
import boto3
1719

@@ -61,6 +63,8 @@ def parse_args():
6163
group = robosig.add_mutually_exclusive_group(required=True)
6264
group.add_argument("--ostree", help="sign commit", action='store_true')
6365
group.add_argument("--images", help="sign images", action='store_true')
66+
group.add_argument("--oci", metavar='KEY',
67+
help="sign OCI image in meta.json key (e.g. 'base-oscontainer')")
6468
robosig.add_argument("--extra-fedmsg-keys", action='append',
6569
metavar='KEY=VAL', default=[],
6670
help="extra keys to inject into messages")
@@ -71,6 +75,7 @@ def parse_args():
7175
robosig.add_argument("--gpgkeypath", help="path to directory containing "
7276
"public keys to use for signature verification",
7377
default="/etc/pki/rpm-gpg")
78+
robosig.add_argument("--s3-sigstore", help="bucket and prefix to S3 sigstore")
7479
robosig.add_argument("--verify-only", action='store_true',
7580
help="verify only that the sigs are valid and make public")
7681
robosig.set_defaults(func=cmd_robosignatory)
@@ -106,6 +111,10 @@ def cmd_robosignatory(args):
106111
if args.s3 is None:
107112
raise Exception("Missing --s3 for --ostree")
108113
robosign_ostree(args, s3, build, gpgkey)
114+
elif args.oci:
115+
if args.verify_only:
116+
raise Exception("Cannot use --verify-only with --oci")
117+
robosign_oci(args, s3, build, gpgkey)
109118
else:
110119
assert args.images
111120
if args.s3 is None:
@@ -296,5 +305,204 @@ def validate_response(response):
296305
assert response['status'].lower() == 'success', str(response)
297306

298307

308+
def robosign_oci(args, s3, build, gpgkey):
    """Sign OCI image manifest digests via Robosignatory and publish the
    signatures to an S3-backed sigstore.

    For every architecture of `args.build`, looks up the image named by
    `args.oci` in meta.json, builds a containers-signature(5) JSON payload
    for each (tag, digest) pair, has Robosignatory produce a detached GPG
    signature for each payload, merges payload + detached signature into a
    cleartext-signed message that containers/image understands, and uploads
    the result publicly under `args.s3_sigstore`.

    Args:
        args: parsed CLI namespace (uses .build, .oci, .s3_sigstore, .arch,
              .fedmsg_conf, .extra_keys).
        s3: boto3 S3 client.
        build: build metadata for the default arch (unused directly; per-arch
               metadata is re-fetched below).
        gpgkey: path to the public key used for signature verification.
    """
    builds = Builds()

    # Map of {repo:tag -> [digests]}. "Identity" is the term used in
    # containers-signature(5) to refer to how users will actually be pulling
    # the image (which is usually by tag).
    identities = {}
    for arch in builds.get_build_arches(args.build):
        # NB: deliberately not reusing the `build` parameter name here; we
        # need each arch's own metadata.
        arch_build = builds.get_build_meta(args.build, arch)
        image = arch_build.get(args.oci)
        if not image:
            print(f"skipping signing for missing {args.oci} image on {arch}")
            continue

        # we sign for every tag we've pushed as
        for tag in image['tags']:
            identity = f"{image['image']}:{tag}"
            identities.setdefault(identity, []).append(image['digest'])

    # add the git commit of ourselves in the signatures for bookkeeping
    creator = 'coreos-assembler'
    try:
        with open('/cosa/coreos-assembler-git.json') as f:
            cosa_git = json.load(f)
        creator += ' g' + cosa_git['git']['commit'][:12]
    except FileNotFoundError:
        pass

    with tempfile.TemporaryDirectory(prefix="cosa-sign-", dir="tmp") as d:
        # first, create the payloads to be signed
        files_to_upload = []
        for identity, digests in identities.items():
            for digest in digests:
                # see https://github.com/containers/container-libs/blob/58b82c921fde7dafbc0da766f1037602cfd5553c/image/docs/containers-signature.5.md?plain=1#L110
                data = {
                    "critical": {
                        "identity": {
                            "docker-reference": identity
                        },
                        "image": {
                            "docker-manifest-digest": digest
                        },
                        "type": "atomic container signature"
                    },
                    "optional": {
                        "creator": creator,
                        "timestamp": int(time.time())
                    }
                }

                # Make the filename unique per payload. This is just a
                # temporary name; the final naming and structure will be
                # different. Use a simple counter: unlike hash(), it is
                # deterministic across runs (hash() is randomized by
                # PYTHONHASHSEED) and cannot collide when two arches carry
                # the same (identity, digest) payload.
                filename = str(len(files_to_upload))
                path = os.path.join(d, filename)
                with open(path, 'w') as f:
                    # NB: it's important for this to be just one line so that
                    # we don't have to correct between how gpg canonicalizes
                    # the input payload differently when it's cleartext signed
                    # vs detached
                    json.dump(data, f)
                files_to_upload.append({'path': path, 'filename': filename,
                                        'identity': identity, 'digest': digest})

        # Upload them to S3. We upload to `staging/` first, and then will move
        # them to their final location once they're verified.
        sigstore_bucket, sigstore_prefix = get_bucket_and_prefix(args.s3_sigstore)
        sigstore_prefix = os.path.join(sigstore_prefix, 'staging')

        # First, empty out staging/ so we don't accumulate cruft over time
        # https://stackoverflow.com/a/59026702
        # Note this assumes we don't run in parallel on the same sigstore
        # target, which is the case for us since only one release job can run at
        # a time per-stream and the S3 target location is stream-based.
        # Use a paginator: list_objects_v2 returns at most 1000 keys per call,
        # so a single call could miss stale files.
        paginator = s3.get_paginator('list_objects_v2')
        for page in paginator.paginate(Bucket=sigstore_bucket, Prefix=sigstore_prefix):
            for obj in page.get('Contents', []):
                print('Deleting stale file', obj['Key'])
                s3.delete_object(Bucket=sigstore_bucket, Key=obj['Key'])

        # now, upload the ones we want
        artifacts = []
        for f in files_to_upload:
            s3_key = os.path.join(sigstore_prefix, f['filename'])
            print(f"Uploading s3://{sigstore_bucket}/{s3_key}")
            s3.upload_file(f['path'], sigstore_bucket, s3_key)
            artifacts.append({
                'file': f"s3://{sigstore_bucket}/{s3_key}",
                'checksum': f"sha256:{sha256sum_file(f['path'])}"
            })

        response = send_request_and_wait_for_response(
            request_type='artifacts-sign',
            config=args.fedmsg_conf,
            request_timeout=ROBOSIGNATORY_REQUEST_TIMEOUT_SEC,
            priority=ROBOSIGNATORY_MESSAGE_PRIORITY,
            environment=fedenv,
            body={
                'build_id': args.build,
                # We pass a 'basearch' here but we're actually bulk signing
                # for all arches in one shot. But we can't omit it because
                # Robosignatory logs it. It's not used otherwise.
                'basearch': args.arch,
                'artifacts': artifacts,
                **args.extra_keys
            }
        )

        validate_response(response)

        # download sigs, verify, finalize, and upload to final location
        def gpg(*args):
            subprocess.check_call(['gpg', '--homedir', d, *args])

        gpg('--quiet', '--import', gpgkey)

        sig_counter = {}
        # peel off the '/staging' bit
        final_sigstore_prefix = os.path.dirname(sigstore_prefix)
        for f in files_to_upload:
            stg_s3_key = os.path.join(sigstore_prefix, f['filename'])
            stg_sig_s3_key = stg_s3_key + '.sig'

            tmp_sig_path = os.path.join(d, f['filename'] + '.sig')
            print(f"Downloading s3://{sigstore_bucket}/{stg_sig_s3_key}")
            s3.download_file(sigstore_bucket, stg_sig_s3_key, tmp_sig_path)
            s3.delete_object(Bucket=sigstore_bucket, Key=stg_s3_key)
            s3.delete_object(Bucket=sigstore_bucket, Key=stg_sig_s3_key)

            local_artifact = f['path']

            print(f"Verifying detached signature for {local_artifact}")
            try:
                gpg('--verify', tmp_sig_path, local_artifact)
            except subprocess.CalledProcessError:
                # allow unknown signatures in stg
                if fedenv != 'stg':
                    raise

            # This is where the magic happens, from a detached signature, we
            # merge it with the original payload to create a cleartext signed
            # message so it's a single artifact like c/image expects.
            # See also: https://github.com/containers/container-libs/pull/307
            with open(tmp_sig_path, 'rb') as fp:
                armored_sig = subprocess.check_output(['gpg', '--homedir', d, '--enarmor'], input=fp.read())
            armored_sig = str(armored_sig, encoding='utf-8')

            # not strictly required, but looks more like a usual cleartext signature
            armored_sig = armored_sig.replace('ARMORED FILE', 'SIGNATURE')

            with open(local_artifact, 'r') as fp:
                original_content = fp.read()

            signed_message = "-----BEGIN PGP SIGNED MESSAGE-----\n"
            # Right now, we assume Robosignatory (really Sigul), uses SHA256;
            # in theory we could parse the signature and get the digest algo
            # that was used, but it seems unlikely that Sigul will change this
            # before it's sunset, at which point we would've already moved on
            # from this code. If it does, here's one way to do it: call `gpg
            # --list-packets` and look for 'digest algo N' and convert N to the
            # right string based on
            # https://github.com/gpg/gnupg/blob/6771ed4c13226ea8f410d022fa83888930070f70/common/openpgpdefs.h#L185
            signed_message += "Hash: SHA256\n\n"
            signed_message += original_content + "\n"
            signed_message += armored_sig

            # just overwrite the original payload; we don't need it anymore
            with open(f['path'], 'w') as fp:
                fp.write(signed_message)

            print(f"Verifying cleartext signature {f['path']}")
            try:
                gpg('--verify', f['path'])
            except subprocess.CalledProcessError:
                # allow unknown signatures in stg
                if fedenv != 'stg':
                    raise

            # tell c/image that it's a valid signature
            # https://github.com/containers/container-libs/blob/58b82c921fde7dafbc0da766f1037602cfd5553c/image/internal/signature/signature.go#L66
            signed_message = b'\x00simple-signing\n' + bytes(signed_message, encoding='utf-8')
            with open(f['path'], 'wb') as fp:
                fp.write(signed_message)

            image_repo = f['identity']
            # e.g. "quay.io/fedora/fedora-coreos:stable" -> "fedora/fedora-coreos"
            _, image_repo = image_repo.split('/', 1)
            image_repo, _ = image_repo.split(':')

            sig_prefix = f"{image_repo}@{f['digest'].replace(':', '=')}"
            sig_number = sig_counter.get(sig_prefix, 0) + 1
            sig_counter[sig_prefix] = sig_number

            # upload to final location and make public
            final_s3_key = os.path.join(final_sigstore_prefix, sig_prefix, f"signature-{sig_number}")
            print(f"Uploading {f['path']} to s3://{sigstore_bucket}/{final_s3_key}")
            s3.upload_file(f['path'], sigstore_bucket, final_s3_key, ExtraArgs={'ACL': 'public-read'})
506+
299507
# Script entry point: run main() and exit with its return code.
if __name__ == '__main__':
    sys.exit(main())

0 commit comments

Comments
 (0)