@@ -7,11 +7,13 @@
 
 import argparse
 import gi
+import json
 import os
 import shutil
 import subprocess
 import sys
 import tempfile
+import time
 
 import boto3
 
@@ -61,6 +63,7 @@ def parse_args():
     group = robosig.add_mutually_exclusive_group(required=True)
     group.add_argument("--ostree", help="sign commit", action='store_true')
     group.add_argument("--images", help="sign images", action='store_true')
+    group.add_argument("--oci", help="sign OCI images", action='store_true')
     robosig.add_argument("--extra-fedmsg-keys", action='append',
                         metavar='KEY=VAL', default=[],
                         help="extra keys to inject into messages")
@@ -71,6 +74,9 @@ def parse_args():
     robosig.add_argument("--gpgkeypath", help="path to directory containing "
                          "public keys to use for signature verification",
                          default="/etc/pki/rpm-gpg")
+    robosig.add_argument("--s3-sigstore", help="bucket and prefix to S3 sigstore")
+    robosig.add_argument("--manifest-list-digest", metavar="ALGO:DIGEST",
+                         help="digest of the manifest list to also sign")
     robosig.add_argument("--verify-only", action='store_true',
                          help="verify only that the sigs are valid and make public")
     robosig.set_defaults(func=cmd_robosignatory)
@@ -106,6 +112,10 @@ def cmd_robosignatory(args):
         if args.s3 is None:
             raise Exception("Missing --s3 for --ostree")
         robosign_ostree(args, s3, build, gpgkey)
+    elif args.oci:
+        if args.verify_only:
+            raise Exception("Cannot use --verify-only with --oci")
+        robosign_oci(args, s3, build, gpgkey)
     else:
         assert args.images
         if args.s3 is None:
@@ -296,5 +306,212 @@ def validate_response(response):
     assert response['status'].lower() == 'success', str(response)
 
 
+def robosign_oci(args, s3, build, gpgkey):
+    builds = Builds()
+
+    # Map of {repo:tag -> [digest1, digest2, ...]}. "Identity" is the term
+    # used in containers-signature(5) to refer to how users will actually be
+    # pulling the image (which is usually by tag).
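+    # e.g. {"quay.io/fedora/fedora-coreos:stable":
+    #       ["sha256:<x86_64 digest>", "sha256:<aarch64 digest>", ...]}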
+    identities = {}
+    for arch in builds.get_build_arches(args.build):
+        build = builds.get_build_meta(args.build, arch)
+        image = build.get('base-oscontainer')
+        if not image:
+            print(f"skipping signing for missing OCI image on {arch}")
+            continue
+
+        # We sign for every tag we've pushed as. Note this code makes it seem
+        # like we may push to different tags per arch, but that's not the case.
+        for tag in image['tags']:
+            identity = f"{image['image']}:{tag}"
+            identities.setdefault(identity, []).append(image['digest'])
+
+    # For the manifest list digest, reuse the tags from the x86_64 build. As
+    # mentioned above, the tags are the same on all arches.
+    if args.manifest_list_digest:
+        build = builds.get_build_meta(args.build, 'x86_64')
+        image = build.get('base-oscontainer')
+        for tag in image['tags']:
+            identity = f"{image['image']}:{tag}"
+            identities[identity].append(args.manifest_list_digest)
+
+    # record the git commit of coreos-assembler itself in the signatures for
+    # bookkeeping
+    creator = 'coreos-assembler'
+    try:
+        with open('/cosa/coreos-assembler-git.json') as f:
+            cosa_git = json.load(f)
+        creator += ' g' + cosa_git['git']['commit'][:12]
+    except FileNotFoundError:
+        pass
+
+    with tempfile.TemporaryDirectory(prefix="cosa-sign-", dir="tmp") as d:
+        # first, create the payloads to be signed
+        files_to_upload = []
+        for identity, digests in identities.items():
+            for digest in digests:
+                # see https://github.com/containers/container-libs/blob/58b82c921fde7dafbc0da766f1037602cfd5553c/image/docs/containers-signature.5.md?plain=1#L110
+                data = {
+                    "critical": {
+                        "identity": {
+                            "docker-reference": identity
+                        },
+                        "image": {
+                            "docker-manifest-digest": digest
+                        },
+                        "type": "atomic container signature"
+                    },
+                    "optional": {
+                        "creator": creator,
+                        "timestamp": int(time.time())
+                    }
+                }
+
+                # Make the filename unique per payload file. This is just a
+                # temporary name; the final naming and structure will be
+                # different.
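+                # (Python's hash() is salted per process, but we only need
+                # the name to be unique within this single run)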
+                filename = str(abs(hash(str(data))))
+                path = os.path.join(d, filename)
+                with open(path, 'w') as f:
+                    # NB: it's important for this to be just one line so that
+                    # we don't have to correct for gpg canonicalizing the
+                    # input payload differently when it's cleartext signed
+                    # vs detached
+                    json.dump(data, f)
+                files_to_upload.append({'path': path, 'filename': filename,
+                                        'identity': identity, 'digest': digest})
+
+        # Upload them to S3. We upload to `staging/` first, and then will
+        # move them to their final location once they're verified.
+        sigstore_bucket, sigstore_prefix = get_bucket_and_prefix(args.s3_sigstore)
+        sigstore_prefix = os.path.join(sigstore_prefix, 'staging')
+
+        # First, empty out staging/ so we don't accumulate cruft over time
+        # https://stackoverflow.com/a/59026702
+        # Note this assumes we don't run in parallel on the same sigstore
+        # target, which is the case for us since only one release job can run
+        # at a time per stream and the S3 target location is stream-based.
+        staging_objects = s3.list_objects_v2(Bucket=sigstore_bucket, Prefix=sigstore_prefix)
+        objects_to_delete = [{'Key': obj['Key']} for obj in staging_objects.get('Contents', [])]
+        if len(objects_to_delete) > 0:
+            print(f'Deleting {len(objects_to_delete)} stale files')
+            s3.delete_objects(Bucket=sigstore_bucket, Delete={'Objects': objects_to_delete})
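+        # (list_objects_v2 returns at most 1000 keys per page, which matches
+        # delete_objects' per-call limit; staging/ only ever holds one build's
+        # worth of payloads, so a single unpaginated call suffices here)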
+
+        # now, upload the ones we want
+        artifacts = []
+        for f in files_to_upload:
+            s3_key = os.path.join(sigstore_prefix, f['filename'])
+            print(f"Uploading s3://{sigstore_bucket}/{s3_key}")
+            s3.upload_file(f['path'], sigstore_bucket, s3_key)
+            artifacts.append({
+                'file': f"s3://{sigstore_bucket}/{s3_key}",
+                'checksum': f"sha256:{sha256sum_file(f['path'])}"
+            })
+
+        response = send_request_and_wait_for_response(
+            request_type='artifacts-sign',
+            config=args.fedmsg_conf,
+            request_timeout=ROBOSIGNATORY_REQUEST_TIMEOUT_SEC,
+            priority=ROBOSIGNATORY_MESSAGE_PRIORITY,
+            environment=fedenv,
+            body={
+                'build_id': args.build,
+                # We pass a 'basearch' here, but we're actually bulk signing
+                # for all arches in one shot. We can't omit it, though, since
+                # Robosignatory logs it. It's not used otherwise.
+                'basearch': args.arch,
+                'artifacts': artifacts,
+                **args.extra_keys
+            }
+        )
+
+        validate_response(response)
+
+        # download sigs, verify, finalize, and upload to final location
+        def gpg(*args):
+            subprocess.check_call(['gpg', '--homedir', d, *args])
+
+        gpg('--quiet', '--import', gpgkey)
+
+        sig_counter = {}
+        # peel off the '/staging' bit
+        final_sigstore_prefix = os.path.dirname(sigstore_prefix)
+        for f in files_to_upload:
+            stg_s3_key = os.path.join(sigstore_prefix, f['filename'])
+            stg_sig_s3_key = stg_s3_key + '.sig'
+
+            tmp_sig_path = os.path.join(d, f['filename'] + '.sig')
+            print(f"Downloading s3://{sigstore_bucket}/{stg_sig_s3_key}")
+            s3.download_file(sigstore_bucket, stg_sig_s3_key, tmp_sig_path)
+            s3.delete_object(Bucket=sigstore_bucket, Key=stg_s3_key)
+            s3.delete_object(Bucket=sigstore_bucket, Key=stg_sig_s3_key)
+
+            print(f"Verifying detached signature for {f['path']}")
+            try:
+                gpg('--verify', tmp_sig_path, f['path'])
+            except subprocess.CalledProcessError as e:
+                # allow unknown signatures in stg
+                if fedenv != 'stg':
+                    raise e
+
+            # This is where the magic happens: starting from a detached
+            # signature, we merge it with the original payload to create a
+            # cleartext signed message so it's a single artifact like c/image
+            # expects.
+            # See also: https://github.com/containers/container-libs/pull/307
+            with open(tmp_sig_path, 'rb') as fp:
+                armored_sig = subprocess.check_output(['gpg', '--homedir', d, '--enarmor'], input=fp.read())
+            armored_sig = str(armored_sig, encoding='utf-8')
+
+            # not strictly required, but looks more like a usual cleartext signature
+            armored_sig = armored_sig.replace('ARMORED FILE', 'SIGNATURE')
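+            # i.e. "-----BEGIN PGP ARMORED FILE-----" becomes
+            # "-----BEGIN PGP SIGNATURE-----"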
+
+            with open(f['path'], 'r') as fp:
+                original_content = fp.read()
+
+            signed_message = "-----BEGIN PGP SIGNED MESSAGE-----\n"
+            # Right now, we assume Robosignatory (really Sigul) uses SHA256;
+            # in theory we could parse the signature and get the digest algo
+            # that was used, but it seems unlikely that Sigul will change this
+            # before it's sunset, at which point we would've already moved on
+            # from this code. If it does, here's one way to do it: call `gpg
+            # --list-packets`, look for 'digest algo N', and convert N to the
+            # right string based on
+            # https://github.com/gpg/gnupg/blob/6771ed4c13226ea8f410d022fa83888930070f70/common/openpgpdefs.h#L185
+            signed_message += "Hash: SHA256\n\n"
+            signed_message += original_content + "\n"
+            signed_message += armored_sig
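+            # The result has the usual cleartext-signed shape:
+            #   -----BEGIN PGP SIGNED MESSAGE-----
+            #   Hash: SHA256
+            #
+            #   {"critical": {...}, "optional": {...}}
+            #   -----BEGIN PGP SIGNATURE-----
+            #   ...
+            #   -----END PGP SIGNATURE-----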
+
+            # just overwrite the original payload; we don't need it anymore
+            with open(f['path'], 'w') as fp:
+                fp.write(signed_message)
+
+            print(f"Verifying cleartext signature {f['path']}")
+            try:
+                gpg('--verify', f['path'])
+            except subprocess.CalledProcessError as e:
+                # allow unknown signatures in stg
+                if fedenv != 'stg':
+                    raise e
+
+            # tell c/image that it's a valid signature
+            # https://github.com/containers/container-libs/blob/58b82c921fde7dafbc0da766f1037602cfd5553c/image/internal/signature/signature.go#L66
+            signed_message = b'\x00simple-signing\n' + bytes(signed_message, encoding='utf-8')
+            with open(f['path'], 'wb') as fp:
+                fp.write(signed_message)
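+            # (the final blob on disk is b'\x00simple-signing\n' followed by
+            # the cleartext-signed message built above)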
+
+            image_repo = f['identity']
+            # e.g. "quay.io/fedora/fedora-coreos:stable" -> "fedora/fedora-coreos"
+            _, image_repo = image_repo.split('/', 1)
+            image_repo, _ = image_repo.split(':')
+
+            # we need to match the format in https://github.com/containers/container-libs/blob/310afd427d1eef3bdcfbcf8a2af7cac2021c8a76/image/docker/registries_d.go#L301
+            sig_prefix = f"{image_repo}@{f['digest'].replace(':', '=')}"
+            sig_number = sig_counter.get(sig_prefix, 0) + 1
+            sig_counter[sig_prefix] = sig_number
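+            # e.g. signatures for "fedora/fedora-coreos@sha256=<digest>" land
+            # at signature-1, signature-2, ... (one per tag pointing at that
+            # digest)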
+
+            # upload to final location and make public
+            final_s3_key = os.path.join(final_sigstore_prefix, sig_prefix, f"signature-{sig_number}")
+            print(f"Uploading {f['path']} to s3://{sigstore_bucket}/{final_s3_key}")
+            s3.upload_file(f['path'], sigstore_bucket, final_s3_key, ExtraArgs={'ACL': 'public-read'})
+
+
 if __name__ == '__main__':
     sys.exit(main())