From 7a04e811f3d379f620514250842cda1f35c46eb8 Mon Sep 17 00:00:00 2001
From: Jonathan Lebon
Date: Wed, 10 Sep 2025 20:41:22 -0400
Subject: [PATCH 1/2] cmd-sign: simplify variable switcheroo

Create a new variable for the staging location instead of pushing the
staging path onto the prefix, only to then peel it off later to get
back to its original value.
---
 src/cmd-sign | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/src/cmd-sign b/src/cmd-sign
index 80cf584be0..831942a984 100755
--- a/src/cmd-sign
+++ b/src/cmd-sign
@@ -385,14 +385,14 @@ def robosign_oci(args, s3, build, gpgkey):
     # Upload them to S3. We upload to `staging/` first, and then will move
     # them to their final location once they're verified.
     sigstore_bucket, sigstore_prefix = get_bucket_and_prefix(args.s3_sigstore)
-    sigstore_prefix = os.path.join(sigstore_prefix, 'staging')
+    sigstore_staging = os.path.join(sigstore_prefix, 'staging')
 
     # First, empty out staging/ so we don't accumulate cruft over time
     # https://stackoverflow.com/a/59026702
     # Note this assumes we don't run in parallel on the same sigstore
     # target, which is the case for us since only one release job can run at
     # a time per-stream and the S3 target location is stream-based.
-    staging_objects = s3.list_objects_v2(Bucket=sigstore_bucket, Prefix=sigstore_prefix)
+    staging_objects = s3.list_objects_v2(Bucket=sigstore_bucket, Prefix=sigstore_staging)
     objects_to_delete = [{'Key': obj['Key']} for obj in staging_objects.get('Contents', [])]
     if len(objects_to_delete) > 0:
         print(f'Deleting {len(objects_to_delete)} stale files')
@@ -401,7 +401,7 @@ def robosign_oci(args, s3, build, gpgkey):
     # now, upload the ones we want
     artifacts = []
     for f in files_to_upload:
-        s3_key = os.path.join(sigstore_prefix, f['filename'])
+        s3_key = os.path.join(sigstore_staging, f['filename'])
         print(f"Uploading s3://{sigstore_bucket}/{s3_key}")
         s3.upload_file(f['path'], sigstore_bucket, s3_key)
         artifacts.append({
@@ -435,10 +435,8 @@ def robosign_oci(args, s3, build, gpgkey):
         gpg('--quiet', '--import', gpgkey)
 
         sig_counter = {}
-        # peel off the '/staging' bit
-        final_sigstore_prefix = os.path.dirname(sigstore_prefix)
         for f in files_to_upload:
-            stg_s3_key = os.path.join(sigstore_prefix, f['filename'])
+            stg_s3_key = os.path.join(sigstore_staging, f['filename'])
             stg_sig_s3_key = stg_s3_key + '.sig'
 
             tmp_sig_path = os.path.join(d, f['filename'] + '.sig')
@@ -511,7 +509,7 @@ def robosign_oci(args, s3, build, gpgkey):
             sig_counter[sig_prefix] = sig_number
 
             # upload to final location and make public
-            final_s3_key = os.path.join(final_sigstore_prefix, sig_prefix, f"signature-{sig_number}")
+            final_s3_key = os.path.join(sigstore_prefix, sig_prefix, f"signature-{sig_number}")
             print(f"Uploading {f['path']} to s3://{sigstore_bucket}/{final_s3_key}")
             s3.upload_file(f['path'], sigstore_bucket, final_s3_key,
                            ExtraArgs={'ACL': 'public-read'})
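To make the cleanup concrete, here is a minimal, self-contained Python
sketch of the pattern this patch removes; the prefix value is a
placeholder, not taken from cmd-sign:

    import os

    # before: one variable does double duty and is later "peeled" back
    sigstore_prefix = 'prod/sigstore'  # placeholder value
    sigstore_prefix = os.path.join(sigstore_prefix, 'staging')  # now the staging path
    final_sigstore_prefix = os.path.dirname(sigstore_prefix)    # undo: back to 'prod/sigstore'

    # after: each location keeps its own name; nothing has to be undone
    sigstore_prefix = 'prod/sigstore'
    sigstore_staging = os.path.join(sigstore_prefix, 'staging')

    assert final_sigstore_prefix == sigstore_prefix
    assert os.path.dirname(sigstore_staging) == sigstore_prefix

Keeping both names live also lets the final-upload hunk below use
sigstore_prefix directly instead of recomputing it with dirname().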
From 22655ceefb59e050b3bf76a18a32a6088565d823 Mon Sep 17 00:00:00 2001
From: Jonathan Lebon
Date: Wed, 10 Sep 2025 20:42:28 -0400
Subject: [PATCH 2/2] cmd-sign: make staging location stream-specific

We want to store all the signatures in the same location rather than
in stream-specific ones. But ideally we still want the staging
location for signing to be stream-specific so that we can safely
garbage collect stale files there without worrying about stepping on
concurrent runs.

Just pick up the stream from the metadata and use that to build the
staging location.

See also https://github.com/coreos/fedora-coreos-pipeline/pull/1218.
---
 src/cmd-sign | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/src/cmd-sign b/src/cmd-sign
index 831942a984..76b61c6367 100755
--- a/src/cmd-sign
+++ b/src/cmd-sign
@@ -382,20 +382,25 @@ def robosign_oci(args, s3, build, gpgkey):
             files_to_upload.append({'path': path, 'filename': filename,
                                     'identity': identity, 'digest': digest})
 
-    # Upload them to S3. We upload to `staging/` first, and then will move
-    # them to their final location once they're verified.
+    # work with older releases; we may want to sign some of them
+    if 'ref' in build:
+        _, stream = build['ref'].rsplit('/', 1)
+    else:  # let it fail if this is somehow missing
+        stream = build["coreos-assembler.oci-imported-labels"]["fedora-coreos.stream"]
+
+    # Upload them to S3. We upload to `staging/$stream` first, and then will
+    # move them to their final location once they're verified.
     sigstore_bucket, sigstore_prefix = get_bucket_and_prefix(args.s3_sigstore)
-    sigstore_staging = os.path.join(sigstore_prefix, 'staging')
+    sigstore_staging = os.path.join(sigstore_prefix, 'staging', stream)
 
     # First, empty out staging/ so we don't accumulate cruft over time
     # https://stackoverflow.com/a/59026702
-    # Note this assumes we don't run in parallel on the same sigstore
-    # target, which is the case for us since only one release job can run at
-    # a time per-stream and the S3 target location is stream-based.
+    # Note the staging directory is per-stream so that we can handle
+    # running in parallel across different streams.
     staging_objects = s3.list_objects_v2(Bucket=sigstore_bucket, Prefix=sigstore_staging)
     objects_to_delete = [{'Key': obj['Key']} for obj in staging_objects.get('Contents', [])]
     if len(objects_to_delete) > 0:
-        print(f'Deleting {len(objects_to_delete)} stale files')
+        print(f'Deleting {len(objects_to_delete)} stale files in staging')
         s3.delete_objects(Bucket=sigstore_bucket, Delete={'Objects': objects_to_delete})
 
     # now, upload the ones we want
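For context, a standalone sketch of the stream lookup this patch adds.
The key names ('ref', 'coreos-assembler.oci-imported-labels',
'fedora-coreos.stream') come from the diff; the dict contents are
illustrative, not real build metadata:

    # hypothetical build-metadata dicts shaped like the two cases handled above
    build_new = {'ref': 'fedora/x86_64/coreos/stable'}
    build_old = {'coreos-assembler.oci-imported-labels':
                 {'fedora-coreos.stream': 'stable'}}

    def stream_of(build):
        if 'ref' in build:
            # newer builds carry a ref; the stream is its last segment
            _, stream = build['ref'].rsplit('/', 1)
            return stream
        # older builds: fall back to the imported OCI labels; a KeyError
        # here is deliberate, as in the patch
        return build['coreos-assembler.oci-imported-labels']['fedora-coreos.stream']

    assert stream_of(build_new) == stream_of(build_old) == 'stable'

With the stream folded into sigstore_staging, the list-and-delete pass
only ever sees staging/$stream for the stream being signed, so two
release jobs on different streams cannot delete each other's files.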