src/cmd-sign (29 changes: 16 additions & 13 deletions)
@@ -382,26 +382,31 @@ def robosign_oci(args, s3, build, gpgkey):
             files_to_upload.append({'path': path, 'filename': filename,
                                     'identity': identity, 'digest': digest})

-    # Upload them to S3. We upload to `staging/` first, and then will move
-    # them to their final location once they're verified.
+    # work with older releases; we may want to sign some of them
+    if 'ref' in build:
+        _, stream = build['ref'].rsplit('/', 1)
+    else:  # let fail if this is somehow missing
+        stream = build["coreos-assembler.oci-imported-labels"]["fedora-coreos.stream"]
+
+    # Upload them to S3. We upload to `staging/$stream` first, and then will
+    # move them to their final location once they're verified.
     sigstore_bucket, sigstore_prefix = get_bucket_and_prefix(args.s3_sigstore)
-    sigstore_prefix = os.path.join(sigstore_prefix, 'staging')
+    sigstore_staging = os.path.join(sigstore_prefix, 'staging', stream)

     # First, empty out staging/ so we don't accumulate cruft over time
     # https://stackoverflow.com/a/59026702
-    # Note this assumes we don't run in parallel on the same sigstore
-    # target, which is the case for us since only one release job can run at
-    # a time per-stream and the S3 target location is stream-based.
-    staging_objects = s3.list_objects_v2(Bucket=sigstore_bucket, Prefix=sigstore_prefix)
+    # Note the staging directory is per-stream so that we can handle
+    # running in parallel across different streams.
+    staging_objects = s3.list_objects_v2(Bucket=sigstore_bucket, Prefix=sigstore_staging)
     objects_to_delete = [{'Key': obj['Key']} for obj in staging_objects.get('Contents', [])]
     if len(objects_to_delete) > 0:
-        print(f'Deleting {len(objects_to_delete)} stale files')
+        print(f'Deleting {len(objects_to_delete)} stale files in staging')
         s3.delete_objects(Bucket=sigstore_bucket, Delete={'Objects': objects_to_delete})

     # now, upload the ones we want
     artifacts = []
     for f in files_to_upload:
-        s3_key = os.path.join(sigstore_prefix, f['filename'])
+        s3_key = os.path.join(sigstore_staging, f['filename'])
         print(f"Uploading s3://{sigstore_bucket}/{s3_key}")
         s3.upload_file(f['path'], sigstore_bucket, s3_key)
         artifacts.append({
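For context, the new staging layout keys off the stream name derived above. A minimal sketch of that derivation and the resulting prefix, assuming a ref shaped like fedora/x86_64/coreos/stable and a hypothetical sigstore prefix (the helper name is illustrative, not from the source):

import os

def staging_prefix_for(build, sigstore_prefix):
    # Derive the stream from the ref when present (its last path
    # component), falling back to the OCI-imported labels as the
    # diff does for older releases.
    if 'ref' in build:
        _, stream = build['ref'].rsplit('/', 1)
    else:
        stream = build["coreos-assembler.oci-imported-labels"]["fedora-coreos.stream"]
    return os.path.join(sigstore_prefix, 'staging', stream)

# staging_prefix_for({'ref': 'fedora/x86_64/coreos/stable'}, 'sigstore')
# -> 'sigstore/staging/stable'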
@@ -435,10 +440,8 @@ def robosign_oci(args, s3, build, gpgkey):
         gpg('--quiet', '--import', gpgkey)

         sig_counter = {}
-        # peel off the '/staging' bit
-        final_sigstore_prefix = os.path.dirname(sigstore_prefix)
         for f in files_to_upload:
-            stg_s3_key = os.path.join(sigstore_prefix, f['filename'])
+            stg_s3_key = os.path.join(sigstore_staging, f['filename'])
             stg_sig_s3_key = stg_s3_key + '.sig'

             tmp_sig_path = os.path.join(d, f['filename'] + '.sig')
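Since sigstore_staging now carries the staging/$stream suffix itself, the old os.path.dirname() trick for peeling /staging back off is no longer needed; the final key is built straight from the untouched sigstore_prefix. A sketch of the two key shapes (the filename and sig_prefix values are hypothetical):

import os

sigstore_prefix = 'sigstore'  # hypothetical, from args.s3_sigstore
sigstore_staging = os.path.join(sigstore_prefix, 'staging', 'stable')

filename = 'quay.io_fedora_fedora-coreos.sig'  # hypothetical
stg_s3_key = os.path.join(sigstore_staging, filename)
# -> 'sigstore/staging/stable/quay.io_fedora_fedora-coreos.sig'

sig_prefix = 'fedora/fedora-coreos@sha256=abc123'  # hypothetical
final_s3_key = os.path.join(sigstore_prefix, sig_prefix, 'signature-1')
# -> 'sigstore/fedora/fedora-coreos@sha256=abc123/signature-1'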
@@ -511,7 +514,7 @@ def robosign_oci(args, s3, build, gpgkey):
             sig_counter[sig_prefix] = sig_number

             # upload to final location and make public
-            final_s3_key = os.path.join(final_sigstore_prefix, sig_prefix, f"signature-{sig_number}")
+            final_s3_key = os.path.join(sigstore_prefix, sig_prefix, f"signature-{sig_number}")
             print(f"Uploading {f['path']} to s3://{sigstore_bucket}/{final_s3_key}")
             s3.upload_file(f['path'], sigstore_bucket, final_s3_key, ExtraArgs={'ACL': 'public-read'})

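Net effect: the cleanup step only lists and deletes under the current stream's staging prefix, so release jobs for different streams can run in parallel without clobbering each other's staged signatures. An illustrative boto3 sketch of that property (bucket and keys hypothetical; a single list_objects_v2 page is assumed, as in the original code):

import boto3

s3 = boto3.client('s3')

# A job for the 'stable' stream only sees its own staging prefix;
# a concurrent 'testing' job under 'sigstore/staging/testing/' is
# never touched by this listing or the delete below.
resp = s3.list_objects_v2(Bucket='my-sigstore-bucket',
                          Prefix='sigstore/staging/stable/')
stale = [{'Key': obj['Key']} for obj in resp.get('Contents', [])]
if stale:
    s3.delete_objects(Bucket='my-sigstore-bucket',
                      Delete={'Objects': stale})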