@@ -126,10 +126,11 @@ def main():
         build_id = build["id"]
         build_date = parse_fcos_version_to_timestamp(build_id)
         actions_completed = []
+        print(f"Processing build {build_id}")
 
         # For each build, iterate over arches first to minimize downloads of meta.json per arch
         for arch in build["arches"]:
-            print(f"Processing {arch} for build {build_id}")
+            print(f"\tProcessing {arch} for build {build_id}")
             meta_prefix = os.path.join(prefix, f"{build_id}/{arch}/meta.json")
             meta_json = get_json_from_s3(s3_client, bucket, meta_prefix)  # Download meta.json once per arch
             images = get_supported_images(meta_json)
@@ -152,17 +153,17 @@ def main():
                     # this type run for this build. For all types except `images` we
                     # can just continue.
                     if action != "images":
-                        print(f"Build {build_id} has already had {action} pruning completed")
+                        print(f"\t\tBuild {build_id} has already had {action} pruning completed")
                         continue
                     # OK `images` has been pruned before, but we need to check
                     # that all the images were pruned that match the current policy.
                     # i.e. there may be additional images we need prune
                     elif set(images_to_keep) == set(previous_cleanup.get("images-kept", [])):
-                        print(f"Build {build_id} has already had {action} pruning completed")
+                        print(f"\t\tBuild {build_id} has already had {action} pruning completed")
                         continue
 
                 # Pruning actions based on type
-                print(f"Pruning {arch} {action} for {build_id}")
+                print(f"\t\t{arch} {action} for {build_id}")
                 match action:
                     case "cloud-uploads":
                         prune_cloud_uploads(current_build, cloud_config, args.dry_run)
@@ -180,15 +181,15 @@ def main():
                         if arch == "x86_64":
                             if build_id in barrier_releases:
                                 # Since containers are used for updates we need to keep around containers for barrier releases.
-                                print(f"Release {build_id} is a barrier release. Skipping container prune.")
+                                print(f"\t\t\tRelease {build_id} is a barrier release. Skipping container prune.")
                                 continue
                             # Retrieve container tags excluding the stream name since it updates with each release.
                             container_tags, container_repo = get_container_tags(meta_json, exclude=[stream])
                             if container_tags:
                                 for tag in container_tags:
                                     prune_container(tag, args.dry_run, container_repo, args.registry_auth_file)
                             else:
-                                print(f"No container tags to prune for build {build_id}.")
+                                print(f"\t\t\tNo container tags to prune for build {build_id}.")
                 actions_completed.append(action)  # Append action to completed list
                 # Only add policy-cleanup for the build in builds.json if any
                 # of the cleanup actions were completed.
@@ -337,7 +338,7 @@ def prune_cloud_uploads(build, cloud_config, dry_run):
     errors.extend(delete_gcp_image(build, cloud_config, dry_run))
 
     if errors:
-        print(f"Found errors when removing cloud-uploads for {build.id}:")
+        print(f"\t\t\tFound errors when removing cloud-uploads for {build.id}:")
         for e in errors:
             print(e)
         raise Exception("Some errors were encountered")
@@ -348,7 +349,7 @@ def deregister_aws_amis(build, cloud_config, dry_run):
     aws_credentials = cloud_config.get("aws", {}).get("credentials")
     amis = build.images.get("amis")
     if not amis:
-        print(f"No AMI/Snapshot to prune for {build.id} for {build.arch}")
+        print(f"\t\t\tNo AMI/Snapshot to prune for {build.id} for {build.arch}")
         return errors
     for ami in amis:
         region_name = ami.get("name")
@@ -357,7 +358,7 @@ def deregister_aws_amis(build, cloud_config, dry_run):
         # then let's instruct ore to detect the snapshot ID from the AMI.
         snapshot_id = ami.get("snapshot", "detectFromAMI")
         if dry_run:
-            print(f"Would delete {ami_id} and {snapshot_id} for {build.id}")
+            print(f"\t\t\tWould delete {ami_id} and {snapshot_id} for {build.id}")
             continue
         if (ami_id or snapshot_id) and region_name:
             try:
@@ -373,13 +374,13 @@ def delete_gcp_image(build, cloud_config, dry_run):
     errors = []
     gcp = build.images.get("gcp")
     if not gcp:
-        print(f"No GCP image to prune for {build.id} for {build.arch}")
+        print(f"\t\t\tNo GCP image to prune for {build.id} for {build.arch}")
         return errors
     gcp_image = gcp.get("image")
     project = gcp.get("project") or "fedora-coreos-cloud"
     json_key = cloud_config.get("gcp", {}).get("json-key")
     if dry_run:
-        print(f"Would delete {gcp_image} GCP image for {build.id}")
+        print(f"\t\t\tWould delete {gcp_image} GCP image for {build.id}")
     elif gcp_image and json_key and project:
         try:
             remove_gcp_image(gcp_image, json_key, project)
@@ -400,18 +401,18 @@ def prune_images(s3, build, images_to_keep, dry_run, bucket, prefix):
         if name not in images_to_keep:
             image_prefix = os.path.join(prefix, f"{build.id}/{build.arch}/{path}")
             if dry_run:
-                print(f"Would prune {bucket}/{image_prefix}")
+                print(f"\t\t\tWould prune {bucket}/{image_prefix}")
             else:
                 try:
                     s3.delete_object(Bucket=bucket, Key=image_prefix)
-                    print(f"Pruned {name} image for {build.id} for {build.arch}")
+                    print(f"\t\t\tPruned {name} image for {build.id} for {build.arch}")
                 except botocore.exceptions.ClientError as e:
                     if e.response['Error']['Code'] == 'NoSuchKey':
-                        print(f"{bucket}/{image_prefix} already pruned.")
+                        print(f"\t\t\t{bucket}/{image_prefix} already pruned.")
                     else:
                         errors.append(e)
     if errors:
-        print(f"Found errors when pruning images for {build.id}:")
+        print(f"\t\t\tFound errors when pruning images for {build.id}:")
         for e in errors:
             print(e)
         raise Exception("Some errors were encountered")
@@ -420,7 +421,7 @@ def prune_images(s3, build, images_to_keep, dry_run, bucket, prefix):
 def prune_build(s3_client, bucket, prefix, build_id, dry_run):
     build_prefix = os.path.join(prefix, f"{build_id}/")
     if dry_run:
-        print(f"Would delete all resources in {bucket}/{build_prefix}.")
+        print(f"\t\t\tWould delete all resources in {bucket}/{build_prefix}.")
     else:
         try:
             # List all objects under the specified prefix
@@ -430,12 +431,12 @@ def prune_build(s3_client, bucket, prefix, build_id, dry_run):
                 delete_keys = [{'Key': obj['Key']} for obj in objects_to_delete['Contents']]
                 # Delete objects
                 s3_client.delete_objects(Bucket=bucket, Delete={'Objects': delete_keys})
-                print(f"Pruned {build_id} completely from {bucket}/{build_prefix}.")
+                print(f"\t\t\tPruned {build_id} completely from {bucket}/{build_prefix}.")
             else:
-                print(f"No objects found to delete in {bucket}/{build_prefix}.")
+                print(f"\t\t\tNo objects found to delete in {bucket}/{build_prefix}.")
         except botocore.exceptions.ClientError as e:
             if e.response['Error']['Code'] == 'NoSuchKey':
-                print(f"{bucket}/{build_prefix} already pruned or doesn't exist.")
+                print(f"\t\t\t{bucket}/{build_prefix} already pruned or doesn't exist.")
             else:
                 raise Exception(f"Error pruning {build_id}: {e.response['Error']['Message']}")
 
@@ -452,7 +453,7 @@ def get_container_tags(meta_json, exclude):
 
 def prune_container(tag, dry_run, container_repo, registry_auth_file):
     if dry_run:
-        print(f"Would prune image {container_repo}:{tag}")
+        print(f"\t\t\tWould prune image {container_repo}:{tag}")
     else:
         skopeo_delete(container_repo, tag, registry_auth_file)
 
@@ -478,7 +479,7 @@ def skopeo_inspect(repo, tag, auth):
 
         # Exit code 2 indicates the image tag does not exist. We will consider it as pruned.
         if exit_code == 2:
-            print(f"Skipping deletion for {repo}:{tag} since the tag does not exist.")
+            print(f"\t\t\tSkipping deletion for {repo}:{tag} since the tag does not exist.")
             return False
         else:
             # Handle other types of errors
@@ -492,7 +493,7 @@ def skopeo_delete(repo, image, auth):
         skopeo_args.extend(["--authfile", auth])
     try:
         subprocess.check_output(skopeo_args, stderr=subprocess.STDOUT)
-        print(f"Image {repo}:{image} deleted successfully.")
+        print(f"\t\t\tImage {repo}:{image} deleted successfully.")
     except subprocess.CalledProcessError as e:
         # Throw an exception in case the delete command fail despite the image existing
         raise Exception("An error occurred during deletion:", e.output.decode("utf-8"))
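
The change indents every progress message by nesting level: no tabs for the per-build line, one tab for per-arch lines, two for per-action lines, and three for per-item detail. A minimal sketch of how that convention could be centralized in a single helper; the `log` name and `depth` parameter are illustrative assumptions, not part of this commit:

# Illustrative sketch only (not part of this commit): one possible way to
# centralize the tab-based indentation instead of repeating "\t", "\t\t",
# "\t\t\t" in every print call.
def log(msg, depth=0):
    # depth 0 = build, 1 = arch, 2 = action, 3 = per-item detail
    print("\t" * depth + msg)

# e.g. log(f"Processing build {build_id}")
#      log(f"Processing {arch} for build {build_id}", depth=1)
#      log(f"Pruned {name} image for {build.id} for {build.arch}", depth=3)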