 
 import argparse
 import json
+import subprocess
 from urllib.parse import urlparse
 import pytz
+import requests
 import yaml
 import collections
 import datetime
@@ -59,6 +61,8 @@ CACHE_MAX_AGE_METADATA = 60 * 5
 # is up to date.
 SUPPORTED = ["amis", "gcp"]
 UNSUPPORTED = ["aliyun", "azure", "ibmcloud", "powervs"]
+# list of known streams with containers
+STREAMS = {"next", "testing", "stable", "next-devel", "testing-devel", "rawhide", "branched"}
 
 
 def parse_args():
@@ -70,6 +74,8 @@ def parse_args():
     parser.add_argument("--gcp-json-key", help="GCP Service Account JSON Auth", default=os.environ.get("GCP_JSON_AUTH"))
     parser.add_argument("--acl", help="ACL for objects", action='store', default='private')
     parser.add_argument("--aws-config-file", default=os.environ.get("AWS_CONFIG_FILE"), help="Path to AWS config file")
+    parser.add_argument("--registry-auth-file", default=os.environ.get("REGISTRY_AUTH_FILE"),
+                        help="Path to docker registry auth file. Directly passed to skopeo.")
     return parser.parse_args()
 
 
@@ -110,6 +116,12 @@ def main():
     builds = builds_json_data["builds"]
     pruned_build_ids = []
     images_to_keep = policy.get(stream, {}).get("images-keep", [])
+    barrier_releases = set()
+    # Get the update graph for the production streams
+    if stream in ['stable', 'testing', 'next']:
+        update_graph = get_update_graph(stream)['releases']
+        # Keep only the barrier releases
+        barrier_releases = set([release["version"] for release in update_graph if "barrier" in release])
 
     # Iterate through builds from oldest to newest
     for build in reversed(builds):
@@ -125,7 +137,7 @@ def main():
             current_build = Build(id=build_id, images=images, arch=arch, meta_json=meta_json)
 
             # Iterate over actions (policy types) to apply pruning
-            for action in ['cloud-uploads', 'images', 'build']:
+            for action in ['cloud-uploads', 'images', 'build', 'containers']:
                 if action not in policy[stream]:
                     continue
                 action_duration = convert_duration_to_days(policy[stream][action])
@@ -162,7 +174,22 @@ def main():
                     case "build":
                         prune_build(s3_client, bucket, prefix, build_id, args.dry_run)
                         pruned_build_ids.append(build_id)
-
+                    case "containers":
+                        # Our containers are manifest-listed, which means deleting the container tag
+                        # for one architecture deletes it for all of them. We'll only prune for
+                        # x86_64 since it is the one architecture that exists for all builds.
+                        if arch == "x86_64":
+                            if build_id in barrier_releases:
+                                # Containers are used for updates, so we need to keep the containers for barrier releases around.
+                                print(f"Release {build_id} is a barrier release. Skipping container prune.")
+                                continue
+                            # Retrieve container tags, excluding the stream tag since it moves to each new release.
+                            container_tags, container_repo = get_container_tags(meta_json, exclude=[stream])
+                            if container_tags:
+                                for tag in container_tags:
+                                    prune_container(tag, args.dry_run, container_repo, args.registry_auth_file)
+                            else:
+                                print(f"No container tags to prune for build {build_id}.")
             # Update policy-cleanup after pruning actions for the architecture
             policy_cleanup = build.setdefault("policy-cleanup", {})
             for action in policy[stream].keys():  # Only update actions specified in policy[stream]
@@ -174,6 +201,9 @@ def main():
                         if "images" not in policy_cleanup:
                             policy_cleanup["images"] = True
                             policy_cleanup["images-kept"] = images_to_keep
+                    case "containers":
+                        if "containers" not in policy_cleanup:
+                            policy_cleanup["containers"] = True
 
     if pruned_build_ids:
         if "tombstone-builds" not in builds_json_data:
@@ -414,5 +444,63 @@ def prune_build(s3_client, bucket, prefix, build_id, dry_run):
         raise Exception(f"Error pruning {build_id}: {e.response['Error']['Message']}")
 
 
+def get_container_tags(meta_json, exclude):
+    base_oscontainer = meta_json.get("base-oscontainer")
+    if base_oscontainer:
+        tags = base_oscontainer.get("tags", [])
+        filtered_tags = [tag for tag in tags if tag not in exclude]
+        container_repo = base_oscontainer.get("image", "")
+        return filtered_tags, container_repo
+    return [], ""
+
+
+def prune_container(tag, dry_run, container_repo, registry_auth_file):
+    if dry_run:
+        print(f"Would prune image {container_repo}:{tag}")
+    else:
+        skopeo_delete(container_repo, tag, registry_auth_file)
+
+
+def get_update_graph(stream):
+    url = f"https://builds.coreos.fedoraproject.org/updates/{stream}.json"
+    r = requests.get(url, timeout=5)
+    if r.status_code != 200:
+        raise Exception(f"Could not download update graph for {stream}. HTTP {r.status_code}")
+    return r.json()
+
+
+def skopeo_inspect(repo, image, auth):
+    skopeo_args = ["skopeo", "inspect", "--no-tags", "--retry-times=10", f"docker://{repo}:{image}"]
+    if auth:
+        skopeo_args.extend(["--authfile", auth])
+    try:
+        subprocess.check_output(skopeo_args, stderr=subprocess.STDOUT)
+        return True  # Inspection succeeded
+    except subprocess.CalledProcessError as e:
+        exit_code = e.returncode
+        error_message = e.output.decode("utf-8")
+
+        # Exit code 2 indicates the image tag does not exist. We consider it already pruned.
+        if exit_code == 2:
+            print(f"Skipping deletion for {repo}:{image} since the tag does not exist.")
+            return False
+        else:
+            # Handle other types of errors
+            raise Exception(f"Inspection failed for {repo}:{image} with exit code {exit_code}: {error_message}")
+
+
+def skopeo_delete(repo, image, auth):
+    if skopeo_inspect(repo, image, auth):  # Only proceed if inspection succeeds
+        skopeo_args = ["skopeo", "delete", f"docker://{repo}:{image}"]
+        if auth:
+            skopeo_args.extend(["--authfile", auth])
+        try:
+            subprocess.check_output(skopeo_args, stderr=subprocess.STDOUT)
+            print(f"Image {repo}:{image} deleted successfully.")
+        except subprocess.CalledProcessError as e:
+            # Raise an exception in case the delete command fails despite the image existing
+            raise Exception(f"An error occurred during deletion: {e.output.decode('utf-8')}")
+
+
 if __name__ == "__main__":
     main()
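
Below is a minimal, standalone sketch of the barrier-release filtering that the new code in main() performs. The payload shape mirrors what get_update_graph() fetches from https://builds.coreos.fedoraproject.org/updates/<stream>.json; the version strings and barrier metadata here are made up for illustration.

    # Illustrative update-graph payload; real data comes from get_update_graph(stream).
    update_graph = {
        "releases": [
            {"version": "38.20230514.3.0"},                                    # hypothetical release
            {"version": "39.20231101.3.0", "barrier": {"reason": "example"}},  # hypothetical barrier
            {"version": "40.20240601.3.0"},                                    # hypothetical release
        ]
    }
    # Same filter as in main(): a release is a barrier if it carries a "barrier" key.
    barrier_releases = set([r["version"] for r in update_graph["releases"] if "barrier" in r])
    print(barrier_releases)  # -> {'39.20231101.3.0'}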
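
Similarly, here is a small standalone sketch of the tag filtering that get_container_tags() applies to meta.json: the stream-named tag is excluded (it moves to every new release), while the versioned tags are returned for pruning. The "base-oscontainer" fragment and repository name are hypothetical.

    # Hypothetical meta.json fragment; real builds populate "base-oscontainer"
    # with the pushed repository and its tags.
    meta_json = {
        "base-oscontainer": {
            "image": "quay.io/example/example-coreos",  # hypothetical repository
            "tags": ["stable", "40.20240601.3.0"],      # stream tag + versioned tag
        }
    }
    stream = "stable"
    base = meta_json.get("base-oscontainer") or {}
    tags = [t for t in base.get("tags", []) if t not in [stream]]  # same exclusion as get_container_tags()
    print(base.get("image", ""), tags)  # -> quay.io/example/example-coreos ['40.20240601.3.0']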