Skip to content

Commit f9110ae

Browse files
committed
jobs/garbage-collection: Add pruning job
Added a GC job which, for a given stream, performs pruning operations based on the gc-policy.yaml file
1 parent 3d66eef commit f9110ae

File tree

3 files changed

+120
-1
lines changed

3 files changed

+120
-1
lines changed

gc-policy.yaml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
branched:
2+
cloud-uploads: 3y
3+
rawhide:
4+
cloud-uploads: 3y
Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
// Script-level initialization: run on an agent to check out the pipeline
// repo and load shared utilities and config into script-global variables
// used throughout the rest of this Jenkinsfile. Order matters: the checkout
// must happen before load("utils.groovy") can find the file.
node {
    checkout scm
    // these are script global vars
    pipeutils = load("utils.groovy")
    pipecfg = pipeutils.load_pipecfg()
    gc_policy_data = pipeutils.load_gc()
    repo = "coreos/fedora-coreos-config"
}
9+
10+
// This job runs the Garbage collection on the selected stream in accordance with the
// gc-policy.yaml available in https://github.com/coreos/fedora-coreos-pipeline. If
// the pruning step succeeds, it uploads the updated builds/builds.json to s3.
properties([
    pipelineTriggers([]),
    parameters([
      choice(name: 'STREAM',
             choices: pipeutils.get_streams_choices(pipecfg),
             description: 'CoreOS stream to run GC'),
      // Defaults to true so an accidental trigger only prints; a real
      // pruning run must explicitly opt out of the dry run.
      booleanParam(name: 'DRY_RUN',
                   defaultValue: true,
                   description: 'Only print what would be deleted')
    ])
])

// Short tag used in the Jenkins build description, e.g. "[stable]".
def build_description = "[${params.STREAM}]"
// COSA container image used for the pod running all stages below.
def cosa_img = 'quay.io/coreos-assembler/coreos-assembler:main'
// Per-stream environment variables injected into the container.
def container_env = pipeutils.get_env_vars_for_stream(pipecfg, params.STREAM)
// S3 prefix under which this stream's builds live.
def s3_stream_dir = pipeutils.get_s3_streams_dir(pipecfg, params.STREAM)
// Flag string passed through to `cosa cloud-prune`; empty for a real run.
def dry_run = params.DRY_RUN ? "--dry-run" : ""
31+
// Serialize GC per stream: two concurrent runs against the same stream
// would race on builds/builds.json and on cloud resources being pruned.
lock(resource: "gc-${params.STREAM}") {
    cosaPod(image: cosa_img, env: container_env,
            memory: "1024Mi",
            serviceAccount: "jenkins") {
    try {
        currentBuild.description = "${build_description} Running"

        stage('Init') {
            // Initialize a COSA workdir tracking the stream's config branch.
            def branch = params.STREAM
            sh "cosa init --branch \"${branch}\" https://github.com/${repo}"
        }

        // Write YAML data to a new file in cosaPod
        // (the policy was loaded on the Jenkins controller in the node block
        // above; the pod is a separate workspace, so re-materialize it here).
        def new_gc_policy_path = 'tmp/gc-policy.yaml'
        writeYaml file: new_gc_policy_path, data: gc_policy_data

        stage('BuildFetch') {
            // Pull the stream's build metadata from S3 so cloud-prune can
            // operate on a local builds/builds.json.
            pipeutils.shwrapWithAWSBuildUploadCredentials("""
            cosa buildfetch --aws-config-file \${AWS_BUILD_UPLOAD_CONFIG} \
                --stream ${params.STREAM} --url=s3://${s3_stream_dir}/builds
            """)
        }
        // Snapshot the pre-GC timestamp; compared after pruning to decide
        // whether builds.json actually changed and needs re-uploading.
        def originalBuildsJson = readJSON file: 'builds/builds.json'
        def originalTimestamp = originalBuildsJson.timestamp

        // GCP key is only needed for the prune step itself; keep the
        // credential's scope as narrow as possible.
        withCredentials([file(variable: 'GCP_KOLA_TESTS_CONFIG', credentialsId: 'gcp-image-upload-config')]) {
            def acl = pipecfg.s3.acl ?: 'public-read'

            stage('Garbage Collection') {
                // Prune cloud resources (and update local builds.json) per
                // the policy file; honors --dry-run when requested.
                pipeutils.shwrapWithAWSBuildUploadCredentials("""
                cosa cloud-prune --policy ${new_gc_policy_path} \
                    --stream ${params.STREAM} ${dry_run} \
                    --gcp-json-key=\${GCP_KOLA_TESTS_CONFIG} \
                    --acl=${acl} \
                    --aws-config-file \${AWS_BUILD_UPLOAD_CONFIG}
                """)
            }
        }

        def currentBuildsJson = readJSON file: 'builds/builds.json'
        def currentTimestamp = currentBuildsJson.timestamp

        // If the timestamp on builds.json after the 'Garbage Collection' step
        // is the same as before, that means, there were no resources to be pruned
        // and hence, no need to update the builds.json.
        if (originalTimestamp != currentTimestamp) {
            // Nested lock for the Upload Builds JSON step
            // (shared with utils.groovy's bump_builds_json() so GC and build
            // uploads never write builds.json concurrently).
            lock(resource: "builds-json-${params.STREAM}") {
                stage('Upload Builds JSON') {
                    pipeutils.shwrapWithAWSBuildUploadCredentials("""
                    cosa cloud-prune --policy ${new_gc_policy_path} \
                        --stream ${params.STREAM} \
                        --upload-builds-json ${dry_run} \
                        --aws-config-file \${AWS_BUILD_UPLOAD_CONFIG}
                    """)
                }
            }
        }
        currentBuild.result = 'SUCCESS'

    } catch (e) {
        currentBuild.result = 'FAILURE'
        throw e
    } finally {
        // Notify Slack only when the run did not succeed.
        if (currentBuild.result != 'SUCCESS') {
            pipeutils.trySlackSend(message: ":wastebasket: garbage-collection #${env.BUILD_NUMBER} <${env.BUILD_URL}|:jenkins:> <${env.RUN_DISPLAY_URL}|:ocean:> [${params.STREAM}]")
        }
    }
}
}

utils.groovy

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,21 @@ def validate_pipecfg(pipecfg, is_hotfix) {
9292
}
9393
}
9494

95+
// Load the GC policy (gc-policy.yaml) as parsed YAML data.
//
// Where the file lives depends on the 'pipecfg-url' Jenkins config: with
// 'in-tree' it sits in this repo's workdir; otherwise it is expected under
// the pipecfg/ checkout. For the external case we assume the caller already
// ran `load_pipecfg()` in this workdir; if not, readYaml will error out.
def load_gc() {
    def jenkinscfg = load_jenkins_config()
    def in_tree = (jenkinscfg['pipecfg-url'] == 'in-tree')
    def policy_file = in_tree ? "gc-policy.yaml" : "pipecfg/gc-policy.yaml"
    return readYaml(file: policy_file)
}
109+
95110
def add_hotfix_parameters_if_supported() {
96111
def supported = true
97112
if (env.JENKINS_URL in PROTECTED_JENKINSES) {
@@ -200,7 +215,7 @@ def bump_builds_json(stream, buildid, arch, s3_stream_dir, acl) {
200215
// unlock
201216
//
202217
// XXX: should fold this into `cosa buildupload` somehow
203-
lock(resource: "bump-builds-json-${stream}") {
218+
lock(resource: "builds-json-${stream}") {
204219
def remotejson = "s3://${s3_stream_dir}/builds/builds.json"
205220
aws_s3_cp_allow_noent(remotejson, './remote-builds.json')
206221
shwrap("""

0 commit comments

Comments
 (0)