
Commit c3f7832

jobs/sync-stream-metadata: send Slack message on failure
This unassuming job is actually critical to the release pipeline flow. We have checkbox items in the release checklist to make sure that it ran correctly, but for other things (e.g. reverting a rollout, or adding release notes), there's zero visibility into whether the sync actually happened unless one goes and checks the job. Let's just always send a Slack notification if the job fails.

This was prompted by noticing only by pure chance that the job recently failed because of coreos/fedora-coreos-streams#924 (I happened to be looking at it for another reason).

Patch best viewed with whitespace ignored.
1 parent: dc58259 · commit: c3f7832
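The shape of the fix is the standard Jenkins scripted-pipeline wrapper: the whole job body goes inside a try/catch/finally so that any failure, wherever it surfaces, ends in a notification. A minimal sketch of the pattern as used in the patch below; the job body and message text here are placeholders, and pipeutils.trySlackSend is this repo's own best-effort Slack helper:

// Sketch of the notify-on-failure wrapper this commit adds.
try {
    // ... the actual job body goes here ...
} catch (e) {
    // Record the failure explicitly so the finally block can see it,
    // then re-throw so Jenkins still marks the run as failed.
    currentBuild.result = 'FAILURE'
    throw e
} finally {
    // This block runs on success, failure, and abort alike; only
    // notify when the run is not known to have succeeded.
    if (currentBuild.result != 'SUCCESS') {
        pipeutils.trySlackSend(message: "sync-stream-metadata #${env.BUILD_NUMBER} failed")
    }
}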

jobs/sync-stream-metadata.Jenkinsfile

Lines changed: 45 additions & 36 deletions
@@ -19,47 +19,56 @@ properties([
 ])
 
 cosaPod() {
-    git(url: 'https://github.com/coreos/fedora-coreos-streams',
-        branch: 'main', credentialsId: 'github-coreosbot-token-username-password')
-    withCredentials([file(credentialsId: 'aws-build-upload-config', variable: 'AWS_CONFIG_FILE')]) {
-        def production_streams = pipeutils.streams_of_type(pipecfg, 'production')
+    try {
+        git(url: 'https://github.com/coreos/fedora-coreos-streams',
+            branch: 'main', credentialsId: 'github-coreosbot-token-username-password')
+        withCredentials([file(credentialsId: 'aws-build-upload-config', variable: 'AWS_CONFIG_FILE')]) {
+            def production_streams = pipeutils.streams_of_type(pipecfg, 'production')
 
-        // NB: we don't use `aws s3 sync` here because it's timestamp-based and
-        // so our fresh git clone will always seem newer and always get
-        // uploaded. Instead, we manually copy in the S3 versions, check if
-        // they're different from the checkout, and copy out the new versions
-        // if so
-        production_streams.each{stream ->
-            for (subdir in ["streams", "updates"]) {
-                shwrap("aws s3 cp s3://${pipecfg.s3.bucket}/${subdir}/${stream}.json ${subdir}/${stream}.json")
-            }
-            if (shwrapRc("git diff --exit-code") != 0) {
-                shwrap("git reset --hard HEAD")
+            // NB: we don't use `aws s3 sync` here because it's timestamp-based and
+            // so our fresh git clone will always seem newer and always get
+            // uploaded. Instead, we manually copy in the S3 versions, check if
+            // they're different from the checkout, and copy out the new versions
+            // if so
+            production_streams.each{stream ->
                 for (subdir in ["streams", "updates"]) {
-                    // NB: this metadata by definition is always public
-                    shwrap("""
-                        aws s3 cp --acl public-read --cache-control 'max-age=60' \
-                            ${subdir}/${stream}.json s3://${pipecfg.s3.bucket}/${subdir}/${stream}.json
-                    """)
+                    shwrap("aws s3 cp s3://${pipecfg.s3.bucket}/${subdir}/${stream}.json ${subdir}/${stream}.json")
                 }
-                pipeutils.tryWithMessagingCredentials() {
-                    shwrap("""
-                        /usr/lib/coreos-assembler/fedmsg-broadcast --fedmsg-conf=\${FEDORA_MESSAGING_CONF} \
-                            stream.metadata.update --stream ${stream}
-                    """)
+                if (shwrapRc("git diff --exit-code") != 0) {
+                    shwrap("git reset --hard HEAD")
+                    for (subdir in ["streams", "updates"]) {
+                        // NB: this metadata by definition is always public
+                        shwrap("""
+                            aws s3 cp --acl public-read --cache-control 'max-age=60' \
+                                ${subdir}/${stream}.json s3://${pipecfg.s3.bucket}/${subdir}/${stream}.json
+                        """)
+                    }
+                    pipeutils.tryWithMessagingCredentials() {
+                        shwrap("""
+                            /usr/lib/coreos-assembler/fedmsg-broadcast --fedmsg-conf=\${FEDORA_MESSAGING_CONF} \
+                                stream.metadata.update --stream ${stream}
+                        """)
+                    }
                 }
+                // Currently, we always re-upload release notes. We don't want to
+                // falsely emit a stream.metadata.update message when only release
+                // notes changed, and also the way change detection works above
+                // doesn't mesh well with freshly regenerated data.
+                // NB: this metadata by definition is always public
+                shwrap("""
+                    python3 -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin.read()), sys.stdout)' \
+                        < release-notes/${stream}.yml > release-notes/${stream}.json
+                    aws s3 cp --acl public-read --cache-control 'max-age=60' \
+                        release-notes/${stream}.json s3://${pipecfg.s3.bucket}/release-notes/${stream}.json
+                """)
             }
-            // Currently, we always re-upload release notes. We don't want to
-            // falsely emit a stream.metadata.update message when only release
-            // notes changed, and also the way change detection works above
-            // doesn't mesh well with freshly regenerated data.
-            // NB: this metadata by definition is always public
-            shwrap("""
-                python3 -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin.read()), sys.stdout)' \
-                    < release-notes/${stream}.yml > release-notes/${stream}.json
-                aws s3 cp --acl public-read --cache-control 'max-age=60' \
-                    release-notes/${stream}.json s3://${pipecfg.s3.bucket}/release-notes/${stream}.json
-            """)
+        }
+    } catch (e) {
+        currentBuild.result = 'FAILURE'
+        throw e
+    } finally {
+        if (currentBuild.result != 'SUCCESS') {
+            pipeutils.trySlackSend(message: ":arrow_up: sync-stream-metadata #${env.BUILD_NUMBER} <${env.BUILD_URL}|:jenkins:> <${env.RUN_DISPLAY_URL}|:ocean:>")
         }
     }
 }
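The change-detection trick described in the NB comment above, distilled: instead of trusting timestamps (aws s3 sync would always consider the fresh clone newer and re-upload everything), the job overwrites the checkout with the currently published S3 copies and lets git report whether anything actually differs. A sketch for a single stream; "stable" is just an example name here, and shwrap/shwrapRc are assumed to be coreos-assembler's shell helpers, with shwrapRc returning the exit code rather than aborting on failure:

// Sketch: timestamp-free change detection for one stream.
def stream = "stable"  // example stream name, not from the patch
for (subdir in ["streams", "updates"]) {
    // Overwrite the fresh checkout with what's currently published...
    shwrap("aws s3 cp s3://${pipecfg.s3.bucket}/${subdir}/${stream}.json ${subdir}/${stream}.json")
}
// ...then let git be the arbiter: `git diff --exit-code` exits non-zero
// exactly when the published copies differ from what's in git.
if (shwrapRc("git diff --exit-code") != 0) {
    // The checkout is the source of truth: discard the S3 copies and
    // upload the git versions back out.
    shwrap("git reset --hard HEAD")
    // ... per-subdir `aws s3 cp` uploads and the fedmsg broadcast follow ...
}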
