Skip to content
Open
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions .github/workflows/s3-mirror.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,11 @@ env:
S3_BUCKET: solc-bin
S3_REGION: eu-central-1
CLOUDFRONT_DISTRIBUTION_ID: E1O6GT57WUFUHD
R2_BUCKET: solc-bin
R2_ACCOUNT_ID: 1fbd136205d2c780838f0d2c014bf69c
R2_ZONE: EEUR
CLOUDFLARE_ZONE_ID: d63261269eeac3abf4c1b2f52e71ae1b
CLOUDFLARE_CACHE_HOST: binaries.soliditylang-test.org

jobs:
push-to-s3:
Expand Down Expand Up @@ -56,3 +61,19 @@ jobs:
run: |
cd solc-bin/
./sync-s3.sh "$S3_BUCKET" "$CLOUDFRONT_DISTRIBUTION_ID"

- name: Configure the R2 client
run: |
aws configure set aws_access_key_id '${{ secrets.CLOUDFLARE_ACCESS_KEY_ID }}'
aws configure set aws_secret_access_key '${{ secrets.CLOUDFLARE_SECRET_ACCESS_KEY }}'

- name: Sync the R2 bucket
run: |
cd solc-bin/
./sync-r2.sh \
"$R2_ACCOUNT_ID" \
"$R2_ZONE" \
"$R2_BUCKET" \
"$CLOUDFLARE_ZONE_ID" \
"$CLOUDFLARE_CACHE_HOST" \
'${{ secrets.CLOUDFLARE_API_TOKEN }}'
105 changes: 105 additions & 0 deletions sync-r2.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
#!/usr/bin/env bash

#------------------------------------------------------------------------------
# Uploads the content of the local working copy to the Cloudflare R2 bucket
# using the AWS CLI (S3-compatible API). Removes any files that do not exist
# locally. Files in the root directory with names starting with a dot or an
# underscore are not uploaded. After the upload the Cloudflare CDN cache is
# purged for the affected files.
#
# The script assumes that the AWS CLI tool is installed and already configured
# with credentials allowing it to modify the bucket.
#
# NOTE: There's no built-in mechanism for updating an S3 bucket in an atomic
# way. Only individual file updates are atomic. This means that during the
# sync clients will see the intermediate state with some files missing or not
# yet updated. Since the binaries are never modified or removed from the repository
# under normal circumstances, updating file lists last is enough to alleviate this.
#
# When running multiple instances of this script concurrently on different
# revisions it's theoretically possible to end up with any combination of
# their files in the bucket so it should be avoided.
#
# WARNING: The script destructively modifies the working copy. Always run it
# on a fresh clone!
#------------------------------------------------------------------------------

set -eo pipefail

# Prints an error message to stderr and aborts the script with a non-zero
# exit code. `printf` with "$*" avoids SC2145 (mixing "$@" with a string) and
# `exit 1` terminates unconditionally instead of relying on `set -e` to
# propagate the failure from the caller's `||` list.
die() { printf 'ERROR: %s\n' "$*" >&2; exit 1; }

# Validate the argument count before referencing any positional parameter so
# that a wrong invocation fails fast with a clear message.
(( $# == 6 )) || die "Expected exactly 6 parameters."

r2_account_id="$1"          # Cloudflare account ID (part of the R2 endpoint URL).
r2_zone="$2"                # R2 location hint (informational, printed below).
r2_bucket_name="$3"         # Name of the target R2 bucket.
cloudflare_zone_id="$4"     # Cloudflare zone whose CDN cache gets purged.
cloudflare_cache_host="$5"  # Hostname used to build the purge URLs.
cloudflare_api_token="$6"   # API token authorized to purge the zone's cache.

r2_endpoint="https://${r2_account_id}.r2.cloudflarestorage.com"
r2_bucket_uri="s3://${r2_bucket_name}"

echo "===> Using Cloudflare R2 bucket '${r2_bucket_name}' (zone: ${r2_zone}) via account ${r2_account_id}"

# File timestamps are reconstructed from commit dates below, which requires
# the full history — a shallow clone would produce wrong dates silently.
[[ $(git rev-parse --is-shallow-repository) == false ]] || die "This script requires access to full git history to be able to set file timestamps correctly."

echo "===> Updating file modification timestamps to match commits"
# NOTE: `aws s3 sync` compares file timestamp and size to decide whether to upload it or not.
# Ask git for the commit date directly in `touch -t` format (CCYYMMDDhhmm.SS) using
# `--date=format-local:` (strftime in the local timezone, matching what
# `date --date="$iso"` would produce). This avoids forking an extra `date`
# process per file and removes the misleadingly named intermediate variable —
# the value is a `touch -t` stamp, not a Unix epoch timestamp.
readarray -t files < <(git ls-files)
for file in "${files[@]}"; do
    touch_timestamp="$(git log --max-count 1 --pretty=format:%cd --date=format-local:'%Y%m%d%H%M.%S' -- "$file")"
    touch -m -t "$touch_timestamp" "$file"
done

echo "===> Removing files that should not be uploaded to the bucket"
# NOTE: This ensures that they will be deleted from the bucket if they're already there.
# If we used `aws s3 sync --delete --exclude` instead, they would not get deleted.
# A single `find` pass with alternation deletes both groups: top-level entries
# whose names start with a dot and those starting with an underscore.
find . \( -path './.*' -o -path './_*' \) -delete

echo "===> Adding compatibility symlinks for files containing plus signs in the name"
# NOTE: This is a quick'n'dirty workaround for Amazon S3 decoding plus sign in paths
# as a space even though this substitution is only supposed to happen in a query string.
# See https://forums.aws.amazon.com/thread.jspa?threadID=55746
# The matched path is passed to `bash -c` as a positional argument ($1) instead of
# being substituted into the command string via `{}`. Embedding `{}` in the string
# would let special characters in file names be interpreted as shell code
# (ShellCheck SC2156).
find . \
    -regex "^\(.*/\)*[^/]*\+[^/]*$" \
    -exec bash -c 'ln --symbolic --no-target-directory "$(basename "$1")" "$(dirname "$1")/$(basename "$1" | tr "+" " ")"' _ {} \;

# Flags shared by both sync passes below.
sync_options=(--delete --follow-symlinks --no-progress)

echo "===> Syncing binaries with the Cloudflare R2 bucket"
# Binaries first, file lists last: clients that read an updated list must find
# every binary it mentions already in place (see the note at the top of the file).
aws --endpoint-url "$r2_endpoint" s3 sync . "$r2_bucket_uri" "${sync_options[@]}" --exclude "*/list.*"

echo "===> Syncing file lists with the Cloudflare R2 bucket"
aws --endpoint-url "$r2_endpoint" s3 sync . "$r2_bucket_uri" "${sync_options[@]}" --exclude "*" --include "*/list.*"

echo "===> Purging Cloudflare CDN cache"
# Purge only the files that might change in-place when new binaries are added.
purge_paths=(
    "/bin/soljson-nightly.js"
    "/soljson.js"
)

# Appends every path matched by the given `find -wholename` pattern to
# purge_paths, with the leading `.` replaced by `/` to form an absolute
# URL path.
collect_purge_paths() {
    local relative_path
    while IFS= read -r relative_path; do
        purge_paths+=("/${relative_path}")
    done < <(find . -wholename "$1" | cut --characters 2-)
}

collect_purge_paths '*/list.*'
collect_purge_paths '*/*-latest'
collect_purge_paths '*/*-latest.*'

# Cloudflare's purge-by-URL API accepts at most 30 URLs per request, so the
# collected paths are purged in batches. With one request holding every list
# and *-latest file the call could exceed the limit and fail outright.
batch_size=30
for ((batch_start = 0; batch_start < ${#purge_paths[@]}; batch_start += batch_size)); do
    # Build the JSON payload by hand to avoid a dependency on `jq`. The
    # separator is empty for the first element and acts as a "," prefix for
    # every subsequent one, so there is no trailing comma.
    purge_payload='{"files":['
    separator=""
    for path in "${purge_paths[@]:batch_start:batch_size}"; do
        purge_payload+="${separator}\"https://${cloudflare_cache_host}${path}\""
        separator=","
    done
    purge_payload+=']}'

    # `--fail` makes curl return non-zero on an HTTP error so that `set -e`
    # aborts the script; `--show-error` still prints the reason despite `--silent`.
    curl --fail --show-error --silent \
        -X POST "https://api.cloudflare.com/client/v4/zones/${cloudflare_zone_id}/purge_cache" \
        -H "Authorization: Bearer ${cloudflare_api_token}" \
        -H "Content-Type: application/json" \
        --data "$purge_payload" \
        >/dev/null
done
2 changes: 1 addition & 1 deletion sync-s3.sh
Original file line number Diff line number Diff line change
Expand Up @@ -73,4 +73,4 @@ aws cloudfront create-invalidation \
/soljson.js \
$(find . -wholename '*/list.*' | cut --characters 2-) \
$(find . -wholename '*/*-latest' | cut --characters 2-) \
$(find . -wholename '*/*-latest.*' | cut --characters 2-)
$(find . -wholename '*/*-latest.*' | cut --characters 2-)
Loading