"""
A script to automate downloading CircleCI artifacts.

Usage: python3 pull_circleci_artifacts.py <TOKEN> <PIPELINE_ID> <SAVE_DIR>
    TOKEN:
        CircleCI "personal access token" of a GitHub (preferably machine) user.
        This is secret!

    PIPELINE_ID:
        A unique string id that represents the CircleCI pipeline whose artifacts
        this script pulls.
        This pipeline must have exactly one workflow, and that workflow must have
        exactly one job. This script waits for the pipeline to finish and pulls
        artifacts from this job. If the pipeline doesn't finish successfully, this
        script exits with an error.

    SAVE_DIR:
        The downloaded artifacts are saved to this directory.

CircleCI API docs: https://circleci.com/docs/api/v2/index.html (useful for
understanding this code)
"""
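
# An illustrative invocation with hypothetical values (CircleCI pipeline ids
# are UUIDs; the token comes from CircleCI's "Personal API Tokens" page):
#   python3 pull_circleci_artifacts.py "$CIRCLECI_TOKEN" \
#       11111111-2222-3333-4444-555555555555 ./artifacts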

# yup, all these are stdlib modules, in case you are wondering
import concurrent.futures
import http.client
import json
import sys
import time
from pathlib import Path
from urllib import request

_, token, pipeline_id, save_dir = sys.argv

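# The v2 API authenticates requests via the "Circle-Token" header (see the API
# docs linked in the module docstring).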
headers = {"Circle-Token": token}
save_dir = Path(save_dir)
# make sure the save directory exists before downloads start
save_dir.mkdir(parents=True, exist_ok=True)

print(
    f"Starting for {pipeline_id = } (and {save_dir = }), now establishing connection..."
)

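# This one connection object is reused for every API request below;
# http.client keeps the underlying socket open between request/getresponse
# cycles.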
cci_api = http.client.HTTPSConnection("circleci.com")


def paginate_get_items_and_next(url, next_page=""):
    """
    Helper to get "items" and "next_page_token" from the CircleCI API, used to
    handle pagination.
    """

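    # For reference, a paginated v2 API response has (at least) this shape --
    # a sketch based on the keys read below, not an exhaustive schema:
    #   {"items": [...], "next_page_token": <token string or None>}
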
    # page-token selects the page to fetch; it is omitted for the first page.
    url_query = f"{url}?page-token={next_page}" if next_page else url

    # url is expected to start with "/" (e.g. "/pipeline/...")
    cci_api.request("GET", f"/api/v2{url_query}", headers=headers)
    response = cci_api.getresponse()
    if response.status != 200:
        raise RuntimeError(
            f"Request to '{url}' not successful: {response.status} ({response.reason})"
        )

    response_dict = json.loads(response.read())
    if "message" in response_dict:
        raise RuntimeError(
            f"Request to '{url}' failed with message - {response_dict['message']}"
        )

    return response_dict["items"], response_dict["next_page_token"]


def paginate_get_single_item(url):
    """
    Helper to get exactly one item from the CircleCI paginated APIs.
    """
    items, _ = paginate_get_items_and_next(url)
    if len(items) != 1:
        raise RuntimeError(f"Expected one item, got {len(items)}")

    return items[0]


def paginate_get_all_items(url):
    """
    Helper to iterate over all "items" from the CircleCI paginated APIs,
    following "next_page_token" until the pages run out.
    """
    next_page_token = ""
    while True:
        items, next_page_token = paginate_get_items_and_next(url, next_page_token)
        if not items:
            # the server returned an empty page, nothing left to yield
            break

        yield from items
        if not next_page_token:
            # no more pages, done with pagination
            break


def download_artifact(artifact):
    """
    Helper to download an artifact given an "artifact dict". This can be called
    concurrently from multiple threads to speed up downloads.
    """
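    # Per the API docs, an artifact dict carries (among other fields) a "path"
    # (the artifact's path within the job) and a "url" to download it from.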
    path = Path(artifact["path"])
    save_path = save_dir / path.name
    print(f"Downloading {path.name}")
    request.urlretrieve(artifact["url"], save_path)
    print(f"Done with saving {path.name}")


attempt = 1
while True:
    print(f"\nAttempt {attempt}")
    workflow = paginate_get_single_item(f"/pipeline/{pipeline_id}/workflow")
    if workflow["status"] != "running":
        if workflow["status"] != "success":
            # the workflow failed (or errored, was cancelled, etc.)
            raise RuntimeError(f"The workflow has status '{workflow['status']}'")

        # the workflow finished successfully at this point
        job = paginate_get_single_item(f"/workflow/{workflow['id']}/job")

        # shouldn't really happen (the workflow succeeded), but test anyways
        if job["status"] != "success":
            raise RuntimeError(f"The job has status '{job['status']}'")

        print(f"Downloading artifacts (they will all be saved in {str(save_dir)})")
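        # Note: ThreadPoolExecutor.map submits one download task per artifact,
        # consuming the pagination generator up front on this thread, while the
        # downloads themselves run concurrently on the pool's worker threads.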
        with concurrent.futures.ThreadPoolExecutor() as pool:
            downloads = pool.map(
                download_artifact,
                paginate_get_all_items(
                    f"/project/{job['project_slug']}/{job['job_number']}/artifacts"
                ),
            )
            # consume the lazy result iterator so that any download error
            # raised in a worker thread propagates here
            for _ in downloads:
                pass

        break

    attempt += 1
    print("Workflow is still running (now sleeping for 30s before retrying)")
    time.sleep(30)