|
| 1 | +from datetime import datetime, timedelta, timezone |
| 2 | +from os import path |
| 3 | +from urllib.parse import parse_qs, urlencode, urlparse |
| 4 | + |
| 5 | +from requests import Session |
| 6 | + |
| 7 | +from .. import __version__ |
| 8 | +from .base import ContentProvider |
| 9 | + |
| 10 | + |
class CKAN(ContentProvider):
    """Provide contents of a remote CKAN dataset."""

    def __init__(self):
        super().__init__()
        self.session = Session()
        self.session.headers.update(
            {
                "user-agent": f"repo2docker {__version__}",
            }
        )

    def _fetch_version(self, api_url):
        """Return the dataset's last-modified time as a truncated epoch string.

        Borrowed from the Hydroshare provider.

        :param api_url: base URL of the CKAN action API, ending in a slash.
        """
        package_show_url = f"{api_url}package_show?id={self.dataset_id}"
        resp = self.urlopen(package_show_url).json()
        date = resp["result"]["metadata_modified"]
        # fromisoformat accepts timestamps with or without fractional
        # seconds, unlike a fixed "%Y-%m-%dT%H:%M:%S.%f" strptime format,
        # which raises ValueError when microseconds are absent.
        parsed_date = datetime.fromisoformat(date)
        # CKAN timestamps are naive; interpret them as UTC.
        epoch = parsed_date.replace(tzinfo=timezone.utc).timestamp()
        # truncate the timestamp
        return str(int(epoch))

    def _request(self, url, **kwargs):
        """Issue a GET through the shared session (carries the user-agent)."""
        return self.session.get(url, **kwargs)

    # Alias so callers can use the provider-conventional name.
    urlopen = _request

    def detect(self, source, ref=None, extra_args=None):
        """Trigger this provider for things that resolve to a CKAN dataset.

        Returns a spec dict with ``dataset_id``, ``activity_id``,
        ``api_url`` and ``version`` keys when ``source`` looks like a live
        CKAN dataset URL, otherwise ``None``.
        """
        parsed_url = urlparse(source)
        if not parsed_url.netloc:
            return None

        if "/dataset/" not in parsed_url.path:
            # Not actually a dataset
            return None

        # CKAN may be under a URL prefix, and we should accommodate that
        url_prefix, dataset_url = parsed_url.path.split("/dataset/")

        dataset_url_parts = dataset_url.split("/")
        self.dataset_id = dataset_url_parts[0]

        api_url = parsed_url._replace(
            path=f"{url_prefix}/api/3/action/", query=""
        ).geturl()

        # Probe the action API to confirm this host really is CKAN.
        status_show_url = f"{api_url}status_show"
        resp = self.urlopen(status_show_url)
        if resp.status_code != 200:
            return None

        # Activity ID may be present either as a query parameter, activity_id
        # or as part of the URL, under `/history/<activity-id>`. If `/history/`
        # is present, that takes precedence over `activity_id`
        activity_id = None
        if "history" in dataset_url_parts:
            activity_id = dataset_url_parts[dataset_url_parts.index("history") + 1]
        else:
            # Parse the query string once instead of twice.
            query_params = parse_qs(parsed_url.query)
            if query_params.get("activity_id") is not None:
                activity_id = query_params["activity_id"][0]

        self.version = self._fetch_version(api_url)
        return {
            "dataset_id": self.dataset_id,
            "activity_id": activity_id,
            "api_url": api_url,
            "version": self.version,
        }

    def fetch(self, spec, output_dir, yield_output=False):
        """Fetch a CKAN dataset into ``output_dir``, yielding progress lines.

        :param spec: dict produced by :meth:`detect`.
        :param output_dir: directory the resource files are written into.
        """
        dataset_id = spec["dataset_id"]
        activity_id = spec["activity_id"]

        yield f"Fetching CKAN dataset {dataset_id}.\n"

        # handle the activities
        if activity_id:
            fetch_url = f"{spec['api_url']}activity_data_show?" + urlencode(
                {"id": activity_id, "object_type": "package"}
            )
        else:
            fetch_url = f"{spec['api_url']}package_show?" + urlencode(
                {"id": dataset_id}
            )

        resp = self.urlopen(
            fetch_url,
            headers={"accept": "application/json"},
        )
        # Fail loudly on a bad metadata response rather than letting
        # .json() raise a cryptic decode error further down.
        resp.raise_for_status()

        dataset = resp.json()

        yield "Fetching CKAN resources.\n"

        resources = dataset["result"]["resources"]

        for resource in resources:
            file_url = resource["url"]
            if file_url == "":
                # Some resources carry no downloadable payload; skip them.
                continue
            fname = file_url.rsplit("/", maxsplit=1)[-1]
            if fname == "":
                # URL ended with a slash; fall back to the resource id.
                fname = resource["id"]

            yield f"Requesting {file_url}\n"
            resp = self._request(file_url, stream=True)
            resp.raise_for_status()

            dst_fname = path.join(output_dir, fname)
            with open(dst_fname, "wb") as dst:
                yield f"Fetching {fname}\n"
                # chunk_size=None streams data in whatever chunks arrive.
                for chunk in resp.iter_content(chunk_size=None):
                    dst.write(chunk)

    @property
    def content_id(self):
        """A unique ID to represent the version of the content."""
        return f"{self.dataset_id}.v{self.version}"
0 commit comments