|
 from pybool_ir.datasets.pubmed.datautils import FTP_URL, FTP_BASELINE_CWD


-def download_baseline(path: Path):
+# def download_baseline(path: Path):
+#     with FTP(host=FTP_URL, user="anonymous") as ftp:
+#         ftp.cwd(FTP_BASELINE_CWD)
+#         files = []
+#         ftp.dir(files.append)
+
+#         os.makedirs(str(path), exist_ok=True)
+
+#         for filename in reversed(datautils.dir_to_filenames(files)):
+#             if os.path.exists(str(path / filename)):
+#                 print(f"found {path / filename}, skipping")
+#                 continue
+#             util.download_file("https://" + FTP_URL + FTP_BASELINE_CWD + filename, path / filename)
+
+#         ftp.close()
+
+# currently used
+# def download_baseline(path: Path, limit: int = None):
+#     with FTP(host=FTP_URL, user="anonymous") as ftp:
+#         ftp.cwd(FTP_BASELINE_CWD)
+#         files = []
+#         ftp.dir(files.append)
+
+#         os.makedirs(str(path), exist_ok=True)
+
+#         filenames = reversed(datautils.dir_to_filenames(files))
+
+#         if limit is not None and limit > 0:
+#             filenames = list(filenames)[:limit]
+#             print(f"Limit set: Downloading first {limit} documents ...")
+
+#         for filename in filenames:
+#             if os.path.exists(str(path / filename)):
+#                 print(f"found {path / filename}, skipping")
+#                 continue
+
+#             util.download_file("https://" + FTP_URL + FTP_BASELINE_CWD + filename, path / filename)
+
+#         ftp.close()
+
+# test version -> parallel download
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from ftplib import FTP
+from pathlib import Path
+import time
+import os
+
+from pybool_ir import util
+from pybool_ir.datasets.pubmed import datautils
+from pybool_ir.datasets.pubmed.datautils import FTP_URL, FTP_BASELINE_CWD
+
+
+def download_baseline(path: Path, limit: int = None, workers: int = 2, retries: int = 3):
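+    # workers: how many files are fetched in parallel; retries: attempts per file before it is reported as FAILED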
     with FTP(host=FTP_URL, user="anonymous") as ftp:
         ftp.cwd(FTP_BASELINE_CWD)
         files = []
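+        # FTP.dir() feeds each line of the remote directory listing to the given callback,
+        # so `files` collects the raw listing that dir_to_filenames() parses below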
         ftp.dir(files.append)

         os.makedirs(str(path), exist_ok=True)
+        filenames = list(reversed(datautils.dir_to_filenames(files)))
+
+        if limit is not None and limit > 0:
+            filenames = filenames[:limit]
+            print(f"Limit set: Downloading first {limit} documents ...")
+
+        def download_one(filename):
+            target = path / filename
+            if target.exists():
+                return f"skip {filename}"
+
+            url = "https://" + FTP_URL + FTP_BASELINE_CWD + filename

-        for filename in reversed(datautils.dir_to_filenames(files)):
-            if os.path.exists(str(path / filename)):
-                print(f"found {path / filename}, skipping")
-                continue
-            util.download_file("https://" + FTP_URL + FTP_BASELINE_CWD + filename, path / filename)
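+            # try each file up to `retries` times, waiting a little longer before every retry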
+            for attempt in range(1, retries + 1):
+                try:
+                    util.download_file(url, target)
+                    return f"done {filename}"
+                except Exception as e:
+                    if attempt == retries:
+                        return f"FAILED {filename}: {e}"
+                    time.sleep(2 * attempt)

-        ftp.close()
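+        # fan the per-file downloads out over a small thread pool and print each result as it finishes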
+        with ThreadPoolExecutor(max_workers=workers) as executor:
+            futures = [executor.submit(download_one, fn) for fn in filenames]
+            for f in as_completed(futures):
+                print(f.result())
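
For reference, a minimal usage sketch of the new signature (not part of the commit). It assumes `download_baseline` is imported from the module this diff edits, whose path is not shown above, and the destination directory name is made up.

from pathlib import Path
# import download_baseline from the module edited in this diff (path not shown above)

# Hypothetical call: keep the first two entries of the reversed baseline listing,
# fetch them with two worker threads, and allow up to three attempts per file.
download_baseline(Path("pubmed-baseline"), limit=2, workers=2, retries=3)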