|
2 | 2 |
|
3 | 3 | from datetime import datetime |
4 | 4 | import os |
| 5 | +from pathlib import Path |
| 6 | +from typing import List |
5 | 7 |
|
| 8 | +from bs4 import BeautifulSoup |
| 9 | +import requests |
6 | 10 | from sphinx.builders.latex import LaTeXBuilder |
7 | 11 |
|
# Override the LaTeX builder's accepted image types so SVG sources are allowed.
LaTeXBuilder.supported_image_types = ["image/png", "image/pdf", "image/svg+xml"]
|
19 | 23 | watermark, |
20 | 24 | ) |
21 | 25 |
|
# Absolute path of the directory containing this conf.py.
THIS_PATH = Path(__file__).parent.resolve()

# Directory where the downloaded sphinx-design example files are written.
EXAMPLE_PATH = (THIS_PATH / "examples" / "sphinx_examples").resolve()
| 29 | + |
22 | 30 | # Project information |
23 | 31 | project = "ansys_sphinx_theme" |
24 | 32 | copyright = f"(c) {datetime.now().year} ANSYS, Inc. All rights reserved" |
|
66 | 74 | "sphinx.ext.intersphinx", |
67 | 75 | "notfound.extension", |
68 | 76 | "sphinx_copybutton", |
| 77 | + "sphinx_design", |
| 78 | + "sphinx_jinja", |
69 | 79 | ] |
70 | 80 |
|
71 | 81 | # Intersphinx mapping |
|
128 | 138 | "body": generate_404(), |
129 | 139 | } |
130 | 140 | notfound_no_urls_prefix = True |
| 141 | + |
| 142 | + |
def extract_example_links(
    archive_url: str, exclude_files: List[str], timeout: float = 30
) -> List[str]:
    """
    Extract example links from a specific URL.

    Parameters
    ----------
    archive_url : str
        The URL of the archive to retrieve the example links from.
    exclude_files : list of str
        A list of files to exclude from the returned example links.
    timeout : float, optional
        Seconds to wait for the HTTP response before aborting. Default is 30.

    Returns
    -------
    list
        List of example links.

    Raises
    ------
    requests.HTTPError
        If the archive URL responds with an error status.
    """
    # A timeout is mandatory: requests.get blocks forever by default, which
    # would hang the docs build on a stalled connection.
    response = requests.get(archive_url, timeout=timeout)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, "html5lib")
    # Use link.get(...) rather than link["href"]: anchors without an href
    # attribute would otherwise raise KeyError.
    example_links = [
        f"https://raw.githubusercontent.com{href.replace('/blob/', '/')}"
        for link in soup.find_all("a")
        if (href := link.get("href", "")).endswith(".txt")
        and all(file not in href for file in exclude_files)
    ]
    return example_links
| 168 | + |
| 169 | + |
def download_and_process_files(example_links: List[str], timeout: float = 30) -> List[str]:
    """Download and process a series of example files.

    This function downloads a series of example files using a
    list of links and processes each file.

    Parameters
    ----------
    example_links : List[str]
        List of links to the example files to be downloaded.
    timeout : float, optional
        Seconds to wait for each HTTP response before aborting. Default is 30.

    Returns
    -------
    list
        List of the names of the processed files.

    Raises
    ------
    requests.HTTPError
        If any example link responds with an error status.
    """
    # Make sure the destination directory exists before writing into it.
    EXAMPLE_PATH.mkdir(parents=True, exist_ok=True)
    file_names = []
    for link in example_links:
        file_name = link.split("/")[-1]
        # Download *before* touching the target file so a failed or stalled
        # request cannot leave an empty/truncated file behind.
        response = requests.get(link, timeout=timeout)
        response.raise_for_status()
        lines = response.content.decode().splitlines()
        # Customised only to remove the warnings on docs build.
        filtered_lines = [
            line
            for line in lines
            if not line.startswith("Cards Clickable")
            and not line.startswith("...............")
        ]
        data = b"\n".join(
            line.replace("target", file_name).encode() for line in filtered_lines
        )
        (EXAMPLE_PATH / file_name).write_bytes(data)
        file_names.append(file_name)
    return file_names
| 205 | + |
| 206 | + |
# Example snippets are fetched at build time from the sphinx-design repository
# (excluding article-info.txt) and written under EXAMPLE_PATH.
URL_ARCHIVE = "https://github.com/executablebooks/sphinx-design/tree/main/docs/snippets/rst"
example_links = extract_example_links(URL_ARCHIVE, exclude_files=["article-info.txt"])
file_names = download_and_process_files(example_links)

# Context for the sphinx_jinja extension; presumably consumed by an "examples"
# template that iterates over inputs_examples — confirm against the docs pages.
jinja_contexts = {"examples": {"inputs_examples": file_names}}
0 commit comments