Skip to content

Commit 73f8869

Browse files
dependabot[bot], glenn-jocher, and UltralyticsAssistant
authored
Bump actions/upload-artifact from 4 to 5 in /.github/workflows (#166)
Signed-off-by: dependabot[bot] <[email protected]> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher <[email protected]> Co-authored-by: UltralyticsAssistant <[email protected]>
1 parent 05256e2 commit 73f8869

File tree

3 files changed

+29
-38
lines changed

3 files changed

+29
-38
lines changed

.github/workflows/publish.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ jobs:
7070
- uses: astral-sh/setup-uv@v7
7171
- run: uv pip install --system --no-cache build
7272
- run: python -m build
73-
- uses: actions/upload-artifact@v4
73+
- uses: actions/upload-artifact@v5
7474
with:
7575
name: dist
7676
path: dist/

plugin/main.py

Lines changed: 22 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -26,16 +26,15 @@
2626

2727

2828
class MetaPlugin(BasePlugin):
29-
"""
30-
MetaPlugin class for enhancing MkDocs documentation with metadata, social sharing, and structured data.
29+
"""MetaPlugin class for enhancing MkDocs documentation with metadata, social sharing, and structured data.
3130
32-
This class extends the BasePlugin class from MkDocs to add various meta tags, social sharing buttons, and
33-
structured data to the generated HTML pages. It also retrieves git information for each file to include
34-
authorship and modification details.
31+
This class extends the BasePlugin class from MkDocs to add various meta tags, social sharing buttons, and structured
32+
data to the generated HTML pages. It also retrieves git information for each file to include authorship and
33+
modification details.
3534
3635
Attributes:
37-
config_scheme (tuple): Configuration options for the plugin including verbose output, default images,
38-
authors, and various feature toggles for descriptions, images, keywords, share buttons, and JSON-LD.
36+
config_scheme (tuple): Configuration options for the plugin including verbose output, default images, authors,
37+
and various feature toggles for descriptions, images, keywords, share buttons, and JSON-LD.
3938
4039
Methods:
4140
get_git_info: Retrieve git information of a specified file including hash, date, and branch.
@@ -99,8 +98,7 @@ def get_git_info(self, file_path: str) -> dict[str, Any]:
9998
return git_info
10099

101100
def on_page_content(self, content: str, page, config, files) -> str:
102-
"""
103-
Process page content with optional enhancements like images, descriptions, and keywords.
101+
"""Process page content with optional enhancements like images, descriptions, and keywords.
104102
105103
Args:
106104
content (str): The content of the page in HTML format.
@@ -146,18 +144,12 @@ def on_page_content(self, content: str, page, config, files) -> str:
146144

147145
@staticmethod
148146
def insert_content(soup: BeautifulSoup, content_to_insert) -> None:
149-
"""
150-
Insert additional content into a BeautifulSoup object at a specified location.
147+
"""Insert additional content into a BeautifulSoup object at a specified location.
151148
152149
Args:
153150
soup (BeautifulSoup): The BeautifulSoup object representing the HTML content.
154151
content_to_insert (Tag | NavigableString): The HTML content to be inserted.
155152
156-
Notes:
157-
This function specifically searches for an HTML element with the id "__comments" and inserts the
158-
content_to_insert before it. If the "__comments" element is not found, it defaults to appending
159-
the content to the element with class "md-content__inner".
160-
161153
Examples:
162154
Insert content into a BeautifulSoup object
163155
>>> from bs4 import BeautifulSoup
@@ -166,6 +158,11 @@ def insert_content(soup: BeautifulSoup, content_to_insert) -> None:
166158
>>> new_content = soup.new_tag('div', id='new')
167159
>>> new_content.string = "This is new content"
168160
>>> MetaPlugin.insert_content(soup, new_content)
161+
162+
Notes:
163+
This function specifically searches for an HTML element with the id "__comments" and inserts the
164+
content_to_insert before it. If the "__comments" element is not found, it defaults to appending
165+
the content to the element with class "md-content__inner".
169166
"""
170167
if comments_header := soup.find("h2", id="__comments"):
171168
comments_header.insert_before(content_to_insert)
@@ -175,27 +172,26 @@ def insert_content(soup: BeautifulSoup, content_to_insert) -> None:
175172

176173
@staticmethod
177174
def parse_faq(soup: BeautifulSoup) -> list[dict[str, Any]]:
178-
"""
179-
Parse the FAQ questions and answers from the HTML page content.
175+
"""Parse the FAQ questions and answers from the HTML page content.
180176
181177
Args:
182178
soup (BeautifulSoup): The BeautifulSoup object representing the HTML page content.
183179
184180
Returns:
185-
faqs (List[Dict[str, Any]]): A list of dictionaries, each containing a parsed FAQ entry with 'Question'
186-
and 'Answer' fields following the JSON-LD schema.
187-
188-
Notes:
189-
This method identifies the FAQ section by looking for an `h2` tag with the text "FAQ". Each question is
190-
identified by an `h3` tag, and its corresponding answer is captured from `p` tags until the next `h3` or
191-
`h2` tag.
181+
faqs (List[Dict[str, Any]]): A list of dictionaries, each containing a parsed FAQ entry with 'Question' and
182+
'Answer' fields following the JSON-LD schema.
192183
193184
Examples:
194185
Parse FAQ content from HTML
195186
>>> from bs4 import BeautifulSoup
196187
>>> html_content = '<h2>FAQ</h2><h3>Question 1?</h3><p>Answer to question 1.</p>'
197188
>>> soup = BeautifulSoup(html_content, 'html.parser')
198189
>>> faq_data = MetaPlugin.parse_faq(soup)
190+
191+
Notes:
192+
This method identifies the FAQ section by looking for an `h2` tag with the text "FAQ". Each question is
193+
identified by an `h3` tag, and its corresponding answer is captured from `p` tags until the next `h3` or
194+
`h2` tag.
199195
"""
200196
faqs = []
201197
if faq_section := soup.find("h2", string="FAQ"):
@@ -226,8 +222,7 @@ def parse_faq(soup: BeautifulSoup) -> list[dict[str, Any]]:
226222
return faqs
227223

228224
def on_post_page(self, output: str, page, config) -> str:
229-
"""
230-
Enhance the HTML output of a page with metadata tags, git information, and share buttons.
225+
"""Enhance the HTML output of a page with metadata tags, git information, and share buttons.
231226
232227
Args:
233228
output (str): The HTML content of the rendered page.

plugin/utils.py

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,7 @@
1818

1919

2020
def calculate_time_difference(date_string: str) -> tuple[str, str]:
21-
"""
22-
Calculate the time difference between a given date and the current date in a human-readable format.
21+
"""Calculate the time difference between a given date and the current date in a human-readable format.
2322
2423
Args:
2524
date_string (str): Date and time string in the format "%Y-%m-%d %H:%M:%S %z".
@@ -50,12 +49,11 @@ def calculate_time_difference(date_string: str) -> tuple[str, str]:
5049

5150

5251
def get_youtube_video_ids(soup: BeautifulSoup) -> list[str]:
53-
"""
54-
Extract YouTube video IDs from iframe elements present in the provided BeautifulSoup object.
52+
"""Extract YouTube video IDs from iframe elements present in the provided BeautifulSoup object.
5553
5654
Args:
57-
soup (BeautifulSoup): A BeautifulSoup object containing the HTML content from which YouTube video IDs need
58-
to be extracted.
55+
soup (BeautifulSoup): A BeautifulSoup object containing the HTML content from which YouTube video IDs need to be
56+
extracted.
5957
6058
Returns:
6159
(List[str]): A list containing YouTube video IDs extracted from the HTML content.
@@ -86,8 +84,7 @@ def get_youtube_video_ids(soup: BeautifulSoup) -> list[str]:
8684
def get_github_username_from_email(
8785
email: str, cache: dict, file_path: str = "", verbose: bool = True
8886
) -> tuple[str | None, str | None]:
89-
"""
90-
Retrieve the GitHub username and avatar URL associated with the given email address.
87+
"""Retrieve the GitHub username and avatar URL associated with the given email address.
9188
9289
Args:
9390
email (str): The email address to retrieve the GitHub username for.
@@ -139,8 +136,7 @@ def get_github_username_from_email(
139136

140137

141138
def get_github_usernames_from_file(file_path: str, default_user: str | None = None) -> dict[str, dict[str, any]]:
142-
"""
143-
Fetch GitHub usernames associated with a file using Git Log and Git Blame commands.
139+
"""Fetch GitHub usernames associated with a file using Git Log and Git Blame commands.
144140
145141
Args:
146142
file_path (str): The path to the file for which GitHub usernames are to be retrieved.

0 commit comments

Comments (0)