Skip to content

Commit d6ca59d

Browse files
committed
Drop support for Python 3.9
1 parent fb66ec0 commit d6ca59d

File tree

5 files changed

+25
-34
lines changed

5 files changed

+25
-34
lines changed

.github/workflows/lint.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ jobs:
2828
strategy:
2929
fail-fast: false
3030
matrix:
31-
python-version: ['3.9', '3.14']
31+
python-version: ['3.10', '3.14']
3232
steps:
3333
- uses: actions/checkout@v5
3434
with:

.github/workflows/test.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ jobs:
2020
strategy:
2121
fail-fast: false
2222
matrix:
23-
python-version: ['3.9', '3.14']
23+
python-version: ['3.10', '3.14']
2424
steps:
2525
- uses: actions/checkout@v5
2626
with:

pyproject.toml

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ description = "Parse publications from ResearchFish API and produces the files n
1515
dynamic = ["version"]
1616
name = "rfparser"
1717
readme = "README.md"
18-
requires-python = ">=3.9"
18+
requires-python = ">=3.10"
1919
license = "MIT"
2020
license-files = [
2121
"LICENSE",
@@ -24,7 +24,6 @@ classifiers = [
2424
"Development Status :: 3 - Alpha",
2525
"Operating System :: OS Independent",
2626
"Programming Language :: Python :: 3",
27-
"Programming Language :: Python :: 3.9",
2827
"Programming Language :: Python :: 3.10",
2928
"Programming Language :: Python :: 3.11",
3029
"Programming Language :: Python :: 3.12",
@@ -38,14 +37,10 @@ rfparser = "rfparser:main"
3837

3938
[tool.black]
4039
line-length = 120
41-
target-version = ['py39']
4240

4341
[tool.darker]
4442
isort = true
4543

46-
[tool.ruff]
47-
target-version = "py39"
48-
4944
[tool.ruff.lint]
5045
select = ["E", "F", "B", "UP"]
5146
# Exceptions:

rfparser/__init__.py

Lines changed: 20 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -11,10 +11,8 @@
1111
from time import sleep
1212
from typing import (
1313
Any,
14-
Optional,
1514
SupportsIndex,
1615
TYPE_CHECKING,
17-
Union,
1816
)
1917
from xml.etree import ElementTree
2018
from xml.etree.ElementTree import indent
@@ -97,10 +95,10 @@
9795
class Researcher:
9896
def __init__(
9997
self,
100-
family_names: Optional[str] = None,
101-
given_names: Optional[str] = None,
102-
name: Optional[str] = None,
103-
orcid_id: Optional[str] = None,
98+
family_names: str | None = None,
99+
given_names: str | None = None,
100+
name: str | None = None,
101+
orcid_id: str | None = None,
104102
):
105103
self.family_names = family_names
106104
self.given_names = given_names
@@ -127,7 +125,7 @@ def from_CR(cls, author_dict: dict[str, Any]) -> "Self":
127125
@classmethod
128126
def from_DC(cls, creator_dict: dict[str, Any]) -> "Self":
129127
nameIdentifiers = creator_dict["nameIdentifiers"]
130-
orcid_id: Optional[str] = None
128+
orcid_id: str | None = None
131129
for nameIdentifier_dict in nameIdentifiers:
132130
if nameIdentifier_dict["nameIdentifierScheme"] == "ORCID":
133131
orcid_id = nameIdentifier_dict["nameIdentifier"]
@@ -160,13 +158,13 @@ class User(Researcher):
160158
family_names: str
161159
given_names: str
162160

163-
def __init__(self, username: str, family_names: str, given_names: str, orcid_id: Optional[str] = None):
161+
def __init__(self, username: str, family_names: str, given_names: str, orcid_id: str | None = None):
164162
self.username = username
165163
super().__init__(family_names=family_names, given_names=given_names, orcid_id=orcid_id)
166164

167165

168166
class DOI(str):
169-
def __new__(cls, doi: Optional[str]) -> "Self":
167+
def __new__(cls, doi: str | None) -> "Self":
170168
"""
171169
Sanitise a DOI, see
172170
https://www.doi.org/doi-handbook/DOI_Handbook_Final.pdf
@@ -193,9 +191,9 @@ def __hash__(self) -> int:
193191

194192
def startswith(
195193
self,
196-
prefix: Union[str, tuple[str, ...]],
197-
start: Optional[SupportsIndex] = None,
198-
end: Optional[SupportsIndex] = None,
194+
prefix: str | tuple[str, ...],
195+
start: SupportsIndex | None = None,
196+
end: SupportsIndex | None = None,
199197
) -> bool:
200198
lower_prefix = prefix.lower() if isinstance(prefix, str) else tuple(_.lower() for _ in prefix)
201199
return self.lower().startswith(lower_prefix, start, end)
@@ -223,11 +221,11 @@ def RF_login(username: str, password: str) -> Session:
223221

224222
def get_url(
225223
url: str,
226-
params: Optional[dict] = None,
227-
headers: Optional[dict[str, str]] = None,
224+
params: dict | None = None,
225+
headers: dict[str, str] | None = None,
228226
timeout: float = REQUEST_TIMEOUT,
229227
retries: int = REQUEST_RETRIES,
230-
s: Optional[Session] = None,
228+
s: Session | None = None,
231229
) -> Response:
232230
for i in range(retries):
233231
backoff_time = 0 if i == 0 else REQUEST_RETRIES_BACKOFF_FACTOR * (2**i)
@@ -256,7 +254,7 @@ def get_url(
256254
return r
257255

258256

259-
def RF_get_paginated(s: Session, url: str, params: Optional[dict] = None, max_pages: int = sys.maxsize) -> list[dict]:
257+
def RF_get_paginated(s: Session, url: str, params: dict | None = None, max_pages: int = sys.maxsize) -> list[dict]:
260258
"""
261259
Get paginated items from ResearchFish API.
262260
"""
@@ -287,7 +285,7 @@ def get_doi_RA(doi: str) -> dict[str, str]:
287285
return r.json()[0]
288286

289287

290-
def CR_get_pub_metadata(doi: str, headers: Optional[dict[str, str]] = None) -> dict[str, Any]:
288+
def CR_get_pub_metadata(doi: str, headers: dict[str, str] | None = None) -> dict[str, Any]:
291289
"""
292290
Get metadata for a publication from CrossRef API.
293291
"""
@@ -315,7 +313,7 @@ def unpaywall_get_oa_status(s: Session, doi: str, email: str) -> str:
315313
return r_dict["oa_status"]
316314

317315

318-
def get_dois_from_old_xml(nbiros_pub_export_xml_url: Optional[str], pubs_with_doi: dict[DOI, dict[str, Any]]) -> None:
316+
def get_dois_from_old_xml(nbiros_pub_export_xml_url: str | None, pubs_with_doi: dict[DOI, dict[str, Any]]) -> None:
319317
"""
320318
Get the DOIs from the old ei.xml file generated from NBIROS.
321319
"""
@@ -356,7 +354,7 @@ def get_dois_from_old_xml(nbiros_pub_export_xml_url: Optional[str], pubs_with_do
356354
pubs_with_doi[doi]["nbiros_entries"].append(pub_el)
357355

358356

359-
def sanitise_orcid_id(orcid_id: Optional[str]) -> Optional[str]:
357+
def sanitise_orcid_id(orcid_id: str | None) -> str | None:
360358
if not orcid_id:
361359
return None
362360
# Remove initial part, if it's a URL
@@ -367,7 +365,7 @@ def sanitise_orcid_id(orcid_id: Optional[str]) -> Optional[str]:
367365
return f"https://orcid.org/{number}"
368366

369367

370-
def get_users(people_data_csv_url: Optional[str]) -> list[User]:
368+
def get_users(people_data_csv_url: str | None) -> list[User]:
371369
log.info("Started get_users")
372370
if not people_data_csv_url:
373371
log.warning("people_data_csv_url option not specified")
@@ -395,13 +393,13 @@ def get_users(people_data_csv_url: Optional[str]) -> list[User]:
395393
def write_xml_output(
396394
pubs_with_doi: dict[DOI, dict[str, Any]],
397395
outfile: str,
398-
people_data_csv_url: Optional[str],
396+
people_data_csv_url: str | None,
399397
) -> None:
400398
"""
401399
Write the publications to an XML file for the EI website.
402400
"""
403401

404-
def author_dict_to_username(author: Author) -> Optional[str]:
402+
def author_dict_to_username(author: Author) -> str | None:
405403
# First try to match the ORCID id
406404
orcid_id = author.orcid_id
407405
if orcid_id:

rfparser/util.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,7 @@
44
from io import StringIO
55
from typing import (
66
Any,
7-
Optional,
87
TypeVar,
9-
Union,
108
)
119

1210
NAME_SPLITTER_PATTERN = re.compile(r"[\s-]+")
@@ -39,7 +37,7 @@ def strip_tags(html: str) -> str:
3937
return s.get_data()
4038

4139

42-
def str_if_not_None(s: Any) -> Optional[str]:
40+
def str_if_not_None(s: Any) -> str | None:
4341
"""
4442
Cast a variable to str if it's not None.
4543
"""
@@ -88,7 +86,7 @@ def is_same_person(family_names1: str, given_names1: str, family_names2: str, gi
8886
return True
8987

9088

91-
def extend_list_to_size(t: list[T], size: int) -> list[Union[None, T]]:
89+
def extend_list_to_size(t: list[T], size: int) -> list[None | T]:
9290
"""
9391
Extend a list with ``None``s if it is shorter than the requested size.
9492
"""

0 commit comments

Comments (0)