Skip to content

Commit 84a82e3

Browse files
jag426, claude, and dreamiurg
authored
chore: fix pre-existing pre-commit failures (#68)

Fix ty type checker errors and warnings:
- Remove 29 now-unused type: ignore comments across scraper.py, examples, and tests (ty updated its BS4 type stubs)
- Fix invalid-assignment in models.py by annotating trip_report as dict[str, Any] before mutating it
- Fix str | AttributeValueList assignment in scraper.py by casting url_link["href"] to str()

Fix yamllint warnings and errors in workflow files:
- Add required second space before inline # comments in ci.yml, release.yml, scorecard.yml, and codeql.yml
- Break long comment string in dependabot-auto-merge.yml into a $BODY variable to stay under the 120-char line limit

Co-authored-by: Aaron Gustafson <jag426@users.noreply.github.com>
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
Co-authored-by: Dmytro Gaivoronsky <the@dreamiurg.net>
1 parent 6e4f918 commit 84a82e3

File tree

10 files changed

+56
-54
lines changed

10 files changed

+56
-54
lines changed

.github/workflows/ci.yml

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -30,17 +30,17 @@ jobs:
3030
name: Lint and format check
3131
runs-on: ubuntu-latest
3232
steps:
33-
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
33+
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
3434

3535
- name: Set up Python
36-
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6
36+
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6
3737
with:
3838
python-version: "3.14"
3939
cache: pip
4040
cache-dependency-path: pyproject.toml
4141

4242
- name: Install uv
43-
uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7
43+
uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7
4444
with:
4545
enable-cache: true
4646
cache-dependency-glob: "pyproject.toml"
@@ -75,17 +75,17 @@ jobs:
7575
python-version: "3.14"
7676

7777
steps:
78-
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
78+
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
7979

8080
- name: Set up Python ${{ matrix.python-version }}
81-
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6
81+
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6
8282
with:
8383
python-version: ${{ matrix.python-version }}
8484
cache: pip
8585
cache-dependency-path: pyproject.toml
8686

8787
- name: Install uv
88-
uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7
88+
uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7
8989
with:
9090
enable-cache: true
9191
cache-dependency-glob: "pyproject.toml"
@@ -98,7 +98,7 @@ jobs:
9898

9999
- name: Upload coverage to Codecov
100100
if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.14'
101-
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5
101+
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5
102102
with:
103103
token: ${{ secrets.CODECOV_TOKEN }}
104104
file: ./coverage.xml
@@ -110,7 +110,7 @@ jobs:
110110
if: github.event_name == 'pull_request'
111111
steps:
112112
- name: Validate PR title follows conventional commit format
113-
uses: amannn/action-semantic-pull-request@48f256284bd46cdaab1048c3721360e808335d50 # v6
113+
uses: amannn/action-semantic-pull-request@48f256284bd46cdaab1048c3721360e808335d50 # v6
114114
env:
115115
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
116116
with:

.github/workflows/codeql.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,16 +24,16 @@ jobs:
2424

2525
steps:
2626
- name: Checkout repository
27-
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
27+
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
2828

2929
- name: Initialize CodeQL
30-
uses: github/codeql-action/init@5d5cd550d3e189c569da8f16ea8de2d821c9bf7a # v3
30+
uses: github/codeql-action/init@5d5cd550d3e189c569da8f16ea8de2d821c9bf7a # v3
3131
with:
3232
languages: python
3333
# Queries: security-extended includes all security checks
3434
queries: security-extended
3535

3636
- name: Perform CodeQL Analysis
37-
uses: github/codeql-action/analyze@5d5cd550d3e189c569da8f16ea8de2d821c9bf7a # v3
37+
uses: github/codeql-action/analyze@5d5cd550d3e189c569da8f16ea8de2d821c9bf7a # v3
3838
with:
3939
category: "/language:python"

.github/workflows/dependabot-auto-merge.yml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,5 +32,6 @@ jobs:
3232
PR_URL: ${{ github.event.pull_request.html_url }}
3333
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
3434
run: |
35-
gh pr comment "$PR_URL" --body \
36-
"This is a **major version update** and will not be auto-merged. Please review the changelog for breaking changes before merging manually."
35+
BODY="This is a **major version update** and will not be auto-merged."
36+
BODY="$BODY Please review the changelog for breaking changes before merging manually."
37+
gh pr comment "$PR_URL" --body "$BODY"

.github/workflows/release.yml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,26 +20,26 @@ jobs:
2020
steps:
2121
- name: Generate GitHub App token
2222
id: app-token
23-
uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2
23+
uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2
2424
with:
2525
app-id: ${{ secrets.RELEASE_APP_ID }}
2626
private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
2727

2828
- name: Checkout
29-
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
29+
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
3030
with:
3131
fetch-depth: 0
3232
token: ${{ steps.app-token.outputs.token }}
3333

3434
- name: Set up Python
35-
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6
35+
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6
3636
with:
3737
python-version: '3.12'
3838
cache: pip
3939
cache-dependency-path: pyproject.toml
4040

4141
- name: Install uv
42-
uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7
42+
uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7
4343
with:
4444
enable-cache: true
4545
cache-dependency-glob: "pyproject.toml"
@@ -68,6 +68,6 @@ jobs:
6868
6969
- name: Publish to PyPI
7070
if: steps.semantic-release.outputs.new_release == 'true'
71-
uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # release/v1
71+
uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # release/v1
7272
with:
7373
print-hash: true

.github/workflows/scorecard.yml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,26 +27,26 @@ jobs:
2727

2828
steps:
2929
- name: Checkout code
30-
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
30+
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
3131
with:
3232
persist-credentials: false
3333

3434
- name: Run analysis
35-
uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
35+
uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
3636
with:
3737
results_file: results.sarif
3838
results_format: sarif
3939
# Publish results to OpenSSF REST API for the badge
4040
publish_results: true
4141

4242
- name: Upload artifact
43-
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
43+
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
4444
with:
4545
name: SARIF file
4646
path: results.sarif
4747
retention-days: 5
4848

4949
- name: Upload to code-scanning
50-
uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v4
50+
uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v4
5151
with:
5252
sarif_file: results.sarif

examples/batch_peaks.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ def fetch_peak_details(peak_id: str) -> dict[str, Any] | None:
2424
text=True,
2525
check=True,
2626
)
27-
return json.loads(result.stdout) # type: ignore[no-any-return]
27+
return json.loads(result.stdout)
2828
except subprocess.CalledProcessError as e:
2929
print(f"Error fetching peak {peak_id}: {e}", file=sys.stderr)
3030
return None

examples/export_csv.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ def search_peaks(query: str) -> list[dict[str, Any]]:
2424
text=True,
2525
check=True,
2626
)
27-
return json.loads(result.stdout) # type: ignore[no-any-return]
27+
return json.loads(result.stdout)
2828
except (subprocess.CalledProcessError, json.JSONDecodeError) as e:
2929
print(f"Error searching for '{query}': {e}", file=sys.stderr)
3030
return []
@@ -39,7 +39,7 @@ def fetch_peak_details(peak_id: str) -> dict[str, Any] | None:
3939
text=True,
4040
check=True,
4141
)
42-
return json.loads(result.stdout) # type: ignore[no-any-return]
42+
return json.loads(result.stdout)
4343
except (subprocess.CalledProcessError, json.JSONDecodeError) as e:
4444
print(f"Error fetching peak {peak_id}: {e}", file=sys.stderr)
4545
return None

peakbagger/models.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ def to_dict(self) -> dict[str, Any]:
132132
}
133133

134134
# Add trip report info
135-
result["trip_report"] = {
135+
trip_report: dict[str, Any] = {
136136
"has_report": self.has_trip_report,
137137
"word_count": self.trip_report_words,
138138
}
@@ -166,9 +166,10 @@ def to_dict(self) -> dict[str, Any]:
166166

167167
# Add full trip report text if present
168168
if self.trip_report_text:
169-
result["trip_report"]["text"] = self.trip_report_text
169+
trip_report["text"] = self.trip_report_text
170170
if self.trip_report_url:
171-
result["trip_report"]["external_url"] = self.trip_report_url
171+
trip_report["external_url"] = self.trip_report_url
172+
result["trip_report"] = trip_report
172173

173174
return result
174175

peakbagger/scraper.py

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -42,26 +42,26 @@ def parse_search_results(html: str) -> list[SearchResult]:
4242
return results
4343

4444
# Find the next table after the header
45-
table: Tag | None = search_header.find_next("table", class_="gray") # type: ignore[assignment]
45+
table: Tag | None = search_header.find_next("table", class_="gray")
4646
if not table:
4747
logger.debug("No search results table found")
4848
return results
4949

5050
# Skip header row, process data rows
51-
rows: list[Tag] = table.find_all("tr")[1:] # type: ignore[assignment]
51+
rows: list[Tag] = table.find_all("tr")[1:]
5252
logger.debug(f"Found {len(rows)} result rows to process")
5353

5454
for row in rows:
55-
cells: list[Tag] = row.find_all("td") # type: ignore[assignment]
55+
cells: list[Tag] = row.find_all("td")
5656
if len(cells) < 5: # Need at least: Type, Name, Location, Range, Elevation
5757
continue
5858

5959
# Extract peak link (2nd column)
60-
link: Tag | None = cells[1].find("a", href=lambda x: x and "peak.aspx?pid=" in x) # type: ignore[assignment]
60+
link: Tag | None = cells[1].find("a", href=lambda x: x and "peak.aspx?pid=" in x)
6161
if not link:
6262
continue
6363

64-
href: str = link["href"] # type: ignore[assignment]
64+
href: str = link["href"]
6565
name: str = link.get_text(strip=True)
6666

6767
# Extract peak ID from URL
@@ -126,7 +126,7 @@ def parse_peak_detail(html: str, pid: str) -> Peak | None:
126126

127127
try:
128128
# Extract peak name and state from H1
129-
h1: Tag | None = soup.find("h1") # type: ignore[assignment]
129+
h1: Tag | None = soup.find("h1")
130130
if not h1:
131131
logger.debug("No H1 tag found in peak detail page")
132132
return None
@@ -143,7 +143,7 @@ def parse_peak_detail(html: str, pid: str) -> Peak | None:
143143
peak: Peak = Peak(pid=pid, name=name, state=state)
144144

145145
# Extract elevation from H2
146-
h2: Tag | None = soup.find("h2") # type: ignore[assignment]
146+
h2: Tag | None = soup.find("h2")
147147
if h2:
148148
elevation_text: str = h2.get_text(strip=True)
149149
# Format: "Elevation: 10,984 feet, 3348 meters"
@@ -310,7 +310,7 @@ def parse_peak_ascents(html: str) -> list[Ascent]:
310310
ascents: list[Ascent] = []
311311

312312
# Find all tables
313-
tables: list[Tag] = soup.find_all("table") # type: ignore[assignment]
313+
tables: list[Tag] = soup.find_all("table")
314314
logger.debug(f"Found {len(tables)} tables in HTML")
315315

316316
# Look for the data table with dynamic header detection
@@ -319,14 +319,14 @@ def parse_peak_ascents(html: str) -> list[Ascent]:
319319
num_columns: int = 0
320320

321321
for table in tables:
322-
rows: list[Tag] = table.find_all("tr") # type: ignore[assignment]
322+
rows: list[Tag] = table.find_all("tr")
323323
if len(rows) < 10:
324324
continue
325325

326326
# Check if second row has expected headers
327327
if len(rows) > 1:
328328
header_row: Tag = rows[1]
329-
headers: list[Tag] = header_row.find_all(["th", "td"], recursive=False) # type: ignore[assignment]
329+
headers: list[Tag] = header_row.find_all(["th", "td"], recursive=False)
330330

331331
# Table must have reasonable number of columns (not the merged giant table)
332332
if len(headers) < 3 or len(headers) > 20:
@@ -368,7 +368,7 @@ def parse_peak_ascents(html: str) -> list[Ascent]:
368368
# Process data rows (skip first 2 rows: separator and header)
369369
for row in rows[2:]:
370370
# Use recursive=False to avoid counting cells in nested tables (e.g., route icons)
371-
cells: list[Tag] = row.find_all(["td", "th"], recursive=False) # type: ignore[assignment]
371+
cells: list[Tag] = row.find_all(["td", "th"], recursive=False)
372372

373373
# Skip rows that don't match the expected column count
374374
if len(cells) != num_columns:
@@ -378,7 +378,7 @@ def parse_peak_ascents(html: str) -> list[Ascent]:
378378
climber_cell: Tag = cells[climber_idx]
379379
climber_link: Tag | None = climber_cell.find(
380380
"a", href=lambda x: x and "climber.aspx?cid=" in x
381-
) # type: ignore[assignment]
381+
)
382382
if not climber_link:
383383
continue
384384

@@ -391,7 +391,7 @@ def parse_peak_ascents(html: str) -> list[Ascent]:
391391
date_cell: Tag = cells[date_idx]
392392
date_link: Tag | None = date_cell.find(
393393
"a", href=lambda x: x and "ascent.aspx?aid=" in x
394-
) # type: ignore[assignment]
394+
)
395395
if not date_link:
396396
continue
397397

@@ -415,7 +415,7 @@ def parse_peak_ascents(html: str) -> list[Ascent]:
415415
has_gpx: bool = False
416416
if gps_idx != -1:
417417
gps_cell: Tag = cells[gps_idx]
418-
gps_img: Tag | None = gps_cell.find("img", src=lambda x: x and "GPS.gif" in x) # type: ignore[assignment]
418+
gps_img: Tag | None = gps_cell.find("img", src=lambda x: x and "GPS.gif" in x)
419419
has_gpx = gps_img is not None
420420

421421
# Check for trip report (optional column)
@@ -470,7 +470,7 @@ def parse_ascent_detail(html: str, ascent_id: str) -> Ascent | None:
470470

471471
try:
472472
# Extract title: "Ascent of [Peak Name] on [Date]" or "Ascent of [Peak Name] in [Year]"
473-
h1: Tag | None = soup.find("h1") # type: ignore[assignment]
473+
h1: Tag | None = soup.find("h1")
474474
if not h1:
475475
logger.debug("No H1 tag found in ascent detail page")
476476
return None
@@ -486,13 +486,13 @@ def parse_ascent_detail(html: str, ascent_id: str) -> Ascent | None:
486486
peak_name = parts[0].replace("Ascent of ", "").strip()
487487

488488
# Extract climber from H2: "Climber: [Name]"
489-
h2: Tag | None = soup.find("h2") # type: ignore[assignment]
489+
h2: Tag | None = soup.find("h2")
490490
climber_name: str | None = None
491491
climber_id: str | None = None
492492
if h2:
493493
climber_link: Tag | None = h2.find(
494494
"a", href=lambda x: x and "climber.aspx?cid=" in x
495-
) # type: ignore[assignment]
495+
)
496496
if climber_link:
497497
climber_name = climber_link.get_text(strip=True)
498498
climber_href: str = climber_link["href"] # type: ignore[assignment]
@@ -506,7 +506,7 @@ def parse_ascent_detail(html: str, ascent_id: str) -> Ascent | None:
506506
# Find the left gray table (width="49%", align="left")
507507
table: Tag | None = soup.find(
508508
"table", class_="gray", attrs={"width": "49%", "align": "left"}
509-
) # type: ignore[assignment]
509+
)
510510
if not table:
511511
return None
512512

@@ -519,9 +519,9 @@ def parse_ascent_detail(html: str, ascent_id: str) -> Ascent | None:
519519
)
520520

521521
# Extract data from table rows
522-
rows: list[Tag] = table.find_all("tr") # type: ignore[assignment]
522+
rows: list[Tag] = table.find_all("tr")
523523
for row in rows:
524-
cells: list[Tag] = row.find_all("td", recursive=False) # type: ignore[assignment]
524+
cells: list[Tag] = row.find_all("td", recursive=False)
525525
if len(cells) < 1:
526526
continue
527527

@@ -551,9 +551,9 @@ def parse_ascent_detail(html: str, ascent_id: str) -> Ascent | None:
551551
ascent.trip_report_text = report_text.strip()
552552

553553
# Extract external URL if present
554-
url_link: Tag | None = cells[0].find("a", href=re.compile(r"^https?://")) # type: ignore[assignment]
554+
url_link: Tag | None = cells[0].find("a", href=re.compile(r"^https?://"))
555555
if url_link and url_link.get("href"):
556-
href = url_link["href"] # type: ignore[assignment]
556+
href = str(url_link["href"])
557557
# Only store if not peakbagger.com
558558
if "peakbagger.com" not in href:
559559
ascent.trip_report_url = href
@@ -565,7 +565,7 @@ def parse_ascent_detail(html: str, ascent_id: str) -> Ascent | None:
565565
# Get label from first cell
566566
# Some labels have <b> tags, others are just text
567567
label_cell = cells[0]
568-
label_b: Tag | None = label_cell.find("b") # type: ignore[assignment]
568+
label_b: Tag | None = label_cell.find("b")
569569
if label_b:
570570
label: str = label_b.get_text(strip=True).rstrip(":")
571571
else:
@@ -616,7 +616,7 @@ def parse_ascent_detail(html: str, ascent_id: str) -> Ascent | None:
616616
# Extract peak link and ID
617617
peak_link: Tag | None = value_cell.find(
618618
"a", href=lambda x: x and "peak.aspx?pid=" in x
619-
) # type: ignore[assignment]
619+
)
620620
if peak_link:
621621
ascent.peak_name = peak_link.get_text(strip=True)
622622
peak_href: str = peak_link["href"] # type: ignore[assignment]

0 commit comments

Comments (0)