
Commit b400ee3

Merge pull request #10274 from pradyunsg/blacken/index
Blacken src/pip/_internal/index
2 parents fee140b + 1897784 commit b400ee3

File tree

3 files changed: +130 -113 lines


.pre-commit-config.yaml

Lines changed: 0 additions & 1 deletion
@@ -22,7 +22,6 @@ repos:
   - id: black
     exclude: |
       (?x)
-      ^src/pip/_internal/index|
       ^src/pip/_internal/models|
       ^src/pip/_internal/operations|
       ^src/pip/_internal/vcs|
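With the ^src/pip/_internal/index| entry dropped from the black hook's exclude pattern, pre-commit now formats this package. A minimal sketch of exercising the hook locally (assuming pre-commit and black are available in the development environment; the hook id "black" is the one defined in this file):

    # Run only the black hook against the newly covered file
    pre-commit run black --files src/pip/_internal/index/collector.py

    # Or check the whole directory with black directly, without rewriting anything
    black --check src/pip/_internal/index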

src/pip/_internal/index/collector.py

Lines changed: 43 additions & 36 deletions
@@ -52,7 +52,7 @@ def _match_vcs_scheme(url: str) -> Optional[str]:
     Returns the matched VCS scheme, or None if there's no match.
     """
     for scheme in vcs.schemes:
-        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
+        if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
             return scheme
     return None
 

@@ -85,7 +85,7 @@ def _ensure_html_response(url: str, session: PipSession) -> None:
     `_NotHTML` if the content type is not text/html.
     """
     scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
-    if scheme not in {'http', 'https'}:
+    if scheme not in {"http", "https"}:
         raise _NotHTTP()
 
     resp = session.head(url, allow_redirects=True)
@@ -110,7 +110,7 @@ def _get_html_response(url: str, session: PipSession) -> Response:
     if is_archive_file(Link(url).filename):
         _ensure_html_response(url, session=session)
 
-    logger.debug('Getting page %s', redact_auth_from_url(url))
+    logger.debug("Getting page %s", redact_auth_from_url(url))
 
     resp = session.get(
         url,
@@ -145,12 +145,11 @@ def _get_html_response(url: str, session: PipSession) -> Response:
 
 
 def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
-    """Determine if we have any encoding information in our headers.
-    """
+    """Determine if we have any encoding information in our headers."""
     if headers and "Content-Type" in headers:
         content_type, params = cgi.parse_header(headers["Content-Type"])
         if "charset" in params:
-            return params['charset']
+            return params["charset"]
     return None
 
 
@@ -195,7 +194,7 @@ def _clean_file_url_path(part: str) -> str:
 
 
 # percent-encoded: /
-_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)
+_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
 
 
 def _clean_url_path(path: str, is_local_path: bool) -> str:
@@ -212,12 +211,12 @@ def _clean_url_path(path: str, is_local_path: bool) -> str:
     parts = _reserved_chars_re.split(path)
 
     cleaned_parts = []
-    for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
+    for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
         cleaned_parts.append(clean_func(to_clean))
         # Normalize %xx escapes (e.g. %2f -> %2F)
         cleaned_parts.append(reserved.upper())
 
-    return ''.join(cleaned_parts)
+    return "".join(cleaned_parts)
 
 
 def _clean_link(url: str) -> str:
@@ -248,10 +247,10 @@ def _create_link_from_element(
         return None
 
     url = _clean_link(urllib.parse.urljoin(base_url, href))
-    pyrequire = anchor.get('data-requires-python')
+    pyrequire = anchor.get("data-requires-python")
     pyrequire = html.unescape(pyrequire) if pyrequire else None
 
-    yanked_reason = anchor.get('data-yanked')
+    yanked_reason = anchor.get("data-yanked")
     if yanked_reason:
         yanked_reason = html.unescape(yanked_reason)
 
@@ -271,8 +270,7 @@ def __init__(self, page: "HTMLPage") -> None:
         self.page = page
 
     def __eq__(self, other: object) -> bool:
-        return (isinstance(other, type(self)) and
-                self.page.url == other.page.url)
+        return isinstance(other, type(self)) and self.page.url == other.page.url
 
     def __hash__(self) -> int:
         return hash(self.page.url)
@@ -353,7 +351,7 @@ def __str__(self) -> str:
 def _handle_get_page_fail(
     link: Link,
     reason: Union[str, Exception],
-    meth: Optional[Callable[..., None]] = None
+    meth: Optional[Callable[..., None]] = None,
 ) -> None:
     if meth is None:
         meth = logger.debug
@@ -366,7 +364,8 @@ def _make_html_page(response: Response, cache_link_parsing: bool = True) -> HTML
         response.content,
         encoding=encoding,
         url=response.url,
-        cache_link_parsing=cache_link_parsing)
+        cache_link_parsing=cache_link_parsing,
+    )
 
 
 def _get_html_page(
@@ -377,37 +376,43 @@ def _get_html_page(
             "_get_html_page() missing 1 required keyword argument: 'session'"
         )
 
-    url = link.url.split('#', 1)[0]
+    url = link.url.split("#", 1)[0]
 
     # Check for VCS schemes that do not support lookup as web pages.
     vcs_scheme = _match_vcs_scheme(url)
     if vcs_scheme:
-        logger.warning('Cannot look at %s URL %s because it does not support '
-                       'lookup as web pages.', vcs_scheme, link)
+        logger.warning(
+            "Cannot look at %s URL %s because it does not support lookup as web pages.",
+            vcs_scheme,
+            link,
+        )
         return None
 
     # Tack index.html onto file:// URLs that point to directories
     scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
-    if (scheme == 'file' and os.path.isdir(urllib.request.url2pathname(path))):
+    if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
         # add trailing slash if not present so urljoin doesn't trim
         # final segment
-        if not url.endswith('/'):
-            url += '/'
-        url = urllib.parse.urljoin(url, 'index.html')
-        logger.debug(' file: URL is directory, getting %s', url)
+        if not url.endswith("/"):
+            url += "/"
+        url = urllib.parse.urljoin(url, "index.html")
+        logger.debug(" file: URL is directory, getting %s", url)
 
     try:
         resp = _get_html_response(url, session=session)
     except _NotHTTP:
         logger.warning(
-            'Skipping page %s because it looks like an archive, and cannot '
-            'be checked by a HTTP HEAD request.', link,
+            "Skipping page %s because it looks like an archive, and cannot "
+            "be checked by a HTTP HEAD request.",
+            link,
         )
     except _NotHTML as exc:
         logger.warning(
-            'Skipping page %s because the %s request got Content-Type: %s.'
-            'The only supported Content-Type is text/html',
-            link, exc.request_desc, exc.content_type,
+            "Skipping page %s because the %s request got Content-Type: %s."
+            "The only supported Content-Type is text/html",
+            link,
+            exc.request_desc,
+            exc.content_type,
        )
     except NetworkConnectionError as exc:
         _handle_get_page_fail(link, exc)
@@ -422,8 +427,7 @@ def _get_html_page(
     except requests.Timeout:
         _handle_get_page_fail(link, "timed out")
     else:
-        return _make_html_page(resp,
-                               cache_link_parsing=link.cache_link_parsing)
+        return _make_html_page(resp, cache_link_parsing=link.cache_link_parsing)
     return None
 
 
@@ -451,9 +455,10 @@ def __init__(
 
     @classmethod
     def create(
-        cls, session: PipSession,
+        cls,
+        session: PipSession,
         options: Values,
-        suppress_no_index: bool = False
+        suppress_no_index: bool = False,
     ) -> "LinkCollector":
         """
         :param session: The Session to use to make requests.
@@ -463,19 +468,21 @@ def create(
         index_urls = [options.index_url] + options.extra_index_urls
         if options.no_index and not suppress_no_index:
             logger.debug(
-                'Ignoring indexes: %s',
-                ','.join(redact_auth_from_url(url) for url in index_urls),
+                "Ignoring indexes: %s",
+                ",".join(redact_auth_from_url(url) for url in index_urls),
             )
             index_urls = []
 
         # Make sure find_links is a list before passing to create().
         find_links = options.find_links or []
 
         search_scope = SearchScope.create(
-            find_links=find_links, index_urls=index_urls,
+            find_links=find_links,
+            index_urls=index_urls,
         )
         link_collector = LinkCollector(
-            session=session, search_scope=search_scope,
+            session=session,
+            search_scope=search_scope,
         )
         return link_collector
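The collector.py changes above are mechanical black formatting: single-quoted strings become double-quoted, wrapped calls get one argument per line with a trailing comma, and parenthesized line continuations are collapsed onto one line. A minimal sketch for confirming the module stays black-clean (assuming black is installed; by default black also verifies that reformatted code parses to an equivalent AST when it rewrites files):

    # Report what black would change without writing anything; exit code 0 and
    # an empty diff mean collector.py already matches black's style.
    black --check --diff src/pip/_internal/index/collector.py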
