Merged
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -32,7 +32,7 @@ repos:
exclude: ^pandas/tests
args: [--select, "ANN001,ANN2", --fix-only, --exit-non-zero-on-fix]
- id: ruff-format
exclude: ^scripts|^pandas/tests/frame/test_query_eval.py
exclude: ^pandas/tests/frame/test_query_eval.py
- repo: https://github.com/jendrikseipp/vulture
rev: v2.14
hooks:
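Dropping ^scripts from the ruff-format exclude means the formatter now covers everything under scripts/, which is what the remaining hunks in this diff show. A minimal sketch of the most common rewrite seen below: an implicit string concatenation that fits on one line is collapsed into a single literal (the constant name here is illustrative only).

# Before: an implicit concatenation split across lines purely for layout.
MESSAGE = (
    "first half of the message, "
    "second half of the message"
)

# After formatting: the pieces fit within the line-length limit, so they are
# merged into a single literal.
MESSAGE = "first half of the message, second half of the message"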
3 changes: 1 addition & 2 deletions scripts/check_for_inconsistent_pandas_namespace.py
@@ -30,8 +30,7 @@
from typing import NamedTuple

ERROR_MESSAGE = (
"{path}:{lineno}:{col_offset}: "
"Found both '{prefix}.{name}' and '{name}' in {path}"
"{path}:{lineno}:{col_offset}: Found both '{prefix}.{name}' and '{name}' in {path}"
)


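The ERROR_MESSAGE change above only merges an implicit string concatenation; the template itself is unchanged. A quick sketch of how the template renders, with invented values (the real script fills them in from the AST it walks):

ERROR_MESSAGE = (
    "{path}:{lineno}:{col_offset}: Found both '{prefix}.{name}' and '{name}' in {path}"
)

# Hypothetical values, purely to show the rendered message.
print(
    ERROR_MESSAGE.format(
        path="pandas/core/sample.py",
        lineno=3,
        col_offset=8,
        prefix="pd",
        name="Categorical",
    )
)
# pandas/core/sample.py:3:8: Found both 'pd.Categorical' and 'Categorical' in pandas/core/sample.py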
1 change: 1 addition & 0 deletions scripts/check_test_naming.py
@@ -8,6 +8,7 @@
NOTE: if this finds a false positive, you can add the comment `# not a test` to the
class or function definition. Though hopefully that shouldn't be necessary.
"""

from __future__ import annotations

import argparse
1 change: 1 addition & 0 deletions scripts/generate_pip_deps_from_conda.py
@@ -12,6 +12,7 @@
generated with this script:
$ python scripts/generate_pip_deps_from_conda.py --compare
"""

import argparse
import pathlib
import re
1 change: 1 addition & 0 deletions scripts/pandas_errors_documented.py
@@ -6,6 +6,7 @@

pre-commit run pandas-errors-documented --all-files
"""

from __future__ import annotations

import argparse
1 change: 1 addition & 0 deletions scripts/sort_whatsnew_note.py
@@ -23,6 +23,7 @@

pre-commit run sort-whatsnew-items --all-files
"""

from __future__ import annotations

import argparse
5 changes: 1 addition & 4 deletions scripts/tests/test_check_test_naming.py
@@ -24,10 +24,7 @@
0,
),
(
"class Foo: # not a test\n"
" pass\n"
"def test_foo():\n"
" Class.foo()\n",
"class Foo: # not a test\n pass\ndef test_foo():\n Class.foo()\n",
"",
0,
),
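The rewritten test parameter is the same string value as before; Python concatenates adjacent string literals at compile time, so only the source layout changed. A tiny equivalence check (the indentation inside the literals is illustrative):

multi_line = (
    "class Foo:  # not a test\n"
    "    pass\n"
)
single_line = "class Foo:  # not a test\n    pass\n"
assert multi_line == single_line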
8 changes: 2 additions & 6 deletions scripts/tests/test_inconsistent_namespace_check.py
@@ -5,14 +5,10 @@
)

BAD_FILE_0 = (
"from pandas import Categorical\n"
"cat_0 = Categorical()\n"
"cat_1 = pd.Categorical()"
"from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = pd.Categorical()"
)
BAD_FILE_1 = (
"from pandas import Categorical\n"
"cat_0 = pd.Categorical()\n"
"cat_1 = Categorical()"
"from pandas import Categorical\ncat_0 = pd.Categorical()\ncat_1 = Categorical()"
)
BAD_FILE_2 = (
"from pandas import Categorical\n"
11 changes: 6 additions & 5 deletions scripts/tests/test_validate_docstrings.py
@@ -6,6 +6,7 @@
from scripts import validate_docstrings


# fmt: off
class BadDocstrings:
"""Everything here has a bad docstring"""

@@ -88,6 +89,7 @@ def leftover_files(self) -> None:
>>> import pathlib
>>> pathlib.Path("foo.txt").touch()
"""
# fmt: on


class TestValidator:
@@ -227,13 +229,13 @@ def test_validate_all_ignore_errors(self, monkeypatch):
"errors": [
("ER01", "err desc"),
("ER02", "err desc"),
("ER03", "err desc")
("ER03", "err desc"),
],
"warnings": [],
"examples_errors": "",
"deprecated": True,
"file": "file1",
"file_line": "file_line1"
"file_line": "file_line1",
},
)
monkeypatch.setattr(
@@ -272,14 +274,13 @@ def test_validate_all_ignore_errors(self, monkeypatch):
None: {"ER03"},
"pandas.DataFrame.align": {"ER01"},
# ignoring an error that is not requested should be of no effect
"pandas.Index.all": {"ER03"}
}
"pandas.Index.all": {"ER03"},
},
)
# two functions * two not global ignored errors - one function ignored error
assert exit_status == 2 * 2 - 1



class TestApiItems:
@property
def api_doc(self):
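The new # fmt: off / # fmt: on pair tells ruff-format to leave the region between the two comments untouched, which keeps the deliberately malformed docstrings in BadDocstrings exactly as written. A minimal sketch of the mechanism on an invented function:

# fmt: off
def kept_as_written(a,b):   # inside the markers, ruff-format leaves spacing alone
    return a+b
# fmt: on


def reformatted(a, b):  # outside the markers, normal formatting rules apply
    return a + b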
7 changes: 4 additions & 3 deletions scripts/validate_docstrings.py
@@ -13,6 +13,7 @@
$ ./validate_docstrings.py
$ ./validate_docstrings.py pandas.DataFrame.head
"""

from __future__ import annotations

import argparse
@@ -380,13 +381,13 @@ def print_validate_all_results(
)
for err_code in actual_failures - expected_failures:
sys.stdout.write(
f'{prefix}{res["file"]}:{res["file_line"]}:'
f"{prefix}{res['file']}:{res['file_line']}:"
f"{err_code}:{func_name}:{error_messages[err_code]}\n"
)
exit_status += 1
for err_code in ignore_errors.get(func_name, set()) - actual_failures:
sys.stdout.write(
f'{prefix}{res["file"]}:{res["file_line"]}:'
f"{prefix}{res['file']}:{res['file_line']}:"
f"{err_code}:{func_name}:"
"EXPECTED TO FAIL, BUT NOT FAILING\n"
)
@@ -419,7 +420,7 @@ def header(title, width=80, char="#") -> str:

sys.stderr.write(header("Validation"))
if result["errors"]:
sys.stderr.write(f'{len(result["errors"])} Errors found for `{func_name}`:\n')
sys.stderr.write(f"{len(result['errors'])} Errors found for `{func_name}`:\n")
for err_code, err_desc in result["errors"]:
sys.stderr.write(f"\t{err_code}\t{err_desc}\n")
else:
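The f-string edits above only swap which quote character sits on the outside; the rendered text is identical on every supported Python version because the inner and outer quotes still differ. A small equivalence check with stand-in values for res and prefix:

res = {"file": "pandas/core/frame.py", "file_line": 42}  # stand-in values
prefix = ""

old_style = f'{prefix}{res["file"]}:{res["file_line"]}:'
new_style = f"{prefix}{res['file']}:{res['file_line']}:"
assert old_style == new_style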
1 change: 1 addition & 0 deletions scripts/validate_exception_location.py
@@ -18,6 +18,7 @@
As a pre-commit hook:
pre-commit run validate-errors-locations --all-files
"""

from __future__ import annotations

import argparse
8 changes: 3 additions & 5 deletions scripts/validate_min_versions_in_sync.py
@@ -12,6 +12,7 @@

pre-commit run validate-min-versions-in-sync --all-files
"""

from __future__ import annotations

import pathlib
@@ -100,14 +101,11 @@ def get_operator_from(dependency: str) -> str | None:


def get_yaml_map_from(
yaml_dic: list[str | dict[str, list[str]]]
yaml_dic: list[str | dict[str, list[str]]],
) -> dict[str, list[str] | None]:
yaml_map: dict[str, list[str] | None] = {}
for dependency in yaml_dic:
if (
isinstance(dependency, dict)
or dependency in yaml_map
):
if isinstance(dependency, dict) or dependency in yaml_map:
continue
search_text = str(dependency)
operator = get_operator_from(search_text)
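The trailing comma added to the get_yaml_map_from signature is ruff-format's magic trailing comma: the formatter appends it when a parameter list has to stay split across lines, and its presence keeps the list expanded on later runs. An illustrative sketch, not taken from the script:

# With a trailing comma after the last parameter, the formatter keeps the
# signature exploded one parameter per line, even though it would fit on one.
def exploded(
    first: int,
    second: int,
) -> int:
    return first + second


# Without the trailing comma, the same signature collapses onto a single line.
def collapsed(first: int, second: int) -> int:
    return first + second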
4 changes: 3 additions & 1 deletion scripts/validate_rst_title_capitalization.py
@@ -11,6 +11,7 @@
From the command-line:
python scripts/validate_rst_title_capitalization.py <rst file>
"""

from __future__ import annotations

import argparse
@@ -271,7 +272,8 @@ def main(source_paths: list[str]) -> int:
if title != correct_title_capitalization(title):
print(
f"""{filename}:{line_number}:{err_msg} "{title}" to "{
correct_title_capitalization(title)}" """
correct_title_capitalization(title)
}" """
)
number_of_errors += 1

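The reflowed print call above works because, inside a triple-quoted f-string, the expression in a replacement field may itself span several lines. A stand-alone illustration with an invented helper:

def shout(text: str) -> str:
    return text.upper()


title = "getting started"
print(
    f"""suggested fix: "{title}" to "{
        shout(title)
    }" """
)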
59 changes: 25 additions & 34 deletions scripts/validate_unwanted_patterns.py
@@ -184,17 +184,11 @@ def strings_with_wrong_placed_whitespace(

For example:

>>> rule = (
... "We want the space at the end of the line, "
... "not at the beginning"
... )
>>> rule = "We want the space at the end of the line, not at the beginning"

Instead of:

>>> rule = (
... "We want the space at the end of the line,"
... " not at the beginning"
... )
>>> rule = "We want the space at the end of the line, not at the beginning"

Parameters
----------
@@ -234,35 +228,29 @@ def has_wrong_whitespace(first_line: str, second_line: str) -> bool:

For example, this is bad:

>>> rule = (
... "We want the space at the end of the line,"
... " not at the beginning"
... )
>>> rule = "We want the space at the end of the line, not at the beginning"

And what we want is:

>>> rule = (
... "We want the space at the end of the line, "
... "not at the beginning"
... )
>>> rule = "We want the space at the end of the line, not at the beginning"

And if the string is ending with a new line character (\n) we
do not want any trailing whitespaces after it.

For example, this is bad:

>>> rule = (
... "We want the space at the begging of "
... "the line if the previous line is ending with a \n "
... "not at the end, like always"
... "We want the space at the begging of "
... "the line if the previous line is ending with a \n "
... "not at the end, like always"
... )

And what we do want is:

>>> rule = (
... "We want the space at the begging of "
... "the line if the previous line is ending with a \n"
... " not at the end, like always"
... "We want the space at the begging of "
... "the line if the previous line is ending with a \n"
... " not at the end, like always"
... )
"""
if first_line.endswith(r"\n"):
@@ -324,10 +312,14 @@ def nodefault_used_not_only_for_typing(file_obj: IO[str]) -> Iterable[tuple[int,
while nodes:
in_annotation, node = nodes.pop()
if not in_annotation and (
(isinstance(node, ast.Name) # Case `NoDefault`
and node.id == "NoDefault")
or (isinstance(node, ast.Attribute) # Cases e.g. `lib.NoDefault`
and node.attr == "NoDefault")
(
isinstance(node, ast.Name) # Case `NoDefault`
and node.id == "NoDefault"
)
or (
isinstance(node, ast.Attribute) # Cases e.g. `lib.NoDefault`
and node.attr == "NoDefault"
)
):
yield (node.lineno, "NoDefault is used not only for typing")

@@ -348,6 +340,7 @@ def nodefault_used_not_only_for_typing(file_obj: IO[str]) -> Iterable[tuple[int,
if isinstance(value, ast.AST)
)


def doesnt_use_pandas_warnings(file_obj: IO[str]) -> Iterable[tuple[int, str]]:
"""
Checking that pandas-specific warnings are used for deprecations.
@@ -371,9 +364,8 @@ def doesnt_use_pandas_warnings(file_obj: IO[str]) -> Iterable[tuple[int, str]]:
if not isinstance(node, ast.Call):
continue

if (
isinstance(node.func, ast.Attribute)
and isinstance(node.func.value, ast.Name)
if isinstance(node.func, ast.Attribute) and isinstance(
node.func.value, ast.Name
):
# Check for `warnings.warn`.
if node.func.value.id != "warnings" or node.func.attr != "warn":
@@ -387,18 +379,17 @@ def doesnt_use_pandas_warnings(file_obj: IO[str]) -> Iterable[tuple[int, str]]:
for k in range(node.lineno - 1, node.end_lineno + 1)
):
continue
values = (
[arg.id for arg in node.args if isinstance(arg, ast.Name)]
+ [kw.value.id for kw in node.keywords if kw.arg == "category"]
)
values = [arg.id for arg in node.args if isinstance(arg, ast.Name)] + [
kw.value.id for kw in node.keywords if kw.arg == "category"
]
for value in values:
matches = re.match(DEPRECATION_WARNINGS_PATTERN, value)
if matches is not None:
yield (
node.lineno,
f"Don't use {matches[0]}, use a pandas-specific warning in "
f"pd.errors instead. You can add "
f"`# pdlint: ignore[warning_class]` to override."
f"`# pdlint: ignore[warning_class]` to override.",
)


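For context on the reflowed doesnt_use_pandas_warnings check: its behaviour is unchanged, it still walks warnings.warn calls and flags categories matching DEPRECATION_WARNINGS_PATTERN, pointing authors at pandas-specific classes in pandas.errors. A rough sketch of the kind of call it is after, assuming the pattern covers the standard deprecation categories:

import warnings

# Likely flagged (assuming DEPRECATION_WARNINGS_PATTERN matches the standard
# categories such as DeprecationWarning / FutureWarning):
warnings.warn("old behaviour is deprecated", DeprecationWarning)

# The check's message asks for a pandas-specific warning class from pandas.errors
# instead, or an explicit `# pdlint: ignore[warning_class]` comment to override.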