diff --git a/.github/workflows/check-code-quality.yml b/.github/workflows/check-code-quality.yml index 9e38edf2..fcd7d0bf 100644 --- a/.github/workflows/check-code-quality.yml +++ b/.github/workflows/check-code-quality.yml @@ -29,25 +29,16 @@ jobs: - name: Upgrade pip run: pip install --upgrade pip - - name: Install and run isort - run: | - pip install isort - isort . - - - name: Install and run black - run: | - pip install black[jupyter] - black . - - name: Install and run mypy run: | pip install mypy mypy src - - name: Install and run pylint + - name: Install and run ruff run: | - pip install pylint . - pylint power_grid_model_io + pip install ruff . + ruff check . + ruff format . - name: If needed raise error run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index db053f9d..70f4d9df 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,14 +7,15 @@ repos: rev: v5.0.2 hooks: - id: reuse - - repo: https://github.com/pycqa/isort - rev: 6.0.1 + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.11.12 hooks: - - id: isort - - repo: https://github.com/psf/black - rev: 25.1.0 - hooks: - - id: black-jupyter + # Run the linter. + - id: ruff-check + args: [ --fix ] + # Run the formatter. 
+ - id: ruff-format - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.16.0 hooks: @@ -22,14 +23,6 @@ repos: additional_dependencies: [numpy, pandas] - repo: local hooks: - - id: pylint - name: pylint - entry: pylint - files: ^src/.+\.py$ - language: system - types: [ python ] - args: [ "--rcfile=pyproject.toml" ] - require_serial: true - id: pytest name: pytest entry: pytest diff --git a/docs/examples/arrow_example.ipynb b/docs/examples/arrow_example.ipynb index b2715d16..c6891393 100644 --- a/docs/examples/arrow_example.ipynb +++ b/docs/examples/arrow_example.ipynb @@ -26,22 +26,20 @@ "outputs": [], "source": [ "%%capture cap --no-stderr\n", - "from IPython.display import display\n", "from typing import Iterable\n", "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import pyarrow as pa\n", + "from IPython.display import display\n", "from power_grid_model import (\n", - " PowerGridModel,\n", - " initialize_array,\n", - " CalculationMethod,\n", - " power_grid_meta_data,\n", + " ComponentAttributeFilterOptions,\n", " ComponentType,\n", " DatasetType,\n", - " ComponentAttributeFilterOptions,\n", + " PowerGridModel,\n", + " power_grid_meta_data,\n", ")\n", - "from power_grid_model.data_types import SingleColumnarData\n", - "import pyarrow as pa\n", - "import pandas as pd\n", - "import numpy as np" + "from power_grid_model.data_types import SingleColumnarData" ] }, { diff --git a/docs/examples/pandapower_example.ipynb b/docs/examples/pandapower_example.ipynb index fbc1ea93..0b61d257 100644 --- a/docs/examples/pandapower_example.ipynb +++ b/docs/examples/pandapower_example.ipynb @@ -42,6 +42,7 @@ "outputs": [], "source": [ "import warnings\n", + "\n", "import pandapower as pp\n", "\n", "warnings.filterwarnings(\"ignore\", module=\"pandapower\", category=FutureWarning) # Hide warnings related to pandas\n", @@ -1046,8 +1047,9 @@ "source": [ "%%capture cap --no-stderr\n", "\n", - "from power_grid_model import PowerGridModel, CalculationType\n", + "from 
power_grid_model import CalculationType, PowerGridModel\n", "from power_grid_model.validation import assert_valid_input_data\n", + "\n", "from power_grid_model_io.converters import PandaPowerConverter\n", "\n", "output_file = \"data/pandapower/example_simple_output.json\"\n", diff --git a/docs/examples/pgm_json_example.ipynb b/docs/examples/pgm_json_example.ipynb index e46ca5e5..a303f9cb 100644 --- a/docs/examples/pgm_json_example.ipynb +++ b/docs/examples/pgm_json_example.ipynb @@ -105,7 +105,8 @@ ], "source": [ "from pathlib import Path\n", - "from IPython.display import display, Markdown\n", + "\n", + "from IPython.display import Markdown, display\n", "\n", "with Path(source_file).open() as json_file:\n", " display(Markdown(f\"
{json_file.read()}\"))"
@@ -578,7 +579,8 @@
],
"source": [
"from pathlib import Path\n",
- "from IPython.display import display, Markdown\n",
+ "\n",
+ "from IPython.display import Markdown, display\n",
"\n",
"with Path(destination_file).open() as json_file:\n",
" display(Markdown(f\"{json_file.read()}\"))"
@@ -599,6 +601,7 @@
"source": [
"%%capture cap --no-stderr\n",
"from power_grid_model import PowerGridModel\n",
+ "\n",
"from power_grid_model_io.converters import PgmJsonConverter\n",
"\n",
"source_file = \"data/tiny-net/input.json\"\n",
diff --git a/docs/examples/vision_example.ipynb b/docs/examples/vision_example.ipynb
index 63124a5e..cd817217 100644
--- a/docs/examples/vision_example.ipynb
+++ b/docs/examples/vision_example.ipynb
@@ -552,7 +552,8 @@
],
"source": [
"from pathlib import Path\n",
- "from IPython.display import display, Markdown\n",
+ "\n",
+ "from IPython.display import Markdown, display\n",
"\n",
"with Path(destination_file).open() as json_file:\n",
" display(Markdown(f\"{json_file.read()}\"))"
@@ -651,9 +652,10 @@
"source": [
"%%capture cap --no-stderr\n",
"\n",
- "from power_grid_model import PowerGridModel, CalculationType\n",
+ "from power_grid_model import CalculationType, PowerGridModel\n",
"from power_grid_model.validation import assert_valid_input_data\n",
- "from power_grid_model_io.converters import VisionExcelConverter, PgmJsonConverter\n",
+ "\n",
+ "from power_grid_model_io.converters import PgmJsonConverter, VisionExcelConverter\n",
"\n",
"source_file = \"data/vision/example.xlsx\"\n",
"destination_file = \"data/vision/sym_output.json\"\n",
diff --git a/pyproject.toml b/pyproject.toml
index 57365120..bf8ea3c7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,6 +44,7 @@ dev = [
"pylint",
"pytest",
"pytest-cov",
+ "ruff",
"pydantic>2", # Used in unit tests
"pandapower>2.11.1",
]
@@ -89,33 +90,43 @@ addopts = [
]
xfail_strict = true
-[tool.black]
+[tool.ruff]
+# Same as Black.
line-length = 120
-target-version = ["py311", "py312", "py313"]
+indent-width = 4
+show-fixes = true
-[tool.isort]
-profile = "black"
-line_length = 120
+# Assume Python 3.11
+target-version = "py311"
-[tool.pylint]
-max-line-length = 120
-ignore-paths = [
- "docs/",
- "examples/",
- "tests/",
- "setup.py",
-]
-disable = [
- "fixme", # allow todos
-]
-good-names=[
- "ex", # exception
- "i", # iterator or current
- "p", # active power
- "q", # reactive power
- "s", # power (p + q)
- "v", # voltage
+[tool.ruff.lint]
+select = [
+ # pycodestyle
+ "E",
+ # Pyflakes
+ "F",
+ # isort
+ "I",
+ "SIM",
+ "YTT",
+ "BLE",
+ "PERF",
+ "Q",
+ "ICN",
+ "ISC",
+ "G",
+ "LOG",
+ "EXE",
+ "FA",
+ "FURB",
+ "FLY",
+ "SLOT",
]
+ignore = ["SIM108", "SIM118", "SIM110", "SIM211"]
+
+[tool.ruff.lint.isort]
+# Combine aliased ("as") imports from the same module into a single import statement.
+combine-as-imports = true
[tool.mypy]
follow_imports = "silent"
diff --git a/set_pypi_version.py b/set_pypi_version.py
index 73de0dbe..db891833 100644
--- a/set_pypi_version.py
+++ b/set_pypi_version.py
@@ -8,7 +8,6 @@
import os
from pathlib import Path
-from typing import cast
import requests
@@ -26,7 +25,7 @@ def set_version(pkg_dir: Path):
ref = os.environ["GITHUB_REF"]
build_number = os.environ["GITHUB_RUN_NUMBER"]
# short hash number in numeric
- short_hash = f'{int(f"0x{sha[0:6]}", base=16):08}'
+ short_hash = f"{int(f'0x{sha[0:6]}', base=16):08}"
if "main" in ref:
# main branch
diff --git a/src/power_grid_model_io/converters/__init__.py b/src/power_grid_model_io/converters/__init__.py
index 02723cf0..057bcc61 100644
--- a/src/power_grid_model_io/converters/__init__.py
+++ b/src/power_grid_model_io/converters/__init__.py
@@ -8,3 +8,5 @@
from power_grid_model_io.converters.pandapower_converter import PandaPowerConverter
from power_grid_model_io.converters.pgm_json_converter import PgmJsonConverter
from power_grid_model_io.converters.vision_excel_converter import VisionExcelConverter
+
+__all__ = ["PandaPowerConverter", "PgmJsonConverter", "VisionExcelConverter"]
diff --git a/src/power_grid_model_io/converters/base_converter.py b/src/power_grid_model_io/converters/base_converter.py
index 21294891..7d9bb3d0 100644
--- a/src/power_grid_model_io/converters/base_converter.py
+++ b/src/power_grid_model_io/converters/base_converter.py
@@ -4,6 +4,7 @@
"""
Abstract converter class
"""
+
import logging
from abc import ABC, abstractmethod
from typing import Generic, Optional, Tuple, TypeVar
diff --git a/src/power_grid_model_io/converters/pandapower_converter.py b/src/power_grid_model_io/converters/pandapower_converter.py
index 36fb1d21..b93a6c3f 100644
--- a/src/power_grid_model_io/converters/pandapower_converter.py
+++ b/src/power_grid_model_io/converters/pandapower_converter.py
@@ -461,7 +461,7 @@ def _create_pgm_input_sources(self):
}
if not all(checks.values()):
failed_checks = ", ".join([key for key, value in checks.items() if not value])
- logger.warning(f"Zero sequence parameters given in external grid shall be ignored:{failed_checks}")
+ logger.warning("Zero sequence parameters given in external grid shall be ignored: %s", failed_checks)
pgm_sources = initialize_array(data_type="input", component_type="source", shape=len(pp_ext_grid))
pgm_sources["id"] = self._generate_ids("ext_grid", pp_ext_grid.index)
@@ -753,7 +753,7 @@ def _create_pgm_input_transformers(self): # pylint: disable=too-many-statements
}
if not all(checks.values()):
failed_checks = ", ".join([key for key, value in checks.items() if not value])
- logger.warning(f"Zero sequence parameters given in trafo shall be ignored:{failed_checks}")
+ logger.warning("Zero sequence parameters given in trafo shall be ignored: %s", failed_checks)
# Do not use taps when mandatory tap data is not available
no_taps = np.equal(tap_side, None) | np.isnan(tap_pos) | np.isnan(tap_nom) | np.isnan(tap_size)
@@ -862,7 +862,7 @@ def _create_pgm_input_three_winding_transformers(self):
}
if not all(checks.values()):
failed_checks = ", ".join([key for key, value in checks.items() if not value])
- logger.warning(f"Zero sequence parameters given in trafo3w are ignored: {failed_checks}")
+ logger.warning("Zero sequence parameters given in trafo3w are ignored: %s", failed_checks)
# Do not use taps when mandatory tap data is not available
no_taps = np.equal(tap_side, None) | np.isnan(tap_pos) | np.isnan(tap_nom) | np.isnan(tap_size)
diff --git a/src/power_grid_model_io/converters/pgm_json_converter.py b/src/power_grid_model_io/converters/pgm_json_converter.py
index 7a38ae93..e7f3c99f 100644
--- a/src/power_grid_model_io/converters/pgm_json_converter.py
+++ b/src/power_grid_model_io/converters/pgm_json_converter.py
@@ -238,7 +238,7 @@ def _serialize_dataset(self, data: SingleDataset, extra_info: Optional[ExtraInfo
"""
# This should be a single data set
- for component, array in data.items():
+ for array in data.values():
if not isinstance(array, np.ndarray) or array.ndim != 1:
raise ValueError("Invalid data format")
diff --git a/src/power_grid_model_io/data_stores/vision_excel_file_store.py b/src/power_grid_model_io/data_stores/vision_excel_file_store.py
index 56b6326c..f28668c7 100644
--- a/src/power_grid_model_io/data_stores/vision_excel_file_store.py
+++ b/src/power_grid_model_io/data_stores/vision_excel_file_store.py
@@ -4,6 +4,7 @@
"""
Vision Excel file store
"""
+
from pathlib import Path
from typing import Optional
diff --git a/src/power_grid_model_io/data_types/__init__.py b/src/power_grid_model_io/data_types/__init__.py
index 14cc28a7..24a862e5 100644
--- a/src/power_grid_model_io/data_types/__init__.py
+++ b/src/power_grid_model_io/data_types/__init__.py
@@ -7,3 +7,5 @@
from power_grid_model_io.data_types._data_types import ExtraInfo, ExtraInfoLookup, StructuredData
from power_grid_model_io.data_types.tabular_data import LazyDataFrame, TabularData
+
+__all__ = ["ExtraInfo", "ExtraInfoLookup", "StructuredData", "LazyDataFrame", "TabularData"]
diff --git a/src/power_grid_model_io/functions/__init__.py b/src/power_grid_model_io/functions/__init__.py
index 8d651f2e..7b5fc59f 100644
--- a/src/power_grid_model_io/functions/__init__.py
+++ b/src/power_grid_model_io/functions/__init__.py
@@ -17,3 +17,15 @@
value_or_default,
value_or_zero,
)
+
+__all__ = [
+ "both_zeros_to_nan",
+ "complex_inverse_imaginary_part",
+ "complex_inverse_real_part",
+ "degrees_to_clock",
+ "get_winding",
+ "has_value",
+ "is_greater_than",
+ "value_or_default",
+ "value_or_zero",
+]
diff --git a/src/power_grid_model_io/mappings/tabular_mapping.py b/src/power_grid_model_io/mappings/tabular_mapping.py
index 6c815cc2..23a56995 100644
--- a/src/power_grid_model_io/mappings/tabular_mapping.py
+++ b/src/power_grid_model_io/mappings/tabular_mapping.py
@@ -4,6 +4,7 @@
"""
Tabular data mapping helper class
"""
+
from typing import Dict, Generator, List, Tuple, Union
import structlog
diff --git a/src/power_grid_model_io/mappings/value_mapping.py b/src/power_grid_model_io/mappings/value_mapping.py
index efa7814f..b0390380 100644
--- a/src/power_grid_model_io/mappings/value_mapping.py
+++ b/src/power_grid_model_io/mappings/value_mapping.py
@@ -4,6 +4,7 @@
"""
Value substitution helper class
"""
+
from typing import Dict, Optional, Union
import structlog
diff --git a/src/power_grid_model_io/utils/auto_id.py b/src/power_grid_model_io/utils/auto_id.py
index 756cb34c..8fc4ff5f 100644
--- a/src/power_grid_model_io/utils/auto_id.py
+++ b/src/power_grid_model_io/utils/auto_id.py
@@ -4,6 +4,7 @@
"""
Automatic ID generator class
"""
+
import collections
from collections.abc import Hashable
from typing import Any, Dict, List, Optional, Union
diff --git a/src/power_grid_model_io/utils/dict.py b/src/power_grid_model_io/utils/dict.py
index 215c4eda..e84a6436 100644
--- a/src/power_grid_model_io/utils/dict.py
+++ b/src/power_grid_model_io/utils/dict.py
@@ -4,6 +4,7 @@
"""
General dictionary utilities
"""
+
from copy import deepcopy
from typing import Dict
diff --git a/src/power_grid_model_io/utils/excel_ambiguity_checker.py b/src/power_grid_model_io/utils/excel_ambiguity_checker.py
index db06a105..463d12c6 100644
--- a/src/power_grid_model_io/utils/excel_ambiguity_checker.py
+++ b/src/power_grid_model_io/utils/excel_ambiguity_checker.py
@@ -19,6 +19,7 @@
- xml.etree.ElementTree for parsing XML structures within the Excel file.
- zipfile to handle the Excel file as a ZIP archive for parsing.
"""
+
import os
import xml.etree.ElementTree as ET
import zipfile
@@ -80,11 +81,10 @@ def _parse_zip(self, zip_file) -> List[Optional[str]]:
list: A list of shared strings used in the Excel file.
"""
shared_strings_path = SHARED_STR_PATH
- shared_strings = []
+ shared_strings: List[Optional[str]] = []
with zip_file.open(shared_strings_path) as f:
tree = ET.parse(f)
- for si in tree.findall(FIND_T, namespaces=XML_NAME_SPACE):
- shared_strings.append(si.text)
+ shared_strings.extend(si.text for si in tree.findall(FIND_T, namespaces=XML_NAME_SPACE))
return shared_strings
def _get_column_names_from_row(self, row, shared_strings) -> List[Optional[str]]:
diff --git a/src/power_grid_model_io/utils/modules.py b/src/power_grid_model_io/utils/modules.py
index 4443861f..48c32880 100644
--- a/src/power_grid_model_io/utils/modules.py
+++ b/src/power_grid_model_io/utils/modules.py
@@ -4,6 +4,7 @@
"""
Module utilities, expecially useful for loading optional dependencies
"""
+
from importlib import import_module
from typing import Callable
diff --git a/tests/conftest.py b/tests/conftest.py
index e3ee4ce3..5db6a751 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -2,9 +2,9 @@
#
# SPDX-License-Identifier: MPL-2.0
+from contextlib import suppress
+
import pandas as pd
-try:
+with suppress(pd.errors.OptionError):
pd.set_option("future.no_silent_downcasting", True)
-except pd.errors.OptionError:
- pass
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 5aee0e4f..52e59ce7 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -2,10 +2,10 @@
#
# SPDX-License-Identifier: MPL-2.0
+from contextlib import suppress
+
import pandas as pd
-try:
+with suppress(pd.errors.OptionError):
# TODO(mgovers) We're ready for Pandas 3.x, but pandapower is not. Move to parent conftest when it is.
pd.set_option("mode.copy_on_write", True)
-except pd.errors.OptionError:
- pass
diff --git a/tests/unit/converters/test_tabular_converter.py b/tests/unit/converters/test_tabular_converter.py
index 2661fa53..dd64e902 100644
--- a/tests/unit/converters/test_tabular_converter.py
+++ b/tests/unit/converters/test_tabular_converter.py
@@ -233,7 +233,7 @@ def test_convert_col_def_to_attribute(
):
with pytest.raises(
KeyError,
- match=r"Could not find attribute 'incorrect_attribute' for 'nodes'. " r"\(choose from: id, u_rated\)",
+ match=r"Could not find attribute 'incorrect_attribute' for 'nodes'. \(choose from: id, u_rated\)",
):
converter._convert_col_def_to_attribute(
data=tabular_data_no_units_no_substitutions,
diff --git a/tests/unit/converters/test_vision_excel_converter.py b/tests/unit/converters/test_vision_excel_converter.py
index 6a9456e1..99bcfaec 100644
--- a/tests/unit/converters/test_vision_excel_converter.py
+++ b/tests/unit/converters/test_vision_excel_converter.py
@@ -146,4 +146,4 @@ def test_ambiguity_in_vision_excel():
ambiguious_test_file = Path(__file__).parents[2] / "data" / "vision" / "excel_ambiguity_check_data.xlsx"
excel_file_checker = ExcelAmbiguityChecker(file_path=ambiguious_test_file.as_posix())
res, _ = excel_file_checker.check_ambiguity()
- assert res == True
+ assert res
diff --git a/tests/unit/data_stores/test_excel_file_store.py b/tests/unit/data_stores/test_excel_file_store.py
index db62feb3..96e0d167 100644
--- a/tests/unit/data_stores/test_excel_file_store.py
+++ b/tests/unit/data_stores/test_excel_file_store.py
@@ -4,7 +4,7 @@
from pathlib import Path
from typing import Dict
-from unittest.mock import MagicMock, call, mock_open, patch
+from unittest.mock import MagicMock, call, patch
import numpy as np
import pandas as pd
diff --git a/tests/unit/mappings/test_field_mapping.py b/tests/unit/mappings/test_field_mapping.py
index a110a29d..2331b490 100644
--- a/tests/unit/mappings/test_field_mapping.py
+++ b/tests/unit/mappings/test_field_mapping.py
@@ -3,7 +3,6 @@
# SPDX-License-Identifier: MPL-2.0
import logging
-import pytest
import structlog
from power_grid_model_io.mappings.field_mapping import FieldMapping
diff --git a/tests/unit/utils/test_json.py b/tests/unit/utils/test_json.py
index bc6275ef..a2c945e3 100644
--- a/tests/unit/utils/test_json.py
+++ b/tests/unit/utils/test_json.py
@@ -20,9 +20,8 @@ def test_compact_json_dump():
string_stream = io.StringIO()
compact_json_dump(data, string_stream, indent=2, max_level=0)
assert (
- string_stream.getvalue()
- == """{"node": [{"id": 1, "x": 2}, {"id": 3, "x": 4}],"""
- + """ "line": [{"id": 5, "x": 6}, {"id": 7, "x": {"y": 8.1, "z": 8.2}}]}"""
+ string_stream.getvalue() == """{"node": [{"id": 1, "x": 2}, {"id": 3, "x": 4}],"""
+ """ "line": [{"id": 5, "x": 6}, {"id": 7, "x": {"y": 8.1, "z": 8.2}}]}"""
)
string_stream = io.StringIO()
@@ -165,7 +164,7 @@ def test_json_encoder(numpy_type: type, native_type: type):
value = encoder.default(value)
# Assert
- assert type(value) == native_type
+ assert type(value) is native_type
assert value == 123
@@ -178,7 +177,7 @@ def test_json_encoder__np_array():
value = encoder.default(value)
# Assert
- assert type(value) == list
+ assert type(value) is list
assert value == [1, 2, 3]
diff --git a/tests/utils.py b/tests/utils.py
index f918b51a..6e0787ee 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -93,21 +93,20 @@ def _apply_operator(fn: str, left: Any, right: Any):
obj = copy(left)
else:
obj = MockFn(fn, left)
- if MockFn._is_operator(right):
- if (
- obj.fn == "+"
- and right.fn == "+"
- or obj.fn == "-"
- and right.fn == "+"
- or obj.fn == "*"
- and right.fn == "*"
- or obj.fn == "/"
- and right.fn == "*"
- or obj.fn == "&"
- and right.fn == "&"
- ):
- obj.args += right.args
- return obj
+ if MockFn._is_operator(right) and (
+ obj.fn == "+"
+ and right.fn == "+"
+ or obj.fn == "-"
+ and right.fn == "+"
+ or obj.fn == "*"
+ and right.fn == "*"
+ or obj.fn == "/"
+ and right.fn == "*"
+ or obj.fn == "&"
+ and right.fn == "&"
+ ):
+ obj.args += right.args
+ return obj
obj.args += [right]
return obj
@@ -176,7 +175,7 @@ def isnan(x: Any):
return False
def eq(left, right) -> bool:
- if type(left) != type(right):
+ if type(left) is not type(right):
return False
if isinstance(left, pd.DataFrame) and left.columns != right.columns:
return False
diff --git a/tests/validation/conftest.py b/tests/validation/conftest.py
index 9dac814e..791d3b94 100644
--- a/tests/validation/conftest.py
+++ b/tests/validation/conftest.py
@@ -2,15 +2,13 @@
#
# SPDX-License-Identifier: MPL-2.0
+from contextlib import suppress
+
import pandas as pd
-try:
+with suppress(pd.errors.OptionError):
pd.set_option("future.no_silent_downcasting", True)
-except pd.errors.OptionError:
- pass
-try:
+with suppress(pd.errors.OptionError):
# TODO(mgovers) We're ready for Pandas 3.x, but pandapower is not. Move to parent conftest when it is.
pd.set_option("mode.copy_on_write", False)
-except pd.errors.OptionError:
- pass
diff --git a/tests/validation/converters/test_pandapower_converter_output.py b/tests/validation/converters/test_pandapower_converter_output.py
index 68f8735a..808d5df8 100644
--- a/tests/validation/converters/test_pandapower_converter_output.py
+++ b/tests/validation/converters/test_pandapower_converter_output.py
@@ -15,12 +15,12 @@
from power_grid_model_io.converters import PandaPowerConverter
from power_grid_model_io.converters.pandapower_converter import PandaPowerData
-pp = pytest.importorskip("pandapower", reason="pandapower is not installed")
-# we add this to enable python 3.13 testing even though pandapower 3.0 is not yet compatible with it
-
+pp = pytest.importorskip("pandapower", reason="pandapower is not installed")
+# Keep this above the pp_validation import below: that module depends on pandapower, so
+# the skip must fire before it is imported (pandapower 3.0 lacks Python 3.13 support).
 from ...data.pandapower.pp_validation import pp_net, pp_net_3ph
 from ..utils import component_attributes_df, load_json_single_dataset
PGM_PP_TEST_DATA = Path(__file__).parents[2] / "data" / "pandapower"
PGM_OUTPUT_FILE = PGM_PP_TEST_DATA / "pgm_output_data.json"
PGM_ASYM_OUTPUT_FILE = PGM_PP_TEST_DATA / "pgm_asym_output_data.json"
diff --git a/tests/validation/converters/test_vision_excel_converter.py b/tests/validation/converters/test_vision_excel_converter.py
index 26ef5217..f2f90bfa 100644
--- a/tests/validation/converters/test_vision_excel_converter.py
+++ b/tests/validation/converters/test_vision_excel_converter.py
@@ -95,7 +95,8 @@ def test_input_data(input_data: Tuple[SingleDataset, SingleDataset]):
def test_input_data_custom_yaml():
"""
- Unit test to preload the expected and actual data, using a different mapping file other than the one in the default location
+    Unit test to preload the expected and actual data, using a mapping
+    file other than the one in the default location
"""
for language in LANGUAGES:
# Arrange
diff --git a/tests/validation/utils.py b/tests/validation/utils.py
index ca44321b..123c5ab9 100644
--- a/tests/validation/utils.py
+++ b/tests/validation/utils.py
@@ -176,7 +176,7 @@ def load_json_single_dataset(file_path: Path, data_type: str) -> Tuple[SingleDat
"""
try:
dataset = json_deserialize_from_file(file_path=file_path)
- except PowerGridSerializationError as error:
+ except PowerGridSerializationError:
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
@@ -223,7 +223,7 @@ def compare_extra_info(actual: ExtraInfo, expected: ExtraInfo, component: str, o
# If the values don't match, that's an error
elif act[key] != value:
errors.append(
- f"Expected extra info '{key}' for {component} #{obj_id} to be {value}, " f"but it is {act[key]}."
+ f"Expected extra info '{key}' for {component} #{obj_id} to be {value}, but it is {act[key]}."
)
return errors