Skip to content

Commit d8d4fcf

Browse files
authored
Merge pull request #103 from alliander-opensource/feature/pre-commit
Feature/pre commit
2 parents ef03b13 + c0ca19d commit d8d4fcf

File tree

6 files changed

+79
-23
lines changed

6 files changed

+79
-23
lines changed

.pre-commit-config.yaml

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
# SPDX-FileCopyrightText: 2022 Contributors to the Power Grid Model project <[email protected]>
2+
#
3+
# SPDX-License-Identifier: MPL-2.0
4+
5+
repos:
6+
- repo: https://github.com/fsfe/reuse-tool
7+
rev: v1.0.0
8+
hooks:
9+
- id: reuse
10+
- repo: https://github.com/psf/black
11+
rev: 22.6.0
12+
hooks:
13+
- id: black
14+
language_version: python3.8
15+
- repo: local
16+
hooks:
17+
- id: pylint
18+
name: pylint
19+
entry: pylint
20+
files: ^src/.+\.py$
21+
language: system
22+
types: [ python ]
23+
args: [ "--rcfile=pyproject.toml" ]
24+
- id: pytest
25+
name: pytest
26+
entry: pytest
27+
language: system
28+
pass_filenames: false
29+
always_run: true

CONTRIBUTING.md

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,32 @@ This project uses Google Format Style (NOTE: not Google C++ Programming Style) t
5858

5959
Tip: Use [clang-format](https://clang.llvm.org/docs/ClangFormat.html) to format your C++ code.
6060

61+
## pre-commit hooks
62+
This project uses [pre-commit](https://pre-commit.com/) to run a list of checks (and perform some automatic
63+
corrections) to your code (style) before each commit. It is up to the developer to choose whether to
64+
use this tool or not. The goal is to make sure that each commit will pass the quality checks in the github actions
65+
workflow. Currently, these hooks are defined in [`.pre-commit-config.yaml`](.pre-commit-config.yaml):
66+
* **reuse**: check if all license headers and files are in place
67+
* **black**: check and correct code style
68+
* **pylint**: check code style
69+
* **pytest**: run all unit tests
70+
71+
You can manually run pre-commit whenever you like:
72+
```bash
73+
pre-commit run
74+
```
75+
76+
Or you can install it as a git pre-commit hook. In this case a commit will be aborted whenever one of the hooks fails.
77+
```bash
78+
pre-commit install
79+
```
80+
81+
As using the pre-commit tool is not mandatory, you can always skip the tool:
82+
83+
```bash
84+
git commit ... --no-verify
85+
```
86+
6187
## REUSE Compliance
6288

6389
All the files in the repository need to be [REUSE compliant](https://reuse.software/).

pyproject.toml

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ requires = [
99
"cython",
1010
"oldest-supported-numpy",
1111
"requests",
12-
"pybuild-header-dependency"
12+
"pybuild-header-dependency",
1313
]
1414
build-backend = "setuptools.build_meta"
1515

@@ -38,15 +38,16 @@ classifiers = [
3838
]
3939
requires-python = ">=3.8"
4040
dependencies = [
41-
"numpy>=1.21.0"
41+
"numpy>=1.21.0",
4242
]
4343
dynamic = ["version"]
4444

4545

4646
[project.optional-dependencies]
4747
dev = [
48+
"pre-commit",
4849
"pytest",
49-
"pytest-cov"
50+
"pytest-cov",
5051
]
5152

5253
[project.urls]
@@ -66,3 +67,6 @@ addopts = ["--cov=power_grid_model", "--cov-report", "term", "--cov-report", "ht
6667
[tool.black]
6768
line-length = 120
6869
target-version = ['py38']
70+
71+
[tool.pylint]
72+
max-line-length = 120

src/power_grid_model/__init__.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,12 +2,9 @@
22
#
33
# SPDX-License-Identifier: MPL-2.0
44

5+
"""Power Grid Model"""
6+
57
# pylint: disable=no-name-in-module
68

7-
# Helper functions
8-
# Power Grid metadata
9-
# Power Grid Model
109
from ._power_grid_core import PowerGridModel, initialize_array, power_grid_meta_data
11-
12-
# Enumerations
1310
from .enum import BranchSide, CalculationMethod, CalculationType, LoadGenType, MeasuredTerminalType, WindingType

src/power_grid_model/manual_testing.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -121,26 +121,26 @@ def convert_python_to_numpy(
121121
dataset[component] = initialize_array(data_type, component, len(objects))
122122

123123
for i, obj in enumerate(objects):
124-
# As each object is a separate dictionary, and the properties may differ per object, we need to check
125-
# all properties. Non-existing properties
126-
for prop, value in obj.items():
127-
if prop == "extra":
128-
# The "extra" property is a special one. It can store any type of information associated with
124+
# As each object is a separate dictionary, and the attributes may differ per object, we need to check
125+
# all attributes. Non-existing attributes
126+
for attribute, value in obj.items():
127+
if attribute == "extra":
128+
# The "extra" attribute is a special one. It can store any type of information associated with
129129
# an object, but it will not be used in the calculations. Therefore it is not included in the
130-
# numpy array, so we can skip this property
130+
# numpy array, so we can skip this attribute
131131
continue
132132

133-
if prop not in dataset[component].dtype.names:
134-
# If a property doen't exist, the user made a mistake. Let's be merciless in that case,
133+
if attribute not in dataset[component].dtype.names:
134+
# If an attribute doesn't exist, the user made a mistake. Let's be merciless in that case,
135135
# for their own good.
136-
raise ValueError(f"Invalid property '{prop}' for {component} {data_type} data.")
136+
raise ValueError(f"Invalid attribute '{attribute}' for {component} {data_type} data.")
137137

138138
# Now just assign the value and raise an error if the value cannot be stored in the specific
139-
# numpy array data format for this property.
139+
# numpy array data format for this attribute.
140140
try:
141-
dataset[component][i][prop] = value
141+
dataset[component][i][attribute] = value
142142
except ValueError as ex:
143-
raise ValueError(f"Invalid '{prop}' value for {component} {data_type} data: {ex}") from ex
143+
raise ValueError(f"Invalid '{attribute}' value for {component} {data_type} data: {ex}") from ex
144144
return dataset
145145

146146

@@ -226,11 +226,11 @@ def convert_numpy_to_python(
226226
if not isinstance(example_data, np.ndarray) or example_data.ndim != 1:
227227
raise ValueError("Invalid data format")
228228

229-
# Convert each numpy array to a list of objects, which contains only the non-NaN properties:
229+
# Convert each numpy array to a list of objects, which contains only the non-NaN attributes:
230230
# For example: {"node": [{"id": 0, ...}, {"id": 1, ...}], "line": [{"id": 2, ...}]}
231231
return {
232232
component: [
233-
{property: obj[property].tolist() for property in objects.dtype.names if not is_nan(obj[property])}
233+
{attribute: obj[attribute].tolist() for attribute in objects.dtype.names if not is_nan(obj[attribute])}
234234
for obj in objects
235235
]
236236
for component, objects in data.items()

tests/unit/test_manual_testing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,7 @@ def test_round_trip_json_numpy_json(two_nodes_one_line, two_nodes_two_lines):
113113

114114

115115
def test_convert_python_to_numpy__raises_value_error():
116-
with pytest.raises(ValueError, match="Invalid property 'u' for line input data."):
116+
with pytest.raises(ValueError, match="Invalid attribute 'u' for line input data."):
117117
convert_python_to_numpy({"line": [{"id": 1, "u": 10.5e3}]}, "input")
118118
with pytest.raises(ValueError, match="Invalid 'id' value for line input data."):
119119
convert_python_to_numpy({"line": [{"id": "my_line", "u_rated": 10.5e3}]}, "input")

0 commit comments

Comments
 (0)