
Commit d6c6d68

Merge pull request #104 from alliander-opensource/pylint
Code style (pylint)
2 parents (158d062 + ee06a99) · commit d6c6d68

3 files changed: +19 -19 lines

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ requires = [
     "wheel",
     "cython",
     "oldest-supported-numpy",
-    "requests"
+    "requests",
 ]
 build-backend = "setuptools.build_meta"
 
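The only change is a trailing comma after the last entry. TOML arrays permit trailing commas, so this is purely cosmetic; it just means future additions to `requires` touch a single line in diffs. A quick check (hypothetical snippet, not part of the commit) with the standard-library parser:

    import tomllib  # standard library since Python 3.11

    doc = tomllib.loads('requires = ["wheel", "cython", "requests",]')
    assert doc["requires"] == ["wheel", "cython", "requests"]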

src/power_grid_model/manual_testing.py

Lines changed: 12 additions & 11 deletions
@@ -60,10 +60,11 @@ def convert_list_to_batch_data(
 
         # Create a 2D array if the component exists in all datasets and number of objects is the same in each dataset
         comp_exists_in_all_datasets = all(component in x for x in list_data)
-        all_sizes_are_the_same = lambda: all(x[component].size == list_data[0][component].size for x in list_data)
-        if comp_exists_in_all_datasets and all_sizes_are_the_same():
-            batch_data[component] = np.stack([x[component] for x in list_data], axis=0)
-            continue
+        if comp_exists_in_all_datasets:
+            all_sizes_are_the_same = all(x[component].size == list_data[0][component].size for x in list_data)
+            if all_sizes_are_the_same:
+                batch_data[component] = np.stack([x[component] for x in list_data], axis=0)
+                continue
 
         # otherwise use indptr/data dict
         indptr = [0]
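Behavior is unchanged by this refactor: the lambda assignment (a linter complaint) becomes a nested `if`, and the homogeneous case is still stacked into a single 2D batch array. A minimal sketch with invented data:

    import numpy as np

    # Two scenarios ("datasets"), each with the same number of "node" objects.
    list_data = [{"node": np.zeros(3)}, {"node": np.ones(3)}]

    component = "node"
    if all(component in x for x in list_data):
        if all(x[component].size == list_data[0][component].size for x in list_data):
            batch = np.stack([x[component] for x in list_data], axis=0)

    assert batch.shape == (2, 3)  # one row per scenario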
@@ -122,24 +123,24 @@ def convert_python_to_numpy(
         for i, obj in enumerate(objects):
             # As each object is a separate dictionary, and the properties may differ per object, we need to check
             # all properties. Non-existing properties
-            for property, value in obj.items():
-                if property == "extra":
+            for prop, value in obj.items():
+                if prop == "extra":
                     # The "extra" property is a special one. It can store any type of information associated with
                     # an object, but it will not be used in the calculations. Therefore it is not included in the
                     # numpy array, so we can skip this property
                     continue
 
-                if property not in dataset[component].dtype.names:
+                if prop not in dataset[component].dtype.names:
                     # If a property doen't exist, the user made a mistake. Let's be merciless in that case,
                     # for their own good.
-                    raise ValueError(f"Invalid property '{property}' for {component} {data_type} data.")
+                    raise ValueError(f"Invalid property '{prop}' for {component} {data_type} data.")
 
                 # Now just assign the value and raise an error if the value cannot be stored in the specific
                 # numpy array data format for this property.
                 try:
-                    dataset[component][i][property] = value
+                    dataset[component][i][prop] = value
                 except ValueError as ex:
-                    raise ValueError(f"Invalid '{property}' value for {component} {data_type} data: {ex}")
+                    raise ValueError(f"Invalid '{prop}' value for {component} {data_type} data: {ex}") from ex
     return dataset
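This hunk fixes two pylint findings: `property` shadows the built-in of the same name (redefined-builtin), hence the rename to `prop`, and the re-raise now uses `from ex` (raise-missing-from), which records the original NumPy error as `__cause__`. A small illustration with an invented dtype and value:

    import numpy as np

    arr = np.zeros(1, dtype=[("id", "i4")])
    try:
        try:
            arr[0]["id"] = "not-a-number"  # NumPy raises ValueError here
        except ValueError as ex:
            raise ValueError("Invalid 'id' value for node input data") from ex
    except ValueError as err:
        assert isinstance(err.__cause__, ValueError)  # original error preserved

Without `from ex`, Python still chains implicitly via `__context__`, but the traceback then reads "During handling of the above exception, another exception occurred", which suggests a bug in the handler rather than a deliberate translation of the error.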

@@ -309,7 +310,7 @@ def _inject_extra_info(
     elif isinstance(data, dict):
         if not isinstance(extra_info, dict):
             raise TypeError("Invalid extra info data type")
-        for component, objects in data.items():
+        for _, objects in data.items():
             for obj in objects:
                 if obj["id"] in extra_info:
                     obj["extra"] = extra_info[obj["id"]]

src/power_grid_model/validation/utils.py

Lines changed: 6 additions & 7 deletions
@@ -11,8 +11,8 @@
 
 import numpy as np
 
-from .errors import ValidationError
 from .. import power_grid_meta_data
+from .errors import ValidationError
 
 InputData = Dict[str, np.ndarray]
 UpdateData = Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]]
@@ -154,13 +154,12 @@ def split_numpy_array_in_batches(data: np.ndarray, component: str) -> List[np.ndarray]:
     )
     if data.ndim == 1:
         return [data]
-    elif data.ndim == 2:
+    if data.ndim == 2:
         return [data[i, :] for i in range(data.shape[0])]
-    else:
-        raise TypeError(
-            f"Invalid data dimension {data.ndim} in update data for '{component}' "
-            "(should be a 1D/2D Numpy structured array)."
-        )
+    raise TypeError(
+        f"Invalid data dimension {data.ndim} in update data for '{component}' "
+        "(should be a 1D/2D Numpy structured array)."
+    )
 
 
 def split_compressed_sparse_structure_in_batches(
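Both hunks in this file are style-only: the imports are reordered, and the `elif`/`else` ladder is flattened because every branch ends in a `return` or `raise` (pylint's no-else-return / no-else-raise). The flattened form reads as a chain of guard clauses. A trimmed, self-contained stand-in for `split_numpy_array_in_batches` (simplified error message, no `component` argument):

    import numpy as np

    def split_in_batches(data: np.ndarray) -> list:
        # Guard clauses: each branch exits, so no elif/else is needed.
        if data.ndim == 1:
            return [data]
        if data.ndim == 2:
            return [data[i, :] for i in range(data.shape[0])]
        raise TypeError(f"Invalid data dimension {data.ndim} (should be 1D/2D).")

    assert len(split_in_batches(np.zeros((3, 2)))) == 3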
