Skip to content

Commit 88b9836

Browse files
committed
Polars evaluator changes.
Polars evaluator now has an 'evaluate_flat' method that works with dictionaries whose keys are the symbols of flattened tensor variables. Tested.
1 parent a43e618 commit 88b9836

File tree

2 files changed

+140
-31
lines changed

2 files changed

+140
-31
lines changed

desdeo/problem/evaluator.py

Lines changed: 62 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,19 @@
11
"""Defines a Polars-based evaluator."""
22

33
from enum import Enum
4+
from itertools import product
45

56
import numpy as np
67
import polars as pl
78

89
from desdeo.problem.json_parser import MathParser, replace_str
9-
from desdeo.problem.schema import Constant, ObjectiveTypeEnum, Problem, TensorConstant, TensorVariable
10+
from desdeo.problem.schema import (
11+
Constant,
12+
ObjectiveTypeEnum,
13+
Problem,
14+
TensorConstant,
15+
TensorVariable,
16+
)
1017

1118
SUPPORTED_EVALUATOR_MODES = ["variables", "discrete"]
1219
SUPPORTED_VAR_DIMENSIONS = ["scalar", "vector"]
@@ -120,6 +127,7 @@ def __init__(self, problem: Problem, evaluator_mode: PolarsEvaluatorModesEnum =
120127

121128
self.evaluator_mode = evaluator_mode
122129

130+
self.problem = problem
123131
# Gather any constants of the problem definition.
124132
self.problem_constants = problem.constants
125133
# Gather the objective functions
@@ -155,6 +163,7 @@ def __init__(self, problem: Problem, evaluator_mode: PolarsEvaluatorModesEnum =
155163
# Note, when calling an evaluate method, it is assumed the problem has been fully parsed.
156164
if self.evaluator_mode == PolarsEvaluatorModesEnum.variables:
157165
self.evaluate = self._polars_evaluate
166+
self.evaluate_flat = self._polars_evaluate_flat
158167
elif self.evaluator_mode == PolarsEvaluatorModesEnum.discrete:
159168
self.evaluate = self._from_discrete_data
160169
else:
@@ -378,6 +387,58 @@ def _polars_evaluate(
378387
# return the dataframe and let the solver figure it out
379388
return agg_df
380389

390+
def _polars_evaluate_flat(
    self,
    xs: dict[str, list[float | int | bool]],
) -> pl.DataFrame:
    """Evaluate the problem with flattened tensor variables.

    Args:
        xs (dict[str, list[float | int | bool]]): a dict with flattened variables.
            E.g., if the original problem has a tensor variable 'X' with shape (2,2),
            then the dictionary is expected to have entries named 'X_1_1', 'X_1_2',
            'X_2_1', and 'X_2_2'. The dictionary is rebuilt and passed to
            `self.evaluate`.

    Note:
        Each flattened variable is assumed to contain the same number of samples.
        This means that if the entry 'X_1_1' of `xs` is, for example
        `[1,2,3]`, then 'X_1_1' and all the other flattened variables have
        three samples. This means also that the original problem will be
        evaluated with a tensor variable with shape (2,2) and three samples,
        e.g., 'X=[[[1, 1], [1,1]], [[2, 2], [2, 2]], [[3, 3], [3, 3]]]'.

    Raises:
        ValueError: `xs` is empty, so the number of samples cannot be deduced.
        KeyError: a flattened entry expected from the problem's variables is
            missing from `xs`.

    Returns:
        pl.DataFrame: a dataframe with the original problem's evaluated functions.
    """
    if not xs:
        # Explicit error instead of a bare StopIteration from next() below.
        raise ValueError("The dict `xs` must contain at least one variable.")

    # All variables are assumed to share the same number of samples.
    n_samples = len(next(iter(xs.values())))

    fat_xs = {}

    # Iterate over the variables of the problem, re-assembling tensors.
    for var in self.problem.variables:
        if isinstance(var, TensorVariable):
            # Buffer with one tensor of shape `var.shape` per sample; NaN
            # placeholders make any missed entry visible downstream.
            tmp = np.full((n_samples, *var.shape), np.nan)

            # Cartesian product over all (0-based) index combinations.
            for index in product(*(range(dim) for dim in var.shape)):
                # Flattened symbols use 1-based indexing, e.g., 'X_1_1'.
                # The suffix is built outside the f-string: nested
                # same-type quotes in f-strings require Python >= 3.12.
                suffix = "_".join(str(i + 1) for i in index)
                # Tuple-based indexing (instead of `tmp[:, *index]`, which
                # requires Python >= 3.11) assigns the whole sample column.
                tmp[(slice(None), *index)] = xs[f"{var.symbol}_{suffix}"]

            fat_xs[var.symbol] = tmp.tolist()
        else:
            # Scalar variable: pass the samples through unchanged.
            fat_xs[var.symbol] = xs[var.symbol]

    # Return the result of the regular evaluate method.
    return self.evaluate(fat_xs)
381442
def _from_discrete_data(self) -> pl.DataFrame:
382443
"""Evaluates the problem based on its discrete representation only.
383444

tests/test_polars_evaluator.py

Lines changed: 78 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,18 @@
44
import polars as pl
55
import pytest
66

7-
# from desdeo.problem import PolarsEvaluator, river_pollution_problem, simple_test_problem, simple_knapsack_vectors
87
from desdeo.problem import (
9-
PolarsEvaluator,
108
Objective,
9+
ObjectiveTypeEnum,
10+
PolarsEvaluator,
1111
Problem,
1212
TensorConstant,
1313
TensorVariable,
14+
Variable,
15+
VariableTypeEnum,
1416
river_pollution_problem,
15-
simple_test_problem,
1617
simple_knapsack_vectors,
18+
simple_test_problem,
1719
)
1820
from desdeo.problem.evaluator import find_closest_points
1921

@@ -159,40 +161,86 @@ def test_knapsack_problem():
159161

160162

161163
@pytest.mark.polars
def test_evaluate_w_flattened():
    """Test that the evaluator works when called with flattened vars."""
    # One scalar variable, one 1-D tensor variable, and one 2-D tensor variable.
    scalar_var = Variable(
        name="x", symbol="x", lowerbound=0, upperbound=10, initial_value=5.2, variable_type=VariableTypeEnum.real
    )

    vector_var = TensorVariable(
        name="X",
        symbol="X",
        shape=[3],
        variable_type=VariableTypeEnum.real,
        lowerbounds=-1.1,
        upperbounds=3.4,
        initial_values=0.2,
    )

    matrix_var = TensorVariable(
        name="Y",
        symbol="Y",
        shape=[2, 2],
        variable_type=VariableTypeEnum.integer,
        lowerbounds=-5,
        upperbounds=3,
        initial_values=2,
    )

    # Two linear analytical objectives referencing the tensor elements.
    sum_objective = Objective(
        name="f_1",
        symbol="f_1",
        func="X[1] + X[2] + X[3] - x",
        maximize=False,
        objective_type=ObjectiveTypeEnum.analytical,
        is_linear=True,
        is_convex=True,
        is_twice_differentiable=True,
    )

    matrix_objective = Objective(
        name="f_2",
        symbol="f_2",
        func="Y[1, 1] + Y[1, 2] + Y[2, 1] + Y[2, 2] - x",
        maximize=False,
        objective_type=ObjectiveTypeEnum.analytical,
        is_linear=True,
        is_convex=True,
        is_twice_differentiable=True,
    )

    problem = Problem(
        name="Test Problem",
        description="Test problem",
        variables=[scalar_var, vector_var, matrix_var],
        objectives=[sum_objective, matrix_objective],
    )

    evaluator = PolarsEvaluator(problem)

    # Two samples, given once in tensor form and once flattened.
    xs = {
        "x": [2, 3],
        "X": [[-0.9, 0.1, 1.2], [0.1, 1.1, 0.7]],
        "Y": [[[-4.1, -3.9], [-3.1, -2.9]], [[-2.2, -1.9], [1.1, 2.2]]],
    }

    flat_xs = {
        "x": [2, 3],
        "X_1": [-0.9, 0.1],
        "X_2": [0.1, 1.1],
        "X_3": [1.2, 0.7],
        "Y_1_1": [-4.1, -2.2],
        "Y_1_2": [-3.9, -1.9],
        "Y_2_1": [-3.1, 1.1],
        "Y_2_2": [-2.9, 2.2],
    }

    res_flat = evaluator.evaluate_flat(flat_xs)
    res_tensor = evaluator.evaluate(xs)

    # Both call paths must agree with each other...
    npt.assert_allclose(res_flat["f_1"].to_numpy(), res_tensor["f_1"].to_numpy())
    npt.assert_allclose(res_flat["f_2"].to_numpy(), res_tensor["f_2"].to_numpy())

    # ...and with the hand-computed objective function values.
    npt.assert_allclose(res_flat["f_1"].to_numpy(), [-1.6, -1.1])
    npt.assert_allclose(res_flat["f_2"].to_numpy(), [-16, -3.8])

0 commit comments

Comments
 (0)