|
4 | 4 | import polars as pl |
5 | 5 | import pytest |
6 | 6 |
|
7 | | -# from desdeo.problem import PolarsEvaluator, river_pollution_problem, simple_test_problem, simple_knapsack_vectors |
8 | 7 | from desdeo.problem import ( |
9 | | - PolarsEvaluator, |
10 | 8 | Objective, |
| 9 | + ObjectiveTypeEnum, |
| 10 | + PolarsEvaluator, |
11 | 11 | Problem, |
12 | 12 | TensorConstant, |
13 | 13 | TensorVariable, |
| 14 | + Variable, |
| 15 | + VariableTypeEnum, |
14 | 16 | river_pollution_problem, |
15 | | - simple_test_problem, |
16 | 17 | simple_knapsack_vectors, |
| 18 | + simple_test_problem, |
17 | 19 | ) |
18 | 20 | from desdeo.problem.evaluator import find_closest_points |
19 | 21 |
|
@@ -159,40 +161,86 @@ def test_knapsack_problem(): |
159 | 161 |
|
160 | 162 |
|
@pytest.mark.polars
def test_evaluate_w_flattened():
    """Test that the evaluator works when called with flattened vars.

    Builds a problem mixing a scalar variable, a 1-D tensor variable, and a
    2-D tensor variable, then evaluates it twice with the same two samples:
    once with nested tensor-shaped inputs (`evaluate`) and once with
    flattened, per-element inputs (`evaluate_flat`). Both paths must agree,
    and both must match hand-computed objective values.
    """
    # Plain scalar real variable, shared by both objectives.
    var = Variable(
        name="x", symbol="x", lowerbound=0, upperbound=10, initial_value=5.2, variable_type=VariableTypeEnum.real
    )

    # 1-D real tensor of length 3. Scalar bounds/initial value are given for
    # the whole tensor — presumably broadcast to every element by
    # TensorVariable; verify against its model definition.
    tensor_var_1 = TensorVariable(
        name="X",
        symbol="X",
        shape=[3],
        variable_type=VariableTypeEnum.real,
        lowerbounds=-1.1,
        upperbounds=3.4,
        initial_values=0.2,
    )

    # 2-D (2x2) integer tensor, again with scalar bounds/initial value.
    tensor_var_2 = TensorVariable(
        name="Y",
        symbol="Y",
        shape=[2, 2],
        variable_type=VariableTypeEnum.integer,
        lowerbounds=-5,
        upperbounds=3,
        initial_values=2,
    )

    variables = [var, tensor_var_1, tensor_var_2]

    # f_1 = sum of X's elements minus x. Note the indexing in the func string
    # is 1-based (X[1]..X[3] for shape [3]).
    objective_1 = Objective(
        name="f_1",
        symbol="f_1",
        func="X[1] + X[2] + X[3] - x",
        maximize=False,
        objective_type=ObjectiveTypeEnum.analytical,
        is_linear=True,
        is_convex=True,
        is_twice_differentiable=True,
    )

    # f_2 = sum of Y's elements minus x, with the same 1-based indexing for
    # both tensor dimensions.
    objective_2 = Objective(
        name="f_2",
        symbol="f_2",
        func="Y[1, 1] + Y[1, 2] + Y[2, 1] + Y[2, 2] - x",
        maximize=False,
        objective_type=ObjectiveTypeEnum.analytical,
        is_linear=True,
        is_convex=True,
        is_twice_differentiable=True,
    )

    objectives = [objective_1, objective_2]

    problem = Problem(name="Test Problem", description="Test problem", variables=variables, objectives=objectives)

    evaluator = PolarsEvaluator(problem)

    # Tensor-shaped inputs: each entry holds TWO samples (one per list item),
    # so "X" is two length-3 vectors and "Y" is two 2x2 matrices.
    xs = {
        "x": [2, 3],
        "X": [[-0.9, 0.1, 1.2], [0.1, 1.1, 0.7]],
        "Y": [[[-4.1, -3.9], [-3.1, -2.9]], [[-2.2, -1.9], [1.1, 2.2]]],
    }

    # The same two samples with tensor elements flattened into individually
    # named columns ("X_1", "Y_1_1", ...) — the naming scheme evaluate_flat
    # expects. Each list is the per-sample values of one element.
    flat_xs = {
        "x": [2, 3],
        "X_1": [-0.9, 0.1],
        "X_2": [0.1, 1.1],
        "X_3": [1.2, 0.7],
        "Y_1_1": [-4.1, -2.2],
        "Y_1_2": [-3.9, -1.9],
        "Y_2_1": [-3.1, 1.1],
        "Y_2_2": [-2.9, 2.2],
    }

    res_flat = evaluator.evaluate_flat(flat_xs)
    res_tensor = evaluator.evaluate(xs)

    # should have the same results
    npt.assert_allclose(res_flat["f_1"].to_numpy(), res_tensor["f_1"].to_numpy())
    npt.assert_allclose(res_flat["f_2"].to_numpy(), res_tensor["f_2"].to_numpy())

    # check correct objective function values, computed by hand:
    # f_1: (-0.9 + 0.1 + 1.2) - 2 = -1.6 and (0.1 + 1.1 + 0.7) - 3 = -1.1
    # f_2: (-4.1 - 3.9 - 3.1 - 2.9) - 2 = -16 and (-2.2 - 1.9 + 1.1 + 2.2) - 3 = -3.8
    npt.assert_allclose(res_flat["f_1"].to_numpy(), [-1.6, -1.1])
    npt.assert_allclose(res_flat["f_2"].to_numpy(), [-16, -3.8])
0 commit comments