Skip to content

Commit 35ea33e

Browse files
authored
adding unit-tests and improvements to NLP interface. (#6)
* adding many tests, new smoothcanon for min, and improvements to ipopt_nlpif * fixing last two tests * add another example, qcp * adding example for acopf * add control of a car example done --------- Co-authored-by: William Zijie Zhang <william@gridmatic.com>
1 parent ccf69ad commit 35ea33e

File tree

11 files changed

+610
-114
lines changed

11 files changed

+610
-114
lines changed

cvxpy/atoms/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@
6868
from cvxpy.atoms.elementwise.sqrt import sqrt
6969
from cvxpy.atoms.elementwise.square import square
7070
from cvxpy.atoms.elementwise.xexp import xexp
71+
from cvxpy.atoms.elementwise.trig import sin, cos, tan
7172
from cvxpy.atoms.eye_minus_inv import eye_minus_inv, resolvent
7273
from cvxpy.atoms.gen_lambda_max import gen_lambda_max
7374
from cvxpy.atoms.geo_mean import geo_mean

cvxpy/atoms/elementwise/trig.py

Lines changed: 202 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,202 @@
1+
"""
2+
Copyright 2025 CVXPY Developers
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
"""
16+
from typing import List, Tuple
17+
18+
import numpy as np
19+
20+
from cvxpy.atoms.elementwise.elementwise import Elementwise
21+
from cvxpy.constraints.constraint import Constraint
22+
23+
24+
class sin(Elementwise):
    """Elementwise :math:`\\sin x`.

    A smooth, nonconvex atom intended for the NLP (nonlinear programming)
    interface. It is periodic and therefore neither convex nor concave,
    and not monotone, so all DCP curvature/monotonicity queries return
    ``False``.
    """

    def __init__(self, x) -> None:
        super(sin, self).__init__(x)

    @Elementwise.numpy_numeric
    def numeric(self, values):
        """Returns the elementwise sine of x."""
        return np.sin(values[0])

    def sign_from_args(self) -> Tuple[bool, bool]:
        """Returns sign (is positive, is negative) of the expression."""
        # sin ranges over [-1, 1], so the sign is unknown in general.
        return (False, False)

    def is_atom_convex(self) -> bool:
        """Is the atom convex?"""
        # sin is periodic: neither convex nor concave on its full domain.
        return False

    def is_atom_concave(self) -> bool:
        """Is the atom concave?"""
        # Not concave (e.g. sin is convex on [-pi, 0]).
        return False

    def is_atom_log_log_convex(self) -> bool:
        """Is the atom log-log convex?"""
        return False

    def is_atom_log_log_concave(self) -> bool:
        """Is the atom log-log concave?"""
        # sin(e^u) oscillates and changes sign, so it is not log-log concave.
        return False

    def is_incr(self, idx) -> bool:
        """Is the composition non-decreasing in argument idx?"""
        # sin is not monotone (its derivative cos changes sign).
        return False

    def is_decr(self, idx) -> bool:
        """Is the composition non-increasing in argument idx?"""
        return False

    def _domain(self) -> List[Constraint]:
        """Returns constraints describing the domain of the node.

        sin is defined on all of R, so there are no domain constraints.
        """
        return []

    def _grad(self) -> List[Constraint]:
        """Returns the gradient of the node.

        TODO(review): the analytic gradient (cos x) is not implemented yet;
        the NLP backend differentiates via autodiff instead.
        """
        return []
82+
83+
84+
class cos(Elementwise):
    """Elementwise :math:`\\cos x`.

    A smooth, nonconvex atom intended for the NLP (nonlinear programming)
    interface. It is periodic and therefore neither convex nor concave,
    and not monotone, so all DCP curvature/monotonicity queries return
    ``False``.
    """

    def __init__(self, x) -> None:
        super(cos, self).__init__(x)

    @Elementwise.numpy_numeric
    def numeric(self, values):
        """Returns the elementwise cosine of x."""
        return np.cos(values[0])

    def sign_from_args(self) -> Tuple[bool, bool]:
        """Returns sign (is positive, is negative) of the expression."""
        # cos ranges over [-1, 1], so the sign is unknown in general.
        return (False, False)

    def is_atom_convex(self) -> bool:
        """Is the atom convex?"""
        # cos is periodic: neither convex nor concave on its full domain.
        return False

    def is_atom_concave(self) -> bool:
        """Is the atom concave?"""
        # Not concave (e.g. cos is convex on [pi/2, 3*pi/2]).
        return False

    def is_atom_log_log_convex(self) -> bool:
        """Is the atom log-log convex?"""
        return False

    def is_atom_log_log_concave(self) -> bool:
        """Is the atom log-log concave?"""
        # cos(e^u) oscillates and changes sign, so it is not log-log concave.
        return False

    def is_incr(self, idx) -> bool:
        """Is the composition non-decreasing in argument idx?"""
        # cos is not monotone (its derivative -sin changes sign).
        return False

    def is_decr(self, idx) -> bool:
        """Is the composition non-increasing in argument idx?"""
        return False

    def _domain(self) -> List[Constraint]:
        """Returns constraints describing the domain of the node.

        cos is defined on all of R, so there are no domain constraints.
        """
        return []

    def _grad(self) -> List[Constraint]:
        """Returns the gradient of the node.

        TODO(review): the analytic gradient (-sin x) is not implemented yet;
        the NLP backend differentiates via autodiff instead.
        """
        return []
142+
143+
144+
class tan(Elementwise):
    """Elementwise :math:`\\tan x`.

    A smooth (away from its poles), nonconvex atom intended for the NLP
    (nonlinear programming) interface. tan is periodic with poles at odd
    multiples of :math:`\\pi/2`; it is neither convex nor concave on its
    domain and not monotone globally, so all DCP curvature/monotonicity
    queries return ``False``.
    """

    def __init__(self, x) -> None:
        super(tan, self).__init__(x)

    @Elementwise.numpy_numeric
    def numeric(self, values):
        """Returns the elementwise tangent of x."""
        return np.tan(values[0])

    def sign_from_args(self) -> Tuple[bool, bool]:
        """Returns sign (is positive, is negative) of the expression."""
        # tan takes all real values, so the sign is unknown in general.
        return (False, False)

    def is_atom_convex(self) -> bool:
        """Is the atom convex?"""
        # tan is convex only on [0, pi/2) and similar sub-intervals,
        # not on its whole domain.
        return False

    def is_atom_concave(self) -> bool:
        """Is the atom concave?"""
        # Not concave (tan is convex on [0, pi/2)).
        return False

    def is_atom_log_log_convex(self) -> bool:
        """Is the atom log-log convex?"""
        return False

    def is_atom_log_log_concave(self) -> bool:
        """Is the atom log-log concave?"""
        # tan(e^u) has poles and sign changes, so it is not log-log concave.
        return False

    def is_incr(self, idx) -> bool:
        """Is the composition non-decreasing in argument idx?"""
        # tan increases within each branch but is not monotone across
        # its poles, so globally it is not non-decreasing.
        return False

    def is_decr(self, idx) -> bool:
        """Is the composition non-increasing in argument idx?"""
        return False

    def _domain(self) -> List[Constraint]:
        """Returns constraints describing the domain of the node.

        NOTE(review): tan is undefined at odd multiples of pi/2; that
        restriction is not expressible as a convex constraint, so no
        domain constraints are returned.
        """
        return []

    def _grad(self) -> List[Constraint]:
        """Returns the gradient of the node.

        TODO(review): the analytic gradient (sec^2 x) is not implemented
        yet; the NLP backend differentiates via autodiff instead.
        """
        return []
202+

cvxpy/reductions/expr2smooth/canonicalizers/__init__.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,11 @@
1414
limitations under the License.
1515
"""
1616
from cvxpy.atoms import maximum
17+
from cvxpy.atoms.elementwise.minimum import minimum
1718
from cvxpy.atoms.elementwise.power import power
1819
from cvxpy.atoms.pnorm import Pnorm
1920
from cvxpy.atoms.elementwise.abs import abs
21+
from cvxpy.reductions.expr2smooth.canonicalizers.minimum_canon import minimum_canon
2022
from cvxpy.reductions.expr2smooth.canonicalizers.abs_canon import abs_canon
2123
from cvxpy.reductions.expr2smooth.canonicalizers.pnorm_canon import pnorm_canon
2224
from cvxpy.reductions.expr2smooth.canonicalizers.power_canon import power_canon
@@ -25,8 +27,9 @@
2527
CANON_METHODS = {
2628
abs: abs_canon,
2729
maximum : maximum_canon,
30+
minimum: minimum_canon,
2831
# log: log_canon,
29-
power: power_canon,
30-
Pnorm : pnorm_canon,
32+
#power: power_canon,
33+
#Pnorm : pnorm_canon,
3134
# inv: inv_pos_canon,
3235
}
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
"""
2+
Copyright 2025 CVXPY developers
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
"""
16+
17+
from cvxpy.atoms.elementwise.maximum import maximum
18+
from cvxpy.reductions.expr2smooth.canonicalizers.maximum_canon import (
19+
maximum_canon,
20+
)
21+
22+
23+
def minimum_canon(expr, args):
    """Smooth-canonicalize an elementwise minimum.

    Uses the identity ``min(x1, ..., xk) = -max(-x1, ..., -xk)`` so the
    existing maximum canonicalizer does all of the work.

    Returns the canonicalized expression and its side constraints.
    """
    del expr
    negated = maximum(*(-arg for arg in args))
    max_canon, side_constraints = maximum_canon(negated, negated.args)
    return -max_canon, side_constraints

cvxpy/reductions/expr2smooth/canonicalizers/power_canon.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,3 +36,9 @@ def power_canon(expr, args):
3636
t = Variable(shape)
3737
if 0 < p < 1:
3838
return t, [t**(1/p) == x, t >= 0]
39+
elif p > 1:
40+
return x**p, []
41+
else: # p < 0
42+
raise ValueError(
43+
"Power canonicalization does not support negative powers."
44+
)

cvxpy/reductions/solvers/nlp_solvers/ipopt_nlpif.py

Lines changed: 20 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -120,8 +120,14 @@ def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts, sol
120120
"""
121121
import cyipopt
122122
bounds = self.Bounds(data["problem"])
123-
x0 = [12, 5, 0]
124-
123+
initial_values = []
124+
for var in bounds.main_var:
125+
if var.value is not None:
126+
initial_values.append(var.value.flatten(order='F'))
127+
else:
128+
# If no initial value, use zero
129+
initial_values.append(np.zeros(var.size))
130+
x0 = np.concatenate(initial_values, axis=0)
125131
nlp = cyipopt.Problem(
126132
n=len(x0),
127133
m=len(bounds.cl),
@@ -134,7 +140,7 @@ def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts, sol
134140
nlp.add_option('mu_strategy', 'adaptive')
135141
nlp.add_option('tol', 1e-7)
136142
nlp.add_option('hessian_approximation', "limited-memory")
137-
x, info = nlp.solve(x0)
143+
_, info = nlp.solve(x0)
138144
return info
139145

140146
def cite(self, data):
@@ -160,7 +166,7 @@ def objective(self, x):
160166
offset = 0
161167
for var in self.main_var:
162168
size = var.size
163-
var.value = x[offset:offset+size]
169+
var.value = x[offset:offset+size].reshape(var.shape, order='F')
164170
offset += size
165171
# Evaluate the objective
166172
obj_value = self.problem.objective.args[0].value
@@ -173,7 +179,7 @@ def gradient(self, x):
173179
torch_exprs = []
174180
for var in self.main_var:
175181
size = var.size
176-
slice = x[offset:offset+size]
182+
slice = x[offset:offset+size].reshape(var.shape, order='F')
177183
torch_exprs.append(torch.from_numpy(slice.astype(np.float64)).requires_grad_(True))
178184
offset += size
179185

@@ -196,7 +202,7 @@ def constraints(self, x):
196202
offset = 0
197203
for var in self.main_var:
198204
size = var.size
199-
var.value = x[offset:offset+size]
205+
var.value = x[offset:offset+size].reshape(var.shape, order='F')
200206
offset += size
201207

202208
# Evaluate all constraints
@@ -214,7 +220,7 @@ def jacobian(self, x):
214220

215221
for var in self.main_var:
216222
size = var.size
217-
slice = x[offset:offset+size]
223+
slice = x[offset:offset+size].reshape(var.shape, order='F')
218224
torch_tensor = torch.from_numpy(slice.astype(np.float64)).requires_grad_(True)
219225
torch_vars_dict[var.id] = torch_tensor # Map CVXPY variable ID to torch tensor
220226
torch_exprs.append(torch_tensor)
@@ -245,7 +251,7 @@ def constraint_function(*args):
245251
*constr_torch_args
246252
)
247253
constraint_values.append(torch_expr)
248-
return torch.cat([torch.atleast_1d(cv) for cv in constraint_values])
254+
return torch.cat([cv.flatten() for cv in constraint_values])
249255

250256
# Compute Jacobian using torch.autograd.functional.jacobian
251257
if len(self.problem.constraints) > 0:
@@ -254,7 +260,10 @@ def constraint_function(*args):
254260
# Handle the case where jacobian_tuple is a tuple (multiple variables)
255261
if isinstance(jacobian_tuple, tuple):
256262
# Concatenate along the last dimension (variable dimension)
257-
jacobian_matrix = torch.cat(jacobian_tuple, dim=-1)
263+
jacobian_matrix = torch.cat(
264+
[jac.reshape(jac.size(0), -1) for jac in jacobian_tuple],
265+
dim=1
266+
)
258267
else:
259268
# Single variable case
260269
jacobian_matrix = jacobian_tuple
@@ -298,8 +307,8 @@ def get_variable_bounds(self):
298307
for var in self.main_var:
299308
size = var.size
300309
if var.bounds:
301-
var_lower.extend(var.bounds[0])
302-
var_upper.extend(var.bounds[1])
310+
var_lower.extend(var.bounds[0].flatten(order='F'))
311+
var_upper.extend(var.bounds[1].flatten(order='F'))
303312
else:
304313
# No bounds specified, use infinite bounds
305314
var_lower.extend([-np.inf] * size)

0 commit comments

Comments
 (0)