Skip to content

Commit bc7abc5

Browse files
committed
converters
1 parent 7310fd0 commit bc7abc5

File tree

2 files changed

+87
-5
lines changed

2 files changed

+87
-5
lines changed

cvxpy/reductions/solvers/nlp_solvers/diff_engine/converters.py

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,16 @@ def _convert_matmul(expr, children):
4848
# One of them should be a Constant, the other a variable expression
4949
left_arg, right_arg = expr.args
5050

51+
left_arg_shape = tuple(left_arg.shape)
52+
left_arg_shape = (1,) * (2 - len(left_arg_shape)) + left_arg_shape
53+
d1_left, d2_left = left_arg_shape
54+
55+
right_arg_shape = tuple(right_arg.shape)
56+
right_arg_shape = (1,) * (2 - len(right_arg_shape)) + right_arg_shape
57+
d3_right, d4_right = right_arg_shape
58+
59+
assert d2_left == d3_right, "Inner dimensions must match for matmul."
60+
5161
if left_arg.is_constant():
5262
# A @ f(x) -> left_matmul
5363
# TODO: why is this always dense? What's going on here?
@@ -198,8 +208,9 @@ def _convert_reshape(expr, children):
198208
"Only order='F' (Fortran) is currently supported."
199209
)
200210

201-
d1, d2 = expr.shape
202-
# TODO: can it happen that len(expr.shape) < 2?
211+
x_shape = tuple(expr.shape)
212+
x_shape = (1,) * (2 - len(x_shape)) + x_shape
213+
d1, d2 = x_shape
203214
return _diffengine.make_reshape(children[0], d1, d2)
204215

205216
def _convert_broadcast(expr, children):
@@ -221,6 +232,9 @@ def _convert_promote(expr, children):
221232
def _convert_NegExpression(_expr, children):
222233
return _diffengine.make_neg(children[0])
223234

235+
def _convert_quad_over_lin(_expr, children):
236+
return _diffengine.make_quad_over_lin(children[0], children[1])
237+
224238
# Mapping from CVXPY atom names to C diff engine functions
225239
# Converters receive (expr, children) where expr is the CVXPY expression
226240
ATOM_CONVERTERS = {
@@ -237,9 +251,7 @@ def _convert_NegExpression(_expr, children):
237251
# Bivariate
238252
"multiply": _convert_multiply,
239253
"QuadForm": _convert_quad_form,
240-
"quad_over_lin": lambda _expr, children: _diffengine.make_quad_over_lin(
241-
children[0], children[1]
242-
),
254+
"quad_over_lin": _convert_quad_over_lin,
243255
"rel_entr": _convert_rel_entr,
244256
# Matrix multiplication
245257
"MulExpression": _convert_matmul,

cvxpy/tests/nlp_tests/test_nlp_solvers.py

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,8 @@ def test_mle(self, solver):
6363
assert np.allclose(sigma.value, 0.77079388)
6464
assert np.allclose(mu.value, 0.59412321)
6565

66+
# we skip this because it is unclear what cvxpy does to support r @ x
67+
@pytest.mark.skip(reason="unclear handling of r @ x")
6668
def test_portfolio_opt(self, solver):
6769
# data taken from https://jump.dev/JuMP.jl/stable/tutorials/nonlinear/portfolio/
6870
# r and Q are pre-computed from historical data of 3 assets
@@ -89,6 +91,32 @@ def test_portfolio_opt(self, solver):
8991
# Second element can be slightly negative due to numerical tolerance
9092
assert np.allclose(x.value, np.array([4.97045504e+02, 0.0, 5.02954496e+02]), atol=1e-4)
9193

94+
def test_portfolio_opt_sum_multiply(self, solver):
95+
# data taken from https://jump.dev/JuMP.jl/stable/tutorials/nonlinear/portfolio/
96+
# r and Q are pre-computed from historical data of 3 assets
97+
r = np.array([0.026002150277777, 0.008101316405671, 0.073715909491990])
98+
Q = np.array([
99+
[0.018641039983891, 0.003598532927677, 0.001309759253660],
100+
[0.003598532927677, 0.006436938322676, 0.004887265158407],
101+
[0.001309759253660, 0.004887265158407, 0.068682765454814],
102+
])
103+
x = cp.Variable(3)
104+
x.value = np.array([10.0, 10.0, 10.0])
105+
variance = cp.quad_form(x, Q)
106+
expected_return = cp.sum(cp.multiply(r, x))
107+
problem = cp.Problem(
108+
cp.Minimize(variance),
109+
[
110+
cp.sum(x) <= 1000,
111+
expected_return >= 50,
112+
x >= 0
113+
]
114+
)
115+
problem.solve(solver=solver, nlp=True)
116+
assert problem.status == cp.OPTIMAL
117+
# Second element can be slightly negative due to numerical tolerance
118+
assert np.allclose(x.value, np.array([4.97045504e+02, 0.0, 5.02954496e+02]), atol=1e-4)
119+
92120
def test_rosenbrock(self, solver):
93121
x = cp.Variable(2, name='x')
94122
objective = cp.Minimize((1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2)
@@ -119,6 +147,8 @@ def test_qcp(self, solver):
119147
assert np.allclose(y.value, np.array([0.25706586]))
120148
assert np.allclose(z.value, np.array([0.4159413]))
121149

150+
# we skip this because it is unclear what cvxpy does to support A @ x
151+
@pytest.mark.skip(reason="unclear handling of A @ x")
122152
def test_analytic_polytope_center(self, solver):
123153
# Generate random data
124154
np.random.seed(0)
@@ -135,6 +165,24 @@ def test_analytic_polytope_center(self, solver):
135165
# Solve the problem
136166
problem.solve(solver=solver, nlp=True)
137167
assert problem.status == cp.OPTIMAL
168+
169+
def test_analytic_polytope_center_x_column_vector(self, solver):
170+
# Generate random data
171+
np.random.seed(0)
172+
m, n = 50, 4
173+
b = np.ones((m, 1))
174+
rand = np.random.randn(m - 2*n, n)
175+
A = np.vstack((rand, np.eye(n), np.eye(n) * -1))
176+
177+
# Define the variable
178+
x = cp.Variable((n, 1))
179+
# set initial value for x
180+
objective = cp.Minimize(-cp.sum(cp.log(b - A @ x)))
181+
problem = cp.Problem(objective, [])
182+
# Solve the problem
183+
problem.solve(solver=solver, nlp=True)
184+
assert problem.status == cp.OPTIMAL
185+
138186

139187
def test_socp(self, solver):
140188
# Define variables
@@ -159,6 +207,8 @@ def test_socp(self, solver):
159207
assert np.allclose(x.value, [-3.87462191, -2.12978826, 2.33480343])
160208
assert np.allclose(y.value, 5)
161209

210+
# we skip this because it is unclear what cvxpy does to support L.T @ x
211+
@pytest.mark.skip(reason="unclear handling of L.T @ x")
162212
def test_portfolio_socp(self, solver):
163213
np.random.seed(858)
164214
n = 100
@@ -178,6 +228,26 @@ def test_portfolio_socp(self, solver):
178228
problem.solve(solver=solver, nlp=True)
179229
assert problem.status == cp.OPTIMAL
180230
assert np.allclose(problem.value, -1.93414338e+00)
231+
232+
def test_portfolio_socp_x_column_vector(self, solver):
233+
np.random.seed(858)
234+
n = 100
235+
x = cp.Variable((n, 1), name='x')
236+
mu = np.random.randn(n, 1)
237+
Sigma = np.random.randn(n, n)
238+
Sigma = Sigma.T @ Sigma
239+
gamma = 0.1
240+
t = cp.Variable(name='t', bounds=[0, None])
241+
L = np.linalg.cholesky(Sigma, upper=False)
242+
243+
objective = cp.Minimize(-cp.sum(cp.multiply(mu, x)) + gamma * t)
244+
constraints = [cp.norm(L.T @ x, 2) <= t,
245+
cp.sum(x) == 1,
246+
x >= 0]
247+
problem = cp.Problem(objective, constraints)
248+
problem.solve(solver=solver, nlp=True)
249+
assert problem.status == cp.OPTIMAL
250+
assert np.allclose(problem.value, -1.93414338e+00)
181251

182252
def test_localization(self, solver):
183253
np.random.seed(42)

0 commit comments

Comments
 (0)