Skip to content

Commit 192f15d

Browse files
committed
adding more changes from the reductions
1 parent 0756405 commit 192f15d

File tree

12 files changed

+672
-8
lines changed

12 files changed

+672
-8
lines changed

cvxpy/atoms/affine/binary_operators.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -190,6 +190,9 @@ def _grad(self, values):
190190
X = values[0]
191191
Y = values[1]
192192

193+
if np.isscalar(X) or np.isscalar(Y):
194+
return [sp.csc_matrix(Y), sp.csc_matrix(X)]
195+
193196
# Get dimensions
194197
m, n = self.args[0].shape if len(self.args[0].shape) == 2 else (self.args[0].size, 1)
195198
n2, p = self.args[1].shape if len(self.args[1].shape) == 2 else (self.args[1].size, 1)
@@ -198,7 +201,7 @@ def _grad(self, values):
198201
assert n == n2, f"Inner dimensions must match for multiplication: {n} != {n2}"
199202

200203
# Compute ∂vec(Z)/∂vec(X) = (Y.T ⊗ I_m).T
201-
# This is a (m*n) × (m*p) matrix
204+
# This is a (m*n) × (m*p) matrix
202205
DX = sp.kron(Y.T, sp.eye(m, format='csc'), format='csc').T
203206

204207
# Compute ∂vec(Z)/∂vec(Y) = (I_p ⊗ X).T

cvxpy/reductions/expr2smooth/canonicalizers/div_canon.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,10 @@
1414
limitations under the License.
1515
"""
1616

17+
from cvxpy.atoms.affine.binary_operators import multiply
1718
from cvxpy.expressions.variable import Variable
1819

20+
1921
# We canonicalize div(x, y) as z * y = x.
2022
def div_canon(expr, args):
2123
# TODO: potential bounds here?
@@ -33,4 +35,4 @@ def div_canon(expr, args):
3335
else:
3436
z.value = expr.point_in_domain()
3537
# TODO: should we also include y >= 0 here?
36-
return z, [z * y == args[0], y == args[1]]#], #y >= 0, z >= 0]
38+
return z, [multiply(z, y) == args[0], y == args[1]]#, y >= 0, z >= 0]

cvxpy/reductions/expr2smooth/canonicalizers/log_canon.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919

2020
def log_canon(expr, args):
21-
t = Variable(args[0].size)
21+
t = Variable(args[0].shape)
2222
if args[0].value is not None:
2323
t.value = args[0].value
2424
else:

cvxpy/reductions/expr2smooth/canonicalizers/pnorm_canon.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,17 +19,15 @@
1919

2020
def pnorm_canon(expr, args):
2121
x = args[0]
22-
p = expr.p_rational
23-
w = expr.w
22+
p = expr.p
2423

2524
if p == 1:
2625
return x, []
2726

2827
shape = expr.shape
2928
t = Variable(shape)
3029
if p % 2 == 0:
31-
summation = [x[i]**p for i in range(len(x))]
30+
summation = sum(x[i]**p for i in range(x.size))
3231
return t, [t**p == summation, t >= 0]
3332
else:
3433
z = Variable(shape)
35-
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
"""
2+
Copyright 2025 CVXPY developers
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
"""
16+
17+
from cvxpy.expressions.variable import Variable
18+
19+
20+
def cos_canon(expr, args):
    """Canonicalize cos(x) for the smooth/NLP reduction.

    If the argument is not already a plain Variable, introduce a fresh
    Variable ``t`` of the same shape, constrain it to equal the argument,
    and rebuild the atom over ``t``.  Returns ``(new_expr, constraints)``.
    """
    if type(args[0]) is not Variable:
        # Use shape (not size): a flat Variable of length ``size`` would
        # fail the elementwise ``t == args[0]`` constraint for non-1D
        # arguments (same fix as log_canon).
        t = Variable(args[0].shape)
        if args[0].value is not None:
            # Warm-start from the argument's current value when available.
            t.value = args[0].value
        else:
            t.value = expr.point_in_domain()
        return expr.copy([t]), [t == args[0]]
    return expr, []
26+
27+
28+
def sin_canon(expr, args):
    """Canonicalize sin(x) for the smooth/NLP reduction.

    If the argument is not already a plain Variable, introduce a fresh
    Variable ``t`` of the same shape, constrain it to equal the argument,
    and rebuild the atom over ``t``.  Returns ``(new_expr, constraints)``.
    """
    if type(args[0]) is not Variable:
        # Use shape (not size): a flat Variable of length ``size`` would
        # fail the elementwise ``t == args[0]`` constraint for non-1D
        # arguments (same fix as log_canon).
        t = Variable(args[0].shape)
        if args[0].value is not None:
            # Warm-start from the argument's current value when available.
            t.value = args[0].value
        else:
            t.value = expr.point_in_domain()
        return expr.copy([t]), [t == args[0]]
    return expr, []
34+
35+
36+
def tan_canon(expr, args):
    """Canonicalize tan(x) for the smooth/NLP reduction.

    If the argument is not already a plain Variable, introduce a fresh
    Variable ``t`` of the same shape, constrain it to equal the argument,
    and rebuild the atom over ``t``.  Returns ``(new_expr, constraints)``.
    """
    if type(args[0]) is not Variable:
        # Use shape (not size): a flat Variable of length ``size`` would
        # fail the elementwise ``t == args[0]`` constraint for non-1D
        # arguments (same fix as log_canon).
        t = Variable(args[0].shape)
        if args[0].value is not None:
            # Warm-start from the argument's current value when available.
            t.value = args[0].value
        else:
            t.value = expr.point_in_domain()
        return expr.copy([t]), [t == args[0]]
    return expr, []

cvxpy/reductions/solvers/nlp_solvers/ipopt_nlpif.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -261,7 +261,7 @@ def jacobianstructure(self):
261261
#var.value = self.initial_point[offset]
262262
var.value = np.nan
263263
else:
264-
var.value = np.nan * np.ones(var.size)
264+
var.value = (np.nan * np.ones(var.size)).reshape(var.shape, order='F')
265265
#var.value = np.atleast_1d(self.initial_point[offset:offset + var.size])
266266
#offset += var.size
267267
rows, cols = [], []

cvxpy/sandbox/logistic.py

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
"""Sandbox example: L2-regularized logistic regression solved via IPOPT (NLP path)."""
import cvxpy as cp
import numpy as np

# --- Synthetic two-class data -------------------------------------------
np.random.seed(42)
n_samples = 1000
n_features = 50

# Two Gaussian clouds, shifted to -1 and +1 in every coordinate.
X_class0 = np.random.randn(n_samples // 2, n_features) - 1
X_class1 = np.random.randn(n_samples // 2, n_features) + 1
X = np.vstack([X_class0, X_class1])

# Labels: -1 for class 0, +1 for class 1
y = np.concatenate([-np.ones(n_samples // 2), np.ones(n_samples // 2)])

# Prepend a column of ones so w[0] acts as the intercept (bias).
X_with_intercept = np.column_stack([np.ones(n_samples), X])

# --- Model ---------------------------------------------------------------
n_features_with_intercept = n_features + 1
w = cp.Variable(n_features_with_intercept)  # weights including bias

# Regularization strength.
lambda_reg = 0.1

# Negative log-likelihood: sum_i log(1 + exp(-y_i * x_i^T w)),
# written with cp.logistic for numerical stability.
log_likelihood = cp.sum(cp.logistic(-cp.multiply(y, X_with_intercept @ w)))
regularization = lambda_reg * cp.norm(w[1:], 2)**2  # Don't regularize intercept

objective = cp.Minimize(log_likelihood + regularization)
problem = cp.Problem(objective)

# Solve through the NLP (IPOPT) path.
problem.solve(solver=cp.IPOPT, nlp=True, verbose=True)

# --- Report --------------------------------------------------------------
print(f"Optimization status: {problem.status}")
print(f"Optimal objective value: {problem.value:.4f}")
print("Optimal weights (including bias):")
print(f" Bias (intercept): {w.value[0]:.4f}")
for i in range(n_features):
    print(f" Weight {i+1}: {w.value[i+1]:.4f}")

# Classify by the sign of the decision score.  np.where is used instead of
# np.sign because sign(0) == 0 would never match a ±1 label, silently
# counting a zero-score tie as misclassified; here a tie maps to +1.
scores = X_with_intercept @ w.value
predictions = np.where(scores >= 0, 1.0, -1.0)
accuracy = np.mean(predictions == y)
print(f"\nTraining accuracy: {accuracy:.2%}")

cvxpy/sandbox/mle-canon.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
"""Sandbox check: Gaussian MLE through the smooth/NLP path (IPOPT).

Maximizes the log-likelihood of n i.i.d. normal samples over (mu, sigma),
with the likelihood broken into smooth pieces via auxiliary variables, and
verifies the result against the closed-form sample mean / (biased) std.
"""
import numpy as np

import cvxpy as cp

n = 2
np.random.seed(1234)
data = np.random.randn(n)

# Decision variables, warm-started at the standard normal (mu=0, sigma=1).
mu = cp.Variable(1, name="mu")
mu.value = np.array([0.0])
sigma = cp.Variable(1, name="sigma")
sigma.value = np.array([1.0])

# Auxiliary variables splitting the likelihood into smooth subexpressions.
t1 = cp.Variable(1, name="t1")   # copy of sigma
t2 = cp.Variable(n, name="t2")   # residuals data - mu
t3 = cp.Variable(1, name="t3")   # 2*pi*sigma^2
t4 = cp.Variable(1, name="t4")   # 1 / (2*pi*sigma^2)
t5 = cp.Variable(1, name="t5")   # copy of sigma

# log L = (n/2) * log(1/(2*pi*sigma^2)) - sum((x - mu)^2) / (2*sigma^2)
log_likelihood = (n / 2) * cp.log(t4) - cp.sum(cp.square(t2)) / (2 * t1**2)

constraints = [
    t1 == sigma,
    t2 == data - mu,
    t3 == 2 * np.pi * t5**2,
    t4 == 1 / t3,
    t5 == sigma,
]

# Seed the auxiliaries consistently with the (mu, sigma) warm start.
t1.value = sigma.value
t5.value = sigma.value
t2.value = data - mu.value
t3.value = 2 * np.pi * sigma.value**2
t4.value = 1 / t3.value

problem = cp.Problem(cp.Maximize(log_likelihood), constraints)
problem.solve(solver=cp.IPOPT, nlp=True)

# Gaussian MLE in closed form: sample mean and (biased, 1/n) sample std.
assert problem.status == cp.OPTIMAL
assert np.allclose(mu.value, np.mean(data))
assert np.allclose(sigma.value, np.std(data))

0 commit comments

Comments
 (0)