Skip to content

Commit 4544ef7

Browse files
authored
Merge pull request #1 from cvxpy/ipopt-interface
Ipopt interface prototype
2 parents 87ec462 + e5b9352 commit 4544ef7

File tree

13 files changed

+830
-0
lines changed

13 files changed

+830
-0
lines changed
Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
"""
2+
This file is the CVXPY QP extension of the Cardinal Optimizer
3+
"""
4+
5+
import cvxpy.settings as s
6+
from cvxpy.reductions.solvers.nlp_solvers.nlp_solver import NLPsolver
7+
from cvxpy.utilities.citations import CITATION_DICT
8+
9+
10+
class IPOPT(NLPsolver):
    """
    NLP interface for the IPOPT solver (accessed through cyipopt).
    """
    # Solve capabilities.
    # NOTE(review): IPOPT is a continuous interior-point NLP solver with no
    # native mixed-integer support — confirm MIP_CAPABLE really should be
    # True (left unchanged here to avoid altering dispatch behavior).
    MIP_CAPABLE = True

    # Keyword arguments consumed by the CVXPY interface itself rather than
    # forwarded to the solver.
    INTERFACE_ARGS = ["save_file", "reoptimize"]

    # Map between solver status codes and CVXPY statuses.
    # NOTE(review): these numeric codes appear to be copied from the COPT
    # interface — verify they match the statuses cyipopt actually returns.
    STATUS_MAP = {
        1: s.OPTIMAL,             # optimal
        2: s.INFEASIBLE,          # infeasible
        3: s.UNBOUNDED,           # unbounded
        4: s.INF_OR_UNB,          # infeasible or unbounded
        5: s.SOLVER_ERROR,        # numerical
        6: s.USER_LIMIT,          # node limit
        7: s.OPTIMAL_INACCURATE,  # imprecise
        8: s.USER_LIMIT,          # time out
        9: s.SOLVER_ERROR,        # unfinished
        10: s.USER_LIMIT          # interrupted
    }

    def name(self):
        """
        The name of the solver.
        """
        # Bug fix: this previously returned 'COPT' (copy-paste leftover from
        # the COPT interface), which would register/dispatch this solver
        # under the COPT solver's name.
        return 'IPOPT'

    def import_solver(self):
        """
        Imports the solver.
        """
        import cyipopt  # noqa F401

    def invert(self, solution, inverse_data):
        """
        Returns the solution to the original problem given the inverse_data.
        """
        # Prototype stub: not implemented yet.
        pass

    def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts, solver_cache=None):
        """
        Returns the result of the call to the solver.

        Parameters
        ----------
        data : dict
            Data used by the solver.
        warm_start : bool
            Not used.
        verbose : bool
            Should the solver print output?
        solver_opts : dict
            Additional arguments for the solver.
        solver_cache: None
            None

        Returns
        -------
        tuple
            (status, optimal value, primal, equality dual, inequality dual)
        """
        # Prototype stub: not implemented yet.
        pass

    def cite(self, data):
        """Returns bibtex citation for the solver.

        Parameters
        ----------
        data : dict
            Data generated via an apply call.
        """
        # TODO(review): still returns the COPT citation — add an IPOPT entry
        # to CITATION_DICT and switch this key once it exists.
        return CITATION_DICT["COPT"]
Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
"""
2+
Copyright 2025, the CVXPY developers
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
"""
16+
17+
from cvxpy.constraints import NonNeg, Zero
18+
from cvxpy.reductions.solvers.solver import Solver
19+
20+
21+
class NLPsolver(Solver):
    """
    A non-linear programming (NLP) solver.

    Base class for concrete NLP interfaces (e.g. IPOPT), which implement
    the actual data construction and solve logic.
    """
    # Every NLP solver supports Zero and NonNeg constraints.
    # (Comment fixed: this previously said "QP solver".)
    SUPPORTED_CONSTRAINTS = [Zero, NonNeg]

    # Some solvers cannot solve problems that do not have constraints.
    # For such solvers, REQUIRES_CONSTR should be set to True.
    REQUIRES_CONSTR = False

    # Key used to flag mixed-integer problems in the data dictionary.
    IS_MIP = "IS_MIP"

    def accepts(self, problem):
        """Return whether this solver can handle ``problem``."""
        # NOTE(review): unconditionally accepting every problem is a
        # prototype shortcut — confirm whether attribute/structure checks
        # belong here.
        # can accept everything?
        return True

    def apply(self, problem):
        """
        Construct NLP problem data stored in a dictionary.
        The NLP has the following form

            minimize    f(x)
            subject to  g^l <= g(x) <= g^u
                        x^l <= x <= x^u

        where f and g are non-linear (and possibly non-convex) functions
        """
        # Prototype stub: not implemented yet.
        pass

cvxpy/sandbox/clnlbeam.py

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
2+
3+
import cvxpy as cp
4+
5+
6+
def example_clnlbeam():
    """
    Build the clnlbeam optimal-control test problem.

    Discretizes a beam-control problem on ``num_intervals`` intervals:
    minimize control effort plus a potential-energy term, subject to
    trapezoidal-rule dynamics linking displacement x, angle t, and
    control u.  The cos/sin terms make the problem non-convex, so it
    targets the NLP solve path.
    """
    # Discretization parameters.
    num_intervals = 1000
    step = 1 / num_intervals
    alpha = 350

    # State/control trajectories, one value per grid point.
    t = cp.Variable(num_intervals + 1)   # angle; bounded to [-1, 1] below
    x = cp.Variable(num_intervals + 1)   # displacement; bounded to [-0.05, 0.05]
    u = cp.Variable(num_intervals + 1)   # control; unbounded

    # Trapezoidal discretization of the integral of
    # 0.5*u(s)^2 + 0.5*alpha*cos(t(s)).
    stage_costs = [
        0.5 * step * (u[k + 1] ** 2 + u[k] ** 2)
        + 0.5 * alpha * step * (cp.cos(t[k + 1]) + cp.cos(t[k]))
        for k in range(num_intervals)
    ]
    objective = cp.Minimize(cp.sum(stage_costs))

    # Simple variable bounds.
    constraints = [
        t >= -1,
        t <= 1,
        x >= -0.05,
        x <= 0.05
    ]

    # Trapezoidal dynamics: displacement follows sin(t); angle follows u.
    for k in range(num_intervals):
        constraints.append(
            x[k + 1] - x[k]
            - 0.5 * step * (cp.sin(t[k + 1]) + cp.sin(t[k])) == 0
        )
        constraints.append(
            t[k + 1] - t[k]
            - 0.5 * step * u[k + 1] - 0.5 * step * u[k] == 0
        )

    return cp.Problem(objective, constraints)

cvxpy/sandbox/mle_estimation.py

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
import cyipopt
2+
import numpy as np
3+
from reduction_classes import HS071, Bounds_Getter
4+
5+
import cvxpy as cp
6+
7+
8+
def example_mle():
    """
    Build a Gaussian maximum-likelihood estimation problem.

    Draws 1000 standard-normal samples (fixed seed) and estimates the
    mean ``mu`` and standard deviation ``sigma`` by minimizing the
    negative log-likelihood, with the (demonstration) side constraint
    mu == sigma**2.
    """
    # Synthetic data with a fixed seed for reproducibility.
    sample_size = 1000
    np.random.seed(1234)
    data = np.random.randn(sample_size)
    print(data.var(), data.mean())

    # Parameters to estimate.
    mu = cp.Variable(1, name="mu")        # mean
    sigma = cp.Variable(1, name="sigma")  # standard deviation (sigma >= 0)

    # Gaussian log-likelihood:
    #   n/2 * log(1 / (2*pi*sigma^2)) - sum((data_i - mu)^2) / (2*sigma^2)
    log_likelihood = (
        (sample_size / 2) * cp.log(1 / (2 * np.pi * sigma ** 2))
        - cp.sum_squares(data - mu) / (2 * sigma ** 2)
    )

    # Minimize the negative log-likelihood under the demo constraint.
    return cp.Problem(cp.Minimize(-log_likelihood), [mu == sigma ** 2])
36+
37+
# Sandbox driver: lower the CVXPY problem via Bounds_Getter and hand it to
# cyipopt directly, bypassing the normal CVXPY solve path.
bounds = Bounds_Getter(example_mle())
# Initial guess for the two scalar variables.
# NOTE(review): presumably ordered [mu, sigma], but lb below applies the
# 1e-6 positivity bound to the FIRST entry while sigma is the variable
# that must stay positive — confirm the intended variable ordering.
x0 = [1.0, 0.0]

nlp = cyipopt.Problem(
    n=len(x0),               # number of decision variables
    m=len(bounds.cl),        # number of constraints
    problem_obj=HS071(bounds.new_problem),  # objective/gradient/constraint callbacks
    lb=[1e-6, None],         # variable lower bounds (None = unbounded)
    ub=None,                 # no variable upper bounds
    cl=bounds.cl,            # constraint lower bounds
    cu=bounds.cu,            # constraint upper bounds
)

# IPOPT options: adaptive barrier update, tight tolerance, and L-BFGS
# Hessian approximation (no exact Hessian callback is provided).
nlp.add_option('mu_strategy', 'adaptive')
nlp.add_option('tol', 1e-7)
nlp.add_option('hessian_approximation', "limited-memory")

x, info = nlp.solve(x0)
print(x)

cvxpy/sandbox/portfolio_opt.py

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
import cyipopt
2+
import numpy as np
3+
import pandas as pd
4+
from reduction_classes import HS071, Bounds_Getter
5+
6+
import cvxpy as cp
7+
8+
9+
def portfolio_example():
    """
    Build a Markowitz-style portfolio problem.

    From 13 months of prices for IBM, WMT, and SEHI: minimize the
    portfolio return variance subject to a budget cap and a minimum
    expected-return requirement.
    """
    # Monthly price history for the three assets.
    prices = pd.DataFrame({
        'IBM': [93.043, 84.585, 111.453, 99.525, 95.819, 114.708, 111.515,
                113.211, 104.942, 99.827, 91.607, 107.937, 115.590],
        'WMT': [51.826, 52.823, 56.477, 49.805, 50.287, 51.521, 51.531,
                48.664, 55.744, 47.916, 49.438, 51.336, 55.081],
        'SEHI': [1.063, 0.938, 1.000, 0.938, 1.438, 1.700, 2.540, 2.390,
                 3.120, 2.980, 1.900, 1.750, 1.800]
    })

    # Per-period simple returns, mean return vector, covariance matrix.
    rets = prices.pct_change().dropna().values
    mean_ret = np.mean(rets, axis=0)
    cov = np.cov(rets.T)

    # Decision variable: allocation to each of the three assets.
    holdings = cp.Variable(3)

    # Minimize variance subject to budget and expected-return constraints.
    return cp.Problem(
        cp.Minimize(cp.quad_form(holdings, cov)),
        [cp.sum(holdings) <= 1000, mean_ret @ holdings >= 50]
    )
35+
36+
# Sandbox driver: lower the CVXPY problem via Bounds_Getter and hand it to
# cyipopt directly, bypassing the normal CVXPY solve path.
bounds = Bounds_Getter(portfolio_example())
# Initial guess: equal allocation across the three assets.
x0 = [10.0, 10.0, 10.0]

nlp = cyipopt.Problem(
    n=len(x0),               # number of decision variables
    m=len(bounds.cl),        # number of constraints
    problem_obj=HS071(bounds.new_problem),  # objective/gradient/constraint callbacks
    lb=[0.0, 0.0, 0.0],      # long-only: no short positions — TODO confirm intent
    ub=None,                 # no variable upper bounds
    cl=bounds.cl,            # constraint lower bounds
    cu=bounds.cu,            # constraint upper bounds
)

# IPOPT options: adaptive barrier update, tight tolerance, and L-BFGS
# Hessian approximation (no exact Hessian callback is provided).
nlp.add_option('mu_strategy', 'adaptive')
nlp.add_option('tol', 1e-7)
nlp.add_option('hessian_approximation', "limited-memory")

x, info = nlp.solve(x0)
print(x)

cvxpy/sandbox/qcp_example.py

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
2+
import cyipopt
3+
import numpy as np
4+
from reduction_classes import HS071, Bounds_Getter
5+
6+
import cvxpy as cp
7+
8+
9+
def example_qcp():
    """
    Build a small quadratically-constrained problem.

    Maximize x (expressed as minimizing -x) subject to one linear
    equality and two non-convex quadratic inequalities, with y, z >= 0
    enforced through variable bounds.
    """
    # Decision variables; y and z are nonnegative via bounds.
    x = cp.Variable(1)
    y = cp.Variable(1, bounds=[0, np.inf])
    z = cp.Variable(1, bounds=[0, np.inf])

    # Maximizing x == minimizing -x.
    problem = cp.Problem(
        cp.Minimize(-x),
        [
            x + y + z == 1,            # linear equality
            x ** 2 + y ** 2 - z ** 2 <= 0,  # quadratic: x*x + y*y - z*z <= 0
            x ** 2 - y * z <= 0,       # quadratic: x*x - y*z <= 0
        ],
    )
    return problem
28+
29+
# Sandbox driver: lower the CVXPY problem via Bounds_Getter and hand it to
# cyipopt directly, bypassing the normal CVXPY solve path.
bounds = Bounds_Getter(example_qcp())
# Initial guess for (x, y, z).
x0 = [0.2, 0.2, 0.2]

nlp = cyipopt.Problem(
    n=len(x0),               # number of decision variables
    m=len(bounds.cl),        # number of constraints
    problem_obj=HS071(bounds.new_problem),  # objective/gradient/constraint callbacks
    lb=bounds.lb,            # variable lower bounds from the reduction
    ub=bounds.ub,            # variable upper bounds from the reduction
    cl=bounds.cl,            # constraint lower bounds
    cu=bounds.cu,            # constraint upper bounds
)

# IPOPT options: adaptive barrier update, tight tolerance, and L-BFGS
# Hessian approximation (no exact Hessian callback is provided).
nlp.add_option('mu_strategy', 'adaptive')
nlp.add_option('tol', 1e-7)
nlp.add_option('hessian_approximation', "limited-memory")
nlp.add_option('print_level', 7)  # Increase for more detailed output

x, info = nlp.solve(x0)
print(x)

0 commit comments

Comments
 (0)