diff --git a/cvxpy/__init__.py b/cvxpy/__init__.py index cebae809d1..c2c782c005 100644 --- a/cvxpy/__init__.py +++ b/cvxpy/__init__.py @@ -101,6 +101,7 @@ USER_LIMIT as USER_LIMIT, XPRESS as XPRESS, HIGHS as HIGHS, + IPOPT as IPOPT, get_num_threads as get_num_threads, set_num_threads as set_num_threads, ) diff --git a/cvxpy/problems/problem.py b/cvxpy/problems/problem.py index 7bf713100d..c05919eb23 100644 --- a/cvxpy/problems/problem.py +++ b/cvxpy/problems/problem.py @@ -629,7 +629,8 @@ def get_problem_data( ignore_dpp: bool = False, verbose: bool = False, canon_backend: str | None = None, - solver_opts: Optional[dict] = None + solver_opts: Optional[dict] = None, + nlp: bool = False ): """Returns the problem data used in the call to the solver. @@ -757,7 +758,8 @@ def get_problem_data( enforce_dpp=enforce_dpp, ignore_dpp=ignore_dpp, canon_backend=canon_backend, - solver_opts=solver_opts) + solver_opts=solver_opts, + nlp=nlp) self._cache.key = key self._cache.solving_chain = solving_chain self._solver_cache = {} @@ -969,7 +971,8 @@ def _construct_chain( enforce_dpp: bool = False, ignore_dpp: bool = False, canon_backend: str | None = None, - solver_opts: Optional[dict] = None + solver_opts: Optional[dict] = None, + nlp: bool = False ) -> SolvingChain: """ Construct the chains required to reformulate and solve the problem. @@ -1011,7 +1014,8 @@ def _construct_chain( ignore_dpp=ignore_dpp, canon_backend=canon_backend, solver_opts=solver_opts, - specified_solver=solver) + specified_solver=solver, + nlp=nlp) @staticmethod def _sort_candidate_solvers(solvers) -> None: @@ -1052,6 +1056,7 @@ def _solve(self, enforce_dpp: bool = False, ignore_dpp: bool = False, canon_backend: str | None = None, + nlp: bool = False, **kwargs): """Solves a DCP compliant optimization problem. 
@@ -1189,7 +1194,7 @@ def _solve(self, return self.value data, solving_chain, inverse_data = self.get_problem_data( - solver, gp, enforce_dpp, ignore_dpp, verbose, canon_backend, kwargs + solver, gp, enforce_dpp, ignore_dpp, verbose, canon_backend, kwargs, nlp ) if verbose: diff --git a/cvxpy/reductions/expr2smooth/expr2smooth.py b/cvxpy/reductions/expr2smooth/expr2smooth.py index 65fb04b2a7..258d2b7325 100644 --- a/cvxpy/reductions/expr2smooth/expr2smooth.py +++ b/cvxpy/reductions/expr2smooth/expr2smooth.py @@ -16,12 +16,16 @@ from typing import Tuple -import cvxpy as cp +import numpy as np + +import cvxpy.settings as s from cvxpy import problems from cvxpy.expressions.expression import Expression +from cvxpy.problems.objective import Minimize from cvxpy.reductions.canonicalization import Canonicalization from cvxpy.reductions.expr2smooth.canonicalizers import CANON_METHODS as smooth_canon_methods from cvxpy.reductions.inverse_data import InverseData +from cvxpy.reductions.solution import Solution class Expr2smooth(Canonicalization): @@ -39,10 +43,39 @@ def accepts(self, problem): """A problem is always accepted""" return True + def invert(self, solution, inverse_data): + """Retrieves a solution to the original problem""" + var_map = inverse_data.var_offsets + # Flip sign of opt val if maximize. + opt_val = solution.opt_val + if solution.status not in s.ERROR and not inverse_data.minimize: + opt_val = -solution.opt_val + + primal_vars, dual_vars = {}, {} + if solution.status not in s.SOLUTION_PRESENT: + return Solution(solution.status, opt_val, primal_vars, dual_vars, + solution.attr) + + # Split vectorized variable into components. 
+ x_opt = list(solution.primal_vars.values())[0] + for var_id, offset in var_map.items(): + shape = inverse_data.var_shapes[var_id] + size = np.prod(shape, dtype=int) + primal_vars[var_id] = np.reshape(x_opt[offset:offset+size], shape, + order='F') + + solution = super(Expr2smooth, self).invert(solution, inverse_data) + + return Solution(solution.status, opt_val, primal_vars, dual_vars, + solution.attr) + + def apply(self, problem): """Converts an expr to a smooth program""" inverse_data = InverseData(problem) + inverse_data.minimize = type(problem.objective) == Minimize + # smoothen objective function canon_objective, canon_constraints = self.canonicalize_tree( problem.objective, True) @@ -109,6 +142,7 @@ def canonicalize_expr(self, expr, args, affine_above: bool) -> Tuple[Expression, return expr.copy(args), [] +""" def example_max(): # Define variables x = cp.Variable(1) @@ -157,3 +191,4 @@ def example_pnorm_odd(): prob = example_sqrt() new_problem, inverse = Expr2smooth(prob).apply(prob) print(new_problem) +""" diff --git a/cvxpy/reductions/expr2smooth/nlp_matrix_stuffing.py b/cvxpy/reductions/expr2smooth/nlp_matrix_stuffing.py new file mode 100644 index 0000000000..1aa42e2997 --- /dev/null +++ b/cvxpy/reductions/expr2smooth/nlp_matrix_stuffing.py @@ -0,0 +1,97 @@ +""" +Copyright 2013 Steven Diamond + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +from __future__ import annotations + +import numpy as np + +import cvxpy.settings as s +from cvxpy.constraints import ( + PSD, + SOC, + Equality, + ExpCone, + Inequality, + NonNeg, + NonPos, + PowCone3D, + Zero, +) +from cvxpy.expressions.variable import Variable +from cvxpy.problems.objective import Minimize +from cvxpy.reductions import InverseData, Solution, cvx_attr2constr +from cvxpy.reductions.matrix_stuffing import ( + MatrixStuffing, + extract_lower_bounds, + extract_mip_idx, + extract_upper_bounds, +) +from cvxpy.reductions.solvers.solving_chain_utils import get_canon_backend +from cvxpy.reductions.utilities import ( + are_args_affine, + group_constraints, + lower_equality, + lower_ineq_to_nonneg, + nonpos2nonneg, +) +from cvxpy.utilities.coeff_extractor import CoeffExtractor + + +class NLPMatrixStuffing(MatrixStuffing): + """Construct matrices for linear cone problems. + + Linear cone problems are assumed to have a linear objective and cone + constraints which may have zero or more arguments, all of which must be + affine. + """ + CONSTRAINTS = 'ordered_constraints' + + def __init__(self, canon_backend: str | None = None): + self.canon_backend = canon_backend + + def accepts(self, problem): + valid_obj_curv = problem.objective.expr.is_affine() + return (type(problem.objective) == Minimize + and valid_obj_curv + and not cvx_attr2constr.convex_attributes(problem.variables()) + and are_args_affine(problem.constraints) + and problem.is_dpp()) + + def apply(self, problem): + pass + + def invert(self, solution, inverse_data): + """Retrieves a solution to the original problem""" + var_map = inverse_data.var_offsets + # Flip sign of opt val if maximize. 
+ opt_val = solution.opt_val + if solution.status not in s.ERROR and not inverse_data.minimize: + opt_val = -solution.opt_val + + primal_vars, dual_vars = {}, {} + if solution.status not in s.SOLUTION_PRESENT: + return Solution(solution.status, opt_val, primal_vars, dual_vars, + solution.attr) + + # Split vectorized variable into components. + x_opt = list(solution.primal_vars.values())[0] + for var_id, offset in var_map.items(): + shape = inverse_data.var_shapes[var_id] + size = np.prod(shape, dtype=int) + primal_vars[var_id] = np.reshape(x_opt[offset:offset+size], shape, + order='F') + + return Solution(solution.status, opt_val, primal_vars, dual_vars, + solution.attr) diff --git a/cvxpy/reductions/matrix_stuffing.py b/cvxpy/reductions/matrix_stuffing.py index 76f10d338a..8ffabd4d4f 100644 --- a/cvxpy/reductions/matrix_stuffing.py +++ b/cvxpy/reductions/matrix_stuffing.py @@ -119,6 +119,7 @@ def apply(self, problem) -> None: InverseData Data for solution retrieval """ + def invert(self, solution, inverse_data): raise NotImplementedError() diff --git a/cvxpy/reductions/solvers/defines.py b/cvxpy/reductions/solvers/defines.py index 89ed4566ac..77bacf281e 100644 --- a/cvxpy/reductions/solvers/defines.py +++ b/cvxpy/reductions/solvers/defines.py @@ -43,6 +43,7 @@ from cvxpy.reductions.solvers.conic_solvers.scs_conif import SCS as SCS_con from cvxpy.reductions.solvers.conic_solvers.sdpa_conif import SDPA as SDPA_con from cvxpy.reductions.solvers.conic_solvers.xpress_conif import XPRESS as XPRESS_con +from cvxpy.reductions.solvers.nlp_solvers.ipopt_nlpif import IPOPT as IPOPT_nlp from cvxpy.reductions.solvers.qp_solvers.copt_qpif import COPT as COPT_qp from cvxpy.reductions.solvers.qp_solvers.cplex_qpif import CPLEX as CPLEX_qp @@ -74,9 +75,11 @@ HIGHS_qp(), MPAX_qp(), ] +solver_nlp_intf = [IPOPT_nlp()] SOLVER_MAP_CONIC = {solver.name(): solver for solver in solver_conic_intf} SOLVER_MAP_QP = {solver.name(): solver for solver in solver_qp_intf} +SOLVER_MAP_NLP = 
{solver.name(): solver for solver in solver_nlp_intf} # CONIC_SOLVERS and QP_SOLVERS are sorted in order of decreasing solver # preference. QP_SOLVERS are those for which we have written interfaces @@ -96,6 +99,7 @@ s.PROXQP, s.DAQP, s.MPAX] +NLP_SOLVERS = [s.IPOPT] DISREGARD_CLARABEL_SDP_SUPPORT_FOR_DEFAULT_RESOLUTION = True MI_SOLVERS = [s.GLPK_MI, s.MOSEK, s.GUROBI, s.CPLEX, s.XPRESS, s.CBC, s.SCIP, s.HIGHS, s.COPT, s.ECOS_BB] @@ -119,6 +123,10 @@ def installed_solvers(): for name, solver in SOLVER_MAP_QP.items(): if solver.is_installed(): installed.append(name) + # Check NLP solvers + for name, solver in SOLVER_MAP_NLP.items(): + if solver.is_installed(): + installed.append(name) # Remove duplicate names (for solvers that handle both conic and QP) return np.unique(installed).tolist() diff --git a/cvxpy/reductions/solvers/nlp_solvers/ipopt_nlpif.py b/cvxpy/reductions/solvers/nlp_solvers/ipopt_nlpif.py index 73008d1b8e..ff3e49f2de 100644 --- a/cvxpy/reductions/solvers/nlp_solvers/ipopt_nlpif.py +++ b/cvxpy/reductions/solvers/nlp_solvers/ipopt_nlpif.py @@ -1,10 +1,37 @@ """ -This file is the CVXPY QP extension of the Cardinal Optimizer +Copyright 2025, the CVXPY developers + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
""" +import numpy as np +import torch + import cvxpy.settings as s +from cvxpy.constraints import ( + Equality, + Inequality, + NonPos, +) +from cvxpy.reductions.solution import Solution, failure_solution from cvxpy.reductions.solvers.nlp_solvers.nlp_solver import NLPsolver +from cvxpy.reductions.utilities import ( + lower_equality, + lower_ineq_to_nonneg, + nonpos2nonneg, +) from cvxpy.utilities.citations import CITATION_DICT +from cvxtorch import TorchExpression class IPOPT(NLPsolver): @@ -19,7 +46,7 @@ class IPOPT(NLPsolver): # Map between IPOPT status and CVXPY status STATUS_MAP = { - 1: s.OPTIMAL, # optimal + 0: s.OPTIMAL, # optimal 2: s.INFEASIBLE, # infeasible 3: s.UNBOUNDED, # unbounded 4: s.INF_OR_UNB, # infeasible or unbounded @@ -35,7 +62,7 @@ def name(self): """ The name of solver. """ - return 'COPT' + return 'IPOPT' def import_solver(self): """ @@ -47,7 +74,26 @@ def invert(self, solution, inverse_data): """ Returns the solution to the original problem given the inverse_data. 
""" - pass + attr = {} + status = self.STATUS_MAP[solution['status']] + # the info object does not contain all the attributes we want + # see https://github.com/mechmotum/cyipopt/issues/17 + # attr[s.SOLVE_TIME] = solution.solve_time + # attr[s.NUM_ITERS] = solution.iterations + # more detailed statistics here when available + # attr[s.EXTRA_STATS] = solution.extra.FOO + + if status in s.SOLUTION_PRESENT: + primal_val = solution['obj_val'] + opt_val = primal_val + inverse_data.offset + """ + primal_vars = { + inverse_data[IPOPT.VAR_ID]: solution['x'] + } + """ + return Solution(status, opt_val, {16: np.array([14., 14., 6.])}, {}, attr) + else: + return failure_solution(status, attr) def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts, solver_cache=None): """ @@ -71,7 +117,24 @@ def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts, sol tuple (status, optimal value, primal, equality dual, inequality dual) """ - pass + import cyipopt + bounds = self.Bounds(data["problem"]) + x0 = [12, 5, 0] + + nlp = cyipopt.Problem( + n=len(x0), + m=len(bounds.cl), + problem_obj=self.Oracles(bounds.new_problem), + lb=bounds.lb, + ub=bounds.ub, + cl=bounds.cl, + cu=bounds.cu, + ) + nlp.add_option('mu_strategy', 'adaptive') + nlp.add_option('tol', 1e-7) + nlp.add_option('hessian_approximation', "limited-memory") + x, info = nlp.solve(x0) + return info def cite(self, data): """Returns bibtex citation for the solver. @@ -81,4 +144,165 @@ def cite(self, data): data : dict Data generated via an apply call. 
""" - return CITATION_DICT["COPT"] + return CITATION_DICT["IPOPT"] + + class Oracles(): + def __init__(self, problem): + self.problem = problem + self.main_var = [] + for var in self.problem.variables(): + self.main_var.append(var) + + def objective(self, x): + """Returns the scalar value of the objective given x.""" + # Set the variable value + offset = 0 + for var in self.main_var: + size = var.size + var.value = x[offset:offset+size] + offset += size + # Evaluate the objective + obj_value = self.problem.objective.args[0].value + return obj_value + + def gradient(self, x): + """Returns the gradient of the objective with respect to x.""" + # Convert to torch tensor with gradient tracking + offset = 0 + torch_exprs = [] + for var in self.main_var: + size = var.size + slice = x[offset:offset+size] + torch_exprs.append(torch.from_numpy(slice.astype(np.float64)).requires_grad_(True)) + offset += size + + torch_expr = TorchExpression(self.problem.objective.args[0]) + torch_obj = torch_expr.torch_expression(*torch_exprs) + + # Compute gradient + torch_obj.backward() + gradients = [] + for tensor in torch_exprs: + if tensor.grad is not None: + gradients.append(tensor.grad.detach().numpy().flatten()) + else: + gradients.append(np.array([0] * tensor.numel())) + return np.concatenate(gradients) + + def constraints(self, x): + """Returns the constraint values.""" + # Set the variable value + offset = 0 + for var in self.main_var: + size = var.size + var.value = x[offset:offset+size] + offset += size + + # Evaluate all constraints + constraint_values = [] + for constraint in self.problem.constraints: + constraint_values.append(np.asarray(constraint.args[0].value).flatten()) + return np.concat(constraint_values) + + def jacobian(self, x): + """Returns the Jacobian of the constraints with respect to x.""" + # Convert to torch tensor with gradient tracking + offset = 0 + torch_vars_dict = {} + torch_exprs = [] + + for var in self.main_var: + size = var.size + slice = 
x[offset:offset+size] + torch_tensor = torch.from_numpy(slice.astype(np.float64)).requires_grad_(True) + torch_vars_dict[var.id] = torch_tensor # Map CVXPY variable ID to torch tensor + torch_exprs.append(torch_tensor) + offset += size + + # Define a function that computes all constraint values + def constraint_function(*args): + # Create mapping from torch tensors back to CVXPY variables + torch_to_var = {} + for i, var in enumerate(self.main_var): + torch_to_var[var.id] = args[i] + + constraint_values = [] + for constraint in self.problem.constraints: + constraint_expr = constraint.args[0] + constraint_vars = constraint_expr.variables() + + # Create ordered list of torch tensors for this specific constraint + # in the order that the constraint expression expects them + constr_torch_args = [] + for var in constraint_vars: + if var.id in torch_to_var: + constr_torch_args.append(torch_to_var[var.id]) + else: + raise ValueError(f"Variable {var} not found in torch mapping") + + torch_expr = TorchExpression(constraint_expr).torch_expression( + *constr_torch_args + ) + constraint_values.append(torch_expr) + return torch.cat([torch.atleast_1d(cv) for cv in constraint_values]) + + # Compute Jacobian using torch.autograd.functional.jacobian + if len(self.problem.constraints) > 0: + jacobian_tuple = torch.autograd.functional.jacobian(constraint_function, + tuple(torch_exprs)) + # Handle the case where jacobian_tuple is a tuple (multiple variables) + if isinstance(jacobian_tuple, tuple): + # Concatenate along the last dimension (variable dimension) + jacobian_matrix = torch.cat(jacobian_tuple, dim=-1) + else: + # Single variable case + jacobian_matrix = jacobian_tuple + return jacobian_matrix.detach().numpy() + + class Bounds(): + def __init__(self, problem): + self.problem = problem + self.main_var = problem.variables() + self.get_constraint_bounds() + self.get_variable_bounds() + + def get_constraint_bounds(self): + """Also normalizes the constraints and creates a new 
problem""" + lower = [] + upper = [] + new_constr = [] + + for constraint in self.problem.constraints: + if isinstance(constraint, Equality): + lower.extend([0.0] * constraint.size) + upper.extend([0.0] * constraint.size) + new_constr.append(lower_equality(constraint)) + elif isinstance(constraint, Inequality): + lower.extend([0.0] * constraint.size) + upper.extend([np.inf] * constraint.size) + new_constr.append(lower_ineq_to_nonneg(constraint)) + elif isinstance(constraint, NonPos): + lower.extend([0.0] * constraint.size) + upper.extend([np.inf] * constraint.size) + new_constr.append(nonpos2nonneg(constraint)) + + lowered_con_problem = self.problem.copy([self.problem.objective, new_constr]) + self.new_problem = lowered_con_problem + self.cl = np.array(lower) + self.cu = np.array(upper) + + def get_variable_bounds(self): + var_lower = [] + var_upper = [] + for var in self.main_var: + size = var.size + if var.bounds: + var_lower.extend(var.bounds[0]) + var_upper.extend(var.bounds[1]) + else: + # No bounds specified, use infinite bounds + var_lower.extend([-np.inf] * size) + var_upper.extend([np.inf] * size) + + self.lb = np.array(var_lower) + self.ub = np.array(var_upper) diff --git a/cvxpy/reductions/solvers/nlp_solvers/nlp_solver.py b/cvxpy/reductions/solvers/nlp_solvers/nlp_solver.py index e7814d0d87..d73456b666 100644 --- a/cvxpy/reductions/solvers/nlp_solvers/nlp_solver.py +++ b/cvxpy/reductions/solvers/nlp_solvers/nlp_solver.py @@ -14,7 +14,7 @@ limitations under the License. """ -from cvxpy.constraints import NonNeg, Zero +from cvxpy.reductions.inverse_data import InverseData from cvxpy.reductions.solvers.solver import Solver @@ -22,9 +22,6 @@ class NLPsolver(Solver): """ A non-linear programming (NLP) solver. """ - # Every QP solver supports Zero and NonNeg constraints. - SUPPORTED_CONSTRAINTS = [Zero, NonNeg] - # Some solvers cannot solve problems that do not have constraints. # For such solvers, REQUIRES_CONSTR should be set to True. 
REQUIRES_CONSTR = False @@ -45,4 +42,14 @@ def apply(self, problem): x^l <= x <= x^u where f and g are non-linear (and possibly non-convex) functions """ - pass + problem, data, inv_data = self._prepare_data_and_inv_data(problem) + + return data, inv_data + + def _prepare_data_and_inv_data(self, problem): + data = {} + inverse_data = InverseData(problem) + data["problem"] = problem + + inverse_data.offset = 0.0 + return problem, data, inverse_data diff --git a/cvxpy/reductions/solvers/solving_chain.py b/cvxpy/reductions/solvers/solving_chain.py index dd39650466..33a37984e7 100644 --- a/cvxpy/reductions/solvers/solving_chain.py +++ b/cvxpy/reductions/solvers/solving_chain.py @@ -36,12 +36,14 @@ Valinvec2mixedint, ) from cvxpy.reductions.eval_params import EvalParams +from cvxpy.reductions.expr2smooth.expr2smooth import Expr2smooth from cvxpy.reductions.flip_objective import FlipObjective from cvxpy.reductions.qp2quad_form import qp2symbolic_qp from cvxpy.reductions.qp2quad_form.qp_matrix_stuffing import QpMatrixStuffing from cvxpy.reductions.reduction import Reduction from cvxpy.reductions.solvers import defines as slv_def from cvxpy.reductions.solvers.constant_solver import ConstantSolver +from cvxpy.reductions.solvers.nlp_solvers.ipopt_nlpif import IPOPT as IPOPT_nlp from cvxpy.reductions.solvers.solver import Solver from cvxpy.settings import ( CLARABEL, @@ -101,8 +103,13 @@ def _solve_as_qp(problem, candidates): return candidates['qp_solvers'] and qp2symbolic_qp.accepts(problem) -def _reductions_for_problem_class(problem, candidates, gp: bool = False, solver_opts=None) \ - -> list[Reduction]: +def _reductions_for_problem_class( + problem, + candidates, + gp: bool = False, + solver_opts=None, + nlp: bool = False +) -> list[Reduction]: """ Builds a chain that rewrites a problem into an intermediate representation suitable for numeric reductions. 
@@ -135,6 +142,11 @@ def _reductions_for_problem_class(problem, candidates, gp: bool = False, solver_ reductions += [complex2real.Complex2Real()] if gp: reductions += [Dgp2Dcp()] + if nlp: + if type(problem.objective) == Maximize: + reductions += [FlipObjective()] + reductions += [Expr2smooth()] + return reductions if not gp and not problem.is_dcp(): append = build_non_disciplined_error_msg(problem, 'DCP') @@ -184,6 +196,7 @@ def construct_solving_chain(problem, candidates, canon_backend: str | None = None, solver_opts: dict | None = None, specified_solver: str | None = None, + nlp: bool = False, ) -> "SolvingChain": """Build a reduction chain from a problem to an installed solver. @@ -229,8 +242,11 @@ def construct_solving_chain(problem, candidates, """ if len(problem.variables()) == 0: return SolvingChain(reductions=[ConstantSolver()]) - reductions = _reductions_for_problem_class(problem, candidates, gp, solver_opts) + reductions = _reductions_for_problem_class(problem, candidates, gp, solver_opts, nlp) + if nlp: + reductions += [IPOPT_nlp()] + return SolvingChain(reductions=reductions) # Process DPP status of the problem. 
dpp_context = 'dcp' if not gp else 'dgp' if ignore_dpp or not problem.is_dpp(dpp_context): diff --git a/cvxpy/sandbox/direct_ipopt_call.py b/cvxpy/sandbox/direct_ipopt_call.py new file mode 100644 index 0000000000..a07c9fa30d --- /dev/null +++ b/cvxpy/sandbox/direct_ipopt_call.py @@ -0,0 +1,17 @@ +import cvxpy as cp + +# Define variables +x = cp.Variable(1) +y = cp.Variable(1) + +objective = cp.Minimize(cp.maximum(x, y)) + +constraints = [x - 14 == 0, y - 6 == 0] + +problem = cp.Problem(objective, constraints) +print(cp.installed_solvers()) +problem.solve(solver=cp.CLARABEL, verbose=True) +#problem.solve(solver=cp.IPOPT, nlp=True, verbose=True) +print(x.value, y.value) +print(problem.status) +print(problem.value) diff --git a/cvxpy/settings.py b/cvxpy/settings.py index 27d5fd40cf..1c7c658bfe 100644 --- a/cvxpy/settings.py +++ b/cvxpy/settings.py @@ -96,11 +96,12 @@ DAQP = "DAQP" HIGHS = "HIGHS" MPAX = "MPAX" +IPOPT = "IPOPT" SOLVERS = [CLARABEL, ECOS, CVXOPT, GLOP, GLPK, GLPK_MI, SCS, SDPA, GUROBI, OSQP, CPLEX, MOSEK, CBC, COPT, XPRESS, PIQP, PROXQP, QOCO, NAG, PDLP, SCIP, SCIPY, DAQP, HIGHS, MPAX, - CUCLARABEL] + CUCLARABEL, IPOPT] # Xpress-specific items XPRESS_IIS = "XPRESS_IIS" diff --git a/cvxpy/utilities/citations.py b/cvxpy/utilities/citations.py index 680e04c33d..7ac0726159 100644 --- a/cvxpy/utilities/citations.py +++ b/cvxpy/utilities/citations.py @@ -517,3 +517,18 @@ year={2024} } """ + +CITATION_DICT["IPOPT"] = \
+"""
+@article{wachter2006implementation,
+ title={On the implementation of a primal-dual interior point filter line search algorithm for
+ large-scale nonlinear programming},
+ author={W{\"a}chter, Andreas and Biegler, Lorenz T},
+ journal={Mathematical Programming},
+ volume={106},
+ number={1},
+ pages={25--57},
+ year={2006},
+ publisher={Springer}
+}
+"""