Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions cvxpy/atoms/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@
from cvxpy.atoms.elementwise.sqrt import sqrt
from cvxpy.atoms.elementwise.square import square
from cvxpy.atoms.elementwise.xexp import xexp
from cvxpy.atoms.elementwise.trig import sin, cos, tan
from cvxpy.atoms.eye_minus_inv import eye_minus_inv, resolvent
from cvxpy.atoms.gen_lambda_max import gen_lambda_max
from cvxpy.atoms.geo_mean import geo_mean
Expand Down
202 changes: 202 additions & 0 deletions cvxpy/atoms/elementwise/trig.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,202 @@
"""
Copyright 2025 CVXPY Developers

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Tuple

import numpy as np

from cvxpy.atoms.elementwise.elementwise import Elementwise
from cvxpy.constraints.constraint import Constraint


class sin(Elementwise):
    """Elementwise :math:`\\sin x`.

    Sine is smooth but neither convex nor concave and not monotone on
    :math:`\\mathbb{R}`, so this atom is only usable with reductions/solvers
    that accept smooth nonconvex expressions (e.g. NLP solvers).
    """

    def __init__(self, x) -> None:
        super(sin, self).__init__(x)

    @Elementwise.numpy_numeric
    def numeric(self, values):
        """Returns the elementwise sine of x.
        """
        return np.sin(values[0])

    def sign_from_args(self) -> Tuple[bool, bool]:
        """Returns sign (is positive, is negative) of the expression.
        """
        # sin ranges over [-1, 1], so the sign is always unknown.
        return (False, False)

    def is_atom_convex(self) -> bool:
        """Is the atom convex?
        """
        # sin oscillates; it is not convex on its full domain.
        return False

    def is_atom_concave(self) -> bool:
        """Is the atom concave?
        """
        # sin oscillates; it is not concave on its full domain.
        return False

    def is_atom_log_log_convex(self) -> bool:
        """Is the atom log-log convex?
        """
        return False

    def is_atom_log_log_concave(self) -> bool:
        """Is the atom log-log concave?
        """
        # sin(e^u) oscillates in u, so it is not log-log concave.
        return False

    def is_incr(self, idx) -> bool:
        """Is the composition non-decreasing in argument idx?
        """
        # sin is not monotone on R.
        return False

    def is_decr(self, idx) -> bool:
        """Is the composition non-increasing in argument idx?
        """
        return False

    def _domain(self) -> List[Constraint]:
        """Returns constraints describing the domain of the node.
        """
        # sin is defined on all of R: no domain constraints.
        return []

    def _grad(self, values):
        """Gives the (sub/super)gradient of the atom w.r.t. each argument.

        Matrix expressions are vectorized, so the gradient is a matrix.

        Args:
            values: A list of numeric values for the arguments.

        Returns:
            A list of SciPy CSC sparse matrices or None.
        """
        rows = self.args[0].size
        cols = self.size
        # d/dx sin(x) = cos(x), applied elementwise.
        grad_vals = np.cos(values[0])
        return [sin.elemwise_grad_to_diag(grad_vals, rows, cols)]


class cos(Elementwise):
    """Elementwise :math:`\\cos x`.

    Cosine is smooth but neither convex nor concave and not monotone on
    :math:`\\mathbb{R}`, so this atom is only usable with reductions/solvers
    that accept smooth nonconvex expressions (e.g. NLP solvers).
    """

    def __init__(self, x) -> None:
        super(cos, self).__init__(x)

    @Elementwise.numpy_numeric
    def numeric(self, values):
        """Returns the elementwise cosine of x.
        """
        return np.cos(values[0])

    def sign_from_args(self) -> Tuple[bool, bool]:
        """Returns sign (is positive, is negative) of the expression.
        """
        # cos ranges over [-1, 1], so the sign is always unknown.
        return (False, False)

    def is_atom_convex(self) -> bool:
        """Is the atom convex?
        """
        # cos oscillates; it is not convex on its full domain.
        return False

    def is_atom_concave(self) -> bool:
        """Is the atom concave?
        """
        # cos oscillates; it is not concave on its full domain.
        return False

    def is_atom_log_log_convex(self) -> bool:
        """Is the atom log-log convex?
        """
        return False

    def is_atom_log_log_concave(self) -> bool:
        """Is the atom log-log concave?
        """
        # cos(e^u) oscillates in u, so it is not log-log concave.
        return False

    def is_incr(self, idx) -> bool:
        """Is the composition non-decreasing in argument idx?
        """
        # cos is not monotone on R.
        return False

    def is_decr(self, idx) -> bool:
        """Is the composition non-increasing in argument idx?
        """
        return False

    def _domain(self) -> List[Constraint]:
        """Returns constraints describing the domain of the node.
        """
        # cos is defined on all of R: no domain constraints.
        return []

    def _grad(self, values):
        """Gives the (sub/super)gradient of the atom w.r.t. each argument.

        Matrix expressions are vectorized, so the gradient is a matrix.

        Args:
            values: A list of numeric values for the arguments.

        Returns:
            A list of SciPy CSC sparse matrices or None.
        """
        rows = self.args[0].size
        cols = self.size
        # d/dx cos(x) = -sin(x), applied elementwise.
        grad_vals = -np.sin(values[0])
        return [cos.elemwise_grad_to_diag(grad_vals, rows, cols)]


class tan(Elementwise):
    """Elementwise :math:`\\tan x`.

    Tangent is neither convex nor concave on its (disconnected) domain and
    is undefined at :math:`x = \\pi/2 + k\\pi`, so this atom is only usable
    with reductions/solvers that accept smooth nonconvex expressions
    (e.g. NLP solvers).
    """

    def __init__(self, x) -> None:
        super(tan, self).__init__(x)

    @Elementwise.numpy_numeric
    def numeric(self, values):
        """Returns the elementwise tangent of x.
        """
        return np.tan(values[0])

    def sign_from_args(self) -> Tuple[bool, bool]:
        """Returns sign (is positive, is negative) of the expression.
        """
        # tan ranges over all of R, so the sign is always unknown.
        return (False, False)

    def is_atom_convex(self) -> bool:
        """Is the atom convex?
        """
        # tan is not convex on its full domain.
        return False

    def is_atom_concave(self) -> bool:
        """Is the atom concave?
        """
        # tan is not concave on its full domain.
        return False

    def is_atom_log_log_convex(self) -> bool:
        """Is the atom log-log convex?
        """
        return False

    def is_atom_log_log_concave(self) -> bool:
        """Is the atom log-log concave?
        """
        return False

    def is_incr(self, idx) -> bool:
        """Is the composition non-decreasing in argument idx?
        """
        # tan increases on each branch but is not globally monotone
        # (it is undefined between branches), so no monotonicity is claimed.
        return False

    def is_decr(self, idx) -> bool:
        """Is the composition non-increasing in argument idx?
        """
        return False

    def _domain(self) -> List[Constraint]:
        """Returns constraints describing the domain of the node.
        """
        # The poles x = pi/2 + k*pi cannot be expressed as convex
        # constraints, so no domain constraints are returned.
        return []

    def _grad(self, values):
        """Gives the (sub/super)gradient of the atom w.r.t. each argument.

        Matrix expressions are vectorized, so the gradient is a matrix.

        Args:
            values: A list of numeric values for the arguments.

        Returns:
            A list of SciPy CSC sparse matrices or None.
        """
        rows = self.args[0].size
        cols = self.size
        # d/dx tan(x) = 1 / cos(x)^2 = sec(x)^2, applied elementwise.
        grad_vals = 1.0 / np.square(np.cos(values[0]))
        return [tan.elemwise_grad_to_diag(grad_vals, rows, cols)]

7 changes: 5 additions & 2 deletions cvxpy/reductions/expr2smooth/canonicalizers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,11 @@
limitations under the License.
"""
from cvxpy.atoms import maximum
from cvxpy.atoms.elementwise.minimum import minimum
from cvxpy.atoms.elementwise.power import power
from cvxpy.atoms.pnorm import Pnorm
from cvxpy.atoms.elementwise.abs import abs
from cvxpy.reductions.expr2smooth.canonicalizers.minimum_canon import minimum_canon
from cvxpy.reductions.expr2smooth.canonicalizers.abs_canon import abs_canon
from cvxpy.reductions.expr2smooth.canonicalizers.pnorm_canon import pnorm_canon
from cvxpy.reductions.expr2smooth.canonicalizers.power_canon import power_canon
Expand All @@ -25,8 +27,9 @@
CANON_METHODS = {
abs: abs_canon,
maximum : maximum_canon,
minimum: minimum_canon,
# log: log_canon,
power: power_canon,
Pnorm : pnorm_canon,
#power: power_canon,
#Pnorm : pnorm_canon,
# inv: inv_pos_canon,
}
27 changes: 27 additions & 0 deletions cvxpy/reductions/expr2smooth/canonicalizers/minimum_canon.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
"""
Copyright 2025 CVXPY developers

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from cvxpy.atoms.elementwise.maximum import maximum
from cvxpy.reductions.expr2smooth.canonicalizers.maximum_canon import (
maximum_canon,
)


def minimum_canon(expr, args):
    """Canonicalize an elementwise minimum via the identity
    min(x_1, ..., x_k) = -max(-x_1, ..., -x_k).
    """
    del expr  # Unused: the negated maximum atom is rebuilt from args.
    negated_max = maximum(*(-arg for arg in args))
    smooth_expr, constraints = maximum_canon(negated_max, negated_max.args)
    return -smooth_expr, constraints
6 changes: 6 additions & 0 deletions cvxpy/reductions/expr2smooth/canonicalizers/power_canon.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
def power_canon(expr, args):
x = args[0]
p = expr.p_rational
w = expr.w

Check failure on line 26 in cvxpy/reductions/expr2smooth/canonicalizers/power_canon.py

View workflow job for this annotation

GitHub Actions / actions-linting / linters

Ruff (F841)

cvxpy/reductions/expr2smooth/canonicalizers/power_canon.py:26:5: F841 Local variable `w` is assigned to but never used

if p == 1:
return x, []
Expand All @@ -36,3 +36,9 @@
t = Variable(shape)
if 0 < p < 1:
return t, [t**(1/p) == x, t >= 0]
elif p > 1:
return x**p, []
else: # p < 0
raise ValueError(
"Power canonicalization does not support negative powers."
)
31 changes: 20 additions & 11 deletions cvxpy/reductions/solvers/nlp_solvers/ipopt_nlpif.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,27 +14,27 @@
limitations under the License.
"""

import numpy as np
import torch

import cvxpy.settings as s
from cvxpy.constraints import (
Equality,
Inequality,
NonPos,
)
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers.nlp_solvers.nlp_solver import NLPsolver
from cvxpy.reductions.utilities import (
lower_equality,
lower_ineq_to_nonneg,
nonpos2nonneg,
)
from cvxpy.utilities.citations import CITATION_DICT
from cvxtorch import TorchExpression


class IPOPT(NLPsolver):

Check failure on line 37 in cvxpy/reductions/solvers/nlp_solvers/ipopt_nlpif.py

View workflow job for this annotation

GitHub Actions / actions-linting / linters

Ruff (I001)

cvxpy/reductions/solvers/nlp_solvers/ipopt_nlpif.py:17:1: I001 Import block is un-sorted or un-formatted
"""
NLP interface for the IPOPT solver
"""
Expand Down Expand Up @@ -120,8 +120,14 @@
"""
import cyipopt
bounds = self.Bounds(data["problem"])
x0 = [12, 5, 0]

initial_values = []
for var in bounds.main_var:
if var.value is not None:
initial_values.append(var.value.flatten(order='F'))
else:
# If no initial value, use zero
initial_values.append(np.zeros(var.size))
x0 = np.concatenate(initial_values, axis=0)
nlp = cyipopt.Problem(
n=len(x0),
m=len(bounds.cl),
Expand All @@ -134,7 +140,7 @@
nlp.add_option('mu_strategy', 'adaptive')
nlp.add_option('tol', 1e-7)
nlp.add_option('hessian_approximation', "limited-memory")
x, info = nlp.solve(x0)
_, info = nlp.solve(x0)
return info

def cite(self, data):
Expand All @@ -160,7 +166,7 @@
offset = 0
for var in self.main_var:
size = var.size
var.value = x[offset:offset+size]
var.value = x[offset:offset+size].reshape(var.shape, order='F')
offset += size
# Evaluate the objective
obj_value = self.problem.objective.args[0].value
Expand All @@ -173,7 +179,7 @@
torch_exprs = []
for var in self.main_var:
size = var.size
slice = x[offset:offset+size]
slice = x[offset:offset+size].reshape(var.shape, order='F')
torch_exprs.append(torch.from_numpy(slice.astype(np.float64)).requires_grad_(True))
offset += size

Expand All @@ -196,7 +202,7 @@
offset = 0
for var in self.main_var:
size = var.size
var.value = x[offset:offset+size]
var.value = x[offset:offset+size].reshape(var.shape, order='F')
offset += size

# Evaluate all constraints
Expand All @@ -214,7 +220,7 @@

for var in self.main_var:
size = var.size
slice = x[offset:offset+size]
slice = x[offset:offset+size].reshape(var.shape, order='F')
torch_tensor = torch.from_numpy(slice.astype(np.float64)).requires_grad_(True)
torch_vars_dict[var.id] = torch_tensor # Map CVXPY variable ID to torch tensor
torch_exprs.append(torch_tensor)
Expand Down Expand Up @@ -245,7 +251,7 @@
*constr_torch_args
)
constraint_values.append(torch_expr)
return torch.cat([torch.atleast_1d(cv) for cv in constraint_values])
return torch.cat([cv.flatten() for cv in constraint_values])

# Compute Jacobian using torch.autograd.functional.jacobian
if len(self.problem.constraints) > 0:
Expand All @@ -254,7 +260,10 @@
# Handle the case where jacobian_tuple is a tuple (multiple variables)
if isinstance(jacobian_tuple, tuple):
# Concatenate along the last dimension (variable dimension)
jacobian_matrix = torch.cat(jacobian_tuple, dim=-1)
jacobian_matrix = torch.cat(
[jac.reshape(jac.size(0), -1) for jac in jacobian_tuple],
dim=1
)
else:
# Single variable case
jacobian_matrix = jacobian_tuple
Expand Down Expand Up @@ -298,8 +307,8 @@
for var in self.main_var:
size = var.size
if var.bounds:
var_lower.extend(var.bounds[0])
var_upper.extend(var.bounds[1])
var_lower.extend(var.bounds[0].flatten(order='F'))
var_upper.extend(var.bounds[1].flatten(order='F'))
else:
# No bounds specified, use infinite bounds
var_lower.extend([-np.inf] * size)
Expand Down
Loading
Loading