
Commit 573248b

fixing ruff warnings (#35)
* raising error for unimplemented smooth canons
* fixed ruff warnings
* Revert "raising error for unimplemented smooth canons"

  This reverts commit f17e5dd.

---------

Co-authored-by: William Zijie Zhang <[email protected]>
Co-authored-by: Daniel <[email protected]>
Co-authored-by: Daniel Cederberg <[email protected]>
1 parent 9d21c4c commit 573248b

30 files changed: +8710 -226 lines changed

cvxpy/reductions/expr2smooth/canonicalizers/abs_canon.py

Lines changed: 4 additions & 8 deletions
@@ -16,13 +16,9 @@
 
 import numpy as np
 
-
-from cvxpy.expressions.variable import Variable
-from cvxpy.atoms.elementwise.power import power
-
 # TODO (DCED): ask William if this the multiplication we want to use
 from cvxpy.atoms.affine.binary_operators import multiply
-from cvxpy.reductions.expr2smooth.canonicalizers.power_canon import power_canon
+from cvxpy.expressions.variable import Variable
 
 #def abs_canon(expr, args):
 #    shape = expr.shape
@@ -41,13 +37,13 @@ def abs_canon(expr, args):
     t1 = Variable(shape, bounds = [0, None])
     y = Variable(shape, bounds = [-1.01, 1.01])
     if args[0].value is not None:
-        #t1.value = np.sqrt(expr.value**2)
         t1.value = np.abs(args[0].value)
         y.value = np.sign(args[0].value)
 
     t1.value = np.ones(shape)
     y.value = np.zeros(shape)
 
-    # TODO (DCED): check how multiply is canonicalized. We don't want to introduce a new variable for
-    # y inside multiply. But args[0] should potentially be canonicalized further?
+    # TODO (DCED): check how multiply is canonicalized. We don't want to introduce a
+    # new variable for y inside multiply. But args[0] should potentially be canonicalized
+    # further?
     return t1, [y ** 2 == np.ones(shape), t1 == multiply(y, args[0])]
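
For context, abs_canon represents |x| elementwise through an auxiliary variable y: it returns t1 == multiply(y, args[0]) together with y ** 2 == np.ones(shape), and since t1 is bounded below by 0, y has to take the sign of the argument and t1 equals its absolute value. A minimal NumPy-only sanity check of that identity (not part of the commit; the array x below is an arbitrary illustrative input):

import numpy as np

x = np.array([-3.0, 0.5, 2.0])  # illustrative input, not from the commit
y = np.sign(x)                  # candidate value for the auxiliary variable y
t1 = y * x                      # the product that abs_canon constrains t1 to equal

assert np.allclose(y ** 2, np.ones_like(x))  # y satisfies y**2 == 1
assert np.allclose(t1, np.abs(x))            # and y*x recovers |x| elementwise
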

cvxpy/reductions/expr2smooth/canonicalizers/entr_canon.py

Lines changed: 3 additions & 1 deletion
@@ -14,9 +14,11 @@
 limitations under the License.
 """
 
-from cvxpy.expressions.variable import Variable
 import numpy as np
 
+from cvxpy.expressions.variable import Variable
+
+
 def entr_canon(expr, args):
     t = Variable(args[0].shape, bounds=[0, None])
     if args[0].value is not None and np.all(args[0].value >= 1):

cvxpy/reductions/expr2smooth/canonicalizers/kl_div_canon.py

Lines changed: 3 additions & 1 deletion
@@ -14,9 +14,11 @@
 limitations under the License.
 """
 
-from cvxpy.expressions.variable import Variable
 import numpy as np
 
+from cvxpy.expressions.variable import Variable
+
+
 def kl_div_canon(expr, args):
     constraints = []
 

cvxpy/reductions/expr2smooth/canonicalizers/log_canon.py

Lines changed: 4 additions & 3 deletions
@@ -14,10 +14,11 @@
 limitations under the License.
 """
 
-from cvxpy.expressions.variable import Variable
-from cvxpy.expressions.constants import Constant
 import numpy as np
-from cvxpy.atoms.elementwise.exp import exp
+
+from cvxpy.expressions.constants import Constant
+from cvxpy.expressions.variable import Variable
+
 
 def collect_constant_and_variable(expr, constants, variable):
     if isinstance(expr, Constant):

cvxpy/reductions/expr2smooth/canonicalizers/quad_over_lin.py

Lines changed: 0 additions & 1 deletion
@@ -14,7 +14,6 @@
 limitations under the License.
 """
 
-from cvxpy.expressions.variable import Variable
 
 def quad_over_lin_canon(expr, args):
     assert(False)

cvxpy/reductions/expr2smooth/canonicalizers/rel_entr_canon.py

Lines changed: 3 additions & 1 deletion
@@ -14,9 +14,11 @@
 limitations under the License.
 """
 
-from cvxpy.expressions.variable import Variable
 import numpy as np
 
+from cvxpy.expressions.variable import Variable
+
+
 def rel_entr_canon(expr, args):
     constraints = []
 

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
import pdb
from math import pi

import numpy as np
import numpy.linalg as LA

import cvxpy as cp
from cvxpy import log, square

np.random.seed(1234)
n = 2
data = 10 * np.random.randn(n)
sigma_opt = (1 / np.sqrt(n)) * LA.norm(data - np.mean(data))
mu_opt = np.mean(data)
mu = cp.Variable((1, ), name="mu")
TO_RUN = 2

# for n = 200, the first one doesn't work if we use cp.sum(cp.square(data-mu)) but it works
# with sum_of_squares

# how is the prod canonicalized? maybe that's the issue. Hmm, or the chain rule! Start in the
# opt solution and see what happens

if TO_RUN == 1:
    # here we won't deduce that sigma is nonnegative so it can be useful to mention it
    sigma = cp.Variable((1, ), nonneg=True, name="sigma")
    obj = (n / 2) * log(2*pi*square(sigma)) + (1 / (2 * square(sigma))) * cp.sum(cp.square(data-mu))
    constraints = []
elif TO_RUN == 2:
    # here we will deduce that sigma2 is nonnegative so no need to mention it
    sigma2 = cp.Variable((1, ), name="sigma2")
    obj = (n / 2) * log(2*pi*sigma2) + (1 / (2 * sigma2)) * cp.sum(cp.square(data-mu))
    constraints = []
    sigma = cp.sqrt(sigma2)
elif TO_RUN == 3:
    sigma = cp.Variable((1, ))
    #sigma.value = np.array([1 * sigma_opt])
    #mu.value = np.array([1 * mu_opt])
    #t = cp.Variable((n, ))
    #v = cp.Variable((1, ), bounds=[0, None])
    obj = n * log(np.sqrt(2*pi)*sigma) + (1 / (2 * square(sigma))) * cp.sum(cp.square(data-mu))
    #obj = n * log(np.sqrt(2*pi)*sigma) + (1 / (2 * square(sigma))) * cp.sum_squares(data-mu)
    constraints = []

problem = cp.Problem(cp.Minimize(obj), constraints)
problem.solve(solver=cp.IPOPT, nlp=True)

print("mu difference: ", mu.value - np.mean(data))
print("sigma difference: ", sigma.value - sigma_opt)
pdb.set_trace()
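
For context, the objective minimized in this script (and in the related test scripts below) is the Gaussian negative log-likelihood, (n/2)*log(2*pi*sigma^2) + (1/(2*sigma^2))*sum((x_i - mu)^2), whose closed-form minimizers are mu = mean(data) and sigma^2 = mean((data - mean(data))^2); those are exactly the mu_opt and sigma_opt the scripts compare against. A small NumPy-only check of that closed form, using arbitrary illustrative data (not part of the commit):

import numpy as np

rng = np.random.default_rng(0)
n = 50
data = 10 * rng.standard_normal(n)  # illustrative data, not the script's

def nll(mu, sigma):
    # Gaussian negative log-likelihood, same form as the objective above
    return (n / 2) * np.log(2 * np.pi * sigma ** 2) + np.sum((data - mu) ** 2) / (2 * sigma ** 2)

mu_opt = np.mean(data)
sigma_opt = np.sqrt(np.mean((data - mu_opt) ** 2))  # = (1 / sqrt(n)) * ||data - mean(data)||

# the closed-form point should be no worse than nearby perturbed points
for d_mu, d_sigma in [(0.1, 0.0), (-0.1, 0.0), (0.0, 0.1), (0.0, -0.1)]:
    assert nll(mu_opt, sigma_opt) <= nll(mu_opt + d_mu, sigma_opt + d_sigma)
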
Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
from math import pi

import numpy as np
import numpy.linalg as LA

import cvxpy as cp
from cvxpy import log, square

np.random.seed(1234)
TOL = 1e-3
METHODS = [1, 2, 3, 4, 5]
all_n = np.arange(2, 100, 5)
scaling_factors = [1]
mu = cp.Variable((1, ), name="mu")

for n in all_n:
    for factor in scaling_factors:
        data = factor*np.random.randn(n)
        sigma_opt = (1 / np.sqrt(n)) * LA.norm(data - np.mean(data))
        mu_opt = np.mean(data)
        for method in METHODS:
            mu.value = None
            print("Method, n, scale factor: ", method, n, factor)
            if method == 1:
                # here we won't deduce that sigma is nonnegative so it can be useful to mention it
                sigma = cp.Variable((1, ), nonneg=True)
                obj = (n / 2) * log(2*pi*square(sigma)) + \
                    (1 / (2 * square(sigma))) * cp.sum(cp.square(data-mu))
                constraints = []
            elif method == 2:
                # here we will deduce that sigma2 is nonnegative so no need to mention it
                sigma2 = cp.Variable((1, ), name="Sigma2")
                obj = (n / 2) * log( 2 * pi * sigma2) + \
                    (1 / (2 * sigma2)) * cp.sum(cp.square(data-mu))
                constraints = []
                sigma = cp.sqrt(sigma2)
            elif method == 3:
                # here we will deduce that sigma is nonnegative so no need to mention it
                sigma = cp.Variable((1, ), name="Sigma")
                obj = n * log(np.sqrt(2*pi)*sigma) + \
                    (1 / (2 * square(sigma))) * cp.sum(cp.square(data-mu))
                constraints = []
            elif method == 4:
                # here we will deduce that sigma is nonnegative so no need to mention it
                sigma2 = cp.Variable((1, ), name="Sigma2")
                obj = (n / 2) * log(sigma2 * 2 * pi * -1 * -1) + \
                    (1 / (2 * sigma2)) * cp.sum(cp.square(data-mu))
                constraints = []
                sigma = cp.sqrt(sigma2)
            elif method == 5:
                # here we will deduce that sigma is nonnegative so no need to mention it
                sigma = cp.Variable((1, ), name="Sigma")
                obj = n * log(np.sqrt(2*pi)*sigma * -1 * -1 * 2 * 0.5) + \
                    (1 / (2 * square(sigma))) * cp.sum(cp.square(data-mu))
                constraints = []

            problem = cp.Problem(cp.Minimize(obj), constraints)
            problem.solve(solver=cp.IPOPT, nlp=True)
            print("sigma.value: ", sigma.value)
            print("sigma_opt: ", sigma_opt)
            assert(np.abs(sigma.value - sigma_opt) / np.max([1, np.abs(sigma_opt)]) <= TOL)
            assert(np.abs(mu.value - mu_opt) / np.max([1, np.abs(mu_opt)]) <= TOL)
Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
from math import pi

import numpy as np
import numpy.linalg as LA

import cvxpy as cp
from cvxpy import log, square

np.random.seed(1234)
n = 10
data = np.random.randn(n)
sigma_opt = (1 / np.sqrt(n)) * LA.norm(data)
res = LA.norm(data) ** 2

TO_RUN = 2

if TO_RUN == 1:
    sigma = cp.Variable((1, ))
    obj = (n / 2) * log(2*pi*square(sigma)) + (1 / (2 * square(sigma))) * res
    constraints = []
elif TO_RUN == 2:
    sigma2 = cp.Variable((1, ))
    obj = (n / 2) * log(2*pi*sigma2) + (1 / (2 * sigma2)) * res
    constraints = []
    sigma = cp.sqrt(sigma2)
elif TO_RUN == 3:
    sigma = cp.Variable((1, ))
    obj = n * log(np.sqrt(2*pi)*sigma) + (1 / (2 * square(sigma))) * res
    constraints = []

problem = cp.Problem(cp.Minimize(obj), constraints)
problem.solve(solver=cp.IPOPT, nlp=True)
print("difference sigma:", sigma.value - sigma_opt)
Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
from math import pi

import numpy as np
import numpy.linalg as LA

import cvxpy as cp
from cvxpy import log, square

np.random.seed(1234)
TOL = 1e-3
METHODS = [1, 2, 3, 4, 5]
all_n = np.arange(2, 100, 5)
# if we use dual LS to initialize multipliers we pass this for scaling factors 0.1, 1, 10.
scaling_factors = [0.1, 1e0, 10]
# with the exp formulation it works for scaling_factor = 1e-2 as well
#scaling_factors = [0.1, 1e0, 10]
#scaling_factors = [1e-2]

for n in all_n:
    for factor in scaling_factors:
        data = factor*np.random.randn(n)
        sigma_opt = (1 / np.sqrt(n)) * LA.norm(data)
        res = LA.norm(data) ** 2
        for method in METHODS:
            print("Method, n, scale factor: ", method, n, factor)
            if method == 1:
                sigma = cp.Variable((1, ))
                obj = (n / 2) * log(2*pi*square(sigma)) + (1 / (2 * square(sigma))) * res
                constraints = []
            elif method == 2:
                sigma2 = cp.Variable((1, ))
                obj = (n / 2) * log( 2 * pi * sigma2) + (1 / (2 * sigma2)) * res
                constraints = []
                sigma = cp.sqrt(sigma2)
            elif method == 3:
                sigma = cp.Variable((1, ))
                obj = n * log(np.sqrt(2*pi)*sigma) + (1 / (2 * square(sigma))) * res
                constraints = []
            elif method == 4:
                sigma2 = cp.Variable((1, ))
                obj = (n / 2) * log(sigma2 * 2 * pi * -1 * -1) + (1 / (2 * sigma2)) * res
                constraints = []
                sigma = cp.sqrt(sigma2)
            elif method == 5:
                sigma = cp.Variable((1, ))
                obj = n * log(np.sqrt(2*pi)*sigma * -1 * -1 * 2 * 0.5) + \
                    (1 / (2 * square(sigma))) * res
                constraints = []

            problem = cp.Problem(cp.Minimize(obj), constraints)
            problem.solve(solver=cp.IPOPT, nlp=True)
            print("sigma.value: ", sigma.value)
            print("sigma_opt: ", sigma_opt)
            assert(np.abs(sigma.value - sigma_opt) / np.max([1, np.abs(sigma_opt)]) <= TOL)
