functions.py
import numpy as np
import scipy.linalg as LA
from numpy.random import seed

seed(323)  # fix the global NumPy RNG seed for reproducibility
# def opt_LQR(A, B, Q, R, s):
#     # Retained for reference: exact LQR gain via the discrete-time ARE.
#     # Note that Q and R are passed to scipy's solve_discrete_are(a, b, q, r)
#     # in swapped positions, so Q plays the role of the input cost in the
#     # gain formula below.
#     R[1, 1] = s
#     R[0, 0] = 1.5 - s
#     X = LA.solve_discrete_are(A, B, R, Q)
#     K = -LA.inv(Q + B.T @ X @ B) @ B.T @ X @ A
#     return K
def inf_cost(A, B, C, Q, Ru, Rv, K, L):
    # Infinite-horizon cost of the closed loop x_{t+1} = (A + B K + C L) x_t.
    d, p = B.shape
    a, b = C.shape
    K = K.reshape((p, d))
    L = L.reshape((b, a))
    R = LA.block_diag(Ru, Rv)   # joint input-cost matrix for the stacked input [u; w]
    B_t = np.hstack((B, C))     # stacked input matrix
    cl_map = A + B @ K + C @ L
    # Only evaluate the ARE when the closed loop is strictly stable.
    if np.amax(np.abs(LA.eigvals(cl_map))) < (1.0 - 1.0e-6):
        cost = np.trace(LA.solve_discrete_are(A, B_t, Q, R))
    else:
        # cost = float("inf")
        cost = -20  # finite penalty used in place of an infinite cost
    return cost
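# A minimal smoke test for inf_cost. The matrices below are invented for
# illustration (not from the original experiments) and are chosen only so that
# the closed loop A + B K + C L is stable.
def _demo_inf_cost():
    A = 0.5 * np.eye(2)
    B = np.array([[1.0], [0.0]])
    C = np.array([[0.0], [1.0]])
    Q, Ru, Rv = np.eye(2), np.eye(1), np.eye(1)
    K, L = np.zeros((1, 2)), np.zeros((1, 2))
    return inf_cost(A, B, C, Q, Ru, Rv, K, L)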
def get_g(batch_size, A, B, C, Q, Ru, Rv, K, L, T, baseline=0):
    # Each mini-batch entry is a single REINFORCE gradient (reward-weighted sum
    # of log-policy derivatives); averaging over the batch gives the ordinary
    # gradient estimate g.
    sigma_K = 5e-1  # exploration noise std for the K-player
    sigma_L = 5e-1  # exploration noise std for the L-player
    sigma_x = 1e-4  # process noise std
    nx, nu = B.shape
    _, nw = C.shape
    K = K.reshape((nu, nx))
    L = L.reshape((nw, nx))
    # Lift the stage costs to block-diagonal costs over the whole horizon.
    Q = np.kron(np.eye(T, dtype=int), Q)
    Rv = np.kron(np.eye(T, dtype=int), Rv)
    Ru = np.kron(np.eye(T, dtype=int), Ru)
    X = np.zeros((nx*(T+1), batch_size))
    X[0:nx, :] = 0.2*np.random.normal(size=(nx, batch_size))  # random initial states
    U = np.zeros((nu*T, batch_size))
    W = np.zeros((nw*T, batch_size))
    Vu = sigma_K*np.random.randn(nu*T, batch_size)  # exploration noise for U
    Vw = sigma_L*np.random.randn(nw*T, batch_size)  # exploration noise for W
    for t in range(T):
        U[t*nu:(t+1)*nu, :] = K @ X[nx*t:nx*(t+1), :] + Vu[t*nu:(t+1)*nu, :]
        W[t*nw:(t+1)*nw, :] = L @ X[nx*t:nx*(t+1), :] + Vw[t*nw:(t+1)*nw, :]
        X[nx*(t+1):nx*(t+2), :] = (A @ X[nx*t:nx*(t+1), :]
                                   + B @ U[t*nu:(t+1)*nu, :]
                                   + C @ W[t*nw:(t+1)*nw, :]
                                   + sigma_x*np.random.randn(nx, batch_size))
    X_cost = X[nx:, :]  # states x_1..x_T
    # Zero-sum objective: state and u costs minus the w (adversary) cost.
    reward = (np.diagonal(X_cost.T @ Q @ X_cost)
              + np.diagonal(U.T @ Ru @ U)
              - np.diagonal(W.T @ Rv @ W))
    new_baseline = np.mean(reward)
    reward = reward.reshape((len(reward), 1))
    # D_K portion: accumulate the log-policy gradients along the trajectory.
    X_hat = X[:-nx, :]  # only x_0..x_{T-1} enter the log-gradient computation
    # Shape (a, b, c) means a blocks of size (b, c); block k is [:, :, k].
    outer_grad_log_K = np.einsum("ik, jk -> ijk", Vu, X_hat)
    outer_grad_log_L = np.einsum("ik, jk -> ijk", Vw, X_hat)
    sum_grad_log_K = 0
    sum_grad_log_L = 0
    for t in range(T):
        # Sum the diagonal blocks; result is (nu, nx, batch_size).
        sum_grad_log_K += outer_grad_log_K[nu*t:nu*(t+1), nx*t:nx*(t+1), :]
        sum_grad_log_L += outer_grad_log_L[nw*t:nw*(t+1), nx*t:nx*(t+1), :]
    # Each slice of mini_batch_K is (nu, nx), the same shape as K.
    mini_batch_K = (1/sigma_K)**2 * ((reward - new_baseline).T * sum_grad_log_K)
    # Each slice of mini_batch_L is (nw, nx), the same shape as L.
    mini_batch_L = (1/sigma_L)**2 * ((reward - new_baseline).T * sum_grad_log_L)
    # Mixed K-L term: batched product of the two summed log-gradients
    # (the contraction requires nu == nw).
    temp = np.einsum('mnr,ndr->mdr', sum_grad_log_K.swapaxes(0, 1), sum_grad_log_L)
    batch_mixed_KL = (1/(sigma_K*sigma_L))**2 * ((reward - new_baseline).T * temp)
    return (np.mean(mini_batch_K, axis=2), np.mean(mini_batch_L, axis=2),
            np.mean(batch_mixed_KL, axis=2), new_baseline)
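# A minimal shape check for get_g with invented matrices (the real experiments
# presumably use different dynamics, horizon, and batch size).
def _demo_get_g():
    A = 0.5 * np.eye(2)
    B = np.array([[1.0], [0.0]])
    C = np.array([[0.0], [1.0]])
    Q, Ru, Rv = np.eye(2), np.eye(1), np.eye(1)
    K, L = np.zeros((1, 2)), np.zeros((1, 2))
    gK, gL, gKL, base = get_g(32, A, B, C, Q, Ru, Rv, K, L, T=5)
    assert gK.shape == (1, 2) and gL.shape == (1, 2) and gKL.shape == (2, 2)
    return gK, gL, gKL, base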
def gradient(num_sample, batch_size, A, B, C, Q, Ru, Rv, K, L, T, baseline=0):
    nu, nx = K.shape
    nw, _ = L.shape
    # The flattened gradient estimates have nu*nx (resp. nw*nx) entries each.
    DK_samples = np.zeros(shape=(nu*nx, num_sample))
    DL_samples = np.zeros(shape=(nw*nx, num_sample))
    Dxy_all = np.zeros(shape=(num_sample, nx, nx))
    for i in range(num_sample):
        g, f, mixed, baseline = get_g(batch_size, A, B, C, Q, Ru, Rv, K, L, T, baseline)
        DK_samples[:, i] = g.flatten()
        DL_samples[:, i] = f.flatten()
        Dxy_all[i, :, :] = mixed
    return (np.mean(DK_samples, axis=1).reshape((nu, nx)),
            np.mean(DL_samples, axis=1).reshape((nw, nx)),
            np.mean(Dxy_all, axis=0))
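# Sketch of one min-max update using the averaged estimates from gradient().
# The step size and the descent/ascent sign convention (K minimizes the cost,
# L maximizes it) are assumptions for illustration, not taken from this file.
def _demo_gradient_step(A, B, C, Q, Ru, Rv, K, L, T, lr=1e-3):
    DK, DL, Dxy = gradient(4, 32, A, B, C, Q, Ru, Rv, K, L, T)
    K_next = K - lr * DK  # minimizing player: gradient descent
    L_next = L + lr * DL  # maximizing player: gradient ascent
    return K_next, L_next, Dxy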
def proj(L, temp, lower=0.5):
    # Clip the eigenvalues of the Gram matrix L.T @ L to the interval [lower, temp].
    s, v = np.linalg.eigh(L.T @ L)  # eigh: the Gram matrix is symmetric
    s = np.clip(s, lower, temp)
    return (v @ np.diag(s)) @ v.T
def proj_sgd(L, temp):
    # Rescale L so that its spectral norm is at most sqrt(temp).
    temp = np.sqrt(temp)
    s = np.linalg.norm(L, 2)
    s = np.minimum(s, temp)
    return L / np.linalg.norm(L, 2) * s
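# Quick illustration of the two projections on an invented matrix.
def _demo_proj():
    L0 = np.array([[2.0, 0.0],
                   [0.0, 0.1]])
    gram = proj(L0, temp=1.0)    # eigenvalues of L0.T @ L0 clipped to [0.5, 1.0]
    clipped = proj_sgd(L0, 1.0)  # spectral norm of L0 clipped to sqrt(1.0) = 1
    return gram, clipped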