
Commit c982499

Improved PID tuning functionality.
Added L2 reg.
1 parent 4a4829b commit c982499

File tree

1 file changed: +16 −9 lines changed


control/system.py

Lines changed: 16 additions & 9 deletions
@@ -393,7 +393,7 @@ def __init__(self, K_p, K_i, K_d, tf):
 
         except ValueError:
             pass
-
+
         reduced_tf_num = np.polymul(np.array(tf_num), np.array(pid_num))
         reduced_tf_den = np.polymul(np.array(tf_den), np.array(pid_den))
         self.reduced_tf = TransferFunction(reduced_tf_num, reduced_tf_den)
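The unchanged context lines above cascade the plant with the PID compensator by multiplying polynomial coefficient arrays. A minimal standalone sketch of that np.polymul pattern, using toy coefficients rather than the library's actual plant and PID objects:

import numpy as np

# Toy coefficients only; in the commit these come from tf and the PID.
tf_num, tf_den = [1], [1, 2, 1]        # plant 1/(s^2 + 2s + 1)
pid_num, pid_den = [1, 3, 2], [1, 0]   # PID (s^2 + 3s + 2)/s
num = np.polymul(np.array(tf_num), np.array(pid_num))
den = np.polymul(np.array(tf_den), np.array(pid_den))
print(num, den)  # coefficients of the series (open-loop) combination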
@@ -438,7 +438,7 @@ def response(self, input_type, time_period=10, sample_time=0.05, ret=False, show
         except ValueError:
             print("Improper transfer function. `num` is longer than `den`.")
 
-    def tune(self, input_type="step", set_point=1, num_itr=70, rate=0.00000000001):
+    def tune(self, input_type="step", set_point=1, num_itr=70, rate=0.00000000001, lambd=0.7):
         '''
         Parameters
         ----------
@@ -449,14 +449,17 @@ def tune(self, input_type="step", set_point=1, num_itr=70, rate=0.00000000001):
         num_itr : number of iterations, optional
             DESCRIPTION. The default is 70. Might have to adjust this to prevent the cost from increasing after decreasing.
         rate : learning rate, optional
-            DESCRIPTION. The default is 0.00000000001. Suggested not to increase it.
+            DESCRIPTION. The default is 0.00000000001.
+        lambd : regularization coefficient, optional
+            DESCRIPTION. The default is 0.7.
 
         Returns
         -------
         k : numpy array
             DESCRIPTION. numpy array of Kp, Ki, Kd values
 
         '''
+        np.random.seed(1)
         k = np.random.random(3).reshape(3,1)
 
         def red_tf():
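This hunk threads the new lambd keyword (the L2 coefficient) through tune() and seeds NumPy's RNG so the random starting gains are reproducible across runs. A minimal sketch of that seeded initialization:

import numpy as np

# With a fixed seed, the random starting guess for [Kp, Ki, Kd] is the
# same on every run, which makes tuning runs comparable between commits.
np.random.seed(1)
k = np.random.random(3).reshape(3, 1)  # column vector of Kp, Ki, Kd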
@@ -508,12 +511,11 @@ def red_tf():
             y = np.zeros(m) + set_point
 
             loss = (1/2)*((y_hat - y)**2)
-            cost = float((1/m)*np.sum(loss))
-
+            cost = abs(-float((1/m)*np.sum(loss)) + (lambd/(2*m))*np.sum(k**2))
 
-            grad_kp = (y_hat + y)/(s*(polyval(self.tf.num_coef, s)/polyval(self.tf.den_coef, s)))
-            grad_ki = (y_hat + y)/(polyval(self.tf.num_coef, s)/polyval(self.tf.den_coef, s))
-            grad_kd = (y_hat + y)/((s**2)*(polyval(self.tf.num_coef, s)/polyval(self.tf.den_coef, s)))
+            grad_kp = (y_hat + y)/(s*(polyval(self.tf.num_coef, s)/polyval(self.tf.den_coef, s))) + (lambd/(2*m))*2*k[0]
+            grad_ki = (y_hat + y)/(polyval(self.tf.num_coef, s)/polyval(self.tf.den_coef, s)) + (lambd/(2*m))*2*k[1]
+            grad_kd = (y_hat + y)/((s**2)*(polyval(self.tf.num_coef, s)/polyval(self.tf.den_coef, s))) + (lambd/(2*m))*2*k[2]
 
             grads = np.array([grad_kp, grad_ki, grad_kd])
 
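This is the core of the "Added L2 reg." change: the cost gains a penalty term (lambd/(2m))·Σk², and each gradient gains its derivative, since d/dk_i of that penalty is (lambd/(2m))·2k_i, exactly the term appended to each grad_* line. A self-contained sketch of the pattern on toy data (the real code differentiates through the plant's transfer function instead of this stand-in response):

import numpy as np

np.random.seed(1)
m = 100                                # number of samples
y = np.ones(m)                         # set-point target
y_hat = y + 0.1*np.random.randn(m)     # toy response in place of the plant
k = np.random.random((3, 1))           # gains [Kp, Ki, Kd] being penalized
lambd, rate = 0.7, 1e-11

loss = (1/2)*((y_hat - y)**2)
cost = float((1/m)*np.sum(loss) + (lambd/(2*m))*np.sum(k**2))  # MSE + L2

# L2 contribution to each gradient; it shrinks large gains toward zero.
grad_l2 = (lambd/(2*m))*2*k
k = k - rate*grad_l2                   # one (penalty-only) descent step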
@@ -524,10 +526,15 @@ def red_tf():
                 print(f"cost {n}: {np.squeeze(cost)}")
                 if n%20 == 0:
                     costs.append(cost)
+
+        k = k[:,-1]
+        self.K_p = k[0]
+        self.K_i = k[1]
+        self.K_d = k[2]
 
         plt.plot(costs)
         plt.show()
-        print(s)
+
         return k
 
 class reduce():
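Besides dropping a leftover debug print, the final hunk now writes the tuned gains back onto the controller instead of only returning them. A small sketch of that write-back step (toy array; in the commit k is the (3,1) gain column produced by the descent loop):

import numpy as np

k = np.random.random((3, 1))       # toy stand-in for the tuned gain column
k = k[:, -1]                       # flatten the last column to shape (3,)
K_p, K_i, K_d = k[0], k[1], k[2]   # what the commit stores on self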
