Skip to content

Commit d525d68

Browse files
authored
Merge pull request #15 from matousc89/LIncosh
Added Llncosh filter
2 parents 6a82d75 + 81851f0 commit d525d68

File tree

3 files changed

+185
-5
lines changed

3 files changed

+185
-5
lines changed

padasip/filters/__init__.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
"""
22
.. versionadded:: 0.1
3-
.. versionchanged:: 0.7
3+
.. versionchanged:: 1.2.0
44
55
66
An adaptive filter is a system that changes its adaptive parameteres
@@ -166,6 +166,7 @@
166166
"""
167167
from padasip.filters.ap import FilterAP
168168
from padasip.filters.gngd import FilterGNGD
169+
from padasip.filters.llncosh import FilterLlncosh
169170
from padasip.filters.lmf import FilterLMF
170171
from padasip.filters.lms import FilterLMS
171172
from padasip.filters.nlmf import FilterNLMF
@@ -175,7 +176,6 @@
175176
from padasip.filters.rls import FilterRLS
176177
from padasip.filters.sslms import FilterSSLMS
177178

178-
179179
def filter_data(d, x, model="lms", **kwargs):
180180
"""
181181
Function that filter data with selected adaptive filter.
@@ -252,11 +252,10 @@ def get_filter(name):
252252
except:
253253
raise ValueError('Unknown model of filter {}, options are {}'.format(name, list(FILTERS.keys())))
254254

255-
256-
257255
FILTER_CLASSES = [
258256
FilterAP,
259257
FilterGNGD,
258+
FilterLlncosh,
260259
FilterLMF,
261260
FilterLMS,
262261
FilterNLMF,

padasip/filters/llncosh.py

Lines changed: 171 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,171 @@
1+
"""
2+
.. versionadded:: 1.2.0
3+
4+
The least lncosh (Llncosh) algorithm (proposed in https://doi.org/10.1016/j.sigpro.2019.107348)
5+
is similar to the LMS adaptive filter.
6+
7+
The Llncosh filter can be created as follows
8+
9+
>>> import padasip as pa
10+
>>> pa.filters.FilterLlncosh(n)
11+
12+
where :code:`n` is the size (number of taps) of the filter.
13+
14+
Content of this page:
15+
16+
.. contents::
17+
:local:
18+
:depth: 1
19+
20+
.. seealso:: :ref:`filters`
21+
22+
Algorithm Explanation
23+
==========================
24+
25+
The lncosh cost function is the natural logarithm of the hyperbolic cosine function,
26+
which behaves like a hybrid of the mean square error and mean absolute error
27+
criteria according to its positive parameter `l`.
28+
29+
Minimal Working Examples
30+
==============================
31+
32+
If you have measured data you may filter it as follows
33+
34+
.. code-block:: python
35+
36+
import numpy as np
37+
import matplotlib.pylab as plt
38+
import padasip as pa
39+
40+
# creation of data
41+
N = 500
42+
x = np.random.normal(0, 1, (N, 4)) # input matrix
43+
v = np.random.normal(0, 0.1, N) # noise
44+
d = 2 * x[:, 0] + 0.1 * x[:, 1] - 4 * x[:, 2] + 0.5 * x[:, 3] + v # target
45+
46+
# identification
47+
f = pa.filters.FilterLlncosh(n=4, mu=0.1, l=0.1, w="random")
48+
y, e, w = f.run(d, x)
49+
50+
# show results
51+
plt.figure(figsize=(15, 9))
52+
plt.subplot(211);
53+
plt.title("Adaptation");
54+
plt.xlabel("samples - k")
55+
plt.plot(d, "b", label="d - target")
56+
plt.plot(y, "g", label="y - output");
57+
plt.legend()
58+
plt.subplot(212);
59+
plt.title("Filter error");
60+
plt.xlabel("samples - k")
61+
plt.plot(10 * np.log10(e ** 2), "r", label="e - error [dB]");
62+
plt.legend()
63+
plt.tight_layout()
64+
plt.show()
65+
66+
Code Explanation
67+
====================
68+
"""
69+
import numpy as np
70+
71+
from padasip.filters.base_filter import AdaptiveFilter
72+
73+
74+
class FilterLlncosh(AdaptiveFilter):
    """
    This class represents an adaptive Llncosh filter.

    **Args:**

    * `n` : length of filter (integer) - how many input is input array
      (row of input matrix)

    **Kwargs:**

    * `mu` : learning rate (float). Also known as step size. If it is too
      small, the filter may have bad performance. If it is too high,
      the filter will be unstable. The default value can be unstable
      for ill-conditioned input data.

    * `l` : lambda (float). Cost function shape parameter.

    * `w` : initial weights of filter. Possible values are:

        * array with initial weights (1 dimensional array) of filter size

        * "random" : create random weights

        * "zeros" : create zero value weights
    """
    kind = "Llncosh"

    def __init__(self, n, mu=0.01, l=3, w="random"):
        # the filter length must be a plain integer (number of taps)
        if not isinstance(n, int):
            raise ValueError('The size of filter must be an integer')
        self.n = n
        self.mu = self.check_float_param(mu, 0, 1000, "mu")
        self.l = l
        self.init_weights(w, self.n)
        self.w_history = False

    def adapt(self, d, x):
        """
        Adapt weights according one desired value and its input.

        **Args:**

        * `d` : desired value (float)

        * `x` : input array (1-dimensional array)
        """
        y = np.dot(self.w, x)
        e = d - y
        # stochastic-gradient step on the lncosh cost:
        # d/dw ln(cosh(l*e)) yields a tanh(l*e)-weighted update
        self.w += self.mu * np.tanh(self.l * e) * x

    def run(self, d, x):
        """
        This function filters multiple samples in a row.

        **Args:**

        * `d` : desired value (1 dimensional array)

        * `x` : input matrix (2-dimensional array). Rows are samples,
          columns are input arrays.

        **Returns:**

        * `y` : output value (1 dimensional array).
          The size corresponds with the desired value.

        * `e` : filter error for every sample (1 dimensional array).
          The size corresponds with the desired value.

        * `w` : history of all weights (2 dimensional array).
          Every row is set of the weights for given sample.
        """
        # measure the data and check if the dimensions agree
        N = len(x)
        if not len(d) == N:
            raise ValueError('The length of vector d and matrix x must agree.')
        self.n = len(x[0])
        # prepare data
        try:
            x = np.array(x)
            d = np.array(d)
        except Exception:
            raise ValueError('Impossible to convert x or d to a numpy array')
        # create empty arrays
        y = np.zeros(N)
        e = np.zeros(N)
        self.w_history = np.zeros((N, self.n))
        # adaptation loop
        for k in range(N):
            self.w_history[k, :] = self.w
            y[k] = np.dot(self.w, x[k])
            e[k] = d[k] - y[k]
            # same tanh-weighted update as in adapt()
            dw = self.mu * np.tanh(self.l * e[k]) * x[k]
            self.w += dw
        return y, e, self.w_history
171+

tests/filters.py

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,10 +70,20 @@ def test_filter_ocnlms(self):
7070
x = np.random.normal(0, 1, (N, 4))
7171
v = np.random.normal(0, 0.1, N)
7272
d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
73-
f = pa.filters.FilterOCNLMS(n=4, mu=1., eps=1., w="random")
73+
f = pa.filters.FilterOCNLMS(n=4, mu=1., mem=100, w="random")
7474
y, e, w = f.run(d, x)
7575
self.assertAlmostEqual(y.sum(), 6.962870033482984)
7676

77+
def test_filter_Llncosh(self):
    """
    Test of Llncosh filter.
    """
    np.random.seed(100)
    samples = 100
    x = np.random.normal(0, 1, (samples, 4))
    noise = np.random.normal(0, 0.1, samples)
    d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + noise
    f = pa.filters.FilterLlncosh(n=4, mu=1., l=3, w="random")
    y, e, w = f.run(d, x)
    self.assertAlmostEqual(y.sum(), 18.74164638623726)
86+
7787
def test_filter_rls(self):
7888
"""
7989
Test of RLS filter.

0 commit comments

Comments
 (0)