Skip to content

Commit 540f686

Browse files
committed
Added PCA and LDA
1 parent 12bfa68 commit 540f686

File tree

13 files changed

+671
-188
lines changed

13 files changed

+671
-188
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
__pycache__/
55
*.py[cod]
66
*$py.class
7+
*.pyc
78

89
# C extensions
910
*.so

README.rst

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,14 @@ http://matousc89.github.io/padasip/
1919
Current Features
2020
================
2121

22+
********************
23+
Data Preprocessing
24+
********************
25+
26+
- Principal Component Analysis (PCA)
27+
28+
- Linear Discriminant Analysis (LDA)
29+
2230
******************
2331
Adaptive Filters
2432
******************

padasip/__init__.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
"""
2-
Current version: |version| (:ref:`changelog-label`)
2+
Current version: |version| (:ref:`changelog`)
33
44
This library is designed to simplify adaptive signal
55
processing tasks within python
@@ -78,7 +78,16 @@
7878
* :ref:`search`
7979
8080
"""
81-
from padasip.preprocess import *
81+
#from padasip.preprocess import
8282
from padasip.filters.shortcuts import *
8383
import padasip.ann
8484
import padasip.filters
85+
86+
import padasip.preprocess
87+
88+
# back compatibility with v0.5
89+
from padasip.preprocess.standardize import standardize
90+
from padasip.preprocess.standardize_back import standardize_back
91+
from padasip.preprocess.input_from_history import input_from_history
92+
93+

padasip/ann/mlp.py

Lines changed: 27 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -128,19 +128,20 @@ def activation(self, x, f="sigmoid", der=False):
128128
"""
129129
This function process values of layer outputs with activation function.
130130
131-
Args:
131+
**Args:**
132132
133133
* `x` : array to process (1-dimensional array)
134134
135-
Kwargs:
135+
**Kwargs:**
136136
137137
* `f` : activation function
138138
139139
* `der` : normal output, or its derivation (bool)
140140
141-
Returns:
141+
**Returns:**
142142
143143
* values processed with activation function (1-dimensional array)
144+
144145
"""
145146
if f == "sigmoid":
146147
if der:
@@ -155,14 +156,15 @@ def predict(self, x):
155156
"""
156157
This function make forward pass through this layer (no update).
157158
158-
Args:
159+
**Args:**
159160
160161
* `x` : input vector (1-dimensional array)
161162
162-
Returns:
163+
**Returns:**
163164
164165
* `y` : output of MLP (float or 1-dimensional array).
165166
Size depends on number of nodes in this layer.
167+
166168
"""
167169
self.x[1:] = x
168170
self.y = self.activation(np.sum(self.w*self.x, axis=1), f=self.f)
@@ -173,12 +175,12 @@ def update(self, w, e):
173175
This function make update according provided target
174176
and the last used input vector.
175177
176-
Args:
178+
**Args:**
177179
178180
* `d` : target (float or 1-dimensional array).
179181
Size depends on number of MLP outputs.
180182
181-
Returns:
183+
**Returns:**
182184
183185
* `w` : weights of the layers (2-dimensional layer).
184186
Every row represents one node.
@@ -201,7 +203,7 @@ class NetworkMLP():
201203
"""
202204
This class represents a Multi-layer Perceptron neural network.
203205
204-
Args:
206+
**Args:**
205207
206208
* `layers` : array describing hidden layers of network
207209
(1-dimensional array of integers). Every number in array represents
@@ -212,7 +214,7 @@ class NetworkMLP():
212214
213215
* `n_input` : number of network inputs (int).
214216
215-
Kwargs:
217+
**Kwargs:**
216218
217219
* `outputs` : number of network outputs (int). Default is 1.
218220
@@ -267,7 +269,7 @@ def train(self, x, d, epochs=10, shuffle=False):
267269
"""
268270
Function for batch training of MLP.
269271
270-
Args:
272+
**Args:**
271273
272274
* `x` : input array (2-dimensional array).
273275
Every row represents one input vector (features).
@@ -276,15 +278,15 @@ def train(self, x, d, epochs=10, shuffle=False):
276278
Every row represents target for one input vector.
277279
Target can be one or more values (in case of multiple outputs).
278280
279-
Kwargs:
281+
**Kwargs:**
280282
281283
* `epochs` : amount of epochs (int). That means how many times
282284
the MLP will iterate over the passed set of data (`x`, `d`).
283285
284286
* `shuffle` : if true, the order of inputs and outputs are shuffled (bool).
285287
That means the pairs input-output are in different order in every epoch.
286288
287-
Returns:
289+
**Returns:**
288290
289291
* `e`: output vector (m-dimensional array). Every row represents
290292
error (or errors) for an input and output in given epoch.
@@ -293,6 +295,7 @@ def train(self, x, d, epochs=10, shuffle=False):
293295
294296
* `MSE` : mean squared error (1-dimensional array). Every value
295297
stands for MSE of one epoch.
298+
296299
"""
297300
# measure the data and check if the dimension agree
298301
N = len(x)
@@ -335,15 +338,16 @@ def run(self, x):
335338
"""
336339
Function for batch usage of already trained and tested MLP.
337340
338-
Args:
341+
**Args:**
339342
340343
* `x` : input array (2-dimensional array).
341344
Every row represents one input vector (features).
342345
343-
Returns:
346+
**Returns:**
344347
345348
* `y`: output vector (n-dimensional array). Every row represents
346349
output (outputs) for an input vector.
350+
347351
"""
348352
# measure the data and check if the dimension agree
349353
try:
@@ -365,7 +369,7 @@ def test(self, x, d):
365369
"""
366370
Function for batch test of already trained MLP.
367371
368-
Args:
372+
**Args:**
369373
370374
* `x` : input array (2-dimensional array).
371375
Every row represents one input vector (features).
@@ -374,10 +378,11 @@ def test(self, x, d):
374378
Every row represents target for one input vector.
375379
Target can be one or more values (in case of multiple outputs).
376380
377-
Returns:
381+
**Returns:**
378382
379383
* `e`: output vector (n-dimensional array). Every row represents
380384
error (or errors) for an input and output.
385+
381386
"""
382387
# measure the data and check if the dimmension agree
383388
N = len(x)
@@ -410,14 +415,15 @@ def predict(self, x):
410415
"""
411416
This function make forward pass through MLP (no update).
412417
413-
Args:
418+
**Args:**
414419
415420
* `x` : input vector (1-dimensional array)
416421
417-
Returns:
422+
**Returns:**
418423
419424
* `y` : output of MLP (float or 1-dimensional array).
420425
Size depends on number of MLP outputs.
426+
421427
"""
422428
# forward pass to hidden layers
423429
for l in self.layers:
@@ -435,15 +441,16 @@ def update(self, d):
435441
This function make update according provided target
436442
and the last used input vector.
437443
438-
Args:
444+
**Args:**
439445
440446
* `d` : target (float or 1-dimensional array).
441447
Size depends on number of MLP outputs.
442448
443-
Returns:
449+
**Returns:**
444450
445451
* `e` : error used for update (float or 1-dimensional array).
446452
Size correspond to size of input `d`.
453+
447454
"""
448455
# update output layer
449456
e = d - self.y

padasip/preprocess.py

Lines changed: 0 additions & 161 deletions
This file was deleted.

0 commit comments

Comments (0)