
Commit 84d849a

Author: magicindian
midpoint commit of neural network rewrite. I really need a distributed version control system!
1 parent f64ecaf commit 84d849a

24 files changed: +1474 -2 lines changed
Lines changed: 70 additions & 0 deletions
@@ -0,0 +1,70 @@
15,21.66
15,22.75
15,22.3
18,31.25
28,44.79
29,40.55
37,50.25
37,46.88
44,52.33
50,63.47
50,61.13
60,81
61,73.09
64,79.09
65,79.51
65,65.31
72,71.9
75,86.1
75,94.6
82,92.5
85,105
91,101.7
91,102.9
97,110
98,104.3
125,134.9
142,130.68
147,155.3
147,152.2
150,144.5
159,142.15
165,139.81
183,153.22
192,145.72
195,161.1
218,174.18
218,173.03
219,173.54
224,178.86
225,177.68
227,173.73
232,159.98
232,161.29
237,187.07
246,176.13
258,183.4
276,186.26
285,189.66
300,186.09
301,186.7
305,186.8
312,195.1
317,216.41
338,203.23
347,188.38
354,189.7
357,195.31
375,202.63
394,224.82
513,203.3
535,209.7
554,233.9
591,234.7
648,244.3
660,231
705,242.4
723,230.77
756,242.57
768,232.12
860,246.7
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
parameter 1 = age in days
parameter 2 = weight of eye lenses in milligrams

source = Neural Networks: A Comprehensive Foundation - Simon Haykin
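Since this file only documents the column meanings, here is a minimal sketch of how the two-column data file above might be read. The file name "rabbit.data" is a placeholder (the actual file name is not shown in this diff), and this loader is not part of the commit; trim() tolerates the occasional space after a comma seen in the data.

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.util.ArrayList;
    import java.util.List;

    public class DataLoaderSketch {
        public static void main(String[] args) throws Exception {
            List<double[]> rows = new ArrayList<double[]>();
            BufferedReader r = new BufferedReader(new FileReader("rabbit.data"));
            String line;
            while ((line = r.readLine()) != null) {
                if (line.trim().length() == 0) continue;
                String[] parts = line.split(",");
                double age = Double.parseDouble(parts[0].trim());    // age in days
                double weight = Double.parseDouble(parts[1].trim()); // lens weight in mg
                rows.add(new double[] { age, weight });
            }
            r.close();
            System.out.println("loaded " + rows.size() + " samples");
        }
    }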
Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
package aima.learning.neural;

public class BackPropLearning implements NNTrainingScheme {
    private final double learningRate;
    private final double momentum;

    private final Layer hiddenLayer;
    private final Layer outputLayer;

    public BackPropLearning(FeedForwardNeuralNetwork network,
            double learningRate, double momentum) {

        this.hiddenLayer = network.getHiddenLayer();
        this.outputLayer = network.getOutputLayer();
        this.learningRate = learningRate;
        this.momentum = momentum;
    }

    public Vector processInput(FeedForwardNeuralNetwork network, Vector input) {
        // forward pass: input -> hidden layer -> output layer
        hiddenLayer.feedForward(input);
        outputLayer.feedForward(hiddenLayer.getLastActivationValues());
        return outputLayer.getLastActivationValues();
    }

    public void processError(FeedForwardNeuralNetwork network, Vector error) {
        // TODO calculate total error somewhere
        // create sensitivity matrices, propagating backwards from the output
        outputLayer.sensitivityMatrixFromErrorMatrix(error);
        hiddenLayer.sensitivityMatrixFromSucceedingLayer(network
                .getOutputLayer());

        // calculate weight updates
        outputLayer.calculateWeightUpdates(hiddenLayer
                .getLastActivationValues(), learningRate, momentum);
        hiddenLayer.calculateWeightUpdates(hiddenLayer.getLastInputValues(),
                learningRate, momentum);

        // calculate bias updates
        outputLayer.calculateBiasUpdates(learningRate, momentum);
        hiddenLayer.calculateBiasUpdates(learningRate, momentum);

        // update weights and biases
        outputLayer.updateWeights();
        outputLayer.updateBiases();

        hiddenLayer.updateWeights();
        hiddenLayer.updateBiases();
    }
}
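For reference, the per-layer weight change that the calculateWeightUpdates/updateWeights pair presumably computes is the standard backprop-with-momentum rule: delta_w(t) = momentum * delta_w(t-1) - learningRate * s * a^T, where s is the layer's sensitivity and a is its input activation. A self-contained plain-array sketch of one such update follows; none of these names belong to the AIMA API, and the exact form inside Layer may differ.

    // Illustrative only: a plain-array sketch of the momentum weight update
    // that calculateWeightUpdates()/updateWeights() presumably implement.
    public class MomentumUpdateSketch {
        public static void main(String[] args) {
            double learningRate = 0.1;
            double momentum = 0.9;
            double[][] weights = { { 0.5, -0.2 }, { 0.3, 0.8 } }; // 2 neurons, 2 inputs
            double[][] lastDelta = new double[2][2];              // previous updates, initially zero
            double[] sensitivity = { 0.05, -0.02 };               // dE/dnet for each neuron
            double[] activation = { 1.0, 0.4 };                   // inputs feeding this layer

            for (int i = 0; i < weights.length; i++) {
                for (int j = 0; j < weights[i].length; j++) {
                    // delta_w = momentum * previous_delta - learningRate * s_i * a_j
                    double delta = momentum * lastDelta[i][j]
                            - learningRate * sensitivity[i] * activation[j];
                    weights[i][j] += delta;
                    lastDelta[i][j] = delta; // remembered for the next step
                }
            }
            System.out.println(java.util.Arrays.deepToString(weights));
        }
    }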
Lines changed: 107 additions & 0 deletions
@@ -0,0 +1,107 @@
package aima.learning.neural;

import aima.learning.framework.DataSet;
import aima.util.Matrix;

public class FeedForwardNeuralNetwork implements FunctionApproximator {

    private final Layer hiddenLayer;
    private final Layer outputLayer;

    private final double learningRate, momentum;

    private final NNTrainingScheme trainingScheme;

    /*
     * Constructor to be used by non-test code. For now, assume that the config
     * contains the learning rate, momentum parameter, and number of epochs.
     * Change this later to accommodate varied learning schemes like early
     * stopping.
     */
    public FeedForwardNeuralNetwork(NNConfig config) {

        learningRate = config.getParameterAsDouble("learning_rate");
        momentum = config.getParameterAsDouble("momentum");

        int numberOfInputNeurons = config
                .getParameterAsInteger("number_of_inputs");
        int numberOfHiddenNeurons = config
                .getParameterAsInteger("number_of_hidden_neurons");
        int numberOfOutputNeurons = config
                .getParameterAsInteger("number_of_outputs");

        double lowerLimitForWeights = config
                .getParameterAsDouble("lower_limit_weights");
        double upperLimitForWeights = config
                .getParameterAsDouble("upper_limit_weights");

        hiddenLayer = new Layer(numberOfHiddenNeurons, numberOfInputNeurons,
                lowerLimitForWeights, upperLimitForWeights,
                new LogSigActivationFunction());

        outputLayer = new Layer(numberOfOutputNeurons, numberOfHiddenNeurons,
                lowerLimitForWeights, upperLimitForWeights,
                new PureLinearActivationFunction());
        trainingScheme = new BackPropLearning(this, learningRate, momentum);
    }

    /*
     * ONLY for testing, to set up a network with known weights. In the future,
     * use this to deserialize networks, after adding variables for the pending
     * weight update, last input, etc.
     */
    public FeedForwardNeuralNetwork(Matrix hiddenLayerWeights,
            Vector hiddenLayerBias, Matrix outputLayerWeights,
            Vector outputLayerBias, double learningRate, double momentum) {
        this.learningRate = learningRate;
        this.momentum = momentum;
        hiddenLayer = new Layer(hiddenLayerWeights, hiddenLayerBias,
                new LogSigActivationFunction());
        outputLayer = new Layer(outputLayerWeights, outputLayerBias,
                new PureLinearActivationFunction());
        trainingScheme = new BackPropLearning(this, learningRate, momentum);
    }

    public void processError(Vector error) {
        trainingScheme.processError(this, error);
    }

    public Vector processInput(Vector input) {
        return trainingScheme.processInput(this, input);
    }

    public void testOn(DataSet ds) {
        // TODO Auto-generated method stub
    }

    public Matrix getHiddenLayerWeights() {
        return hiddenLayer.getWeightMatrix();
    }

    public Vector getHiddenLayerBias() {
        return hiddenLayer.getBiasVector();
    }

    public Matrix getOutputLayerWeights() {
        return outputLayer.getWeightMatrix();
    }

    public Vector getOutputLayerBias() {
        return outputLayer.getBiasVector();
    }

    public Layer getHiddenLayer() {
        return hiddenLayer;
    }

    public Layer getOutputLayer() {
        return outputLayer;
    }
}
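The architecture above is fixed: one log-sigmoid hidden layer feeding a pure-linear output layer, which suits regression tasks like the age-vs-lens-weight data in this commit. Here is a self-contained sketch of that forward pass with plain arrays and illustrative weights; it is not the AIMA Layer/Matrix code, just the same computation spelled out.

    // Illustrative forward pass: log-sigmoid hidden layer, linear output layer.
    public class ForwardPassSketch {
        static double logSig(double x) {
            return 1.0 / (1.0 + Math.exp(-x));
        }

        public static void main(String[] args) {
            double[] input = { 0.3 };                      // one input neuron
            double[][] wHidden = { { 0.4 }, { -0.7 }, { 0.1 } }; // 3 hidden neurons
            double[] bHidden = { 0.1, 0.2, -0.3 };
            double[][] wOut = { { 0.5, -0.4, 0.9 } };      // one output neuron
            double[] bOut = { 0.05 };

            double[] hidden = new double[wHidden.length];
            for (int i = 0; i < hidden.length; i++) {
                double sum = bHidden[i];
                for (int j = 0; j < input.length; j++) {
                    sum += wHidden[i][j] * input[j];
                }
                hidden[i] = logSig(sum);                   // log-sigmoid activation
            }

            double out = bOut[0];                          // pure-linear output
            for (int j = 0; j < hidden.length; j++) {
                out += wOut[0][j] * hidden[j];
            }
            System.out.println("network output: " + out);
        }
    }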
Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
package aima.learning.neural;

public interface FunctionApproximator {
    /*
     * Accepts an input pattern and processes it, returning an output value.
     */
    Vector processInput(Vector input);

    /*
     * Accepts an error and changes the parameters to accommodate it.
     */
    void processError(Vector error);
}
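The intended call pattern for a FunctionApproximator is: run the forward pass, compute the error as target minus output, then feed the error back for one learning step. Since the Vector API does not appear in this commit, the sketch below uses a plain-array stand-in for the interface; the stand-in and the helper name trainOneEpoch are illustrative, not part of the AIMA code.

    // Sketch of the forward/error call pattern against an array analog
    // of FunctionApproximator.
    public class TrainingLoopSketch {

        interface ArrayFunctionApproximator {
            double[] processInput(double[] input);
            void processError(double[] error);
        }

        static void trainOneEpoch(ArrayFunctionApproximator net,
                double[][] inputs, double[][] targets) {
            for (int n = 0; n < inputs.length; n++) {
                double[] output = net.processInput(inputs[n]); // forward pass
                double[] error = new double[output.length];
                for (int k = 0; k < output.length; k++) {
                    error[k] = targets[n][k] - output[k];      // target - output
                }
                net.processError(error);                       // one learning step
            }
        }
    }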
