
Commit 0540b56

Added an MNIST example. Fixed typos. Compatibility fixes. Compatibility fix 2.
1 parent 87b70b7 commit 0540b56

File tree

22 files changed: +337, -148 lines

beacon8/__init__.py

Lines changed: 3 additions & 1 deletion
@@ -1 +1,3 @@
-from layer import *
+from .layers import *
+from .containers import *
+from .criteria import *

beacon8/containers/Container.py

Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
+from ..layers import Module
+
+
+class Container(Module):
+
+    def __init__(self, *modules):
+        Module.__init__(self)
+
+        self.modules = []
+        for module in modules:
+            self.add(module)
+
+    def evaluate(self):
+        Module.evaluate(self)
+        for module in self.modules:
+            module.evaluate()
+
+    def training(self):
+        Module.training(self)
+        for module in self.modules:
+            module.training()
+
+    def parameters(self):
+        params, grads = [], []
+
+        for module in self.modules:
+            mod_params, mod_grads = module.parameters()
+            params += mod_params
+            grads += mod_grads
+
+        return params, grads
+
+    def add(self, module):
+        self.modules.append(module)

beacon8/containers/Sequential.py

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+from .Container import Container
+
+
+class Sequential(Container):
+    def symb_forward(self, symb_input):
+        symb_output = symb_input
+        for module in self.modules:
+            symb_output = module.symb_forward(symb_output)
+        return symb_output
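
Taken together, Container collects child modules and aggregates their parameters, while Sequential simply chains their symb_forward calls. A minimal sketch of how a model might be assembled from these pieces, assuming the Linear, ReLU, SoftMax and ClassNLLCriterion classes added further down in this commit are re-exported from the beacon8 package; the layer sizes are illustrative only:

    import beacon8 as bb8

    # Hypothetical MLP for MNIST-sized inputs (784 pixels, 10 classes).
    model = bb8.Sequential(
        bb8.Linear(784, 100),
        bb8.ReLU(),
        bb8.Linear(100, 10),
        bb8.SoftMax(),
    )

    criterion = bb8.ClassNLLCriterion()

    # forward() compiles a Theano function on first use and returns predictions:
    # predictions = model.forward(batch_of_images)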

beacon8/containers/__init__.py

Lines changed: 2 additions & 36 deletions
@@ -1,36 +1,2 @@
-from .Layers import Module
-
-class Container(Module):
-
-    def __init__(self, *modules):
-        super().__init__()
-
-        self.modules = []
-        for module in modules:
-            self.add(module)
-
-    def evaluate(self):
-        super().evaluate()
-        for module in self.modules:
-            module.evaluate()
-
-    def training(self):
-        super().training()
-        for module in self.modules:
-            module.training()
-
-    def parameters(self):
-        params, grads = [], []
-
-        for module in self.modules:
-            mod_params, mod_grads = module.parameters()
-            params += mod_params
-            grads += mod_grads
-
-        return params, grads
-
-    def add(self, module):
-        self.modules.append(module)
-
-    def symbolic_forward(self, symbolic_input):
-        raise NotImplementedError
+from .Container import *
+from .Sequential import *

beacon8/criteria/ClassNLLCriterion.py

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+import theano.tensor as _T
+
+
+class ClassNLLCriterion:
+    def symb_forward(self, symb_input, symb_targets):
+        int_targets = _T.cast(symb_targets, 'int32')
+        return _T.mean(-_T.log(symb_input[_T.arange(symb_targets.shape[0]), int_targets]))
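
The criterion uses advanced indexing to pick, for every row of the (batch x classes) probability matrix, the entry belonging to that sample's target class, then averages the negative log of those probabilities. A plain NumPy sketch of the same computation with made-up numbers, just to illustrate the indexing:

    import numpy as np

    probs = np.array([[0.7, 0.1, 0.1, 0.1],      # predicted class probabilities
                      [0.2, 0.5, 0.2, 0.1],      # (3 samples, 4 classes)
                      [0.25, 0.25, 0.25, 0.25]])
    targets = np.array([0, 1, 3])                # true class index per sample

    # probs[np.arange(3), targets] -> [0.7, 0.5, 0.25]
    nll = np.mean(-np.log(probs[np.arange(len(targets)), targets]))
    # (0.357 + 0.693 + 1.386) / 3 ≈ 0.81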

beacon8/criteria/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+from .ClassNLLCriterion import *

beacon8/layers/Linear.py

Lines changed: 8 additions & 5 deletions
@@ -1,12 +1,13 @@
-from . import Module
+from .Module import Module
 
 import numpy as _np
 import theano as _th
 
+
 class Linear(Module):
 
     def __init__(self, nin, nout, init='Xavier', with_bias=True):
-        super().__init__()
+        Module.__init__(self)
 
         self.nin = nin
         self.nout = nout
@@ -17,19 +18,21 @@ def __init__(self, nin, nout, init='Xavier', with_bias=True):
 
     def reset(self):
         if self.init == 'Xavier':
-            w_bound = _np.sqrt(4 / (self.nin + self.nout))
+            w_bound = _np.sqrt(4. / (self.nin + self.nout))
             W = _np.random.uniform(low=-w_bound, high=w_bound,
                                    size=(self.nin, self.nout))
         else:
             raise NotImplementedError
 
         self.weight = _th.shared(W.astype(_th.config.floatX))
+        self.grad_weight = _th.shared((W*0.).astype(_th.config.floatX))
 
         if self.with_bias:
             self.bias = _th.shared(_np.zeros(shape=self.nout, dtype=_th.config.floatX))
+            self.grad_bias = _th.shared(_np.zeros(shape=self.nout, dtype=_th.config.floatX))
 
-    def symbolic_forward(self, symbolic_input):
-        out = _th.tensor.dot(symbolic_input, self.weight)
+    def symb_forward(self, symb_input):
+        out = _th.tensor.dot(symb_input, self.weight)
 
         if self.with_bias:
             out += self.bias
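
The change from 4 to 4. in the Xavier bound looks like one of the compatibility fixes mentioned in the commit message: under Python 2's integer division, 4 / (self.nin + self.nout) evaluates to 0 whenever nin + nout exceeds 4, which would make w_bound zero and initialise every weight to 0. A quick illustration (the 784/100 sizes are made up):

    nin, nout = 784, 100
    print(4 // (nin + nout))   # 0 -- what Python 2 computed for 4 / (nin + nout)
    print(4. / (nin + nout))   # ≈ 0.00452 -- the intended value before the sqrt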

beacon8/layers/Module.py

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
+import theano as _th
+import theano.tensor as _T
+
+
+class Module:
+
+    def __init__(self):
+        self.training_mode = True
+
+        self.fn_forward = None
+        self.fn_accum_grads = None
+
+    def reset(self):
+        pass
+
+    #def __hash__(self):
+    #    raise NotImplementedError("You *need* to reimplement hash, even if it's just python's default. See the documentation for more info.")
+
+    def zero_grad_parameters(self):
+        _, grads = self.parameters()
+        for grad in grads:
+            grad.set_value(0 * grad.get_value())
+
+    def parameters(self):
+        params, grads = [], []
+
+        if self.training_mode and hasattr(self, 'weight'):
+            assert hasattr(self, 'grad_weight'), "The layer {} has a `weight` variable but no `grad_weight`, you probably forgot to implement it.".format(type(self))
+            params += [self.weight]
+            grads += [self.grad_weight]
+
+        if self.training_mode and hasattr(self, 'bias'):
+            assert hasattr(self, 'grad_bias'), "The layer {} has a `bias` variable but no `grad_bias`, you probably forgot to implement it.".format(type(self))
+            params += [self.bias]
+            grads += [self.grad_bias]
+
+        return params, grads
+
+    def evaluate(self):
+        self.training_mode = False
+
+    def training(self):
+        self.training_mode = True
+
+    def symb_forward(self, symb_input):
+        raise NotImplementedError
+
+    def forward(self, data):
+        if self.fn_forward is None:
+            symb_in = _T.TensorType(_th.config.floatX, (False,) * data.ndim)('X')
+            symb_out = self.symb_forward(symb_in)
+            self.fn_forward = _th.function(inputs=[symb_in], outputs=symb_out)
+
+        return self.fn_forward(data)
+
+    def accumulate_gradients(self, data_in, data_tgt, loss):
+        if self.fn_accum_grads is None:
+            symb_in = _T.TensorType(_th.config.floatX, (False,) * data_in.ndim)('X')
+            symb_tgt = _T.TensorType(_th.config.floatX, (False,) * data_tgt.ndim)('T')
+            symb_out = self.symb_forward(symb_in)
+            symb_err = loss.symb_forward(symb_out, symb_tgt)
+
+            params, grads = self.parameters()
+            symb_grads = _th.grad(cost=symb_err, wrt=params)
+
+            grads_updates = [(grad, grad + symb_grad) for grad, symb_grad in zip(grads, symb_grads)]
+            self.fn_accum_grads = _th.function(
+                inputs=[symb_in, symb_tgt],
+                updates=grads_updates
+            )
+
+        self.fn_accum_grads(data_in, data_tgt)
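
forward and accumulate_gradients compile their Theano functions lazily on the first call and cache them, so a training loop pays the compilation cost only once; accumulate_gradients adds new gradients onto the grad_* shared variables rather than overwriting them. A rough sketch of one plain-SGD step built on this API (the update rule and learning rate are illustrative only; the commit's actual MNIST example is not part of this excerpt):

    def sgd_step(model, criterion, batch_x, batch_y, lr=0.1):
        # Reset the gradient accumulators from the previous step.
        model.zero_grad_parameters()

        # Forward + backward pass; gradients are summed into grad_weight/grad_bias.
        model.accumulate_gradients(batch_x, batch_y, criterion)

        # Gradient-descent update directly on the shared variables.
        for param, grad in zip(*model.parameters()):
            param.set_value(param.get_value() - lr * grad.get_value())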

beacon8/layers/ReLU.py

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+from .Module import Module
+
+
+class ReLU(Module):
+
+    def symb_forward(self, symb_input):
+        return (symb_input + abs(symb_input)) * 0.5

beacon8/layers/Softmax.py

Lines changed: 3 additions & 2 deletions
@@ -2,10 +2,11 @@
 
 import theano.tensor as _T
 
+
 class SoftMax(Module):
 
     def __init__(self):
         Module.__init__(self)
 
-    def symbolic_forward(self, symbolic_input):
-        return _T.nnet.softmax(symbolic_input)
+    def symb_forward(self, symb_input):
+        return _T.nnet.softmax(symb_input)

0 commit comments
