|
| 1 | +from .Module import Module |
| 2 | + |
| 3 | +import theano as _th |
| 4 | +import numpy as _np |
| 5 | + |
| 6 | + |
class SpatialConvolution(Module):
    """2D convolution layer with learnable filters and optional per-plane bias.

    Filters are stored as a Theano shared variable of shape
    (n_output_plane, n_input_plane, k_h, k_w); a matching zero-initialized
    shared variable accumulates their gradient.
    """

    def __init__(self, n_input_plane, n_output_plane, k_w, k_h, d_w=1, d_h=1, with_bias=True, border_mode='valid', imshape=None):
        """Create the layer and initialize its parameters.

        n_input_plane / n_output_plane: number of input / output feature maps.
        k_w, k_h: filter width and height.
        d_w, d_h: horizontal and vertical stride.
        with_bias: when True, add a learnable per-output-plane bias.
        border_mode: passed through to Theano's conv2d ('valid' or 'full').
        imshape: optional (height, width) hint for the input images; when
            None the spatial extent is left unspecified at graph-build time.
        """
        Module.__init__(self)

        # Record the layer configuration.
        self.n_input_plane = n_input_plane
        self.n_output_plane = n_output_plane
        self.k_w = k_w
        self.k_h = k_h
        self.d_w = d_w
        self.d_h = d_h
        self.with_bias = with_bias
        self.border_mode = border_mode
        self.imshape = imshape

        # Uniform init in [-bound, bound]; bound follows a Glorot-style
        # scheme over fan-in + fan-out (both include the kernel area).
        bound = _np.sqrt(4. / ((self.n_input_plane + self.n_output_plane) * self.k_w * self.k_h))
        init_w = _np.random.uniform(low=-bound, high=bound, size=(n_output_plane, n_input_plane, k_h, k_w))
        self.weight = _th.shared(init_w.astype(dtype=_th.config.floatX))
        # Gradient accumulator: same shape as the weights, all zeros.
        self.grad_weight = _th.shared((init_w * 0).astype(dtype=_th.config.floatX))

        if self.with_bias:
            # One bias (and gradient slot) per output feature map.
            self.bias = _th.shared(_np.zeros(shape=(n_output_plane,), dtype=_th.config.floatX))
            self.grad_bias = _th.shared(_np.zeros(shape=(n_output_plane,), dtype=_th.config.floatX))

    def symb_forward(self, symb_input):
        """Build and return the symbolic convolution of `symb_input`.

        `symb_input` is expected to be a 4D tensor laid out as
        (batch, n_input_plane, height, width) — TODO confirm with callers.
        """
        # Batch size is never known statically; spatial extent only when
        # the caller supplied an imshape hint.
        img_shape = (None, self.n_input_plane) + (self.imshape if self.imshape else (None, None))

        out = _th.tensor.nnet.conv.conv2d(
            symb_input, self.weight,
            image_shape=img_shape,
            filter_shape=(self.n_output_plane, self.n_input_plane, self.k_h, self.k_w),
            border_mode=self.border_mode,
            subsample=(self.d_h, self.d_w)
        )

        if not self.with_bias:
            return out
        # Broadcast the per-plane bias across batch and spatial dimensions.
        return out + self.bias.dimshuffle('x', 0, 'x', 'x')
| 41 | + |
0 commit comments