diff --git a/SCNN/scnnapplygrads.m b/SCNN/scnnapplygrads.m
new file mode 100644
index 0000000..49894fe
--- /dev/null
+++ b/SCNN/scnnapplygrads.m
@@ -0,0 +1,15 @@
+function net = scnnapplygrads(net, opts)
+    for l = 2 : numel(net.layers)
+        if strcmp(net.layers{l}.type, 'c')
+            for j = 1 : numel(net.layers{l}.a)
+                for ii = 1 : numel(net.layers{l - 1}.a)
+                    net.layers{l}.k{ii}{j} = net.layers{l}.k{ii}{j} - opts.alpha * net.layers{l}.dk{ii}{j};
+                end
+                net.layers{l}.b{j} = net.layers{l}.b{j} - opts.alpha * net.layers{l}.db{j};
+            end
+        end
+    end
+
+    net.ffW = net.ffW - opts.alpha * net.dffW;
+    net.ffb = net.ffb - opts.alpha * net.dffb;
+end
diff --git a/SCNN/scnnbp.m b/SCNN/scnnbp.m
new file mode 100644
index 0000000..0333b70
--- /dev/null
+++ b/SCNN/scnnbp.m
@@ -0,0 +1,64 @@
+function net = scnnbp(net, y)
+n = numel(net.layers);
+
+% error
+net.e = net.o - y;
+% loss function
+net.L = 1/2* sum(net.e(:) .^ 2) / size(net.e, 2);
+
+%% backprop deltas
+net.od = net.e .* (net.o .* (1 - net.o)); % output delta
+net.fvd = (net.ffW' * net.od); % feature vector delta
+if strcmp(net.layers{n}.type, 'c') % only conv layers has sigm function
+    net.fvd = net.fvd .* (net.fv .* (1 - net.fv));
+end
+
+% reshape feature vector deltas into output map style
+sa = sptsize(net.layers{n}.a{1});
+fvnum = sa(1) * sa(2);
+for j = 1 : numel(net.layers{n}.a)
+    net.layers{n}.d{j} = tensor2spcell(reshape(net.fvd(((j - 1) * fvnum + 1) : j * fvnum, :), sa(1), sa(2), sa(3)));
+end
+
+for l = (n - 1) : -1 : 1
+    if strcmp(net.layers{l}.type, 'c')
+        for j = 1 : numel(net.layers{l}.a)
+            expand_param = [net.layers{l + 1}.scale net.layers{l + 1}.scale];
+            expanded_delta = spcell_expand(net.layers{l + 1}.d{j},expand_param);
+            net.layers{l}.d{j} = spcell_mul(spcell_mul(net.layers{l}.a{j},spcell_sub(1,net.layers{l}.a{j})),...
+                spcell_div(expanded_delta,net.layers{l + 1}.scale ^ 2));
+        end
+    elseif strcmp(net.layers{l}.type, 's')
+        for i = 1 : numel(net.layers{l}.a)
+            z = sptzeros(sptsize(net.layers{l}.a{1}));
+            for j = 1 : numel(net.layers{l + 1}.a)
+                z = spcell_add(z,spconvn(net.layers{l + 1}.d{j}, rot180(net.layers{l + 1}.k{i}{j}), 'full'));
+            end
+            net.layers{l}.d{i} = z;
+        end
+    end
+end
+
+%% calc gradients
+for l = 2 : n
+    if strcmp(net.layers{l}.type, 'c')
+        for j = 1 : numel(net.layers{l}.a)
+            n_iter_i = numel(net.layers{l - 1}.a);
+            spcell_temp = cell(1,n_iter_i);
+            parfor i = 1 : n_iter_i
+                spcell_temp{i} = spconvn(sptflipall(net.layers{l - 1}.a{i}),net.layers{l}.d{j}, 'valid') / numel(net.layers{l}.d{j}); % fixed: was size(d{j},3)==1 since d{j} is a 1xn cell; divide by batch size like db/dffW
+            end
+            for i = 1:n_iter_i
+                net.layers{l}.dk{i}{j} = spcell_temp{i};
+            end
+            net.layers{l}.db{j} = sptsum(net.layers{l}.d{j}) / numel(net.layers{l}.d{j});
+        end
+    end
+end
+net.dffW = net.od * (net.fv)' / size(net.od, 2);
+net.dffb = mean(net.od, 2);
+
+    function X = rot180(X)
+        X = flip(flip(X, 1), 2);
+    end
+end
diff --git a/SCNN/scnnff.m b/SCNN/scnnff.m
new file mode 100644
index 0000000..41ebb30
--- /dev/null
+++ b/SCNN/scnnff.m
@@ -0,0 +1,46 @@
+function net = scnnff(net, x)
+%% DOCUMENT
+%{
+type of x: cell_array(x,y)>(n)
+%}
+n = numel(net.layers);
+net.layers{1}.a{1} = x;
+inputmaps = 1;
+
+for l = 2 : n % for each layer
+    if strcmp(net.layers{l}.type, 'c')
+        % !!below can probably be handled by insane matrix operations
+        for j = 1 : net.layers{l}.outputmaps % for each output map
+            % create temp output map
+            z = sptzeros(sptsize(net.layers{l - 1}.a{1}) - [net.layers{l}.kernelsize - 1 net.layers{l}.kernelsize - 1 0]);
+            temp_matrix = cell(1,inputmaps);
+            for i = 1 : inputmaps % for each input map
+                % convolve with corresponding kernel and add to temp output map
+                temp_matrix{i} = spconvn(net.layers{l - 1}.a{i}, net.layers{l}.k{i}{j}, 'valid');
+            end
+            for i = 1 : inputmaps
+                z = spcell_add(z,temp_matrix{i});
+            end
+            % add bias, pass through nonlinearity
+            
net.layers{l}.a{j} = spcell_sigm(spcell_add(z,net.layers{l}.b{j}));
+        end
+        % set number of input maps to this layers number of outputmaps
+        inputmaps = net.layers{l}.outputmaps;
+    elseif strcmp(net.layers{l}.type, 's')
+        % downsample
+        for j = 1 : inputmaps
+            z = spconvn(net.layers{l - 1}.a{j}, ones(net.layers{l}.scale) / (net.layers{l}.scale ^ 2), 'valid'); % !! replace with variable
+            net.layers{l}.a{j} = sptsubsample(z,net.layers{l}.scale);
+        end
+    end
+end
+
+% concatenate all end layer feature maps into vector
+net.fv = [];
+for j = 1 : numel(net.layers{n}.a)
+    % sa = sptsize(net.layers{n}.a{j});
+    net.fv = [net.fv; sptflatten(net.layers{n}.a{j})];
+end
+% feedforward into output perceptrons
+net.o = sigm(net.ffW * net.fv + repmat(net.ffb, 1, size(net.fv, 2)));
+end
diff --git a/SCNN/scnnnumgradcheck.m b/SCNN/scnnnumgradcheck.m
new file mode 100644
index 0000000..14676dc
--- /dev/null
+++ b/SCNN/scnnnumgradcheck.m
@@ -0,0 +1,79 @@
+function scnnnumgradcheck(net, x, y)
+    epsilon = 1e-4;
+    er = 1e-8;
+    n = numel(net.layers);
+    for j = 1 : numel(net.ffb)
+        net_m = net; net_p = net;
+        net_p.ffb(j) = net_m.ffb(j) + epsilon;
+        net_m.ffb(j) = net_m.ffb(j) - epsilon;
+        net_m = scnnff(net_m, x); net_m = scnnbp(net_m, y); % fixed: was cnnff/cnnbp (undefined in SCNN)
+        net_p = scnnff(net_p, x); net_p = scnnbp(net_p, y);
+        d = (net_p.L - net_m.L) / (2 * epsilon);
+        e = abs(d - net.dffb(j));
+        if e > er
+            error('numerical gradient checking failed');
+        end
+    end
+
+    for i = 1 : size(net.ffW, 1)
+        for u = 1 : size(net.ffW, 2)
+            net_m = net; net_p = net;
+            net_p.ffW(i, u) = net_m.ffW(i, u) + epsilon;
+            net_m.ffW(i, u) = net_m.ffW(i, u) - epsilon;
+            net_m = scnnff(net_m, x); net_m = scnnbp(net_m, y);
+            net_p = scnnff(net_p, x); net_p = scnnbp(net_p, y);
+            d = (net_p.L - net_m.L) / (2 * epsilon);
+            e = abs(d - net.dffW(i, u));
+            if e > er
+                error('numerical gradient checking failed');
+            end
+        end
+    end
+
+    for l = n : -1 : 2
+        if strcmp(net.layers{l}.type, 'c')
+            for j = 1 : numel(net.layers{l}.a)
+                net_m = net; net_p = net;
+                net_p.layers{l}.b{j} = net_m.layers{l}.b{j} + epsilon;
+                net_m.layers{l}.b{j} = net_m.layers{l}.b{j} - epsilon;
+                net_m = scnnff(net_m, x); net_m = scnnbp(net_m, y);
+                net_p = scnnff(net_p, x); net_p = scnnbp(net_p, y);
+                d = (net_p.L - net_m.L) / (2 * epsilon);
+                e = abs(d - net.layers{l}.db{j});
+                if e > er
+                    error('numerical gradient checking failed');
+                end
+                for i = 1 : numel(net.layers{l - 1}.a)
+                    for u = 1 : size(net.layers{l}.k{i}{j}, 1)
+                        for v = 1 : size(net.layers{l}.k{i}{j}, 2)
+                            net_m = net; net_p = net;
+                            net_p.layers{l}.k{i}{j}(u, v) = net_p.layers{l}.k{i}{j}(u, v) + epsilon;
+                            net_m.layers{l}.k{i}{j}(u, v) = net_m.layers{l}.k{i}{j}(u, v) - epsilon;
+                            net_m = scnnff(net_m, x); net_m = scnnbp(net_m, y);
+                            net_p = scnnff(net_p, x); net_p = scnnbp(net_p, y);
+                            d = (net_p.L - net_m.L) / (2 * epsilon);
+                            e = abs(d - net.layers{l}.dk{i}{j}(u, v));
+                            if e > er
+                                error('numerical gradient checking failed');
+                            end
+                        end
+                    end
+                end
+            end
+        elseif strcmp(net.layers{l}.type, 's')
+%            for j = 1 : numel(net.layers{l}.a)
+%                net_m = net; net_p = net;
+%                net_p.layers{l}.b{j} = net_m.layers{l}.b{j} + epsilon;
+%                net_m.layers{l}.b{j} = net_m.layers{l}.b{j} - epsilon;
+%                net_m = scnnff(net_m, x); net_m = scnnbp(net_m, y);
+%                net_p = scnnff(net_p, x); net_p = scnnbp(net_p, y);
+%                d = (net_p.L - net_m.L) / (2 * epsilon);
+%                e = abs(d - net.layers{l}.db{j});
+%                if e > er
+%                    error('numerical gradient checking failed');
+%                end
+%            end
+        end
+    end
+%    keyboard
+end
diff --git a/SCNN/scnnsetup.m b/SCNN/scnnsetup.m
new file mode 100644
index 0000000..6ec9290
--- /dev/null
+++ b/SCNN/scnnsetup.m
@@ -0,0 +1,36 @@
+function net = scnnsetup(net, x, y)
+    %assert(~isOctave() || compare_versions(OCTAVE_VERSION, '3.8.0', '>='), ['Octave 3.8.0 or greater is required for CNNs as there is a bug in convolution in previous versions. See http://savannah.gnu.org/bugs/?39314. 
Your version is ' myOctaveVersion]);
+    inputmaps = 1;
+    mapsize = size(x{1});
+
+    for l = 1 : numel(net.layers) % layer
+        if strcmp(net.layers{l}.type, 's')
+            mapsize = mapsize / net.layers{l}.scale;
+            assert(all(floor(mapsize)==mapsize), ['Layer ' num2str(l) ' size must be integer. Actual: ' num2str(mapsize)]);
+            for j = 1 : inputmaps
+                net.layers{l}.b{j} = 0;
+            end
+        end
+        if strcmp(net.layers{l}.type, 'c')
+            mapsize = mapsize - net.layers{l}.kernelsize + 1;
+            fan_out = net.layers{l}.outputmaps * net.layers{l}.kernelsize ^ 2;
+            for j = 1 : net.layers{l}.outputmaps % output map
+                fan_in = inputmaps * net.layers{l}.kernelsize ^ 2;
+                for i = 1 : inputmaps % input map
+                    net.layers{l}.k{i}{j} = (rand(net.layers{l}.kernelsize) - 0.5) * 2 * sqrt(6 / (fan_in + fan_out));
+                end
+                net.layers{l}.b{j} = 0;
+            end
+            inputmaps = net.layers{l}.outputmaps;
+        end
+    end
+    % 'onum' is the number of labels, that's why it is calculated using size(y, 1). If you have 20 labels so the output of the network will be 20 neurons.
+    % 'fvnum' is the number of output neurons at the last layer, the layer just before the output layer.
+    % 'ffb' is the biases of the output neurons.
+    % 'ffW' is the weights between the last layer and the output neurons. 
Note that the last layer is fully connected to the output layer, that's why the size of the weights is (onum * fvnum)
+    fvnum = prod(mapsize) * inputmaps;
+    onum = size(y, 1);
+
+    net.ffb = zeros(onum, 1);
+    net.ffW = (rand(onum, fvnum) - 0.5) * 2 * sqrt(6 / (onum + fvnum));
+end
diff --git a/SCNN/scnntest.m b/SCNN/scnntest.m
new file mode 100644
index 0000000..1eb1977
--- /dev/null
+++ b/SCNN/scnntest.m
@@ -0,0 +1,9 @@
+function [er, bad] = scnntest(net, x, y)
+    % feedforward
+    net = scnnff(net, x);
+    [~, h] = max(net.o);
+    [~, a] = max(y);
+    bad = find(h ~= a);
+
+    er = numel(bad) / size(y, 2);
+end
diff --git a/SCNN/scnntrain.m b/SCNN/scnntrain.m
new file mode 100644
index 0000000..ebd4128
--- /dev/null
+++ b/SCNN/scnntrain.m
@@ -0,0 +1,35 @@
+function net = scnntrain(net, x, y, opts)
+    m = numel(x);
+    numbatches = m / opts.batchsize;
+    if rem(numbatches, 1) ~= 0
+        error('numbatches not integer');
+    end
+    net.rL = [];
+    wb = waitbar(0,'Training...(Sparse)');
+    sum_count = opts.numepochs * numbatches;
+    t_index = 1;
+    for i = 1 : opts.numepochs
+        disp(['epoch ' num2str(i) '/' num2str(opts.numepochs)]);
+        tic;
+        kk = randperm(m);
+        for l = 1 : numbatches
+
+            waitbar(t_index/sum_count,wb,...
+                sprintf('Training...Epoch:%d of %d BatchIndex%d of %d',i,opts.numepochs,l,numbatches));
+
+            batch_x = x(kk((l - 1) * opts.batchsize + 1 : l * opts.batchsize));
+            batch_y = y(:, kk((l - 1) * opts.batchsize + 1 : l * opts.batchsize));
+
+            net = scnnff(net, batch_x);
+            net = scnnbp(net, batch_y);
+            net = scnnapplygrads(net, opts);
+            if isempty(net.rL)
+                net.rL(1) = net.L;
+            end
+            net.rL(end + 1) = 0.99 * net.rL(end) + 0.01 * net.L;
+            t_index = t_index + 1;
+        end
+        toc;
+    end
+
+end
diff --git a/SCNN/spcell_add.m b/SCNN/spcell_add.m
new file mode 100644
index 0000000..4be2e9c
--- /dev/null
+++ b/SCNN/spcell_add.m
@@ -0,0 +1,17 @@
+function x = spcell_add(x, y)
+%{
+x,y,z type: array(x,y)>(n)
+%}
+if iscell(y) && iscell(x)
+    n = numel(x);
+    for i = 1:n
+        x{i} = x{i} + y{i};
+    end
+elseif isa(y,'double') && iscell(x)
+    x = spcell_ewop(x,@(u) u+y);
+elseif isa(x,'double') && iscell(y)
+    x = spcell_ewop(y,@(u) u+x);
+else
+    x = x + y;
+end
+end
\ No newline at end of file
diff --git a/SCNN/spcell_div.m b/SCNN/spcell_div.m
new file mode 100644
index 0000000..24b4bce
--- /dev/null
+++ b/SCNN/spcell_div.m
@@ -0,0 +1,25 @@
+function x = spcell_div(x, y)
+%{
+  Element-wise (./) division for spcell operands (comment translated from mojibake Chinese).
+%}
+    if iscell(y) && iscell(x)
+        n = numel(x);
+        for i = 1:n
+            x{i} = x{i} ./ y{i};
+        end
+    elseif isa(y,'double') && iscell(x)
+        % x = spcell_ewop(x,@(u) u./y);
+        n = numel(x);
+        for i = 1:n
+            x{i} = x{i}./y;
+        end
+    elseif isa(x,'double') && iscell(y)
+        n = numel(y);
+        for i = 1:n
+            y{i} = x./y{i};
+        end
+        x = y;
+    else
+        x = x./y;
+    end
+end
\ No newline at end of file
diff --git a/SCNN/spcell_ewop.m b/SCNN/spcell_ewop.m
new file mode 100644
index 0000000..c09bdd5
--- /dev/null
+++ b/SCNN/spcell_ewop.m
@@ -0,0 +1,9 @@
+function x = spcell_ewop(x, operation)
+%{
+  Apply the given element-wise operation to every cell (comment translated from mojibake Chinese).
+%}
+    n = numel(x);
+    for i = 1:n
+        x{i} = operation(x{i});
+    end
+end
\ No newline at end of file
diff --git a/SCNN/spcell_expand.m b/SCNN/spcell_expand.m
new file mode 100644
index 0000000..977996b
--- /dev/null
+++ b/SCNN/spcell_expand.m
@@ -0,0 +1,7 @@
+function x = spcell_expand(x, expand_param)
+    % x = spcell_ewop(x,@(u) expand(u,expand_param));
+    n = numel(x);
+    for i = 1:n
+        x{i} = expand(x{i},expand_param);
+    end
+end
\ No newline at end of file
diff --git a/SCNN/spcell_mul.m b/SCNN/spcell_mul.m
new file mode 100644
index 0000000..177e9cb
--- /dev/null
+++ b/SCNN/spcell_mul.m
@@ -0,0 +1,24 @@
+function x = spcell_mul(x, y)
+%{
+x,y,z type: array(x,y)>(n)
+%}
+if iscell(y) && iscell(x)
+    n = numel(x);
+    for i = 1:n
+        x{i} = x{i} .* y{i};
+    end
+elseif isa(y,'double') && iscell(x)
+    n = numel(x);
+    for i = 1:n
+        x{i} = x{i}.*y;
+    end
+elseif isa(x,'double') && iscell(y)
+    n = numel(y);
+    for i = 1:n
+        y{i} = y{i}.*x;
+    end
+    x = y;
+else
+    x = x .* y;
+end
+end
\ No newline at end of file
diff --git a/SCNN/spcell_sigm.m b/SCNN/spcell_sigm.m
new file mode 100644
index 0000000..0b9de2e
--- /dev/null
+++ b/SCNN/spcell_sigm.m
@@ -0,0 +1,7 @@
+function X = spcell_sigm(X)
+    % Sigmoid activation applied to every cell
+    n = numel(X);
+    for i = 1:n
+        X{i} = 1./(1+exp(-X{i}));
+    end
+end
\ No newline at end of file
diff --git a/SCNN/spcell_sub.m b/SCNN/spcell_sub.m
new file mode 100644
index 0000000..ef2f14a
--- /dev/null
+++ b/SCNN/spcell_sub.m
@@ -0,0 +1,24 @@
+function x = spcell_sub(x, y)
+%{
+x,y,z type: array(x,y)>(n)
+%}
+if iscell(y) && iscell(x)
+    n = numel(x);
+    for i = 1:n
+        x{i} = x{i} - y{i};
+    end
+elseif isa(y,'double') && iscell(x)
+    n = numel(x);
+    for i = 1:n
+        x{i} = x{i} - y;
+    end
+elseif isa(x,'double') && iscell(y)
+    n = numel(y);
+    for i = 1:n
+        y{i} = x - y{i}; % fixed: was y{i} - x, which negated scalar-minus-cell (e.g. spcell_sub(1,a) returned a-1, flipping the sigmoid-derivative sign in scnnbp)
+    end
+    x = y;
+else
+    x = x - y;
+end
+end
\ No newline at end of file
diff --git a/SCNN/spconv.m b/SCNN/spconv.m
new file mode 100644
index 0000000..f8e48c1
--- /dev/null
+++ b/SCNN/spconv.m
@@ -0,0 +1,19 @@
+function [ sparse_matrix_out ] = spconv( sparse_matrix_1, sparse_matrix_2, mode )
+    %SPCONV 2-D convolution of sparse matrices (comment translated from mojibake Chinese)
+    %   Suited to convolving a large sparse matrix with a small kernel matrix
+    kx = size(sparse_matrix_2,1);
+    ky = size(sparse_matrix_2,2);
+    sizeX = size(sparse_matrix_1,1) + size(sparse_matrix_2,1) - 1;
+    sizeY = size(sparse_matrix_1,2) + size(sparse_matrix_2,2) - 1;
+    sparse_matrix_out = sparse(sizeX,sizeY);
+    [x,y,c] = find(sparse_matrix_2);
+    [rx,ry,rc] = find(sparse_matrix_1);
+    N = numel(c);
+    for i = 1:N
+        sparse_matrix_out = sparse_matrix_out + sparse(rx + x(i) - 1, ry + y(i) - 1, rc*c(i), sizeX, sizeY);
+    end
+    if strcmp(mode,'valid')
+        sparse_matrix_out = sparse_matrix_out(kx:end-(kx-1),ky:end-(ky-1));
+    end
+end
+
diff --git a/SCNN/spconvn.m b/SCNN/spconvn.m
new file mode 100644
index 0000000..583a370
--- /dev/null
+++ b/SCNN/spconvn.m
@@ -0,0 +1,17 @@
+function [ sparse_array_out ] = spconvn( sparse_cellarray, sparse_matrix_kernel, mode )
+    if ~iscell(sparse_matrix_kernel)
+        n = numel(sparse_cellarray);
+        sparse_array_out = cell(size(sparse_cellarray));
+        for i = 1:n
+            sparse_array_out{i} = spconv(sparse_cellarray{i},sparse_matrix_kernel, mode);
+        end
+    else
+        assert(numel(sparse_cellarray)==numel(sparse_matrix_kernel),'unsupport conv operation.');
+        assert(strcmp(mode,'valid'),'Mode is not "valid".')
+        n = numel(sparse_cellarray);
+        sparse_array_out = spconv(sparse_cellarray{1},sparse_matrix_kernel{1}, mode);
+        for i = 2:n
+            sparse_array_out = sparse_array_out + spconv(sparse_cellarray{i},sparse_matrix_kernel{i}, mode);
+        end
+    end
+end
\ No newline at end of file
diff --git a/SCNN/sptflatten.m b/SCNN/sptflatten.m
new file mode 100644
index 0000000..9ed2c69
--- /dev/null
+++ b/SCNN/sptflatten.m
@@ -0,0 +1,9 @@
+function spout = sptflatten(X)
+splen = size(X{1});
+splen = splen(1)*splen(2);
+n = numel(X);
+spout = sparse(splen,n);
+for i = 1:n
+    spout(:,i) = reshape(X{i},splen,1);
+end
+end
\ No newline at end of file
diff --git a/SCNN/sptflipall.m b/SCNN/sptflipall.m
new file mode 100644
index 0000000..53f6aa7
--- /dev/null
+++ b/SCNN/sptflipall.m
@@ -0,0 +1,6 @@
+function X = sptflipall(X)
+X = flip(X);
+for i = 1:length(X)
+    X{i} = flip(flip(X{i},1),2);
+end
+end
\ No newline at end of file
diff --git a/SCNN/sptsize.m b/SCNN/sptsize.m
new file mode 100644
index 0000000..2dcfb58
--- /dev/null
+++ b/SCNN/sptsize.m
@@ -0,0 +1,3 @@
+function z = sptsize(cell_array)
+z = [size(cell_array{1}) numel(cell_array)];
+end
\ No newline at end of file
diff --git a/SCNN/sptsubsample.m b/SCNN/sptsubsample.m
new file mode 100644
index 0000000..2ccf034
--- /dev/null
+++ b/SCNN/sptsubsample.m
@@ -0,0 +1,6 @@
+function X = sptsubsample(X,scale)
+n = numel(X);
+for i = 1:n
+    m = X{i};
+    X{i} = m(1:scale:end,1:scale:end);
+end
\ No newline at end of file
diff --git a/SCNN/sptsum.m b/SCNN/sptsum.m
new file mode 100644
index 0000000..e972edf
--- /dev/null
+++ b/SCNN/sptsum.m
@@ -0,0 +1,8 @@
+function sum_value = sptsum(spcell)
+sum_value = 0;
+n = numel(spcell);
+for i = 1:n
+    m = spcell{i};
+    sum_value = sum_value + sum(m(:));
+end
+end
\ No newline at end of file
diff --git a/SCNN/sptzeros.m b/SCNN/sptzeros.m
new file mode 100644
index 0000000..77e66db
--- /dev/null
+++ b/SCNN/sptzeros.m
@@ -0,0 +1,8 @@
+function z = sptzeros(cell_size)
+assert(numel(cell_size)==3,'shape of size!=3');
+n = cell_size(3);
+z = cell(n,1);
+for i = 1:n
+    z{i} = sparse(cell_size(1),cell_size(2));
+end
+end
\ No newline at end of file
diff --git a/SCNN/tensor2spcell.m b/SCNN/tensor2spcell.m
new file mode 100644
index 0000000..c01e876
--- /dev/null
+++ b/SCNN/tensor2spcell.m
@@ -0,0 +1,7 @@
+function spcell = tensor2spcell(T)
+n = size(T,3);
+spcell = cell(1,n);
+for i = 1:n
+    spcell{i} = sparse(T(:,:,i));
+end
+end
\ No newline at end of file
diff --git a/tests/test_example_SCNN.m b/tests/test_example_SCNN.m
new file mode 100644
index 0000000..e6e33cc
--- /dev/null
+++ b/tests/test_example_SCNN.m
@@ -0,0 +1,37 @@
+% function test_example_SCNN
+load mnist_uint8;
+
+train_x = double(reshape(train_x',28,28,60000))/255;
+train_x_sp = tensor2spcell(train_x);
+test_x = double(reshape(test_x',28,28,10000))/255;
+test_x_sp = tensor2spcell(test_x);
+train_y = 
double(train_y');
+test_y = double(test_y');
+
+%% ex1 Train a 6c-2s-12c-2s Convolutional neural network
+%will run 1 epoch in about 200 second and get around 11% error.
+%With 100 epochs you'll get around 1.2% error
+
+rand('state',0)
+
+scnn.layers = {
+    struct('type', 'i') %input layer
+    struct('type', 'c', 'outputmaps', 6, 'kernelsize', 5) %convolution layer
+    struct('type', 's', 'scale', 2) %sub sampling layer
+    struct('type', 'c', 'outputmaps', 12, 'kernelsize', 5) %convolution layer
+    struct('type', 's', 'scale', 2) %subsampling layer
+};
+
+
+opts.alpha = 1;
+opts.batchsize = 50;
+opts.numepochs = 1;
+
+scnn = scnnsetup(scnn, train_x_sp, train_y);
+scnn = scnntrain(scnn, train_x_sp, train_y, opts);
+
+[er, bad] = scnntest(scnn, test_x_sp, test_y);
+
+%plot mean squared error
+figure; plot(scnn.rL);
+assert(er<0.12, 'Too big error');