Skip to content

Commit 65aec82

Browse files
committed
Appending Neural Stack API
- Added base class GenericNeuralStack
- Added LazyNeuralStack for lazily spawned layers
1 parent fbb74e0 commit 65aec82

File tree

3 files changed

+111
-10
lines changed

3 files changed

+111
-10
lines changed
Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
package io.github.mandar2812.dynaml.models.neuralnets
2+
3+
import io.github.mandar2812.dynaml.graph.NeuralGraph
4+
5+
/**
  * @author mandar2812 date 17/04/2017.
  *
  * Base class for Neural Computation Stack
  * implementations. Holds an ordered collection of
  * [[NeuralLayer]] instances and threads activations
  * through them in sequence.
  * */
abstract class GenericNeuralStack[
P, I, T <: Traversable[NeuralLayer[P, I, I]]
](elements: T) extends NeuralGraph[T, I, I] {

  self =>

  // The underlying ordered collection of layers.
  override protected val g: T = elements

  /**
    * Run a single input through every layer, returning
    * only the final (output layer) activation.
    * */
  override val forwardPass: (I) => I =
    (input: I) => g.foldLeft(input)((activation, lyr) => lyr.forward(activation))

  // Expose the layer collection to collaborating stacks (e.g. ++).
  def _layers = g

  /**
    * Run a single input through the stack, keeping the
    * activation produced after each layer (first element
    * is the input itself, per scanLeft semantics).
    * */
  def forwardPropagate(x: I): Traversable[I] =
    g.scanLeft(x)((activation, lyr) => lyr.forward(activation))

  /**
    * Batch variant of [[forwardPropagate]]: every element is the
    * whole batch's activations after the corresponding layer.
    * */
  def forwardPropagateBatch[G <: Traversable[I]](d: G): Traversable[G] =
    g.scanLeft(d)((batch, lyr) => lyr.forward(batch))

  /**
    * Batch variant of [[forwardPass]]: only the output layer's
    * activations for the whole batch are returned.
    * */
  def forwardPassBatch[G <: Traversable[I]](d: G): G =
    g.foldLeft(d)((batch, lyr) => lyr.forward(batch))

  /**
    * Slice the stack according to a range of layer indices.
    * */
  def apply(r: Range): GenericNeuralStack[P, I, T]

  /**
    * Append another computation stack to the end of the
    * current one.
    * */
  def ++[G <: Traversable[NeuralLayer[P, I, I]]](otherStack: GenericNeuralStack[P, I, G]): GenericNeuralStack[P, I, T]

  /**
    * Append a single computation layer to the stack.
    * */
  def :+(computationLayer: NeuralLayer[P, I, I]): GenericNeuralStack[P, I, T]

}
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
package io.github.mandar2812.dynaml.models.neuralnets
2+
3+
4+
/**
  * @author mandar2812 date: 17/04/2017.
  *
  * A computation stack whose layers are lazily computed:
  * the backing [[Stream]] only evaluates a layer when it
  * is first traversed.
  * */
class LazyNeuralStack[P, I](elements: Stream[NeuralLayer[P, I, I]]) extends
  GenericNeuralStack[P, I, Stream[NeuralLayer[P, I, I]]](elements) {

  self =>

  /**
    * Slice the stack according to a range (both end points of
    * `r` are treated as inclusive layer indices).
    *
    * Fix: the original called `r.min`/`r.max` unconditionally,
    * which throws on an empty range (e.g. `0 until 0`); an empty
    * range now yields an empty stack instead.
    * */
  override def apply(r: Range): LazyNeuralStack[P, I] =
    if (r.isEmpty) new LazyNeuralStack[P, I](Stream.empty[NeuralLayer[P, I, I]])
    else new LazyNeuralStack[P, I](g.slice(r.min, r.max + 1))

  /**
    * Append another computation stack to the end of the
    * current one. The result remains a lazy stack.
    * */
  override def ++[G <: Traversable[NeuralLayer[P, I, I]]](
    otherStack: GenericNeuralStack[P, I, G]): LazyNeuralStack[P, I] =
    new LazyNeuralStack[P, I](self.g ++ otherStack._layers)

  /**
    * Append a single computation layer to the stack.
    * */
  override def :+(computationLayer: NeuralLayer[P, I, I]): LazyNeuralStack[P, I] =
    new LazyNeuralStack[P, I](self.g :+ computationLayer)
}
31+
32+
object LazyNeuralStack {

  /**
    * Construct a lazy stack of `num_layers` layers, where the
    * i-th layer is produced on demand by `layerFunc(i)`.
    * */
  def apply[P, I](
    layerFunc: (Int) => NeuralLayer[P, I, I],
    num_layers: Int): LazyNeuralStack[P, I] =
    new LazyNeuralStack[P, I](Stream.range(0, num_layers).map(layerFunc))
}

dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/neuralnets/NeuralStack.scala

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -7,35 +7,37 @@ import io.github.mandar2812.dynaml.pipes.DataPipe
77
/**
88
* A network, represented as a stack of [[NeuralLayer]] objects.
99
* */
10-
class NeuralStack[P, I](elements: NeuralLayer[P, I, I]*)
11-
extends NeuralGraph[Seq[NeuralLayer[P, I, I]], I, I] {
10+
class NeuralStack[P, I](elements: Seq[NeuralLayer[P, I, I]])
11+
extends GenericNeuralStack[P, I, Seq[NeuralLayer[P, I, I]]](elements) {
12+
13+
self =>
1214

1315
override protected val g: Seq[NeuralLayer[P, I, I]] = elements
1416

1517
val layerParameters: Seq[P] = g.map(_.parameters)
1618

17-
def _layers = g
19+
override def _layers = g
1820

1921
/**
2022
* Do a forward pass through the network outputting all the intermediate.
2123
* layer activations.
2224
* */
23-
def forwardPropagate(x: I): Seq[I] = g.scanLeft(x)((h, layer) => layer.forward(h))
25+
override def forwardPropagate(x: I): Seq[I] = g.scanLeft(x)((h, layer) => layer.forward(h))
2426

2527
/**
2628
* Do a forward pass through the network outputting only the output layer activations.
2729
* */
28-
val forwardPass: (I) => I = (x: I) => g.foldLeft(x)((h, layer) => layer.forward(h))
30+
override val forwardPass: (I) => I = (x: I) => g.foldLeft(x)((h, layer) => layer.forward(h))
2931

3032
/**
3133
* Batch version of [[forwardPropagate()]]
3234
* */
33-
def forwardPropagateBatch[T <: Traversable[I]](d: T): Seq[T] = g.scanLeft(d)((h, layer) => layer.forward(h))
35+
override def forwardPropagateBatch[T <: Traversable[I]](d: T): Seq[T] = g.scanLeft(d)((h, layer) => layer.forward(h))
3436

3537
/**
3638
* Batch version of [[forwardPass()]]
3739
* */
38-
def forwardPassBatch[T <: Traversable[I]](d: T): T = g.foldLeft(d)((h, layer) => layer.forward(h))
40+
override def forwardPassBatch[T <: Traversable[I]](d: T): T = g.foldLeft(d)((h, layer) => layer.forward(h))
3941

4042
/**
4143
* Slice the stack according to a range.
@@ -46,18 +48,20 @@ class NeuralStack[P, I](elements: NeuralLayer[P, I, I]*)
4648
* Append another computation stack to the end of the
4749
* current one.
4850
* */
49-
def ++(otherStack: NeuralStack[P, I]): NeuralStack[P, I] = NeuralStack(this.g ++ otherStack.g :_*)
51+
override def ++[T <: Traversable[NeuralLayer[P, I, I]]](otherStack: GenericNeuralStack[P, I, T]) =
52+
new NeuralStack(self.g ++ otherStack._layers)
5053

5154
/**
5255
* Append a single computation layer to the stack.
5356
* */
54-
def :+(computationLayer: NeuralLayer[P, I, I]): NeuralStack[P, I] = NeuralStack(this.g :+ computationLayer :_*)
57+
override def :+(computationLayer: NeuralLayer[P, I, I]): NeuralStack[P, I] =
58+
NeuralStack(self.g :+ computationLayer :_*)
5559

5660
}
5761

5862
object NeuralStack {
5963

60-
def apply[P, I](elements: NeuralLayer[P, I, I]*): NeuralStack[P, I] = new NeuralStack(elements:_*)
64+
def apply[P, I](elements: NeuralLayer[P, I, I]*): NeuralStack[P, I] = new NeuralStack(elements)
6165
}
6266

6367
/**

0 commit comments

Comments
 (0)