Skip to content

Commit d22e829

Browse files
epaxondependabot[bot]monkin77PhilippPlankmgkwill
authored
Graded relu (#860)
* GradedReluVec process and tests. * changed test to use thresh not 0. * removed duplicate docstring line. * Bump tornado from 6.4 to 6.4.1 (#863) Bumps [tornado](https://github.com/tornadoweb/tornado) from 6.4 to 6.4.1. - [Changelog](https://github.com/tornadoweb/tornado/blob/master/docs/releases.rst) - [Commits](tornadoweb/tornado@v6.4.0...v6.4.1) --- updated-dependencies: - dependency-name: tornado dependency-type: indirect ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Fix: subthreshold dynamics equation of refractory lif (#842) * Fix: subthreshold dynamics equation of refractory lif * Fix: RefractoryLIF unit test to test the voltage dynamics * Bump urllib3 from 2.2.1 to 2.2.2 (#865) Bumps [urllib3](https://github.com/urllib3/urllib3) from 2.2.1 to 2.2.2. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst) - [Commits](urllib3/urllib3@2.2.1...2.2.2) --- updated-dependencies: - dependency-name: urllib3 dependency-type: indirect ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: PhilippPlank <32519998+PhilippPlank@users.noreply.github.com> --------- Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: João Gil <monkinsmurf@gmail.com> Co-authored-by: PhilippPlank <32519998+PhilippPlank@users.noreply.github.com> Co-authored-by: Marcus G K Williams <168222+mgkwill@users.noreply.github.com>
1 parent a82abc1 commit d22e829

File tree

3 files changed

+166
-3
lines changed

3 files changed

+166
-3
lines changed

src/lava/proc/graded/models.py

Lines changed: 39 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,8 @@
1111
from lava.magma.core.decorator import implements, requires, tag
1212
from lava.magma.core.model.py.model import PyLoihiProcessModel
1313

14-
from lava.proc.graded.process import GradedVec, NormVecDelay, InvSqrt
14+
from lava.proc.graded.process import (GradedVec, GradedReluVec,
15+
NormVecDelay, InvSqrt)
1516

1617

1718
class AbstractGradedVecModel(PyLoihiProcessModel):
@@ -51,6 +52,43 @@ class PyGradedVecModelFixed(AbstractGradedVecModel):
5152
exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
5253

5354

55+
class AbstractGradedReluVecModel(PyLoihiProcessModel):
    """Implementation of GradedReluVec.

    Accumulates graded input each timestep and retransmits it as a graded
    spike wherever the accumulated value exceeds the threshold ``vth``
    (a ReLU-like gating: sub-threshold values are zeroed, supra-threshold
    values pass through unchanged). State is cleared every timestep, so
    the layer has no dynamics across steps.
    """

    # Concrete port/variable types are supplied by fixed/floating-point
    # subclasses via LavaPyType declarations.
    a_in = None
    s_out = None
    v = None
    vth = None
    exp = None

    def run_spk(self) -> None:
        """The run function that performs the actual computation during
        execution orchestrated by a PyLoihiProcessModel using the
        LoihiProtocol.
        """
        a_in_data = self.a_in.recv()
        self.v += a_in_data

        # One-sided threshold: only v > vth spikes (contrast with
        # GradedVec, which thresholds on |v| > vth).
        is_spike = self.v > self.vth
        sp_out = self.v * is_spike

        # Reset accumulator — the layer is stateless across timesteps.
        self.v[:] = 0

        self.s_out.send(sp_out)
78+
79+
80+
@implements(proc=GradedReluVec, protocol=LoihiProtocol)
@requires(CPU)
@tag('fixed_pt')
class PyGradedReluVecModelFixed(AbstractGradedReluVecModel):
    """Fixed point implementation of GradedReluVec"""
    # 24-bit signed integer ports and state, mirroring Loihi 2 fixed-point
    # message precision.
    a_in = LavaPyType(PyInPort.VEC_DENSE, np.int32, precision=24)
    s_out = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=24)
    vth: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
    v: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
    exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
90+
91+
5492
@implements(proc=NormVecDelay, protocol=LoihiProtocol)
5593
@requires(CPU)
5694
@tag('fixed_pt')

src/lava/proc/graded/process.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,45 @@ class GradedVec(AbstractProcess):
3434
Graded spike vector layer. Transmits accumulated input as
3535
graded spike with no dynamics.
3636
37+
v[t] = a_in
38+
s_out = v[t] * (|v[t]| > vth)
39+
40+
Parameters
41+
----------
42+
shape: tuple(int)
43+
number and topology of neurons
44+
vth: int
45+
threshold for spiking
46+
exp: int
47+
fixed point base
48+
"""
49+
50+
def __init__(
        self,
        shape: ty.Tuple[int, ...],
        vth: ty.Optional[int] = 1,
        exp: ty.Optional[int] = 0) -> None:
    """Initialize the graded-spike vector process.

    Parameters
    ----------
    shape : tuple(int)
        Number and topology of neurons.
    vth : int, optional
        Threshold for spiking (default 1).
    exp : int, optional
        Fixed point base (default 0).
    """
    super().__init__(shape=shape)

    # Graded (multi-bit) input and output ports, one value per neuron.
    self.a_in = InPort(shape=shape)
    self.s_out = OutPort(shape=shape)

    # Per-neuron accumulator voltage; vth/exp are shared scalars.
    self.v = Var(shape=shape, init=0)
    self.vth = Var(shape=(1,), init=vth)
    self.exp = Var(shape=(1,), init=exp)
64+
65+
@property
def shape(self) -> ty.Tuple[int, ...]:
    """Return shape of the Process."""
    # Shape was stored in proc_params by AbstractProcess.__init__.
    return self.proc_params['shape']
69+
70+
71+
class GradedReluVec(AbstractProcess):
72+
"""GradedReluVec
73+
Graded spike vector layer. Transmits accumulated input as
74+
graded spike with no dynamics.
75+
3776
v[t] = a_in
3877
s_out = v[t] * (v[t] > vth)
3978

tests/lava/proc/graded/test_graded.py

Lines changed: 88 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,8 @@
66
import numpy as np
77
from scipy.sparse import csr_matrix
88

9-
from lava.proc.graded.process import GradedVec, NormVecDelay, InvSqrt
9+
from lava.proc.graded.process import (GradedVec, GradedReluVec,
10+
NormVecDelay, InvSqrt)
1011
from lava.proc.graded.models import inv_sqrt
1112
from lava.proc.dense.process import Dense
1213
from lava.proc.sparse.process import Sparse
@@ -59,7 +60,7 @@ def test_gradedvec_dot_dense(self):
5960
self.assertTrue(np.all(out_data[:, (3, 7)] == expected_out[:, (2, 6)]))
6061

6162
def test_gradedvec_dot_sparse(self):
62-
"""Tests that GradedVec and Dense computes dot product."""
63+
"""Tests that GradedVec and Sparse computes dot product"""
6364
num_steps = 10
6465
v_thresh = 1
6566

@@ -99,6 +100,91 @@ def test_gradedvec_dot_sparse(self):
99100
self.assertTrue(np.all(out_data[:, (3, 7)] == expected_out[:, (2, 6)]))
100101

101102

103+
class TestGradedReluVecProc(unittest.TestCase):
    """Tests for GradedReluVec"""

    def test_gradedreluvec_dot_dense(self):
        """Tests that GradedReluVec and Dense computes dot product"""
        num_steps = 10
        v_thresh = 1

        # 10 output neurons fed by a single input channel; weights span
        # negative and positive values so only some neurons cross vth.
        weights1 = np.zeros((10, 1))
        weights1[:, 0] = (np.arange(10) - 5) * 0.2

        # Two graded input pulses at timesteps 2 and 6.
        inp_data = np.zeros((weights1.shape[1], num_steps))
        inp_data[:, 2] = 1000
        inp_data[:, 6] = 20000

        # Scale weights into fixed-point; Dense undoes the scaling via
        # a negative weight_exp.
        weight_exp = 7
        weights1 *= 2**weight_exp
        weights1 = weights1.astype('int')

        dense1 = Dense(weights=weights1, num_message_bits=24,
                       weight_exp=-weight_exp)
        vec1 = GradedReluVec(shape=(weights1.shape[0],),
                             vth=v_thresh)

        # Source feeds the input spikes; sink records the graded output.
        generator = io.source.RingBuffer(data=inp_data)
        logger = io.sink.RingBuffer(shape=(weights1.shape[0],),
                                    buffer=num_steps)

        generator.s_out.connect(dense1.s_in)
        dense1.a_out.connect(vec1.a_in)
        vec1.s_out.connect(logger.a_in)

        vec1.run(condition=RunSteps(num_steps=num_steps),
                 run_cfg=Loihi2SimCfg(select_tag='fixed_pt'))
        out_data = logger.data.get().astype('int')
        vec1.stop()

        # Expected result reproduces the fixed-point rounding: weights
        # are truncated to even values, then the scaled product is
        # floored and ReLU-thresholded at v_thresh.
        ww = np.floor(weights1 / 2) * 2
        expected_out = np.floor((ww @ inp_data) / 2**weight_exp)
        expected_out *= expected_out > v_thresh

        # Output lags input by one timestep (pulses at t=2,6 appear at
        # t=3,7).
        self.assertTrue(np.all(out_data[:, (3, 7)] == expected_out[:, (2, 6)]))

    def test_gradedreluvec_dot_sparse(self):
        """Tests that GradedReluVec and Sparse computes dot product"""
        num_steps = 10
        v_thresh = 1

        # Same setup as the dense test, but connectivity goes through a
        # Sparse process built from a CSR matrix.
        weights1 = np.zeros((10, 1))
        weights1[:, 0] = (np.arange(10) - 5) * 0.2

        inp_data = np.zeros((weights1.shape[1], num_steps))
        inp_data[:, 2] = 1000
        inp_data[:, 6] = 20000

        weight_exp = 7
        weights1 *= 2**weight_exp
        weights1 = weights1.astype('int')

        sparse1 = Sparse(weights=csr_matrix(weights1),
                         num_message_bits=24,
                         weight_exp=-weight_exp)
        vec1 = GradedReluVec(shape=(weights1.shape[0],),
                             vth=v_thresh)

        generator = io.source.RingBuffer(data=inp_data)
        logger = io.sink.RingBuffer(shape=(weights1.shape[0],),
                                    buffer=num_steps)

        generator.s_out.connect(sparse1.s_in)
        sparse1.a_out.connect(vec1.a_in)
        vec1.s_out.connect(logger.a_in)

        vec1.run(condition=RunSteps(num_steps=num_steps),
                 run_cfg=Loihi2SimCfg(select_tag='fixed_pt'))
        out_data = logger.data.get().astype('int')
        vec1.stop()

        # Same fixed-point reference computation as the dense test;
        # Sparse must produce identical results.
        ww = np.floor(weights1 / 2) * 2
        expected_out = np.floor((ww @ inp_data) / 2**weight_exp)
        expected_out *= expected_out > v_thresh

        self.assertTrue(np.all(out_data[:, (3, 7)] == expected_out[:, (2, 6)]))
186+
187+
102188
class TestInvSqrtProc(unittest.TestCase):
103189
"""Tests for inverse square process."""
104190

0 commit comments

Comments
 (0)