Skip to content

Commit 6b7fcd7

Browse files
authored
Merge pull request #252 from NeuroBench/feature/act-sparsity-layer
Layer Wise Activation Sparsity
2 parents 6cfbdbc + 10057d5 commit 6b7fcd7

File tree

7 files changed

+102
-7
lines changed

7 files changed

+102
-7
lines changed

examples/nehar/benchmark.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,8 @@
99
ActivationSparsity,
1010
MembraneUpdates,
1111
SynapticOperations,
12-
ClassificationAccuracy
12+
ClassificationAccuracy,
13+
ActivationSparsityByLayer,
1314
)
1415
from neurobench.metrics.static import (
1516
ParameterCount,
@@ -47,7 +48,7 @@
4748

4849
# #
4950
static_metrics = [ParameterCount, Footprint, ConnectionSparsity]
50-
workload_metrics = [ActivationSparsity, MembraneUpdates, SynapticOperations, ClassificationAccuracy]
51+
workload_metrics = [ActivationSparsity, ActivationSparsityByLayer,MembraneUpdates, SynapticOperations, ClassificationAccuracy]
5152
# #
5253
benchmark = Benchmark(
5354
model, test_set_loader, [], postprocessors, [static_metrics, workload_metrics]

neurobench/hooks/neuron.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ class NeuronHook(ABC):
1010
1111
"""
1212

13-
def __init__(self, layer):
13+
def __init__(self, layer, name=None):
1414
"""
1515
Initializes the class.
1616
@@ -24,6 +24,7 @@ def __init__(self, layer):
2424
self.activation_inputs = []
2525
self.pre_fire_mem_potential = []
2626
self.post_fire_mem_potential = []
27+
self.name = name
2728
if layer is not None:
2829
self.hook = layer.register_forward_hook(self.hook_fn)
2930
self.hook_pre = layer.register_forward_pre_hook(self.pre_hook_fn)

neurobench/metrics/workload/__init__.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,12 +2,19 @@
22
from .activation_sparsity import ActivationSparsity
33
from .synaptic_operations import SynapticOperations
44
from .classification_accuracy import ClassificationAccuracy
5+
from .activation_sparsity_by_layer import ActivationSparsityByLayer
56
from .mse import MSE
67
from .smape import SMAPE
78
from .r2 import R2
89
from .coco_map import CocoMap
910

10-
__stateless__ = ["ClassificationAccuracy", "ActivationSparsity", "MSE", "SMAPE"]
11+
__stateless__ = [
12+
"ClassificationAccuracy",
13+
"ActivationSparsity",
14+
"MSE",
15+
"SMAPE",
16+
"ActivationSparsityByLayer",
17+
]
1118

1219
__stateful__ = ["MembraneUpdates", "SynapticOperations", "R2", "CocoMap"]
1320

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
from collections import defaultdict

import torch

from neurobench.metrics.abstract.workload_metric import AccumulatedMetric


class ActivationSparsityByLayer(AccumulatedMetric):
    """
    Layer-wise sparsity of model activations.

    For each named activation layer, sparsity is the number of zero
    activations over the total number of activations recorded by that
    layer's hook, accumulated over all timesteps and samples in data.
    """

    def __init__(self):
        """Initialize the ActivationSparsityByLayer metric."""
        # NOTE(review): the base AccumulatedMetric.__init__ is not invoked
        # here (matching the original code) — confirm the base needs none.
        self.reset()

    def reset(self):
        """Reset all accumulated per-layer counts."""
        # layer name -> last computed sparsity value
        self.layer_sparsity = defaultdict(float)
        # layer name -> total number of activation elements seen
        self.layer_neuro_num = defaultdict(int)
        # layer name -> total number of nonzero (spiking) activations seen
        self.layer_spike_num = defaultdict(int)

    def __call__(self, model, preds, data):
        """
        Accumulate per-layer activation counts and return current sparsity.

        Args:
            model: A NeuroBenchModel whose ``activation_hooks`` record
                per-layer activation outputs; hooks without a ``name``
                are skipped since they cannot be attributed to a layer.
            preds: A tensor of model predictions (unused).
            data: A tuple of data and labels (unused).

        Returns:
            dict: Mapping of layer name to activation sparsity in [0, 1].

        """
        for hook in model.activation_hooks:
            name = hook.name
            if name is None:
                # Unnamed hooks cannot be reported per layer.
                continue
            for output in hook.activation_outputs:
                # Quantized tensors must be dequantized before counting.
                tensor = output.dequantize() if output.is_quantized else output
                self.layer_spike_num[name] += torch.count_nonzero(tensor).item()
                self.layer_neuro_num[name] += torch.numel(tensor)

        return self.compute()

    def compute(self):
        """Compute and return the activation sparsity for each layer."""
        for key in self.layer_neuro_num:
            total = self.layer_neuro_num[key]
            nonzero = self.layer_spike_num[key]
            # Guard against layers that recorded no activations.
            self.layer_sparsity[key] = (total - nonzero) / total if total else 0.0
        return dict(self.layer_sparsity)

neurobench/models/neurobench_model.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -79,9 +79,9 @@ def is_activation_layer(module):
7979
def find_activation_layers(module):
8080
"""Recursively find activation layers in a module."""
8181
layers = []
82-
for child in module.children():
82+
for child_name, child in module.named_children():
8383
if is_activation_layer(child):
84-
layers.append(child)
84+
layers.append({"layer_name": child_name, "layer": child})
8585
elif list(child.children()): # Check for nested submodules
8686
layers.extend(find_activation_layers(child))
8787
return layers
@@ -135,7 +135,9 @@ def register_hooks(self):
135135

136136
# Registered activation hooks
137137
for layer in self.activation_layers():
138-
self.activation_hooks.append(NeuronHook(layer))
138+
layer_name = layer["layer_name"]
139+
layer = layer["layer"]
140+
self.activation_hooks.append(NeuronHook(layer, layer_name))
139141

140142
for layer in self.connection_layers():
141143
self.connection_hooks.append(LayerHook(layer))

tests/models/model_list.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,13 @@
11
import torch.nn as nn
22
import snntorch as snn
3+
from torch import manual_seed
34
import snntorch.surrogate as surrogate
45

56
beta = 0.9
7+
manual_seed(0)
68
spike_grad = surrogate.fast_sigmoid()
79

10+
811
net = nn.Sequential(
912
nn.Flatten(),
1013
nn.Linear(20, 256),

tests/test_metrics.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
ActivationSparsity,
99
SynapticOperations,
1010
MembraneUpdates,
11+
ActivationSparsityByLayer,
1112
)
1213
from neurobench.metrics.static import (
1314
Footprint,
@@ -104,6 +105,7 @@ def setUp(self):
104105
self.activation_sparsity = ActivationSparsity()
105106
self.synaptic_operations = SynapticOperations()
106107
self.mem_updates = MembraneUpdates()
108+
self.activation_sparsity_by_layer = ActivationSparsityByLayer()
107109

108110
def test_classification_accuracy(self):
109111
model = SNNTorchModel(self.dummy_net)
@@ -426,9 +428,24 @@ def test_membrane_potential_updates(self):
426428

427429
out = model(inp)
428430
tot_mem_updates = self.mem_updates(model, out, (inp, 0))
431+
self.mem_updates.reset()
429432

430433
self.assertEqual(tot_mem_updates, 50)
431434

435+
def test_activation_sparsity_by_layer(self):
436+
437+
inp = torch.ones(5, 10, 20) # batch size, time steps, input size
438+
439+
model = SNNTorchModel(self.net_snn)
440+
441+
model.register_hooks()
442+
443+
out = model(inp)
444+
act_sparsity_by_layer = self.activation_sparsity_by_layer(model, out, (inp, 0))
445+
self.activation_sparsity_by_layer.reset()
446+
447+
self.assertEqual(act_sparsity_by_layer["1"], 0.96)
448+
432449

433450
# TODO: refactor this metric if needed
434451
# def test_neuron_update_metric():

0 commit comments

Comments
 (0)