@@ -213,9 +213,9 @@ and all bias terms set to zero.
 # Definition
 Propagates relevance ``R^{k+1}`` at layer output to ``R^k`` at layer input according to
 ```math
-R_j^k = \\sum_i\\frac{1}{\\sum_l 1} R_i^{k+1} = \\frac{1}{n} \\sum_i R_i^{k+1}
+R_j^k = \\sum_i\\frac{1}{\\sum_l 1} R_i^{k+1} = \\sum_i \\frac{1}{n_i} R_i^{k+1}
 ```
-where ``n`` is the number of input neurons connected to the output neuron at index ``i``.
+where ``n_i`` is the number of input neurons connected to the output neuron at index ``i``.
 
 # References
 - $REF_LAPUSCHKIN_CLEVER_HANS
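For a fully connected `Dense` layer every output neuron sees all ``n`` input neurons, so ``n_i = n`` for every ``i`` and each input neuron receives the same share of the total output relevance. A minimal numeric sketch of the corrected formula (illustrative only, not part of the diff):

```julia
# FlatRule on a fully connected layer: every input neuron gets sum(Rₖ₊₁) / n.
Rₖ₊₁ = [0.2, 0.5, 0.3]         # relevance at the layer output
n    = 4                       # number of input neurons
Rₖ   = fill(sum(Rₖ₊₁) / n, n)  # [0.25, 0.25, 0.25, 0.25]
sum(Rₖ) ≈ sum(Rₖ₊₁)            # relevance is conserved: both sum to 1.0
```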
@@ -434,7 +434,7 @@ for R in (ZeroRule, EpsilonRule)
 end
 
 # Fast implementation for Dense layer using Tullio.jl's einsum notation:
-for R in (ZeroRule, EpsilonRule, GammaRule, WSquareRule, FlatRule)
+for R in (ZeroRule, EpsilonRule, GammaRule, WSquareRule)
     @eval function lrp!(Rₖ, rule::$R, layer::Dense, aₖ, Rₖ₊₁)
         reset! = get_layer_resetter(rule, layer)
         modify_layer!(rule, layer)
@@ -445,3 +445,10 @@ for R in (ZeroRule, EpsilonRule, GammaRule, WSquareRule, FlatRule)
         return nothing
     end
 end
+function lrp!(Rₖ, ::FlatRule, layer::Dense, aₖ, Rₖ₊₁)
+    n = size(Rₖ, 1)  # number of input neurons connected to each output neuron
+    for i in axes(Rₖ, 2)  # samples in batch
+        fill!(view(Rₖ, :, i), sum(view(Rₖ₊₁, :, i)) / n)
+    end
+    return nothing
+end
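Because the `FlatRule` result depends only on the output relevance and the number of input neurons, not on weights or activations, the dedicated method added above can skip the Tullio einsum path entirely. A rough usage sketch, assuming the internal `lrp!` and `FlatRule` are in scope and using an arbitrary illustrative layer size and batch:

```julia
using Flux: Dense

layer = Dense(4 => 3)        # 4 input neurons, 3 output neurons (weights unused by FlatRule)
aₖ    = rand(Float32, 4, 2)  # input activations for a batch of 2 (also unused)
Rₖ₊₁  = rand(Float32, 3, 2)  # relevance at the layer output
Rₖ    = similar(aₖ)          # preallocated buffer for relevance at the layer input

lrp!(Rₖ, FlatRule(), layer, aₖ, Rₖ₊₁)

# Each column of Rₖ is constant and column sums are preserved:
all(sum(Rₖ; dims=1) .≈ sum(Rₖ₊₁; dims=1))  # true
```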