@@ -72,8 +72,8 @@ Inplace-modify parameters before computing the relevance.
@inline modify_param!(rule, param) = nothing  # general fallback

# Useful presets:
- modify_param!(::Val{:mask_positive}, p) = (p .= max.(zero(eltype(p)), p); return nothing)
- modify_param!(::Val{:mask_negative}, p) = (p .= min.(zero(eltype(p)), p); return nothing)
+ modify_param!(::Val{:mask_positive}, p) = p .= max.(zero(eltype(p)), p)
+ modify_param!(::Val{:mask_negative}, p) = p .= min.(zero(eltype(p)), p)

# Internal wrapper functions for bias-free layers.
@inline modify_bias!(rule::R, b) where {R} = modify_param!(rule, b)
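A quick usage sketch of the new presets (illustrative only, not code from this commit): `modify_param!` mutates the parameter array in place, keeping only entries of one sign.

    p = Float32[-1.0, 0.5, 2.0]
    modify_param!(Val(:mask_positive), p)  # zero out the negative entries in place
    # p == Float32[0.0, 0.5, 2.0]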
@@ -101,18 +101,29 @@ end
"""
    ZeroRule()

- Constructor for LRP-0 rule. Commonly used on upper layers.
+ LRP-0 rule. Commonly used on upper layers.
+
+ # References
+ [1]: S. Bach et al., On Pixel-Wise Explanations for Non-Linear Classifier Decisions by
+ Layer-Wise Relevance Propagation
"""
struct ZeroRule <: AbstractLRPRule end
@inline check_compat(::ZeroRule, layer) = nothing

+ # Optimization to save allocations since weights don't need to be reset:
+ get_layer_resetter(::ZeroRule, layer) = Returns(nothing)
+
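Why `Returns(nothing)` saves allocations: `Returns` (Base Julia ≥ 1.7) builds a callable that ignores its arguments and always returns the wrapped value. The sketch below contrasts it with what a generic resetter might look like; the generic version is an assumption for illustration, not code from this package.

    # Hypothetical generic resetter: back up parameters, return a closure restoring them.
    function generic_resetter(layer)
        W, b = copy(layer.weight), copy(layer.bias)
        return () -> (layer.weight .= W; layer.bias .= b)
    end
    # ZeroRule never modifies parameters, so Returns(nothing) skips both copies;
    # calling it is a no-op: Returns(nothing)() === nothing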
"""
110
117
EpsilonRule([ϵ=1.0f-6])
111
118
112
- Constructor for LRP-``ϵ`` rule. Commonly used on middle layers.
119
+ LRP-``ϵ`` rule. Commonly used on middle layers.
113
120
114
121
Arguments:
115
122
- `ϵ`: Optional stabilization parameter, defaults to `1f-6`.
123
+
124
+ # References
125
+ [1]: S. Bach et al., On Pixel-Wise Explanations for Non-Linear Classifier Decisions by
126
+ Layer-Wise Relevance Propagation
116
127
"""
struct EpsilonRule{T} <: AbstractLRPRule
    ϵ::T
@@ -121,13 +132,19 @@ end
modify_denominator(r::EpsilonRule, d) = stabilize_denom(d, r.ϵ)
@inline check_compat(::EpsilonRule, layer) = nothing

+ # Optimization to save allocations since weights don't need to be reset:
+ get_layer_resetter(::EpsilonRule, layer) = Returns(nothing)
+
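For context: `stabilize_denom` itself is outside this diff. Its role is to keep near-zero denominators from blowing up the propagated relevance; a plausible sketch of such a helper (an assumption, not the package's actual definition) adds ϵ with the sign of the denominator:

    # Assumed behaviour only — the real stabilize_denom may differ:
    stabilize_denom_sketch(d, ϵ) = d .+ ifelse.(d .>= 0, ϵ, -ϵ)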
"""
    GammaRule([γ=0.25])

- Constructor for LRP-``γ`` rule. Commonly used on lower layers.
+ LRP-``γ`` rule. Commonly used on lower layers.

Arguments:
- - `γ`: Optional multiplier for added positive weights, defaults to 0.25.
+ - `γ`: Optional multiplier for added positive weights, defaults to `0.25`.
+
+ # References
+ [1]: G. Montavon et al., Layer-Wise Relevance Propagation: An Overview
"""
struct GammaRule{T} <: AbstractLRPRule
    γ::T
@@ -140,15 +157,34 @@ function modify_param!(r::GammaRule, param::AbstractArray{T}) where {T}
end
@inline check_compat(rule::GammaRule, layer) = require_weight_and_bias(rule, layer)
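Only the signature (in the hunk header above) and the closing `end` of `modify_param!` for `GammaRule` are visible in this hunk. Based on the cited LRP overview, the rule strengthens positive weights by a factor γ; the following is a hedged sketch of that transform, not the exact body from this commit:

    # LRP-γ parameter modification, w ← w + γ·max(0, w):
    function modify_param_gamma_sketch!(γ, param::AbstractArray{T}) where {T}
        param .+= convert(T, γ) .* max.(zero(T), param)
        return nothing
    end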
+ """
+     PassRule()
+
+ Pass-through rule. Passes relevance through to the lower layer.
+ Supports reshaping layers.
+ """
+ struct PassRule <: AbstractLRPRule end
+ function lrp!(Rₖ, ::PassRule, layer, aₖ, Rₖ₊₁)
+     if size(aₖ) == size(Rₖ₊₁)
+         Rₖ .= Rₖ₊₁
+         return nothing
+     end
+     Rₖ .= reshape(Rₖ₊₁, size(aₖ))
+     return nothing
+ end
+ # No extra checks needed, as the reshaping operation will throw an error if the layer isn't compatible:
+ @inline check_compat(::PassRule, layer) = nothing
+
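A small usage sketch of the new `PassRule` (illustrative only; assumes Flux is loaded, and note that the `layer` argument is ignored by this rule):

    aₖ   = rand(Float32, 2, 2, 1, 1)   # activation entering a flatten layer
    Rₖ₊₁ = rand(Float32, 4, 1)         # relevance arriving from the flattened output
    Rₖ   = similar(aₖ)
    lrp!(Rₖ, PassRule(), Flux.flatten, aₖ, Rₖ₊₁)
    # Rₖ now holds Rₖ₊₁ reshaped to (2, 2, 1, 1); total relevance is unchanged.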
"""
144
178
ZBoxRule(low, high)
145
179
146
- Constructor for LRP-``z^{\\ mathcal{B}}``-rule.
147
- Commonly used on the first layer for pixel input.
180
+ LRP-``z^{\\ mathcal{B}}``-rule. Commonly used on the first layer for pixel input.
148
181
149
182
The parameters `low` and `high` should be set to the lower and upper bounds of the input features,
150
183
e.g. `0.0` and `1.0` for raw image data.
151
184
It is also possible to provide two arrays of that match the input size.
185
+
186
+ ## References
187
+ [1]: G. Montavon et al., Explaining nonlinear classification decisions with deep Taylor decomposition
152
188
"""
struct ZBoxRule{T} <: AbstractLRPRule
    low::T
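A usage sketch for the bounds described in the docstring (illustrative values, not part of the commit):

    # Scalar bounds for raw image data scaled to [0, 1]:
    rule = ZBoxRule(0.0f0, 1.0f0)
    # Per-feature bounds via arrays matching the input size, e.g. a WHCN image batch:
    low  = zeros(Float32, 224, 224, 3, 1)
    high = ones(Float32, 224, 224, 3, 1)
    rule_arrays = ZBoxRule(low, high)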
@@ -194,6 +230,13 @@ for R in (ZeroRule, EpsilonRule)
    @eval lrp!(Rₖ, ::$R, ::ReshapingLayer, aₖ, Rₖ₊₁) = (Rₖ .= reshape(Rₖ₊₁, size(aₖ)))
end

+ # Special cases for rules that don't modify params for extra performance:
+ for R in (ZeroRule, EpsilonRule)
+     for L in (DropoutLayer, ReshapingLayer)
+         @eval lrp!(Rₖ, ::$R, l::$L, aₖ, Rₖ₊₁) = lrp!(Rₖ, PassRule(), l, aₖ, Rₖ₊₁)
+     end
+ end
+
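The `@eval` loop above is a standard Julia metaprogramming pattern: it stamps out one specialized `lrp!` method per (rule, layer type) pair at load time instead of writing them by hand. A minimal standalone sketch of the same pattern, with made-up names:

    struct A end
    struct B end
    for T in (A, B)
        @eval describe(::$T) = string("specialized for ", $T)
    end
    describe(A())  # "specialized for A"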
# Fast implementation for Dense layer using Tullio.jl's einsum notation:
for R in (ZeroRule, EpsilonRule, GammaRule)
    @eval function lrp!(Rₖ, rule::$R, layer::Dense, aₖ, Rₖ₊₁)