@@ -156,6 +156,29 @@ function modify_param!(r::GammaRule, param::AbstractArray{T}) where {T}
     return nothing
 end
 
+"""
+    WSquareRule()
+
+LRP-``W^2`` rule. Commonly used on the first layer when values are unbounded.
+
+# References
+[1]: G. Montavon et al., Explaining nonlinear classification decisions with deep Taylor decomposition
+"""
+struct WSquareRule <: AbstractLRPRule end
+modify_param!(::WSquareRule, p) = p .^= 2
+modify_input(::WSquareRule, input) = ones_like(input)
+
+"""
+    FlatRule()
+
+LRP-Flat rule. Similar to the [`WSquareRule`](@ref), but with all parameters set to one.
+
+# References
+[1]: S. Lapuschkin et al., Unmasking Clever Hans predictors and assessing what machines really learn
+"""
+struct FlatRule <: AbstractLRPRule end
+modify_param!(::FlatRule, p) = fill!(p, 1)
+modify_input(::FlatRule, input) = ones_like(input)
 
 """
     PassRule()
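For intuition, here is a minimal standalone sketch of the redistribution the two new rules perform on a `Dense` layer, written outside the package's `lrp!` machinery. The names `W`, `R_out`, `R_wsquare`, and `R_flat` are made-up illustrative values, and bias terms are ignored for simplicity:

```julia
# Hypothetical standalone sketch of WSquareRule and FlatRule on one Dense
# layer with output = W * x (bias ignored for simplicity).
W = randn(3, 5)          # weights: 3 outputs, 5 inputs
R_out = [0.2, 0.5, 0.3]  # relevance arriving at the three outputs

# WSquareRule squares the parameters and feeds in an input of ones,
# i.e. Rᵢ = Σⱼ (wⱼᵢ² / Σᵢ′ wⱼᵢ′²) Rⱼ:
R_wsquare = transpose(W .^ 2) * (R_out ./ vec(sum(W .^ 2; dims=2)))

# FlatRule additionally sets the parameters to one, so each output's
# relevance is spread uniformly over all n inputs: Rᵢ = Σⱼ Rⱼ / n:
R_flat = fill(sum(R_out) / size(W, 2), size(W, 2))

# Both rules conserve the total relevance:
@assert sum(R_wsquare) ≈ sum(R_out)
@assert sum(R_flat) ≈ sum(R_out)
```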
@@ -238,7 +261,7 @@ for R in (ZeroRule, EpsilonRule)
 end
 
 # Fast implementation for Dense layer using Tullio.jl's einsum notation:
-for R in (ZeroRule, EpsilonRule, GammaRule)
+for R in (ZeroRule, EpsilonRule, GammaRule, WSquareRule, FlatRule)
     @eval function lrp!(Rₖ, rule::$R, layer::Dense, aₖ, Rₖ₊₁)
         reset! = get_layer_resetter(rule, layer)
         modify_layer!(rule, layer)
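For reference, a hedged sketch of the kind of einsum such a generated method evaluates; this is not the package's exact generated code, and `dense_lrp` is a hypothetical helper. Under the `ZeroRule`, Rᵢ = aᵢ Σⱼ wⱼᵢ Rⱼ / zⱼ with zⱼ = Σᵢ wⱼᵢ aᵢ + bⱼ; the other rules reuse the same einsum after `modify_layer!` and `modify_input` have rewritten the weights, bias, and input as defined above:

```julia
using Tullio

# Hypothetical helper mirroring the fast Dense-layer LRP pass:
function dense_lrp(W, b, aₖ, Rₖ₊₁)
    z = W * aₖ .+ b  # (possibly rule-modified) pre-activations zⱼ
    @tullio Rₖ[i] := aₖ[i] * W[j, i] * Rₖ₊₁[j] / z[j]  # sum over outputs j
    return Rₖ
end

W, b = randn(3, 5), zeros(3)
aₖ, Rₖ₊₁ = rand(5), rand(3)
Rₖ = dense_lrp(W, b, aₖ, Rₖ₊₁)
@assert sum(Rₖ) ≈ sum(Rₖ₊₁)  # with zero bias, relevance is conserved
```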