# https://adrhill.github.io/ExplainableAI.jl/stable/generated/advanced_lrp/#How-it-works-internally

"""
Abstract supertype of all LRP rules.

Concrete rules (e.g. `ZeroRule`, `WSquareRule`, `FlatRule` below) customize
relevance propagation by overloading hooks such as `modify_param!` and
`check_compat`; `lrp!` provides a generic autodiff-based fallback for rules
without a custom implementation.
"""
abstract type AbstractLRPRule end
# Bibliography
# Shared citation strings, interpolated into the rule docstrings below via `$`.
# NOTE: no leading space — the stray leading space previously doubled the gap
# after the "- " bullet wherever these were interpolated (e.g. "-  S. Bach…").
const REF_BACH_LRP = "S. Bach et al., *On Pixel-Wise Explanations for Non-Linear Classifier Decisions by Layer-Wise Relevance Propagation*"
const REF_LAPUSCHKIN_CLEVER_HANS = "S. Lapuschkin et al., *Unmasking Clever Hans predictors and assessing what machines really learn*"
const REF_MONTAVON_DTD = "G. Montavon et al., *Explaining Nonlinear Classification Decisions with Deep Taylor Decomposition*"
const REF_MONTAVON_OVERVIEW = "G. Montavon et al., *Layer-Wise Relevance Propagation: An Overview*"
4
10
# Generic LRP rule. Since it uses autodiff, it is used as a fallback for layer types
5
11
# without custom implementations.
6
12
function lrp! (Rₖ, rule:: R , layer:: L , aₖ, Rₖ₊₁) where {R<: AbstractLRPRule ,L}
@@ -106,11 +112,10 @@ end
"""
    ZeroRule()

LRP-``0`` rule. Commonly used on upper layers.

# References
- $REF_BACH_LRP
"""
struct ZeroRule <: AbstractLRPRule end

# LRP-0 places no restrictions on the layer type, so the compatibility check
# always passes (returns `nothing`).
check_compat(::ZeroRule, layer) = nothing
@@ -127,8 +132,7 @@ LRP-``ϵ`` rule. Commonly used on middle layers.
127
132
- `ϵ`: Optional stabilization parameter, defaults to `1f-6`.
128
133
129
134
# References
130
- [1]: S. Bach et al., On Pixel-Wise Explanations for Non-Linear Classifier Decisions by
131
- Layer-Wise Relevance Propagation
135
+ - $REF_BACH_LRP
132
136
"""
133
137
struct EpsilonRule{T} <: AbstractLRPRule
134
138
ϵ:: T
@@ -149,7 +153,7 @@ LRP-``γ`` rule. Commonly used on lower layers.
149
153
- `γ`: Optional multiplier for added positive weights, defaults to `0.25`.
150
154
151
155
# References
152
- [1]: G. Montavon et al., Layer-Wise Relevance Propagation: An Overview
156
+ - $REF_MONTAVON_OVERVIEW
153
157
"""
154
158
struct GammaRule{T} <: AbstractLRPRule
155
159
γ:: T
167
171
LRP-``W^2`` rule. Commonly used on the first layer when values are unbounded.
168
172
169
173
# References
170
- [1]: G. Montavon et al., Explaining nonlinear classification decisions with deep Taylor decomposition
174
+ - $REF_MONTAVON_DTD
171
175
"""
struct WSquareRule <: AbstractLRPRule end

# Square every parameter in place; together with `modify_input` returning an
# all-ones input, this implements the w² redistribution.
function modify_param!(::WSquareRule, weights)
    return weights .^= 2
end
@@ -179,7 +183,7 @@ modify_input(::WSquareRule, input) = ones_like(input)
179
183
LRP-Flat rule. Similar to the [`WSquareRule`](@ref), but with all parameters set to one.
180
184
181
185
# References
182
- [1]: S. Lapuschkin et al., Unmasking Clever Hans predictors and assessing what machines really learn
186
+ - $REF_LAPUSCHKIN_CLEVER_HANS
183
187
"""
struct FlatRule <: AbstractLRPRule end

# Overwrite every parameter with one, discarding the learned weights entirely.
function modify_param!(::FlatRule, weights)
    return fill!(weights, 1)
end
@@ -207,12 +211,12 @@ check_compat(::PassRule, layer) = nothing
207
211
208
212
LRP-``z^{\\ mathcal{B}}``-rule. Commonly used on the first layer for pixel input.
209
213
210
- The parameters `low` and `high` should be set to the lower and upper bounds of the input features,
211
- e.g. `0.0` and `1.0` for raw image data.
214
+ The parameters `low` and `high` should be set to the lower and upper bounds
215
+ of the input features, e.g. `0.0` and `1.0` for raw image data.
212
216
It is also possible to provide two arrays that match the input size.
213
217
214
218
# References
215
- [1]: G. Montavon et al., Explaining nonlinear classification decisions with deep Taylor decomposition
219
+ - $REF_MONTAVON_DTD
216
220
"""
217
221
struct ZBoxRule{T} <: AbstractLRPRule
218
222
low:: T
@@ -264,10 +268,8 @@ Commonly used on lower layers.
264
268
- `beta`: Multiplier for the negative output term, defaults to `1.0`.
265
269
266
270
# References
267
- [1]: S. Bach et al., On Pixel-Wise Explanations for Non-Linear Classifier Decisions by
268
- Layer-Wise Relevance Propagation
269
-
270
- [2]: G. Montavon et al., Layer-Wise Relevance Propagation: An Overview
271
+ - $REF_BACH_LRP
272
+ - $REF_MONTAVON_OVERVIEW
271
273
"""
272
274
struct AlphaBetaRule{T} <: AbstractLRPRule
273
275
α:: T
0 commit comments