Modify input activation before computing relevance propagation.
"""
- @inline modify_input(rule, input) = input # general fallback
+ modify_input(rule, input) = input # general fallback
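# Illustrative sketch (not part of this diff): a hypothetical rule that overrides the
# modify_input hook above to propagate relevance through the positive part of the input.
struct MyPositiveInputRule <: AbstractLRPRule end
modify_input(::MyPositiveInputRule, input) = max.(zero(eltype(input)), input)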
"""
    modify_denominator(rule, d)

Modify denominator ``z`` for numerical stability on the forward pass.
"""
- @inline modify_denominator(rule, d) = stabilize_denom(d, 1.0f-9) # general fallback
+ modify_denominator(rule, d) = stabilize_denom(d, 1.0f-9) # general fallback

"""
    check_compat(rule, layer)
@@ -44,7 +44,7 @@ Check compatibility of a LRP-Rule with layer type.
When implementing a custom `check_compat` function, return `nothing` if checks passed,
otherwise throw an `ArgumentError`.
"""
- @inline check_compat(rule, layer) = require_weight_and_bias(rule, layer)
+ check_compat(rule, layer) = require_weight_and_bias(rule, layer)
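# Illustrative sketch (not part of this diff, assumes `using Flux` and the API above):
# a custom check_compat for a hypothetical rule that only supports Dense layers,
# returning `nothing` on success and throwing an ArgumentError otherwise.
struct MyDenseOnlyRule <: AbstractLRPRule end
function check_compat(::MyDenseOnlyRule, layer)
    layer isa Flux.Dense && return nothing
    throw(ArgumentError("MyDenseOnlyRule only supports Dense layers, got $(typeof(layer))"))
end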
"""
    modify_layer!(rule, layer)
⋮

Inplace-modify parameters before computing the relevance.
"""
- @inline modify_param!(rule, param) = nothing # general fallback
+ modify_param!(rule, param) = nothing # general fallback

# Useful presets:
modify_param!(::Val{:mask_positive}, p) = p .= max.(zero(eltype(p)), p)
modify_param!(::Val{:mask_negative}, p) = p .= min.(zero(eltype(p)), p)

# Internal wrapper functions for bias-free layers.
- @inline modify_bias!(rule::R, b) where {R} = modify_param!(rule, b)
- @inline modify_bias!(rule, b::Flux.Zeros) = nothing # skip if bias=Flux.Zeros (Flux <= v0.12)
- @inline function modify_bias!(rule, b::Bool) # skip if bias=false (Flux >= v0.13)
+ modify_bias!(rule::R, b) where {R} = modify_param!(rule, b)
+ modify_bias!(rule, b::Flux.Zeros) = nothing # skip if bias=Flux.Zeros (Flux <= v0.12)
+ function modify_bias!(rule, b::Bool) # skip if bias=false (Flux >= v0.13)
    @assert b == false
    return nothing
end
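# Illustrative sketch (not part of this diff): a hypothetical γ-style rule that uses the
# modify_param! hook to amplify positive parameters in place before relevance propagation.
struct MyGammaLikeRule{T} <: AbstractLRPRule
    γ::T
    MyGammaLikeRule(γ=0.25f0) = new{Float32}(γ)
end
modify_param!(r::MyGammaLikeRule, p) = p .+= r.γ .* max.(zero(eltype(p)), p)
# modify_bias! falls back to modify_param!, so the bias is treated the same way
# unless the layer is bias-free (Flux.Zeros or `false`), as handled above.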
@@ -108,7 +108,7 @@ LRP-0 rule. Commonly used on upper layers.
Layer-Wise Relevance Propagation
"""
struct ZeroRule <: AbstractLRPRule end
- @inline check_compat(::ZeroRule, layer) = nothing
+ check_compat(::ZeroRule, layer) = nothing

# Optimization to save allocations since weights don't need to be reset:
get_layer_resetter(::ZeroRule, layer) = Returns(nothing)
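# Illustrative usage sketch (not part of this diff, assumes `using Flux`):
# ZeroRule places no constraints on the layer and modifies no parameters.
rule  = ZeroRule()
layer = Flux.Dense(2, 2)
check_compat(rule, layer)          # nothing: compatible with any layer
get_layer_resetter(rule, layer)()  # the no-op resetter Returns(nothing) does nothing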
@@ -130,7 +130,7 @@ struct EpsilonRule{T} <: AbstractLRPRule
    EpsilonRule(ϵ=1.0f-6) = new{Float32}(ϵ)
end
modify_denominator(r::EpsilonRule, d) = stabilize_denom(d, r.ϵ)
- @inline check_compat(::EpsilonRule, layer) = nothing
+ check_compat(::EpsilonRule, layer) = nothing

# Optimization to save allocations since weights don't need to be reset:
get_layer_resetter(::EpsilonRule, layer) = Returns(nothing)
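# Illustrative sketch (not part of this diff), assuming stabilize_denom adds ϵ with the
# sign of its argument to avoid division by zero:
r = EpsilonRule()              # ϵ defaults to 1.0f-6
modify_denominator(r, 0.0f0)   # ≈ 1.0f-6 under that assumption
modify_denominator(r, -2.0f0)  # ≈ -2.000001f0 under that assumption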
@@ -172,7 +172,7 @@ function lrp!(Rₖ, ::PassRule, layer, aₖ, Rₖ₊₁)
    return nothing
end
# No extra checks as reshaping operation will throw an error if layer isn't compatible:
- @inline check_compat(::PassRule, layer) = nothing
+ check_compat(::PassRule, layer) = nothing
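# Illustrative sketch (not part of this diff, assumes `using Flux` and that lrp! for
# PassRule reshapes the upstream relevance back to the shape of the input activation):
aₖ   = rand(Float32, 4, 4, 1, 1)   # input activation of a reshaping layer
Rₖ₊₁ = rand(Float32, 16, 1)        # upstream relevance after Flux.flatten
Rₖ   = similar(aₖ)
lrp!(Rₖ, PassRule(), Flux.flatten, aₖ, Rₖ₊₁)
# under that assumption, Rₖ == reshape(Rₖ₊₁, size(aₖ))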
"""
    ZBoxRule(low, high)