2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -22,7 +22,7 @@ jobs:
version:
- 'lts'
- '1'
-# - 'pre' # TODO: tests on pre-release version
+- 'pre'
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
2 changes: 1 addition & 1 deletion Project.toml
@@ -22,4 +22,4 @@ Reexport = "1"
Statistics = "<0.0.1, 1"
XAIBase = "4"
Zygote = "0.6"
julia = "1.6"
julia = "1.10"
2 changes: 1 addition & 1 deletion README.md
@@ -16,7 +16,7 @@ and [iNNvestigate][innvestigate-repo] for Keras models.
[^1]: More specifically, models currently have to be differentiable with [Zygote.jl](https://github.com/FluxML/Zygote.jl).

## Installation
-This package supports Julia ≥1.6. To install it, open the Julia REPL and run
+This package supports Julia ≥1.10. To install it, open the Julia REPL and run
```julia-repl
julia> ]add ExplainableAI
```
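Not part of the diff, but for orientation: a minimal usage sketch of the analyzer API that the benchmarks and tests below exercise. The toy model and input are made up for illustration; `Gradient` and `analyze` are the package's actual exports.

```julia
using ExplainableAI
using Flux

# Hypothetical two-layer model and a single input (features × batch)
model = Chain(Dense(10 => 32, relu), Dense(32 => 2))
input = rand(Float32, 10, 1)

analyzer = Gradient(model)      # one of the analyzer types benchmarked below
expl = analyze(input, analyzer) # explanation of the top output neuron
```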
3 changes: 1 addition & 2 deletions benchmark/Project.toml
@@ -2,9 +2,8 @@
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
ExplainableAI = "4f1bc3e1-d60d-4ed0-9367-9bdff9846d3b"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
-LoopVectorization = "bdcacae8-1622-11e9-2a5c-532679323890"
PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
-Tullio = "bc48ee85-29a4-5162-ae0b-a64e1601d4bc"
+PkgJogger = "10150987-6cc1-4b76-abee-b1c1cbd91c01"

[compat]
BenchmarkTools = "1"
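LoopVectorization and Tullio appear to have been required only by the LRP rule benchmarks that this PR deletes from `benchmark/benchmarks.jl` below; PkgJogger comes in as the new benchmark harness.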
47 changes: 47 additions & 0 deletions benchmark/bench_jogger.jl
@@ -0,0 +1,47 @@
using BenchmarkTools
using Flux
using ExplainableAI

on_CI = haskey(ENV, "GITHUB_ACTIONS")

T = Float32
input_size = (32, 32, 3, 1)
input = rand(T, input_size)

model = Chain(
    Chain(
        Conv((3, 3), 3 => 8, relu; pad=1),
        Conv((3, 3), 8 => 8, relu; pad=1),
        MaxPool((2, 2)),
        Conv((3, 3), 8 => 16, relu; pad=1),
        Conv((3, 3), 16 => 16, relu; pad=1),
        MaxPool((2, 2)),
    ),
    Chain(
        Flux.flatten,
        Dense(1024 => 512, relu), # 524_800 parameters
        Dropout(0.5),
        Dense(512 => 100, relu),
    ),
)
Flux.testmode!(model, true)

# Use one representative algorithm of each type
METHODS = Dict(
    "Gradient" => Gradient,
    "InputTimesGradient" => InputTimesGradient,
    "SmoothGrad" => model -> SmoothGrad(model, 5),
    "IntegratedGradients" => model -> IntegratedGradients(model, 5),
)

# Define benchmark
construct(method, model) = method(model) # for use with @benchmarkable macro

suite = BenchmarkGroup()
suite["CNN"] = BenchmarkGroup([k for k in keys(METHODS)])
for (name, method) in METHODS
    analyzer = method(model)
    suite["CNN"][name] = BenchmarkGroup(["construct analyzer", "analyze"])
    suite["CNN"][name]["constructor"] = @benchmarkable construct($(method), $(model))
    suite["CNN"][name]["analyze"] = @benchmarkable analyze($(input), $(analyzer))
end
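A sketch of running this suite directly (assuming the file is included from the `benchmark/` directory; under PkgJogger it is normally loaded by the generated jogger module instead):

```julia
using BenchmarkTools
include("bench_jogger.jl")          # defines `suite`

tune!(suite)                        # tune evaluation counts per benchmark
results = run(suite; verbose=true)  # returns a BenchmarkGroup of trials
minimum(results)                    # fastest time per benchmark
```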
98 changes: 4 additions & 94 deletions benchmark/benchmarks.jl
@@ -1,95 +1,5 @@
using BenchmarkTools
using LoopVectorization
using Tullio
using Flux
using PkgJogger
using ExplainableAI
using ExplainableAI: lrp!, modify_layer

on_CI = haskey(ENV, "GITHUB_ACTIONS")

T = Float32
input_size = (32, 32, 3, 1)
input = rand(T, input_size)

model = Chain(
    Chain(
        Conv((3, 3), 3 => 8, relu; pad=1),
        Conv((3, 3), 8 => 8, relu; pad=1),
        MaxPool((2, 2)),
        Conv((3, 3), 8 => 16, relu; pad=1),
        Conv((3, 3), 16 => 16, relu; pad=1),
        MaxPool((2, 2)),
    ),
    Chain(
        Flux.flatten,
        Dense(1024 => 512, relu), # 524_800 parameters
        Dropout(0.5),
        Dense(512 => 100, relu),
    ),
)
Flux.testmode!(model, true)

# Use one representative algorithm of each type
algs = Dict(
    "Gradient" => Gradient,
    "InputTimesGradient" => InputTimesGradient,
    "LRP" => LRP,
    "LREpsilonPlusFlat" => model -> LRP(model, EpsilonPlusFlat()),
    "SmoothGrad" => model -> SmoothGrad(model, 5),
    "IntegratedGradients" => model -> IntegratedGradients(model, 5),
)

# Define benchmark
_alg(alg, model) = alg(model) # for use with @benchmarkable macro

SUITE = BenchmarkGroup()
SUITE["CNN"] = BenchmarkGroup([k for k in keys(algs)])
for (name, alg) in algs
    analyzer = alg(model)
    SUITE["CNN"][name] = BenchmarkGroup(["construct analyzer", "analyze"])
    SUITE["CNN"][name]["construct analyzer"] = @benchmarkable _alg($(alg), $(model))
    SUITE["CNN"][name]["analyze"] = @benchmarkable analyze($(input), $(analyzer))
end

# generate input for conv layers
insize = (32, 32, 3, 1)
in_dense = 64
out_dense = 10
aᵏ = rand(T, insize)

layers = Dict(
    "Conv" => (Conv((3, 3), 3 => 2), aᵏ),
    "Dense" => (Dense(in_dense, out_dense, relu), randn(T, in_dense, 1)),
)
rules = Dict(
    "ZeroRule" => ZeroRule(),
    "EpsilonRule" => EpsilonRule(),
    "GammaRule" => GammaRule(),
    "WSquareRule" => WSquareRule(),
    "FlatRule" => FlatRule(),
    "AlphaBetaRule" => AlphaBetaRule(),
    "ZPlusRule" => ZPlusRule(),
    "ZBoxRule" => ZBoxRule(zero(T), oneunit(T)),
)

layernames = String.(keys(layers))
rulenames = String.(keys(rules))

SUITE["modify layer"] = BenchmarkGroup(rulenames)
SUITE["apply rule"] = BenchmarkGroup(rulenames)
for rname in rulenames
    SUITE["modify layer"][rname] = BenchmarkGroup(layernames)
    SUITE["apply rule"][rname] = BenchmarkGroup(layernames)
end

for (lname, (layer, aᵏ)) in layers
    Rᵏ = similar(aᵏ)
    Rᵏ⁺¹ = layer(aᵏ)
    for (rname, rule) in rules
        modified_layer = modify_layer(rule, layer)
        SUITE["modify layer"][rname][lname] = @benchmarkable modify_layer($(rule), $(layer))
        SUITE["apply rule"][rname][lname] = @benchmarkable lrp!(
            $(Rᵏ), $(rule), $(layer), $(modified_layer), $(aᵏ), $(Rᵏ⁺¹)
        )
    end
end
# Use PkgJogger.@jog to create the JogExplainableAI module
@jog ExplainableAI
SUITE = JogExplainableAI.suite()
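For readers new to PkgJogger: `@jog ExplainableAI` generates a `JogExplainableAI` module whose suite is assembled from the `benchmark/bench_*.jl` files, so `bench_jogger.jl` above is picked up automatically. A sketch of a typical session, going by PkgJogger's documented jogger API (treat the exact method names as assumptions):

```julia
using PkgJogger
using ExplainableAI

@jog ExplainableAI                       # generates the JogExplainableAI module

results = JogExplainableAI.benchmark()   # tune, run, and time the full suite
# JogExplainableAI.save_benchmarks(results)  # optionally archive for later comparison
```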
1 change: 0 additions & 1 deletion src/ExplainableAI.jl
@@ -14,7 +14,6 @@ using DifferentiationInterface: value_and_pullback
using Zygote
const DEFAULT_AD_BACKEND = AutoZygote()

include("compat.jl")
include("bibliography.jl")
include("input_augmentation.jl")
include("gradient.jl")
12 changes: 0 additions & 12 deletions src/compat.jl

This file was deleted.
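With the minimum supported Julia raised to 1.10, the version-specific shims that `src/compat.jl` presumably provided are obsolete, which is why the file and its `include` in `src/ExplainableAI.jl` are dropped.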

4 changes: 3 additions & 1 deletion test/Project.toml
@@ -1,13 +1,15 @@
[deps]
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
PkgJogger = "10150987-6cc1-4b76-abee-b1c1cbd91c01"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
ReferenceTests = "324d217c-45ce-50fc-942e-d289b448e8cf"
-Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
+StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
XAIBase = "9b48221d-a747-4c1b-9860-46a1d8ba24a7"
Binary file modified test/references/cnn/GradCAM_max.jld2
Binary file modified test/references/cnn/Gradient_max.jld2
Binary file modified test/references/cnn/InputTimesGradient_max.jld2
Binary file modified test/references/cnn/IntegratedGradients_max.jld2
Binary file modified test/references/cnn/SmoothGrad_max.jld2
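All five reference heatmaps are regenerated, presumably because switching the test RNG from MersenneTwister to StableRNG (see `test/test_cnn.jl` below) changes the pseudorandom inputs and weight initializations, and with them the expected explanations.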
9 changes: 6 additions & 3 deletions test/runtests.jl
@@ -6,14 +6,13 @@ using Aqua
using JET

@testset "ExplainableAI.jl" begin
if VERSION >= v"1.10"
@info "Testing formalities..."
@testset verbose = true "Linting" begin
@testset "Code formatting" begin
@info "- running JuliaFormatter code formatting tests..."
@test JuliaFormatter.format(ExplainableAI; verbose=false, overwrite=false)
end
@testset "Aqua.jl" begin
@info "- running Aqua.jl tests. These might print warnings from dependencies..."
@info "- running Aqua.jl tests..."
Aqua.test_all(ExplainableAI; ambiguities=false)
end
@testset "JET tests" begin
@@ -34,4 +33,8 @@ using JET
@info "Testing analyzers on batches..."
include("test_batches.jl")
end
@testset "Benchmark correctness" begin
@info "Testing whether benchmarks are up-to-date..."
include("test_benchmarks.jl")
end
end
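With the compat bound in Project.toml now at 1.10, the `VERSION >= v"1.10"` guard around the linting tests is dead code, so it is removed; the new "Benchmark correctness" testset wires the PkgJogger smoke test into the suite.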
9 changes: 5 additions & 4 deletions test/test_batches.jl
@@ -3,9 +3,10 @@

using Flux
using Random
+using StableRNGs: StableRNG
using Distributions: Laplace

-pseudorand(dims...) = rand(MersenneTwister(123), Float32, dims...)
+pseudorand(dims...) = rand(StableRNG(123), Float32, dims...)

## Test `fuse_batchnorm` on Dense and Conv layers
ins = 20
@@ -15,16 +16,16 @@ batchsize = 15
model = Chain(Dense(ins, 15, relu; init=pseudorand), Dense(15, outs, relu; init=pseudorand))

# Input 1 with batch dimension
-input1 = rand(MersenneTwister(1), Float32, ins, 1)
+input1 = rand(StableRNG(1), Float32, ins, 1)
# Input 2 with batch dimension
-input2 = rand(MersenneTwister(2), Float32, ins, 1)
+input2 = rand(StableRNG(2), Float32, ins, 1)
# Batch containing inputs 1 & 2
input_batch = cat(input1, input2; dims=2)

ANALYZERS = Dict(
    "Gradient" => Gradient,
    "InputTimesGradient" => InputTimesGradient,
-    "SmoothGrad" => m -> SmoothGrad(m, 5, 0.1, MersenneTwister(123)),
+    "SmoothGrad" => m -> SmoothGrad(m, 5, 0.1, StableRNG(123)),
    "IntegratedGradients" => m -> IntegratedGradients(m, 5),
    "GradCAM" => m -> GradCAM(m[1], m[2]),
)
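The RNG swap is what keeps these tests meaningful on the new 'pre' CI entry: the stream of Julia's built-in MersenneTwister may change between Julia releases, whereas StableRNGs.jl guarantees a reproducible stream, so seeded inputs and noise stay identical across versions.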
4 changes: 4 additions & 0 deletions test/test_benchmarks.jl
@@ -0,0 +1,4 @@
using PkgJogger
using ExplainableAI

PkgJogger.@test_benchmarks ExplainableAI
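Per PkgJogger's documentation, `@test_benchmarks` runs each benchmark in the package's suite once inside a `@testset`, failing if any benchmark errors: a cheap smoke test that keeps `bench_jogger.jl` from bit-rotting.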
7 changes: 4 additions & 3 deletions test/test_cnn.jl
@@ -4,21 +4,22 @@ using ReferenceTests

using Flux
using Random
+using StableRNGs: StableRNG
using JLD2

const GRADIENT_ANALYZERS = Dict(
    "InputTimesGradient" => InputTimesGradient,
-    "SmoothGrad" => m -> SmoothGrad(m, 5, 0.1, MersenneTwister(123)),
+    "SmoothGrad" => m -> SmoothGrad(m, 5, 0.1, StableRNG(123)),
    "IntegratedGradients" => m -> IntegratedGradients(m, 5),
    "GradCAM" => m -> GradCAM(m[1], m[2]),
)

-pseudorand(dims...) = rand(MersenneTwister(123), Float32, dims...)
+pseudorand(dims...) = rand(StableRNG(123), Float32, dims...)

input_size = (32, 32, 3, 1)
input = pseudorand(input_size)

-init(dims...) = Flux.glorot_uniform(MersenneTwister(123), dims...)
+init(dims...) = Flux.glorot_uniform(StableRNG(123), dims...)

model = Chain(
Chain(