-using BenchmarkTools
-using LoopVectorization
-using Tullio
-using Flux
+using PkgJogger
 using ExplainableAI
-using ExplainableAI: lrp!, modify_layer
-
-on_CI = haskey(ENV, "GITHUB_ACTIONS")
-
-T = Float32
-input_size = (32, 32, 3, 1)
-input = rand(T, input_size)
-
-model = Chain(
-    Chain(
-        Conv((3, 3), 3 => 8, relu; pad=1),
-        Conv((3, 3), 8 => 8, relu; pad=1),
-        MaxPool((2, 2)),
-        Conv((3, 3), 8 => 16, relu; pad=1),
-        Conv((3, 3), 16 => 16, relu; pad=1),
-        MaxPool((2, 2)),
-    ),
-    Chain(
-        Flux.flatten,
-        Dense(1024 => 512, relu), # 524_800 parameters
-        Dropout(0.5),
-        Dense(512 => 100, relu),
-    ),
-)
-Flux.testmode!(model, true)
-
-# Use one representative algorithm of each type
-algs = Dict(
-    "Gradient" => Gradient,
-    "InputTimesGradient" => InputTimesGradient,
-    "LRP" => LRP,
-    "LRPEpsilonPlusFlat" => model -> LRP(model, EpsilonPlusFlat()),
-    "SmoothGrad" => model -> SmoothGrad(model, 5),
-    "IntegratedGradients" => model -> IntegratedGradients(model, 5),
-)
-
-# Define benchmark
-_alg(alg, model) = alg(model) # for use with @benchmarkable macro
-
-SUITE = BenchmarkGroup()
-SUITE["CNN"] = BenchmarkGroup([k for k in keys(algs)])
-for (name, alg) in algs
-    analyzer = alg(model)
-    SUITE["CNN"][name] = BenchmarkGroup(["construct analyzer", "analyze"])
-    SUITE["CNN"][name]["construct analyzer"] = @benchmarkable _alg($(alg), $(model))
-    SUITE["CNN"][name]["analyze"] = @benchmarkable analyze($(input), $(analyzer))
-end
-
-# generate input for conv layers
-insize = (32, 32, 3, 1)
-in_dense = 64
-out_dense = 10
-aᵏ = rand(T, insize)
-
-layers = Dict(
-    "Conv" => (Conv((3, 3), 3 => 2), aᵏ),
-    "Dense" => (Dense(in_dense, out_dense, relu), randn(T, in_dense, 1)),
-)
-rules = Dict(
-    "ZeroRule" => ZeroRule(),
-    "EpsilonRule" => EpsilonRule(),
-    "GammaRule" => GammaRule(),
-    "WSquareRule" => WSquareRule(),
-    "FlatRule" => FlatRule(),
-    "AlphaBetaRule" => AlphaBetaRule(),
-    "ZPlusRule" => ZPlusRule(),
-    "ZBoxRule" => ZBoxRule(zero(T), oneunit(T)),
-)
-
-layernames = String.(keys(layers))
-rulenames = String.(keys(rules))
-
-SUITE["modify layer"] = BenchmarkGroup(rulenames)
-SUITE["apply rule"] = BenchmarkGroup(rulenames)
-for rname in rulenames
-    SUITE["modify layer"][rname] = BenchmarkGroup(layernames)
-    SUITE["apply rule"][rname] = BenchmarkGroup(layernames)
-end
-
-for (lname, (layer, aᵏ)) in layers
-    Rᵏ = similar(aᵏ)
-    Rᵏ⁺¹ = layer(aᵏ)
-    for (rname, rule) in rules
-        modified_layer = modify_layer(rule, layer)
-        SUITE["modify layer"][rname][lname] = @benchmarkable modify_layer($(rule), $(layer))
-        SUITE["apply rule"][rname][lname] = @benchmarkable lrp!(
-            $(Rᵏ), $(rule), $(layer), $(modified_layer), $(aᵏ), $(Rᵏ⁺¹)
-        )
-    end
-end
+# Use PkgJogger.@jog to create the JogExplainableAI module
+@jog ExplainableAI
+SUITE = JogExplainableAI.suite()
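
With the hand-rolled SUITE removed, the benchmarks themselves are expected to live in files that @jog can discover. A minimal sketch of one such file, assuming PkgJogger's documented convention that suites are collected from benchmark/bench_*.jl files, each defining a module with a top-level `suite`; the file name, module name, and stand-in model below are illustrative, not part of this commit:

    # benchmark/bench_gradient.jl (hypothetical file name; @jog collects any bench_*.jl)
    module BenchGradient

    using BenchmarkTools
    using Flux
    using ExplainableAI

    # Small stand-in model and input; a real port would reuse the CNN removed above.
    model = Chain(Flux.flatten, Dense(32 * 32 * 3 => 10, relu))
    input = rand(Float32, 32, 32, 3, 1)
    analyzer = Gradient(model)

    # PkgJogger merges each file's top-level `suite` into the jogger's BenchmarkGroup.
    suite = BenchmarkGroup()
    suite["construct analyzer"] = @benchmarkable Gradient($model)
    suite["analyze"] = @benchmarkable analyze($input, $analyzer)

    end # module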
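
Running the jogger then replaces the manual BenchmarkTools workflow. A short usage sketch, assuming the entry points PkgJogger documents for generated jogger modules (benchmark and save_benchmarks):

    using PkgJogger
    @jog ExplainableAI

    results = JogExplainableAI.benchmark()    # tune and run every collected suite
    JogExplainableAI.save_benchmarks(results) # archive results for later comparison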