diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4876530..a50bb30 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -22,7 +22,7 @@ jobs:
         version:
           - 'lts'
           - '1'
-          # - 'pre' # TODO: tests on pre-release version
+          - 'pre'
     steps:
       - uses: actions/checkout@v4
       - uses: julia-actions/setup-julia@v2
diff --git a/Project.toml b/Project.toml
index b65b8dc..ef0e644 100644
--- a/Project.toml
+++ b/Project.toml
@@ -22,4 +22,4 @@ Reexport = "1"
 Statistics = "<0.0.1, 1"
 XAIBase = "4"
 Zygote = "0.6"
-julia = "1.6"
+julia = "1.10"
diff --git a/README.md b/README.md
index e383bfd..e08b9cd 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ and [iNNvestigate][innvestigate-repo] for Keras models.
 [^1]: More specifically, models currently have to be differentiable with [Zygote.jl](https://github.com/FluxML/Zygote.jl).

 ## Installation
-This package supports Julia ≥1.6. To install it, open the Julia REPL and run
+This package supports Julia ≥1.10. To install it, open the Julia REPL and run
 ```julia-repl
 julia> ]add ExplainableAI
 ```
diff --git a/src/ExplainableAI.jl b/src/ExplainableAI.jl
index f999b15..567971f 100644
--- a/src/ExplainableAI.jl
+++ b/src/ExplainableAI.jl
@@ -14,7 +14,6 @@ using DifferentiationInterface: value_and_pullback
 using Zygote

 const DEFAULT_AD_BACKEND = AutoZygote()
-include("compat.jl")
 include("bibliography.jl")
 include("input_augmentation.jl")
 include("gradient.jl")
diff --git a/src/compat.jl b/src/compat.jl
deleted file mode 100644
index aa98e07..0000000
--- a/src/compat.jl
+++ /dev/null
@@ -1,12 +0,0 @@
-# https://github.com/JuliaLang/julia/pull/39794
-if VERSION < v"1.7.0-DEV.793"
-    export Returns
-
-    struct Returns{V} <: Function
-        value::V
-        Returns{V}(value) where {V} = new{V}(value)
-        Returns(value) = new{Core.Typeof(value)}(value)
-    end
-
-    (obj::Returns)(args...; kw...) = obj.value
-end
diff --git a/test/Project.toml b/test/Project.toml
index 48d2ec4..08f0277 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -8,6 +8,6 @@ JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 ReferenceTests = "324d217c-45ce-50fc-942e-d289b448e8cf"
-Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
+StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 XAIBase = "9b48221d-a747-4c1b-9860-46a1d8ba24a7"
diff --git a/test/references/cnn/GradCAM_max.jld2 b/test/references/cnn/GradCAM_max.jld2
index 5031171..944423b 100644
Binary files a/test/references/cnn/GradCAM_max.jld2 and b/test/references/cnn/GradCAM_max.jld2 differ
diff --git a/test/references/cnn/Gradient_max.jld2 b/test/references/cnn/Gradient_max.jld2
index 8b774c4..8818624 100644
Binary files a/test/references/cnn/Gradient_max.jld2 and b/test/references/cnn/Gradient_max.jld2 differ
diff --git a/test/references/cnn/InputTimesGradient_max.jld2 b/test/references/cnn/InputTimesGradient_max.jld2
index dd54f3d..0951dea 100644
Binary files a/test/references/cnn/InputTimesGradient_max.jld2 and b/test/references/cnn/InputTimesGradient_max.jld2 differ
diff --git a/test/references/cnn/IntegratedGradients_max.jld2 b/test/references/cnn/IntegratedGradients_max.jld2
index 01b02f2..f78ec5a 100644
Binary files a/test/references/cnn/IntegratedGradients_max.jld2 and b/test/references/cnn/IntegratedGradients_max.jld2 differ
diff --git a/test/references/cnn/SmoothGrad_max.jld2 b/test/references/cnn/SmoothGrad_max.jld2
index 89c9f15..f0676f8 100644
Binary files a/test/references/cnn/SmoothGrad_max.jld2 and b/test/references/cnn/SmoothGrad_max.jld2 differ
diff --git a/test/runtests.jl b/test/runtests.jl
index e73db4d..f6b79ee 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -6,14 +6,13 @@ using Aqua
 using JET

 @testset "ExplainableAI.jl" begin
-    if VERSION >= v"1.10"
-        @info "Testing formalities..."
+    @testset verbose = true "Linting" begin
         @testset "Code formatting" begin
             @info "- running JuliaFormatter code formatting tests..."
             @test JuliaFormatter.format(ExplainableAI; verbose=false, overwrite=false)
         end
         @testset "Aqua.jl" begin
-            @info "- running Aqua.jl tests. These might print warnings from dependencies..."
+            @info "- running Aqua.jl tests..."
             Aqua.test_all(ExplainableAI; ambiguities=false)
         end
         @testset "JET tests" begin
diff --git a/test/test_batches.jl b/test/test_batches.jl
index 473073d..c7ba69a 100644
--- a/test/test_batches.jl
+++ b/test/test_batches.jl
@@ -5,7 +5,7 @@ using Flux
 using Random
 using Distributions: Laplace

-pseudorand(dims...) = rand(MersenneTwister(123), Float32, dims...)
+pseudorand(dims...) = rand(StableRNG(123), Float32, dims...)

 ## Test `fuse_batchnorm` on Dense and Conv layers
 ins = 20
@@ -15,16 +15,16 @@ batchsize = 15
 model = Chain(Dense(ins, 15, relu; init=pseudorand), Dense(15, outs, relu; init=pseudorand))

 # Input 1 with batch dimension
-input1 = rand(MersenneTwister(1), Float32, ins, 1)
+input1 = rand(StableRNG(1), Float32, ins, 1)
 # Input 2 with batch dimension
-input2 = rand(MersenneTwister(2), Float32, ins, 1)
+input2 = rand(StableRNG(2), Float32, ins, 1)
 # Batch containing inputs 1 & 2
 input_batch = cat(input1, input2; dims=2)

 ANALYZERS = Dict(
     "Gradient" => Gradient,
     "InputTimesGradient" => InputTimesGradient,
-    "SmoothGrad" => m -> SmoothGrad(m, 5, 0.1, MersenneTwister(123)),
+    "SmoothGrad" => m -> SmoothGrad(m, 5, 0.1, StableRNG(123)),
     "IntegratedGradients" => m -> IntegratedGradients(m, 5),
     "GradCAM" => m -> GradCAM(m[1], m[2]),
 )
diff --git a/test/test_cnn.jl b/test/test_cnn.jl
index 6e7f6a8..b8ff8f3 100644
--- a/test/test_cnn.jl
+++ b/test/test_cnn.jl
@@ -4,21 +4,22 @@ using ReferenceTests
 using Flux
 using Random
+using StableRNGs: StableRNG
 using JLD2

 const GRADIENT_ANALYZERS = Dict(
     "InputTimesGradient" => InputTimesGradient,
-    "SmoothGrad" => m -> SmoothGrad(m, 5, 0.1, MersenneTwister(123)),
+    "SmoothGrad" => m -> SmoothGrad(m, 5, 0.1, StableRNG(123)),
     "IntegratedGradients" => m -> IntegratedGradients(m, 5),
     "GradCAM" => m -> GradCAM(m[1], m[2]),
 )

-pseudorand(dims...) = rand(MersenneTwister(123), Float32, dims...)
+pseudorand(dims...) = rand(StableRNG(123), Float32, dims...)

 input_size = (32, 32, 3, 1)
 input = pseudorand(input_size)

-init(dims...) = Flux.glorot_uniform(MersenneTwister(123), dims...)
+init(dims...) = Flux.glorot_uniform(StableRNG(123), dims...)

 model = Chain(
     Chain(
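Note on the test changes above: `MersenneTwister` does not guarantee the same random stream across Julia releases, so reference files generated on one version can fail comparison on another. `StableRNG` from StableRNGs.jl guarantees a stable stream across releases, which is what makes enabling the `'pre'` CI matrix entry practical. A minimal sketch (not part of the diff) of the reproducibility pattern the tests rely on:

```julia
using StableRNGs: StableRNG

# Same seed => identical values on every Julia version,
# so JLD2 reference files stay valid across releases.
pseudorand(dims...) = rand(StableRNG(123), Float32, dims...)

x1 = pseudorand(3, 2)
x2 = pseudorand(3, 2)
@assert x1 == x2  # re-seeding reproduces the exact same stream

# Deleting src/compat.jl is safe because `Returns` has been part of
# Base since Julia 1.7 (JuliaLang/julia#39794) and the compat lower
# bound is now 1.10:
f = Returns(42)
@assert f("ignored"; kw=:unused) == 42
```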