Skip to content

Commit 6ed3643

Browse files
authored
Merge pull request #21 from JuliaAI/dev
For a 0.1.2 release
2 parents 37381d6 + d2996c5 commit 6ed3643

File tree

3 files changed

+22
-15
lines changed

3 files changed

+22
-15
lines changed

Project.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
name = "MLJParticleSwarmOptimization"
22
uuid = "17a086e9-ed03-4f30-ab88-8b63f0f6126c"
33
authors = ["Long Nguyen <[email protected]> and contributors"]
4-
version = "0.1.1"
4+
version = "0.1.2"
55

66
[deps]
77
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
@@ -12,6 +12,6 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
1212

1313
[compat]
1414
Distributions = "0.25"
15-
MLJBase = "0.18"
16-
MLJTuning = "0.6.11"
15+
MLJBase = "0.18, 0.19, 0.20"
16+
MLJTuning = "0.6, 0.7"
1717
julia = "1.4"

test/parameters.jl

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -92,11 +92,10 @@
9292
ranges = [r1, (r2, Uniform), (r3, d3), r4]
9393
state = PSO.initialize(rng, ranges, n)
9494
PSO.retrieve!(rng, state)
95-
@test state.parameters == (
96-
["a", "a", "c"],
97-
[553, 250, 375],
98-
[3.9372495283243105, 3.6569395920512977, 3.6354556967115146],
99-
[-0.8067647f0, 0.4209916f0, 0.6736019f0]
100-
)
95+
params = state.parameters
96+
@test params[1] == ["a", "a", "c"]
97+
@test params[2] ≈ [553, 250, 375]
98+
@test params[3] ≈ [3.9372495283243105, 3.6569395920512977, 3.6354556967115146]
99+
@test params[4] ≈ [-0.8067647f0, 0.4209916f0, 0.6736019f0]
101100
end
102-
end
101+
end

test/strategies/adaptive.jl

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,10 @@ end
7575
@test c2 ≈ 1.8 # increase social
7676
end
7777

78-
for acceleration in (CPU1(), CPUProcesses(), CPUThreads())
78+
const losses = []
79+
const modes = [CPU1(), CPUProcesses(), CPUThreads()]
80+
81+
for acceleration in modes
7982
@testset "EvoTree Tuning with AdaptiveParticleSwarm and $(typeof(acceleration))" begin
8083
rng = StableRNG(123)
8184
features = rand(rng, 10_000) .* 5 .- 2
@@ -115,9 +118,14 @@ for acceleration in (CPU1(), CPUProcesses(), CPUThreads())
115118
fit!(mach, verbosity=0)
116119
rep = report(mach)
117120
best_loss = rep.best_history_entry.measurement[1]
121+
push!(losses, best_loss)
122+
123+
# There is no reason to expect PSO to be better than
124+
# RandomSearch, but they should give similar results, say within 10%:
118125

119-
# Compare with random search result with the same settings
120-
@test best_loss < baseline_best_loss ||
121-
isapprox(best_loss, baseline_best_loss; atol=1e-3)
126+
@test abs(best_loss/baseline_best_loss - 1) < 0.1
122127
end
123-
end
128+
end
129+
130+
println("Adaptive PSO losses (see Issue #14):")
131+
(; modes=modes, losses=losses) |> MLJBase.pretty

0 commit comments

Comments
 (0)