Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 19 additions & 24 deletions .github/workflows/CI.yml
Original file line number Diff line number Diff line change
@@ -1,49 +1,44 @@
name: CI
on:
pull_request:
branches:
- master
- dev
push:
branches:
- master
- dev
tags: '*'
tags: ['*']
pull_request:
workflow_dispatch:
concurrency:
# Skip intermediate builds: always.
# Cancel intermediate builds: only if it is a pull request build.
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
jobs:
test:
name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
timeout-minutes: 60
permissions: # needed to allow julia-actions/cache to proactively delete old caches that it has created
actions: write
contents: read
strategy:
fail-fast: false
matrix:
version:
- '1.6'
- '1'
- 'lts'
os:
- ubuntu-latest
arch:
- x64
steps:
- uses: actions/checkout@v2
- uses: julia-actions/setup-julia@v1
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- uses: actions/cache@v1
env:
cache-name: cache-artifacts
with:
path: ~/.julia/artifacts
key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
restore-keys: |
${{ runner.os }}-test-${{ env.cache-name }}-
${{ runner.os }}-test-
${{ runner.os }}-
- uses: julia-actions/cache@v2
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
env:
JULIA_NUM_THREADS: '2'
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v1
- uses: codecov/codecov-action@v5
with:
file: lcov.info
files: lcov.info
21 changes: 17 additions & 4 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "MLJParticleSwarmOptimization"
uuid = "17a086e9-ed03-4f30-ab88-8b63f0f6126c"
authors = ["Long Nguyen <[email protected]> and contributors"]
version = "0.1.3"
version = "0.1.4"

[deps]
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
Expand All @@ -12,6 +12,19 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"

[compat]
Distributions = "0.25"
MLJBase = "0.18, 0.19, 0.20, 0.21"
MLJTuning = "0.6, 0.7"
julia = "1.6"
LinearAlgebra = "1"
MLJBase = "1"
MLJTuning = "0.8"
Random = "1"
julia = "1"

[extras]
ComputationalResources = "ed09eef8-17a6-5b46-8889-db040fac31e3"
Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
EvoTrees = "f6006082-12f8-11e9-0c9c-0d5d367ab1e5"
MLJ = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7"
StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["ComputationalResources", "Distributed", "EvoTrees", "MLJ", "StableRNGs", "Test"]
3 changes: 1 addition & 2 deletions src/MLJParticleSwarmOptimization.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,9 @@ using MLJTuning

export ParticleSwarm, AdaptiveParticleSwarm

include("swarm.jl")
include("interface.jl")
include("parameters.jl")
include("update.jl")
include("strategies/abstract.jl")
include("strategies/basic.jl")
include("strategies/adaptive.jl")

Expand Down
47 changes: 47 additions & 0 deletions src/interface.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# Common supertype for all particle swarm tuning strategies; hooks them into
# MLJTuning's tuning-strategy interface so they can drive `TunedModel` searches.
abstract type AbstractParticleSwarm <: MLJTuning.TuningStrategy end

# Internal search state threaded through a particle swarm optimization run.
# `T` is the numeric eltype of the position/velocity matrices; `R`, `P`, `I`
# hold the hyperparameter ranges, decoded parameter values, and index
# bookkeeping (exact layouts are defined by the strategy files — see
# parameters.jl/update.jl).
struct ParticleSwarmState{T, R, P, I}
# Hyperparameter ranges being searched.
ranges::R
# Decoded hyperparameter values for each particle.
parameters::P
# NOTE(review): presumably maps columns of `X` to ranges/parameters — confirm
# against parameters.jl.
indices::I
# Current particle positions (one column/row per particle; see update.jl).
X::Matrix{T}
# Current particle velocities, same shape as `X`.
V::Matrix{T}
# Personal-best positions found so far, per particle.
pbest_X::Matrix{T}
# Global-best position(s) broadcast to the swarm.
gbest_X::Matrix{T}
# Personal-best objective values, per particle.
pbest::Vector{T}
# Global-best objective value(s).
gbest::Vector{T}
end

"""
    ParticleSwarm

Basic particle swarm tuning strategy. Fields are mutable so `MLJTuning.clean!`
can repair invalid settings after keyword construction (see
strategies/basic.jl for the keyword constructor and full docstring).
"""
mutable struct ParticleSwarm{R<:AbstractRNG} <: AbstractParticleSwarm
    # Number of particles in the swarm. Concrete `Int` (not `Integer`) so the
    # field is inferrable; any `Integer` argument is converted on construction.
    n_particles::Int
    # Inertia weight applied to each particle's previous velocity.
    w::Float64
    # Cognitive (personal-best) acceleration coefficient.
    c1::Float64
    # Social (global-best) acceleration coefficient.
    c2::Float64
    # NOTE(review): probability shift used when sampling discrete
    # hyperparameters — confirm against the `pbest!` docs in strategies/basic.jl.
    prob_shift::Float64
    # RNG driving the stochastic updates. Parametrized so the field is
    # concretely typed, avoiding abstract-field boxing on every access.
    rng::R
    # TODO: topology
end

"""
    AdaptiveParticleSwarm

Adaptive particle swarm tuning strategy (no fixed inertia weight `w`; it is
adapted during the search). Fields are mutable so `MLJTuning.clean!` can
repair invalid settings after keyword construction (see
strategies/adaptive.jl for the keyword constructor and full docstring).
"""
mutable struct AdaptiveParticleSwarm{R<:AbstractRNG} <: AbstractParticleSwarm
    # Number of particles in the swarm. Concrete `Int` (not `Integer`) so the
    # field is inferrable; any `Integer` argument is converted on construction.
    n_particles::Int
    # Cognitive (personal-best) acceleration coefficient.
    c1::Float64
    # Social (global-best) acceleration coefficient.
    c2::Float64
    # NOTE(review): probability shift used when sampling discrete
    # hyperparameters — confirm against the strategy docstring.
    prob_shift::Float64
    # RNG driving the stochastic updates. Parametrized so the field is
    # concretely typed, avoiding abstract-field boxing on every access.
    rng::R
end

# Field accessors shared by every particle swarm strategy, so generic code
# below can stay agnostic of the concrete strategy type.
function get_n_particles(tuning::AbstractParticleSwarm)
    return tuning.n_particles
end

function get_prob_shift(tuning::AbstractParticleSwarm)
    return tuning.prob_shift
end

function get_rng(tuning::AbstractParticleSwarm)
    return tuning.rng
end

# Initialize swarm state for range(s) `r`, delegating to the
# `(rng, r, n_particles)` method with the strategy's own RNG and swarm size.
function initialize(r, tuning::AbstractParticleSwarm)
    rng = get_rng(tuning)
    n = get_n_particles(tuning)
    return initialize(rng, r, n)
end

# Retrieve candidate hyperparameters from `state`, delegating to the
# `(rng, state)` method with the strategy's own RNG.
function retrieve!(state::ParticleSwarmState, tuning::AbstractParticleSwarm)
    rng = get_rng(tuning)
    return retrieve!(rng, state)
end

# Update personal bests from `measurements`, delegating to the
# `(state, measurements, prob_shift)` method with the strategy's shift setting.
function pbest!(state::ParticleSwarmState, measurements, tuning::AbstractParticleSwarm)
    shift = get_prob_shift(tuning)
    return pbest!(state, measurements, shift)
end
11 changes: 0 additions & 11 deletions src/strategies/abstract.jl

This file was deleted.

11 changes: 1 addition & 10 deletions src/strategies/adaptive.jl
Original file line number Diff line number Diff line change
Expand Up @@ -40,15 +40,6 @@ copy of `model`. If the corresponding range has a specified `scale` function,
then the transformation is applied before the hyperparameter is returned. If
`scale` is a symbol (eg, `:log`), it is ignored.
"""
mutable struct AdaptiveParticleSwarm{R<:AbstractRNG} <: AbstractParticleSwarm
n_particles::Int
c1::Float64
c2::Float64
prob_shift::Float64
rng::R
end

# Constructor

function AdaptiveParticleSwarm(;
n_particles=3,
Expand All @@ -57,7 +48,7 @@ function AdaptiveParticleSwarm(;
prob_shift=0.25,
rng::R=Random.GLOBAL_RNG
) where {R}
swarm = AdaptiveParticleSwarm{R}(n_particles, c1, c2, prob_shift, rng)
swarm = AdaptiveParticleSwarm(n_particles, c1, c2, prob_shift, rng)
message = MLJTuning.clean!(swarm)
isempty(message) || @warn message
return swarm
Expand Down
11 changes: 1 addition & 10 deletions src/strategies/basic.jl
Original file line number Diff line number Diff line change
Expand Up @@ -103,15 +103,6 @@ where pₛ is the probability of the sampled hyperparameter value. For more
information, refer to "A New Discrete Particle Swarm Optimization Algorithm" by
Strasser, Goodman, Sheppard, and Butcher.
"""
mutable struct ParticleSwarm{R<:AbstractRNG} <: AbstractParticleSwarm
n_particles::Int
w::Float64
c1::Float64
c2::Float64
prob_shift::Float64
rng::R
# TODO: topology
end

function ParticleSwarm(;
n_particles=3,
Expand All @@ -121,7 +112,7 @@ function ParticleSwarm(;
prob_shift=0.25,
rng::R=Random.GLOBAL_RNG
) where {R}
swarm = ParticleSwarm{R}(n_particles, w, c1, c2, prob_shift, rng)
swarm = ParticleSwarm(n_particles, w, c1, c2, prob_shift, rng)
message = MLJTuning.clean!(swarm)
isempty(message) || @warn message
return swarm
Expand Down
13 changes: 0 additions & 13 deletions src/swarm.jl

This file was deleted.

10 changes: 0 additions & 10 deletions test/Project.toml

This file was deleted.

2 changes: 1 addition & 1 deletion test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ addprocs(2)
using ComputationalResources
using Distributions
using EvoTrees
using MLJBase
using MLJ
using MLJTuning
using StableRNGs
end
Expand Down
4 changes: 2 additions & 2 deletions test/strategies/adaptive.jl
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ for acceleration in modes
@testset "EvoTree Tuning with AdaptiveParticleSwarm and $(typeof(acceleration))" begin
rng = StableRNG(123)
features = rand(rng, 10_000) .* 5 .- 2
X = MLJBase.table(reshape(features, (size(features)[1], 1)))
X = MLJ.table(reshape(features, (size(features)[1], 1)))
y = sin.(features) .* 0.5 .+ 0.5
y = EvoTrees.logit(y) + randn(rng, size(y))
y = EvoTrees.sigmoid(y)
Expand Down Expand Up @@ -128,4 +128,4 @@ for acceleration in modes
end

println("Adaptive PSO losses (see Issue #14):")
(; modes=modes, losses=losses) |> MLJBase.pretty
(; modes=modes, losses=losses) |> MLJ.pretty
4 changes: 2 additions & 2 deletions test/strategies/basic.jl
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ for acceleration in (CPU1(), CPUProcesses(), CPUThreads())
@testset "EvoTree Tuning with ParticleSwarm and $(typeof(acceleration))" begin
rng = StableRNG(123)
features = rand(rng, 10_000) .* 5 .- 2
X = MLJBase.table(reshape(features, (size(features)[1], 1)))
X = MLJ.table(reshape(features, (size(features)[1], 1)))
y = sin.(features) .* 0.5 .+ 0.5
y = EvoTrees.logit(y) + randn(rng, size(y))
y = EvoTrees.sigmoid(y)
Expand All @@ -32,7 +32,7 @@ for acceleration in (CPU1(), CPUProcesses(), CPUThreads())
tuning=RandomSearch(rng=StableRNG(1234)),
resampling=CV(nfolds=5, rng=StableRNG(8888)),
range=[r1, r2],
measure=mae,
measure=MLJ.mae,
n=15,
acceleration=acceleration
)
Expand Down