Skip to content
This repository was archived by the owner on Sep 28, 2024. It is now read-only.

Commit d6dbe6b

Browse files
authored
Merge pull request #78 from SciML/refactor
Refactor
2 parents 61d32af + 221ddd4 commit d6dbe6b

File tree

25 files changed

+146
-153
lines changed

25 files changed

+146
-153
lines changed

.github/workflows/CI.yml

Lines changed: 18 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,15 @@ name: CI
22
env:
33
DATADEPS_ALWAYS_ACCEPT: true
44
on:
5-
- push
6-
- pull_request
7-
8-
defaults:
9-
run:
10-
shell: bash
11-
5+
push:
6+
branches: '*'
7+
tags: '*'
8+
pull_request:
9+
concurrency:
10+
# Skip intermediate builds: always.
11+
# Cancel intermediate builds: only if it is a pull request build.
12+
group: ${{ github.workflow }}-${{ github.ref }}
13+
cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
1214
jobs:
1315
test:
1416
name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
@@ -31,42 +33,30 @@ jobs:
3133
with:
3234
version: ${{ matrix.version }}
3335
arch: ${{ matrix.arch }}
34-
- uses: actions/cache@v1
35-
env:
36-
cache-name: cache-artifacts
37-
with:
38-
path: ~/.julia/artifacts
39-
key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
40-
restore-keys: |
41-
${{ runner.os }}-test-${{ env.cache-name }}-
42-
${{ runner.os }}-test-
43-
${{ runner.os }}-
36+
- uses: julia-actions/cache@v1
4437
- uses: julia-actions/julia-buildpkg@v1
4538
- uses: julia-actions/julia-runtest@v1
4639
- uses: julia-actions/julia-processcoverage@v1
47-
- uses: codecov/codecov-action@v1
40+
- uses: codecov/codecov-action@v2
4841
with:
49-
file: lcov.info
42+
files: lcov.info
5043
docs:
5144
name: Documentation
5245
runs-on: ubuntu-latest
46+
permissions:
47+
contents: write
5348
steps:
5449
- uses: actions/checkout@v2
5550
- uses: julia-actions/setup-julia@v1
5651
with:
5752
version: '1'
58-
- run: |
59-
julia --project=docs -e '
60-
using Pkg
61-
Pkg.develop(PackageSpec(path=pwd()))
62-
Pkg.instantiate()'
53+
- uses: julia-actions/julia-buildpkg@v1
54+
- uses: julia-actions/julia-docdeploy@v1
55+
env:
56+
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
6357
- run: |
6458
julia --project=docs -e '
6559
using Documenter: DocMeta, doctest
6660
using NeuralOperators
6761
DocMeta.setdocmeta!(NeuralOperators, :DocTestSetup, :(using NeuralOperators); recursive=true)
6862
doctest(NeuralOperators)'
69-
- run: julia --project=docs docs/make.jl
70-
env:
71-
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
72-
DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }}

Project.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,10 @@ Tullio = "bc48ee85-29a4-5162-ae0b-a64e1601d4bc"
1616
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
1717

1818
[compat]
19-
CUDA = "3.9"
19+
CUDA = "3"
2020
CUDAKernels = "0.3, 0.4"
21-
ChainRulesCore = "1.14"
22-
FFTW = "1.4"
21+
ChainRulesCore = "1"
22+
FFTW = "1"
2323
Flux = "0.13"
2424
GeometricFlux = "0.12"
2525
KernelAbstractions = "0.7, 0.8"

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ And then train as a Flux model.
7676

7777
```julia
7878
loss(𝐱, 𝐲) = l₂loss(model(𝐱), 𝐲)
79-
opt = Flux.Optimiser(WeightDecay(1f-4), Flux.ADAM(1f-3))
79+
opt = Flux.Optimiser(WeightDecay(1f-4), Flux.Adam(1f-3))
8080
Flux.@epochs 50 Flux.train!(loss, params(model), data, opt)
8181
```
8282

@@ -102,7 +102,7 @@ loss(xtrain, ytrain, sensor) = Flux.Losses.mse(model(xtrain, sensor), ytrain)
102102
evalcb() = @show(loss(xval, yval, grid))
103103

104104
learning_rate = 0.001
105-
opt = ADAM(learning_rate)
105+
opt = Adam(learning_rate)
106106
parameters = params(model)
107107
Flux.@epochs 400 Flux.train!(loss, parameters, [(xtrain, ytrain, grid)], opt, cb=evalcb)
108108
```

docs/src/index.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ And then train as a Flux model.
5353

5454
```julia
5555
loss(𝐱, 𝐲) = l₂loss(model(𝐱), 𝐲)
56-
opt = Flux.Optimiser(WeightDecay(1f-4), Flux.ADAM(1f-3))
56+
opt = Flux.Optimiser(WeightDecay(1f-4), Flux.Adam(1f-3))
5757
Flux.@epochs 50 Flux.train!(loss, params(model), data, opt)
5858
```
5959

@@ -80,7 +80,7 @@ loss(xtrain, ytrain, sensor) = Flux.Losses.mse(model(xtrain, sensor), ytrain)
8080
evalcb() = @show(loss(xval, yval, grid))
8181

8282
learning_rate = 0.001
83-
opt = ADAM(learning_rate)
83+
opt = Adam(learning_rate)
8484
parameters = params(model)
8585
Flux.@epochs 400 Flux.train!(loss, parameters, [(xtrain, ytrain, grid)], opt, cb=evalcb)
8686
```

example/Burgers/src/Burgers.jl

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ function train(; cuda = true, η₀ = 1.0f-3, λ = 1.0f-4, epochs = 500)
5656
model = FourierNeuralOperator(ch = (2, 64, 64, 64, 64, 64, 128, 1), modes = (16,),
5757
σ = gelu)
5858
data = get_dataloader()
59-
optimiser = Flux.Optimiser(WeightDecay(λ), Flux.ADAM(η₀))
59+
optimiser = Flux.Optimiser(WeightDecay(λ), Flux.Adam(η₀))
6060
loss_func = l₂loss
6161

6262
learner = Learner(model, data, optimiser, loss_func,
@@ -88,7 +88,7 @@ function train_nomad(; n = 300, cuda = true, learning_rate = 0.001, epochs = 400
8888
grid = rand(collect(0:0.001:1), (280, 1024)) |> device
8989
gridval = rand(collect(0:0.001:1), (20, 1024)) |> device
9090

91-
opt = ADAM(learning_rate)
91+
opt = Adam(learning_rate)
9292

9393
m = NOMAD((1024, 1024), (2048, 1024), gelu, gelu) |> device
9494

example/Burgers/src/Burgers_deeponet.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ function train_don(; n = 300, cuda = true, learning_rate = 0.001, epochs = 400)
2626

2727
grid = collect(range(0, 1, length = 1024)') |> device
2828

29-
opt = ADAM(learning_rate)
29+
opt = Adam(learning_rate)
3030

3131
m = DeepONet((1024, 1024, 1024), (1, 1024, 1024), gelu, gelu) |> device
3232

example/Burgers/test/runtests.jl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
using Burgers
2+
using FluxTraining
23
using Test
34

45
@testset "Burgers" begin

example/DoublePendulum/src/DoublePendulum.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ function train(; cuda = true, Δt = 1, η₀ = 1.0f-3, λ = 1.0f-4, epochs = 20)
9393
model = FourierNeuralOperator(ch = (2, 64, 64, 64, 64, 64, 128, 2), modes = (4, 16),
9494
σ = gelu)
9595
data = get_dataloader(Δt = Δt)
96-
optimiser = Flux.Optimiser(WeightDecay(λ), Flux.ADAM(η₀))
96+
optimiser = Flux.Optimiser(WeightDecay(λ), Flux.Adam(η₀))
9797
loss_func = l₂loss
9898

9999
learner = Learner(model, data, optimiser, loss_func,

example/DoublePendulum/test/runtests.jl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
using DoublePendulum
2+
using FluxTraining
23
using Test
34

45
@testset "DoublePendulum" begin

example/FlowOverCircle/src/FlowOverCircle.jl

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ function train(; cuda = true, η₀ = 1.0f-3, λ = 1.0f-4, epochs = 50)
6262
model = MarkovNeuralOperator(ch = (1, 64, 64, 64, 64, 64, 1), modes = (24, 24),
6363
σ = gelu)
6464
data = get_dataloader()
65-
optimiser = Flux.Optimiser(WeightDecay(λ), Flux.ADAM(η₀))
65+
optimiser = Flux.Optimiser(WeightDecay(λ), Flux.Adam(η₀))
6666
loss_func = l₂loss
6767

6868
learner = Learner(model, data, optimiser, loss_func,
@@ -92,7 +92,7 @@ function train_gno(; cuda = true, η₀ = 1.0f-3, λ = 1.0f-4, epochs = 50)
9292
WithGraph(featured_graph, GraphKernel(Dense(2 * 16, 16, gelu), 16)),
9393
Dense(16, 1))
9494
data = get_dataloader(batchsize = 16, flatten = true)
95-
optimiser = Flux.Optimiser(WeightDecay(λ), Flux.ADAM(η₀))
95+
optimiser = Flux.Optimiser(WeightDecay(λ), Flux.Adam(η₀))
9696
loss_func = l₂loss
9797

9898
learner = Learner(model, data, optimiser, loss_func,

0 commit comments

Comments (0)