
Commit dabd150

Use Adam instead of ADAM (#247)
1 parent c0cbc53 commit dabd150

8 files changed: +14 -14 lines changed


docs/introduction.md

Lines changed: 1 addition & 1 deletion
@@ -90,7 +90,7 @@ The customizable, expanded version of the code looks like this:
 dls = taskdataloaders(data, task)
 model = taskmodel(task, Models.xresnet18())
 lossfn = tasklossfn(task)
-learner = Learner(model, dls, ADAM(), lossfn, ToGPU(), Metrics(accuracy))
+learner = Learner(model, dls, Adam(), lossfn, ToGPU(), Metrics(accuracy))
 ```
 
 At this step, we can also pass in any number of [callbacks](https://fluxml.ai/FluxTraining.jl/dev/docs/callbacks/reference.md.html) to customize the training. Here [`ToGPU`](#) ensures an available GPU is used, and [`Metrics`](#) adds additional metrics to track during training.
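For context, the expanded snippet now reads roughly as follows once assembled. This is a sketch that assumes `data` and `task` were built earlier in the tutorial, and the final `fitonecycle!` call is only illustrative of how training would then be started:

```julia
using FastAI, Flux   # recent Flux versions export the optimizer as `Adam`

# Assumed to exist from earlier in the tutorial: `data`, `task`.
dls = taskdataloaders(data, task)              # training/validation dataloaders
model = taskmodel(task, Models.xresnet18())    # task-specific model from a backbone
lossfn = tasklossfn(task)                      # loss function matching the task
learner = Learner(model, dls, Adam(), lossfn,  # `Adam()` replaces the old `ADAM()` spelling
                  ToGPU(), Metrics(accuracy))

fitonecycle!(learner, 10)                      # illustrative: train for 10 epochs
```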

docs/learning_methods.md

Lines changed: 1 addition & 1 deletion
@@ -176,7 +176,7 @@ model = Chain(
     Dense(512, length(task.classes)),
 )
 )
-optimizer = ADAM()
+optimizer = Adam()
 lossfn = Flux.Losses.logitcrossentropy
 
 learner = Learner(model, lossfn; data = (traindl, valdl), optimizer)
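A small variation on the keyword-style construction above, only to show that the renamed optimizer still takes an explicit learning rate. `model`, `traindl` and `valdl` are assumed to be defined as on this page, and the `1e-3` value is just a placeholder:

```julia
using Flux
using FastAI

optimizer = Adam(1e-3)                   # the default rate written out; placeholder value
lossfn = Flux.Losses.logitcrossentropy
learner = Learner(model, lossfn; data = (traindl, valdl), optimizer)
```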

docs/notebooks/imagesegmentation.ipynb

Lines changed: 1 addition & 1 deletion
@@ -479,7 +479,7 @@
 ],
 "source": [
 "traindl, validdl = taskdataloaders(data, task, 16)\n",
-"optimizer = ADAM()\n",
+"optimizer = Adam()\n",
 "learner = Learner(model, lossfn; data=(traindl, validdl), optimizer, callbacks=[ToGPU()])"
 ]
 },

docs/notebooks/keypointregression.ipynb

Lines changed: 1 addition & 1 deletion
@@ -508,7 +508,7 @@
 " model,\n",
 " tasklossfn(task);\n",
 " data=(traindl, validdl),\n",
-" optimizer=Flux.ADAM(),\n",
+" optimizer=Flux.Adam(),\n",
 " callbacks=[ToGPU()])"
 ]
 },

docs/notebooks/siamese.ipynb

Lines changed: 3 additions & 3 deletions
@@ -872,7 +872,7 @@
 {
 "data": {
 "text/plain": [
-"ADAM(0.001, (0.9, 0.999), IdDict{Any, Any}())"
+"Adam(0.001, (0.9, 0.999), IdDict{Any, Any}())"
 ]
 },
 "execution_count": 24,
@@ -882,7 +882,7 @@
 ],
 "source": [
 "lossfn = Flux.Losses.logitcrossentropy\n",
-"optimizer = Flux.ADAM()"
+"optimizer = Flux.Adam()"
 ]
 },
 {
@@ -965,7 +965,7 @@
 "h, w, ch, b = Flux.outputsize(encoder, (128, 128, 3, 1))\n",
 "head = Models.visionhead(2ch, 2)\n",
 "model = SiameseModel(encoder, head);\n",
-"learner = Learner(model, (traindl, valdl), ADAM(0.01), lossfn, callbacks...)"
+"learner = Learner(model, (traindl, valdl), Adam(0.01), lossfn, callbacks...)"
 ]
 },
 {
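The printed value in the first hunk shows Adam's default hyperparameters (a learning rate of 0.001 and the momentum pair (0.9, 0.999)) together with its internal state dictionary. A short sketch of constructing the optimizer with non-default values, as the last cell does with `Adam(0.01)`; the `3e-4` and `(0.9, 0.99)` values are just placeholders:

```julia
using Flux.Optimise: Adam   # spelled `ADAM` in older Flux releases

opt_default = Adam()                   # Adam(0.001, (0.9, 0.999), ...)
opt_fast    = Adam(0.01)               # larger learning rate, as in the last cell above
opt_custom  = Adam(3e-4, (0.9, 0.99))  # learning rate and beta pair set explicitly
```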

src/learner.jl

Lines changed: 2 additions & 2 deletions
@@ -13,7 +13,7 @@ Create a [`Learner`](#) to train a model for learning task `task` using
 - `backbone = nothing`: Backbone model to construct task-specific model from using
   [`taskmodel`](#)`(task, backbone)`.
 - `model = nothing`: Complete model to use. If given, the `backbone` argument is ignored.
-- `optimizer = ADAM()`: Optimizer passed to `Learner`.
+- `optimizer = Adam()`: Optimizer passed to `Learner`.
 - `lossfn = `[`tasklossfn`](#)`(task)`: Loss function passed to `Learner`.
 
 Any other keyword arguments will be passed to [`taskdataloaders`](#).
@@ -51,7 +51,7 @@ function tasklearner(task::LearningTask,
     callbacks = [],
     pctgval = 0.2,
     batchsize = 16,
-    optimizer = ADAM(),
+    optimizer = Adam(),
     lossfn = tasklossfn(task),
     kwargs...)
     if isnothing(model)
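Based on the keyword arguments documented in the first hunk, a call might look like the sketch below. The positional data arguments are elided in the hunk, so the `tasklearner(task, data; ...)` form is an assumption, and the backbone, batch size and callbacks are placeholders:

```julia
# Hypothetical usage of `tasklearner`; the concrete argument values are placeholders.
learner = tasklearner(task, data;
                      backbone = Models.xresnet18(),
                      optimizer = Adam(),
                      batchsize = 32,
                      callbacks = [ToGPU(), Metrics(accuracy)])
```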

src/training/onecycle.jl

Lines changed: 4 additions & 4 deletions
@@ -54,11 +54,11 @@ end
 # ## Tests
 
 @testset "decay_optim" begin
-    optim = ADAM()
+    optim = Adam()
     @test decay_optim(optim, 0.1) isa Optimiser
-    @test decay_optim(Optimiser(ADAM(), ADAM()), 0.1) isa Optimiser
+    @test decay_optim(Optimiser(Adam(), Adam()), 0.1) isa Optimiser
     @test decay_optim(optim, 0.1).os[1] isa WeightDecay
-    o = decay_optim(Optimiser(ADAM(), WeightDecay(0.5)), 0.1)
+    o = decay_optim(Optimiser(Adam(), WeightDecay(0.5)), 0.1)
     @test o.os[1] isa WeightDecay
-    @test o.os[2] isa ADAM
+    @test o.os[2] isa Adam
 end
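The assertions rely on Flux's `Optimiser`, which stores its composed rules in the `os` field. A minimal sketch of the shape the tests expect `decay_optim` to produce (the helper itself is internal to this package):

```julia
using Flux.Optimise: Optimiser, Adam, WeightDecay

# The tests above assert that decay_optim yields an Optimiser whose first
# rule applies weight decay and which still contains the original Adam rule.
o = Optimiser(WeightDecay(0.1), Adam())
o.os[1] isa WeightDecay   # true
o.os[2] isa Adam          # true
```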

test/runtests.jl

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ using InlineTest
 using ..FastAI
 import ..FastAI: Block, Encoding, encodedblock, decodedblock, encode, decode,
     testencoding, test_task_show, checkblock
-using Flux.Optimise: Optimiser, ADAM, apply!
+using Flux.Optimise: Optimiser, Adam, apply!
 
 ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"
 include("testdata.jl")
