This repository was archived by the owner on Sep 28, 2024. It is now read-only.

Commit 4805885: Implement l2 loss
Parent: 6014f52

3 files changed, +10 −1 lines changed


example/FlowOverCircle/src/FlowOverCircle.jl

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ function train()
         Dense(64, 1),
     ) |> device
 
-    loss(𝐱, 𝐲) = sum(abs2, 𝐲 .- m(𝐱)) / size(𝐱)[end]
+    loss(𝐱, 𝐲) = l₂loss(m(𝐱), 𝐲)
 
     opt = Flux.Optimiser(WeightDecay(1f-4), Flux.ADAM(1f-3))
 
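For reference, a rough sketch of what the old inline definition and the new l₂loss call compute, using dummy 4-D (channel, x, y, batch) arrays in place of the real data loader and model output (the array sizes and the stand-in 𝐲̂ are illustrative, not taken from the example):

    using NeuralOperators   # provides l₂loss after this commit

    𝐱 = rand(Float32, 1, 64, 64, 8)   # stand-in for an input batch
    𝐲 = rand(Float32, 1, 64, 64, 8)   # stand-in for the targets
    𝐲̂ = rand(Float32, 1, 64, 64, 8)   # stand-in for the model output m(𝐱)

    # Old definition: total squared error divided by the batch size.
    old_loss = sum(abs2, 𝐲 .- 𝐲̂) / size(𝐱)[end]

    # New definition: per-sample l₂ norm over the grid dimensions,
    # aggregated with `mean` (see src/loss.jl below).
    new_loss = l₂loss(𝐲̂, 𝐲)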

src/NeuralOperators.jl

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@ module NeuralOperators
 
 include("Transform/Transform.jl")
 include("operator_kernel.jl")
+include("loss.jl")
 include("model.jl")
 include("DeepONet.jl")
 include("subnets.jl")

src/loss.jl

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+export l₂loss
+
+function l₂loss(𝐲̂, 𝐲; agg=mean, is_grid_normalized=true)
+    feature_dims = 2:(ndims(𝐲)-1)
+    loss = agg(.√(sum(abs2, 𝐲̂-𝐲, dims=feature_dims)))
+
+    return is_grid_normalized ? loss/prod(feature_dims) : loss
+end
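A minimal usage sketch of the new loss, assuming the package is loaded with this commit applied and that the data follows the (channel, x, y, batch) layout of the FlowOverCircle example (array sizes are illustrative):

    using Statistics        # `mean` is the default aggregation
    using NeuralOperators   # exports l₂loss

    𝐲̂ = rand(Float32, 1, 64, 64, 8)
    𝐲 = rand(Float32, 1, 64, 64, 8)

    # Default: per-sample l₂ norm of the error over dims 2:3, averaged over
    # the batch, then scaled by 1/prod(feature_dims) (is_grid_normalized=true).
    l₂loss(𝐲̂, 𝐲)

    # The same quantity computed by hand:
    feature_dims = 2:(ndims(𝐲) - 1)
    per_sample = .√(sum(abs2, 𝐲̂ - 𝐲, dims=feature_dims))
    mean(per_sample) / prod(feature_dims)

    # Keyword toggles: skip the grid normalization or change the aggregation.
    l₂loss(𝐲̂, 𝐲; is_grid_normalized=false)
    l₂loss(𝐲̂, 𝐲; agg=sum)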

0 commit comments
