diff --git a/src/models/GenericHybridModel.jl b/src/models/GenericHybridModel.jl
index 2587d208..8037fd52 100644
--- a/src/models/GenericHybridModel.jl
+++ b/src/models/GenericHybridModel.jl
@@ -103,7 +103,7 @@ function constructHybridModel(
     @assert all(n in all_names for n in neural_param_names) "neural_param_names ⊆ param_names"
 
     # if empty predictors do not construct NN
-    if length(predictors) > 0
+    if length(predictors) > 0 && length(neural_param_names) > 0
         in_dim = length(predictors)
         out_dim = length(neural_param_names)
 
diff --git a/src/utils/loss_fn.jl b/src/utils/loss_fn.jl
index 786a8d5c..f369baa2 100644
--- a/src/utils/loss_fn.jl
+++ b/src/utils/loss_fn.jl
@@ -127,6 +127,29 @@ function loss_fn(ŷ, y, y_nan, ::Val{:kgeLoss})
     )
 end
 
+function loss_fn(ŷ, y, y_nan, ::Val{:β})
+    ŷv = ŷ[y_nan]
+    yv = y[y_nan]
+
+    μ_s = mean(ŷv)
+    μ_o = mean(yv)
+
+    β = μ_s / μ_o
+
+    return β
+end
+
+function loss_fn(ŷ, y, y_nan, ::Val{:α})
+    ŷv = ŷ[y_nan]
+    yv = y[y_nan]
+
+    σ_s = std(ŷv)
+    σ_o = std(yv)
+    α = σ_s / σ_o
+
+    return α
+end
+
 # Kling–Gupta Efficiency metric (to MAXIMIZE, e.g. for reporting)
 function loss_fn(ŷ, y, y_nan, ::Val{:kge})
     kge_loss = loss_fn(ŷ, y, y_nan, Val(:kgeLoss))
diff --git a/test/test_loss_fn.jl b/test/test_loss_fn.jl
index 03e57453..519f86e1 100644
--- a/test/test_loss_fn.jl
+++ b/test/test_loss_fn.jl
@@ -55,6 +55,12 @@ using EasyHybrid: bestdirection, isbetter, check_training_loss, Minimize, Maximi
     # KGE test (1 - KGE Loss)
     @test loss_fn(ŷ, y, y_nan, Val(:kge)) ≈ 1.0 - kge_loss
 
+    # β test (mean ratio)
+    @test loss_fn(ŷ, y, y_nan, Val(:β)) ≈ β
+
+    # α test (standard deviation ratio)
+    @test loss_fn(ŷ, y, y_nan, Val(:α)) ≈ α
+
     # PBKGE Loss test (Partial Kling-Gupta Efficiency)
     μ_s = mean(ŷ)
     μ_o = mean(y)
@@ -123,6 +129,15 @@ using EasyHybrid: bestdirection, isbetter, check_training_loss, Minimize, Maximi
     @test loss_fn(ŷ, y, y_nan, Val(:pbkgeLoss)) ≈ pbkge_loss
     @test loss_fn(ŷ, y, y_nan, Val(:pbkge)) ≈ 1.0 - pbkge_loss
 
+    # β test with NaN handling
+    @test loss_fn(ŷ, y, y_nan, Val(:β)) ≈ β
+
+    # α test with NaN handling
+    σ_s = std(valid_ŷ)
+    σ_o = std(valid_y)
+    α = σ_s / σ_o
+    @test loss_fn(ŷ, y, y_nan, Val(:α)) ≈ α
+
     # Test NaN handling for generic functions
     @test loss_fn(ŷ, y, y_nan, simple_loss) ≈ mean(abs2, valid_ŷ .- valid_y)
     @test loss_fn(ŷ, y, y_nan, (weighted_loss, (2.0,))) ≈ 2.0 * mean(abs2, valid_ŷ .- valid_y)
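
Note on the `constructHybridModel` change: the added `length(neural_param_names) > 0` guard presumably prevents constructing a network with `out_dim == 0` when predictors are present but no parameter is designated as neural. A minimal sketch of that case (the predictor names here are hypothetical, not taken from the package):

```julia
predictors = [:temp, :precip]   # inputs are available...
neural_param_names = Symbol[]   # ...but every parameter is global/fixed

# Old guard: `length(predictors) > 0` alone is true, so a zero-output
# network would be built. The new guard also requires at least one
# neural parameter, so construction is correctly skipped here.
if length(predictors) > 0 && length(neural_param_names) > 0
    in_dim = length(predictors)
    out_dim = length(neural_param_names)
    # ... construct the NN mapping in_dim inputs to out_dim outputs ...
end
```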
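Note on the new `:β` and `:α` metrics: these are the bias and variability components of the Kling–Gupta Efficiency, KGE = 1 − √((r − 1)² + (α − 1)² + (β − 1)²), where r is the linear correlation between simulations and observations. A standalone sketch of what the new methods compute, following the diff's convention that `y_nan` is a Boolean mask of valid entries (the data values are hypothetical):

```julia
using Statistics

ŷ = [1.0, 2.0, NaN, 4.0]            # simulated values
y = [1.1, 1.9, 3.0, 3.8]            # observed values
y_nan = .!(isnan.(ŷ) .| isnan.(y))  # true where both entries are valid

β = mean(ŷ[y_nan]) / mean(y[y_nan])  # bias ratio: 1 means no mean bias
α = std(ŷ[y_nan]) / std(y[y_nan])    # variability ratio: 1 means matched spread
```

Both ratios equal 1 for a perfect simulation; unlike `:kgeLoss`, they are not quantities to minimize directly, which seems consistent with their role alongside `:kge` as reporting diagnostics.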