From d057548544e3836536f54179b17903985ed514cf Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Mon, 3 Nov 2025 23:53:15 +0530 Subject: [PATCH 01/33] Problem 14 and 15 --- src/ADNLPProblems/boundary.jl | 52 ++++++++++++++++ src/ADNLPProblems/variational.jl | 103 +++++++++++++++++++++++++++++++ src/Meta/boundary.jl | 27 ++++++++ src/Meta/variational.jl | 27 ++++++++ src/PureJuMP/boundary.jl | 13 ++++ src/PureJuMP/variational.jl | 29 +++++++++ 6 files changed, 251 insertions(+) create mode 100644 src/ADNLPProblems/boundary.jl create mode 100644 src/ADNLPProblems/variational.jl create mode 100644 src/Meta/boundary.jl create mode 100644 src/Meta/variational.jl create mode 100644 src/PureJuMP/boundary.jl create mode 100644 src/PureJuMP/variational.jl diff --git a/src/ADNLPProblems/boundary.jl b/src/ADNLPProblems/boundary.jl new file mode 100644 index 00000000..9cf43cd1 --- /dev/null +++ b/src/ADNLPProblems/boundary.jl @@ -0,0 +1,52 @@ +export boundary + +function boundary(; use_nls::Bool = false, kwargs...) + model = use_nls ? :nls : :nlp + return boundary(Val(model); kwargs...) +end + +function boundary(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + h = one(T) / T(n + 1) + function f(x; n = length(x)) + s = zero(T) + for i = 1:n + xi = x[i] + xm = (i == 1) ? zero(T) : x[i - 1] + xp = (i == n) ? zero(T) : x[i + 1] + z = T(2) * xi - xm - xp + z += (h^2 / T(2)) * (xi + T(i) * h + one(T))^3 + s += z^2 + end + return s + end + + x0 = Vector{T}(undef, n) + for i = 1:n + x0[i] = T(i) * h * (one(T) - T(i) * h) + end + + return ADNLPModels.ADNLPModel(f, x0, name = "boundary", minimize = true; kwargs...) +end + +function boundary(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + h = one(T) / T(n + 1) + + function F!(r, x) + @inbounds for i = 1:length(x) + xi = x[i] + xm = (i == 1) ? zero(T) : x[i - 1] + xp = (i == length(x)) ? zero(T) : x[i + 1] + z = T(2) * xi - xm - xp + z += (h^2 / T(2)) * (xi + T(i) * h + one(T))^3 + r[i] = z + end + return r + end + + x0 = Vector{T}(undef, n) + for i = 1:n + x0[i] = T(i) * h * (one(T) - T(i) * h) + end + + return ADNLPModels.ADNLSModel!(F!, x0, n, name = "boundary-nls"; kwargs...) +end diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl new file mode 100644 index 00000000..43e2a7a9 --- /dev/null +++ b/src/ADNLPProblems/variational.jl @@ -0,0 +1,103 @@ +export variational + +function variational(; use_nls::Bool = false, kwargs...) + model = use_nls ? :nls : :nlp + return variational(Val(model); kwargs...) +end + +function variational(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + h = one(T) / T(n + 1) + + function f(x; n = length(x)) + S = eltype(x) + hS = convert(S, h) + + xext = Vector{S}(undef, n + 2) + xext[1] = zero(S) + @inbounds for i = 1:n + xext[i + 1] = x[i] + end + xext[n + 2] = zero(S) + + term1 = zero(S) + @inbounds for i = 1:n + xi = x[i] + xip1 = xext[i + 2] + term1 += xi * (xi - xip1) / hS + end + + function diffquot(a, b) + d = b - a + if d == zero(d) + return exp(a) + else + return (exp(b) - exp(a)) / d + end + end + + term2 = zero(S) + @inbounds for j = 0:n + a = xext[j + 1] + b = xext[j + 2] + term2 += diffquot(a, b) + end + return 2 * (term1 + n * (hS / 2) * term2) + end + + x0 = Vector{T}(undef, n) + for i = 1:n + x0[i] = T(i) * h * (one(T) - T(i) * h) + end + + return ADNLPModels.ADNLPModel(f, x0, name = "variational", minimize = true; kwargs...) 
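+
+  # Note: the difference quotient (exp(b) - exp(a)) / (b - a) computed by
+  # `diffquot` above is the exact mean of exp over [a, b] (the integral of
+  # exp(a + s * (b - a)) for s in [0, 1]), and exp(a) is its limiting value
+  # as b -> a; `term2` therefore sums the mean of exp(x(t)) over each of the
+  # n + 1 segments of the piecewise-linear interpolant of (0, x[1], ..., x[n], 0).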
+end + +function variational(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + h = one(T) / T(n + 1) + + function diffquot(a, b) + d = b - a + if d == zero(d) + return exp(a) + else + return (exp(b) - exp(a)) / d + end + end + + function F!(r, x) + S = eltype(x) + hS = convert(S, h) + + xext = Vector{S}(undef, n + 2) + xext[1] = zero(S) + @inbounds for i = 1:n + xext[i + 1] = x[i] + end + xext[n + 2] = zero(S) + + term1 = zero(S) + @inbounds for i = 1:n + xi = x[i] + xip1 = xext[i + 2] + term1 += xi * (xi - xip1) / hS + end + + term2 = zero(S) + @inbounds for j = 0:n + a = xext[j + 1] + b = xext[j + 2] + term2 += diffquot(a, b) + end + + Fval = 2 * (term1 + n * (hS / 2) * term2) + r[1] = sqrt(Fval < zero(Fval) ? zero(Fval) : Fval) + return r + end + + x0 = Vector{T}(undef, n) + for i = 1:n + x0[i] = T(i) * h * (one(T) - T(i) * h) + end + + return ADNLPModels.ADNLSModel!(F!, x0, 1, name = "variational-nls"; kwargs...) +end diff --git a/src/Meta/boundary.jl b/src/Meta/boundary.jl new file mode 100644 index 00000000..7848a3ad --- /dev/null +++ b/src/Meta/boundary.jl @@ -0,0 +1,27 @@ +boundary_meta = Dict( + :nvar => 100, + :variable_nvar => true, + :ncon => 0, + :variable_ncon => false, + :minimize => true, + :name => "boundary", + :has_equalities_only => false, + :has_inequalities_only => false, + :has_bounds => false, + :has_fixed_variables => false, + :objtype => :least_squares, + :contype => :unconstrained, + :best_known_lower_bound => -Inf, + :best_known_upper_bound => Inf, + :is_feasible => true, + :defined_everywhere => missing, + :origin => :academic, +) + +get_boundary_nvar(; n::Integer = default_nvar, kwargs...) = 1 * n +get_boundary_ncon(; n::Integer = default_nvar, kwargs...) = 0 +get_boundary_nlin(; n::Integer = default_nvar, kwargs...) = 0 +get_boundary_nnln(; n::Integer = default_nvar, kwargs...) = 0 +get_boundary_nequ(; n::Integer = default_nvar, kwargs...) = 0 +get_boundary_nineq(; n::Integer = default_nvar, kwargs...) = 0 +get_boundary_nls_nequ(; n::Integer = default_nvar, kwargs...) = 1 * n diff --git a/src/Meta/variational.jl b/src/Meta/variational.jl new file mode 100644 index 00000000..472d4b6f --- /dev/null +++ b/src/Meta/variational.jl @@ -0,0 +1,27 @@ +variational_meta = Dict( + :nvar => 100, + :variable_nvar => true, + :ncon => 0, + :variable_ncon => false, + :minimize => true, + :name => "variational", + :has_equalities_only => false, + :has_inequalities_only => false, + :has_bounds => false, + :has_fixed_variables => false, + :objtype => :other, + :contype => :unconstrained, + :best_known_lower_bound => -Inf, + :best_known_upper_bound => Inf, + :is_feasible => true, + :defined_everywhere => missing, + :origin => :academic, +) + +get_variational_nvar(; n::Integer = default_nvar, kwargs...) = n +get_variational_ncon(; n::Integer = default_nvar, kwargs...) = 0 +get_variational_nlin(; n::Integer = default_nvar, kwargs...) = 0 +get_variational_nnln(; n::Integer = default_nvar, kwargs...) = 0 +get_variational_nequ(; n::Integer = default_nvar, kwargs...) = 0 +get_variational_nineq(; n::Integer = default_nvar, kwargs...) = 0 +get_variational_nls_nequ(; n::Integer = default_nvar, kwargs...) = 1 diff --git a/src/PureJuMP/boundary.jl b/src/PureJuMP/boundary.jl new file mode 100644 index 00000000..d1b62087 --- /dev/null +++ b/src/PureJuMP/boundary.jl @@ -0,0 +1,13 @@ +export boundary + +function boundary(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} + h = one(T) / T(n + 1) + model = Model() + # set a type-stable start vector matching ADNLPProblems.boundary x0 + x0 = [T(i) * h * (one(T) - T(i) * h) for i = 1:n] + @variable(model, x[i = 1:n], start = x0[i]) + @objective(model, Min, + sum((T(2) * x[i] - (i == 1 ? zero(x[1]) : x[i-1]) - (i == n ? zero(x[1]) : x[i+1]) + + (h^2 / T(2)) * (x[i] + T(i) * h + one(T))^3)^2 for i = 1:n)) + return model +end diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl new file mode 100644 index 00000000..d1932c1f --- /dev/null +++ b/src/PureJuMP/variational.jl @@ -0,0 +1,29 @@ +export variational + +function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + h = one(T) / T(n + 1) + model = Model() + # type-stable start matching ADNLPProblems + x0 = [T(i) * h * (one(T) - T(i) * h) for i = 1:n] + @variable(model, x[i = 1:n], start = x0[i]) + + # term1: interior sum plus last boundary term (avoid ternary inside macro) + if n == 1 + @NLexpression(model, term1, x[1] * (x[1] - 0.0) / $(h)) + else + @NLexpression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / $(h) for i = 1:n-1)) + @NLexpression(model, term1, term1_mid + x[n] * (x[n] - 0.0) / $(h)) + end + + # term2: build the boundary terms and interior sum separately to avoid + # invalid indexing in the NL macro (handle n==1 specially) + if n == 1 + @NLexpression(model, term2, (exp(x[1]) - exp(0.0)) / (x[1] - 0.0) + (exp(0.0) - exp(x[1])) / (0.0 - x[1])) + else + @NLexpression(model, term2_mid, sum((exp(x[j+1]) - exp(x[j])) / (x[j+1] - x[j]) for j = 1:n-1)) + @NLexpression(model, term2, (exp(x[1]) - exp(0.0)) / (x[1] - 0.0) + term2_mid + (exp(0.0) - exp(x[n])) / (0.0 - x[n])) + end + + @NLobjective(model, Min, $(T(2)) * (term1 + $(T(n)) * ($(h) / $(T(2))) * term2)) + return model +end From 3eef3eb2cd6259c2985f8a4074b7d771635529e7 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Mon, 3 Nov 2025 23:58:55 +0530 Subject: [PATCH 02/33] documentation --- src/PureJuMP/boundary.jl | 10 ++++++++++ src/PureJuMP/variational.jl | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git a/src/PureJuMP/boundary.jl b/src/PureJuMP/boundary.jl index d1b62087..c12ac833 100644 --- a/src/PureJuMP/boundary.jl +++ b/src/PureJuMP/boundary.jl @@ -1,3 +1,13 @@ +# Discrete boundary value problem +# +# Problem 14 in +# L. Luksan, C. Matonoha and J. Vlcek +# Sparse Test Problems for Unconstrained Optimization, +# Technical Report 1064, +# Institute of Computer Science, +# Academy of Science of the Czech Republic +# +# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization export boundary function boundary(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index d1932c1f..0c92c35e 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -1,3 +1,13 @@ +# Discretization of a variational problem +# +# Problem 15 in +# L. Luksan, C. Matonoha and J. Vlcek +# Sparse Test Problems for Unconstrained Optimization, +# Technical Report 1064, +# Institute of Computer Science, +# Academy of Science of the Czech Republic +# +# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization export variational function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} From ab4800fbb646dea831c536a9557f7bdbf1625032 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Tue, 4 Nov 2025 09:14:22 +0530 Subject: [PATCH 03/33] changes made acc. to review --- src/ADNLPProblems/boundary.jl | 14 ++++---------- src/Meta/boundary.jl | 4 ++-- src/Meta/variational.jl | 2 +- src/PureJuMP/variational.jl | 12 ++++++------ 4 files changed, 13 insertions(+), 19 deletions(-) diff --git a/src/ADNLPProblems/boundary.jl b/src/ADNLPProblems/boundary.jl index 9cf43cd1..af33d3bf 100644 --- a/src/ADNLPProblems/boundary.jl +++ b/src/ADNLPProblems/boundary.jl @@ -10,19 +10,16 @@ function boundary(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k function f(x; n = length(x)) s = zero(T) for i = 1:n - xi = x[i] xm = (i == 1) ? zero(T) : x[i - 1] xp = (i == n) ? zero(T) : x[i + 1] - z = T(2) * xi - xm - xp - z += (h^2 / T(2)) * (xi + T(i) * h + one(T))^3 - s += z^2 + s += (2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + one(T))^3)^2 end return s end x0 = Vector{T}(undef, n) for i = 1:n - x0[i] = T(i) * h * (one(T) - T(i) * h) + x0[i] = i * h * (one(T) - i * h) end return ADNLPModels.ADNLPModel(f, x0, name = "boundary", minimize = true; kwargs...) @@ -33,19 +30,16 @@ function boundary(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, k function F!(r, x) @inbounds for i = 1:length(x) - xi = x[i] xm = (i == 1) ? zero(T) : x[i - 1] xp = (i == length(x)) ? zero(T) : x[i + 1] - z = T(2) * xi - xm - xp - z += (h^2 / T(2)) * (xi + T(i) * h + one(T))^3 - r[i] = z + r[i] = 2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + one(T))^3 end return r end x0 = Vector{T}(undef, n) for i = 1:n - x0[i] = T(i) * h * (one(T) - T(i) * h) + x0[i] = i * h * (one(T) - i * h) end return ADNLPModels.ADNLSModel!(F!, x0, n, name = "boundary-nls"; kwargs...) diff --git a/src/Meta/boundary.jl b/src/Meta/boundary.jl index 7848a3ad..8ba34297 100644 --- a/src/Meta/boundary.jl +++ b/src/Meta/boundary.jl @@ -18,10 +18,10 @@ boundary_meta = Dict( :origin => :academic, ) -get_boundary_nvar(; n::Integer = default_nvar, kwargs...) = 1 * n +get_boundary_nvar(; n::Integer = default_nvar, kwargs...) = n get_boundary_ncon(; n::Integer = default_nvar, kwargs...) = 0 get_boundary_nlin(; n::Integer = default_nvar, kwargs...) = 0 get_boundary_nnln(; n::Integer = default_nvar, kwargs...) = 0 get_boundary_nequ(; n::Integer = default_nvar, kwargs...) = 0 get_boundary_nineq(; n::Integer = default_nvar, kwargs...) = 0 -get_boundary_nls_nequ(; n::Integer = default_nvar, kwargs...) = 1 * n +get_boundary_nls_nequ(; n::Integer = default_nvar, kwargs...) = n diff --git a/src/Meta/variational.jl b/src/Meta/variational.jl index 472d4b6f..27c13691 100644 --- a/src/Meta/variational.jl +++ b/src/Meta/variational.jl @@ -24,4 +24,4 @@ get_variational_nlin(; n::Integer = default_nvar, kwargs...) = 0 get_variational_nnln(; n::Integer = default_nvar, kwargs...) = 0 get_variational_nequ(; n::Integer = default_nvar, kwargs...) = 0 get_variational_nineq(; n::Integer = default_nvar, kwargs...) = 0 -get_variational_nls_nequ(; n::Integer = default_nvar, kwargs...) = 1 +get_variational_nls_nequ(; n::Integer = default_nvar, kwargs...) = 0 diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index 0c92c35e..d4ba4758 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -19,19 +19,19 @@ function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs... 
# term1: interior sum plus last boundary term (avoid ternary inside macro) if n == 1 - @NLexpression(model, term1, x[1] * (x[1] - 0.0) / $(h)) + @expression(model, term1, x[1] * (x[1] - 0.0) / $(h)) else - @NLexpression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / $(h) for i = 1:n-1)) - @NLexpression(model, term1, term1_mid + x[n] * (x[n] - 0.0) / $(h)) + @expression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / $(h) for i = 1:n-1)) + @expression(model, term1, term1_mid + x[n] * (x[n] - 0.0) / $(h)) end # term2: build the boundary terms and interior sum separately to avoid # invalid indexing in the NL macro (handle n==1 specially) if n == 1 - @NLexpression(model, term2, (exp(x[1]) - exp(0.0)) / (x[1] - 0.0) + (exp(0.0) - exp(x[1])) / (0.0 - x[1])) + @expression(model, term2, (exp(x[1]) - exp(0.0)) / (x[1] - 0.0) + (exp(0.0) - exp(x[1])) / (0.0 - x[1])) else - @NLexpression(model, term2_mid, sum((exp(x[j+1]) - exp(x[j])) / (x[j+1] - x[j]) for j = 1:n-1)) - @NLexpression(model, term2, (exp(x[1]) - exp(0.0)) / (x[1] - 0.0) + term2_mid + (exp(0.0) - exp(x[n])) / (0.0 - x[n])) + @expression(model, term2_mid, sum((exp(x[j+1]) - exp(x[j])) / (x[j+1] - x[j]) for j = 1:n-1)) + @expression(model, term2, (exp(x[1]) - exp(0.0)) / (x[1] - 0.0) + term2_mid + (exp(0.0) - exp(x[n])) / (0.0 - x[n])) end @NLobjective(model, Min, $(T(2)) * (term1 + $(T(n)) * ($(h) / $(T(2))) * term2)) From 49c973175281d63029fbdb4d87d985ef480ef47c Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Tue, 4 Nov 2025 09:16:01 +0530 Subject: [PATCH 04/33] Update variational.jl --- src/PureJuMP/variational.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index d4ba4758..af21a278 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -34,6 +34,6 @@ function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs... @expression(model, term2, (exp(x[1]) - exp(0.0)) / (x[1] - 0.0) + term2_mid + (exp(0.0) - exp(x[n])) / (0.0 - x[n])) end - @NLobjective(model, Min, $(T(2)) * (term1 + $(T(n)) * ($(h) / $(T(2))) * term2)) + @objective(model, Min, $(T(2)) * (term1 + $(T(n)) * ($(h) / $(T(2))) * term2)) return model end From c956fb5dbd90bf810dc15c49616030452adf20c0 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Tue, 4 Nov 2025 09:24:30 +0530 Subject: [PATCH 05/33] variational --- src/ADNLPProblems/variational.jl | 15 ++++----------- src/Meta/variational.jl | 2 +- src/PureJuMP/variational.jl | 24 ++++++++++++------------ 3 files changed, 17 insertions(+), 24 deletions(-) diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl index 43e2a7a9..bca1df60 100644 --- a/src/ADNLPProblems/variational.jl +++ b/src/ADNLPProblems/variational.jl @@ -6,7 +6,7 @@ function variational(; use_nls::Bool = false, kwargs...) end function variational(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - h = one(T) / T(n + 1) + h = 1 // (n + 1) function f(x; n = length(x)) S = eltype(x) @@ -26,14 +26,7 @@ function variational(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64 term1 += xi * (xi - xip1) / hS end - function diffquot(a, b) - d = b - a - if d == zero(d) - return exp(a) - else - return (exp(b) - exp(a)) / d - end - end + diffquot(a, b) = (b - a == zero(b - a)) ? 
exp(a) : (exp(b) - exp(a)) / (b - a) term2 = zero(S) @inbounds for j = 0:n @@ -46,14 +39,14 @@ function variational(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64 x0 = Vector{T}(undef, n) for i = 1:n - x0[i] = T(i) * h * (one(T) - T(i) * h) + x0[i] = i * h * (one(T) - i * h) end return ADNLPModels.ADNLPModel(f, x0, name = "variational", minimize = true; kwargs...) end function variational(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - h = one(T) / T(n + 1) + h = 1 // (n + 1) function diffquot(a, b) d = b - a diff --git a/src/Meta/variational.jl b/src/Meta/variational.jl index 27c13691..472d4b6f 100644 --- a/src/Meta/variational.jl +++ b/src/Meta/variational.jl @@ -24,4 +24,4 @@ get_variational_nlin(; n::Integer = default_nvar, kwargs...) = 0 get_variational_nnln(; n::Integer = default_nvar, kwargs...) = 0 get_variational_nequ(; n::Integer = default_nvar, kwargs...) = 0 get_variational_nineq(; n::Integer = default_nvar, kwargs...) = 0 -get_variational_nls_nequ(; n::Integer = default_nvar, kwargs...) = 0 +get_variational_nls_nequ(; n::Integer = default_nvar, kwargs...) = 1 diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index af21a278..072cb46a 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -11,29 +11,29 @@ export variational function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - h = one(T) / T(n + 1) + h = 1 // (n + 1) model = Model() # type-stable start matching ADNLPProblems - x0 = [T(i) * h * (one(T) - T(i) * h) for i = 1:n] + x0 = [i * h * (1 - i * h) for i = 1:n] @variable(model, x[i = 1:n], start = x0[i]) # term1: interior sum plus last boundary term (avoid ternary inside macro) if n == 1 - @expression(model, term1, x[1] * (x[1] - 0.0) / $(h)) + @expression(model, term1, x[1] * (x[1] - 0.0) / h) else - @expression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / $(h) for i = 1:n-1)) - @expression(model, term1, term1_mid + x[n] * (x[n] - 0.0) / $(h)) + @expression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / h for i = 1:n-1)) + @expression(model, term1, term1_mid + x[n] * (x[n] - 0.0) / h) end - # term2: build the boundary terms and interior sum separately to avoid - # invalid indexing in the NL macro (handle n==1 specially) + diffquot(a, b) = (b - a == zero(b - a)) ? 
exp(a) : (exp(b) - exp(a)) / (b - a) + JuMP.register(model, :diffquot, 2, diffquot; autodiff = true) + if n == 1 - @expression(model, term2, (exp(x[1]) - exp(0.0)) / (x[1] - 0.0) + (exp(0.0) - exp(x[1])) / (0.0 - x[1])) + @expression(model, term2, diffquot(x[1], 0.0) + diffquot(0.0, x[1])) else - @expression(model, term2_mid, sum((exp(x[j+1]) - exp(x[j])) / (x[j+1] - x[j]) for j = 1:n-1)) - @expression(model, term2, (exp(x[1]) - exp(0.0)) / (x[1] - 0.0) + term2_mid + (exp(0.0) - exp(x[n])) / (0.0 - x[n])) + @expression(model, term2_mid, sum(diffquot(x[j+1], x[j]) for j = 1:n-1)) + @expression(model, term2, diffquot(x[1], 0.0) + term2_mid + diffquot(0.0, x[n])) end - - @objective(model, Min, $(T(2)) * (term1 + $(T(n)) * ($(h) / $(T(2))) * term2)) + @objective(model, Min, 2 * (term1 + n * (h / 2) * term2)) return model end From 4ac69d2de46791a2fea59d0ecc01b0305b937e1b Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Tue, 4 Nov 2025 09:31:26 +0530 Subject: [PATCH 06/33] PureJuMP --- src/PureJuMP/boundary.jl | 11 +++++------ src/PureJuMP/variational.jl | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/PureJuMP/boundary.jl b/src/PureJuMP/boundary.jl index c12ac833..c12f2dc2 100644 --- a/src/PureJuMP/boundary.jl +++ b/src/PureJuMP/boundary.jl @@ -10,14 +10,13 @@ # https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization export boundary -function boundary(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - h = one(T) / T(n + 1) +function boundary(; n::Int = default_nvar, kwargs...) + h = 1 // (n + 1) model = Model() - # set a type-stable start vector matching ADNLPProblems.boundary x0 - x0 = [T(i) * h * (one(T) - T(i) * h) for i = 1:n] + x0 = [i * h * (1 - i * h) for i = 1:n] @variable(model, x[i = 1:n], start = x0[i]) @objective(model, Min, - sum((T(2) * x[i] - (i == 1 ? zero(x[1]) : x[i-1]) - (i == n ? zero(x[1]) : x[i+1]) - + (h^2 / T(2)) * (x[i] + T(i) * h + one(T))^3)^2 for i = 1:n)) + sum((2 * x[i] - (i == 1 ? 0 : x[i-1]) - (i == n ? 0 : x[i+1]) + + (h^2 / 2) * (x[i] + i * h + 1)^3)^2 for i = 1:n)) return model end diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index 072cb46a..0bd3ffee 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -10,7 +10,7 @@ # https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization export variational -function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} +function variational(; n::Int = default_nvar, kwargs...) h = 1 // (n + 1) model = Model() # type-stable start matching ADNLPProblems From edd428c2f3f44a548b4a1ab0175eff7bf7d155df Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Tue, 4 Nov 2025 09:37:41 +0530 Subject: [PATCH 07/33] variational changes --- src/ADNLPProblems/variational.jl | 11 ++--------- src/PureJuMP/variational.jl | 8 +++----- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl index bca1df60..632d80b6 100644 --- a/src/ADNLPProblems/variational.jl +++ b/src/ADNLPProblems/variational.jl @@ -48,14 +48,7 @@ end function variational(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} h = 1 // (n + 1) - function diffquot(a, b) - d = b - a - if d == zero(d) - return exp(a) - else - return (exp(b) - exp(a)) / d - end - end + diffquot(a, b) = (b - a == zero(b - a)) ? 
exp(a) : (exp(b) - exp(a)) / (b - a) function F!(r, x) S = eltype(x) @@ -89,7 +82,7 @@ function variational(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64 x0 = Vector{T}(undef, n) for i = 1:n - x0[i] = T(i) * h * (one(T) - T(i) * h) + x0[i] = i * h * (1 - i * h) end return ADNLPModels.ADNLSModel!(F!, x0, 1, name = "variational-nls"; kwargs...) diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index 0bd3ffee..d656b4d0 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -13,26 +13,24 @@ export variational function variational(; n::Int = default_nvar, kwargs...) h = 1 // (n + 1) model = Model() - # type-stable start matching ADNLPProblems x0 = [i * h * (1 - i * h) for i = 1:n] @variable(model, x[i = 1:n], start = x0[i]) - # term1: interior sum plus last boundary term (avoid ternary inside macro) if n == 1 @expression(model, term1, x[1] * (x[1] - 0.0) / h) else @expression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / h for i = 1:n-1)) - @expression(model, term1, term1_mid + x[n] * (x[n] - 0.0) / h) + @expression(model, term1, term1_mid + x[n] * (x[n] - 0) / h) end diffquot(a, b) = (b - a == zero(b - a)) ? exp(a) : (exp(b) - exp(a)) / (b - a) JuMP.register(model, :diffquot, 2, diffquot; autodiff = true) if n == 1 - @expression(model, term2, diffquot(x[1], 0.0) + diffquot(0.0, x[1])) + @expression(model, term2, diffquot(x[1], 0) + diffquot(0, x[1])) else @expression(model, term2_mid, sum(diffquot(x[j+1], x[j]) for j = 1:n-1)) - @expression(model, term2, diffquot(x[1], 0.0) + term2_mid + diffquot(0.0, x[n])) + @expression(model, term2, diffquot(x[1], 0) + term2_mid + diffquot(0, x[n])) end @objective(model, Min, 2 * (term1 + n * (h / 2) * term2)) return model From ceaa6243ccac0f6af4e8c392f607176d1180165e Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Tue, 4 Nov 2025 09:55:24 +0530 Subject: [PATCH 08/33] boundary needing Float64 starts to match ADNLPProblems.boundary. --- src/PureJuMP/boundary.jl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/PureJuMP/boundary.jl b/src/PureJuMP/boundary.jl index c12f2dc2..e0f7e3f3 100644 --- a/src/PureJuMP/boundary.jl +++ b/src/PureJuMP/boundary.jl @@ -11,12 +11,13 @@ export boundary function boundary(; n::Int = default_nvar, kwargs...) - h = 1 // (n + 1) + # compute h and starts using Float64 exactly to match ADNLPProblems default starts + h = one(Float64) / Float64(n + 1) model = Model() - x0 = [i * h * (1 - i * h) for i = 1:n] + x0 = [Float64(i) * h * (1.0 - Float64(i) * h) for i = 1:n] @variable(model, x[i = 1:n], start = x0[i]) @objective(model, Min, - sum((2 * x[i] - (i == 1 ? 0 : x[i-1]) - (i == n ? 0 : x[i+1]) - + (h^2 / 2) * (x[i] + i * h + 1)^3)^2 for i = 1:n)) + sum((2.0 * x[i] - (i == 1 ? 0.0 : x[i-1]) - (i == n ? 
0.0 : x[i+1]) + + (h^2 / 2.0) * (x[i] + Float64(i) * h + 1.0)^3)^2 for i = 1:n)) return model end From c2434a756ce7f35d4e7dff9bd1728fe9fe3685d8 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Wed, 5 Nov 2025 07:44:54 +0530 Subject: [PATCH 09/33] Fix variational/h080: use NonlinearExpr-only JuMP API; remove JuMP.register and replace diffquot with inline exp expressions; convert hs61 to NonlinearExpr constraints to avoid legacy @NL usage --- src/PureJuMP/variational.jl | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index d656b4d0..7a087b97 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -23,14 +23,13 @@ function variational(; n::Int = default_nvar, kwargs...) @expression(model, term1, term1_mid + x[n] * (x[n] - 0) / h) end - diffquot(a, b) = (b - a == zero(b - a)) ? exp(a) : (exp(b) - exp(a)) / (b - a) - JuMP.register(model, :diffquot, 2, diffquot; autodiff = true) - + # Use inline expressions for the difference-quotient of exp to avoid registering + # a user function which can interact with JuMP's legacy NL macro machinery. if n == 1 - @expression(model, term2, diffquot(x[1], 0) + diffquot(0, x[1])) + @expression(model, term2, (exp(x[1]) - 1) / x[1] + (exp(x[1]) - 1) / x[1]) else - @expression(model, term2_mid, sum(diffquot(x[j+1], x[j]) for j = 1:n-1)) - @expression(model, term2, diffquot(x[1], 0) + term2_mid + diffquot(0, x[n])) + @expression(model, term2_mid, sum((exp(x[j+1]) - exp(x[j])) / (x[j+1] - x[j]) for j = 1:n-1)) + @expression(model, term2, (exp(x[1]) - 1) / x[1] + term2_mid + (exp(x[n]) - 1) / x[n]) end @objective(model, Min, 2 * (term1 + n * (h / 2) * term2)) return model From 0a2b2e3ff9090a9722f2604c6194da755085bee5 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Wed, 5 Nov 2025 07:45:20 +0530 Subject: [PATCH 10/33] doc --- src/PureJuMP/variational.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index 7a087b97..ed2c08f6 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -23,8 +23,6 @@ function variational(; n::Int = default_nvar, kwargs...) @expression(model, term1, term1_mid + x[n] * (x[n] - 0) / h) end - # Use inline expressions for the difference-quotient of exp to avoid registering - # a user function which can interact with JuMP's legacy NL macro machinery. if n == 1 @expression(model, term2, (exp(x[1]) - 1) / x[1] + (exp(x[1]) - 1) / x[1]) else From 7ef626b9cf01c77793e0e86e78516a43089915ec Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Wed, 5 Nov 2025 10:23:44 +0530 Subject: [PATCH 11/33] variational problem using JuMP and ADNLProblems.jl --- src/ADNLPProblems/variational.jl | 13 ++++---- src/PureJuMP/variational.jl | 56 +++++++++++++++++++++++++++++--- 2 files changed, 58 insertions(+), 11 deletions(-) diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl index 632d80b6..ff255724 100644 --- a/src/ADNLPProblems/variational.jl +++ b/src/ADNLPProblems/variational.jl @@ -5,6 +5,11 @@ function variational(; use_nls::Bool = false, kwargs...) return variational(Val(model); kwargs...) end +function diffquot(a, b) + d = b - a + return exp(a) * (one(d) + d/2 + d^2/6 + d^3/24 + d^4/120) +end + function variational(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} h = 1 // (n + 1) @@ -25,10 +30,8 @@ function variational(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64 xip1 = xext[i + 2] term1 += xi * (xi - xip1) / hS end - - diffquot(a, b) = (b - a == zero(b - a)) ? exp(a) : (exp(b) - exp(a)) / (b - a) - - term2 = zero(S) + + term2 = zero(S) @inbounds for j = 0:n a = xext[j + 1] b = xext[j + 2] @@ -48,8 +51,6 @@ end function variational(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} h = 1 // (n + 1) - diffquot(a, b) = (b - a == zero(b - a)) ? exp(a) : (exp(b) - exp(a)) / (b - a) - function F!(r, x) S = eltype(x) hS = convert(S, h) diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index ed2c08f6..689b17cf 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -10,10 +10,20 @@ # https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization export variational -function variational(; n::Int = default_nvar, kwargs...) +""" +variational_registered(; n::Int = default_nvar, kwargs...) + +Build the variational JuMP model but register the package-level `diffquot` +function and call it from NL expressions. This is useful to test the +registered-function path (autodiff through the registered function). +""" +function variational_registered(; n::Int = default_nvar, kwargs...) h = 1 // (n + 1) model = Model() - x0 = [i * h * (1 - i * h) for i = 1:n] + x0 = Vector{Float64}(undef, n) + for i = 1:n + x0[i] = i * h * (1.0 - i * h) + end @variable(model, x[i = 1:n], start = x0[i]) if n == 1 @@ -23,11 +33,47 @@ function variational(; n::Int = default_nvar, kwargs...) @expression(model, term1, term1_mid + x[n] * (x[n] - 0) / h) end + JuMP.register(model, :diffquot, 2, Main.OptimizationProblems.diffquot; autodiff = true) + + if n == 1 + @NLexpression(model, term2, diffquot(x[1], 0) + diffquot(0, x[1])) + else + @NLexpression(model, term2_mid, sum(diffquot(x[j+1], x[j]) for j = 1:n-1)) + @NLexpression(model, term2, diffquot(x[1], 0) + term2_mid + diffquot(0, x[n])) + end + @NLobjective(model, Min, 2 * (term1 + n * (h / 2) * term2)) + return model +end + +function variational(; n::Int = default_nvar, kwargs...) 
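+  # `1 // (n + 1)` is an exact Rational step size; the parentheses matter,
+  # because `//` binds tighter than `+`, so `1 // n + 1` would parse as
+  # `(1 // n) + 1`. Keeping h rational lets the start values below be
+  # computed exactly before any floating-point rounding.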
+ h = 1 // (n + 1) + model = Model() + x0 = Vector{Float64}(undef, n) + for i = 1:n + x0[i] = i * h * (1.0 - i * h) + end + @variable(model, x[i = 1:n], start = x0[i]) + + if n == 1 + @expression(model, term1, x[1] * (x[1] - 0.0) / h) + else + @expression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / h for i = 1:n-1)) + @expression(model, term1, term1_mid + x[n] * (x[n] - 0) / h) + end if n == 1 - @expression(model, term2, (exp(x[1]) - 1) / x[1] + (exp(x[1]) - 1) / x[1]) + @expression(model, term2, + exp(x[1]) * (1 + (0 - x[1])/2 + (0 - x[1])^2/6 + (0 - x[1])^3/24 + (0 - x[1])^4/120) + + exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) + ) else - @expression(model, term2_mid, sum((exp(x[j+1]) - exp(x[j])) / (x[j+1] - x[j]) for j = 1:n-1)) - @expression(model, term2, (exp(x[1]) - 1) / x[1] + term2_mid + (exp(x[n]) - 1) / x[n]) + @expression(model, term2_mid, + sum(exp(x[j+1]) * (1 + (x[j] - x[j+1]) / 2 + (x[j] - x[j+1])^2 / 6 + (x[j] - x[j+1])^3 / 24 + (x[j] - x[j+1])^4 / 120) for j = 1:n-1) + ) + @expression(model, term2, + exp(x[1]) * (1 + (0 - x[1]) / 2 + (0 - x[1])^2 / 6 + (0 - x[1])^3 / 24 + (0 - x[1])^4 / 120) + + term2_mid + + exp(0) * (1 + (x[n] - 0) / 2 + (x[n] - 0)^2 / 6 + (x[n] - 0)^3 / 24 + (x[n] - 0)^4 / 120) + ) end @objective(model, Min, 2 * (term1 + n * (h / 2) * term2)) return model From 86d1fbf7268f498377adc59571557c4810440b1d Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Wed, 5 Nov 2025 18:03:49 +0530 Subject: [PATCH 12/33] Apply suggestions from code review Co-authored-by: Tangi Migot --- src/ADNLPProblems/boundary.jl | 6 +++--- src/PureJuMP/variational.jl | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/ADNLPProblems/boundary.jl b/src/ADNLPProblems/boundary.jl index af33d3bf..57b74c7f 100644 --- a/src/ADNLPProblems/boundary.jl +++ b/src/ADNLPProblems/boundary.jl @@ -6,20 +6,20 @@ function boundary(; use_nls::Bool = false, kwargs...) end function boundary(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - h = one(T) / T(n + 1) + h = 1 // n + 1 function f(x; n = length(x)) s = zero(T) for i = 1:n xm = (i == 1) ? zero(T) : x[i - 1] xp = (i == n) ? zero(T) : x[i + 1] - s += (2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + one(T))^3)^2 + s += (2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + 1)^3)^2 end return s end x0 = Vector{T}(undef, n) for i = 1:n - x0[i] = i * h * (one(T) - i * h) + x0[i] = i * h * (1 - i * h) end return ADNLPModels.ADNLPModel(f, x0, name = "boundary", minimize = true; kwargs...) diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index 689b17cf..23295aa0 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -22,12 +22,12 @@ function variational_registered(; n::Int = default_nvar, kwargs...) 
model = Model() x0 = Vector{Float64}(undef, n) for i = 1:n - x0[i] = i * h * (1.0 - i * h) + x0[i] = i * h * (1 - i * h) end @variable(model, x[i = 1:n], start = x0[i]) if n == 1 - @expression(model, term1, x[1] * (x[1] - 0.0) / h) + @expression(model, term1, x[1] * (x[1] - 0) / h) else @expression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / h for i = 1:n-1)) @expression(model, term1, term1_mid + x[n] * (x[n] - 0) / h) From 9301b1c8c1fc8f6625aff4f67cf50509f02903c9 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Wed, 5 Nov 2025 18:20:03 +0530 Subject: [PATCH 13/33] Update boundary.jl --- src/ADNLPProblems/boundary.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ADNLPProblems/boundary.jl b/src/ADNLPProblems/boundary.jl index 57b74c7f..8bc0c668 100644 --- a/src/ADNLPProblems/boundary.jl +++ b/src/ADNLPProblems/boundary.jl @@ -26,20 +26,20 @@ function boundary(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function boundary(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - h = one(T) / T(n + 1) + h = 1 // n + 1 function F!(r, x) @inbounds for i = 1:length(x) xm = (i == 1) ? zero(T) : x[i - 1] xp = (i == length(x)) ? zero(T) : x[i + 1] - r[i] = 2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + one(T))^3 + r[i] = 2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + 1)^3 end return r end x0 = Vector{T}(undef, n) for i = 1:n - x0[i] = i * h * (one(T) - i * h) + x0[i] = i * h * (1 - i * h) end return ADNLPModels.ADNLSModel!(F!, x0, n, name = "boundary-nls"; kwargs...) From 8b167d072701f29bfa24d3c792eec6b2d750a05f Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Wed, 5 Nov 2025 18:22:10 +0530 Subject: [PATCH 14/33] Update boundary.jl --- src/PureJuMP/boundary.jl | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/PureJuMP/boundary.jl b/src/PureJuMP/boundary.jl index e0f7e3f3..4e6cd2b4 100644 --- a/src/PureJuMP/boundary.jl +++ b/src/PureJuMP/boundary.jl @@ -11,13 +11,12 @@ export boundary function boundary(; n::Int = default_nvar, kwargs...) - # compute h and starts using Float64 exactly to match ADNLPProblems default starts - h = one(Float64) / Float64(n + 1) + h = 1 // n + 1 model = Model() - x0 = [Float64(i) * h * (1.0 - Float64(i) * h) for i = 1:n] + x0 = [i * h * (1 - i * h) for i = 1:n] @variable(model, x[i = 1:n], start = x0[i]) @objective(model, Min, - sum((2.0 * x[i] - (i == 1 ? 0.0 : x[i-1]) - (i == n ? 0.0 : x[i+1]) - + (h^2 / 2.0) * (x[i] + Float64(i) * h + 1.0)^3)^2 for i = 1:n)) + sum((2 * x[i] - (i == 1 ? 0 : x[i-1]) - (i == n ? 0 : x[i+1]) + + (h^2 / 2) * (x[i] + Float64(i) * h + 1)^3)^2 for i = 1:n)) return model end From f8a3dd78917eed6cbaf49ee90911a894a08cfad9 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Wed, 5 Nov 2025 20:06:20 +0530 Subject: [PATCH 15/33] edits to variational --- src/ADNLPProblems/variational.jl | 59 +++++---------------------- src/PureJuMP/variational.jl | 70 ++++++++------------------------ 2 files changed, 29 insertions(+), 100 deletions(-) diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl index ff255724..eee4872e 100644 --- a/src/ADNLPProblems/variational.jl +++ b/src/ADNLPProblems/variational.jl @@ -1,8 +1,7 @@ export variational - -function variational(; use_nls::Bool = false, kwargs...) - model = use_nls ? :nls : :nlp - return variational(Val(model); kwargs...) +function variational(; kwargs...) + # Forwarding wrapper so tests can pass kwargs like `matrix_free`. + return variational(Val(:nlp); kwargs...) 
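+    # Dispatch on `Val(:nlp)` selects the formulation at compile time; the
+    # `Val{:nls}` method is removed later in this commit, so the wrapper
+    # always forwards to the scalar-objective variant.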
end function diffquot(a, b) @@ -10,7 +9,12 @@ function diffquot(a, b) return exp(a) * (one(d) + d/2 + d^2/6 + d^3/24 + d^4/120) end -function variational(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} +function variational( + ::Val{:nlp}; + n::Int = default_nvar, + type::Type{T} = Float64, + kwargs..., +) where {T} h = 1 // (n + 1) function f(x; n = length(x)) @@ -30,8 +34,8 @@ function variational(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64 xip1 = xext[i + 2] term1 += xi * (xi - xip1) / hS end - - term2 = zero(S) + + term2 = zero(S) @inbounds for j = 0:n a = xext[j + 1] b = xext[j + 2] @@ -47,44 +51,3 @@ function variational(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64 return ADNLPModels.ADNLPModel(f, x0, name = "variational", minimize = true; kwargs...) end - -function variational(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - h = 1 // (n + 1) - - function F!(r, x) - S = eltype(x) - hS = convert(S, h) - - xext = Vector{S}(undef, n + 2) - xext[1] = zero(S) - @inbounds for i = 1:n - xext[i + 1] = x[i] - end - xext[n + 2] = zero(S) - - term1 = zero(S) - @inbounds for i = 1:n - xi = x[i] - xip1 = xext[i + 2] - term1 += xi * (xi - xip1) / hS - end - - term2 = zero(S) - @inbounds for j = 0:n - a = xext[j + 1] - b = xext[j + 2] - term2 += diffquot(a, b) - end - - Fval = 2 * (term1 + n * (hS / 2) * term2) - r[1] = sqrt(Fval < zero(Fval) ? zero(Fval) : Fval) - return r - end - - x0 = Vector{T}(undef, n) - for i = 1:n - x0[i] = i * h * (1 - i * h) - end - - return ADNLPModels.ADNLSModel!(F!, x0, 1, name = "variational-nls"; kwargs...) -end diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index 23295aa0..4051f9f2 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -10,41 +10,6 @@ # https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization export variational -""" -variational_registered(; n::Int = default_nvar, kwargs...) - -Build the variational JuMP model but register the package-level `diffquot` -function and call it from NL expressions. This is useful to test the -registered-function path (autodiff through the registered function). -""" -function variational_registered(; n::Int = default_nvar, kwargs...) - h = 1 // (n + 1) - model = Model() - x0 = Vector{Float64}(undef, n) - for i = 1:n - x0[i] = i * h * (1 - i * h) - end - @variable(model, x[i = 1:n], start = x0[i]) - - if n == 1 - @expression(model, term1, x[1] * (x[1] - 0) / h) - else - @expression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / h for i = 1:n-1)) - @expression(model, term1, term1_mid + x[n] * (x[n] - 0) / h) - end - - JuMP.register(model, :diffquot, 2, Main.OptimizationProblems.diffquot; autodiff = true) - - if n == 1 - @NLexpression(model, term2, diffquot(x[1], 0) + diffquot(0, x[1])) - else - @NLexpression(model, term2_mid, sum(diffquot(x[j+1], x[j]) for j = 1:n-1)) - @NLexpression(model, term2, diffquot(x[1], 0) + term2_mid + diffquot(0, x[n])) - end - @NLobjective(model, Min, 2 * (term1 + n * (h / 2) * term2)) - return model -end - function variational(; n::Int = default_nvar, kwargs...) h = 1 // (n + 1) model = Model() @@ -55,26 +20,27 @@ function variational(; n::Int = default_nvar, kwargs...) 
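+  # The factors 1 + d/2 + d^2/6 + d^3/24 + d^4/120 in the objective below are
+  # the degree-4 truncation of the series (exp(d) - 1) / d, so each
+  # exp(x[j]) * (1 + d/2 + ...) term with d = x[j + 1] - x[j] approximates the
+  # difference quotient (exp(x[j + 1]) - exp(x[j])) / d while avoiding an
+  # explicit division that degenerates as d -> 0.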
@variable(model, x[i = 1:n], start = x0[i]) if n == 1 - @expression(model, term1, x[1] * (x[1] - 0.0) / h) - else - @expression(model, term1_mid, sum(x[i] * (x[i] - x[i+1]) / h for i = 1:n-1)) - @expression(model, term1, term1_mid + x[n] * (x[n] - 0) / h) - end - if n == 1 - @expression(model, term2, - exp(x[1]) * (1 + (0 - x[1])/2 + (0 - x[1])^2/6 + (0 - x[1])^3/24 + (0 - x[1])^4/120) - + exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) + @objective(model, Min, + 2 * ( + x[1] * (x[1] - 0.0) / h + + n * (h / 2) * ( + exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) + + exp(x[1]) * (1 + (0 - x[1])/2 + (0 - x[1])^2/6 + (0 - x[1])^3/24 + (0 - x[1])^4/120) + ) + ) ) else - @expression(model, term2_mid, - sum(exp(x[j+1]) * (1 + (x[j] - x[j+1]) / 2 + (x[j] - x[j+1])^2 / 6 + (x[j] - x[j+1])^3 / 24 + (x[j] - x[j+1])^4 / 120) for j = 1:n-1) - ) - @expression(model, term2, - exp(x[1]) * (1 + (0 - x[1]) / 2 + (0 - x[1])^2 / 6 + (0 - x[1])^3 / 24 + (0 - x[1])^4 / 120) - + term2_mid - + exp(0) * (1 + (x[n] - 0) / 2 + (x[n] - 0)^2 / 6 + (x[n] - 0)^3 / 24 + (x[n] - 0)^4 / 120) + @objective(model, Min, + 2 * ( + (sum(x[i] * (x[i] - x[i+1]) / h for i = 1:n-1) + x[n] * (x[n] - 0) / h) + + n * (h / 2) * ( + exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) + + sum(exp(x[j]) * (1 + (x[j+1] - x[j]) / 2 + (x[j+1] - x[j])^2 / 6 + (x[j+1] - x[j])^3 / 24 + (x[j+1] - x[j])^4 / 120) for j = 1:n-1) + + exp(x[n]) * (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120) + ) + ) ) end - @objective(model, Min, 2 * (term1 + n * (h / 2) * term2)) + return model end From 94532e8fef08362f246c4f080f6762833a0ee3b8 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Wed, 5 Nov 2025 20:08:55 +0530 Subject: [PATCH 16/33] JuliaFormatter --- src/PureJuMP/boundary.jl | 13 ++++++++++--- src/PureJuMP/variational.jl | 39 ++++++++++++++++++++++++++----------- 2 files changed, 38 insertions(+), 14 deletions(-) diff --git a/src/PureJuMP/boundary.jl b/src/PureJuMP/boundary.jl index 4e6cd2b4..5b297602 100644 --- a/src/PureJuMP/boundary.jl +++ b/src/PureJuMP/boundary.jl @@ -15,8 +15,15 @@ function boundary(; n::Int = default_nvar, kwargs...) model = Model() x0 = [i * h * (1 - i * h) for i = 1:n] @variable(model, x[i = 1:n], start = x0[i]) - @objective(model, Min, - sum((2 * x[i] - (i == 1 ? 0 : x[i-1]) - (i == n ? 0 : x[i+1]) - + (h^2 / 2) * (x[i] + Float64(i) * h + 1)^3)^2 for i = 1:n)) + @objective( + model, + Min, + sum( + ( + 2 * x[i] - (i == 1 ? 0 : x[i - 1]) - (i == n ? 0 : x[i + 1]) + + (h^2 / 2) * (x[i] + Float64(i) * h + 1)^3 + )^2 for i = 1:n + ) + ) return model end diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index 4051f9f2..b892cfa0 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -20,23 +20,40 @@ function variational(; n::Int = default_nvar, kwargs...) 
@variable(model, x[i = 1:n], start = x0[i]) if n == 1 - @objective(model, Min, + @objective( + model, + Min, 2 * ( - x[1] * (x[1] - 0.0) / h - + n * (h / 2) * ( - exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) - + exp(x[1]) * (1 + (0 - x[1])/2 + (0 - x[1])^2/6 + (0 - x[1])^3/24 + (0 - x[1])^4/120) + x[1] * (x[1] - 0.0) / h + + n * + (h / 2) * + ( + exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) + + exp(x[1]) * (1 + (0 - x[1])/2 + (0 - x[1])^2/6 + (0 - x[1])^3/24 + (0 - x[1])^4/120) ) ) ) else - @objective(model, Min, + @objective( + model, + Min, 2 * ( - (sum(x[i] * (x[i] - x[i+1]) / h for i = 1:n-1) + x[n] * (x[n] - 0) / h) - + n * (h / 2) * ( - exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) - + sum(exp(x[j]) * (1 + (x[j+1] - x[j]) / 2 + (x[j+1] - x[j])^2 / 6 + (x[j+1] - x[j])^3 / 24 + (x[j+1] - x[j])^4 / 120) for j = 1:n-1) - + exp(x[n]) * (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120) + (sum(x[i] * (x[i] - x[i + 1]) / h for i = 1:(n - 1)) + x[n] * (x[n] - 0) / h) + + n * + (h / 2) * + ( + exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) + + sum( + exp(x[j]) * ( + 1 + + (x[j + 1] - x[j]) / 2 + + (x[j + 1] - x[j])^2 / 6 + + (x[j + 1] - x[j])^3 / 24 + + (x[j + 1] - x[j])^4 / 120 + ) for j = 1:(n - 1) + ) + + exp(x[n]) * + (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120) ) ) ) From bc42c32056f0b29cf23b11a79ba8b5f973b4639a Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Thu, 6 Nov 2025 06:49:01 +0530 Subject: [PATCH 17/33] Apply suggestions from code review Co-authored-by: Tangi Migot --- src/ADNLPProblems/boundary.jl | 10 +++++----- src/ADNLPProblems/variational.jl | 18 ++++-------------- 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/src/ADNLPProblems/boundary.jl b/src/ADNLPProblems/boundary.jl index 8bc0c668..3eb96518 100644 --- a/src/ADNLPProblems/boundary.jl +++ b/src/ADNLPProblems/boundary.jl @@ -6,7 +6,7 @@ function boundary(; use_nls::Bool = false, kwargs...) end function boundary(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - h = 1 // n + 1 + h = 1 // (n + 1) function f(x; n = length(x)) s = zero(T) for i = 1:n @@ -26,12 +26,12 @@ function boundary(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function boundary(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - h = 1 // n + 1 + h = 1 // (n + 1) - function F!(r, x) - @inbounds for i = 1:length(x) + function F!(r, x; n = length(x)) + @inbounds for i = 1:n xm = (i == 1) ? zero(T) : x[i - 1] - xp = (i == length(x)) ? zero(T) : x[i + 1] + xp = (i == n) ? zero(T) : x[i + 1] r[i] = 2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + 1)^3 end return r diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl index eee4872e..640a9509 100644 --- a/src/ADNLPProblems/variational.jl +++ b/src/ADNLPProblems/variational.jl @@ -1,16 +1,5 @@ export variational -function variational(; kwargs...) - # Forwarding wrapper so tests can pass kwargs like `matrix_free`. - return variational(Val(:nlp); kwargs...) 
-end - -function diffquot(a, b) - d = b - a - return exp(a) * (one(d) + d/2 + d^2/6 + d^3/24 + d^4/120) -end - -function variational( - ::Val{:nlp}; +function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs..., @@ -39,14 +28,15 @@ function variational( @inbounds for j = 0:n a = xext[j + 1] b = xext[j + 2] - term2 += diffquot(a, b) + d = b - a + term2 += exp(a) * (1 + d/2 + d^2/6 + d^3/24 + d^4/120) end return 2 * (term1 + n * (hS / 2) * term2) end x0 = Vector{T}(undef, n) for i = 1:n - x0[i] = i * h * (one(T) - i * h) + x0[i] = i * h * (1 - i * h) end return ADNLPModels.ADNLPModel(f, x0, name = "variational", minimize = true; kwargs...) From f4f3cf4f1671ff088c32df922f4bf0bab68291ad Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Thu, 6 Nov 2025 10:32:10 +0530 Subject: [PATCH 18/33] Problem 15 correction --- src/ADNLPProblems/variational.jl | 29 ++++++++++++++++------------- src/Meta/variational.jl | 2 +- src/PureJuMP/variational.jl | 15 +++------------ 3 files changed, 20 insertions(+), 26 deletions(-) diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl index 640a9509..5409f176 100644 --- a/src/ADNLPProblems/variational.jl +++ b/src/ADNLPProblems/variational.jl @@ -1,12 +1,14 @@ export variational -function variational(; - n::Int = default_nvar, - type::Type{T} = Float64, - kwargs..., -) where {T} + +function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} h = 1 // (n + 1) - function f(x; n = length(x)) + x0 = Vector{T}(undef, n) + for i = 1:n + x0[i] = i * h * (one(T) - i * h) + end + + f = (x; n = length(x)) -> begin S = eltype(x) hS = convert(S, h) @@ -28,16 +30,17 @@ function variational(; @inbounds for j = 0:n a = xext[j + 1] b = xext[j + 2] - d = b - a - term2 += exp(a) * (1 + d/2 + d^2/6 + d^3/24 + d^4/120) + if S <: AbstractFloat + term = (b == a) ? exp(a) : (exp(b) - exp(a)) / (b - a) + else + d = b - a + Pd = one(d) + d / 2 + d^2 / 6 + d^3 / 24 + d^4 / 120 + term = exp(a) * Pd + end + term2 += term end return 2 * (term1 + n * (hS / 2) * term2) end - x0 = Vector{T}(undef, n) - for i = 1:n - x0[i] = i * h * (1 - i * h) - end - return ADNLPModels.ADNLPModel(f, x0, name = "variational", minimize = true; kwargs...) end diff --git a/src/Meta/variational.jl b/src/Meta/variational.jl index 472d4b6f..27c13691 100644 --- a/src/Meta/variational.jl +++ b/src/Meta/variational.jl @@ -24,4 +24,4 @@ get_variational_nlin(; n::Integer = default_nvar, kwargs...) = 0 get_variational_nnln(; n::Integer = default_nvar, kwargs...) = 0 get_variational_nequ(; n::Integer = default_nvar, kwargs...) = 0 get_variational_nineq(; n::Integer = default_nvar, kwargs...) = 0 -get_variational_nls_nequ(; n::Integer = default_nvar, kwargs...) = 1 +get_variational_nls_nequ(; n::Integer = default_nvar, kwargs...) = 0 diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index b892cfa0..c1d3786f 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -42,18 +42,9 @@ function variational(; n::Int = default_nvar, kwargs...) 
n * (h / 2) * ( - exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) + - sum( - exp(x[j]) * ( - 1 + - (x[j + 1] - x[j]) / 2 + - (x[j + 1] - x[j])^2 / 6 + - (x[j + 1] - x[j])^3 / 24 + - (x[j + 1] - x[j])^4 / 120 - ) for j = 1:(n - 1) - ) + - exp(x[n]) * - (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120) + exp(x[1]) * (1 + (x[1] - 0) / 2 + (x[1] - 0)^2 / 6 + (x[1] - 0)^3 / 24 + (x[1] - 0)^4 / 120) + + sum(exp(x[j]) * (1 + (x[j + 1] - x[j]) / 2 + (x[j + 1] - x[j])^2 / 6 + (x[j + 1] - x[j])^3 / 24 + (x[j + 1] - x[j])^4 / 120) for j = 1:(n - 1)) + + exp(0) * (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120) ) ) ) From 4b66e94e9ae917ee66dc6b846eed69b37f6ad581 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Thu, 6 Nov 2025 16:29:47 +0530 Subject: [PATCH 19/33] fixing variational problems and boundary problem --- src/ADNLPProblems/variational.jl | 10 +++------- src/PureJuMP/boundary.jl | 2 +- src/PureJuMP/variational.jl | 16 +++++++++++++--- 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl index 5409f176..f3e2904a 100644 --- a/src/ADNLPProblems/variational.jl +++ b/src/ADNLPProblems/variational.jl @@ -30,13 +30,9 @@ function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs... @inbounds for j = 0:n a = xext[j + 1] b = xext[j + 2] - if S <: AbstractFloat - term = (b == a) ? exp(a) : (exp(b) - exp(a)) / (b - a) - else - d = b - a - Pd = one(d) + d / 2 + d^2 / 6 + d^3 / 24 + d^4 / 120 - term = exp(a) * Pd - end + d = b - a + Pd = one(d) + d / 2 + d^2 / 6 + d^3 / 24 + d^4 / 120 + term = exp(a) * Pd term2 += term end return 2 * (term1 + n * (hS / 2) * term2) diff --git a/src/PureJuMP/boundary.jl b/src/PureJuMP/boundary.jl index 5b297602..5cf31486 100644 --- a/src/PureJuMP/boundary.jl +++ b/src/PureJuMP/boundary.jl @@ -11,7 +11,7 @@ export boundary function boundary(; n::Int = default_nvar, kwargs...) - h = 1 // n + 1 + h = 1 // (n + 1) model = Model() x0 = [i * h * (1 - i * h) for i = 1:n] @variable(model, x[i = 1:n], start = x0[i]) diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index c1d3786f..61902e34 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -42,9 +42,19 @@ function variational(; n::Int = default_nvar, kwargs...) 
n * (h / 2) * ( - exp(x[1]) * (1 + (x[1] - 0) / 2 + (x[1] - 0)^2 / 6 + (x[1] - 0)^3 / 24 + (x[1] - 0)^4 / 120) + - sum(exp(x[j]) * (1 + (x[j + 1] - x[j]) / 2 + (x[j + 1] - x[j])^2 / 6 + (x[j + 1] - x[j])^3 / 24 + (x[j + 1] - x[j])^4 / 120) for j = 1:(n - 1)) + - exp(0) * (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120) + exp(0) * + (1 + (x[1] - 0) / 2 + (x[1] - 0)^2 / 6 + (x[1] - 0)^3 / 24 + (x[1] - 0)^4 / 120) + + sum( + exp(x[j]) * ( + 1 + + (x[j + 1] - x[j]) / 2 + + (x[j + 1] - x[j])^2 / 6 + + (x[j + 1] - x[j])^3 / 24 + + (x[j + 1] - x[j])^4 / 120 + ) for j = 1:(n - 1) + ) + + exp(x[n]) * + (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120) ) ) ) From 4b5ba91dd1a55482a859b78531d4690abbdb0ffd Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Fri, 7 Nov 2025 13:04:13 +0530 Subject: [PATCH 20/33] variational --- src/ADNLPProblems/variational.jl | 45 +++++++++------------------ src/PureJuMP/variational.jl | 53 +++++++------------------------- 2 files changed, 25 insertions(+), 73 deletions(-) diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl index f3e2904a..74bc99e9 100644 --- a/src/ADNLPProblems/variational.jl +++ b/src/ADNLPProblems/variational.jl @@ -2,40 +2,23 @@ export variational function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} h = 1 // (n + 1) + x0 = [convert(T, i * h * (1 - i * h)) for i = 1:n] - x0 = Vector{T}(undef, n) - for i = 1:n - x0[i] = i * h * (one(T) - i * h) - end - - f = (x; n = length(x)) -> begin - S = eltype(x) - hS = convert(S, h) - - xext = Vector{S}(undef, n + 2) - xext[1] = zero(S) - @inbounds for i = 1:n - xext[i + 1] = x[i] - end - xext[n + 2] = zero(S) + f = x -> begin + xext = vcat(zero(eltype(x)), x, zero(eltype(x))) - term1 = zero(S) - @inbounds for i = 1:n - xi = x[i] - xip1 = xext[i + 2] - term1 += xi * (xi - xip1) / hS - end + term1 = sum(x[i] * (x[i] - xext[i + 2]) / convert(eltype(x), h) for i = 1:n) + term2 = sum( + if eltype(x) <: AbstractFloat + d = xext[j + 2] - xext[j + 1] + abs(d) <= eps(eltype(x)) ? exp(xext[j + 1]) : (exp(xext[j + 2]) - exp(xext[j + 1])) / d + else + d = xext[j + 2] - xext[j + 1] + exp(xext[j + 1]) * expm1(d) / d + end for j = 0:n + ) - term2 = zero(S) - @inbounds for j = 0:n - a = xext[j + 1] - b = xext[j + 2] - d = b - a - Pd = one(d) + d / 2 + d^2 / 6 + d^3 / 24 + d^4 / 120 - term = exp(a) * Pd - term2 += term - end - return 2 * (term1 + n * (hS / 2) * term2) + return 2 * (term1 + n * (convert(eltype(x), h) / 2) * term2) end return ADNLPModels.ADNLPModel(f, x0, name = "variational", minimize = true; kwargs...) diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index 61902e34..991d5f43 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -12,53 +12,22 @@ export variational function variational(; n::Int = default_nvar, kwargs...) h = 1 // (n + 1) + x0 = [i * h * (1.0 - i * h) for i = 1:n] model = Model() - x0 = Vector{Float64}(undef, n) - for i = 1:n - x0[i] = i * h * (1.0 - i * h) - end @variable(model, x[i = 1:n], start = x0[i]) - if n == 1 - @objective( - model, - Min, - 2 * ( - x[1] * (x[1] - 0.0) / h + - n * - (h / 2) * - ( - exp(0) * (1 + (x[1] - 0)/2 + (x[1] - 0)^2/6 + (x[1] - 0)^3/24 + (x[1] - 0)^4/120) + - exp(x[1]) * (1 + (0 - x[1])/2 + (0 - x[1])^2/6 + (0 - x[1])^3/24 + (0 - x[1])^4/120) - ) + @objective( + model, + Min, + 2 * ( + sum(x[i] * (x[i] - (i == n ? 
0 : x[i + 1])) / h for i = 1:n) + + n * (h / 2) * ( + (exp(x[1]) - 1) / x[1] + + sum((exp(x[j + 1]) - exp(x[j])) / (x[j + 1] - x[j]) for j = 1:(n - 1)) + + (1 - exp(x[n])) / (-x[n]) ) ) - else - @objective( - model, - Min, - 2 * ( - (sum(x[i] * (x[i] - x[i + 1]) / h for i = 1:(n - 1)) + x[n] * (x[n] - 0) / h) + - n * - (h / 2) * - ( - exp(0) * - (1 + (x[1] - 0) / 2 + (x[1] - 0)^2 / 6 + (x[1] - 0)^3 / 24 + (x[1] - 0)^4 / 120) + - sum( - exp(x[j]) * ( - 1 + - (x[j + 1] - x[j]) / 2 + - (x[j + 1] - x[j])^2 / 6 + - (x[j + 1] - x[j])^3 / 24 + - (x[j + 1] - x[j])^4 / 120 - ) for j = 1:(n - 1) - ) + - exp(x[n]) * - (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120) - ) - ) - ) - end + ) return model end From 35db7cbb62384703f7ee6aed9bcfbf876de96159 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Fri, 7 Nov 2025 14:03:43 +0530 Subject: [PATCH 21/33] fix --- src/ADNLPProblems/variational.jl | 32 +++++++++++++++----------------- src/PureJuMP/variational.jl | 12 +++++++----- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl index 74bc99e9..f5159298 100644 --- a/src/ADNLPProblems/variational.jl +++ b/src/ADNLPProblems/variational.jl @@ -3,23 +3,21 @@ export variational function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} h = 1 // (n + 1) x0 = [convert(T, i * h * (1 - i * h)) for i = 1:n] - - f = x -> begin - xext = vcat(zero(eltype(x)), x, zero(eltype(x))) - - term1 = sum(x[i] * (x[i] - xext[i + 2]) / convert(eltype(x), h) for i = 1:n) - term2 = sum( - if eltype(x) <: AbstractFloat - d = xext[j + 2] - xext[j + 1] - abs(d) <= eps(eltype(x)) ? exp(xext[j + 1]) : (exp(xext[j + 2]) - exp(xext[j + 1])) / d - else - d = xext[j + 2] - xext[j + 1] - exp(xext[j + 1]) * expm1(d) / d - end for j = 0:n - ) - - return 2 * (term1 + n * (convert(eltype(x), h) / 2) * term2) - end + f = + x -> begin + xext = vcat(zero(eltype(x)), x, zero(eltype(x))) + term1 = sum(x[i] * (x[i] - xext[i + 2]) / convert(eltype(x), h) for i = 1:n) + term2 = sum( + if eltype(x) <: AbstractFloat + d = xext[j + 2] - xext[j + 1] + abs(d) <= eps(eltype(x)) ? exp(xext[j + 1]) : (exp(xext[j + 2]) - exp(xext[j + 1])) / d + else + d = xext[j + 2] - xext[j + 1] + exp(xext[j + 1]) * expm1(d) / d + end for j = 0:n + ) + return 2 * (term1 + n * (convert(eltype(x), h) / 2) * term2) + end return ADNLPModels.ADNLPModel(f, x0, name = "variational", minimize = true; kwargs...) end diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl index 991d5f43..4f10ac16 100644 --- a/src/PureJuMP/variational.jl +++ b/src/PureJuMP/variational.jl @@ -20,11 +20,13 @@ function variational(; n::Int = default_nvar, kwargs...) model, Min, 2 * ( - sum(x[i] * (x[i] - (i == n ? 
From 17e375421b96390681253b9b46edc23d358fd487 Mon Sep 17 00:00:00 2001
From: Arnav Kapoor
Date: Fri, 7 Nov 2025 14:10:05 +0530
Subject: [PATCH 22/33] JuliaFormatted commit

---
 src/PureJuMP/variational.jl | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
index 4f10ac16..65ea09d2 100644
--- a/src/PureJuMP/variational.jl
+++ b/src/PureJuMP/variational.jl
@@ -25,7 +25,15 @@ function variational(; n::Int = default_nvar, kwargs...)
       (h / 2) *
       (
         (1 + x[1]/2 + x[1]^2/6 + x[1]^3/24 + x[1]^4/120) +
-        sum(exp(x[j]) * (1 + (x[j + 1] - x[j]) / 2 + (x[j + 1] - x[j])^2 / 6 + (x[j + 1] - x[j])^3 / 24 + (x[j + 1] - x[j])^4 / 120) for j = 1:(n - 1)) +
+        sum(
+          exp(x[j]) * (
+            1 +
+            (x[j + 1] - x[j]) / 2 +
+            (x[j + 1] - x[j])^2 / 6 +
+            (x[j + 1] - x[j])^3 / 24 +
+            (x[j + 1] - x[j])^4 / 120
+          ) for j = 1:(n - 1)
+        ) +
         exp(x[n]) * (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120)
       )
     )

From b1c6dd954d09ae3472b2ad2069a44f0a25a879d9 Mon Sep 17 00:00:00 2001
From: Arnav Kapoor
Date: Fri, 7 Nov 2025 16:43:18 +0530
Subject: [PATCH 23/33] just trying

---
 src/ADNLPProblems/variational.jl | 5 ++++-
 src/PureJuMP/variational.jl      | 7 +++++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
index f5159298..9e8ce731 100644
--- a/src/ADNLPProblems/variational.jl
+++ b/src/ADNLPProblems/variational.jl
@@ -2,7 +2,10 @@ export variational
 
 function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
   h = 1 // (n + 1)
-  x0 = [convert(T, i * h * (1 - i * h)) for i = 1:n]
+  x0 = [begin
+    ih = i * h
+    convert(T, ih * (1 - ih))
+  end for i = 1:n]
   f =
     x -> begin
       xext = vcat(zero(eltype(x)), x, zero(eltype(x)))
diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
index 65ea09d2..9b31e472 100644
--- a/src/PureJuMP/variational.jl
+++ b/src/PureJuMP/variational.jl
@@ -12,11 +12,14 @@ export variational
 
 function variational(; n::Int = default_nvar, kwargs...)
   h = 1 // (n + 1)
-  x0 = [i * h * (1.0 - i * h) for i = 1:n]
+  x0 = [begin
+    ih = i * h
+    convert(Float64, ih * (1 - ih))
+  end for i = 1:n]
   model = Model()
   @variable(model, x[i = 1:n], start = x0[i])
 
-  @objective(
+  @NLobjective(
     model,
     Min,
     2 * (
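
Aside: the switch to @NLobjective here is undone in the next commit. In JuMP 1.15 and later, nonlinear operators such as exp may be used directly inside @objective, while @NLobjective belongs to the legacy nonlinear interface, and the two styles cannot be mixed on one model. A minimal sketch (assumes only that a recent JuMP is installed; no solver is attached):

    using JuMP
    model = Model()
    @variable(model, y >= 0.1, start = 1.0)
    @objective(model, Min, (exp(y) - 1) / y)  # modern interface
    # legacy equivalent, which would have to go on a fresh model:
    # @NLobjective(model, Min, (exp(y) - 1) / y)
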
From aaacc0ae6a76814f58c59017c838c9f7a779a7a8 Mon Sep 17 00:00:00 2001
From: Arnav Kapoor
Date: Sun, 9 Nov 2025 12:21:09 +0530
Subject: [PATCH 24/33] Update variational.jl

---
 src/PureJuMP/variational.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
index 9b31e472..eac9cbf6 100644
--- a/src/PureJuMP/variational.jl
+++ b/src/PureJuMP/variational.jl
@@ -19,7 +19,7 @@ function variational(; n::Int = default_nvar, kwargs...)
   model = Model()
   @variable(model, x[i = 1:n], start = x0[i])
 
-  @NLobjective(
+  @objective(
     model,
     Min,
     2 * (

From a896e5473e357abdcd64c381eb020cfc8f4939eb Mon Sep 17 00:00:00 2001
From: Arnav Kapoor
Date: Tue, 11 Nov 2025 09:41:11 +0530
Subject: [PATCH 25/33] Apply suggestions from code review

Co-authored-by: Tangi Migot
---
 src/ADNLPProblems/boundary.jl    |  2 +-
 src/ADNLPProblems/variational.jl | 10 +++++-----
 src/Meta/variational.jl          |  2 +-
 src/PureJuMP/boundary.jl         |  2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/ADNLPProblems/boundary.jl b/src/ADNLPProblems/boundary.jl
index 3eb96518..ebdb336a 100644
--- a/src/ADNLPProblems/boundary.jl
+++ b/src/ADNLPProblems/boundary.jl
@@ -22,7 +22,7 @@ function boundary(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k
     x0[i] = i * h * (1 - i * h)
   end
 
-  return ADNLPModels.ADNLPModel(f, x0, name = "boundary", minimize = true; kwargs...)
+  return ADNLPModels.ADNLPModel(f, x0, name = "boundary"; kwargs...)
 end
 
 function boundary(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
index 9e8ce731..29d3d27e 100644
--- a/src/ADNLPProblems/variational.jl
+++ b/src/ADNLPProblems/variational.jl
@@ -2,10 +2,10 @@ export variational
 
 function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
   h = 1 // (n + 1)
-  x0 = [begin
-    ih = i * h
-    convert(T, ih * (1 - ih))
-  end for i = 1:n]
+  x0 = Vector{T}(undef, n)
+  for i = 1:n
+    x0[i] = i * h * (1 - i * h)
+  end
   f =
     x -> begin
       xext = vcat(zero(eltype(x)), x, zero(eltype(x)))
@@ -22,5 +22,5 @@ function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...
       return 2 * (term1 + n * (convert(eltype(x), h) / 2) * term2)
     end
 
-  return ADNLPModels.ADNLPModel(f, x0, name = "variational", minimize = true; kwargs...)
+  return ADNLPModels.ADNLPModel(f, x0, name = "variational"; kwargs...)
 end
diff --git a/src/Meta/variational.jl b/src/Meta/variational.jl
index 27c13691..14c98dfc 100644
--- a/src/Meta/variational.jl
+++ b/src/Meta/variational.jl
@@ -14,7 +14,7 @@ variational_meta = Dict(
   :best_known_lower_bound => -Inf,
   :best_known_upper_bound => Inf,
   :is_feasible => true,
-  :defined_everywhere => missing,
+  :defined_everywhere => false,
   :origin => :academic,
 )
 
diff --git a/src/PureJuMP/boundary.jl b/src/PureJuMP/boundary.jl
index 5cf31486..f745b1e1 100644
--- a/src/PureJuMP/boundary.jl
+++ b/src/PureJuMP/boundary.jl
@@ -21,7 +21,7 @@ function boundary(; n::Int = default_nvar, kwargs...)
     sum(
       (
         2 * x[i] - (i == 1 ? 0 : x[i - 1]) - (i == n ? 0 : x[i + 1]) +
-        (h^2 / 2) * (x[i] + Float64(i) * h + 1)^3
+        (h^2 / 2) * (x[i] + i * h + 1)^3
       )^2 for i = 1:n
     )
   )
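
Aside: two of these review changes are behavior-preserving. `minimize = true` is already the default of the `ADNLPModels.ADNLPModel` constructor, and inside the JuMP expression `i * h` promotes on its own (Int times Rational), making the explicit `Float64(i)` redundant. The promotion rule in plain Julia:

    h = 1 // (10 + 1)
    typeof(3 * h)        # Rational{Int64}
    typeof(3 * h + 0.5)  # Float64: the Rational promotes on contact with a float

The `:defined_everywhere => false` flag anticipates the next commit, which trades the guarded quotient for a raw divided difference that is undefined wherever a denominator vanishes.
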
From 41f6aaa76335c7a6d8fa5637278bc1862f96e755 Mon Sep 17 00:00:00 2001
From: Arnav Kapoor
Date: Tue, 11 Nov 2025 10:28:30 +0530
Subject: [PATCH 26/33] variational Problem - reviewed changes

---
 src/ADNLPProblems/variational.jl | 41 ++++++++++++++++++--------------
 src/PureJuMP/variational.jl      | 24 +++++++------------
 2 files changed, 32 insertions(+), 33 deletions(-)

diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
index 29d3d27e..1a464166 100644
--- a/src/ADNLPProblems/variational.jl
+++ b/src/ADNLPProblems/variational.jl
@@ -2,25 +2,30 @@ export variational
 
 function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
   h = 1 // (n + 1)
-  x0 = Vector{T}(undef, n)
-  for i = 1:n
-    x0[i] = i * h * (1 - i * h)
-  end
-  f =
-    x -> begin
-      xext = vcat(zero(eltype(x)), x, zero(eltype(x)))
-      term1 = sum(x[i] * (x[i] - xext[i + 2]) / convert(eltype(x), h) for i = 1:n)
-      term2 = sum(
-        if eltype(x) <: AbstractFloat
-          d = xext[j + 2] - xext[j + 1]
-          abs(d) <= eps(eltype(x)) ? exp(xext[j + 1]) : (exp(xext[j + 2]) - exp(xext[j + 1])) / d
-        else
-          d = xext[j + 2] - xext[j + 1]
-          exp(xext[j + 1]) * expm1(d) / d
-        end for j = 0:n
-      )
-      return 2 * (term1 + n * (convert(eltype(x), h) / 2) * term2)
+  x0 = [
+    begin
+      ih = i * h
+      convert(T, ih * (1 - ih))
+    end for i = 1:n
+  ]
+
+  function f(x)
+    term1 = zero(T)
+    for i = 1:n
+      xi = x[i]
+      xip = (i < n) ? x[i + 1] : zero(T)
+      term1 += xi * (xi - xip) / h
+    end
+
+    term2 = zero(T)
+    for j = 0:n
+      a = (j == 0) ? zero(T) : x[j]
+      b = (j == n) ? zero(T) : x[j + 1]
+      term2 += (exp(b) - exp(a)) / (b - a)
     end
+    return 2 * (term1 + n * (h / 2) * term2)
+  end
+
   return ADNLPModels.ADNLPModel(f, x0, name = "variational"; kwargs...)
 end
diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
index eac9cbf6..32c4c87f 100644
--- a/src/PureJuMP/variational.jl
+++ b/src/PureJuMP/variational.jl
@@ -12,10 +12,12 @@ export variational
 
 function variational(; n::Int = default_nvar, kwargs...)
   h = 1 // (n + 1)
-  x0 = [begin
-    ih = i * h
-    convert(Float64, ih * (1 - ih))
-  end for i = 1:n]
+  x0 = [
+    begin
+      ih = i * h
+      convert(T, ih * (1 - ih))
+    end for i = 1:n
+  ]
   model = Model()
   @variable(model, x[i = 1:n], start = x0[i])
 
@@ -27,17 +29,9 @@ function variational(; n::Int = default_nvar, kwargs...)
     n *
     (h / 2) *
     (
-      (1 + x[1]/2 + x[1]^2/6 + x[1]^3/24 + x[1]^4/120) +
-      sum(
-        exp(x[j]) * (
-          1 +
-          (x[j + 1] - x[j]) / 2 +
-          (x[j + 1] - x[j])^2 / 6 +
-          (x[j + 1] - x[j])^3 / 24 +
-          (x[j + 1] - x[j])^4 / 120
-        ) for j = 1:(n - 1)
-      ) +
-      exp(x[n]) * (1 + (0 - x[n]) / 2 + (0 - x[n])^2 / 6 + (0 - x[n])^3 / 24 + (0 - x[n])^4 / 120)
+      (exp(x[1]) - exp(0)) / (x[1] - 0) +
+      sum((exp(x[j + 1]) - exp(x[j])) / (x[j + 1] - x[j]) for j = 1:(n - 1)) +
+      (exp(0) - exp(x[n])) / (0 - x[n])
       )
     )
   )
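
Aside: with the guard gone, the objective now evaluates to NaN at any point where two consecutive extended components coincide, which is exactly what `:defined_everywhere => false` records. Illustration (sketch only, helper name is ours):

    divdiff(a, b) = (exp(b) - exp(a)) / (b - a)
    divdiff(0.3, 0.7)  # fine
    divdiff(0.5, 0.5)  # NaN, since 0/0
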
From f3805164b5f1793c93c7969e172613954c57b680 Mon Sep 17 00:00:00 2001
From: Arnav Kapoor
Date: Tue, 11 Nov 2025 23:20:44 +0530
Subject: [PATCH 27/33] passing failing checks for variational problems

---
 src/ADNLPProblems/variational.jl | 7 +------
 src/PureJuMP/variational.jl      | 7 +------
 2 files changed, 2 insertions(+), 12 deletions(-)

diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
index 1a464166..d9238df9 100644
--- a/src/ADNLPProblems/variational.jl
+++ b/src/ADNLPProblems/variational.jl
@@ -2,12 +2,7 @@ export variational
 
 function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
   h = 1 // (n + 1)
-  x0 = [
-    begin
-      ih = i * h
-      convert(T, ih * (1 - ih))
-    end for i = 1:n
-  ]
+  x0 = [(i * h) * (1 - i * h) for i = 1:n]
 
   function f(x)
     term1 = zero(T)
diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
index 32c4c87f..12f1a58c 100644
--- a/src/PureJuMP/variational.jl
+++ b/src/PureJuMP/variational.jl
@@ -12,12 +12,7 @@ export variational
 
 function variational(; n::Int = default_nvar, kwargs...)
   h = 1 // (n + 1)
-  x0 = [
-    begin
-      ih = i * h
-      convert(T, ih * (1 - ih))
-    end for i = 1:n
-  ]
+  x0 = [(i * h) * (1 - i * h) for i = 1:n]
   model = Model()
   @variable(model, x[i = 1:n], start = x0[i])

From 7cd697f7f623e879969f78df7510baa8e6e2c7eb Mon Sep 17 00:00:00 2001
From: Arnav Kapoor
Date: Tue, 11 Nov 2025 23:21:04 +0530
Subject: [PATCH 28/33] JuliaFormatter

---
 src/PureJuMP/variational.jl | 38 ++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
index 12f1a58c..8b5642b8 100644
--- a/src/PureJuMP/variational.jl
+++ b/src/PureJuMP/variational.jl
@@ -11,25 +11,25 @@ export variational
 
 function variational(; n::Int = default_nvar, kwargs...)
-  h = 1 // (n + 1)
-  x0 = [(i * h) * (1 - i * h) for i = 1:n]
-  model = Model()
-  @variable(model, x[i = 1:n], start = x0[i])
+  h = 1 // (n + 1)
+  x0 = [(i * h) * (1 - i * h) for i ∈ 1:n]
+  model = Model()
+  @variable(model, x[i=1:n], start = x0[i])
 
-  @objective(
-    model,
-    Min,
-    2 * (
-      (sum(x[i] * (x[i] - x[i + 1]) / h for i = 1:(n - 1)) + x[n] * x[n] / h) +
-      n *
-      (h / 2) *
-      (
-        (exp(x[1]) - exp(0)) / (x[1] - 0) +
-        sum((exp(x[j + 1]) - exp(x[j])) / (x[j + 1] - x[j]) for j = 1:(n - 1)) +
-        (exp(0) - exp(x[n])) / (0 - x[n])
-      )
-    )
-  )
+  @objective(
+    model,
+    Min,
+    2 * (
+      (sum(x[i] * (x[i] - x[i+1]) / h for i ∈ 1:(n-1)) + x[n] * x[n] / h) +
+      n *
+      (h / 2) *
+      (
+        (exp(x[1]) - exp(0)) / (x[1] - 0) +
+        sum((exp(x[j+1]) - exp(x[j])) / (x[j+1] - x[j]) for j ∈ 1:(n-1)) +
+        (exp(0) - exp(x[n])) / (0 - x[n])
+      )
+    )
+  )
 
-  return model
+  return model
 end
From 0befcccb95de8c0475424c7aa8b9593962a060f0 Mon Sep 17 00:00:00 2001
From: Arnav Kapoor
Date: Wed, 12 Nov 2025 00:54:00 +0530
Subject: [PATCH 29/33] fixing float32 issue in variational problems

---
 src/ADNLPProblems/variational.jl |  2 +-
 src/PureJuMP/variational.jl      | 38 ++++++++++++++++----------------
 2 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
index d9238df9..7c56a90a 100644
--- a/src/ADNLPProblems/variational.jl
+++ b/src/ADNLPProblems/variational.jl
@@ -2,7 +2,7 @@ export variational
 
 function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
   h = 1 // (n + 1)
-  x0 = [(i * h) * (1 - i * h) for i = 1:n]
+  x0 = [convert(T, (i * h) * (1 - i * h)) for i = 1:n]
 
   function f(x)
     term1 = zero(T)
diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
index 8b5642b8..2bc16d99 100644
--- a/src/PureJuMP/variational.jl
+++ b/src/PureJuMP/variational.jl
@@ -11,25 +11,25 @@ export variational
 
 function variational(; n::Int = default_nvar, kwargs...)
-  h = 1 // (n + 1)
-  x0 = [(i * h) * (1 - i * h) for i ∈ 1:n]
-  model = Model()
-  @variable(model, x[i=1:n], start = x0[i])
+  h = 1 // (n + 1)
+  x0 = [convert(T, (i * h) * (1 - i * h)) for i = 1:n]
+  model = Model()
+  @variable(model, x[i = 1:n], start = x0[i])
 
-  @objective(
-    model,
-    Min,
-    2 * (
-      (sum(x[i] * (x[i] - x[i+1]) / h for i ∈ 1:(n-1)) + x[n] * x[n] / h) +
-      n *
-      (h / 2) *
-      (
-        (exp(x[1]) - exp(0)) / (x[1] - 0) +
-        sum((exp(x[j+1]) - exp(x[j])) / (x[j+1] - x[j]) for j ∈ 1:(n-1)) +
-        (exp(0) - exp(x[n])) / (0 - x[n])
-      )
-    )
-  )
+  @objective(
+    model,
+    Min,
+    2 * (
+      (sum(x[i] * (x[i] - x[i + 1]) / h for i ∈ 1:(n - 1)) + x[n] * x[n] / h) +
+      n *
+      (h / 2) *
+      (
+        (exp(x[1]) - exp(0)) / (x[1] - 0) +
+        sum((exp(x[j + 1]) - exp(x[j])) / (x[j + 1] - x[j]) for j ∈ 1:(n - 1)) +
+        (exp(0) - exp(x[n])) / (0 - x[n])
+      )
+    )
+  )
 
-  return model
+  return model
 end

From 0befcccb95de8c0475424c7aa8b9593962a060f0 Mon Sep 17 00:00:00 2001
From: Arnav Kapoor
Date: Wed, 12 Nov 2025 01:33:51 +0530
Subject: [PATCH 30/33] going back to working version

---
 src/ADNLPProblems/variational.jl | 2 +-
 src/PureJuMP/variational.jl      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
index 7c56a90a..d9238df9 100644
--- a/src/ADNLPProblems/variational.jl
+++ b/src/ADNLPProblems/variational.jl
@@ -2,7 +2,7 @@ export variational
 
 function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
   h = 1 // (n + 1)
-  x0 = [convert(T, (i * h) * (1 - i * h)) for i = 1:n]
+  x0 = [(i * h) * (1 - i * h) for i = 1:n]
 
   function f(x)
     term1 = zero(T)
diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
index 2bc16d99..711a1535 100644
--- a/src/PureJuMP/variational.jl
+++ b/src/PureJuMP/variational.jl
@@ -12,7 +12,7 @@ export variational
 
 function variational(; n::Int = default_nvar, kwargs...)
   h = 1 // (n + 1)
-  x0 = [convert(T, (i * h) * (1 - i * h)) for i = 1:n]
+  x0 = [(i * h) * (1 - i * h) for i = 1:n]
   model = Model()
   @variable(model, x[i = 1:n], start = x0[i])
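
Aside: the Float32 trouble that these two commits circle around comes from `h = 1 // (n + 1)` being a `Rational{Int}`: an untyped comprehension over products of `i` and `h` has eltype `Rational{Int64}` regardless of the requested `type`, and the attempted `convert(T, ...)` cannot work in the PureJuMP file, whose function has no type parameter `T` in scope. A sketch of the pitfall and of the typed-comprehension fix that the next commit adopts:

    n = 4
    h = 1 // (n + 1)
    x0 = [(i * h) * (1 - i * h) for i = 1:n]
    eltype(x0)                                    # Rational{Int64}
    x0f = Float32[(i * h) * (1 - i * h) for i = 1:n]
    eltype(x0f)                                   # Float32: T[...] converts each entry
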
From 5c9006b69fa887a2c08c4bf3b0053aaf8af3bfc2 Mon Sep 17 00:00:00 2001
From: Tangi Migot
Date: Tue, 11 Nov 2025 19:23:35 -0500
Subject: [PATCH 31/33] Apply suggestions from code review

---
 src/ADNLPProblems/variational.jl | 4 ++--
 src/PureJuMP/variational.jl      | 3 +--
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
index d9238df9..88af122d 100644
--- a/src/ADNLPProblems/variational.jl
+++ b/src/ADNLPProblems/variational.jl
@@ -2,7 +2,7 @@ export variational
 
 function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
   h = 1 // (n + 1)
-  x0 = [(i * h) * (1 - i * h) for i = 1:n]
+  x0 = T[(i * h) * (1 - i * h) for i = 1:n]
 
   function f(x)
     term1 = zero(T)
@@ -19,7 +19,7 @@ function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...
       term2 += (exp(b) - exp(a)) / (b - a)
     end
 
-    return 2 * (term1 + n * (h / 2) * term2)
+    return 2 * (term1 + 2 * h * term2)
   end
 
   return ADNLPModels.ADNLPModel(f, x0, name = "variational"; kwargs...)
diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
index 711a1535..7d4e51f1 100644
--- a/src/PureJuMP/variational.jl
+++ b/src/PureJuMP/variational.jl
@@ -21,8 +21,7 @@ function variational(; n::Int = default_nvar, kwargs...)
     Min,
     2 * (
       (sum(x[i] * (x[i] - x[i + 1]) / h for i ∈ 1:(n - 1)) + x[n] * x[n] / h) +
-      n *
-      (h / 2) *
+      2 * h *
       (
         (exp(x[1]) - exp(0)) / (x[1] - 0) +
         sum((exp(x[j + 1]) - exp(x[j])) / (x[j + 1] - x[j]) for j ∈ 1:(n - 1)) +
         (exp(0) - exp(x[n])) / (0 - x[n])

From 3b9693525a08c7ec6819ae0d8c6152d5538cca2b Mon Sep 17 00:00:00 2001
From: Tangi Migot
Date: Wed, 12 Nov 2025 18:25:04 -0500
Subject: [PATCH 32/33] Apply suggestions from code review

---
 src/ADNLPProblems/boundary.jl    |  2 +-
 src/ADNLPProblems/variational.jl | 33 ++++++++++++++++++--------------
 2 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/src/ADNLPProblems/boundary.jl b/src/ADNLPProblems/boundary.jl
index ebdb336a..80ac310e 100644
--- a/src/ADNLPProblems/boundary.jl
+++ b/src/ADNLPProblems/boundary.jl
@@ -9,7 +9,7 @@ function boundary(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k
   h = 1 // (n + 1)
   function f(x; n = length(x))
     s = zero(T)
-    for i = 1:n
+    @inbounds for i = 1:n
       xm = (i == 1) ? zero(T) : x[i - 1]
       xp = (i == n) ? zero(T) : x[i + 1]
       s += (2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + 1)^3)^2
diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
index 88af122d..95d7b535 100644
--- a/src/ADNLPProblems/variational.jl
+++ b/src/ADNLPProblems/variational.jl
@@ -4,23 +4,28 @@ function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...
   h = 1 // (n + 1)
   x0 = T[(i * h) * (1 - i * h) for i = 1:n]
 
-  function f(x)
-    term1 = zero(T)
-    for i = 1:n
-      xi = x[i]
-      xip = (i < n) ? x[i + 1] : zero(T)
-      term1 += xi * (xi - xip) / h
-    end
+function f(x; n = length(x))
+  T = eltype(x)
 
-    term2 = zero(T)
-    for j = 0:n
-      a = (j == 0) ? zero(T) : x[j]
-      b = (j == n) ? zero(T) : x[j + 1]
-      term2 += (exp(b) - exp(a)) / (b - a)
-    end
+  term1 = zero(T)
+  term2 = zero(T)
+  @inbounds for k = 1:n
+    xi = x[k]
+    xip = (k < n) ? x[k + 1] : zero(T)
+
+    term1 += xi * (xi - xip) / h
 
-    return 2 * (term1 + 2 * h * term2)
+    a_prev = (k == 1) ? zero(T) : x[k - 1]
+    b_prev = xi
+    term2 += (exp(b_prev) - exp(a_prev)) / (b_prev - a_prev)
+
+    if k == n
+      a_last, b_last = xi, zero(T)
+      term2 += (exp(b_last) - exp(a_last)) / (b_last - a_last)
+    end
   end
+  return 2 * (term1 + 2 * h * term2)
+end
 
   return ADNLPModels.ADNLPModel(f, x0, name = "variational"; kwargs...)
 end

From 3d76a65acd68901ac753eeaed0c45610442069a1 Mon Sep 17 00:00:00 2001
From: Tangi Migot
Date: Wed, 12 Nov 2025 18:37:27 -0500
Subject: [PATCH 33/33] Update src/ADNLPProblems/variational.jl

---
 src/ADNLPProblems/variational.jl | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
index 95d7b535..d2243957 100644
--- a/src/ADNLPProblems/variational.jl
+++ b/src/ADNLPProblems/variational.jl
@@ -5,22 +5,22 @@ function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...
   x0 = T[(i * h) * (1 - i * h) for i = 1:n]
 
 function f(x; n = length(x))
-  T = eltype(x)
+  Ti = eltype(x)
 
-  term1 = zero(T)
-  term2 = zero(T)
+  term1 = zero(Ti)
+  term2 = zero(Ti)
   @inbounds for k = 1:n
     xi = x[k]
-    xip = (k < n) ? x[k + 1] : zero(T)
+    xip = (k < n) ? x[k + 1] : zero(Ti)
 
     term1 += xi * (xi - xip) / h
 
-    a_prev = (k == 1) ? zero(T) : x[k - 1]
+    a_prev = (k == 1) ? zero(Ti) : x[k - 1]
     b_prev = xi
     term2 += (exp(b_prev) - exp(a_prev)) / (b_prev - a_prev)
 
     if k == n
-      a_last, b_last = xi, zero(T)
+      a_last, b_last = xi, zero(Ti)
       term2 += (exp(b_last) - exp(a_last)) / (b_last - a_last)
     end
   end
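
Closing observation (a sketch derived from the formulas above, not a claim made by the patches): the standard starting point is symmetric, x0[i] == x0[n + 1 - i], because (n + 1 - i) * h == 1 - i * h when h = 1 // (n + 1). For even n this makes two consecutive components equal at i = n ÷ 2, so the unguarded divided difference hits 0/0 = NaN at x0 itself, which fits the `:defined_everywhere => false` entry in the meta file:

    n = 10
    h = 1 // (n + 1)
    x0 = [Float64((i * h) * (1 - i * h)) for i = 1:n]
    x0[5] == x0[6]                                # true: both equal Float64(30//121)
    (exp(x0[6]) - exp(x0[5])) / (x0[6] - x0[5])   # NaN
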