diff --git a/src/ADNLPProblems/boundary.jl b/src/ADNLPProblems/boundary.jl
new file mode 100644
index 00000000..80ac310e
--- /dev/null
+++ b/src/ADNLPProblems/boundary.jl
@@ -0,0 +1,46 @@
+export boundary
+
+function boundary(; use_nls::Bool = false, kwargs...)
+  model = use_nls ? :nls : :nlp
+  return boundary(Val(model); kwargs...)
+end
+
+function boundary(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
+  h = 1 // (n + 1)
+  function f(x; n = length(x))
+    s = zero(T)
+    @inbounds for i = 1:n
+      xm = (i == 1) ? zero(T) : x[i - 1]
+      xp = (i == n) ? zero(T) : x[i + 1]
+      s += (2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + 1)^3)^2
+    end
+    return s
+  end
+
+  x0 = Vector{T}(undef, n)
+  for i = 1:n
+    x0[i] = i * h * (1 - i * h)
+  end
+
+  return ADNLPModels.ADNLPModel(f, x0, name = "boundary"; kwargs...)
+end
+
+function boundary(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
+  h = 1 // (n + 1)
+
+  function F!(r, x; n = length(x))
+    @inbounds for i = 1:n
+      xm = (i == 1) ? zero(T) : x[i - 1]
+      xp = (i == n) ? zero(T) : x[i + 1]
+      r[i] = 2 * x[i] - xm - xp + (h^2 / 2) * (x[i] + i * h + 1)^3
+    end
+    return r
+  end
+
+  x0 = Vector{T}(undef, n)
+  for i = 1:n
+    x0[i] = i * h * (1 - i * h)
+  end
+
+  return ADNLPModels.ADNLSModel!(F!, x0, n, name = "boundary-nls"; kwargs...)
+end
diff --git a/src/ADNLPProblems/variational.jl b/src/ADNLPProblems/variational.jl
new file mode 100644
index 00000000..d2243957
--- /dev/null
+++ b/src/ADNLPProblems/variational.jl
@@ -0,0 +1,31 @@
+export variational
+
+function variational(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
+  h = 1 // (n + 1)
+  x0 = T[(i * h) * (1 - i * h) for i = 1:n]
+
+  function f(x; n = length(x))
+    Ti = eltype(x)
+
+    term1 = zero(Ti)
+    term2 = zero(Ti)
+    @inbounds for k = 1:n
+      xi = x[k]
+      xip = (k < n) ? x[k + 1] : zero(Ti)
+
+      term1 += xi * (xi - xip) / h
+
+      a_prev = (k == 1) ? zero(Ti) : x[k - 1]
+      b_prev = xi
+      term2 += (exp(b_prev) - exp(a_prev)) / (b_prev - a_prev)
+
+      if k == n
+        a_last, b_last = xi, zero(Ti)
+        term2 += (exp(b_last) - exp(a_last)) / (b_last - a_last)
+      end
+    end
+    return 2 * (term1 + 2 * h * term2)
+  end
+
+  return ADNLPModels.ADNLPModel(f, x0, name = "variational"; kwargs...)
+end
diff --git a/src/Meta/boundary.jl b/src/Meta/boundary.jl
new file mode 100644
index 00000000..8ba34297
--- /dev/null
+++ b/src/Meta/boundary.jl
@@ -0,0 +1,27 @@
+boundary_meta = Dict(
+  :nvar => 100,
+  :variable_nvar => true,
+  :ncon => 0,
+  :variable_ncon => false,
+  :minimize => true,
+  :name => "boundary",
+  :has_equalities_only => false,
+  :has_inequalities_only => false,
+  :has_bounds => false,
+  :has_fixed_variables => false,
+  :objtype => :least_squares,
+  :contype => :unconstrained,
+  :best_known_lower_bound => -Inf,
+  :best_known_upper_bound => Inf,
+  :is_feasible => true,
+  :defined_everywhere => missing,
+  :origin => :academic,
+)
+
+get_boundary_nvar(; n::Integer = default_nvar, kwargs...) = n
+get_boundary_ncon(; n::Integer = default_nvar, kwargs...) = 0
+get_boundary_nlin(; n::Integer = default_nvar, kwargs...) = 0
+get_boundary_nnln(; n::Integer = default_nvar, kwargs...) = 0
+get_boundary_nequ(; n::Integer = default_nvar, kwargs...) = 0
+get_boundary_nineq(; n::Integer = default_nvar, kwargs...) = 0
+get_boundary_nls_nequ(; n::Integer = default_nvar, kwargs...) = n
diff --git a/src/Meta/variational.jl b/src/Meta/variational.jl
new file mode 100644
index 00000000..14c98dfc
--- /dev/null
+++ b/src/Meta/variational.jl
@@ -0,0 +1,27 @@
+variational_meta = Dict(
+  :nvar => 100,
+  :variable_nvar => true,
+  :ncon => 0,
+  :variable_ncon => false,
+  :minimize => true,
+  :name => "variational",
+  :has_equalities_only => false,
+  :has_inequalities_only => false,
+  :has_bounds => false,
+  :has_fixed_variables => false,
+  :objtype => :other,
+  :contype => :unconstrained,
+  :best_known_lower_bound => -Inf,
+  :best_known_upper_bound => Inf,
+  :is_feasible => true,
+  :defined_everywhere => false,
+  :origin => :academic,
+)
+
+get_variational_nvar(; n::Integer = default_nvar, kwargs...) = n
+get_variational_ncon(; n::Integer = default_nvar, kwargs...) = 0
+get_variational_nlin(; n::Integer = default_nvar, kwargs...) = 0
+get_variational_nnln(; n::Integer = default_nvar, kwargs...) = 0
+get_variational_nequ(; n::Integer = default_nvar, kwargs...) = 0
+get_variational_nineq(; n::Integer = default_nvar, kwargs...) = 0
+get_variational_nls_nequ(; n::Integer = default_nvar, kwargs...) = 0
diff --git a/src/PureJuMP/boundary.jl b/src/PureJuMP/boundary.jl
new file mode 100644
index 00000000..f745b1e1
--- /dev/null
+++ b/src/PureJuMP/boundary.jl
@@ -0,0 +1,29 @@
+# Discrete boundary value problem
+#
+# Problem 14 in
+# L. Luksan, C. Matonoha and J. Vlcek
+# Sparse Test Problems for Unconstrained Optimization,
+# Technical Report 1064,
+# Institute of Computer Science,
+# Academy of Sciences of the Czech Republic
+#
+# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization
+export boundary
+
+function boundary(; n::Int = default_nvar, kwargs...)
+  h = 1 // (n + 1)
+  model = Model()
+  x0 = [i * h * (1 - i * h) for i = 1:n]
+  @variable(model, x[i = 1:n], start = x0[i])
+  @objective(
+    model,
+    Min,
+    sum(
+      (
+        2 * x[i] - (i == 1 ? 0 : x[i - 1]) - (i == n ? 0 : x[i + 1]) +
+        (h^2 / 2) * (x[i] + i * h + 1)^3
+      )^2 for i = 1:n
+    )
+  )
+  return model
+end
diff --git a/src/PureJuMP/variational.jl b/src/PureJuMP/variational.jl
new file mode 100644
index 00000000..7d4e51f1
--- /dev/null
+++ b/src/PureJuMP/variational.jl
@@ -0,0 +1,34 @@
+# Discretization of a variational problem
+#
+# Problem 15 in
+# L. Luksan, C. Matonoha and J. Vlcek
+# Sparse Test Problems for Unconstrained Optimization,
+# Technical Report 1064,
+# Institute of Computer Science,
+# Academy of Sciences of the Czech Republic
+#
+# https://www.researchgate.net/publication/325314400_Sparse_Test_Problems_for_Unconstrained_Optimization
+export variational
+
+function variational(; n::Int = default_nvar, kwargs...)
+  h = 1 // (n + 1)
+  x0 = [(i * h) * (1 - i * h) for i = 1:n]
+  model = Model()
+  @variable(model, x[i = 1:n], start = x0[i])
+
+  @objective(
+    model,
+    Min,
+    2 * (
+      (sum(x[i] * (x[i] - x[i + 1]) / h for i ∈ 1:(n - 1)) + x[n] * x[n] / h) +
+      2 * h *
+      (
+        (exp(x[1]) - exp(0)) / (x[1] - 0) +
+        sum((exp(x[j + 1]) - exp(x[j])) / (x[j + 1] - x[j]) for j ∈ 1:(n - 1)) +
+        (exp(0) - exp(x[n])) / (0 - x[n])
+      )
+    )
+  )
+
+  return model
+end
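
As a quick sanity check on the ADNLP variants, something along these lines could accompany the PR. It is only a sketch: it assumes the usual OptimizationProblems.jl layout, where problems are reachable through the OptimizationProblems.ADNLPProblems submodule, and the obj/residual API from NLPModels.jl.

using OptimizationProblems, NLPModels

# NLP and NLS variants of the same discrete boundary value problem.
nlp = OptimizationProblems.ADNLPProblems.boundary(n = 10)
nls = OptimizationProblems.ADNLPProblems.boundary(n = 10, use_nls = true)

# The NLP objective is the plain sum of squared residuals, so at any point
# it should match sum(abs2, F(x)) computed from the NLS residual.
x = nlp.meta.x0
@assert obj(nlp, x) ≈ sum(abs2, residual(nls, x))

# Odd n on purpose: since x0[i] = i*h*(1 - i*h), for even n the starting
# point has x0[n ÷ 2] == x0[n ÷ 2 + 1], and the divided difference
# (exp(b) - exp(a)) / (b - a) is then 0/0 at x0 (consistent with
# :defined_everywhere => false in the meta).
var = OptimizationProblems.ADNLPProblems.variational(n = 11)
@assert isfinite(obj(var, var.meta.x0))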
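
The PureJuMP variants can be smoke-tested the same way; Ipopt below is only a placeholder assumption, any NLP-capable JuMP optimizer would do.

using JuMP, Ipopt
using OptimizationProblems

model = OptimizationProblems.PureJuMP.boundary(n = 10)
set_optimizer(model, Ipopt.Optimizer)
optimize!(model)

# Least-squares objective: the underlying nonlinear system has a root,
# so the optimal value should be close to zero.
@show objective_value(model)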