diff --git a/src/ADNLPProblems/ADNLPProblems.jl b/src/ADNLPProblems/ADNLPProblems.jl index 824468a1f..23c40a033 100644 --- a/src/ADNLPProblems/ADNLPProblems.jl +++ b/src/ADNLPProblems/ADNLPProblems.jl @@ -1,6 +1,7 @@ module ADNLPProblems using Requires +import ..OptimizationProblems: @adjust_nvar_warn const default_nvar = 100 const data_path = joinpath(@__DIR__, "..", "..", "data") diff --git a/src/ADNLPProblems/NZF1.jl b/src/ADNLPProblems/NZF1.jl index e56c47ea0..5edd35f6f 100644 --- a/src/ADNLPProblems/NZF1.jl +++ b/src/ADNLPProblems/NZF1.jl @@ -6,8 +6,10 @@ function NZF1(; use_nls::Bool = false, kwargs...) end function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis + @adjust_nvar_warn("NZF1", n_orig, n) l = div(n, 13) function f(x; l = l) return sum( @@ -29,8 +31,10 @@ function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg end function NZF1(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis + @adjust_nvar_warn("NZF1", n_orig, n) l = div(n, 13) function F!(r, x; l = l) for i = 1:l diff --git a/src/ADNLPProblems/bearing.jl b/src/ADNLPProblems/bearing.jl index 40ab56ca9..238d07e30 100644 --- a/src/ADNLPProblems/bearing.jl +++ b/src/ADNLPProblems/bearing.jl @@ -10,6 +10,12 @@ function bearing(; # nx > 0 # grid points in 1st direction # ny > 0 # grid points in 2nd direction + n_orig = n + nx = max(1, nx) + ny = max(1, ny) + n = (nx + 2) * (ny + 2) + @adjust_nvar_warn("bearing", n_orig, n) + b = 10 # grid is (0,2*pi)x(0,2*b) e = 1 // 10 # eccentricity diff --git a/src/ADNLPProblems/broydn7d.jl b/src/ADNLPProblems/broydn7d.jl index 0ca69b21d..d76b8fa11 100644 --- a/src/ADNLPProblems/broydn7d.jl +++ b/src/ADNLPProblems/broydn7d.jl @@ -1,9 +1,10 @@ export broydn7d function broydn7d(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - mod(n, 2) > 0 && @warn("broydn7d: number of variables adjusted to be even") + n_orig = n n2 = max(1, div(n, 2)) n = 2 * n2 + @adjust_nvar_warn("broydn7d", n_orig, n) function f(x; n = length(x), n2 = n2) p = 7 // 3 return abs(1 - 2 * x[2] + (3 - x[1] / 2) * x[1])^p + diff --git a/src/ADNLPProblems/catenary.jl b/src/ADNLPProblems/catenary.jl index 0c4756f65..345a0bdc3 100644 --- a/src/ADNLPProblems/catenary.jl +++ b/src/ADNLPProblems/catenary.jl @@ -8,10 +8,10 @@ function catenary( FRACT = 0.6, kwargs..., ) where {T} - (n % 3 == 0) || @warn("catenary: number of variables adjusted to be a multiple of 3") + n_orig = n n = 3 * max(1, div(n, 3)) - (n < 6) || @warn("catenary: number of variables adjusted to be greater or equal to 6") n = max(n, 6) + @adjust_nvar_warn("catenary", n_orig, n) ## Model Parameters N = div(n, 3) - 2 diff --git a/src/ADNLPProblems/chain.jl b/src/ADNLPProblems/chain.jl index f92a59202..5d0db8cb1 100644 --- a/src/ADNLPProblems/chain.jl +++ b/src/ADNLPProblems/chain.jl @@ -1,7 +1,10 @@ export chain function chain(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n nh = max(2, div(n - 4, 4)) + n = 4 * nh + 4 + @adjust_nvar_warn("chain", n_orig, n) L = 4 a = 1 diff --git a/src/ADNLPProblems/chainwoo.jl b/src/ADNLPProblems/chainwoo.jl index 9d1520fe8..154b71ba8 100644 --- a/src/ADNLPProblems/chainwoo.jl +++ b/src/ADNLPProblems/chainwoo.jl @@ -6,8 +6,9 @@ function chainwoo(; use_nls::Bool = false, kwargs...) end function chainwoo(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - (n % 4 == 0) || @warn("chainwoo: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("chainwoo", n_orig, n) function f(x; n = length(x)) return 1 + sum( 100 * (x[2 * i] - x[2 * i - 1]^2)^2 + @@ -23,8 +24,9 @@ function chainwoo(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function chainwoo(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - (n % 4 == 0) || @warn("chainwoo: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("chainwoo", n_orig, n) function F!(r, x; n = length(x)) nb = div(n, 2) - 1 r[1] = 1 diff --git a/src/ADNLPProblems/channel.jl b/src/ADNLPProblems/channel.jl index 154f6ebd5..718d0bf7f 100644 --- a/src/ADNLPProblems/channel.jl +++ b/src/ADNLPProblems/channel.jl @@ -1,7 +1,10 @@ export channel function channel(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n nh = max(2, div(n, 8)) + n = 8 * nh + @adjust_nvar_warn("channel", n_orig, n) nc = 4 nd = 4 diff --git a/src/ADNLPProblems/clnlbeam.jl b/src/ADNLPProblems/clnlbeam.jl index fc46d8e97..bf25a8b68 100644 --- a/src/ADNLPProblems/clnlbeam.jl +++ b/src/ADNLPProblems/clnlbeam.jl @@ -1,7 +1,10 @@ export clnlbeam function clnlbeam(args...; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} + n_orig = n N = div(n - 3, 3) + n = 3 * N + 3 + @adjust_nvar_warn("clnlbeam", n_orig, n) h = 1 // N alpha = 350 function f(y; N = N, h = h, alpha = alpha) diff --git a/src/ADNLPProblems/clplatea.jl b/src/ADNLPProblems/clplatea.jl index a54e08ade..7174be032 100644 --- a/src/ADNLPProblems/clplatea.jl +++ b/src/ADNLPProblems/clplatea.jl @@ -6,9 +6,10 @@ function clplatea(; wght = -0.1, kwargs..., ) where {T} + n_orig = n p = max(floor(Int, sqrt(n)), 3) - p * p != n && @warn("clplatea: number of variables adjusted from $n to $(p*p)") n = p * p + @adjust_nvar_warn("clplatea", n_orig, n) hp2 = (1 // 2) * p^2 function f(x; p = p, hp2 = hp2, wght = wght) return (eltype(x)(wght) * x[p + (p - 1) * p]) + diff --git a/src/ADNLPProblems/clplateb.jl b/src/ADNLPProblems/clplateb.jl index a732e4311..f1fbc0120 100644 --- a/src/ADNLPProblems/clplateb.jl +++ b/src/ADNLPProblems/clplateb.jl @@ -6,9 +6,10 @@ function clplateb(; wght = -0.1, kwargs..., ) where {T} + n_orig = n p = max(floor(Int, sqrt(n)), 3) - p * p != n && @warn("clplateb: number of variables adjusted from $n to $(p*p)") n = p * p + @adjust_nvar_warn("clplateb", n_orig, n) hp2 = 1 // 2 * p^2 function f(x; p = p, hp2 = hp2, wght = wght) return sum(eltype(x)(wght) / (p - 1) * x[p + (j - 1) * p] for j = 1:p) + diff --git a/src/ADNLPProblems/clplatec.jl b/src/ADNLPProblems/clplatec.jl index 5d77a3f36..43d73eae4 100644 --- a/src/ADNLPProblems/clplatec.jl +++ b/src/ADNLPProblems/clplatec.jl @@ -8,9 +8,10 @@ function clplatec(; l = 0.01, kwargs..., ) where {T} + n_orig = n p = max(floor(Int, sqrt(n)), 3) - p * p != n && @warn("clplatec: number of variables adjusted from $n to $(p*p)") n = p * p + @adjust_nvar_warn("clplatec", n_orig, n) hp2 = 1 // 2 * p^2 function f(x; p = p, hp2 = hp2, wght = wght, r = r, l = l) diff --git a/src/ADNLPProblems/dixmaan_efgh.jl b/src/ADNLPProblems/dixmaan_efgh.jl index 325a3c089..b38fb16e9 100644 --- a/src/ADNLPProblems/dixmaan_efgh.jl +++ b/src/ADNLPProblems/dixmaan_efgh.jl @@ -9,9 
+9,10 @@ function dixmaane(; δ = 125 // 1000, kwargs..., ) where {T} - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaane", n_orig, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum(i // n * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_ijkl.jl b/src/ADNLPProblems/dixmaan_ijkl.jl index f1d86948b..d0b5d8476 100644 --- a/src/ADNLPProblems/dixmaan_ijkl.jl +++ b/src/ADNLPProblems/dixmaan_ijkl.jl @@ -9,9 +9,10 @@ function dixmaani(; δ = 125 // 1000, kwargs..., ) where {T} - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaani", n_orig, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_mnop.jl b/src/ADNLPProblems/dixmaan_mnop.jl index e62e449e4..627353c22 100644 --- a/src/ADNLPProblems/dixmaan_mnop.jl +++ b/src/ADNLPProblems/dixmaan_mnop.jl @@ -9,9 +9,10 @@ function dixmaanm(; δ = 125 // 1000, kwargs..., ) where {T} - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaanm", n_orig, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/elec.jl b/src/ADNLPProblems/elec.jl index 058d7f771..11a6a30e3 100644 --- a/src/ADNLPProblems/elec.jl +++ b/src/ADNLPProblems/elec.jl @@ -1,36 +1,39 @@ export elec function elec(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - n = max(2, div(n, 3)) + n_orig = n + m = max(2, div(n_orig, 3)) + n = 3 * m + @adjust_nvar_warn("elec", n_orig, n) # Define the objective function to minimize function f(x; n = n) return sum( sum( - 1 / sqrt((x[j] - x[i])^2 + (x[n + j] - x[n + i])^2 + (x[2n + j] - x[2n + i])^2) for - j = (i + 1):n - ) for i = 1:(n - 1) + 1 / sqrt((x[j] - x[i])^2 + (x[m + j] - x[m + i])^2 + (x[2m + j] - x[2m + i])^2) for + j = (i + 1):m + ) for i = 1:(m - 1) ) end # Define the constraints on these points (sum of the square of the coordinates = 1) function c!(cx, x; n = n) - for k = 1:n - cx[k] = x[k]^2 + x[n + k]^2 + x[2n + k]^2 + for k = 1:m + cx[k] = x[k]^2 + x[m + k]^2 + x[2m + k]^2 end return cx end # bounds on the constraints - lcon = ucon = ones(T, n) + lcon = ucon = ones(T, m) # building a feasible x0 - range0 = T[i / n for i = 1:n] + range0 = T[i / m for i = 1:m] θ0 = 2π .* range0 ϕ0 = π .* range0 - xini = T[sin(θ0[i]) * cos(ϕ0[i]) for i = 1:n] # x coordinate - yini = T[sin(θ0[i]) * sin(ϕ0[i]) for i = 1:n] # y coordinate - zini = T[cos(θ0[i]) for i = 1:n] # z coordinate + xini = T[sin(θ0[i]) * cos(ϕ0[i]) for i = 1:m] # x coordinate + yini = T[sin(θ0[i]) * sin(ϕ0[i]) for i = 1:m] # y coordinate + zini = T[cos(θ0[i]) for i = 1:m] # z coordinate x0 = [xini; yini; zini] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "elec"; kwargs...) diff --git a/src/ADNLPProblems/fminsrf2.jl index c7fe806ea..eae7709ba 100644 --- a/src/ADNLPProblems/fminsrf2.jl +++ b/src/ADNLPProblems/fminsrf2.jl @@ -1,12 +1,12 @@ export fminsrf2 function fminsrf2(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - n < 4 && @warn("fminsrf2: number of variables must be ≥ 4") + n_orig = n n = max(4, n) p = floor(Int, sqrt(n)) - p * p != n && @warn("fminsrf2: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("fminsrf2", n_orig, n) h00 = 1 slopej = 4 diff --git a/src/ADNLPProblems/hovercraft1d.jl index e2e1f30c4..2dfab9bb9 100644 --- a/src/ADNLPProblems/hovercraft1d.jl +++ b/src/ADNLPProblems/hovercraft1d.jl @@ -11,7 +11,10 @@ function hovercraft1d( type::Type{T} = Float64, kwargs..., ) where {T} - N = div(n, 3) + n_orig = n + N = div(n_orig, 3) + n = 3 * N - 1 + @adjust_nvar_warn("hovercraft1d", n_orig, n) function f(y; N = N) @views x, v, u = y[1:N], y[(N + 1):(2 * N)], y[(2 * N + 1):end] return 1 // 2 * sum(u .^ 2) @@ -72,7 +75,10 @@ function hovercraft1d( type::Type{T} = Float64, kwargs..., ) where {T} - N = div(n, 3) + n_orig = n + N = div(n_orig, 3) + n = 3 * N - 1 + @adjust_nvar_warn("hovercraft1d", n_orig, n) function F!(r, y; N = N) @views x, v, u = y[1:N], y[(N + 1):(2 * N)], y[(2 * N + 1):end] r .= u diff --git a/src/ADNLPProblems/marine.jl index aa96414d9..0ddcdefc0 100644 --- a/src/ADNLPProblems/marine.jl +++ b/src/ADNLPProblems/marine.jl @@ -1,12 +1,15 @@ export marine function marine(; n::Int = default_nvar, nc::Int = 1, type::Type{T} = Float64, kwargs...) 
where {T} + n_orig = n nc = max(min(nc, 4), 1) # number of collocation points ne = 8 # number of differential equations nm = 21 # number of measurements n = max(n, 3 * ne * nc + ne + 2 * ne) nh = Int(round((n - 2 * ne + 1) / (3 * ne * nc + ne))) # number of partition intervals + n = 8 + 7 + nh * (8 + 3 * 8 * nc) + @adjust_nvar_warn("marine", n_orig, n) # roots of k-th degree Legendre polynomial rho = if nc == 1 diff --git a/src/ADNLPProblems/powellsg.jl b/src/ADNLPProblems/powellsg.jl index 38ac7db65..e95cc1d55 100644 --- a/src/ADNLPProblems/powellsg.jl +++ b/src/ADNLPProblems/powellsg.jl @@ -6,8 +6,9 @@ function powellsg(; use_nls::Bool = false, kwargs...) end function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - (n % 4 == 0) || @warn("powellsg: number of variables adjusted to be a multiple of 4") - n = 4 * max(1, div(n, 4)) # number of variables adjusted to be a multiple of 4 + n_orig = n + n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("powellsg", n_orig, n) function f(x; n = length(x)) return sum( (x[j] + 10 * x[j + 1])^2 + @@ -24,8 +25,9 @@ function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function powellsg(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - (n % 4 == 0) || @warn("powellsg: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("powellsg", n_orig, n) function F!(r, x; n = length(x)) @inbounds for j = 1:4:n r[j] = x[j] + 10 * x[j + 1] diff --git a/src/ADNLPProblems/robotarm.jl b/src/ADNLPProblems/robotarm.jl index a1264eebd..e1f0d23db 100644 --- a/src/ADNLPProblems/robotarm.jl +++ b/src/ADNLPProblems/robotarm.jl @@ -10,8 +10,11 @@ export robotarm # classification OOR2-AN-V-V function robotarm(; n::Int = default_nvar, L = 4.5, type::Type{T} = Float64, kwargs...) 
where {T} - N = max(2, div(n, 9)) + n_orig = n + N = max(2, div(n_orig, 9)) n = N + 1 + nvars = 9 * n + 1 + @adjust_nvar_warn("robotarm", n_orig, nvars) L = T(L) # x : vector of variables, of the form : [ρ(t=t1); ρ(t=t2); ... ρ(t=tf), θ(t=t1), ..., then ρ_dot, ..., then ρ_acc, .. ϕ_acc, tf] diff --git a/src/ADNLPProblems/spmsrtls.jl b/src/ADNLPProblems/spmsrtls.jl index d55161026..8cbc967cf 100644 --- a/src/ADNLPProblems/spmsrtls.jl +++ b/src/ADNLPProblems/spmsrtls.jl @@ -6,8 +6,10 @@ function spmsrtls(; use_nls::Bool = false, kwargs...) end function spmsrtls(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 + @adjust_nvar_warn("spmsrtls", n_orig, n) p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 for i = 1:n] @@ -59,8 +61,10 @@ function spmsrtls(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function spmsrtls(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 + @adjust_nvar_warn("spmsrtls", n_orig, n) p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 for i = 1:n] diff --git a/src/ADNLPProblems/srosenbr.jl b/src/ADNLPProblems/srosenbr.jl index 451d0a6e7..8c51d9e64 100644 --- a/src/ADNLPProblems/srosenbr.jl +++ b/src/ADNLPProblems/srosenbr.jl @@ -1,8 +1,9 @@ export srosenbr function srosenbr(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - (n % 2 == 0) || @warn("srosenbr: number of variables adjusted to be even") + n_orig = n n = 2 * max(1, div(n, 2)) + @adjust_nvar_warn("srosenbr", n_orig, n) function f(x; n = length(x)) return sum(100 * (x[2 * i] - x[2 * i - 1]^2)^2 + (x[2 * i - 1] - 1)^2 for i = 1:div(n, 2)) end diff --git a/src/ADNLPProblems/structural.jl b/src/ADNLPProblems/structural.jl index 0793f199b..f39948fdf 100644 --- a/src/ADNLPProblems/structural.jl +++ b/src/ADNLPProblems/structural.jl @@ -1,7 +1,8 @@ export structural function structural(args...; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n = max(n, 100) + n_orig = n + n = max(n_orig, 100) sub2ind(shape, a, b) = LinearIndices(shape)[CartesianIndex.(a, b)] Nx = min(Int(round(n^(1 / 3))), 6) @@ -23,6 +24,9 @@ function structural(args...; n::Int = default_nvar, type::Type{T} = Float64, kwa M = Int(N * (N - 1) / 2) # number of edges + nvars = 2 * M + @adjust_nvar_warn("structural", n_orig, nvars) + # EDGES: columns are the indices of the nodes at either end edges = Array{Int}(zeros(M, 2)) diff --git a/src/ADNLPProblems/watson.jl b/src/ADNLPProblems/watson.jl index 56e1f2757..cf75edc30 100644 --- a/src/ADNLPProblems/watson.jl +++ b/src/ADNLPProblems/watson.jl @@ -6,7 +6,9 @@ function watson(; use_nls::Bool = false, kwargs...) end function watson(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n n = min(max(n, 2), 31) + @adjust_nvar_warn("watson", n_orig, n) function f(x; n = n) Ti = eltype(x) return 1 // 2 * sum( @@ -31,7 +33,9 @@ function watson(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa end function watson(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} + n_orig = n n = min(max(n, 2), 31) + @adjust_nvar_warn("watson", n_orig, n) function F!(r, x; n = n) Ti = eltype(x) for i = 1:29 diff --git a/src/ADNLPProblems/woods.jl b/src/ADNLPProblems/woods.jl index 426166561..575630098 100644 --- a/src/ADNLPProblems/woods.jl +++ b/src/ADNLPProblems/woods.jl @@ -1,8 +1,9 @@ export woods function woods(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - (n % 4 == 0) || @warn("woods: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("woods", n_orig, n) function f(x; n = length(x)) return sum( 100 * (x[4 * i - 2] - x[4 * i - 3]^2)^2 + diff --git a/src/OptimizationProblems.jl b/src/OptimizationProblems.jl index 7ea7b664e..1d5a5540d 100644 --- a/src/OptimizationProblems.jl +++ b/src/OptimizationProblems.jl @@ -2,6 +2,21 @@ module OptimizationProblems using DataFrames +""" + @adjust_nvar_warn(problem_name, n_orig, n) + +Issue a warning if the number of variables was adjusted, showing both original and adjusted values. +""" +macro adjust_nvar_warn(problem_name, n_orig, n) + return quote + local _n_orig = $(esc(n_orig)) + local _n = $(esc(n)) + (_n == _n_orig) || @warn( + string($(esc(problem_name)), ": number of variables adjusted from ", _n_orig, " to ", _n) + ) + end +end + include("ADNLPProblems/ADNLPProblems.jl") include("PureJuMP/PureJuMP.jl") diff --git a/src/PureJuMP/NZF1.jl b/src/PureJuMP/NZF1.jl index f0eb746c5..886bfc857 100644 --- a/src/PureJuMP/NZF1.jl +++ b/src/PureJuMP/NZF1.jl @@ -7,9 +7,10 @@ export NZF1 function NZF1(args...; n::Int = default_nvar, kwargs...) 
- mod(n, 13) != 0 && @warn("NZF1: number of variables adjusted to be divisible by 13 and ≥ 26") + n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis + @adjust_nvar_warn("NZF1", n_orig, n) l = div(n, 13) diff --git a/src/PureJuMP/PureJuMP.jl b/src/PureJuMP/PureJuMP.jl index a855ee252..ae7418db0 100644 --- a/src/PureJuMP/PureJuMP.jl +++ b/src/PureJuMP/PureJuMP.jl @@ -19,6 +19,7 @@ function _ensure_data!(key::Symbol, relpath::AbstractString) end using JuMP, LinearAlgebra, SpecialFunctions +import ..OptimizationProblems: @adjust_nvar_warn path = dirname(@__FILE__) files = filter(x -> x[(end - 2):end] == ".jl", readdir(path)) diff --git a/src/PureJuMP/bearing.jl b/src/PureJuMP/bearing.jl index 86a3d47bc..442de6250 100644 --- a/src/PureJuMP/bearing.jl +++ b/src/PureJuMP/bearing.jl @@ -28,6 +28,12 @@ function bearing( # nx > 0 # grid points in 1st direction # ny > 0 # grid points in 2nd direction + n_orig = n + nx = max(1, nx) + ny = max(1, ny) + n = (nx + 2) * (ny + 2) + @adjust_nvar_warn("bearing", n_orig, n) + b = 10 # grid is (0,2*pi)x(0,2*b) e = 0.1 # eccentricity diff --git a/src/PureJuMP/broydn7d.jl b/src/PureJuMP/broydn7d.jl index 5778efbcc..eb24cfc2f 100644 --- a/src/PureJuMP/broydn7d.jl +++ b/src/PureJuMP/broydn7d.jl @@ -46,9 +46,10 @@ export broydn7d "Broyden 7-diagonal model in size `n`" function broydn7d(args...; n::Int = default_nvar, p::Float64 = 7 / 3, kwargs...) - mod(n, 2) > 0 && @warn("broydn7d: number of variables adjusted to be even") + n_orig = n n2 = max(1, div(n, 2)) n = 2 * n2 + @adjust_nvar_warn("broydn7d", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/catenary.jl b/src/PureJuMP/catenary.jl index 4da81b57f..6be4300b3 100644 --- a/src/PureJuMP/catenary.jl +++ b/src/PureJuMP/catenary.jl @@ -17,10 +17,10 @@ export catenary function catenary(args...; n::Int = default_nvar, Bl = 1.0, FRACT = 0.6, kwargs...) 
- (n % 3 == 0) || @warn("catenary: number of variables adjusted to be a multiple of 3") + n_orig = n n = 3 * max(1, div(n, 3)) - (n < 6) || @warn("catenary: number of variables adjusted to be greater or equal to 6") n = max(n, 6) + @adjust_nvar_warn("catenary", n_orig, n) ## Model Parameters diff --git a/src/PureJuMP/catmix.jl b/src/PureJuMP/catmix.jl index 580f2db7c..ca118b4d8 100644 --- a/src/PureJuMP/catmix.jl +++ b/src/PureJuMP/catmix.jl @@ -6,8 +6,11 @@ export catmix function catmix(args...; n::Int = default_nvar, kwargs...) + n_orig = n ne = 2 nc = 3 + n = n_orig + @adjust_nvar_warn("catmix", n_orig, 23 * n + 2) tf = 1 h = tf / n # Final time diff --git a/src/PureJuMP/chain.jl b/src/PureJuMP/chain.jl index befb5f6cd..dddc3128d 100644 --- a/src/PureJuMP/chain.jl +++ b/src/PureJuMP/chain.jl @@ -13,7 +13,10 @@ export chain function chain(args...; n::Int = default_nvar, kwargs...) + n_orig = n nh = max(2, div(n - 4, 4)) + n = 4 * nh + 4 + @adjust_nvar_warn("chain", n_orig, n) L = 4 a = 1 diff --git a/src/PureJuMP/chainwoo.jl b/src/PureJuMP/chainwoo.jl index 0fc8cb893..7d0112043 100644 --- a/src/PureJuMP/chainwoo.jl +++ b/src/PureJuMP/chainwoo.jl @@ -35,8 +35,9 @@ export chainwoo "The chained Woods function in size `n`, a variant on the Woods function" function chainwoo(args...; n::Int = default_nvar, kwargs...) - (n % 4 == 0) || @warn("chainwoo: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("chainwoo", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/channel.jl b/src/PureJuMP/channel.jl index cce634660..8054a121d 100644 --- a/src/PureJuMP/channel.jl +++ b/src/PureJuMP/channel.jl @@ -12,7 +12,10 @@ export channel function channel(args...; n::Int = default_nvar, kwargs...) 
+ n_orig = n nh = max(2, div(n, 8)) + n = 8 * nh + @adjust_nvar_warn("channel", n_orig, n) nc = 4 nd = 4 diff --git a/src/PureJuMP/clnlbeam.jl b/src/PureJuMP/clnlbeam.jl index 3849c7fe8..6cafb82cb 100644 --- a/src/PureJuMP/clnlbeam.jl +++ b/src/PureJuMP/clnlbeam.jl @@ -14,7 +14,10 @@ export clnlbeam "The clnlbeam problem in size `n`" function clnlbeam(args...; n::Int = default_nvar, kwargs...) + n_orig = n N = div(n - 3, 3) + n = 3 * N + 3 + @adjust_nvar_warn("clnlbeam", n_orig, n) h = 1 / N alpha = 350 model = Model() diff --git a/src/PureJuMP/clplatea.jl b/src/PureJuMP/clplatea.jl index 011e0db9a..a12947dce 100644 --- a/src/PureJuMP/clplatea.jl +++ b/src/PureJuMP/clplatea.jl @@ -26,9 +26,10 @@ export clplatea "The clamped plate problem (Strang, Nocedal, Dax)." function clplatea(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs...) + n_orig = n p = floor(Int, sqrt(n)) - p * p != n && @warn("clplatea: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("clplatea", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/clplateb.jl b/src/PureJuMP/clplateb.jl index 575e9fe5d..ec6315f17 100644 --- a/src/PureJuMP/clplateb.jl +++ b/src/PureJuMP/clplateb.jl @@ -27,9 +27,10 @@ export clplateb "The clamped plate problem (Strang, Nocedal, Dax)." function clplateb(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs...) 
+ n_orig = n p = floor(Int, sqrt(n)) - p * p != n && @warn("clplateb: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("clplateb", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/clplatec.jl b/src/PureJuMP/clplatec.jl index 274feda44..e85f9686d 100644 --- a/src/PureJuMP/clplatec.jl +++ b/src/PureJuMP/clplatec.jl @@ -33,9 +33,10 @@ function clplatec( l::Float64 = 0.01, kwargs..., ) + n_orig = n p = floor(Int, sqrt(n)) - p * p != n && @warn("clplatec: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("clplatec", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_efgh.jl b/src/PureJuMP/dixmaan_efgh.jl index 39e2c5976..f25edac80 100644 --- a/src/PureJuMP/dixmaan_efgh.jl +++ b/src/PureJuMP/dixmaan_efgh.jl @@ -33,9 +33,10 @@ function dixmaane( δ::Float64 = 0.125, kwargs..., ) - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaane", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_ijkl.jl b/src/PureJuMP/dixmaan_ijkl.jl index 11f291714..e4f22ed05 100644 --- a/src/PureJuMP/dixmaan_ijkl.jl +++ b/src/PureJuMP/dixmaan_ijkl.jl @@ -33,9 +33,10 @@ function dixmaani( δ::Float64 = 0.125, kwargs..., ) - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaani", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_mnop.jl b/src/PureJuMP/dixmaan_mnop.jl index c4537bcd5..0c676cc59 100644 --- a/src/PureJuMP/dixmaan_mnop.jl +++ b/src/PureJuMP/dixmaan_mnop.jl @@ -31,9 +31,10 @@ function dixmaanm( δ::Float64 = 0.125, kwargs..., ) - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaanm", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/elec.jl b/src/PureJuMP/elec.jl index 
a1f0ca3db..0d7f690ae 100644 --- a/src/PureJuMP/elec.jl +++ b/src/PureJuMP/elec.jl @@ -11,33 +11,36 @@ export elec function elec(args...; n::Int = default_nvar, kwargs...) - n = max(2, div(n, 3)) + n_orig = n + m = max(2, div(n_orig, 3)) + n = 3 * m + @adjust_nvar_warn("elec", n_orig, n) nlp = Model() - range0 = [i / n for i = 1:n] + range0 = [i / m for i = 1:m] θ0 = 2π .* range0 ϕ0 = π .* range0 - xini = [sin(θ0[i]) * cos(ϕ0[i]) for i = 1:n] # x coordinate - yini = [sin(θ0[i]) * sin(ϕ0[i]) for i = 1:n] # y coordinate - zini = [cos(θ0[i]) for i = 1:n] # z coordinate + xini = [sin(θ0[i]) * cos(ϕ0[i]) for i = 1:m] # x coordinate + yini = [sin(θ0[i]) * sin(ϕ0[i]) for i = 1:m] # y coordinate + zini = [cos(θ0[i]) for i = 1:m] # z coordinate x0 = [xini; yini; zini] - @variable(nlp, x[i = 1:(3n)], start = x0[i]) + @variable(nlp, x[i = 1:(3m)], start = x0[i]) @objective( nlp, Min, sum( sum( - 1 / sqrt((x[j] - x[i])^2 + (x[n + j] - x[n + i])^2 + (x[2n + j] - x[2n + i])^2) for - j = (i + 1):n - ) for i = 1:(n - 1) + 1 / sqrt((x[j] - x[i])^2 + (x[m + j] - x[m + i])^2 + (x[2m + j] - x[2m + i])^2) for + j = (i + 1):m + ) for i = 1:(m - 1) ) ) - @constraint(nlp, [k = 1:n], x[k]^2 + x[n + k]^2 + x[2n + k]^2 == 1) + @constraint(nlp, [k = 1:m], x[k]^2 + x[m + k]^2 + x[2m + k]^2 == 1) return nlp end diff --git a/src/PureJuMP/fminsrf2.jl b/src/PureJuMP/fminsrf2.jl index b6acb5a8f..d73c4c630 100644 --- a/src/PureJuMP/fminsrf2.jl +++ b/src/PureJuMP/fminsrf2.jl @@ -21,12 +21,12 @@ export fminsrf2 function fminsrf2(args...; n::Int = default_nvar, kwargs...) 
- n < 4 && @warn("fminsrf2: number of variables must be ≥ 4") + n_orig = n n = max(4, n) p = floor(Int, sqrt(n)) - p * p != n && @warn("fminsrf2: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("fminsrf2", n_orig, n) h00 = 1.0 slopej = 4.0 diff --git a/src/PureJuMP/gasoil.jl b/src/PureJuMP/gasoil.jl index c39531f3c..577fa93f8 100644 --- a/src/PureJuMP/gasoil.jl +++ b/src/PureJuMP/gasoil.jl @@ -8,7 +8,10 @@ export gasoil function gasoil(; n::Int = default_nvar, kwargs...) + n_orig = n nc = 4 # number of collocation points + n = n_orig + @adjust_nvar_warn("gasoil", n_orig, 26 * n + 3) ne = 2 # number of differential equations np = 3 # number of ODE parameters nm = 21 # number of measurements diff --git a/src/PureJuMP/glider.jl b/src/PureJuMP/glider.jl index 9d217b716..af1d0c3cf 100644 --- a/src/PureJuMP/glider.jl +++ b/src/PureJuMP/glider.jl @@ -8,7 +8,10 @@ export glider function glider(; n::Int = default_nvar, kwargs...) + n_orig = n # Design parameters + n = n_orig + @adjust_nvar_warn("glider", n_orig, 5 * n + 6) x_0 = 0.0 y_0 = 1000.0 y_f = 900.0 diff --git a/src/PureJuMP/hovercraft1d.jl b/src/PureJuMP/hovercraft1d.jl index 0ddce6c3f..f98eacb56 100644 --- a/src/PureJuMP/hovercraft1d.jl +++ b/src/PureJuMP/hovercraft1d.jl @@ -6,9 +6,12 @@ export hovercraft1d function hovercraft1d(args...; n::Int = default_nvar, kwargs...) + n_orig = n nlp = Model() - T = div(n, 3) # length of time horizon + T = div(n_orig, 3) # length of time horizon + n = 3 * T - 1 + @adjust_nvar_warn("hovercraft1d", n_orig, n) @variable(nlp, x[1:T]) # resulting position @variable(nlp, v[1:T]) # resulting velocity @variable(nlp, u[1:(T - 1)]) # thruster input diff --git a/src/PureJuMP/marine.jl b/src/PureJuMP/marine.jl index afd07b22d..462d775a7 100644 --- a/src/PureJuMP/marine.jl +++ b/src/PureJuMP/marine.jl @@ -18,14 +18,17 @@ export marine function marine(args...; n::Int = default_nvar, nc::Int = 1, kwargs...) 
+ n_orig = n nlp = Model() nc = max(min(nc, 4), 1) # number of collocation points ne = 8 # number of differential equations nm = 21 # number of measurements - n = max(n, 3 * ne * nc + ne + 2 * ne) + n = max(n_orig, 3 * ne * nc + ne + 2 * ne) nh = Int(round((n - 2 * ne + 1) / (3 * ne * nc + ne))) # number of partition intervals + n = 8 + 7 + nh * (8 + 3 * 8 * nc) + @adjust_nvar_warn("marine", n_orig, n) # roots of k-th degree Legendre polynomial rho = if nc == 1 diff --git a/src/PureJuMP/methanol.jl b/src/PureJuMP/methanol.jl index f27f055ca..2e70e2309 100644 --- a/src/PureJuMP/methanol.jl +++ b/src/PureJuMP/methanol.jl @@ -8,6 +8,7 @@ export methanol function methanol(args...; n::Int = default_nvar, kwargs...) + n_orig = n ne = 3 np = 5 nc = 3 @@ -35,6 +36,8 @@ function methanol(args...; n::Int = default_nvar, kwargs...) 1.122, ] tf = tau[nm] # ODEs defined in [0,tf] + n = n_orig + @adjust_nvar_warn("methanol", n_orig, 30 * n + 5) h = tf / n # uniform interval length t = [(i-1)*h for i = 1:(n + 1)] # partition fact = [factorial(k) for k = 0:nc] diff --git a/src/PureJuMP/minsurf.jl b/src/PureJuMP/minsurf.jl index a50e97f15..f755d60bf 100644 --- a/src/PureJuMP/minsurf.jl +++ b/src/PureJuMP/minsurf.jl @@ -12,10 +12,13 @@ export minsurf function minsurf(args...; n = default_nvar, kwargs...) 
+ n_orig = n # number of variables is (nx + 2) x (ny + 2) if !((:nx in keys(kwargs)) & (:ny in keys(kwargs))) - nx, ny = Int(round(sqrt(max(1, n - 2)))), Int(round(sqrt(max(1, n - 2)))) + nx, ny = Int(round(sqrt(max(1, n_orig - 2)))), Int(round(sqrt(max(1, n_orig - 2)))) end + n = (nx + 2) * (ny + 2) + @adjust_nvar_warn("minsurf", n_orig, n) x_mesh = LinRange(0, 1, nx + 2) # coordinates of the mesh points x v0 = zeros(nx + 2, ny + 2) # Surface matrix initialization diff --git a/src/PureJuMP/pinene.jl b/src/PureJuMP/pinene.jl index 347fa83d5..314813113 100644 --- a/src/PureJuMP/pinene.jl +++ b/src/PureJuMP/pinene.jl @@ -13,6 +13,7 @@ export pinene function pinene(; n::Int = default_nvar, kwargs...) + n_orig = n nc = 3 # number of collocation points ne = 5 # number of differential equations np = 5 # number of ODE parameters @@ -25,6 +26,8 @@ function pinene(; n::Int = default_nvar, kwargs...) # times at which observations made tau = [1230.0, 3060.0, 4920.0, 7800.0, 10680.0, 15030.0, 22620.0, 36420.0] tf = tau[nm] # ODEs defined in [0,tf] + n = n_orig + @adjust_nvar_warn("pinene", n_orig, 50 * n + 5) h = tf / n # uniform interval length t = [(i-1)*h for i = 1:(n + 1)] # partition diff --git a/src/PureJuMP/powellsg.jl b/src/PureJuMP/powellsg.jl index bcb9f7deb..e7ecdd2a5 100644 --- a/src/PureJuMP/powellsg.jl +++ b/src/PureJuMP/powellsg.jl @@ -37,8 +37,9 @@ export powellsg "The extended Powell singular problem in size 'n' " function powellsg(args...; n::Int = default_nvar, kwargs...) - (n % 4 == 0) || @warn("powellsg: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("powellsg", n_orig, n) x0 = zeros(n) x0[4 * (collect(1:div(n, 4))) .- 3] .= 3.0 diff --git a/src/PureJuMP/robotarm.jl b/src/PureJuMP/robotarm.jl index b6252bd90..950187b4a 100644 --- a/src/PureJuMP/robotarm.jl +++ b/src/PureJuMP/robotarm.jl @@ -14,8 +14,11 @@ export robotarm function robotarm(; n::Int = default_nvar, L = 4.5, kwargs...) 
- N = max(2, div(n, 9)) + n_orig = n + N = max(2, div(n_orig, 9)) n = N + 1 + nvars = 9 * n + 1 + @adjust_nvar_warn("robotarm", n_orig, nvars) nlp = Model() diff --git a/src/PureJuMP/rocket.jl b/src/PureJuMP/rocket.jl index 56ab5a499..c824cdeca 100644 --- a/src/PureJuMP/rocket.jl +++ b/src/PureJuMP/rocket.jl @@ -7,7 +7,10 @@ export rocket function rocket(; n::Int = default_nvar, kwargs...) + n_orig = n h_0 = 1.0 + nvars = 4 * n_orig + 5 + @adjust_nvar_warn("rocket", n_orig, nvars) v_0 = 0.0 m_0 = 1.0 g_0 = 1.0 diff --git a/src/PureJuMP/spmsrtls.jl b/src/PureJuMP/spmsrtls.jl index 98ae91cc2..4e08dd863 100644 --- a/src/PureJuMP/spmsrtls.jl +++ b/src/PureJuMP/spmsrtls.jl @@ -21,8 +21,10 @@ export spmsrtls function spmsrtls(args...; n::Int = default_nvar, kwargs...) + n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 + @adjust_nvar_warn("spmsrtls", n_orig, n) p = [sin(i^2) for i = 1:n] x0 = [p[i] / 5 for i = 1:n] diff --git a/src/PureJuMP/srosenbr.jl b/src/PureJuMP/srosenbr.jl index fc971e61d..1381f38c0 100644 --- a/src/PureJuMP/srosenbr.jl +++ b/src/PureJuMP/srosenbr.jl @@ -21,8 +21,9 @@ export srosenbr "The separable extension of Rosenbrock's function 'n' " function srosenbr(args...; n::Int = default_nvar, kwargs...) - (n % 2 == 0) || @warn("srosenbr: number of variables adjusted to be even") + n_orig = n n = 2 * max(1, div(n, 2)) + @adjust_nvar_warn("srosenbr", n_orig, n) x0 = ones(n) x0[2 * (collect(1:div(n, 2))) .- 1] .= -1.2 diff --git a/src/PureJuMP/steering.jl b/src/PureJuMP/steering.jl index 353be640b..9a72508a0 100644 --- a/src/PureJuMP/steering.jl +++ b/src/PureJuMP/steering.jl @@ -7,7 +7,10 @@ export steering function steering(; n::Int = default_nvar, kwargs...) + n_orig = n a = 100.0 # Magnitude of force. 
+ nvars = 5 * n_orig + 6 + @adjust_nvar_warn("steering", n_orig, nvars) # Bounds on the control u_min, u_max = -pi/2.0, pi/2.0 xs = zeros(4) diff --git a/src/PureJuMP/structural.jl b/src/PureJuMP/structural.jl index 65ba54d47..4bfbb8184 100644 --- a/src/PureJuMP/structural.jl +++ b/src/PureJuMP/structural.jl @@ -6,7 +6,8 @@ export structural function structural(args...; n::Int = default_nvar, kwargs...) - n = max(n, 100) + n_orig = n + n = max(n_orig, 100) sub2ind(shape, a, b) = LinearIndices(shape)[CartesianIndex.(a, b)] Nx = min(Int(round(n^(1 / 3))), 6) @@ -29,6 +30,9 @@ function structural(args...; n::Int = default_nvar, kwargs...) M = Int(N * (N - 1) / 2) # number of edges + nvars = 2 * M + @adjust_nvar_warn("structural", n_orig, nvars) + # EDGES: columns are the indices of the nodes at either end edges = Array{Int}(zeros(M, 2)) diff --git a/src/PureJuMP/torsion.jl b/src/PureJuMP/torsion.jl index 926d1e456..8fee5c164 100644 --- a/src/PureJuMP/torsion.jl +++ b/src/PureJuMP/torsion.jl @@ -7,9 +7,12 @@ export torsion function torsion(args...; n = default_nvar, kwargs...) # number of variables is (nx + 1) x (ny + 1) + n_orig = n if !((:nx in keys(kwargs)) & (:ny in keys(kwargs))) - nx, ny = Int(round(sqrt(max(1, n - 2)))), Int(round(sqrt(max(1, n - 2)))) + nx, ny = Int(round(sqrt(max(1, n_orig - 2)))), Int(round(sqrt(max(1, n_orig - 2)))) end + nvars = (nx + 2) * (ny + 2) + @adjust_nvar_warn("torsion", n_orig, nvars) c = 5.0 hx = 1.0 / (nx + 1.0) # grid spacing hy = 1.0 / (ny + 1.0) # grid spacing diff --git a/src/PureJuMP/watson.jl b/src/PureJuMP/watson.jl index 6dc9def6e..e9f6a582c 100644 --- a/src/PureJuMP/watson.jl +++ b/src/PureJuMP/watson.jl @@ -17,7 +17,9 @@ export watson function watson(args...; n::Int = default_nvar, kwargs...) 
+ n_orig = n n = min(max(n, 2), 31) + @adjust_nvar_warn("watson", n_orig, n) m = 31 nlp = Model() diff --git a/src/PureJuMP/woods.jl b/src/PureJuMP/woods.jl index 8127b51ab..c9f5b8938 100644 --- a/src/PureJuMP/woods.jl +++ b/src/PureJuMP/woods.jl @@ -39,8 +39,9 @@ export woods "The extended Woods problem `n` " function woods(args...; n::Int = default_nvar, kwargs...) - (n % 4 == 0) || @warn("woods: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("woods", n_orig, n) nlp = Model() diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index 0007a346b..4db6dbdd0 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -22,6 +22,34 @@ probes = @sync begin end @info "PureJuMP missing per worker" probes +@testset "Adjusted dimension warnings" begin + var_probs = OptimizationProblems.meta[OptimizationProblems.meta.variable_nvar, :name] + @test !isempty(var_probs) + + for prob_name in var_probs + prob_sym = Symbol(prob_name) + + get_nvar_func = getfield(OptimizationProblems, Symbol("get_", prob_name, "_nvar")) + + for n in (50, 100, 150) + n_adjusted = get_nvar_func(; n = n) + n_adjusted == n && continue # Skip if no adjustment for this n + + msg_re = Regex("number of variables adjusted from $(n) to $(n_adjusted)") + + for mod in (ADNLPProblems, PureJuMP) + isdefined(mod, prob_sym) || continue + + constructor = getfield(mod, prob_sym) + + @test_logs (:warn, msg_re) constructor(; n = n) + end + + break + end + end +end + @test setdiff(union(names(ADNLPProblems), list_problems_not_ADNLPProblems), list_problems) == [:ADNLPProblems] @test setdiff(union(names(PureJuMP), list_problems_not_PureJuMP), list_problems) == [:PureJuMP]