🤖 Format .jl files (#343)
Co-authored-by: tmigot <[email protected]>
github-actions[bot] and tmigot authored Sep 7, 2024
1 parent 36d8941 commit db7c813
Showing 40 changed files with 262 additions and 136 deletions.
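
The hunks below are purely mechanical: redundant semicolons are dropped from multi-line matrix literals, spacing around operators and indices is normalized, and long calls are split across lines. A minimal sketch of reproducing such a pass locally follows; the assumption that the bot runs JuliaFormatter with the repository's checked-in configuration is mine, not something stated in the commit itself.

# Sketch only: assumes JuliaFormatter and a .JuliaFormatter.toml at the repository root.
using Pkg
Pkg.add("JuliaFormatter")

using JuliaFormatter

# Format every .jl file under src/ in place; `format` returns true when all
# files were already formatted and nothing had to be rewritten.
already_formatted = format("src")
@info "formatting pass finished" already_formatted
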
4 changes: 2 additions & 2 deletions src/ADNLPProblems/allinit.jl
@@ -21,8 +21,8 @@ function allinit(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wh
# return cx
#end
A = T[
0 0 0 1;
0 1 0 0;
0 0 0 1
0 1 0 0
0 0 1 0
]

4 changes: 2 additions & 2 deletions src/ADNLPProblems/allinitc.jl
@@ -19,8 +19,8 @@ function allinitc(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) w
return cx
end
A = T[
0 0 0 1;
0 1 0 0;
0 0 0 1
0 1 0 0
0 0 1 0
]

2 changes: 1 addition & 1 deletion src/ADNLPProblems/alsotame.jl
@@ -15,7 +15,7 @@ function alsotame(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) w
return cx
end
A = T[
1 0 0;
1 0 0
0 1 0
]

154 changes: 151 additions & 3 deletions src/ADNLPProblems/avion2.jl
@@ -179,8 +179,145 @@ function avion2(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa
1,
1,
]
A = sparse([1, 2, 3, 2, 4, 3, 1, 4, 4, 4, 4, 5, 5, 7, 10, 14, 6, 8, 14, 14, 6, 13, 15, 7, 7, 8, 15, 9, 15, 10, 15, 11, 15, 12, 15, 13, 15, 9, 14, 11, 14, 12, 14], [1, 1, 2, 5, 5, 6, 7, 7, 8, 9, 10, 10,
19, 20, 20, 20, 22, 22, 22, 23, 24, 26, 31, 33, 34, 35, 35, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 47, 47, 48, 48, 49, 49], T[-0.13, -0.7, -1.0, 1.0, -2.0, 1.0, 1.0, -2.0, -2.0, -1.0, 1.0, -20.0, 1.0, -1.0, -0.043, 0.5, -2.0, -0.137, -1.0, 1.0, 1.0, -300.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -35.0, 660.0, -200.0, 95.0, -120.0, 70.0], 15, 49)
A = sparse(
[
1,
2,
3,
2,
4,
3,
1,
4,
4,
4,
4,
5,
5,
7,
10,
14,
6,
8,
14,
14,
6,
13,
15,
7,
7,
8,
15,
9,
15,
10,
15,
11,
15,
12,
15,
13,
15,
9,
14,
11,
14,
12,
14,
],
[
1,
1,
2,
5,
5,
6,
7,
7,
8,
9,
10,
10,
19,
20,
20,
20,
22,
22,
22,
23,
24,
26,
31,
33,
34,
35,
35,
37,
37,
38,
38,
39,
39,
40,
40,
41,
41,
47,
47,
48,
48,
49,
49,
],
T[
-0.13,
-0.7,
-1.0,
1.0,
-2.0,
1.0,
1.0,
-2.0,
-2.0,
-1.0,
1.0,
-20.0,
1.0,
-1.0,
-0.043,
0.5,
-2.0,
-0.137,
-1.0,
1.0,
1.0,
-300.0,
1.0,
-1.0,
1.0,
1.0,
-1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
-35.0,
660.0,
-200.0,
95.0,
-120.0,
70.0,
],
15,
49,
)
function c!(cx, x)
return cx
end
@@ -289,7 +426,18 @@ function avion2(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa
2,
]

return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(A)..., c!, lcon, ucon, name = "avion2"; kwargs...)
return ADNLPModels.ADNLPModel!(
f,
x0,
lvar,
uvar,
findnz(A)...,
c!,
lcon,
ucon,
name = "avion2";
kwargs...,
)
end

function avion2(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
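
Both avion2.jl and camshape.jl build the linear part of their constraints as a sparse matrix and hand its nonzero triplets to the ADNLPModels.ADNLPModel! constructor by splatting findnz(A).... A minimal sketch of that sparse/findnz round trip, using made-up indices and values rather than the problem data, looks like this:

# Sketch only: illustrative triplets, not the avion2 data.
using SparseArrays

rows = [1, 2, 2]
cols = [1, 1, 3]
vals = [-0.13, -0.7, 1.0]
A = sparse(rows, cols, vals, 2, 3)  # 2×3 sparse matrix from (row, col, value) triplets

# findnz recovers the triplet form; splatting it as findnz(A)... passes the
# three vectors as separate positional arguments, which matches how these
# problem files call the constructor.
rs, cs, vs = findnz(A)
@assert sparse(rs, cs, vs, 2, 3) == A
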
2 changes: 1 addition & 1 deletion src/ADNLPProblems/booth.jl
@@ -7,7 +7,7 @@ function booth(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
x0 = zeros(T, 2)

A = T[
1 2;
1 2
2 1
]
function c!(cx, x)
22 changes: 17 additions & 5 deletions src/ADNLPProblems/camshape.jl
@@ -24,24 +24,36 @@ function camshape(args...; n::Int = default_nvar, type::Type{T} = Float64, kwarg
ucon = vcat(T(0), T* θ) * ones(T, n + 1), zeros(T, n + 1))

A = zeros(T, n + 2, n)
A[2,n] = -1
A[2, n] = -1
lcon[2] -= R_max
ucon[2] -= R_max
A[3,1] = 1
A[3, 1] = 1
lcon[3] += R_min
ucon[3] += R_min
for i = 1:(n - 1)
A[3 + i, i + 1] = 1
A[3 + i, i] = -1
end
# cx[n + 3] = -R_min^2 - R_min * y[1] + 2 * R_min * y[1] * cos(θ)
A[1, 1] = -R_min + 2 * R_min * cos(θ)
A[1, 1] = -R_min + 2 * R_min * cos(θ)
lcon[1] += R_min^2
ucon[1] += R_min^2

lvar = T(R_min) * ones(T, n)
uvar = T(R_max) * ones(T, n)

x0 = T((R_min + R_max) / 2) * ones(T, n)
return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(sparse(A))..., c!, lcon, ucon, name = "camshape", ; kwargs...)
return ADNLPModels.ADNLPModel!(
f,
x0,
lvar,
uvar,
findnz(sparse(A))...,
c!,
lcon,
ucon,
name = "camshape",
;
kwargs...,
)
end
9 changes: 8 additions & 1 deletion src/ADNLPProblems/catenary.jl
@@ -1,6 +1,13 @@
export catenary

function catenary(args...; n::Int = default_nvar, type::Type{T} = Float64, Bl = 1, FRACT = 0.6, kwargs...) where {T}
function catenary(
args...;
n::Int = default_nvar,
type::Type{T} = Float64,
Bl = 1,
FRACT = 0.6,
kwargs...,
) where {T}
(n % 3 == 0) || @warn("catenary: number of variables adjusted to be a multiple of 3")
n = 3 * max(1, div(n, 3))
(n < 6) || @warn("catenary: number of variables adjusted to be greater or equal to 6")
4 changes: 2 additions & 2 deletions src/ADNLPProblems/hs100.jl
@@ -12,8 +12,8 @@ function hs100(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
end
x0 = T[1, 2, 0, 4, 0, 1, 1]
function c!(cx, x)
cx[1] = - 7 * x[1] - 3 * x[2] - 10 * x[3]^2 - x[4] + x[5]
cx[2] = - 23 * x[1] - x[2]^2 - 6 * x[6]^2 + 8 * x[7]
cx[1] = -7 * x[1] - 3 * x[2] - 10 * x[3]^2 - x[4] + x[5]
cx[2] = -23 * x[1] - x[2]^2 - 6 * x[6]^2 + 8 * x[7]
cx[4] = 127 - 2 * x[1]^2 - 3 * x[2]^4 - x[3] - 4 * x[4]^2 - 5 * x[5]
cx[3] = -4 * x[1]^2 - x[2]^2 + 3 * x[1] * x[2] - 2 * x[3]^2 - 5 * x[6] + 11 * x[7]
return cx
16 changes: 8 additions & 8 deletions src/ADNLPProblems/hs108.jl
@@ -10,16 +10,16 @@ function hs108(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
lvar = T[-Inf, -Inf, -Inf, -Inf, -Inf, -Inf, -Inf, -Inf, 0]
uvar = T(Inf) * ones(T, 9)
function c!(cx, x)
cx[1] = - x[3]^2 - x[4]^2
cx[2] = - x[5]^2 - x[6]^2
cx[3] = - (x[1] - x[5])^2 - (x[2] - x[6])^2
cx[4] = - (x[1] - x[7])^2 - (x[2] - x[8])^2
cx[5] = - (x[3] - x[5])^2 - (x[4] - x[6])^2
cx[6] = - (x[3] - x[7])^2 - (x[4] - x[8])^2
cx[1] = -x[3]^2 - x[4]^2
cx[2] = -x[5]^2 - x[6]^2
cx[3] = -(x[1] - x[5])^2 - (x[2] - x[6])^2
cx[4] = -(x[1] - x[7])^2 - (x[2] - x[8])^2
cx[5] = -(x[3] - x[5])^2 - (x[4] - x[6])^2
cx[6] = -(x[3] - x[7])^2 - (x[4] - x[8])^2
cx[7] = x[3] * x[9]
cx[8] = x[5] * x[8] - x[6] * x[7]
cx[9] = - x[9]^2
cx[10] = - x[1]^2 - (x[2] - x[9])^2
cx[9] = -x[9]^2
cx[10] = -x[1]^2 - (x[2] - x[9])^2
cx[11] = x[1] * x[4] - x[2] * x[3]
cx[12] = -x[5] * x[9]
return cx
6 changes: 3 additions & 3 deletions src/ADNLPProblems/hs109.jl
@@ -12,8 +12,8 @@ function hs109(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
a = 50176 // 1000
b = sin(eltype(x)(25 // 100))
ci = cos(eltype(x)(25 // 100))
cx[1] = - x[1]^2 - x[8]^2
cx[2] = - x[2]^2 - x[9]^2
cx[1] = -x[1]^2 - x[8]^2
cx[2] = -x[2]^2 - x[9]^2
cx[3] =
x[5] * x[6] * sin(-x[3] - 1 / 4) + x[5] * x[7] * sin(-x[4] - 1 / 4) + 2 * b * x[5]^2 -
a * x[1] + 400 * a
@@ -36,7 +36,7 @@ function hs109(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
2 * ci * x[7]^2 + 0.7533e-3 * a * x[7]^2
return cx
end
lcon = vcat(-T(0.55), - 2250000, -2250000, zeros(T, 6))
lcon = vcat(-T(0.55), -2250000, -2250000, zeros(T, 6))
ucon = vcat(T(0.55), T(Inf), T(Inf), zeros(T, 6))
return ADNLPModels.ADNLPModel!(
f,
2 changes: 1 addition & 1 deletion src/ADNLPProblems/hs116.jl
@@ -22,7 +22,7 @@ function hs116(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
cx[5] = x[12] - b * x[9] + ci * x[2] * x[9]
cx[6] = x[11] - b * x[8] + ci * x[1] * x[8]
cx[7] = x[5] * x[7] - x[1] * x[8] - x[4] * x[7] + x[4] * x[8]
cx[8] = - a * (x[2] * x[9] + x[5] * x[8] - x[1] * x[8] - x[6] * x[9]) - x[5] - x[6]
cx[8] = -a * (x[2] * x[9] + x[5] * x[8] - x[1] * x[8] - x[6] * x[9]) - x[5] - x[6]
cx[9] = x[2] * x[9] - x[3] * x[10] - x[6] * x[9] - 500 * x[2] + 500 * x[6] + x[2] * x[10]
cx[10] = x[2] - a * (x[2] * x[10] - x[3] * x[10])
return cx
2 changes: 1 addition & 1 deletion src/ADNLPProblems/hs225.jl
@@ -7,7 +7,7 @@ function hs225(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
x0 = T[3, 1]
function c!(cx, x)
cx[1] = x[1]^2 + x[2]^2
cx[2] = 9 * x[1]^2 + x[2]^2
cx[2] = 9 * x[1]^2 + x[2]^2
cx[3] = x[1]^2 - x[2]
cx[4] = x[2]^2 - x[1]
return cx
2 changes: 1 addition & 1 deletion src/ADNLPProblems/hs226.jl
@@ -10,7 +10,7 @@ function hs226(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher
uvar = T[Inf, Inf]
function c!(cx, x)
cx[1] = x[1]^2 + x[2]^2
cx[2] = - x[1]^2 - x[2]^2
cx[2] = -x[1]^2 - x[2]^2
return cx
end
lcon = T[0; -1]
2 changes: 1 addition & 1 deletion src/ADNLPProblems/hs23.jl
@@ -15,7 +15,7 @@ function hs23(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg
uvar = 50 * ones(T, 2)
function c!(cx, x)
cx[1] = x[1]^2 + x[2]^2
cx[2] = 9 * x[1]^2 + x[2]^2
cx[2] = 9 * x[1]^2 + x[2]^2
cx[3] = x[1]^2 - x[2]
cx[4] = x[2]^2 - x[1]
return cx
2 changes: 1 addition & 1 deletion src/ADNLPProblems/hs47.jl
@@ -1,7 +1,7 @@
export hs47

function hs47(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
function c!(cx, x)
function c!(cx, x)
cx[3] = x[1] + x[2]^2 + x[3]^3 - 3
cx[1] = x[2] - x[3]^2 + x[4]
cx[2] = x[1] * x[5]
4 changes: 2 additions & 2 deletions src/ADNLPProblems/hs72.jl
@@ -9,8 +9,8 @@ function hs72(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where
lvar = T(0.001) * ones(T, 4)
uvar = T[(5 - i) * 1e5 for i = 1:4]
function c!(cx, x)
cx[1] = + 4 / x[1] + 2.25 / x[2] + 1 / x[3] + 0.25 / x[4] - 0.0401
cx[2] = + 0.16 / x[1] + 0.36 / x[2] + 0.64 / x[3] + 0.64 / x[4] - 0.010085
cx[1] = +4 / x[1] + 2.25 / x[2] + 1 / x[3] + 0.25 / x[4] - 0.0401
cx[2] = +0.16 / x[1] + 0.36 / x[2] + 0.64 / x[3] + 0.64 / x[4] - 0.010085
return cx
end
lcon = -T(Inf) * ones(T, 2)
3 changes: 1 addition & 2 deletions src/ADNLPProblems/hs95.jl
@@ -22,8 +22,8 @@ function hs95(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where
17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] -
139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6]
cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5]
cx[4] =
159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
cx[4] = 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
return cx
end
lcon = T[4.97, -1.88, -29.08, -78.02]
3 changes: 1 addition & 2 deletions src/ADNLPProblems/hs96.jl
@@ -22,8 +22,8 @@ function hs96(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where
17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] -
139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6]
cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5]
cx[4] =
159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
cx[4] = 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6]
return cx
end
lcon = T[4.97, -1.88, -69.08, -118.02]