Use explicit imports of package dependencies #52

Merged · 1 commit · Sep 11, 2024
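The change replaces implicit `using` of package dependencies with explicit `import`, so every external name is qualified at its call site. A minimal sketch of the difference, illustrative only and not part of the diff:

# With `using`, exported names enter scope unqualified:
using LinearAlgebra
dot([1.0, 2.0], [3.0, 4.0])                 # works, but the origin of dot is implicit

# With `import`, only the module name is bound, and names must be qualified:
import LinearAlgebra
LinearAlgebra.dot([1.0, 2.0], [3.0, 4.0])   # origin is explicit at the call site

# `import ... as ...` (Julia 1.6+) binds a shorter local alias:
import MathOptInterface as MOI
MOI.OPTIMAL                                 # same object as MathOptInterface.OPTIMAL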
8 changes: 4 additions & 4 deletions src/PiecewiseLinearOpt.jl
@@ -6,10 +6,10 @@
module PiecewiseLinearOpt

using JuMP
import MathOptInterface
const MOI = MathOptInterface
using LinearAlgebra
using Random

import LinearAlgebra
import MathOptInterface as MOI
import Random

export PWLFunction, UnivariatePWLFunction, BivariatePWLFunction, piecewiselinear

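With using LinearAlgebra and using Random replaced by import, unqualified calls such as dot, rank, nullspace, normalize!, and MersenneTwister elsewhere in the package no longer resolve, which is why the remaining files below qualify each call. A hypothetical one-module sketch of the failure mode (not from this repository):

import LinearAlgebra

f(x, y) = dot(x, y)                  # UndefVarError: dot not defined, once f is called
g(x, y) = LinearAlgebra.dot(x, y)    # resolves, because the call is qualified

g([1.0, 2.0], [3.0, 4.0])            # returns 11.0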
12 changes: 6 additions & 6 deletions src/jump.jl
@@ -220,8 +220,8 @@
n = length(λ)-1
for b in B
JuMP.@constraints(m, begin
dot(b,h[1])*λ[1] + sum(min(dot(b,h[v]),dot(b,h[v-1]))*λ[v] for v in 2:n) + dot(b,h[n])*λ[n+1] ≤ dot(b,y)
dot(b,h[1])*λ[1] + sum(max(dot(b,h[v]),dot(b,h[v-1]))*λ[v] for v in 2:n) + dot(b,h[n])*λ[n+1] ≥ dot(b,y)
LinearAlgebra.dot(b,h[1])*λ[1] + sum(min(LinearAlgebra.dot(b,h[v]),LinearAlgebra.dot(b,h[v-1]))*λ[v] for v in 2:n) + LinearAlgebra.dot(b,h[n])*λ[n+1] ≤ LinearAlgebra.dot(b,y)
LinearAlgebra.dot(b,h[1])*λ[1] + sum(max(LinearAlgebra.dot(b,h[v]),LinearAlgebra.dot(b,h[v-1]))*λ[v] for v in 2:n) + LinearAlgebra.dot(b,h[n])*λ[n+1] ≥ LinearAlgebra.dot(b,y)
end)
end
return nothing
@@ -365,9 +365,9 @@
if indices == [n-1]
break
end
if rank(d[indices,:]) == length(indices) && length(indices) <= k-1
if LinearAlgebra.rank(d[indices,:]) == length(indices) && length(indices) <= k-1
if length(indices) == k-1
nullsp = nullspace(d[indices,:])
nullsp = LinearAlgebra.nullspace(d[indices,:])
@assert size(nullsp,2) == 1
v = vec(nullsp)
push!(spanners, canonical!(v))
@@ -406,7 +406,7 @@
end

function canonical!(v::Vector{Float64})
normalize!(v)
LinearAlgebra.normalize!(v)
for j in 1:length(v)
if abs(v[j]) < 1e-8
v[j] = 0
@@ -582,7 +582,7 @@
A = [p¹[1] p¹[2] 1
p²[1] p²[2] 1
p³[1] p³[2] 1]
@assert rank(A) == 3
@assert LinearAlgebra.rank(A) == 3

[Codecov / codecov/patch annotation on src/jump.jl#L585: added line was not covered by tests]
b = [0, 0, 1]
q = A \ b
@assert isapprox(q[1]*p¹[1] + q[2]*p¹[2] + q[3], 0, atol=1e-4)
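As a sanity check on the plane-fitting step above (the A \ b solve guarded by the rank assertion), here is a small self-contained sketch with made-up points; it mirrors the qualified LinearAlgebra calls but is not part of the diff:

import LinearAlgebra

# Three made-up, non-collinear points in the plane.
p¹, p², p³ = (0.0, 0.0), (1.0, 0.0), (0.0, 1.0)
A = [p¹[1] p¹[2] 1
     p²[1] p²[2] 1
     p³[1] p³[2] 1]
@assert LinearAlgebra.rank(A) == 3   # non-collinear points give a full-rank system

# q defines the affine function q[1]*x + q[2]*y + q[3] that is 0 at p¹, p² and 1 at p³.
b = [0, 0, 1]
q = A \ b
@assert isapprox(q[1]*p¹[1] + q[2]*p¹[2] + q[3], 0, atol=1e-4)
@assert isapprox(q[1]*p³[1] + q[2]*p³[2] + q[3], 1, atol=1e-4)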
2 changes: 1 addition & 1 deletion src/types.jl
@@ -44,7 +44,7 @@ function BivariatePWLFunction(x, y, fz::Function; pattern=:BestFit, seed=hash((l
m = length(x)
n = length(y)

mt = MersenneTwister(seed)
mt = Random.MersenneTwister(seed)
# run for each square on [x[i],x[i+1]] × [y[i],y[i+1]]
for i in 1:length(x)-1, j in 1:length(y)-1
SWt, NWt, NEt, SEt = LinearIndices((m,n))[i,j], LinearIndices((m,n))[i,j+1], LinearIndices((m,n))[i+1,j+1], LinearIndices((m,n))[i+1,j]
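The only change here is qualifying the RNG constructor. For context, a short sketch, not from the diff, of why the seeded Random.MersenneTwister matters: identical seeds reproduce the same pseudo-random draws, so pattern choices that use the RNG (e.g. pattern=:Random) are reproducible across runs.

import Random

seed = hash((4, 4))                   # made-up seed value for this sketch
mt1 = Random.MersenneTwister(seed)
mt2 = Random.MersenneTwister(seed)
@assert rand(mt1, Bool, 5) == rand(mt2, Bool, 5)   # identical seeds give identical draws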
72 changes: 43 additions & 29 deletions test/runtests.jl
@@ -3,22 +3,51 @@
# Use of this source code is governed by an MIT-style license that can be found
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.

using Cbc
using PiecewiseLinearOpt
using Test
using LinearAlgebra

import Cbc
import JuMP
import MathOptInterface
const MOI = MathOptInterface

using PiecewiseLinearOpt

# TODO: Add :SOS2 to this list, once Cbc supports it
methods_1D = (:CC,:MC,:Logarithmic,:LogarithmicIB,:ZigZag,:ZigZagInteger,:GeneralizedCelaya,:SymmetricCelaya,:Incremental,:DisaggLogarithmic)
# TODO: Add :SOS2 to this list, once Cbc supports it
# TODO: Add :MC to this list, Cbc (but not Gurobi) gives a different answer below, only for :MC (maybe a bug in Cbc?)
methods_2D = (:CC,:Logarithmic,:LogarithmicIB,:ZigZag,:ZigZagInteger,:GeneralizedCelaya,:SymmetricCelaya,:DisaggLogarithmic)
patterns_2D = (:Upper,:Lower,:BestFit,:UnionJack,:K1,:Random) # :OptimalTriangleSelection and :Stencil not supported currently
import LinearAlgebra
import MathOptInterface as MOI

methods_1D = (
:CC,
:MC,
:Logarithmic,
:LogarithmicIB,
:ZigZag,
:ZigZagInteger,
:GeneralizedCelaya,
:SymmetricCelaya,
:Incremental,
:DisaggLogarithmic,
# :SOS2, not supported by Cbc
)

methods_2D = (
:CC,
:Logarithmic,
:LogarithmicIB,
:ZigZag,
:ZigZagInteger,
:GeneralizedCelaya,
:SymmetricCelaya,
:DisaggLogarithmic,
# :SOS2, not supported by Cbc
# TODO: Add :MC to this list, Cbc (but not Gurobi) gives a different answer
# below, only for :MC (maybe a bug in Cbc?)
)
patterns_2D = (
:Upper,
:Lower,
:BestFit,
:UnionJack,
:K1,
:Random,
# :OptimalTriangleSelection not supported currently
# :Stencil
)

optimizer=JuMP.optimizer_with_attributes(Cbc.Optimizer, MOI.Silent() => true)

@@ -28,17 +28,57 @@ optimizer=JuMP.optimizer_with_attributes(Cbc.Optimizer, MOI.Silent() => true)
JuMP.@variable(model, x)
z = piecewiselinear(model, x, range(1,stop=2π, length=8), sin, method=method)
JuMP.@objective(model, Max, z)

JuMP.optimize!(model)

@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.value(x) ≈ 1.75474 rtol=1e-4
@test JuMP.value(z) ≈ 0.98313 rtol=1e-4

JuMP.@constraint(model, x ≤ 1.5z)

JuMP.optimize!(model)

@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.value(x) ≈ 1.36495 rtol=1e-4
@test JuMP.value(z) ≈ 0.90997 rtol=1e-4
@@ -55,20 +79,15 @@ end
f = (x1,x2) -> 2*(x1-1/3)^2 + 3*(x2-4/7)^4
z = piecewiselinear(model, x[1], x[2], BivariatePWLFunction(d, d, f, pattern=pattern), method=method)
JuMP.@objective(model, Min, z)

JuMP.optimize!(model)

@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.value(x[1]) ≈ 0.285714 rtol=1e-4
@test JuMP.value(x[2]) ≈ 0.571429 rtol=1e-4
@test JuMP.value(z) ≈ 0.004535 rtol=1e-3
@test JuMP.objective_value(model) ≈ 0.004535 rtol=1e-3
@test JuMP.objective_value(model) ≈ JuMP.value(z) rtol=1e-3

JuMP.@constraint(model, x[1] ≥ 0.6)

JuMP.optimize!(model)

@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.value(x[1]) ≈ 0.6 rtol=1e-4
@test JuMP.value(x[2]) ≈ 0.571428 rtol=1e-4
Expand All @@ -77,7 +96,7 @@ end
@test JuMP.objective_value(model) ≈ JuMP.value(z) rtol=1e-3
end
end
#

# println("\nbivariate optimal IB scheme tests")
# @testset "2D: optimal IB, UnionJack" begin
# model = JuMP.Model(JuMP.with_optimizer(Cbc.Optimizer))
Expand All @@ -87,20 +106,15 @@ end
# f = (x,y) -> 2*(x-1/3)^2 + 3*(y-4/7)^4
# z = piecewiselinear(model, x, y, BivariatePWLFunction(d, d, f, pattern=:UnionJack), method=:OptimalIB, subsolver=solver)
# JuMP.@objective(model, Min, z)
#
# JuMP.optimize!(model)
#
# @test JuMP.termination_status(model) == MOI.OPTIMAL
# @test JuMP.value(x) ≈ 0.5 rtol=1e-4
# @test JuMP.value(y) ≈ 0.5 rtol=1e-4
# @test JuMP.value(z) ≈ 0.055634 rtol=1e-3
# @test getobjectivevalue(model) ≈ 0.055634 rtol=1e-3
# @test getobjectivevalue(model) ≈ JuMP.value(z) rtol=1e-3
#
# JuMP.@constraint(model, x ≥ 0.6)
#
# JuMP.optimize!(model)
#
# @test JuMP.termination_status(model) == MOI.OPTIMAL
# @test JuMP.value(x) ≈ 0.6 rtol=1e-4
# @test JuMP.value(y) ≈ 0.5 rtol=1e-4
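To run this test suite locally, the standard Julia package workflow applies (not specific to this PR):

import Pkg
Pkg.activate(".")   # from the root of a PiecewiseLinearOpt.jl clone
Pkg.instantiate()   # install the package's dependencies
Pkg.test()          # runs test/runtests.jl; test-only deps such as Cbc are added automatically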