diff --git a/src/jump.jl b/src/jump.jl
index 36ef1da..d172222 100644
--- a/src/jump.jl
+++ b/src/jump.jl
@@ -10,7 +10,7 @@ function initPWL!(m::JuMP.Model)
     if !haskey(m.ext, :PWL)
         m.ext[:PWL] = PWLData()
     end
-    nothing
+    return nothing
 end
 
 const VarOrAff = Union{JuMP.Variable,JuMP.AffExpr}
@@ -18,7 +18,7 @@ const VarOrAff = Union{JuMP.Variable,JuMP.AffExpr}
 function piecewiselinear(m::JuMP.Model, x::VarOrAff, d, f::Function; method=defaultmethod())
     initPWL!(m)
     fd = [f(xx) for xx in d]
-    piecewiselinear(m, x, d, fd; method=method)
+    return piecewiselinear(m, x, d, fd; method=method)
 end
 
 piecewiselinear(m::JuMP.Model, x::VarOrAff, d, fd; method=defaultmethod()) =
@@ -36,8 +36,9 @@ function piecewiselinear(m::JuMP.Model, x::VarOrAff, pwl::UnivariatePWLFunction;
     if n != length(fd)
         error("You provided a different number of breakpoints ($n) and function values at the breakpoints ($(length(fd)))")
     end
-
-    n == 0 && error("I don't know how to handle a piecewise linear function with no breakpoints")
+    if n == 0
+        error("I don't know how to handle a piecewise linear function with no breakpoints")
+    end
 
     z = JuMP.@variable(m, lowerbound=minimum(fd), upperbound=maximum(fd), basename="z_$counter")
 
@@ -82,15 +83,17 @@ function piecewiselinear(m::JuMP.Model, x::VarOrAff, pwl::UnivariatePWLFunction;
         for j in 1:r
             JuMP.@constraint(m, sum((γ[i,i]+γ[i+1,i])*H[i][j] for i in 1:(n-1)) == y[j])
         end
-    else # V-formulation method
+    else
+        # V-formulation methods
         λ = JuMP.@variable(m, [1:n], lowerbound=0, upperbound=1, basename="λ_$counter")
         JuMP.@constraint(m, sum(λ) == 1)
         JuMP.@constraint(m, sum(λ[i]* d[i] for i in 1:n) == x)
         JuMP.@constraint(m, sum(λ[i]*fd[i] for i in 1:n) == z)
+
         if method in (:Logarithmic,:Log)
-            sos2_logarthmic_formulation!(m, λ)
+            sos2_logarithmic_formulation!(m, λ)
         elseif method in (:LogarithmicIB,:LogIB)
-            sos2_logarthmic_IB_formulation!(m, λ)
+            sos2_logarithmic_IB_formulation!(m, λ)
         elseif method == :CC
             sos2_cc_formulation!(m, λ)
         elseif method in (:ZigZag,:ZZB)
@@ -107,7 +110,8 @@ function piecewiselinear(m::JuMP.Model, x::VarOrAff, pwl::UnivariatePWLFunction;
             error("Unrecognized method $method")
         end
     end
-    z
+
+    return z
 end
 
 function sos2_cc_formulation!(m::JuMP.Model, λ)
@@ -120,33 +124,32 @@ function sos2_cc_formulation!(m::JuMP.Model, λ)
         JuMP.@constraint(m, λ[i] ≤ y[i-1] + y[i])
     end
     JuMP.@constraint(m, λ[n] ≤ y[n-1])
-    nothing
-end
-
-function sos2_mc_formulation!(m::JuMP.Model, λ)
-    counter = m.ext[:PWL].counter
-    n = length(λ)
-    γ = JuMP.@variable(m, [1:n-1, 1:n], basename="γ_$counter")
-    y = JuMP.@variable(m, [1:n-1], Bin, basename="y_$counter")
-    JuMP.@constraint(m, sum(y) == 1)
-    JuMP.@constraint(m, sum(γ[i,:] for i in 1:n-1) .== λ)
-    for i in 1:n-1
-        JuMP.@constraint(m, γ[i,i] + γ[i,i+1] ≥ y[i])
-    end
-    nothing
+    return nothing
 end
 
-function sos2_logarthmic_formulation!(m::JuMP.Model, λ)
+# function sos2_mc_formulation!(m::JuMP.Model, λ) # not currently used
+#     counter = m.ext[:PWL].counter
+#     n = length(λ)
+#     γ = JuMP.@variable(m, [1:n-1, 1:n], basename="γ_$counter")
+#     y = JuMP.@variable(m, [1:n-1], Bin, basename="y_$counter")
+#     JuMP.@constraint(m, sum(y) == 1)
+#     JuMP.@constraint(m, sum(γ[i,:] for i in 1:n-1) .== λ)
+#     for i in 1:n-1
+#         JuMP.@constraint(m, γ[i,i] + γ[i,i+1] ≥ y[i])
+#     end
+#     return nothing
+# end
+
+function sos2_logarithmic_formulation!(m::JuMP.Model, λ)
     counter = m.ext[:PWL].counter
     n = length(λ)-1
     k = ceil(Int,log2(n))
     y = JuMP.@variable(m, [1:k], Bin, basename="y_$counter")
-
     sos2_encoding_constraints!(m, λ, y, reflected_gray_codes(k), unit_vector_hyperplanes(k))
-    nothing
+    return nothing
 end
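[Reviewer note — illustration only, not part of the patch.] The logarithmic formulation renamed above relies on reflected Gray codes: consecutive codes differ in exactly one bit, which is what lets ceil(Int, log2(n)) binary variables enforce SOS2 on the n+1 weights λ via sos2_encoding_constraints!. A minimal standalone Julia sketch of that recursion, mirroring reflected_gray_codes (the name gray_codes_sketch is hypothetical):

    # Build the k-bit reflected Gray code sequence recursively: take the
    # (k-1)-bit sequence, append 0 to each code, then append 1 to the
    # reversed sequence.
    function gray_codes_sketch(k::Int)
        k == 0 && return Vector{Int}[]
        k == 1 && return [[0], [1]]
        prev = gray_codes_sketch(k - 1)
        return vcat([vcat(c, 0) for c in prev],
                    [vcat(c, 1) for c in reverse(prev)])
    end

    # gray_codes_sketch(2) == [[0,0],[1,0],[1,1],[0,1]]: adjacent codes differ
    # in a single coordinate, so each binary variable y[j] can only rule out
    # breakpoints on one side of a hyperplane, and any fixed y leaves at most
    # two consecutive λ entries free — which is the SOS2 condition.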
 
-function sos2_logarthmic_IB_formulation!(m::JuMP.Model, λ)
+function sos2_logarithmic_IB_formulation!(m::JuMP.Model, λ) # IB: independent branching
     counter = m.ext[:PWL].counter
     n = length(λ)-1
     k = ceil(Int,log2(n))
@@ -162,7 +165,7 @@ function sos2_logarthmic_IB_formulation!(m::JuMP.Model, λ)
             sum(λ[i] for i in 1:(n+1) if H[i-1][j] == H[i][j] == 0) ≤ 1 - y[j]
         end)
     end
-    nothing
+    return nothing
 end
 
 function sos2_zigzag_formulation!(m::JuMP.Model, λ)
@@ -170,9 +173,8 @@
     counter = m.ext[:PWL].counter
     n = length(λ)-1
     k = ceil(Int,log2(n))
     y = JuMP.@variable(m, [1:k], Bin, basename="y_$counter")
-
     sos2_encoding_constraints!(m, λ, y, zigzag_codes(k), zigzag_hyperplanes(k))
-    nothing
+    return nothing
 end
 
 function sos2_zigzag_general_integer_formulation!(m::JuMP.Model, λ)
@@ -181,37 +183,32 @@
     k = ceil(Int,log2(n)) # TODO: tighter upperbounds
     y = JuMP.@variable(m, [i=1:k], Int, lowerbound=0, upperbound=2^(k-i), basename="y_$counter")
-
     sos2_encoding_constraints!(m, λ, y, integer_zigzag_codes(k), unit_vector_hyperplanes(k))
-    nothing
+    return nothing
 end
 
 function sos2_generalized_celaya_formulation!(m::JuMP.Model, λ)
     counter = m.ext[:PWL].counter
     n = length(λ)-1
     k = ceil(Int,log2(n))
-
     codes = generalized_celaya_codes(k)
     lb = [minimum(t[i] for t in codes) for i in 1:k]
     ub = [maximum(t[i] for t in codes) for i in 1:k]
     y = JuMP.@variable(m, [i=1:k], Int, lowerbound=lb[i], upperbound=ub[i], basename="y_$counter")
-
     sos2_encoding_constraints!(m, λ, y, codes, generalized_celaya_hyperplanes(k))
-    nothing
+    return nothing
 end
 
 function sos2_symmetric_celaya_formulation!(m::JuMP.Model, λ)
     counter = m.ext[:PWL].counter
     n = length(λ)-1
     k = ceil(Int,log2(n))
-
     codes = symmetric_celaya_codes(k)
     lb = [minimum(t[i] for t in codes) for i in 1:k]
     ub = [maximum(t[i] for t in codes) for i in 1:k]
     y = JuMP.@variable(m, [i=1:k], Int, lowerbound=lb[i], upperbound=ub[i], basename="y_$counter")
-
     sos2_encoding_constraints!(m, λ, y, codes, symmetric_celaya_hyperplanes(k))
-    nothing
+    return nothing
 end
 
 function sos2_encoding_constraints!(m, λ, y, h, B)
@@ -222,48 +219,44 @@ function sos2_encoding_constraints!(m, λ, y, h, B)
             dot(b,h[1])*λ[1] + sum(max(dot(b,h[v]),dot(b,h[v-1]))*λ[v] for v in 2:n) + dot(b,h[n])*λ[n+1] ≥ dot(b,y)
         end)
     end
-    nothing
+    return nothing
 end
 
 function reflected_gray_codes(k::Int)
     if k == 0
-        codes = Vector{Int}[]
+        return Vector{Int}[]
     elseif k == 1
-        codes = [[0],[1]]
+        return [[0],[1]]
     else
         codes′ = reflected_gray_codes(k-1)
-        codes = vcat([vcat(code,0) for code in codes′],
-                     [vcat(code,1) for code in reverse(codes′)])
-        return codes
+        return vcat([vcat(code,0) for code in codes′],
+                    [vcat(code,1) for code in reverse(codes′)])
     end
-    codes
 end
 
 function zigzag_codes(k::Int)
     if k <= 0
         error("Invalid code length $k")
     elseif k == 1
-        codes = [[0],[1]]
+        return [[0],[1]]
     else
         codes′ = zigzag_codes(k-1)
-        codes = vcat([vcat(code,0) for code in codes′],
-                     [vcat(code,1) for code in codes′])
+        return vcat([vcat(code,0) for code in codes′],
+                    [vcat(code,1) for code in codes′])
     end
-    codes
 end
 
 function integer_zigzag_codes(k::Int)
     if k <= 0
         error("Invalid code length $k")
     elseif k == 1
-        codes = [[0],[1]]
+        return [[0],[1]]
     else
        codes′ = integer_zigzag_codes(k-1)
        offset = [2^(j-2) for j in k:-1:2]
-        codes = vcat([vcat(code, 0) for code in codes′],
-                     [vcat(code.+offset,1) for code in codes′])
+        return vcat([vcat(code, 0) for code in codes′],
+
[vcat(code.+offset,1) for code in codes′]) end - codes end function generalized_celaya_codes(k::Int) @@ -312,7 +305,7 @@ function zigzag_hyperplanes(k::Int) end push!(hps, hp) end - hps + return hps end function unit_vector_hyperplanes(k::Int) @@ -322,18 +315,12 @@ function unit_vector_hyperplanes(k::Int) hp[i] = 1 push!(hps, hp) end - hps + return hps end -function generalized_celaya_hyperplanes(k::Int) - C = generalized_celaya_codes(k) - compute_hyperplanes(C) -end +generalized_celaya_hyperplanes(k::Int) = compute_hyperplanes(generalized_celaya_codes(k)) -function symmetric_celaya_hyperplanes(k::Int) - C = symmetric_celaya_codes(k) - compute_hyperplanes(C) -end +symmetric_celaya_hyperplanes(k::Int) = compute_hyperplanes(symmetric_celaya_codes(k)) function compute_hyperplanes{T}(C::Vector{Vector{T}}) n = length(C) @@ -358,8 +345,12 @@ function compute_hyperplanes{T}(C::Vector{Vector{T}}) approx12 = isapprox(spanners[1],spanners[2]) approx13 = isapprox(spanners[1],spanners[3]) approx23 = isapprox(spanners[2],spanners[3]) - approx12 || push!(indices,2) - approx13 || (!approx12 && approx23) || push!(indices,3) + if !approx12 + push!(indices,2) + end + if !approx13 && !(!approx12 && approx23) + push!(indices,3) + end return spanners[indices] end @@ -391,6 +382,7 @@ function compute_hyperplanes{T}(C::Vector{Vector{T}}) end end end + keepers = [1] for i in 2:length(spanners) alreadyin = false @@ -400,9 +392,12 @@ function compute_hyperplanes{T}(C::Vector{Vector{T}}) break end end - alreadyin || push!(keepers, i) + if !alreadyin + push!(keepers, i) + end end - spanners[keepers] + + return spanners[keepers] end function canonical!(v::Vector{Float64}) @@ -420,10 +415,10 @@ function canonical!(v::Vector{Float64}) v[j] *= sgn end end - v + return v end -function optimal_IB_scheme!(m::JuMP.Model, λ, pwl, subsolver) +function optimal_IB_scheme!(m::JuMP.Model, λ, pwl, subsolver) # IB: independent branching m.ext[:OptimalIB] = Int[] if !haskey(m.ext, :OptimalIBCache) @@ -445,17 +440,20 @@ function optimal_IB_scheme!(m::JuMP.Model, λ, pwl, subsolver) t = size(xx,1) else t = ceil(Int, log2(length(T))) + xx = Array{Float64,2}(0,0) + yy = Array{Float64,2}(0,0) - xx, yy = Array(Float64,0,0), Array(Float64,0,0) while true model = JuMP.Model(solver=subsolver) JuMP.@variable(model, x[1:t,1:n], Bin) JuMP.@variable(model, y[1:t,1:n], Bin) - JuMP.@variable(model, z[1:t,1:n,1:n],Bin) + JuMP.@variable(model, z[1:t,1:n,1:n], Bin) for j in 1:t for r in J, s in J - r >= s && continue + if r >= s + continue + end JuMP.@constraints(model, begin z[j,r,s] <= x[j,r] + x[j,s] z[j,r,s] <= x[j,r] + y[j,r] @@ -471,7 +469,9 @@ function optimal_IB_scheme!(m::JuMP.Model, λ, pwl, subsolver) end for r in J, s in J - r >= s && continue + if r >= s + continue + end if E[r,s] JuMP.@constraint(model, sum(z[j,r,s] for j in 1:t) == 0) else @@ -510,7 +510,8 @@ function optimal_IB_scheme!(m::JuMP.Model, λ, pwl, subsolver) end) end push!(m.ext[:OptimalIB], t) - nothing + + return nothing end piecewiselinear(m::JuMP.Model, x::VarOrAff, y::VarOrAff, dˣ, dʸ, f::Function; method=defaultmethod()) = @@ -578,11 +579,9 @@ function piecewiselinear(m::JuMP.Model, x₁::VarOrAff, x₂::VarOrAff, pwl::Biv p³[1] p³[2] 1] b = [0,0,1] q = A \ b - @assert isapprox(q[1]*p¹[1] + q[2]*p¹[2] + q[3], 0, atol=1e-4) @assert isapprox(q[1]*p²[1] + q[2]*p²[2] + q[3], 0, atol=1e-4) @assert isapprox(q[1]*p³[1] + q[2]*p³[2] + q[3], 1, atol=1e-4) - JuMP.@constraint(m, q[1]*x̂₁[t] + q[2]*x̂₂[t] + q[3]*y[t] ≥ 0) end A = [r¹[1] r¹[2] 1 @@ -590,13 +589,11 @@ function 
piecewiselinear(m::JuMP.Model, x₁::VarOrAff, x₂::VarOrAff, pwl::Biv r³[1] r³[2] 1] b = [fz¹, fz², fz³] q = A \ b - @assert isapprox(q[1]*r¹[1] + q[2]*r¹[2] + q[3], fz¹, atol=1e-4) @assert isapprox(q[1]*r²[1] + q[2]*r²[2] + q[3], fz², atol=1e-4) @assert isapprox(q[1]*r³[1] + q[2]*r³[2] + q[3], fz³, atol=1e-4) JuMP.@constraint(m, ẑ[t] == q[1]*x̂₁[t] + q[2]*x̂₂[t] + q[3]*y[t]) end - return z elseif method in (:DisaggLogarithmic,:DLog) T = pwl.T X = pwl.x @@ -611,332 +608,339 @@ function piecewiselinear(m::JuMP.Model, x₁::VarOrAff, x₂::VarOrAff, pwl::Biv JuMP.@constraint(m, sum(sum(γ[t,i] for t in Tv[i]) * X[i][1] for i in 1:n) == x₁) JuMP.@constraint(m, sum(sum(γ[t,i] for t in Tv[i]) * X[i][2] for i in 1:n) == x₂) JuMP.@constraint(m, sum(sum(γ[t,i] for t in Tv[i]) * Z[i] for i in 1:n) == z) - r = ceil(Int, log2(length(T))) H = reflected_gray_codes(r) y = JuMP.@variable(m, [1:r], Bin, basename="y_$counter") for j in 1:r JuMP.@constraint(m, sum(sum(γ[T[i],v] for v in T[i])*H[i][j] for i in 1:length(T)) == y[j]) end - return z - end - - λ = JuMP.@variable(m, [1:nˣ,1:nʸ], lowerbound=0, upperbound=1, basename="λ_$counter") - JuMP.@constraint(m, sum(λ) == 1) - JuMP.@constraint(m, sum(λ[i,j]*uˣ[i] for i in 1:nˣ, j in 1:nʸ) == x₁) - JuMP.@constraint(m, sum(λ[i,j]*uʸ[j] for i in 1:nˣ, j in 1:nʸ) == x₂) - JuMP.@constraint(m, sum(λ[i,j]*fd[i,j] for i in 1:nˣ, j in 1:nʸ) == z) - - if method == :CC - T = pwl.T - y = JuMP.@variable(m, [T], Bin, basename="y_$counter") - JuMP.@constraint(m, sum(y) == 1) - - Ts = Dict((i,j) => Vector{Int}[] for i in 1:nˣ, j in 1:nʸ) - for t in T, ind in t - rˣ, rʸ = pwl.x[ind] - i, j = ˣtoⁱ[rˣ], ʸtoʲ[rʸ] - push!(Ts[(i,j)], t) - end - for i in 1:nˣ, j in 1:nʸ - JuMP.@constraint(m, λ[i,j] ≤ sum(y[t] for t in Ts[(i,j)])) - end - return z - elseif method == :Incremental - error("Incremental formulation for bivariate functions is not currently implemented.") - # TODO: Implement algorithm of Geissler et al. 
(2012) - elseif method == :OptimalIB - optimal_IB_scheme!(m, λ, pwl, subsolver) - return z - else # formulations with SOS2 along each dimension - Tx = [sum(λ[tˣ,tʸ] for tˣ in 1:nˣ) for tʸ in 1:nʸ] - Ty = [sum(λ[tˣ,tʸ] for tʸ in 1:nʸ) for tˣ in 1:nˣ] - if method in (:Logarithmic,:Log) - sos2_logarthmic_formulation!(m, Tx) - sos2_logarthmic_formulation!(m, Ty) - elseif method in (:LogarithmicIB,:LogIB) - sos2_logarthmic_IB_formulation!(m, Tx) - sos2_logarthmic_IB_formulation!(m, Ty) - elseif method in (:ZigZag,:ZZB) - sos2_zigzag_formulation!(m, Tx) - sos2_zigzag_formulation!(m, Ty) - elseif method in (:ZigZagInteger,:ZZI) - sos2_zigzag_general_integer_formulation!(m, Tx) - sos2_zigzag_general_integer_formulation!(m, Ty) - elseif method == :GeneralizedCelaya - sos2_generalized_celaya_formulation!(m, Tx) - sos2_generalized_celaya_formulation!(m, Ty) - elseif method == :SymmetricCelaya - sos2_symmetric_celaya_formulation!(m, Tx) - sos2_symmetric_celaya_formulation!(m, Ty) - elseif method == :SOS2 - γˣ = JuMP.@variable(m, [1:nˣ], lowerbound=0, upperbound=1, basename="γˣ_$counter") - γʸ = JuMP.@variable(m, [1:nʸ], lowerbound=0, upperbound=1, basename="γʸ_$counter") - JuMP.@constraint(m, [tˣ in 1:nˣ], γˣ[tˣ] == sum(λ[tˣ,tʸ] for tʸ in 1:nʸ)) - JuMP.@constraint(m, [tʸ in 1:nʸ], γʸ[tʸ] == sum(λ[tˣ,tʸ] for tˣ in 1:nˣ)) - JuMP.addSOS2(m, [i*γˣ[i] for i in 1:nˣ]) - JuMP.addSOS2(m, [i*γʸ[i] for i in 1:nʸ]) + else + # V-formulation methods + λ = JuMP.@variable(m, [1:nˣ,1:nʸ], lowerbound=0, upperbound=1, basename="λ_$counter") + JuMP.@constraint(m, sum(λ) == 1) + JuMP.@constraint(m, sum(λ[i,j]*uˣ[i] for i in 1:nˣ, j in 1:nʸ) == x₁) + JuMP.@constraint(m, sum(λ[i,j]*uʸ[j] for i in 1:nˣ, j in 1:nʸ) == x₂) + JuMP.@constraint(m, sum(λ[i,j]*fd[i,j] for i in 1:nˣ, j in 1:nʸ) == z) + + if method == :CC + T = pwl.T + y = JuMP.@variable(m, [T], Bin, basename="y_$counter") + JuMP.@constraint(m, sum(y) == 1) + Ts = Dict((i,j) => Vector{Int}[] for i in 1:nˣ, j in 1:nʸ) + for t in T, ind in t + rˣ, rʸ = pwl.x[ind] + i, j = ˣtoⁱ[rˣ], ʸtoʲ[rʸ] + push!(Ts[(i,j)], t) + end + for i in 1:nˣ, j in 1:nʸ + JuMP.@constraint(m, λ[i,j] ≤ sum(y[t] for t in Ts[(i,j)])) + end + elseif method == :Incremental + error("Incremental formulation for bivariate functions is not currently implemented.") + # TODO: implement algorithm of Geissler et al. 
(2012) + elseif method == :OptimalIB + optimal_IB_scheme!(m, λ, pwl, subsolver) else - error("Unrecognized method $method") - end - - pattern = pwl.meta[:structure] - if pattern == :UnionJack - numT = 0 - minˣ, minʸ = uˣ[1], uʸ[1] - # find the index of the bottom-left point - idx = findfirst(pwl.x) do w; w == (minˣ, minʸ); end - for t in pwl.T - if idx in t - numT += 1 - end + # formulations with SOS2 along each dimension + Tx = [sum(λ[tˣ,tʸ] for tˣ in 1:nˣ) for tʸ in 1:nʸ] + Ty = [sum(λ[tˣ,tʸ] for tʸ in 1:nʸ) for tˣ in 1:nˣ] + + if method in (:Logarithmic,:Log) + sos2_logarithmic_formulation!(m, Tx) + sos2_logarithmic_formulation!(m, Ty) + elseif method in (:LogarithmicIB,:LogIB) + sos2_logarithmic_IB_formulation!(m, Tx) + sos2_logarithmic_IB_formulation!(m, Ty) + elseif method in (:ZigZag,:ZZB) + sos2_zigzag_formulation!(m, Tx) + sos2_zigzag_formulation!(m, Ty) + elseif method in (:ZigZagInteger,:ZZI) + sos2_zigzag_general_integer_formulation!(m, Tx) + sos2_zigzag_general_integer_formulation!(m, Ty) + elseif method == :GeneralizedCelaya + sos2_generalized_celaya_formulation!(m, Tx) + sos2_generalized_celaya_formulation!(m, Ty) + elseif method == :SymmetricCelaya + sos2_symmetric_celaya_formulation!(m, Tx) + sos2_symmetric_celaya_formulation!(m, Ty) + elseif method == :SOS2 + γˣ = JuMP.@variable(m, [1:nˣ], lowerbound=0, upperbound=1, basename="γˣ_$counter") + γʸ = JuMP.@variable(m, [1:nʸ], lowerbound=0, upperbound=1, basename="γʸ_$counter") + JuMP.@constraint(m, [tˣ in 1:nˣ], γˣ[tˣ] == sum(λ[tˣ,tʸ] for tʸ in 1:nʸ)) + JuMP.@constraint(m, [tʸ in 1:nʸ], γʸ[tʸ] == sum(λ[tˣ,tʸ] for tˣ in 1:nˣ)) + JuMP.addSOS2(m, [i*γˣ[i] for i in 1:nˣ]) + JuMP.addSOS2(m, [i*γʸ[i] for i in 1:nʸ]) + else + error("Unrecognized method $method") end - @assert 1 <= numT <= 2 - w = JuMP.@variable(m, category=:Bin, basename="w_$counter") - # If numT==1, then bottom-left is contained in only one point, and so needs separating; otherwise numT==2, and need to offset by one - JuMP.@constraints(m, begin - sum(λ[tx,ty] for tx in 1:2:nˣ, ty in numT :2:nʸ) ≤ w - sum(λ[tx,ty] for tx in 2:2:nˣ, ty in (3-numT):2:nʸ) ≤ 1 - w - end) - elseif pattern == :K1 - # Assumption is that the triangulation is uniform, as defined in types.jl - w = JuMP.@variable(m, [1:2], Bin, basename="w_$counter") - JuMP.@constraints(m, begin - sum(λ[tx,ty] for tx in 1:nˣ, ty in 1:nʸ if mod(tx,2) == mod(ty,2) && (tx+ty) in 2:4:(nˣ+nʸ)) ≤ w[1] - sum(λ[tx,ty] for tx in 1:nˣ, ty in 1:nʸ if mod(tx,2) == mod(ty,2) && (tx+ty) in 4:4:(nˣ+nʸ)) ≤ 1 - w[1] - sum(λ[tx,ty] for tx in 1:nˣ, ty in 1:nʸ if mod(tx,2) != mod(ty,2) && (tx+ty) in 3:4:(nˣ+nʸ)) ≤ w[2] - sum(λ[tx,ty] for tx in 1:nˣ, ty in 1:nʸ if mod(tx,2) != mod(ty,2) && (tx+ty) in 5:4:(nˣ+nʸ)) ≤ 1 - w[2] - end) - elseif pattern == :OptimalTriangleSelection - m.ext[:OptimalTriSelect] = Int[] + pattern = pwl.meta[:structure] + if pattern == :UnionJack + numT = 0 + minˣ, minʸ = uˣ[1], uʸ[1] + # find the index of the bottom-left point + idx = findfirst(pwl.x) do w; w == (minˣ, minʸ); end + for t in pwl.T + if idx in t + numT += 1 + end + end + @assert 1 <= numT <= 2 - if !haskey(m.ext, :OptimalTriSelectCache) - m.ext[:OptimalTriSelectCache] = Dict() - end + w = JuMP.@variable(m, category=:Bin, basename="w_$counter") + # if numT==1, then bottom-left is contained in only one point, and so needs separating; otherwise numT==2, and need to offset by one + JuMP.@constraints(m, begin + sum(λ[tx,ty] for tx in 1:2:nˣ, ty in numT :2:nʸ) ≤ w + sum(λ[tx,ty] for tx in 2:2:nˣ, ty in (3-numT):2:nʸ) ≤ 1 - w + end) + elseif 
pattern == :K1 + # assumption is that the triangulation is uniform, as defined in types.jl + w = JuMP.@variable(m, [1:2], Bin, basename="w_$counter") + JuMP.@constraints(m, begin + sum(λ[tx,ty] for tx in 1:nˣ, ty in 1:nʸ if mod(tx,2) == mod(ty,2) && (tx+ty) in 2:4:(nˣ+nʸ)) ≤ w[1] + sum(λ[tx,ty] for tx in 1:nˣ, ty in 1:nʸ if mod(tx,2) == mod(ty,2) && (tx+ty) in 4:4:(nˣ+nʸ)) ≤ 1 - w[1] + sum(λ[tx,ty] for tx in 1:nˣ, ty in 1:nʸ if mod(tx,2) != mod(ty,2) && (tx+ty) in 3:4:(nˣ+nʸ)) ≤ w[2] + sum(λ[tx,ty] for tx in 1:nˣ, ty in 1:nʸ if mod(tx,2) != mod(ty,2) && (tx+ty) in 5:4:(nˣ+nʸ)) ≤ 1 - w[2] + end) + elseif pattern == :OptimalTriangleSelection + m.ext[:OptimalTriSelect] = Int[] - J = [(i,j) for i in 1:nˣ, j in 1:nʸ] - E = Set{Tuple{Tuple{Int,Int},Tuple{Int,Int}}}() - for t in T - @assert length(t) == 3 - IJ = [(ˣtoⁱ[pwl.x[i][1]],ʸtoʲ[pwl.x[i][2]]) for i in t] - im = minimum(ij[1] for ij in IJ) - iM = maximum(ij[1] for ij in IJ) - jm = minimum(ij[2] for ij in IJ) - jM = maximum(ij[2] for ij in IJ) - @assert im < iM - @assert im < iM - if ((im,jm) in IJ) && ((iM,jM) in IJ) - push!(E, ((im,jM),(iM,jm))) - elseif ((im,jM) in IJ) && ((iM,jm) in IJ) - push!(E, ((im,jm),(iM,jM))) - else - error() + if !haskey(m.ext, :OptimalTriSelectCache) + m.ext[:OptimalTriSelectCache] = Dict() end - end - if haskey(m.ext[:OptimalTriSelectCache], E) - xx, yy = m.ext[:OptimalTriSelectCache][E] - t = JuMP.size(xx,1) - else - if subsolver === nothing - error("No MIP solver provided to construct optimal triangle selection. Pass a solver object to the piecewiselinear function, e.g. piecewiselinear(m, x₁, x₂, bivariatefunc, method=:Logarithmic, subsolver=GurobiSolver())") + J = [(i,j) for i in 1:nˣ, j in 1:nʸ] + E = Set{Tuple{Tuple{Int,Int},Tuple{Int,Int}}}() + for t in T + @assert length(t) == 3 + IJ = [(ˣtoⁱ[pwl.x[i][1]],ʸtoʲ[pwl.x[i][2]]) for i in t] + im = minimum(ij[1] for ij in IJ) + iM = maximum(ij[1] for ij in IJ) + jm = minimum(ij[2] for ij in IJ) + jM = maximum(ij[2] for ij in IJ) + @assert im < iM + @assert im < iM + if ((im,jm) in IJ) && ((iM,jM) in IJ) + push!(E, ((im,jM),(iM,jm))) + elseif ((im,jM) in IJ) && ((iM,jm) in IJ) + push!(E, ((im,jm),(iM,jM))) + else + error() + end end - t = 1 - xx, yy = Array(Float64,0,0), Array(Float64,0,0) - while true - subm = JuMP.Model(solver=subsolver) - JuMP.@variable(subm, xˢᵘᵇ[1:t,J], Bin) - JuMP.@variable(subm, yˢᵘᵇ[1:t,J], Bin) - JuMP.@variable(subm, zˢᵘᵇ[1:t,J,J], Bin) - - for j in 1:t + + if haskey(m.ext[:OptimalTriSelectCache], E) + xx, yy = m.ext[:OptimalTriSelectCache][E] + t = JuMP.size(xx,1) + else + if subsolver === nothing + error("No MIP solver provided to construct optimal triangle selection. Pass a solver object to the piecewiselinear function, e.g. 
piecewiselinear(m, x₁, x₂, bivariatefunc, method=:Logarithmic, subsolver=GurobiSolver())") + end + t = 1 + xx, yy = Array(Float64,0,0), Array(Float64,0,0) + while true + subm = JuMP.Model(solver=subsolver) + JuMP.@variable(subm, xˢᵘᵇ[1:t,J], Bin) + JuMP.@variable(subm, yˢᵘᵇ[1:t,J], Bin) + JuMP.@variable(subm, zˢᵘᵇ[1:t,J,J], Bin) + + for j in 1:t + for r in J, s in J + # lexicographic ordering on points on grid + if r[1] > s[1] || (r[1] == s[1] && r[2] >= s[2]) + continue + end + JuMP.@constraints(subm, begin + zˢᵘᵇ[j,r,s] <= xˢᵘᵇ[j,r] + xˢᵘᵇ[j,s] + zˢᵘᵇ[j,r,s] <= xˢᵘᵇ[j,r] + yˢᵘᵇ[j,r] + zˢᵘᵇ[j,r,s] <= xˢᵘᵇ[j,s] + yˢᵘᵇ[j,s] + zˢᵘᵇ[j,r,s] <= yˢᵘᵇ[j,r] + yˢᵘᵇ[j,s] + zˢᵘᵇ[j,r,s] >= xˢᵘᵇ[j,r] + yˢᵘᵇ[j,s] - 1 + zˢᵘᵇ[j,r,s] >= xˢᵘᵇ[j,s] + yˢᵘᵇ[j,r] - 1 + end) + end + for r in J + JuMP.@constraint(subm, xˢᵘᵇ[j,r] + yˢᵘᵇ[j,r] <= 1) + end + end + for r in J, s in J - # lexiographic ordering on points on grid + # lexicographic ordering on points on grid (r[1] > s[1] || (r[1] == s[1] && r[2] >= s[2])) && continue - JuMP.@constraints(subm, begin - zˢᵘᵇ[j,r,s] <= xˢᵘᵇ[j,r] + xˢᵘᵇ[j,s] - zˢᵘᵇ[j,r,s] <= xˢᵘᵇ[j,r] + yˢᵘᵇ[j,r] - zˢᵘᵇ[j,r,s] <= xˢᵘᵇ[j,s] + yˢᵘᵇ[j,s] - zˢᵘᵇ[j,r,s] <= yˢᵘᵇ[j,r] + yˢᵘᵇ[j,s] - zˢᵘᵇ[j,r,s] >= xˢᵘᵇ[j,r] + yˢᵘᵇ[j,s] - 1 - zˢᵘᵇ[j,r,s] >= xˢᵘᵇ[j,s] + yˢᵘᵇ[j,r] - 1 - end) - end - for r in J - JuMP.@constraint(subm, xˢᵘᵇ[j,r] + yˢᵘᵇ[j,r] <= 1) + if (r,s) in E + JuMP.@constraint(subm, sum(zˢᵘᵇ[j,r,s] for j in 1:t) >= 1) + elseif max(abs(r[1]-s[1]), abs(r[2]-s[2])) == 1 + JuMP.@constraint(subm, sum(zˢᵘᵇ[j,r,s] for j in 1:t) == 0) + end end - end - for r in J, s in J - # lexiographic ordering on points on grid - (r[1] > s[1] || (r[1] == s[1] && r[2] >= s[2])) && continue - if (r,s) in E - JuMP.@constraint(subm, sum(zˢᵘᵇ[j,r,s] for j in 1:t) >= 1) - elseif max(abs(r[1]-s[1]), abs(r[2]-s[2])) == 1 - JuMP.@constraint(subm, sum(zˢᵘᵇ[j,r,s] for j in 1:t) == 0) + JuMP.@objective(subm, Min, sum(xˢᵘᵇ) + sum(yˢᵘᵇ)) + stat = JuMP.solve(subm) + if any(isnan, subm.colVal) + t += 1 + else + xx = JuMP.getvalue(xˢᵘᵇ) + yy = JuMP.getvalue(yˢᵘᵇ) + m.ext[:OptimalTriSelectCache][E] = (xx,yy) + break end end + end + y = JuMP.@variable(m, [1:t], Bin, basename="Δselect_$counter") - JuMP.@objective(subm, Min, sum(xˢᵘᵇ) + sum(yˢᵘᵇ)) - stat = JuMP.solve(subm) - if any(isnan, subm.colVal) - t += 1 - else - xx = JuMP.getvalue(xˢᵘᵇ) - yy = JuMP.getvalue(yˢᵘᵇ) - m.ext[:OptimalTriSelectCache][E] = (xx,yy) - break + for i in 1:t + JuMP.@constraints(m, begin + sum(λ[v[1],v[2]] for v in J if xx[i,v] ≈ 1) ≤ y[i] + sum(λ[v[1],v[2]] for v in J if yy[i,v] ≈ 1) ≤ 1 - y[i] + end) + end + push!(m.ext[:OptimalTriSelect], t) + elseif pattern in (:Stencil,:Stencil9) + w = JuMP.@variable(m, [1:3,1:3], Bin, basename="w_$counter") + for oˣ in 1:3, oʸ in 1:3 + innoT = fill(true, nˣ, nʸ) + for (i,j,k) in pwl.T + xⁱ, xʲ, xᵏ = pwl.x[i], pwl.x[j], pwl.x[k] + iiˣ, iiʸ = ˣtoⁱ[xⁱ[1]], ʸtoʲ[xⁱ[2]] + jjˣ, jjʸ = ˣtoⁱ[xʲ[1]], ʸtoʲ[xʲ[2]] + kkˣ, kkʸ = ˣtoⁱ[xᵏ[1]], ʸtoʲ[xᵏ[2]] + # check to see if one of the points in the triangle falls on the grid + if (mod1(iiˣ,3) == oˣ && mod1(iiʸ,3) == oʸ) || (mod1(jjˣ,3) == oˣ && mod1(jjʸ,3) == oʸ) || (mod1(kkˣ,3) == oˣ && mod1(kkʸ,3) == oʸ) + innoT[iiˣ,iiʸ] = false + innoT[jjˣ,jjʸ] = false + innoT[kkˣ,kkʸ] = false + end end + JuMP.@constraints(m, begin + sum(λ[i,j] for i in oˣ:3:nˣ, j in oʸ:3:nʸ) ≤ 1 - w[oˣ,oʸ] + sum(λ[i,j] for i in 1:nˣ, j in 1:nʸ if innoT[i,j]) ≤ w[oˣ,oʸ] + end) end - end - y = JuMP.@variable(m, [1:t], Bin, basename="Δselect_$counter") - - for i in 1:t - JuMP.@constraints(m, begin - 
sum(λ[v[1],v[2]] for v in J if xx[i,v] ≈ 1) ≤ y[i] - sum(λ[v[1],v[2]] for v in J if yy[i,v] ≈ 1) ≤ 1 - y[i] - end) - end - push!(m.ext[:OptimalTriSelect], t) - - z - elseif pattern in (:Stencil,:Stencil9) - w = JuMP.@variable(m, [1:3,1:3], Bin, basename="w_$counter") - for oˣ in 1:3, oʸ in 1:3 - innoT = fill(true, nˣ, nʸ) + else + @assert pattern in (:Upper, :Lower, :BestFit, :Random) + # Eⁿᵉ[i,j] = true means that we must cover the edge {(i,j),(i+1,j+1)} + Eⁿᵉ = fill(false, nˣ-1, nʸ-1) for (i,j,k) in pwl.T xⁱ, xʲ, xᵏ = pwl.x[i], pwl.x[j], pwl.x[k] iiˣ, iiʸ = ˣtoⁱ[xⁱ[1]], ʸtoʲ[xⁱ[2]] jjˣ, jjʸ = ˣtoⁱ[xʲ[1]], ʸtoʲ[xʲ[2]] kkˣ, kkʸ = ˣtoⁱ[xᵏ[1]], ʸtoʲ[xᵏ[2]] - # check to see if one of the points in the triangle falls on the grid - if (mod1(iiˣ,3) == oˣ && mod1(iiʸ,3) == oʸ) || (mod1(jjˣ,3) == oˣ && mod1(jjʸ,3) == oʸ) || (mod1(kkˣ,3) == oˣ && mod1(kkʸ,3) == oʸ) - innoT[iiˣ,iiʸ] = false - innoT[jjˣ,jjʸ] = false - innoT[kkˣ,kkʸ] = false + IJ = [(iiˣ,iiʸ), (jjˣ,jjʸ), (kkˣ,kkʸ)] + im = min(iiˣ, jjˣ, kkˣ) + iM = max(iiˣ, jjˣ, kkˣ) + jm = min(iiʸ, jjʸ, kkʸ) + jM = max(iiʸ, jjʸ, kkʸ) + if ((im,jM) in IJ) && ((iM,jm) in IJ) + Eⁿᵉ[im,jm] = true + else + @assert (im,jm) in IJ && (iM,jM) in IJ end end - JuMP.@constraints(m, begin - sum(λ[i,j] for i in oˣ:3:nˣ, j in oʸ:3:nʸ) ≤ 1 - w[oˣ,oʸ] - sum(λ[i,j] for i in 1:nˣ, j in 1:nʸ if innoT[i,j]) ≤ w[oˣ,oʸ] - end) - end - else - # Eⁿᵉ[i,j] = true means that we must cover the edge {(i,j),(i+1,j+1)} - Eⁿᵉ = fill(false, nˣ-1, nʸ-1) - for (i,j,k) in pwl.T - xⁱ, xʲ, xᵏ = pwl.x[i], pwl.x[j], pwl.x[k] - iiˣ, iiʸ = ˣtoⁱ[xⁱ[1]], ʸtoʲ[xⁱ[2]] - jjˣ, jjʸ = ˣtoⁱ[xʲ[1]], ʸtoʲ[xʲ[2]] - kkˣ, kkʸ = ˣtoⁱ[xᵏ[1]], ʸtoʲ[xᵏ[2]] - IJ = [(iiˣ,iiʸ), (jjˣ,jjʸ), (kkˣ,kkʸ)] - im = min(iiˣ, jjˣ, kkˣ) - iM = max(iiˣ, jjˣ, kkˣ) - jm = min(iiʸ, jjʸ, kkʸ) - jM = max(iiʸ, jjʸ, kkʸ) - if ((im,jM) in IJ) && ((iM,jm) in IJ) - Eⁿᵉ[im,jm] = true - else - @assert (im,jm) in IJ && (iM,jM) in IJ - end - end - # Diagonal lines running from SW to NE. Grouped with an offset of 3. - wⁿᵉ = JuMP.@variable(m, [0:2], Bin, basename="wⁿᵉ_$counter") - for o in 0:2 - Aᵒ = Set{Tuple{Int,Int}}() - Bᵒ = Set{Tuple{Int,Int}}() - for offˣ in o:3:(nˣ-2) - SWinA = true # Whether we put the SW corner of the next - # triangle to cover in set A - for i in (1+offˣ):(nˣ-1) - j = i - offˣ - (1 ≤ i ≤ nˣ-1) || continue - (1 ≤ j ≤ nʸ-1) || continue # should never happen - if Eⁿᵉ[i,j] # if we need to cover the edge... - if SWinA # figure out which set we need to put it in. - # This depends on previous triangle covered - # in our current line - push!(Aᵒ, (i ,j )) - push!(Bᵒ, (i+1,j+1)) - else - push!(Aᵒ, (i+1,j+1)) - push!(Bᵒ, (i ,j )) + # diagonal lines running from SW to NE. Grouped with an offset of 3. + wⁿᵉ = JuMP.@variable(m, [0:2], Bin, basename="wⁿᵉ_$counter") + for o in 0:2 + Aᵒ = Set{Tuple{Int,Int}}() + Bᵒ = Set{Tuple{Int,Int}}() + for offˣ in o:3:(nˣ-2) + SWinA = true # whether we put the SW corner of the next triangle to cover in set A + for i in (1+offˣ):(nˣ-1) + j = i - offˣ + if !(1 ≤ i ≤ nˣ-1) + continue + end + if !(1 ≤ j ≤ nʸ-1) + continue # should never happen + end + if Eⁿᵉ[i,j] # if we need to cover the edge... 
+ if SWinA # figure out which set we need to put it in; this depends on previous triangle in our current line + push!(Aᵒ, (i ,j )) + push!(Bᵒ, (i+1,j+1)) + else + push!(Aᵒ, (i+1,j+1)) + push!(Bᵒ, (i ,j )) + end + SWinA = !SWinA end - SWinA = !SWinA end end - end - for offʸ in (3-o):3:(nʸ-1) - SWinA = true - for j in (offʸ+1):(nʸ-1) - i = j - offʸ - (1 ≤ i ≤ nˣ-1) || continue - if Eⁿᵉ[i,j] - if SWinA - push!(Aᵒ, (i ,j )) - push!(Bᵒ, (i+1,j+1)) - else - push!(Aᵒ, (i+1,j+1)) - push!(Bᵒ, (i ,j )) + for offʸ in (3-o):3:(nʸ-1) + SWinA = true + for j in (offʸ+1):(nʸ-1) + i = j - offʸ + if !(1 ≤ i ≤ nˣ-1) + continue + end + if Eⁿᵉ[i,j] + if SWinA + push!(Aᵒ, (i ,j )) + push!(Bᵒ, (i+1,j+1)) + else + push!(Aᵒ, (i+1,j+1)) + push!(Bᵒ, (i ,j )) + end + SWinA = !SWinA end - SWinA = !SWinA end end + JuMP.@constraints(m, begin + sum(λ[i,j] for (i,j) in Aᵒ) ≤ wⁿᵉ[o] + sum(λ[i,j] for (i,j) in Bᵒ) ≤ 1 - wⁿᵉ[o] + end) end - JuMP.@constraints(m, begin - sum(λ[i,j] for (i,j) in Aᵒ) ≤ wⁿᵉ[o] - sum(λ[i,j] for (i,j) in Bᵒ) ≤ 1 - wⁿᵉ[o] - end) - end - wˢᵉ = JuMP.@variable(m, [0:2], Bin, basename="wˢᵉ_$counter") - for o in 0:2 - Aᵒ = Set{Tuple{Int,Int}}() - Bᵒ = Set{Tuple{Int,Int}}() - for offˣ in o:3:(nˣ-2) - SEinA = true - # for i in (1+offˣ):-1:1 - # j = offˣ - i + 2 - for j in 1:(nʸ-1) - i = nˣ - j - offˣ - (1 ≤ i ≤ nˣ-1) || continue - if !Eⁿᵉ[i,j] - if SEinA - push!(Aᵒ, (i+1,j )) - push!(Bᵒ, (i ,j+1)) - else - push!(Aᵒ, (i ,j+1)) - push!(Bᵒ, (i+1,j )) + wˢᵉ = JuMP.@variable(m, [0:2], Bin, basename="wˢᵉ_$counter") + for o in 0:2 + Aᵒ = Set{Tuple{Int,Int}}() + Bᵒ = Set{Tuple{Int,Int}}() + for offˣ in o:3:(nˣ-2) + SEinA = true + # for i in (1+offˣ):-1:1 + # j = offˣ - i + 2 + for j in 1:(nʸ-1) + i = nˣ - j - offˣ + if !(1 ≤ i ≤ nˣ-1) + continue + end + if !Eⁿᵉ[i,j] + if SEinA + push!(Aᵒ, (i+1,j )) + push!(Bᵒ, (i ,j+1)) + else + push!(Aᵒ, (i ,j+1)) + push!(Bᵒ, (i+1,j )) + end + SEinA = !SEinA end - SEinA = !SEinA end end - end - for offʸ in (3-o):3:(nʸ-1) - SEinA = true - for j in (offʸ+1):(nʸ-1) - i = nˣ - j + offʸ - (1 ≤ i ≤ nˣ-1) || continue - if !Eⁿᵉ[i,j] - if SEinA - push!(Aᵒ, (i+1,j )) - push!(Bᵒ, (i ,j+1)) - else - push!(Aᵒ, (i ,j+1)) - push!(Bᵒ, (i+1,j )) + for offʸ in (3-o):3:(nʸ-1) + SEinA = true + for j in (offʸ+1):(nʸ-1) + i = nˣ - j + offʸ + if !(1 ≤ i ≤ nˣ-1) + continue + end + if !Eⁿᵉ[i,j] + if SEinA + push!(Aᵒ, (i+1,j )) + push!(Bᵒ, (i ,j+1)) + else + push!(Aᵒ, (i ,j+1)) + push!(Bᵒ, (i+1,j )) + end + SEinA = !SEinA end - SEinA = !SEinA end end + JuMP.@constraints(m, begin + sum(λ[i,j] for (i,j) in Aᵒ) ≤ wˢᵉ[o] + sum(λ[i,j] for (i,j) in Bᵒ) ≤ 1 - wˢᵉ[o] + end) end - JuMP.@constraints(m, begin - sum(λ[i,j] for (i,j) in Aᵒ) ≤ wˢᵉ[o] - sum(λ[i,j] for (i,j) in Bᵒ) ≤ 1 - wˢᵉ[o] - end) end end end - z + + return z end diff --git a/src/types.jl b/src/types.jl index 3672969..54a78aa 100644 --- a/src/types.jl +++ b/src/types.jl @@ -4,12 +4,13 @@ type PWLFunction{D} T::Vector{Vector{Int}} meta::Dict end + function PWLFunction{D}(x::Vector{NTuple{D}}, z::Vector, T::Vector{Vector}, meta::Dict) - (n = length(x)) == length(z) || error() + @assert length(x) == length(z) for t in T - (minimum(t) > 0 && maximum(t) <= n) || error() + @assert minimum(t) > 0 && maximum(t) <= length(x) end - PWLFunction{D}(x, z, T, meta) + return PWLFunction{D}(x, z, T, meta) end PWLFunction(x, z, T) = PWLFunction(x, z, T, Dict()) @@ -18,12 +19,12 @@ const UnivariatePWLFunction = PWLFunction{1} function UnivariatePWLFunction(x, z) @assert issorted(x) - PWLFunction(Tuple{Float64}[(xx,) for xx in x], convert(Vector{Float64}, z), [[i,i+1] 
for i in 1:length(x)-1]) + return PWLFunction(Tuple{Float64}[(xx,) for xx in x], convert(Vector{Float64}, z), [[i,i+1] for i in 1:length(x)-1]) end function UnivariatePWLFunction(x, fz::Function) @assert issorted(x) - PWLFunction(Tuple{Float64}[(xx,) for xx in x], map(t->convert(Float64,fz(t)), x), [[i,i+1] for i in 1:length(x)-1]) + return PWLFunction(Tuple{Float64}[(xx,) for xx in x], map(t->convert(Float64,fz(t)), x), [[i,i+1] for i in 1:length(x)-1]) end const BivariatePWLFunction = PWLFunction{2} @@ -98,10 +99,12 @@ function BivariatePWLFunction(x, y, fz::Function; pattern=:BestFit, seed=hash((l t2 = [NEt,SEt,SWt] end else - error() + error("pattern $pattern not currently supported") end + push!(T, t1) push!(T, t2) end - PWLFunction{2}(X, Z, T, Dict(:structure=>pattern)) + + return PWLFunction{2}(X, Z, T, Dict(:structure=>pattern)) end diff --git a/test/REQUIRE b/test/REQUIRE index 450436c..eb40bf8 100644 --- a/test/REQUIRE +++ b/test/REQUIRE @@ -1,2 +1 @@ Cbc -HDF5 diff --git a/test/exhaustive-tests.jl b/test/exhaustive-tests.jl index 2465b63..0238131 100644 --- a/test/exhaustive-tests.jl +++ b/test/exhaustive-tests.jl @@ -1,110 +1,92 @@ -using PiecewiseLinearOpt -using Base.Test -using JuMP, Cbc -const solver = CbcSolver() +# using Cbc # slow, not recommended +# solver = CbcSolver(logLevel=0, integerTolerance=1e-9, primalTolerance=1e-9, ratioGap=1e-8) -methods_1D = (:CC,:MC,:Logarithmic,:ZigZag,:ZigZagInteger,:SOS2,:GeneralizedCelaya,:SymmetricCelaya,:Incremental) -methods_2D = (:CC,:MC,:Logarithmic,:ZigZag,:ZigZagInteger,:SOS2,:GeneralizedCelaya,:SymmetricCelaya) +using Gurobi +solver = GurobiSolver(OutputFlag=0) -using HDF5 +# using CPLEX +# solver = CplexSolver(CPX_PARAM_SCRIND=0) -const instance_data_1D = joinpath(dirname(@__FILE__),"1D-pwl-instances.h5") -const instance_data_2D = joinpath(dirname(@__FILE__),"2D-pwl-instances.h5") +using JuMP +using PiecewiseLinearOpt +using Base.Test +using HDF5 -for instance in ["10104_1_concave_1"] - objs = Dict() +methods_1D = (:CC,:MC,:Logarithmic,:LogarithmicIB,:ZigZag,:ZigZagInteger,:SOS2,:GeneralizedCelaya,:SymmetricCelaya,:Incremental,:DisaggLogarithmic) +methods_2D = (:CC,:MC,:Logarithmic,:LogarithmicIB,:ZigZag,:ZigZagInteger,:SOS2,:GeneralizedCelaya,:SymmetricCelaya,:DisaggLogarithmic) +# methods_2D = (:OptimalIB,) # very slow on all solvers +patterns_2D = (:Upper,:Lower,:UnionJack,:K1,:Random) # not :BestFit because requires function values at midpoints, :OptimalTriangleSelection and :Stencil not supported currently +# tests on network flow model with piecewise-linear objective +# instance data loaded from .h5 files +instance_data_1D = joinpath(dirname(@__FILE__),"1D-pwl-instances.h5") +instance_data_2D = joinpath(dirname(@__FILE__),"2D-pwl-instances.h5") + +println("\nunivariate tests") +@testset "instance $instance" for instance in ["10104_1_concave_1"] demand = h5read(instance_data_1D, string(instance,"/demand")) supply = h5read(instance_data_1D, string(instance,"/supply")) - numdem = size(demand, 1) - numsup = size(supply, 1) + ndem = size(demand, 1) + nsup = size(supply, 1) - d = h5read(instance_data_1D, string(instance,"/d")) - fd = h5read(instance_data_1D, string(instance,"/fd")) - K = size(d, 2) + rawd = h5read(instance_data_1D, string(instance,"/d")) + d = rawd[1,:] + rawfd = h5read(instance_data_1D, string(instance,"/fd")) + fd = rawfd[1,:] - for method in methods_1D + objval1 = NaN + @testset "1D: $method" for method in methods_1D model = Model(solver=solver) - @variable(model, x[1:numsup,1:numdem] ≥ 0) - for j in 1:numdem 
- # demand constraint - @constraint(model, sum(x[i,j] for i in 1:numsup) == demand[j]) - end - for i in 1:numsup - # supply constraint - @constraint(model, sum(x[i,j] for j in 1:numdem) == supply[i]) + @variable(model, x[1:nsup,1:ndem] ≥ 0) + @constraint(model, [j in 1:ndem], sum(x[i,j] for i in 1:nsup) == demand[j]) + @constraint(model, [i in 1:nsup], sum(x[i,j] for j in 1:ndem) == supply[i]) + @objective(model, Min, sum(piecewiselinear(model, x[i,j], d, fd, method=method) for i in 1:nsup, j in 1:ndem)) + + @test solve(model) == :Optimal + if isnan(objval1) + objval1 = getobjectivevalue(model) + else + @test getobjectivevalue(model) ≈ objval1 rtol=1e-4 end - - idx = 1 - obj = AffExpr() - for i in 1:numsup, j in 1:numdem - z = piecewiselinear(model, x[i,j], d[idx,:], fd[idx,:], method=method) - obj += z - end - @objective(model, Min, obj) - - stat = solve(model) - objs[method] = getobjectivevalue(model) - end - vals = collect(values(objs)) - for i in 2:length(vals) - @test isapprox(vals[i-1], vals[i], rtol=1e-4) end end -# for numpieces in [4,8,16,32], variety in 1:5, objective in 1:20 -for numpieces in [4], variety in 1:5, objective in 1:20 +println("\nbivariate tests") +# @testset "numpieces $numpieces, variety $variety, objective $objective" for numpieces in [4,8], variety in 1:5, objective in 1:20 +@testset "numpieces $numpieces, variety $variety, objective $objective" for numpieces in [4], variety in 1:1, objective in 1:1 instance = string(numpieces,"_",variety,"_",objective) - objs = Dict() demand = h5read(instance_data_2D, string(instance,"/demand")) supply = h5read(instance_data_2D, string(instance,"/supply")) - numdem = size(demand, 1) - numsup = size(supply, 1) - - d = h5read(instance_data_2D, string(instance,"/d")) - fd = h5read(instance_data_2D, string(instance,"/fd")) - K = size(d, 2) - - for method in methods_2D + ndem = size(demand, 1) + nsup = size(supply, 1) + + rawd = h5read(instance_data_2D, string(instance,"/d")) + d = rawd[1,:] + rawfd = h5read(instance_data_2D, string(instance,"/fd")) + fˣ = reshape(rawfd[1,:], length(d), length(d)) + ˣtoⁱ = Dict(d[p] => p for p in 1:length(d)) + f = (pˣ,pʸ) -> fˣ[ˣtoⁱ[pˣ],ˣtoⁱ[pʸ]] + + objval1 = NaN + @testset "2D: $method, $pattern" for method in methods_2D, pattern in patterns_2D model = Model(solver=solver) - @variable(model, x[1:numsup,1:numdem] ≥ 0) - @variable(model, y[1:numsup,1:numdem] ≥ 0) - - for j in 1:numdem - # demand constraint - @constraint(model, sum(x[i,j] for i in 1:numsup) == demand[j]) - @constraint(model, sum(y[i,j] for i in 1:numsup) == demand[j]) - end - for i in 1:numsup - # supply constraint - @constraint(model, sum(x[i,j] for j in 1:numdem) == supply[i]) - @constraint(model, sum(y[i,j] for j in 1:numdem) == supply[i]) + @variable(model, x[1:nsup,1:ndem] ≥ 0) + @constraint(model, [j in 1:ndem], sum(x[i,j] for i in 1:nsup) == demand[j]) + @constraint(model, [i in 1:nsup], sum(x[i,j] for j in 1:ndem) == supply[i]) + @variable(model, y[1:nsup,1:ndem] ≥ 0) + @constraint(model, [j in 1:ndem], sum(y[i,j] for i in 1:nsup) == demand[j]) + @constraint(model, [i in 1:nsup], sum(y[i,j] for j in 1:ndem) == supply[i]) + + @objective(model, Min, sum(piecewiselinear(model, x[i,j], y[i,j], BivariatePWLFunction(d, d, f, pattern=pattern), method=method, subsolver=solver) for i in 1:nsup, j in 1:ndem)) + + @test solve(model) == :Optimal + if isnan(objval1) + objval1 = getobjectivevalue(model) + else + @test getobjectivevalue(model) ≈ objval1 rtol=1e-4 end - - for i in 1:numdem, j in 1:numsup - @constraint(model, x[i,j] + y[i,j] 
≤ 1.5d[end]) - end - - idx = 1 - obj = AffExpr() - for i in 1:numsup, j in 1:numdem - dˣ = d[idx,:] - fˣ = reshape(fd[idx,:], length(dˣ), length(dˣ)) - ˣtoⁱ = Dict(dˣ[i] => i for i in 1:length(dˣ)) - fp = (pˣ,pʸ) -> fˣ[ˣtoⁱ[pˣ],ˣtoⁱ[pʸ]] - z = piecewiselinear(model, x[i,j], y[i,j], BivariatePWLFunction(dˣ, dˣ, fp, pattern=:UnionJack), method=method) - obj += z - end - @objective(model, Min, obj) - - stat = solve(model) - objs[method] = getobjectivevalue(model) - end - vals = collect(values(objs)) - for i in 2:length(vals) - @test isapprox(vals[i-1], vals[i], rtol=1e-4) end end diff --git a/test/runtests.jl b/test/runtests.jl index 58c1771..f249747 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,48 +1,82 @@ + +using Cbc +solver = CbcSolver(logLevel=0, integerTolerance=1e-9, primalTolerance=1e-9, ratioGap=1e-8) + +# using Gurobi +# solver = GurobiSolver(OutputFlag=0) + +# using CPLEX +# solver = CplexSolver(CPX_PARAM_SCRIND=0) + + +using JuMP using PiecewiseLinearOpt using Base.Test -using JuMP, Cbc - -const solver = CbcSolver() methods_1D = (:CC,:MC,:Logarithmic,:LogarithmicIB,:ZigZag,:ZigZagInteger,:SOS2,:GeneralizedCelaya,:SymmetricCelaya,:Incremental,:DisaggLogarithmic) methods_2D = (:CC,:MC,:Logarithmic,:LogarithmicIB,:ZigZag,:ZigZagInteger,:SOS2,:GeneralizedCelaya,:SymmetricCelaya,:DisaggLogarithmic) +patterns_2D = (:Upper,:Lower,:BestFit,:UnionJack,:K1,:Random) # :OptimalTriangleSelection and :Stencil not supported currently + +println("\nunivariate tests") +@testset "1D: $method" for method in methods_1D + model = Model(solver=solver) + @variable(model, x) + z = piecewiselinear(model, x, linspace(1,2π,8), sin, method=method) + @objective(model, Max, z) + + @test solve(model) == :Optimal + @test getvalue(x) ≈ 1.75474 rtol=1e-4 + @test getvalue(z) ≈ 0.98313 rtol=1e-4 + + @constraint(model, x ≤ 1.5z) -let d = linspace(1,2π,8), f = sin - for method in methods_1D - println("Method: $method") - model = Model(solver=solver) - @variable(model, x) - z = piecewiselinear(model, x, d, sin, method=method) - @objective(model, Max, z) - @test solve(model) == :Optimal - @test isapprox(getvalue(x), 1.75474, rtol=1e-4) - @test isapprox(getvalue(z), 0.98313, rtol=1e-4) - - @constraint(model, x ≤ 1.5z) - @test solve(model) == :Optimal - @test isapprox(getvalue(x), 1.36495, rtol=1e-4) - @test isapprox(getvalue(z), 0.90997, rtol=1e-4) - end + @test solve(model) == :Optimal + @test getvalue(x) ≈ 1.36495 rtol=1e-4 + @test getvalue(z) ≈ 0.90997 rtol=1e-4 end -let dˣ = linspace(0,1,8), dʸ = linspace(0,1,8), f = (x,y) -> 2*(x-1/3)^2 + 3*(y-4/7)^4 - for method in methods_2D - println("Method: $method") - model = Model(solver=solver) - @variable(model, x) - @variable(model, y) - # z = piecewiselinear(model, x, y, dˣ, dʸ, f, method=method) - z = piecewiselinear(model, x, y, BivariatePWLFunction(dˣ, dʸ, f, pattern=:UnionJack), method=method) - @objective(model, Min, z) - @test solve(model) == :Optimal - @test isapprox(getvalue(x), 0.285714, rtol=1e-4) - @test isapprox(getvalue(y), 0.571429, rtol=1e-4) - @test isapprox(getvalue(z), 0.004535, atol=1e-4) - - @constraint(model, x ≥ 0.6) - @test solve(model) == :Optimal - @test isapprox(getvalue(x), 0.6, rtol=1e-4) - @test isapprox(getvalue(y), 0.571428, rtol=1e-4) - @test isapprox(getvalue(z), 0.148753, rtol=1e-4) - end +println("\nbivariate tests") +@testset "2D: $method, $pattern" for method in methods_2D, pattern in patterns_2D + model = Model(solver=solver) + @variable(model, x) + @variable(model, y) + d = linspace(0,1,8) + f = (x,y) -> 2*(x-1/3)^2 + 
3*(y-4/7)^4
+    z = piecewiselinear(model, x, y, BivariatePWLFunction(d, d, f, pattern=pattern), method=method, subsolver=solver)
+    @objective(model, Min, z)
+
+    @test solve(model) == :Optimal
+    @test getvalue(x) ≈ 0.285714 rtol=1e-4
+    @test getvalue(y) ≈ 0.571429 rtol=1e-4
+    @test getvalue(z) ≈ 0.004535 rtol=1e-3
+
+    @constraint(model, x ≥ 0.6)
+
+    @test solve(model) == :Optimal
+    @test getvalue(x) ≈ 0.6 rtol=1e-4
+    @test getvalue(y) ≈ 0.571428 rtol=1e-4
+    @test getvalue(z) ≈ 0.148753 rtol=1e-4
+end
+
+println("\nbivariate optimal IB scheme tests")
+@testset "2D: optimal IB, UnionJack" begin
+    model = Model(solver=solver)
+    @variable(model, x)
+    @variable(model, y)
+    d = linspace(0,1,3)
+    f = (x,y) -> 2*(x-1/3)^2 + 3*(y-4/7)^4
+    z = piecewiselinear(model, x, y, BivariatePWLFunction(d, d, f, pattern=:UnionJack), method=:OptimalIB, subsolver=solver)
+    @objective(model, Min, z)
+
+    @test solve(model) == :Optimal
+    @test getvalue(x) ≈ 0.5 rtol=1e-4
+    @test getvalue(y) ≈ 0.5 rtol=1e-4
+    @test getvalue(z) ≈ 0.055634 rtol=1e-3
+
+    @constraint(model, x ≥ 0.6)
+
+    @test solve(model) == :Optimal
+    @test getvalue(x) ≈ 0.6 rtol=1e-4
+    @test getvalue(y) ≈ 0.5 rtol=1e-4
+    @test getvalue(z) ≈ 0.222300 rtol=1e-3
 end
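[Reviewer note — illustration only, not part of the patch.] For anyone trying the changed API locally, a minimal univariate driver in the same JuMP 0.18-era style as the tests above; the Cbc solver options are an assumption, and any MILP solver with a MathProgBase interface should work:

    using JuMP, Cbc, PiecewiseLinearOpt

    m = Model(solver=CbcSolver(logLevel=0))
    @variable(m, x)
    # Interpolate sin between 8 breakpoints using the logarithmic SOS2 model.
    z = piecewiselinear(m, x, linspace(1, 2π, 8), sin, method=:Logarithmic)
    @objective(m, Max, z)
    solve(m)  # runtests.jl above expects x ≈ 1.75474 and z ≈ 0.98313 at the optimum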