Callbacks
This tutorial was generated using Literate.jl. Download the source as a `.jl` file.
The purpose of the tutorial is to demonstrate the various solver-independent and solver-dependent callbacks that are supported by JuMP.
The tutorial uses the following packages:
```julia
using JuMP
import GLPK
import Random
import Test
```
This tutorial uses the MathOptInterface API. By default, JuMP exports the `MOI` symbol as an alias for the MathOptInterface.jl package. We recommend making this more explicit in your code by adding the following lines:

```julia
import MathOptInterface as MOI
```
Lazy constraints
An example using a lazy constraint callback.
```julia
function example_lazy_constraint()
    model = Model(GLPK.Optimizer)
    @variable(model, 0 <= x <= 2.5, Int)
    @variable(model, 0 <= y <= 2.5, Int)
    @objective(model, Max, y)
    lazy_called = false
    function my_callback_function(cb_data)
        lazy_called = true
        x_val = callback_value(cb_data, x)
        y_val = callback_value(cb_data, y)
        println("Called from (x, y) = ($x_val, $y_val)")
        status = callback_node_status(cb_data, model)
        if status == MOI.CALLBACK_NODE_STATUS_FRACTIONAL
            println(" - Solution is integer infeasible!")
        elseif status == MOI.CALLBACK_NODE_STATUS_INTEGER
            println(" - Solution is integer feasible!")
        else
            @assert status == MOI.CALLBACK_NODE_STATUS_UNKNOWN
            println(" - I don't know if the solution is integer feasible :(")
        end
        if y_val - x_val > 1 + 1e-6
            con = @build_constraint(y - x <= 1)
            println("Adding $(con)")
            MOI.submit(model, MOI.LazyConstraint(cb_data), con)
        elseif y_val + x_val > 3 + 1e-6
            con = @build_constraint(y + x <= 3)
            println("Adding $(con)")
            MOI.submit(model, MOI.LazyConstraint(cb_data), con)
        end
    end
    set_attribute(model, MOI.LazyConstraintCallback(), my_callback_function)
    optimize!(model)
    Test.@test is_solved_and_feasible(model)
    Test.@test lazy_called
    Test.@test value(x) == 1
    Test.@test value(y) == 2
    println("Optimal solution (x, y) = ($(value(x)), $(value(y)))")
    return
end

example_lazy_constraint()
```
```
Called from (x, y) = (0.0, 2.0)
- Solution is integer feasible!
Adding ScalarConstraint{AffExpr, MathOptInterface.LessThan{Float64}}(y - x, MathOptInterface.LessThan{Float64}(1.0))
Called from (x, y) = (1.0, 2.0)
- Solution is integer feasible!
Optimal solution (x, y) = (1.0, 2.0)
```
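A common variation on the example above is to separate lazy constraints only once the candidate solution is integer feasible and to ignore fractional nodes. The following is a minimal sketch of that pattern, not part of the original tutorial, reusing only the packages and API calls shown above; whether skipping fractional nodes pays off depends on the solver and the problem.

```julia
function example_lazy_constraint_integer_nodes_only()
    model = Model(GLPK.Optimizer)
    @variable(model, 0 <= x <= 2.5, Int)
    @variable(model, 0 <= y <= 2.5, Int)
    @objective(model, Max, y)
    function my_callback_function(cb_data)
        # Only separate once the candidate solution is integer feasible;
        # fractional and unknown nodes are left to the solver.
        if callback_node_status(cb_data, model) != MOI.CALLBACK_NODE_STATUS_INTEGER
            return
        end
        x_val = callback_value(cb_data, x)
        y_val = callback_value(cb_data, y)
        if y_val - x_val > 1 + 1e-6
            con = @build_constraint(y - x <= 1)
            MOI.submit(model, MOI.LazyConstraint(cb_data), con)
        elseif y_val + x_val > 3 + 1e-6
            con = @build_constraint(y + x <= 3)
            MOI.submit(model, MOI.LazyConstraint(cb_data), con)
        end
        return
    end
    set_attribute(model, MOI.LazyConstraintCallback(), my_callback_function)
    optimize!(model)
    return value(x), value(y)
end
```

Because lazy constraints must be satisfied by every accepted incumbent, the two violation checks are still the ones that matter; the early return merely avoids separating at points the solver has not yet made integer feasible.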
User-cuts
An example using a user-cut callback.
```julia
function example_user_cut_constraint()
    Random.seed!(1)
    N = 30
    item_weights, item_values = rand(N), rand(N)
    model = Model(GLPK.Optimizer)
    @variable(model, x[1:N], Bin)
    @constraint(model, sum(item_weights[i] * x[i] for i in 1:N) <= 10)
    @objective(model, Max, sum(item_values[i] * x[i] for i in 1:N))
    callback_called = false
    function my_callback_function(cb_data)
        callback_called = true
        x_vals = callback_value.(Ref(cb_data), x)
        accumulated = sum(item_weights[i] for i in 1:N if x_vals[i] > 1e-4)
        println("Called with accumulated = $(accumulated)")
        n_terms = sum(1 for i in 1:N if x_vals[i] > 1e-4)
        if accumulated > 10
            con = @build_constraint(
                sum(x[i] for i in 1:N if x_vals[i] > 0.5) <= n_terms - 1
            )
            println("Adding $(con)")
            MOI.submit(model, MOI.UserCut(cb_data), con)
        end
    end
    set_attribute(model, MOI.UserCutCallback(), my_callback_function)
    optimize!(model)
    Test.@test is_solved_and_feasible(model)
    Test.@test callback_called
    @show callback_called
    return
end

example_user_cut_constraint()
```
```
Called with accumulated = 10.37975831721494
Adding ScalarConstraint{AffExpr, MathOptInterface.LessThan{Float64}}(x[1] + x[2] + x[3] + x[4] + x[5] + x[7] + x[8] + x[9] + x[10] + x[11] + x[12] + x[13] + x[14] + x[16] + x[17] + x[18] + x[20] + x[22] + x[23] + x[25] + x[26] + x[28] + x[29] + x[30], MathOptInterface.LessThan{Float64}(23.0))
Called with accumulated = 10.37975831721494
Adding ScalarConstraint{AffExpr, MathOptInterface.LessThan{Float64}}(x[1] + x[2] + x[3] + x[4] + x[5] + x[7] + x[8] + x[9] + x[10] + x[12] + x[13] + x[14] + x[16] + x[17] + x[18] + x[20] + x[22] + x[23] + x[25] + x[26] + x[28] + x[29] + x[30], MathOptInterface.LessThan{Float64}(23.0))
Called with accumulated = 10.585271197221452
Adding ScalarConstraint{AffExpr, MathOptInterface.LessThan{Float64}}(x[1] + x[2] + x[3] + x[4] + x[5] + x[7] + x[8] + x[9] + x[10] + x[11] + x[12] + x[13] + x[14] + x[16] + x[17] + x[18] + x[20] + x[22] + x[23] + x[25] + x[26] + x[29] + x[30], MathOptInterface.LessThan{Float64}(23.0))
Called with accumulated = 10.574173763699463
Adding ScalarConstraint{AffExpr, MathOptInterface.LessThan{Float64}}(x[1] + x[2] + x[3] + x[4] + x[5] + x[7] + x[8] + x[9] + x[10] + x[11] + x[12] + x[13] + x[14] + x[16] + x[17] + x[18] + x[20] + x[22] + x[23] + x[25] + x[26] + x[29] + x[30], MathOptInterface.LessThan{Float64}(23.0))
Called with accumulated = 10.193650358656257
Adding ScalarConstraint{AffExpr, MathOptInterface.LessThan{Float64}}(x[1] + x[2] + x[3] + x[4] + x[5] + x[7] + x[8] + x[9] + x[10] + x[11] + x[12] + x[13] + x[14] + x[15] + x[16] + x[17] + x[18] + x[20] + x[22] + x[23] + x[25] + x[26] + x[29] + x[30], MathOptInterface.LessThan{Float64}(23.0))
Called with accumulated = 10.54116374481702
Adding ScalarConstraint{AffExpr, MathOptInterface.LessThan{Float64}}(x[1] + x[2] + x[3] + x[4] + x[5] + x[7] + x[8] + x[9] + x[10] + x[11] + x[12] + x[13] + x[14] + x[16] + x[17] + x[18] + x[20] + x[22] + x[23] + x[25] + x[26] + x[27] + x[29] + x[30], MathOptInterface.LessThan{Float64}(24.0))
Called with accumulated = 10.37975831721494
Adding ScalarConstraint{AffExpr, MathOptInterface.LessThan{Float64}}(x[1] + x[2] + x[3] + x[4] + x[5] + x[7] + x[8] + x[9] + x[10] + x[12] + x[13] + x[14] + x[16] + x[17] + x[18] + x[20] + x[22] + x[23] + x[25] + x[26] + x[28] + x[29] + x[30], MathOptInterface.LessThan{Float64}(23.0))
callback_called = true
```
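Unlike lazy constraints, user cuts are optional: they may only tighten the linear relaxation and must not cut off any integer-feasible point, and submitting too many of them can slow the solve. As a minimal sketch, not part of the original tutorial (the cap of five cuts and the 0.5 rounding threshold are arbitrary choices), the callback above could be limited to a fixed number of cover-style cuts:

```julia
function example_capped_user_cut()
    Random.seed!(1)
    N = 30
    item_weights, item_values = rand(N), rand(N)
    model = Model(GLPK.Optimizer)
    @variable(model, x[1:N], Bin)
    @constraint(model, sum(item_weights[i] * x[i] for i in 1:N) <= 10)
    @objective(model, Max, sum(item_values[i] * x[i] for i in 1:N))
    cuts_added = 0
    function my_callback_function(cb_data)
        # Stop separating after a handful of cuts to limit callback overhead.
        if cuts_added >= 5
            return
        end
        x_vals = callback_value.(Ref(cb_data), x)
        selected = [i for i in 1:N if x_vals[i] > 0.5]
        # The cut is valid only if the selected items cannot all fit together,
        # that is, if their total weight exceeds the knapsack capacity.
        if sum(item_weights[selected]) > 10
            con = @build_constraint(
                sum(x[i] for i in selected) <= length(selected) - 1
            )
            MOI.submit(model, MOI.UserCut(cb_data), con)
            cuts_added += 1
        end
        return
    end
    set_attribute(model, MOI.UserCutCallback(), my_callback_function)
    optimize!(model)
    return cuts_added
end
```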
Heuristic solutions
An example using a heuristic solution callback.
```julia
function example_heuristic_solution()
    Random.seed!(1)
    N = 30
    item_weights, item_values = rand(N), rand(N)
    model = Model(GLPK.Optimizer)
    @variable(model, x[1:N], Bin)
    @constraint(model, sum(item_weights[i] * x[i] for i in 1:N) <= 10)
    @objective(model, Max, sum(item_values[i] * x[i] for i in 1:N))
    callback_called = false
    function my_callback_function(cb_data)
        callback_called = true
        x_vals = callback_value.(Ref(cb_data), x)
        ret =
            MOI.submit(model, MOI.HeuristicSolution(cb_data), x, floor.(x_vals))
        println("Heuristic solution status = $(ret)")
        Test.@test ret in (
            MOI.HEURISTIC_SOLUTION_ACCEPTED,
            MOI.HEURISTIC_SOLUTION_REJECTED,
        )
    end
    set_attribute(model, MOI.HeuristicCallback(), my_callback_function)
    optimize!(model)
    Test.@test is_solved_and_feasible(model)
    Test.@test callback_called
    return
end

example_heuristic_solution()
```
```
Heuristic solution status = HEURISTIC_SOLUTION_ACCEPTED
Heuristic solution status = HEURISTIC_SOLUTION_REJECTED
Heuristic solution status = HEURISTIC_SOLUTION_REJECTED
Heuristic solution status = HEURISTIC_SOLUTION_REJECTED
```
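The example above always submits the floored point, which is trivially feasible for a knapsack with nonnegative weights. The following is a minimal sketch, not part of the original tutorial, of the more general pattern: construct a candidate point, check it against the constraint yourself, and only then submit it. Here the candidate is nearest-integer rounding, which can violate the knapsack constraint:

```julia
function example_heuristic_with_feasibility_check()
    Random.seed!(1)
    N = 30
    item_weights, item_values = rand(N), rand(N)
    model = Model(GLPK.Optimizer)
    @variable(model, x[1:N], Bin)
    @constraint(model, sum(item_weights[i] * x[i] for i in 1:N) <= 10)
    @objective(model, Max, sum(item_values[i] * x[i] for i in 1:N))
    function my_callback_function(cb_data)
        x_vals = callback_value.(Ref(cb_data), x)
        # Round to the nearest integer point. Unlike flooring, this can violate
        # the knapsack constraint, so check feasibility before submitting.
        x_rounded = round.(x_vals)
        if sum(item_weights[i] * x_rounded[i] for i in 1:N) <= 10
            MOI.submit(model, MOI.HeuristicSolution(cb_data), x, x_rounded)
        end
        return
    end
    set_attribute(model, MOI.HeuristicCallback(), my_callback_function)
    optimize!(model)
    return
end
```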
GLPK solver-dependent callback
An example using GLPK’s solver-dependent callback.
```julia
function example_solver_dependent_callback()
    model = Model(GLPK.Optimizer)
    @variable(model, 0 <= x <= 2.5, Int)
    @variable(model, 0 <= y <= 2.5, Int)
    @objective(model, Max, y)
    lazy_called = false
    function my_callback_function(cb_data)
        lazy_called = true
        reason = GLPK.glp_ios_reason(cb_data.tree)
        println("Called from reason = $(reason)")
        if reason != GLPK.GLP_IROWGEN
            return
        end
        x_val = callback_value(cb_data, x)
        y_val = callback_value(cb_data, y)
        if y_val - x_val > 1 + 1e-6
            con = @build_constraint(y - x <= 1)
            println("Adding $(con)")
            MOI.submit(model, MOI.LazyConstraint(cb_data), con)
        elseif y_val + x_val > 3 + 1e-6
            con = @build_constraint(y + x <= 3)
            println("Adding $(con)")
            MOI.submit(model, MOI.LazyConstraint(cb_data), con)
        end
    end
    set_attribute(model, GLPK.CallbackFunction(), my_callback_function)
    optimize!(model)
    Test.@test is_solved_and_feasible(model)
    Test.@test lazy_called
    Test.@test value(x) == 1
    Test.@test value(y) == 2
    return
end

example_solver_dependent_callback()
```
```
Called from reason = 6
Called from reason = 7
Called from reason = 1
Adding ScalarConstraint{AffExpr, MathOptInterface.LessThan{Float64}}(y - x, MathOptInterface.LessThan{Float64}(1.0))
Called from reason = 7
Called from reason = 1
Called from reason = 2
```
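The reason values printed above are GLPK's integer enum codes, and the callback only generates rows when the reason is `GLPK.GLP_IROWGEN`. As a small sketch, and assuming GLPK.jl exposes the standard `glpk.h` reason constants (an assumption; the original tutorial only uses `GLP_IROWGEN`), the codes can be translated into readable names for logging:

```julia
# Hypothetical helper, not part of the original tutorial: map GLPK's integer
# callback reason codes to the names used in the GLPK C API documentation.
function glpk_reason_name(reason)
    names = Dict(
        GLPK.GLP_IROWGEN => "GLP_IROWGEN (request for row generation)",
        GLPK.GLP_IBINGO => "GLP_IBINGO (better integer solution found)",
        GLPK.GLP_IHEUR => "GLP_IHEUR (request for heuristic solution)",
        GLPK.GLP_ICUTGEN => "GLP_ICUTGEN (request for cut generation)",
        GLPK.GLP_IBRANCH => "GLP_IBRANCH (request for branching)",
        GLPK.GLP_ISELECT => "GLP_ISELECT (request for subproblem selection)",
        GLPK.GLP_IPREPRO => "GLP_IPREPRO (request for preprocessing)",
    )
    return get(names, reason, "unknown reason $(reason)")
end
```

Inside `my_callback_function`, the print statement could then become `println("Called from reason = $(glpk_reason_name(reason))")`.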