Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion lib/OptimalBranchingCore/src/OptimalBranchingCore.jl
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ using DataStructures
# logic expressions
export Clause, BranchingTable, DNF, booleans, ∨, ∧, ¬, covered_by, literals, is_true_literal, is_false_literal
# weighted minimum set cover solvers and optimal branching rule
export weighted_minimum_set_cover, AbstractSetCoverSolver, LPSolver, IPSolver
export weighted_minimum_set_cover, weighted_minimum_signed_exact_cover, AbstractSetCoverSolver, LPSolver, IPSolver
export minimize_γ, optimal_branching_rule, OptimalBranchingResult

##### interfaces #####
Expand Down
1 change: 1 addition & 0 deletions lib/OptimalBranchingCore/src/bitbasis.jl
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ struct Clause{INT <: Integer}
new{INT}(mask, val & mask)
end
end
# Number of literals in the clause, i.e. the number of variables fixed by the mask.
Base.length(c::Clause) = count_ones(c.mask)

function clause_string(c::Clause{INT}) where INT
join([iszero(readbit(c.val, i)) ? "¬#$i" : "#$i" for i = 1:bsizeof(INT) if readbit(c.mask, i) == 1], " ∧ ")
Expand Down
87 changes: 56 additions & 31 deletions lib/OptimalBranchingCore/src/setcovering.jl
Original file line number Diff line number Diff line change
Expand Up @@ -276,54 +276,35 @@
### Returns
A vector of indices of selected subsets.
"""
function weighted_minimum_set_cover(solver::Union{LPSolver, IPSolver}, weights::AbstractVector, subsets::Vector{Vector{Int}}, num_items::Int)
    nsc = length(subsets)
    # For each item j, sets_id[j] lists the indices of the subsets that cover j.
    sets_id = _init_set_id(subsets, num_items)

    # Build the set-cover program with JuMP; x[i] indicates whether subset i is selected.
    model = Model(solver.optimizer)
    !solver.verbose && set_silent(model)
    if solver isa LPSolver
        # LP relaxation: fractional selections allowed, resolved later by pick_sets.
        @variable(model, 0 <= x[i = 1:nsc] <= 1)
    elseif solver isa IPSolver
        # Exact 0/1 integer program.
        @variable(model, 0 <= x[i = 1:nsc] <= 1, Int)
    end
    @objective(model, Min, sum(x[i] * weights[i] for i in 1:nsc))
    for i in 1:num_items
        # Every item must be covered by at least one selected subset.
        @constraint(model, sum(x[j] for j in sets_id[i]) >= 1)
    end

    optimize!(model)
    # Explicit runtime check instead of @assert: @assert may be disabled at higher
    # optimization levels, and a failed solve must not pass silently.
    is_solved_and_feasible(model) || error("weighted_minimum_set_cover: solver did not return a feasible optimal solution")
    return pick_sets(value.(x), subsets, num_items)
end

"""
    _init_set_id(subsets::Vector{Vector{Int}}, num_items::Int)

Invert the subset membership relation: return a `Vector{Vector{Int}}` of length
`num_items` whose `j`-th entry lists every index `i` with `j ∈ subsets[i]`.
Subset indices appear in increasing order within each entry.
"""
function _init_set_id(subsets::Vector{Vector{Int}}, num_items::Int)
    sets_id = [Int[] for _ in 1:num_items]
    # `pairs` iterates (index, subset) without the non-idiomatic 1:length(...) pattern.
    for (i, subset) in pairs(subsets)
        for j in subset
            push!(sets_id[j], i)
        end
    end
    return sets_id
end

# by viewing xs as the probability of being selected, we can use a random algorithm to pick the sets
Expand All @@ -347,3 +328,47 @@

return [i for i in picked]
end

"""
weighted_minimum_signed_exact_cover(solver, weights::AbstractVector, subsets::Vector{Vector{Int}}, num_items::Int, cmax::Float64)

Solves the weighted minimum signed exact cover problem. It differs from the standard (unsigned) version in that each subset is assigned a real-valued coefficient rather than a binary indicator.
The coefficient represents how many times a subset is selected and may be positive, zero, or negative; for every item, the coefficients of the subsets covering it must sum to exactly one.

### Arguments
- `solver`: The solver to be used. It can be an instance of `LPSolver` or `IPSolver`.
- `weights::AbstractVector`: The weights of the subsets.
- `subsets::Vector{Vector{Int}}`: A vector of subsets.
- `num_items::Int`: The number of elements to cover.
- `cmax::Float64`: The maximum coefficient of the subsets.

### Returns
A vector of weights for each subset.
"""
function weighted_minimum_signed_exact_cover(solver::Union{LPSolver, IPSolver}, weights::AbstractVector, subsets::Vector{Vector{Int}}, num_items::Int, cmax::Float64)
    nsc = length(subsets)
    # For each item j, sets_id[j] lists the indices of the subsets that cover j.
    sets_id = _init_set_id(subsets, num_items)

    # Build the signed exact-cover program with JuMP (LP relaxation or exact IP,
    # depending on the solver type — the previous "IP by JuMP" comment was stale).
    model = Model(solver.optimizer)
    !solver.verbose && set_silent(model)
    if solver isa LPSolver
        @variable(model, 0 <= x[i = 1:nsc] <= 1)
    elseif solver isa IPSolver
        @variable(model, 0 <= x[i = 1:nsc] <= 1, Int)
    end
    # c[i] is the (possibly negative) coefficient of the i-th subset.
    @variable(model, c[i = 1:nsc])
    # Only the selection indicators contribute to the objective, not the coefficients.
    @objective(model, Min, sum(x[i] * weights[i] for i in 1:nsc))
    for i in 1:num_items
        # Each item must be covered exactly once by the signed coefficient sum.
        @constraint(model, sum(c[j] for j in sets_id[i]) == 1)
    end
    for i in 1:nsc
        # Big-M style linking constraints: c[i] may be nonzero only when x[i] is
        # selected, and is bounded by cmax in absolute value.
        # Inspired by: https://ieeexplore.ieee.org/document/6638790
        @constraint(model, c[i] >= -cmax * x[i])
        @constraint(model, c[i] <= cmax * x[i])
    end

    optimize!(model)
    # Explicit runtime check instead of @assert: @assert may be disabled at higher
    # optimization levels, and a failed solve must not pass silently.
    is_solved_and_feasible(model) || error("weighted_minimum_signed_exact_cover: solver did not return a feasible optimal solution")
    return value.(c)
end
4 changes: 4 additions & 0 deletions lib/OptimalBranchingCore/test/bitbasis.jl
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@ using Test
@test c1 == c2
@test c1 !== c3
@test c1 !== c4
@test length(c1) == 3
@test length(c2) == 3
@test length(c3) == 3
@test length(c4) == 2

# literals
lts1 = literals(c1)
Expand Down
14 changes: 14 additions & 0 deletions lib/OptimalBranchingCore/test/setcovering.jl
Original file line number Diff line number Diff line change
Expand Up @@ -85,3 +85,17 @@ end
@test OptimalBranchingCore.covered_by(tbl, result_ip.optimal_rule)
@test result_ip.γ ≈ 1.0
end

@testset "weighted minimum signed exact cover" begin
subsets = [[1], [2], [3], [4], [1, 2], [2, 3], [3, 4], [4, 5]]
weights = collect(1:8.0)
num_items = 5
result_ip = OptimalBranchingCore.weighted_minimum_signed_exact_cover(IPSolver(max_itr = 10, verbose = false), weights, subsets, num_items, 10.0)
@test result_ip ≈ [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]

subsets = [[1, 2], [2], [2,3,4,5], [4], [2, 3], [3, 4], [4, 5]]
weights = collect(1:7.0)
num_items = 5
result_ip = OptimalBranchingCore.weighted_minimum_signed_exact_cover(IPSolver(max_itr = 10, verbose = false), weights, subsets, num_items, 10.0)
@test result_ip ≈ [1.0, -1.0, 1.0, 0.0, 0.0, 0.0, 0.0]
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@YijiawWang in many cases, we only have 0-1 outcome, but in some cases, we can have -1 weights. Here is an example.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

When we only pursuit approximate cover, then we may have more non-Boolean coefficients. In this case, the requirement is not cover some entries exactly once. Instead, we need to reduce the norm
2 distance.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@YijiawWang in many cases, we only have 0-1 outcome, but in some cases, we can have -1 weights. Here is an example.

Soga! But in the set covering problem corresponding to the candidate clauses, the sizes of the sub-sets (the number of tbl entries covered by each candidate) and their weights (the truncated sc after removing the candidate) are generally proportional. So maybe it's not common to encounter situations like sub_set_2 in the example, where a smaller sub-set has a larger weight, making it more advantageous for c_2 to take a negative value?

Copy link
Collaborator

@YijiawWang YijiawWang Mar 30, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

When we only pursuit approximate cover, then we may have more non-Boolean coefficients. In this case, the requirement is not cover some entries exactly once. Instead, we need to reduce the norm 2 distance.

Soga! I see! Maybe using dynamic ob to give approximate solutions might better match the sparse structures compared to methods like MPS? However, in approximate cover, I'm worried that, similar to when we worked on QEC before, if we only consider local distance loss, we might discard branches that could play a significant role in the future, resulting in large errors? I wonder if we could try using a method like BP to first introduce an approximate environmental context for the approximate cover?

(This is a BP algorithm on counting ISs that ensures convergence.

end
Loading