diff --git a/ortools/sat/2d_rectangle_presolve.cc b/ortools/sat/2d_rectangle_presolve.cc index 94ec802c99..8db1a1e2f8 100644 --- a/ortools/sat/2d_rectangle_presolve.cc +++ b/ortools/sat/2d_rectangle_presolve.cc @@ -31,6 +31,7 @@ #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/stl_util.h" +#include "ortools/graph/max_flow.h" #include "ortools/graph/strongly_connected_components.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" @@ -149,7 +150,7 @@ bool PresolveFixed2dRectangles( if (!new_box.IsDisjoint(existing_box)) { is_disjoint = false; for (const Rectangle& disjoint_box : - new_box.SetDifference(existing_box)) { + new_box.RegionDifference(existing_box)) { to_add.push_back(disjoint_box); } break; @@ -209,7 +210,30 @@ bool PresolveFixed2dRectangles( optional_boxes.erase(optional_boxes.begin(), optional_boxes.begin() + num_optional_boxes_to_remove); - if (ReduceNumberofBoxes(fixed_boxes, &optional_boxes)) { + // TODO(user): instead of doing the greedy algorithm first with optional + // boxes, and then the one that is exact for mandatory boxes but weak for + // optional ones, refactor the second algorithm. One possible way of doing + // that would be to follow the shape boundary of optional+mandatory boxes and + // look whether we can shave off some turns. For example, if we have a shape + // like below, with the "+" representing area covered by optional boxes, we + // can replace the turns by a straight line. + // + // --> + // ^ ++++ + // . ++++ . + // . ++++ . => + // ++++ \/ + // --> ++++ --> --> + // *********** *********** + // *********** *********** + // + // Since less turns means less edges, this should be a good way to reduce the + // number of boxes. 
+ if (ReduceNumberofBoxesGreedy(fixed_boxes, &optional_boxes)) { + changed = true; + } + const int num_after_first_pass = fixed_boxes->size(); + if (ReduceNumberOfBoxesExactMandatory(fixed_boxes, &optional_boxes)) { changed = true; } if (changed && VLOG_IS_ON(1)) { @@ -219,8 +243,8 @@ bool PresolveFixed2dRectangles( } VLOG_EVERY_N_SEC(1, 1) << "Presolved " << original_num_boxes << " fixed rectangles (area=" << original_area - << ") into " << fixed_boxes->size() - << " (area=" << area << ")"; + << ") into " << num_after_first_pass << " then " + << fixed_boxes->size() << " (area=" << area << ")"; VLOG_EVERY_N_SEC(2, 2) << "Presolved rectangles:\n" << RenderDot(bounding_box, fixed_boxes_copy) @@ -283,18 +307,10 @@ struct Edge { }; } // namespace -bool ReduceNumberofBoxes(std::vector* mandatory_rectangles, - std::vector* optional_rectangles) { +bool ReduceNumberofBoxesGreedy(std::vector* mandatory_rectangles, + std::vector* optional_rectangles) { // The current implementation just greedily merges rectangles that share an - // edge. This is far from optimal, and it exists a polynomial optimal - // algorithm (see page 3 of [1]) for this problem at least for the case where - // optional_rectangles is empty. - // - // TODO(user): improve - // - // [1] Eppstein, David. "Graph-theoretic solutions to computational geometry - // problems." International Workshop on Graph-Theoretic Concepts in Computer - // Science. Berlin, Heidelberg: Springer Berlin Heidelberg, 2009. 
std::vector> rectangle_storage; enum class OptionalEnum { OPTIONAL, MANDATORY }; // bool for is_optional @@ -811,5 +827,615 @@ std::vector BoxesToShapes(absl::Span rectangles, return result; } +namespace { +struct PolygonCut { + std::pair start; + std::pair end; + int start_index; + int end_index; + + struct CmpByStartY { + bool operator()(const PolygonCut& a, const PolygonCut& b) const { + return std::tie(a.start.second, a.start.first) < + std::tie(b.start.second, b.start.first); + } + }; + + struct CmpByEndY { + bool operator()(const PolygonCut& a, const PolygonCut& b) const { + return std::tie(a.end.second, a.end.first) < + std::tie(b.end.second, b.end.first); + } + }; + + struct CmpByStartX { + bool operator()(const PolygonCut& a, const PolygonCut& b) const { + return a.start < b.start; + } + }; + + struct CmpByEndX { + bool operator()(const PolygonCut& a, const PolygonCut& b) const { + return a.end < b.end; + } + }; + + template + friend void AbslStringify(Sink& sink, const PolygonCut& diagonal) { + absl::Format(&sink, "(%v,%v)-(%v,%v)", diagonal.start.first, + diagonal.start.second, diagonal.end.first, + diagonal.end.second); + } +}; + +// A different representation of a shape. The two vectors must have the same +// size. The first one contains the points of the shape and the second one +// contains the index of the next point in the shape. +// +// Note that we code in this file is only correct for shapes with points +// connected only by horizontal or vertical lines. +struct FlatShape { + std::vector> points; + std::vector next; +}; + +EdgePosition GetSegmentDirection( + const std::pair& curr_segment, + const std::pair& next_segment) { + if (curr_segment.first == next_segment.first) { + return next_segment.second > curr_segment.second ? EdgePosition::TOP + : EdgePosition::BOTTOM; + } else { + return next_segment.first > curr_segment.first ? 
EdgePosition::RIGHT + : EdgePosition::LEFT; + } +} + +// Given a polygon, this function returns all line segments that start on a +// concave vertex and follow horizontally or vertically until it reaches the +// border of the polygon. This function returns all such segments grouped on the +// direction the line takes after starting in the concave vertex. Some of those +// segments start and end on a convex vertex, so they will appear twice in the +// output. This function modifies the shape by splitting some of the path +// segments in two. This is needed to make sure that `PolygonCut.start_index` +// and `PolygonCut.end_index` always corresponds to points in the FlatShape, +// even if they are not edges. +std::array, 4> GetPotentialPolygonCuts( + FlatShape& shape) { + std::array, 4> cuts; + + // First, for each concave vertex we create a cut that starts at it and + // crosses the polygon until infinite (in practice, int_max/int_min). + for (int i = 0; i < shape.points.size(); i++) { + const auto& it = &shape.points[shape.next[i]]; + const auto& previous = &shape.points[i]; + const auto& next_segment = &shape.points[shape.next[shape.next[i]]]; + const EdgePosition previous_dir = GetSegmentDirection(*previous, *it); + const EdgePosition next_dir = GetSegmentDirection(*it, *next_segment); + + if ((previous_dir == EdgePosition::TOP && next_dir == EdgePosition::LEFT) || + (previous_dir == EdgePosition::RIGHT && + next_dir == EdgePosition::TOP)) { + cuts[EdgePosition::RIGHT].push_back( + {.start = *it, + .end = {std::numeric_limits::max(), it->second}, + .start_index = shape.next[i]}); + } + if ((previous_dir == EdgePosition::BOTTOM && + next_dir == EdgePosition::RIGHT) || + (previous_dir == EdgePosition::LEFT && + next_dir == EdgePosition::BOTTOM)) { + cuts[EdgePosition::LEFT].push_back( + {.start = {std::numeric_limits::min(), it->second}, + .end = *it, + .end_index = shape.next[i]}); + } + if ((previous_dir == EdgePosition::RIGHT && + next_dir == EdgePosition::TOP) 
|| + (previous_dir == EdgePosition::BOTTOM && + next_dir == EdgePosition::RIGHT)) { + cuts[EdgePosition::BOTTOM].push_back( + {.start = {it->first, std::numeric_limits::min()}, + .end = *it, + .end_index = shape.next[i]}); + } + if ((previous_dir == EdgePosition::TOP && next_dir == EdgePosition::LEFT) || + (previous_dir == EdgePosition::LEFT && + next_dir == EdgePosition::BOTTOM)) { + cuts[EdgePosition::TOP].push_back( + {.start = *it, + .end = {it->first, std::numeric_limits::max()}, + .start_index = shape.next[i]}); + } + } + + // Now that we have one of the points of the segment (the one starting on a + // vertex), we need to find the other point. This is basically finding the + // first path segment that crosses each cut connecting edge->infinity we + // collected above. We do a rather naive implementation of that below and its + // complexity is O(N^2) even if it should be fast in most cases. If it + // turns out to be costly on profiling we can use a more sophisticated + // algorithm for finding the first intersection. + + // We need to sort the cuts so we can use binary search to quickly find cuts + // that cross a segment. + std::sort(cuts[EdgePosition::RIGHT].begin(), cuts[EdgePosition::RIGHT].end(), + PolygonCut::CmpByStartY()); + std::sort(cuts[EdgePosition::LEFT].begin(), cuts[EdgePosition::LEFT].end(), + PolygonCut::CmpByEndY()); + std::sort(cuts[EdgePosition::BOTTOM].begin(), + cuts[EdgePosition::BOTTOM].end(), PolygonCut::CmpByEndX()); + std::sort(cuts[EdgePosition::TOP].begin(), cuts[EdgePosition::TOP].end(), + PolygonCut::CmpByStartX()); + + // This function cuts a segment in two if it crosses a cut. In any case, it + // returns the index of a point `point_idx` so that `shape.points[point_idx] + // == point_to_cut`. 
+ const auto cut_segment_if_necessary = + [&shape](int segment_idx, + std::pair point_to_cut) { + const auto& cur = shape.points[segment_idx]; + const auto& next = shape.points[shape.next[segment_idx]]; + if (cur.second == next.second) { + DCHECK_EQ(point_to_cut.second, cur.second); + // We have a horizontal segment + const IntegerValue edge_start = std::min(cur.first, next.first); + const IntegerValue edge_end = std::max(cur.first, next.first); + + if (edge_start < point_to_cut.first && + point_to_cut.first < edge_end) { + shape.points.push_back(point_to_cut); + const int next_idx = shape.next[segment_idx]; + shape.next[segment_idx] = shape.points.size() - 1; + shape.next.push_back(next_idx); + return static_cast(shape.points.size() - 1); + } + return (shape.points[segment_idx] == point_to_cut) + ? segment_idx + : shape.next[segment_idx]; + } else { + DCHECK_EQ(cur.first, next.first); + DCHECK_EQ(point_to_cut.first, cur.first); + // We have a vertical segment + const IntegerValue edge_start = std::min(cur.second, next.second); + const IntegerValue edge_end = std::max(cur.second, next.second); + + if (edge_start < point_to_cut.second && + point_to_cut.second < edge_end) { + shape.points.push_back(point_to_cut); + const int next_idx = shape.next[segment_idx]; + shape.next[segment_idx] = shape.points.size() - 1; + shape.next.push_back(next_idx); + return static_cast(shape.points.size() - 1); + } + return (shape.points[segment_idx] == point_to_cut) + ? 
segment_idx + : shape.next[segment_idx]; + } + }; + + for (int i = 0; i < shape.points.size(); i++) { + const auto* cur_point_ptr = &shape.points[shape.next[i]]; + const auto* previous = &shape.points[i]; + DCHECK(cur_point_ptr->first == previous->first || + cur_point_ptr->second == previous->second) + << "found a segment that is neither horizontal nor vertical"; + const EdgePosition direction = + GetSegmentDirection(*previous, *cur_point_ptr); + + if (direction == EdgePosition::BOTTOM) { + const auto cut_start = absl::c_lower_bound( + cuts[EdgePosition::RIGHT], + PolygonCut{.start = {std::numeric_limits::min(), + cur_point_ptr->second}}, + PolygonCut::CmpByStartY()); + auto cut_end = absl::c_upper_bound( + cuts[EdgePosition::RIGHT], + PolygonCut{.start = {std::numeric_limits::max(), + previous->second}}, + PolygonCut::CmpByStartY()); + + for (auto cut_it = cut_start; cut_it < cut_end; ++cut_it) { + PolygonCut& diagonal = *cut_it; + const IntegerValue diagonal_start_x = diagonal.start.first; + const IntegerValue diagonal_cur_end_x = diagonal.end.first; + // Our binary search guarantees those two conditions. + DCHECK_LE(cur_point_ptr->second, diagonal.start.second); + DCHECK_LE(diagonal.start.second, previous->second); + + // Let's test if the diagonal crosses the current boundary segment + if (diagonal_start_x <= previous->first && + diagonal_cur_end_x > cur_point_ptr->first) { + DCHECK_LT(diagonal_start_x, cur_point_ptr->first); + DCHECK_LE(previous->first, diagonal_cur_end_x); + + diagonal.end.first = cur_point_ptr->first; + + diagonal.end_index = cut_segment_if_necessary(i, diagonal.end); + DCHECK(shape.points[diagonal.end_index] == diagonal.end); + + // Subtle: cut_segment_if_necessary might add new points to the vector + // of the shape, so the pointers computed from it might become + // invalid. Moreover, the current segment now is shorter, so we need + // to update our upper bound. 
+ cur_point_ptr = &shape.points[shape.next[i]]; + previous = &shape.points[i]; + cut_end = absl::c_upper_bound( + cuts[EdgePosition::RIGHT], + PolygonCut{.start = {std::numeric_limits::max(), + previous->second}}, + PolygonCut::CmpByStartY()); + } + } + } + + if (direction == EdgePosition::TOP) { + const auto cut_start = absl::c_lower_bound( + cuts[EdgePosition::LEFT], + PolygonCut{.end = {std::numeric_limits::min(), + previous->second}}, + PolygonCut::CmpByEndY()); + auto cut_end = absl::c_upper_bound( + cuts[EdgePosition::LEFT], + PolygonCut{.end = {std::numeric_limits::max(), + cur_point_ptr->second}}, + PolygonCut::CmpByEndY()); + for (auto cut_it = cut_start; cut_it < cut_end; ++cut_it) { + PolygonCut& diagonal = *cut_it; + const IntegerValue diagonal_start_x = diagonal.start.first; + const IntegerValue diagonal_cur_end_x = diagonal.end.first; + // Our binary search guarantees those two conditions. + DCHECK_LE(diagonal.end.second, cur_point_ptr->second); + DCHECK_LE(previous->second, diagonal.end.second); + + // Let's test if the diagonal crosses the current boundary segment + if (diagonal_start_x < cur_point_ptr->first && + previous->first <= diagonal_cur_end_x) { + DCHECK_LT(cur_point_ptr->first, diagonal_cur_end_x); + DCHECK_LE(diagonal_start_x, previous->first); + + diagonal.start.first = cur_point_ptr->first; + diagonal.start_index = cut_segment_if_necessary(i, diagonal.start); + DCHECK(shape.points[diagonal.start_index] == diagonal.start); + cur_point_ptr = &shape.points[shape.next[i]]; + previous = &shape.points[i]; + cut_end = absl::c_upper_bound( + cuts[EdgePosition::LEFT], + PolygonCut{.end = {std::numeric_limits::max(), + cur_point_ptr->second}}, + PolygonCut::CmpByEndY()); + } + } + } + + if (direction == EdgePosition::LEFT) { + const auto cut_start = absl::c_lower_bound( + cuts[EdgePosition::BOTTOM], + PolygonCut{.end = {cur_point_ptr->first, + std::numeric_limits::min()}}, + PolygonCut::CmpByEndX()); + auto cut_end = absl::c_upper_bound( + 
cuts[EdgePosition::BOTTOM], + PolygonCut{.end = {previous->first, + std::numeric_limits::max()}}, + PolygonCut::CmpByEndX()); + for (auto cut_it = cut_start; cut_it < cut_end; ++cut_it) { + PolygonCut& diagonal = *cut_it; + const IntegerValue diagonal_start_y = diagonal.start.second; + const IntegerValue diagonal_cur_end_y = diagonal.end.second; + + // Our binary search guarantees those two conditions. + DCHECK_LE(cur_point_ptr->first, diagonal.end.first); + DCHECK_LE(diagonal.end.first, previous->first); + + // Let's test if the diagonal crosses the current boundary segment + if (diagonal_start_y < cur_point_ptr->second && + cur_point_ptr->second <= diagonal_cur_end_y) { + DCHECK_LE(diagonal_start_y, previous->second); + DCHECK_LT(cur_point_ptr->second, diagonal_cur_end_y); + + diagonal.start.second = cur_point_ptr->second; + diagonal.start_index = cut_segment_if_necessary(i, diagonal.start); + DCHECK(shape.points[diagonal.start_index] == diagonal.start); + cur_point_ptr = &shape.points[shape.next[i]]; + previous = &shape.points[i]; + cut_end = absl::c_upper_bound( + cuts[EdgePosition::BOTTOM], + PolygonCut{.end = {previous->first, + std::numeric_limits::max()}}, + PolygonCut::CmpByEndX()); + } + } + } + + if (direction == EdgePosition::RIGHT) { + const auto cut_start = absl::c_lower_bound( + cuts[EdgePosition::TOP], + PolygonCut{.start = {previous->first, + std::numeric_limits::min()}}, + PolygonCut::CmpByStartX()); + auto cut_end = absl::c_upper_bound( + cuts[EdgePosition::TOP], + PolygonCut{.start = {cur_point_ptr->first, + std::numeric_limits::max()}}, + PolygonCut::CmpByStartX()); + for (auto cut_it = cut_start; cut_it < cut_end; ++cut_it) { + PolygonCut& diagonal = *cut_it; + const IntegerValue diagonal_start_y = diagonal.start.second; + const IntegerValue diagonal_cur_end_y = diagonal.end.second; + + // Our binary search guarantees those two conditions. 
+ DCHECK_LE(previous->first, diagonal.start.first); + DCHECK_LE(diagonal.start.first, cur_point_ptr->first); + + // Let's test if the diagonal crosses the current boundary segment + if (diagonal_start_y <= cur_point_ptr->second && + cur_point_ptr->second < diagonal_cur_end_y) { + DCHECK_LT(diagonal_start_y, previous->second); + DCHECK_LE(cur_point_ptr->second, diagonal_cur_end_y); + + diagonal.end.second = cur_point_ptr->second; + diagonal.end_index = cut_segment_if_necessary(i, diagonal.end); + DCHECK(shape.points[diagonal.end_index] == diagonal.end); + cur_point_ptr = &shape.points[shape.next[i]]; + cut_end = absl::c_upper_bound( + cuts[EdgePosition::TOP], + PolygonCut{.start = {cur_point_ptr->first, + std::numeric_limits::max()}}, + PolygonCut::CmpByStartX()); + previous = &shape.points[i]; + } + } + } + } + return cuts; +} + +void CutShapeWithPolygonCuts(FlatShape& shape, + absl::Span cuts) { + std::vector previous(shape.points.size(), -1); + for (int i = 0; i < shape.points.size(); i++) { + previous[shape.next[i]] = i; + } + + std::vector> cut_previous_index(cuts.size(), {-1, -1}); + for (int i = 0; i < cuts.size(); i++) { + DCHECK(cuts[i].start == shape.points[cuts[i].start_index]); + DCHECK(cuts[i].end == shape.points[cuts[i].end_index]); + + cut_previous_index[i].first = previous[cuts[i].start_index]; + cut_previous_index[i].second = previous[cuts[i].end_index]; + } + + for (const auto& [i, j] : cut_previous_index) { + const int prev_start_next = shape.next[i]; + const int prev_end_next = shape.next[j]; + const std::pair start = + shape.points[prev_start_next]; + const std::pair end = + shape.points[prev_end_next]; + + shape.points.push_back(start); + shape.next[i] = shape.points.size() - 1; + shape.next.push_back(prev_end_next); + + shape.points.push_back(end); + shape.next[j] = shape.points.size() - 1; + shape.next.push_back(prev_start_next); + } +} +} // namespace + +// This function applies the method described in page 3 of [1]. 
+// +// [1] Eppstein, David. "Graph-theoretic solutions to computational geometry +// problems." International Workshop on Graph-Theoretic Concepts in Computer +// Science. Berlin, Heidelberg: Springer Berlin Heidelberg, 2009. +std::vector CutShapeIntoRectangles(SingleShape shape) { + auto is_aligned = [](const std::pair& p1, + const std::pair& p2, + const std::pair& p3) { + return ((p1.first == p2.first) == (p2.first == p3.first)) && + ((p1.second == p2.second) == (p2.second == p3.second)); + }; + const auto add_segment = + [&is_aligned](const std::pair& segment, + const int start_index, + std::vector>& points, + std::vector& next) { + if (points.size() > 1 + start_index && + is_aligned(points[points.size() - 1], points[points.size() - 2], + segment)) { + points.back() = segment; + } else { + points.push_back(segment); + next.push_back(points.size()); + } + }; + + // To cut our polygon into rectangles, we first put it into a data structure + // that is easier to manipulate. + FlatShape flat_shape; + for (int i = 0; 1 + i < shape.boundary.step_points.size(); ++i) { + const std::pair& segment = + shape.boundary.step_points[i]; + add_segment(segment, 0, flat_shape.points, flat_shape.next); + } + flat_shape.next.back() = 0; + for (const ShapePath& hole : shape.holes) { + const int start = flat_shape.next.size(); + if (hole.step_points.size() < 2) continue; + for (int i = 0; i + 1 < hole.step_points.size(); ++i) { + const std::pair& segment = + hole.step_points[i]; + add_segment(segment, start, flat_shape.points, flat_shape.next); + } + flat_shape.next.back() = start; + } + + std::array, 4> all_cuts = + GetPotentialPolygonCuts(flat_shape); + + // Some cuts connect two concave edges and will be duplicated in all_cuts. + // Those are important: since they "fix" two concavities with a single cut, + // they are called "good diagonals" in the literature. 
 Note that in + // computational geometry jargon, a diagonal of a polygon is a line segment + // that connects two non-adjacent vertices of a polygon, even in cases like + // ours, where we are only talking about diagonals that are not "diagonal" in the + // usual meaning of the word: i.e., horizontal or vertical segments connecting + // two vertices of the polygon). + std::array, 2> good_diagonals; + for (const auto& d : all_cuts[EdgePosition::BOTTOM]) { + if (absl::c_binary_search(all_cuts[EdgePosition::TOP], d, + PolygonCut::CmpByStartX())) { + good_diagonals[0].push_back(d); + } + } + for (const auto& d : all_cuts[EdgePosition::LEFT]) { + if (absl::c_binary_search(all_cuts[EdgePosition::RIGHT], d, + PolygonCut::CmpByStartY())) { + good_diagonals[1].push_back(d); + } + } + + // The "good diagonals" are only more optimal than any cut if they are not + // crossed by other cuts. To maximize their usefulness, we build a graph where + // the good diagonals are the vertices and we add an edge every time a + // vertical and horizontal diagonal cross. The minimum vertex cover of this + // graph is the minimal set of good diagonals that are not crossed by other + // cuts. 
+ std::vector> arcs(good_diagonals[0].size()); + for (int i = 0; i < good_diagonals[0].size(); ++i) { + for (int j = 0; j < good_diagonals[1].size(); ++j) { + const PolygonCut& vertical = good_diagonals[0][i]; + const PolygonCut& horizontal = good_diagonals[1][j]; + const IntegerValue vertical_x = vertical.start.first; + const IntegerValue horizontal_y = horizontal.start.second; + if (horizontal.start.first <= vertical_x && + vertical_x <= horizontal.end.first && + vertical.start.second <= horizontal_y && + horizontal_y <= vertical.end.second) { + arcs[i].push_back(good_diagonals[0].size() + j); + } + } + } + + const std::vector minimum_cover = + BipartiteMinimumVertexCover(arcs, good_diagonals[1].size()); + + std::vector minimum_cover_horizontal_diagonals; + for (int i = good_diagonals[0].size(); + i < good_diagonals[0].size() + good_diagonals[1].size(); ++i) { + if (minimum_cover[i]) continue; + minimum_cover_horizontal_diagonals.push_back( + good_diagonals[1][i - good_diagonals[0].size()]); + } + + // Since our data structure only allow to cut the shape according to a list + // of vertical or horizontal cuts, but not a list mixing both, we cut first + // on the chosen horizontal good diagonals. + CutShapeWithPolygonCuts(flat_shape, minimum_cover_horizontal_diagonals); + + // We need to recompute the cuts after we applied the good diagonals, since + // the geometry has changed. + all_cuts = GetPotentialPolygonCuts(flat_shape); + + // Now that we did all horizontal good diagonals, we need to cut on all + // vertical good diagonals and then cut arbitrarily to remove all concave + // edges. To make things simple, just apply all vertical cuts, since they + // include all the vertical good diagonals and also fully slice the shape into + // rectangles. + + // Remove duplicates coming from good diagonals first. 
+ std::vector cuts = all_cuts[EdgePosition::TOP]; + for (const auto& cut : all_cuts[EdgePosition::BOTTOM]) { + if (!absl::c_binary_search(all_cuts[EdgePosition::TOP], cut, + PolygonCut::CmpByStartX())) { + cuts.push_back(cut); + } + } + + CutShapeWithPolygonCuts(flat_shape, cuts); + + // Now every connected component of the shape is a rectangle. Build the final + // result. + std::vector result; + std::vector seen(flat_shape.points.size(), false); + for (int i = 0; i < flat_shape.points.size(); ++i) { + if (seen[i]) continue; + Rectangle& rectangle = result.emplace_back(Rectangle{ + .x_min = std::numeric_limits::max(), + .x_max = std::numeric_limits::min(), + .y_min = std::numeric_limits::max(), + .y_max = std::numeric_limits::min(), + }); + int cur = i; + do { + seen[cur] = true; + rectangle.GrowToInclude({.x_min = flat_shape.points[cur].first, + .x_max = flat_shape.points[cur].first, + .y_min = flat_shape.points[cur].second, + .y_max = flat_shape.points[cur].second}); + cur = flat_shape.next[cur]; + DCHECK_LT(cur, flat_shape.next.size()); + } while (cur != i); + } + + return result; +} + +bool ReduceNumberOfBoxesExactMandatory( + std::vector* mandatory_rectangles, + std::vector* optional_rectangles) { + if (mandatory_rectangles->empty()) return false; + std::vector result = *mandatory_rectangles; + std::vector new_optional_rectangles = *optional_rectangles; + + Rectangle mandatory_bounding_box = (*mandatory_rectangles)[0]; + for (const Rectangle& box : *mandatory_rectangles) { + mandatory_bounding_box.GrowToInclude(box); + } + const std::vector mandatory_empty_holes = + FindEmptySpaces(mandatory_bounding_box, *mandatory_rectangles); + const std::vector> mandatory_holes_components = + SplitInConnectedComponents(BuildNeighboursGraph(mandatory_empty_holes)); + + // Now for every connected component of the holes in the mandatory area, see + // if we can fill them with optional boxes. 
+ std::vector holes_in_component; + for (const std::vector& component : mandatory_holes_components) { + holes_in_component.clear(); + holes_in_component.reserve(component.size()); + for (const int index : component) { + holes_in_component.push_back(mandatory_empty_holes[index]); + } + if (RegionIncludesOther(new_optional_rectangles, holes_in_component)) { + // Fill the hole. + result.insert(result.end(), holes_in_component.begin(), + holes_in_component.end()); + // We can modify `optional_rectangles` here since we know that if we + // remove a hole this function will return true. + new_optional_rectangles = PavedRegionDifference( + new_optional_rectangles, std::move(holes_in_component)); + } + } + const Neighbours neighbours = BuildNeighboursGraph(result); + std::vector shapes = BoxesToShapes(result, neighbours); + + result.clear(); + for (SingleShape& shape : shapes) { + // This is the function that applies the algorithm described in [1]. + const std::vector cut_rectangles = + CutShapeIntoRectangles(std::move(shape)); + result.insert(result.end(), cut_rectangles.begin(), cut_rectangles.end()); + } + // It is possible that the algorithm actually increases the number of boxes. + // See the "Problematic2" test. 
+ if (result.size() >= mandatory_rectangles->size()) return false; + mandatory_rectangles->swap(result); + optional_rectangles->swap(new_optional_rectangles); + return true; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/2d_rectangle_presolve.h b/ortools/sat/2d_rectangle_presolve.h index 362f458ecf..a7f1c6f07e 100644 --- a/ortools/sat/2d_rectangle_presolve.h +++ b/ortools/sat/2d_rectangle_presolve.h @@ -38,17 +38,29 @@ bool PresolveFixed2dRectangles( absl::Span non_fixed_boxes, std::vector* fixed_boxes); -// Given a set of non-overlapping rectangles split in two groups, mandatory and -// optional, try to build a set of as few non-overlapping rectangles as -// possible defining a region R that satisfy: +// Given two vectors of non-overlapping rectangles defining two regions of the +// space: one mandatory region that must be occupied and one optional region +// that can be occupied, try to build a vector of as few non-overlapping +// rectangles as possible defining a region R that satisfy: // - R \subset (mandatory \union optional); // - mandatory \subset R. // -// The function updates the set of `mandatory_rectangles` with `R` and +// The function updates the vector of `mandatory_rectangles` with `R` and // `optional_rectangles` with `optional_rectangles \setdiff R`. It returns // true if the `mandatory_rectangles` was updated. -bool ReduceNumberofBoxes(std::vector* mandatory_rectangles, - std::vector* optional_rectangles); +// +// This function uses a greedy algorithm that merge rectangles that share an +// edge. +bool ReduceNumberofBoxesGreedy(std::vector* mandatory_rectangles, + std::vector* optional_rectangles); + +// Same as above, but this implementation returns the optimal solution in +// minimizing the number of boxes if `optional_rectangles` is empty. On the +// other hand, its handling of optional boxes is rather limited. It simply fills +// the holes in the mandatory boxes with optional boxes, if possible. 
+bool ReduceNumberOfBoxesExactMandatory( + std::vector* mandatory_rectangles, + std::vector* optional_rectangles); enum EdgePosition { TOP = 0, RIGHT = 1, BOTTOM = 2, LEFT = 3 }; @@ -172,6 +184,8 @@ struct SingleShape { std::vector BoxesToShapes(absl::Span rectangles, const Neighbours& neighbours); +std::vector CutShapeIntoRectangles(SingleShape shapes); + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/2d_rectangle_presolve_test.cc b/ortools/sat/2d_rectangle_presolve_test.cc index 0c47b13152..899dc9cd37 100644 --- a/ortools/sat/2d_rectangle_presolve_test.cc +++ b/ortools/sat/2d_rectangle_presolve_test.cc @@ -51,13 +51,15 @@ std::vector BuildFromAsciiArt(std::string_view input) { for (int i = 0; i < lines.size(); i++) { for (int j = 0; j < lines[i].size(); j++) { if (lines[i][j] != ' ') { - rectangles.push_back( - {.x_min = j, .x_max = j + 1, .y_min = i, .y_max = i + 1}); + rectangles.push_back({.x_min = j, + .x_max = j + 1, + .y_min = 2 * lines.size() - 2 * i, + .y_max = 2 * lines.size() - 2 * i + 2}); } } } std::vector empty; - ReduceNumberofBoxes(&rectangles, &empty); + ReduceNumberofBoxesGreedy(&rectangles, &empty); return rectangles; } @@ -156,8 +158,7 @@ TEST(RectanglePresolve, RemoveOutsideBB) { } TEST(RectanglePresolve, RandomTest) { - constexpr int kTotalRectangles = 100; - constexpr int kFixedRectangleSize = 60; + constexpr int kFixedRectangleSize = 10; constexpr int kNumRuns = 1000; absl::BitGen bit_gen; @@ -165,9 +166,8 @@ TEST(RectanglePresolve, RandomTest) { // Start by generating a feasible problem that we know the solution with // some items fixed. 
std::vector input = - GenerateNonConflictingRectangles(kTotalRectangles, bit_gen); + GenerateNonConflictingRectanglesWithPacking({100, 100}, 40, bit_gen); std::shuffle(input.begin(), input.end(), bit_gen); - CHECK_EQ(input.size(), kTotalRectangles); absl::Span fixed_rectangles = absl::MakeConstSpan(input).subspan(0, kFixedRectangleSize); absl::Span other_rectangles = @@ -185,7 +185,12 @@ TEST(RectanglePresolve, RandomTest) { << RenderDot(std::nullopt, new_fixed_rectangles); } - CHECK_LE(new_fixed_rectangles.size(), kFixedRectangleSize); + if (new_fixed_rectangles.size() > fixed_rectangles.size()) { + LOG(FATAL) << "Presolved:\n" + << RenderDot(std::nullopt, fixed_rectangles) << "To:\n" + << RenderDot(std::nullopt, new_fixed_rectangles); + } + CHECK_LE(new_fixed_rectangles.size(), fixed_rectangles.size()); // Check if the original solution is still a solution. std::vector all_rectangles(new_fixed_rectangles.begin(), @@ -742,16 +747,14 @@ TEST(ContourTest, Random) { GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, bit_gen); std::shuffle(input.begin(), input.end(), bit_gen); const int num_fixed_rectangles = input.size() * 2 / 3; - absl::Span fixed_rectangles = + const absl::Span fixed_rectangles = absl::MakeConstSpan(input).subspan(0, num_fixed_rectangles); - absl::Span other_rectangles = + const absl::Span other_rectangles = absl::MakeSpan(input).subspan(num_fixed_rectangles); - std::vector new_fixed_rectangles(fixed_rectangles.begin(), - fixed_rectangles.end()); const std::vector input_in_range = MakeItemsFromRectangles(other_rectangles, 0.6, bit_gen); - auto neighbours = BuildNeighboursGraph(fixed_rectangles); + const Neighbours neighbours = BuildNeighboursGraph(fixed_rectangles); const auto components = SplitInConnectedComponents(neighbours); const Rectangle bb = {.x_min = 0, .x_max = 100, .y_min = 0, .y_max = 100}; int min_index = -1; @@ -766,25 +769,26 @@ TEST(ContourTest, Random) { } } - auto s = BoxesToShapes(fixed_rectangles, neighbours); - for 
(int i = 0; i < s.size(); ++i) { - const ShapePath& shape = s[i].boundary; + const std::vector shapes = + BoxesToShapes(fixed_rectangles, neighbours); + for (const SingleShape& shape : shapes) { + const ShapePath& boundary = shape.boundary; const ShapePath expected_shape = - TraceBoundary(shape.step_points[0], shape.touching_box_index[0], + TraceBoundary(boundary.step_points[0], boundary.touching_box_index[0], fixed_rectangles, neighbours); - if (shape.step_points != expected_shape.step_points) { + if (boundary.step_points != expected_shape.step_points) { LOG(ERROR) << "Fast algo:\n" - << RenderContour(bb, fixed_rectangles, shape); + << RenderContour(bb, fixed_rectangles, boundary); LOG(ERROR) << "Naive algo:\n" << RenderContour(bb, fixed_rectangles, expected_shape); LOG(FATAL) << "Found different solutions between naive and fast algo!"; } - EXPECT_EQ(shape.step_points, expected_shape.step_points); - EXPECT_EQ(shape.touching_box_index, expected_shape.touching_box_index); + EXPECT_EQ(boundary.step_points, expected_shape.step_points); + EXPECT_EQ(boundary.touching_box_index, expected_shape.touching_box_index); } if (run == 0) { - LOG(INFO) << RenderShapes(bb, fixed_rectangles, s); + LOG(INFO) << RenderShapes(bb, fixed_rectangles, shapes); } } } @@ -839,6 +843,143 @@ TEST(ContourTest, SimpleShapes) { std::make_pair(0, 20))); } +TEST(ContourTest, ExampleFromPaper) { + const std::vector input = BuildFromAsciiArt(R"( + ******************* + ******************* + ********** ******************* + ********** ******************* + *************************************** + *************************************** + *************************************** + *************************************** + *********** ************** **** + *********** ************** **** + *********** ******* *** **** + *********** ******* *** **** + *********** ************** **** + *********** ************** **** + *********** ************** **** + *************************************** + 
*************************************** + *************************************** + ************************************** + ************************************** + ************************************** + ******************************* + *************************************** + *************************************** + **************** **************** + **************** **************** + ****** *** + ****** *** + ****** *** + ****** + )"); + const Neighbours neighbours = BuildNeighboursGraph(input); + auto s = BoxesToShapes(input, neighbours); + LOG(INFO) << RenderDot(std::nullopt, input); + const std::vector output = CutShapeIntoRectangles(s[0]); + LOG(INFO) << RenderDot(std::nullopt, output); + EXPECT_THAT(output.size(), 16); +} + +bool RectanglesCoverSameArea(absl::Span a, + absl::Span b) { + return RegionIncludesOther(a, b) && RegionIncludesOther(b, a); +} + +TEST(ReduceNumberOfBoxes, RandomTestNoOptional) { + constexpr int kNumRuns = 1000; + absl::BitGen bit_gen; + + for (int run = 0; run < kNumRuns; ++run) { + // Start by generating a feasible problem that we know the solution with + // some items fixed. 
+ std::vector input = + GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, bit_gen); + std::shuffle(input.begin(), input.end(), bit_gen); + + std::vector output = input; + std::vector optional_rectangles_empty; + ReduceNumberOfBoxesExactMandatory(&output, &optional_rectangles_empty); + if (run == 0) { + LOG(INFO) << "Presolved:\n" << RenderDot(std::nullopt, input); + LOG(INFO) << "To:\n" << RenderDot(std::nullopt, output); + } + + if (output.size() > input.size()) { + LOG(INFO) << "Presolved:\n" << RenderDot(std::nullopt, input); + LOG(INFO) << "To:\n" << RenderDot(std::nullopt, output); + ADD_FAILURE() << "ReduceNumberofBoxes() increased the number of boxes, " + "but it should be optimal in reducing them!"; + } + CHECK(RectanglesCoverSameArea(output, input)); + } +} + +TEST(ReduceNumberOfBoxes, Problematic) { + // This example shows that we must consider diagonals that touches only on its + // extremities as "intersecting" for the bipartite graph. + const std::vector input = { + {.x_min = 26, .x_max = 51, .y_min = 54, .y_max = 81}, + {.x_min = 51, .x_max = 78, .y_min = 44, .y_max = 67}, + {.x_min = 51, .x_max = 62, .y_min = 67, .y_max = 92}, + {.x_min = 78, .x_max = 98, .y_min = 24, .y_max = 54}, + }; + std::vector output = input; + std::vector optional_rectangles_empty; + ReduceNumberOfBoxesExactMandatory(&output, &optional_rectangles_empty); + LOG(INFO) << "Presolved:\n" << RenderDot(std::nullopt, input); + LOG(INFO) << "To:\n" << RenderDot(std::nullopt, output); +} + +// This example shows that sometimes the best solution with respect to minimum +// number of boxes requires *not* filling a hole. Actually this follows from the +// formula that the minimum number of rectangles in a partition of a polygon +// with n vertices and h holes is n/2 + h − g − 1, where g is the number of +// non-intersecting good diagonals. This test-case shows a polygon with 4 +// internal vertices, 1 hole and 4 non-intersecting good diagonals that includes +// the hole. 
Removing the hole reduces the n/2 term by 2, decrease the h term by +// 1, but decrease the g term by 4. +// +// *********************** +// *********************** +// ***********************..................... +// ***********************..................... +// ***********************..................... +// ***********************..................... +// ***********************..................... +// ++++++++++++++++++++++ ..................... +// ++++++++++++++++++++++ ..................... +// ++++++++++++++++++++++ ..................... +// ++++++++++++++++++++++000000000000000000000000 +// ++++++++++++++++++++++000000000000000000000000 +// ++++++++++++++++++++++000000000000000000000000 +// 000000000000000000000000 +// 000000000000000000000000 +// 000000000000000000000000 +// 000000000000000000000000 +// +TEST(ReduceNumberOfBoxes, Problematic2) { + const std::vector input = { + {.x_min = 64, .x_max = 82, .y_min = 76, .y_max = 98}, + {.x_min = 39, .x_max = 59, .y_min = 63, .y_max = 82}, + {.x_min = 59, .x_max = 78, .y_min = 61, .y_max = 76}, + {.x_min = 44, .x_max = 64, .y_min = 82, .y_max = 100}, + }; + std::vector output = input; + std::vector optional_rectangles = { + {.x_min = 59, .x_max = 64, .y_min = 76, .y_max = 82}, + }; + ReduceNumberOfBoxesExactMandatory(&output, &optional_rectangles); + LOG(INFO) << "Presolving:\n" << RenderDot(std::nullopt, input); + + // Presolve will refuse to do anything since removing the hole will increase + // the number of boxes. 
+ CHECK(input == output); +} + } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 9991d8998d..5077db303c 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -2532,6 +2532,7 @@ cc_library( ":diffn_util", ":integer", "//ortools/base:stl_util", + "//ortools/graph:max_flow", "//ortools/graph:strongly_connected_components", "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:btree", diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index 289d4da62c..ea727355d4 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -232,7 +233,7 @@ bool CutData::AllCoefficientsArePositive() const { return true; } -void CutData::Canonicalize() { +void CutData::SortRelevantEntries() { num_relevant_entries = 0; max_magnitude = 0; for (CutTerm& entry : terms) { @@ -269,92 +270,78 @@ double CutData::ComputeEfficacy() const { return violation / std::sqrt(norm); } -void CutDataBuilder::ClearIndices() { - num_merges_ = 0; - constraint_is_indexed_ = false; - bool_index_.clear(); - secondary_bool_index_.clear(); -} - -void CutDataBuilder::RegisterAllBooleanTerms(const CutData& cut) { - constraint_is_indexed_ = true; - const int size = cut.terms.size(); - for (int i = 0; i < size; ++i) { - const CutTerm& term = cut.terms[i]; - if (term.bound_diff != 1) continue; - if (!term.IsSimple()) continue; - - // Initially we shouldn't have duplicate bools and (1 - bools). - // So we just fill bool_index_. - bool_index_[term.expr_vars[0]] = i; +// We can only merge the term if term.coeff + old_coeff do not overflow and +// if t * new_coeff do not overflow. +// +// If we cannot merge the term, we will keep them separate. The produced cut +// will be less strong, but can still be used. 
+bool CutDataBuilder::MergeIfPossible(IntegerValue t, CutTerm& to_add, + CutTerm& target) { + DCHECK_EQ(to_add.expr_vars[0], target.expr_vars[0]); + DCHECK_EQ(to_add.expr_coeffs[0], target.expr_coeffs[0]); + + const IntegerValue new_coeff = CapAddI(to_add.coeff, target.coeff); + if (AtMinOrMaxInt64I(new_coeff) || ProdOverflow(t, new_coeff)) { + return false; } -} -void CutDataBuilder::AddOrMergeTerm(const CutTerm& term, IntegerValue t, - CutData* cut) { - if (!constraint_is_indexed_) { - RegisterAllBooleanTerms(*cut); - } + to_add.coeff = 0; // Clear since we merge it. + target.coeff = new_coeff; + return true; +} - DCHECK(term.IsSimple()); - const IntegerVariable var = term.expr_vars[0]; - const bool is_positive = (term.expr_coeffs[0] > 0); - const int new_index = cut->terms.size(); - const auto [it, inserted] = bool_index_.insert({var, new_index}); - if (inserted) { - cut->terms.push_back(term); - return; - } +// We only deal with coeff * Bool or coeff * (1 - Bool) +// +// TODO(user): Because of merges, we might have entry with a coefficient of +// zero than are not useful. Remove them? +int CutDataBuilder::AddOrMergeBooleanTerms(absl::Span new_terms, + IntegerValue t, CutData* cut) { + if (new_terms.empty()) return 0; - // If the referred var is not right, replace the entry. - int entry_index = it->second; - if (entry_index >= new_index || cut->terms[entry_index].expr_vars[0] != var) { - it->second = new_index; - cut->terms.push_back(term); - return; + bool_index_.clear(); + secondary_bool_index_.clear(); + int num_merges = 0; + + // Fill the maps. + int i = 0; + for (CutTerm& term : new_terms) { + const IntegerVariable var = term.expr_vars[0]; + auto& map = term.expr_coeffs[0] > 0 ? bool_index_ : secondary_bool_index_; + const auto [it, inserted] = map.insert({var, i}); + if (!inserted) { + if (MergeIfPossible(t, term, new_terms[it->second])) { + ++num_merges; + } + } + ++i; } - // If the sign is not right, look into secondary hash_map for opposite sign. 
-  if ((cut->terms[entry_index].expr_coeffs[0] > 0) != is_positive) {
-    const auto [it, inserted] = secondary_bool_index_.insert({var, new_index});
-    if (inserted) {
-      cut->terms.push_back(term);
-      return;
-    }
+  // Loop over the cut now. We only merge new terms into existing cut entries
+  // here; any term left unmerged is appended to the cut after this loop.
+  for (CutTerm& term : cut->terms) {
+    if (term.bound_diff != 1) continue;
+    if (!term.IsSimple()) continue;
 
-    // If the referred var is not right, replace the entry.
-    entry_index = it->second;
-    if (entry_index >= new_index ||
-        cut->terms[entry_index].expr_vars[0] != var) {
-      it->second = new_index;
-      cut->terms.push_back(term);
-      return;
-    }
+    const IntegerVariable var = term.expr_vars[0];
+    auto& map = term.expr_coeffs[0] > 0 ? bool_index_ : secondary_bool_index_;
+    auto it = map.find(var);
+    if (it == map.end()) continue;
 
-    // If the sign is not right, replace the entry.
-    if ((cut->terms[entry_index].expr_coeffs[0] > 0) != is_positive) {
-      it->second = new_index;
-      cut->terms.push_back(term);
-      return;
+    // We found a match, try to merge the map entry into the cut.
+    // Note that we don't waste time erasing this entry from the map since
+    // we should have no duplicates in the original cut.
+    if (MergeIfPossible(t, new_terms[it->second], term)) {
+      ++num_merges;
     }
   }
 
-  DCHECK_EQ(cut->terms[entry_index].expr_vars[0], var);
-  DCHECK_EQ((cut->terms[entry_index].expr_coeffs[0] > 0), is_positive);
-  // We can only merge the term if term.coeff + old_coeff do not overflow and
-  // if t * new_coeff do not overflow.
-  //
-  // If we cannot merge the term, we will keep them separate. The produced cut
-  // will be less strong, but can still be used.
-  const IntegerValue new_coeff =
-      CapAddI(cut->terms[entry_index].coeff, term.coeff);
-  if (AtMinOrMaxInt64I(new_coeff) || ProdOverflow(t, new_coeff)) {
-    // If we cannot merge the term, we keep them separate.
+ for (const CutTerm& term : new_terms) { + if (term.coeff == 0) continue; cut->terms.push_back(term); - } else { - ++num_merges_; - cut->terms[entry_index].coeff = new_coeff; } + + return num_merges; } // TODO(user): Divide by gcd first to avoid possible overflow in the @@ -788,40 +775,38 @@ bool IntegerRoundingCutHelper::ComputeCut( // This should be better except it can mess up the norm and the divisors. cut_ = base_ct; if (options.use_ib_before_heuristic && ib_processor != nullptr) { - ib_processor->BaseCutBuilder()->ClearNumMerges(); - const int old_size = static_cast(cut_.terms.size()); - bool abort = true; - for (int i = 0; i < old_size; ++i) { - if (cut_.terms[i].bound_diff <= 1) continue; - if (!cut_.terms[i].HasRelevantLpValue()) continue; - - if (options.prefer_positive_ib && cut_.terms[i].coeff < 0) { + std::vector* new_bool_terms = + ib_processor->ClearedMutableTempTerms(); + for (CutTerm& term : cut_.terms) { + if (term.bound_diff <= 1) continue; + if (!term.HasRelevantLpValue()) continue; + + if (options.prefer_positive_ib && term.coeff < 0) { // We complement the term before trying the implied bound. 
- cut_.terms[i].Complement(&cut_.rhs); + term.Complement(&cut_.rhs); if (ib_processor->TryToExpandWithLowerImpliedbound( - IntegerValue(1), i, - /*complement=*/true, &cut_, ib_processor->BaseCutBuilder())) { + IntegerValue(1), + /*complement=*/true, &term, &cut_.rhs, new_bool_terms)) { ++total_num_initial_ibs_; - abort = false; continue; } - cut_.terms[i].Complement(&cut_.rhs); + term.Complement(&cut_.rhs); } if (ib_processor->TryToExpandWithLowerImpliedbound( - IntegerValue(1), i, - /*complement=*/true, &cut_, ib_processor->BaseCutBuilder())) { - abort = false; + IntegerValue(1), + /*complement=*/true, &term, &cut_.rhs, new_bool_terms)) { ++total_num_initial_ibs_; } } - total_num_initial_merges_ += - ib_processor->BaseCutBuilder()->NumMergesSinceLastClear(); // TODO(user): We assume that this is called with and without the option // use_ib_before_heuristic, so that we can abort if no IB has been applied // since then we will redo the computation. This is not really clean. - if (abort) return false; + if (new_bool_terms->empty()) return false; + total_num_initial_merges_ += + ib_processor->MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(*new_bool_terms), IntegerValue(1), &cut_); } // Our heuristic will try to generate a few different cuts, and we will keep @@ -841,7 +826,7 @@ bool IntegerRoundingCutHelper::ComputeCut( // // TODO(user): If the rhs is small and close to zero, we might want to // consider different way of complementing the variables. - cut_.Canonicalize(); + cut_.SortRelevantEntries(); const IntegerValue remainder_threshold( std::max(IntegerValue(1), cut_.max_magnitude / 1000)); if (cut_.rhs >= 0 && cut_.rhs < remainder_threshold.value()) { @@ -996,11 +981,11 @@ bool IntegerRoundingCutHelper::ComputeCut( // This should lead to stronger cuts even if the norms might be worse. 
num_ib_used_ = 0; if (ib_processor != nullptr) { - const auto [num_lb, num_ub] = ib_processor->PostprocessWithImpliedBound( - f, factor_t, &cut_, &cut_builder_); + const auto [num_lb, num_ub, num_merges] = + ib_processor->PostprocessWithImpliedBound(f, factor_t, &cut_); total_num_pos_lifts_ += num_lb; total_num_neg_lifts_ += num_ub; - total_num_merges_ += cut_builder_.NumMergesSinceLastClear(); + total_num_merges_ += num_merges; num_ib_used_ = num_lb + num_ub; } @@ -1296,21 +1281,23 @@ bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct, // Tricky: This only work because the cut absl128 rhs is not changed by these // operations. if (ib_processor != nullptr) { - ib_processor->BaseCutBuilder()->ClearNumMerges(); - const int old_size = static_cast(cut_.terms.size()); - for (int i = 0; i < old_size; ++i) { + std::vector* new_bool_terms = + ib_processor->ClearedMutableTempTerms(); + for (CutTerm& term : cut_.terms) { // We only look at non-Boolean with an lp value not close to the upper // bound. 
- const CutTerm& term = cut_.terms[i]; if (term.bound_diff <= 1) continue; if (term.lp_value + 1e-4 > AsDouble(term.bound_diff)) continue; if (ib_processor->TryToExpandWithLowerImpliedbound( - IntegerValue(1), i, - /*complement=*/false, &cut_, ib_processor->BaseCutBuilder())) { + IntegerValue(1), + /*complement=*/false, &term, &cut_.rhs, new_bool_terms)) { ++cover_stats_.num_initial_ibs; } } + + ib_processor->MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(*new_bool_terms), IntegerValue(1), &cut_); } bool has_relevant_int = false; @@ -1386,11 +1373,11 @@ bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct, } if (ib_processor != nullptr) { - const auto [num_lb, num_ub] = ib_processor->PostprocessWithImpliedBound( - f, /*factor_t=*/1, &cut_, &cut_builder_); + const auto [num_lb, num_ub, num_merges] = + ib_processor->PostprocessWithImpliedBound(f, /*factor_t=*/1, &cut_); cover_stats_.num_lb_ibs += num_lb; cover_stats_.num_ub_ibs += num_ub; - cover_stats_.num_merges += cut_builder_.NumMergesSinceLastClear(); + cover_stats_.num_merges += num_merges; } cover_stats_.num_bumps += ApplyWithPotentialBump(f, best_coeff, &cut_); @@ -1466,11 +1453,11 @@ bool CoverCutHelper::TrySingleNodeFlow(const CutData& input_ct, min_magnitude); if (ib_processor != nullptr) { - const auto [num_lb, num_ub] = ib_processor->PostprocessWithImpliedBound( - f, /*factor_t=*/1, &cut_, &cut_builder_); + const auto [num_lb, num_ub, num_merges] = + ib_processor->PostprocessWithImpliedBound(f, /*factor_t=*/1, &cut_); flow_stats_.num_lb_ibs += num_lb; flow_stats_.num_ub_ibs += num_ub; - flow_stats_.num_merges += cut_builder_.NumMergesSinceLastClear(); + flow_stats_.num_merges += num_merges; } // Lifting. @@ -1525,16 +1512,19 @@ bool CoverCutHelper::TryWithLetchfordSouliLifting( // // TODO(user): Merge Boolean terms that are complement of each other. 
if (ib_processor != nullptr) { - ib_processor->BaseCutBuilder()->ClearNumMerges(); - const int old_size = static_cast(cut_.terms.size()); - for (int i = 0; i < old_size; ++i) { - if (cut_.terms[i].bound_diff <= 1) continue; + std::vector* new_bool_terms = + ib_processor->ClearedMutableTempTerms(); + for (CutTerm& term : cut_.terms) { + if (term.bound_diff <= 1) continue; if (ib_processor->TryToExpandWithLowerImpliedbound( - IntegerValue(1), i, - /*complement=*/false, &cut_, ib_processor->BaseCutBuilder())) { + IntegerValue(1), + /*complement=*/false, &term, &cut_.rhs, new_bool_terms)) { ++ls_stats_.num_initial_ibs; } } + + ib_processor->MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(*new_bool_terms), IntegerValue(1), &cut_); } // TODO(user): we currently only deal with Boolean in the cover. Fix. @@ -2191,9 +2181,9 @@ bool ImpliedBoundsProcessor::DecomposeWithImpliedUpperBound( return true; } -std::pair ImpliedBoundsProcessor::PostprocessWithImpliedBound( +std::tuple ImpliedBoundsProcessor::PostprocessWithImpliedBound( const std::function& f, IntegerValue factor_t, - CutData* cut, CutDataBuilder* builder) { + CutData* cut) { int num_applied_lb = 0; int num_applied_ub = 0; @@ -2201,10 +2191,9 @@ std::pair ImpliedBoundsProcessor::PostprocessWithImpliedBound( CutTerm slack_term; CutTerm ub_bool_term; CutTerm ub_slack_term; - builder->ClearIndices(); - const int initial_size = cut->terms.size(); - for (int i = 0; i < initial_size; ++i) { - CutTerm& term = cut->terms[i]; + + tmp_terms_.clear(); + for (CutTerm& term : cut->terms) { if (term.bound_diff <= 1) continue; if (!term.IsSimple()) continue; @@ -2254,30 +2243,31 @@ std::pair ImpliedBoundsProcessor::PostprocessWithImpliedBound( // loose more, so we prefer to be a bit defensive. if (score > base_score + 1e-2) { ++num_applied_ub; - term = ub_slack_term; // Override first before push_back() ! 
- builder->AddOrMergeTerm(ub_bool_term, factor_t, cut); + term = ub_slack_term; + tmp_terms_.push_back(ub_bool_term); continue; } } if (expand) { ++num_applied_lb; - term = slack_term; // Override first before push_back() ! - builder->AddOrMergeTerm(bool_term, factor_t, cut); + term = slack_term; + tmp_terms_.push_back(bool_term); } } - return {num_applied_lb, num_applied_ub}; + + const int num_merges = cut_builder_.AddOrMergeBooleanTerms( + absl::MakeSpan(tmp_terms_), factor_t, cut); + + return {num_applied_lb, num_applied_ub, num_merges}; } -// Important: The cut_builder_ must have been reset. bool ImpliedBoundsProcessor::TryToExpandWithLowerImpliedbound( - IntegerValue factor_t, int i, bool complement, CutData* cut, - CutDataBuilder* builder) { - CutTerm& term = cut->terms[i]; - + IntegerValue factor_t, bool complement, CutTerm* term, absl::int128* rhs, + std::vector* new_bool_terms) { CutTerm bool_term; CutTerm slack_term; - if (!DecomposeWithImpliedLowerBound(term, factor_t, bool_term, slack_term)) { + if (!DecomposeWithImpliedLowerBound(*term, factor_t, bool_term, slack_term)) { return false; } @@ -2286,26 +2276,22 @@ bool ImpliedBoundsProcessor::TryToExpandWithLowerImpliedbound( // It is always good to complement such variable. // // Note that here we do more and just complement anything closer to UB. - // - // TODO(user): Because of merges, we might have entry with a coefficient of - // zero than are not useful. Remove them. 
if (complement) { if (bool_term.lp_value > 0.5) { - bool_term.Complement(&cut->rhs); + bool_term.Complement(rhs); } if (slack_term.lp_value > 0.5 * AsDouble(slack_term.bound_diff)) { - slack_term.Complement(&cut->rhs); + slack_term.Complement(rhs); } } - term = slack_term; - builder->AddOrMergeTerm(bool_term, factor_t, cut); + *term = slack_term; + new_bool_terms->push_back(bool_term); return true; } bool ImpliedBoundsProcessor::CacheDataForCut(IntegerVariable first_slack, CutData* cut) { - base_cut_builder_.ClearIndices(); cached_data_.clear(); const int size = cut->terms.size(); diff --git a/ortools/sat/cuts.h b/ortools/sat/cuts.h index 7e975cc6d4..245a440c87 100644 --- a/ortools/sat/cuts.h +++ b/ortools/sat/cuts.h @@ -134,6 +134,10 @@ struct CutData { double ComputeViolation() const; double ComputeEfficacy() const; + // This sorts terms by decreasing lp values and fills both + // num_relevant_entries and max_magnitude. + void SortRelevantEntries(); + std::string DebugString() const; // Note that we use a 128 bit rhs so we can freely complement variable without @@ -141,8 +145,7 @@ struct CutData { absl::int128 rhs; std::vector terms; - // This sorts terms and fill both num_relevant_entries and max_magnitude. - void Canonicalize(); + // Only filled after SortRelevantEntries(). IntegerValue max_magnitude; int num_relevant_entries; }; @@ -150,24 +153,21 @@ struct CutData { // Stores temporaries used to build or manipulate a CutData. class CutDataBuilder { public: + // Returns false if we encounter an integer overflow. + bool ConvertToLinearConstraint(const CutData& cut, LinearConstraint* output); + // These function allow to merges entries corresponding to the same variable // and complementation. That is (X - lb) and (ub - X) are NOT merged and kept // as separate terms. Note that we currently only merge Booleans since this // is the only case we need. 
- void ClearIndices(); - void AddOrMergeTerm(const CutTerm& term, IntegerValue t, CutData* cut); - - void ClearNumMerges() { num_merges_ = 0; } - int NumMergesSinceLastClear() const { return num_merges_; } - - // Returns false if we encounter an integer overflow. - bool ConvertToLinearConstraint(const CutData& cut, LinearConstraint* output); + // + // Return num_merges. + int AddOrMergeBooleanTerms(absl::Span terms, IntegerValue t, + CutData* cut); private: - void RegisterAllBooleanTerms(const CutData& cut); + bool MergeIfPossible(IntegerValue t, CutTerm& to_add, CutTerm& target); - int num_merges_ = 0; - bool constraint_is_indexed_ = false; absl::flat_hash_map bool_index_; absl::flat_hash_map secondary_bool_index_; absl::btree_map tmp_map_; @@ -219,27 +219,31 @@ class ImpliedBoundsProcessor { // We are about to apply the super-additive function f() to the CutData. Use // implied bound information to eventually substitute and make the cut - // stronger. Returns the number of {lb_ib, ub_ib} applied. + // stronger. Returns the number of {lb_ib, ub_ib, merges} applied. // // This should lead to stronger cuts even if the norms migth be worse. - std::pair PostprocessWithImpliedBound( + std::tuple PostprocessWithImpliedBound( const std::function& f, IntegerValue factor_t, - CutData* cut, CutDataBuilder* builder); + CutData* cut); // Precomputes quantities used by all cut generation. // This allows to do that once rather than 6 times. // Return false if there are no exploitable implied bounds. bool CacheDataForCut(IntegerVariable first_slack, CutData* cut); - // All our cut code use the same base cut (modulo complement), so we reuse the - // hash-map of where boolean are in the cut. Note that even if we add new - // entry that are no longer there for another cut algo, we can still reuse the - // same hash-map. 
- CutDataBuilder* BaseCutBuilder() { return &base_cut_builder_; } + bool TryToExpandWithLowerImpliedbound(IntegerValue factor_t, bool complement, + CutTerm* term, absl::int128* rhs, + std::vector* new_bool_terms); - bool TryToExpandWithLowerImpliedbound(IntegerValue factor_t, int i, - bool complement, CutData* cut, - CutDataBuilder* builder); + // This can be used to share the hash-map memory. + CutDataBuilder* MutableCutBuilder() { return &cut_builder_; } + + // This can be used as a temporary storage for + // TryToExpandWithLowerImpliedbound(). + std::vector* ClearedMutableTempTerms() { + tmp_terms_.clear(); + return &tmp_terms_; + } // Add a new variable that could be used in the new cuts. // Note that the cache must be computed to take this into account. @@ -283,7 +287,8 @@ class ImpliedBoundsProcessor { mutable absl::flat_hash_map cache_; // Temporary data used by CacheDataForCut(). - CutDataBuilder base_cut_builder_; + std::vector tmp_terms_; + CutDataBuilder cut_builder_; std::vector cached_data_; TopNCuts ib_cut_pool_ = TopNCuts(50); @@ -431,7 +436,6 @@ class IntegerRoundingCutHelper { std::vector best_rs_; int64_t num_ib_used_ = 0; - CutDataBuilder cut_builder_; CutData cut_; std::vector> adjusted_coeffs_; @@ -531,7 +535,6 @@ class CoverCutHelper { // Here to reuse memory, cut_ is both the input and the output. CutData cut_; CutData temp_cut_; - CutDataBuilder cut_builder_; // Hack to not sort twice. bool has_bool_base_ct_ = false; diff --git a/ortools/sat/cuts_test.cc b/ortools/sat/cuts_test.cc index 130db4669f..d8d78d153c 100644 --- a/ortools/sat/cuts_test.cc +++ b/ortools/sat/cuts_test.cc @@ -885,7 +885,6 @@ TEST(ImpliedBoundsProcessorTest, PositiveBasicTest) { // Lets look at the term X. 
CutData data; - CutDataBuilder builder; CutTerm X; X.coeff = 1; @@ -898,9 +897,14 @@ TEST(ImpliedBoundsProcessorTest, PositiveBasicTest) { data.terms.push_back(X); processor.CacheDataForCut(IntegerVariable(100), &data); - EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound(IntegerValue(1), 0, - /*complement=*/false, - &data, &builder)); + const IntegerValue t(1); + std::vector new_terms; + EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound( + t, /*complement=*/false, &data.terms[0], &data.rhs, &new_terms)); + + EXPECT_EQ(0, processor.MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(new_terms), t, &data)); + EXPECT_EQ(data.terms.size(), 2); EXPECT_THAT(data.terms[0].DebugString(), ::testing::StartsWith("coeff=1 lp=0 range=7")); @@ -937,7 +941,6 @@ TEST(ImpliedBoundsProcessorTest, NegativeBasicTest) { // Lets look at the term X. CutData data; - CutDataBuilder builder; CutTerm X; X.coeff = 1; @@ -950,9 +953,14 @@ TEST(ImpliedBoundsProcessorTest, NegativeBasicTest) { data.terms.push_back(X); processor.CacheDataForCut(IntegerVariable(100), &data); - EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound(IntegerValue(1), 0, - /*complement=*/false, - &data, &builder)); + + const IntegerValue t(1); + std::vector new_terms; + EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound( + t, /*complement=*/false, &data.terms[0], &data.rhs, &new_terms)); + EXPECT_EQ(0, processor.MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(new_terms), t, &data)); + EXPECT_EQ(data.terms.size(), 2); EXPECT_THAT(data.terms[0].DebugString(), ::testing::StartsWith("coeff=1 lp=0 range=7")); diff --git a/ortools/sat/diffn_util.cc b/ortools/sat/diffn_util.cc index ba8875ff6a..2c7d93d365 100644 --- a/ortools/sat/diffn_util.cc +++ b/ortools/sat/diffn_util.cc @@ -51,7 +51,7 @@ bool Rectangle::IsDisjoint(const Rectangle& other) const { other.y_min >= y_max; } -absl::InlinedVector Rectangle::SetDifference( +absl::InlinedVector Rectangle::RegionDifference( const Rectangle& 
other) const { const Rectangle intersect = Intersect(other); if (intersect.SizeX() == 0) { @@ -155,8 +155,8 @@ bool ReportEnergyConflict(Rectangle bounding_box, absl::Span boxes, return x->ReportConflict(); } -bool BoxesAreInEnergyConflict(const std::vector& rectangles, - const std::vector& energies, +bool BoxesAreInEnergyConflict(absl::Span rectangles, + absl::Span energies, absl::Span boxes, Rectangle* conflict) { // First consider all relevant intervals along the x axis. @@ -1542,18 +1542,17 @@ std::string RenderDot(std::optional bb, std::stringstream ss; ss << "digraph {\n"; ss << " graph [ bgcolor=lightgray ]\n"; - ss << " node [style=filled]\n"; + ss << " node [style=filled shape=box]\n"; if (bb.has_value()) { ss << " bb [fillcolor=\"grey\" pos=\"" << 2 * bb->x_min + bb->SizeX() - << "," << 2 * bb->y_min + bb->SizeY() - << "!\" shape=box width=" << 2 * bb->SizeX() + << "," << 2 * bb->y_min + bb->SizeY() << "!\" width=" << 2 * bb->SizeX() << " height=" << 2 * bb->SizeY() << "]\n"; } for (int i = 0; i < solution.size(); ++i) { ss << " " << i << " [fillcolor=\"" << colors[i % colors.size()] << "\" pos=\"" << 2 * solution[i].x_min + solution[i].SizeX() << "," << 2 * solution[i].y_min + solution[i].SizeY() - << "!\" shape=box width=" << 2 * solution[i].SizeX() + << "!\" width=" << 2 * solution[i].SizeX() << " height=" << 2 * solution[i].SizeY() << "]\n"; } ss << extra_dot_payload; @@ -1563,27 +1562,30 @@ std::string RenderDot(std::optional bb, std::vector FindEmptySpaces( const Rectangle& bounding_box, std::vector ocupied_rectangles) { - std::vector empty_spaces = {bounding_box}; - std::vector new_empty_spaces; // Sorting is not necessary for correctness but makes it faster. 
std::sort(ocupied_rectangles.begin(), ocupied_rectangles.end(), [](const Rectangle& a, const Rectangle& b) { return std::tuple(a.x_min, -a.x_max, a.y_min) < std::tuple(b.x_min, -b.x_max, b.y_min); }); - for (const Rectangle& ocupied_rectangle : ocupied_rectangles) { - new_empty_spaces.clear(); - for (const auto& empty_space : empty_spaces) { - for (Rectangle& r : empty_space.SetDifference(ocupied_rectangle)) { - new_empty_spaces.push_back(std::move(r)); - } - } - empty_spaces.swap(new_empty_spaces); - if (empty_spaces.empty()) { - break; + return PavedRegionDifference({bounding_box}, ocupied_rectangles); +} + +std::vector PavedRegionDifference( + std::vector original_region, + absl::Span area_to_remove) { + std::vector new_area_to_cover; + for (const Rectangle& rectangle : area_to_remove) { + new_area_to_cover.clear(); + for (const Rectangle& r : original_region) { + const auto& new_rectangles = r.RegionDifference(rectangle); + new_area_to_cover.insert(new_area_to_cover.end(), new_rectangles.begin(), + new_rectangles.end()); } + original_region.swap(new_area_to_cover); + if (original_region.empty()) break; } - return empty_spaces; + return original_region; } } // namespace sat diff --git a/ortools/sat/diffn_util.h b/ortools/sat/diffn_util.h index 3aa6933807..c2688c0d8c 100644 --- a/ortools/sat/diffn_util.h +++ b/ortools/sat/diffn_util.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -63,7 +64,8 @@ struct Rectangle { // Returns `this \ other` as a set of disjoint rectangles of non-empty area. // The resulting vector will have at most four elements. - absl::InlinedVector SetDifference(const Rectangle& other) const; + absl::InlinedVector RegionDifference( + const Rectangle& other) const; template friend void AbslStringify(Sink& sink, const Rectangle& r) { @@ -125,8 +127,8 @@ std::vector> GetOverlappingRectangleComponents( // Visible for testing. The algo is in O(n^4) so shouldn't be used directly. 
// Returns true if there exist a bounding box with too much energy. -bool BoxesAreInEnergyConflict(const std::vector& rectangles, - const std::vector& energies, +bool BoxesAreInEnergyConflict(absl::Span rectangles, + absl::Span energies, absl::Span boxes, Rectangle* conflict = nullptr); @@ -615,6 +617,20 @@ std::string RenderDot(std::optional bb, std::vector FindEmptySpaces( const Rectangle& bounding_box, std::vector ocupied_rectangles); +// Given two regions, each one of them defined by a vector of non-overlapping +// rectangles paving them, returns a vector of non-overlapping rectangles that +// paves the points that were part of the first region but not of the second. +// This can also be seen as the set difference of the points of the regions. +std::vector PavedRegionDifference( + std::vector original_region, + absl::Span area_to_remove); + +// The two regions must be defined by non-overlapping rectangles. +inline bool RegionIncludesOther(absl::Span region, + absl::Span other) { + return PavedRegionDifference({other.begin(), other.end()}, region).empty(); +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/diffn_util_test.cc b/ortools/sat/diffn_util_test.cc index 508ac031be..b0d9a11279 100644 --- a/ortools/sat/diffn_util_test.cc +++ b/ortools/sat/diffn_util_test.cc @@ -483,7 +483,7 @@ TEST(RectangleTest, BasicTest) { Rectangle({.x_min = 1, .x_max = 2, .y_min = 1, .y_max = 2})); } -TEST(RectangleTest, RandomSetDifferenceTest) { +TEST(RectangleTest, RandomRegionDifferenceTest) { absl::BitGen random; const int64_t size = 20; constexpr int num_runs = 400; @@ -497,7 +497,7 @@ TEST(RectangleTest, RandomSetDifferenceTest) { ret[i].y_max = ret[i].y_min + IntegerValue(absl::Uniform(random, 1, size - 1)); } - auto set_diff = ret[0].SetDifference(ret[1]); + auto set_diff = ret[0].RegionDifference(ret[1]); EXPECT_EQ(set_diff.empty(), ret[0].Intersect(ret[1]) == ret[0]); IntegerValue diff_area = 0; for (int i = 0; i < set_diff.size(); ++i) { 
@@ -514,6 +514,34 @@ TEST(RectangleTest, RandomSetDifferenceTest) { } } +TEST(RectangleTest, RandomPavedRegionDifferenceTest) { + absl::BitGen random; + constexpr int num_runs = 100; + for (int k = 0; k < num_runs; k++) { + const std::vector set1 = + GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, random); + const std::vector set2 = + GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, random); + + const std::vector diff = PavedRegionDifference(set1, set2); + for (int i = 0; i < diff.size(); ++i) { + for (int j = i + 1; j < diff.size(); ++j) { + EXPECT_TRUE(diff[i].IsDisjoint(diff[j])); + } + } + for (const auto& r_diff : diff) { + for (const auto& r2 : set2) { + EXPECT_TRUE(r_diff.IsDisjoint(r2)); + } + IntegerValue area = 0; + for (const auto& r1 : set1) { + area += r_diff.IntersectArea(r1); + } + EXPECT_EQ(area, r_diff.Area()); + } + } +} + TEST(GetMinimumOverlapTest, BasicTest) { RectangleInRange range_ret = { .bounding_area = {.x_min = 0, .x_max = 15, .y_min = 0, .y_max = 15}, diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index 85fe9849af..5803e39202 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -49,7 +49,7 @@ namespace operations_research { namespace sat { std::vector NegationOf( - const std::vector& vars) { + absl::Span vars) { std::vector result(vars.size()); for (int i = 0; i < vars.size(); ++i) { result[i] = NegationOf(vars[i]); diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index 3923dcb89d..8aa355bddb 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -207,8 +207,7 @@ inline std::string IntegerTermDebugString(IntegerVariable var, } // Returns the vector of the negated variables. -std::vector NegationOf( - const std::vector& vars); +std::vector NegationOf(absl::Span vars); // The integer equivalent of a literal. // It represents an IntegerVariable and an upper/lower bound on it. 
diff --git a/ortools/sat/linear_constraint_test.cc b/ortools/sat/linear_constraint_test.cc index bc87f50ed3..cad9ad4d9e 100644 --- a/ortools/sat/linear_constraint_test.cc +++ b/ortools/sat/linear_constraint_test.cc @@ -44,7 +44,8 @@ TEST(ComputeActivityTest, BasicBehavior) { util_intops::StrongVector values = {0.5, 0.0, 1.4, 0.0, -2.1, 0.0}; - EXPECT_NEAR(ComputeActivity(ct.Build(), values), 1 * 0.5 - 2 * 1.4 - 3 * 2.1, 1e-6); + EXPECT_NEAR(ComputeActivity(ct.Build(), values), 1 * 0.5 - 2 * 1.4 - 3 * 2.1, + 1e-6); } TEST(ComputeActivityTest, EmptyConstraint) { diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index e69f6cbc25..a1d70915b6 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -219,10 +219,11 @@ void ScatteredIntegerVector::ConvertToCutData( CutData* result) { result->terms.clear(); result->rhs = rhs; + absl::Span dense_vector = dense_vector_; if (is_sparse_) { std::sort(non_zeros_.begin(), non_zeros_.end()); for (const glop::ColIndex col : non_zeros_) { - const IntegerValue coeff = dense_vector_[col]; + const IntegerValue coeff = dense_vector[col.value()]; if (coeff == 0) continue; const IntegerVariable var = integer_variables[col.value()]; CHECK(result->AppendOneTerm(var, coeff, lp_solution[col.value()], @@ -230,12 +231,11 @@ void ScatteredIntegerVector::ConvertToCutData( integer_trail->LevelZeroUpperBound(var))); } } else { - const int size = dense_vector_.size(); - for (glop::ColIndex col(0); col < size; ++col) { - const IntegerValue coeff = dense_vector_[col]; + for (int col(0); col < dense_vector.size(); ++col) { + const IntegerValue coeff = dense_vector[col]; if (coeff == 0) continue; - const IntegerVariable var = integer_variables[col.value()]; - CHECK(result->AppendOneTerm(var, coeff, lp_solution[col.value()], + const IntegerVariable var = integer_variables[col]; + CHECK(result->AppendOneTerm(var, coeff, lp_solution[col], 
integer_trail->LevelZeroLowerBound(var), integer_trail->LevelZeroUpperBound(var))); } @@ -1466,8 +1466,6 @@ void LinearProgrammingConstraint::AddMirCuts() { const int num_rows = lp_data_.num_constraints().value(); std::vector> base_rows; util_intops::StrongVector row_weights(num_rows, 0.0); - util_intops::StrongVector at_ub(num_rows, false); - util_intops::StrongVector at_lb(num_rows, false); for (RowIndex row(0); row < num_rows; ++row) { // We only consider tight rows. // We use both the status and activity to have as much options as possible. @@ -1480,13 +1478,11 @@ void LinearProgrammingConstraint::AddMirCuts() { if (activity > lp_data_.constraint_upper_bounds()[row] - 1e-4 || status == glop::ConstraintStatus::AT_UPPER_BOUND || status == glop::ConstraintStatus::FIXED_VALUE) { - at_ub[row] = true; base_rows.push_back({row, IntegerValue(1)}); } if (activity < lp_data_.constraint_lower_bounds()[row] + 1e-4 || status == glop::ConstraintStatus::AT_LOWER_BOUND || status == glop::ConstraintStatus::FIXED_VALUE) { - at_lb[row] = true; base_rows.push_back({row, IntegerValue(-1)}); } @@ -1604,16 +1600,20 @@ void LinearProgrammingConstraint::AddMirCuts() { if (used_rows[row]) continue; used_rows[row] = true; - // We only consider "tight" rows, as defined above. + // Note that we consider all rows here, not only tight one. This makes a + // big difference on problem like blp-ic98.pb.gz. We can also use the + // integrality of the slack when adding a non-tight row to derive good + // cuts. Also, non-tight row will have a low weight, so they should + // still be chosen after the tight-one in most situation. 
bool add_row = false; - if (at_ub[row]) { + if (!integer_lp_[row].ub_is_trivial) { if (entry.coefficient() > 0.0) { if (dense_cut[var_to_eliminate] < 0) add_row = true; } else { if (dense_cut[var_to_eliminate] > 0) add_row = true; } } - if (at_lb[row]) { + if (!integer_lp_[row].lb_is_trivial) { if (entry.coefficient() > 0.0) { if (dense_cut[var_to_eliminate] > 0) add_row = true; } else { @@ -1933,7 +1933,7 @@ bool LinearProgrammingConstraint::ScalingCanOverflow( std::vector> LinearProgrammingConstraint::ScaleLpMultiplier( bool take_objective_into_account, bool ignore_trivial_constraints, - const std::vector>& lp_multipliers, + absl::Span> lp_multipliers, IntegerValue* scaling, int64_t overflow_cap) const { *scaling = 0; diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index 184753244f..2757dc3f2e 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -330,7 +330,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // will still be exact as it will work for any set of multiplier. std::vector> ScaleLpMultiplier( bool take_objective_into_account, bool ignore_trivial_constraints, - const std::vector>& lp_multipliers, + absl::Span> lp_multipliers, IntegerValue* scaling, int64_t overflow_cap = std::numeric_limits::max()) const; diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index c4f864cf17..8bd2aae00b 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -13,6 +13,7 @@ # limitations under the License. 
import itertools +import time from absl.testing import absltest import pandas as pd @@ -97,42 +98,55 @@ def bool_var_values(self): class TimeRecorder(cp_model.CpSolverSolutionCallback): - def __init__(self, default_time: float) -> None: + def __init__(self) -> None: super().__init__() - self.__last_time = default_time + self.__last_time: float = 0.0 def on_solution_callback(self) -> None: - self.__last_time = self.wall_time + self.__last_time = time.time() @property - def last_time(self): + def last_time(self) -> float: return self.__last_time class LogToString: """Record log in a string.""" - def __init__(self): + def __init__(self) -> None: self.__log = "" - def new_message(self, message: str): + def new_message(self, message: str) -> None: self.__log += message self.__log += "\n" @property - def log(self): + def log(self) -> str: return self.__log class BestBoundCallback: - def __init__(self): + def __init__(self) -> None: self.best_bound: float = 0.0 - def new_best_bound(self, bb: float): + def new_best_bound(self, bb: float) -> None: self.best_bound = bb +class BestBoundTimeCallback: + + def __init__(self) -> None: + self.__last_time: float = 0.0 + + def new_best_bound(self, unused_bb: float): + self.__last_time = time.time() + + @property + def last_time(self) -> float: + return self.__last_time + + class CpModelTest(absltest.TestCase): def testCreateIntegerVariable(self): @@ -1769,9 +1783,10 @@ def rotate_symbols(symbols: list[int], turns: int) -> list[int]: solver.parameters.cp_model_presolve = False solver.parameters.symmetry_level = 0 - callback = TimeRecorder(solver.parameters.max_time_in_seconds) - solver.Solve(model, callback) - self.assertLess(solver.wall_time, callback.last_time + 5.0) + solution_callback = TimeRecorder() + status = solver.Solve(model, solution_callback) + if status == cp_model.OPTIMAL: + self.assertLess(time.time(), solution_callback.last_time + 5.0) def testIssue4376MinimizeModel(self): print("testIssue4376MinimizeModel") @@ 
-1868,9 +1883,15 @@ def testIssue4376MinimizeModel(self): solver.parameters.num_workers = 8 solver.parameters.max_time_in_seconds = 50 solver.parameters.log_search_progress = True - callback = TimeRecorder(solver.parameters.max_time_in_seconds) - solver.Solve(model, callback) - self.assertLess(solver.wall_time, callback.last_time + 5.0) + solution_callback = TimeRecorder() + best_bound_callback = BestBoundTimeCallback() + solver.best_bound_callback = best_bound_callback.new_best_bound + status = solver.Solve(model, solution_callback) + if status == cp_model.OPTIMAL: + self.assertLess( + time.time(), + max(best_bound_callback.last_time, solution_callback.last_time) + 5.0, + ) if __name__ == "__main__":