diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index a8477d00e0..411bfdbf3c 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -1887,6 +1887,21 @@ cc_library( ], ) +cc_library( + name = "timetable_horizontal_edgefinding", + srcs = ["timetable_horizontal_edgefinding.cc"], + hdrs = ["timetable_horizontal_edgefinding.h"], + deps = [ + ":integer", + ":intervals", + ":model", + ":sat_base", + "//ortools/base:iterator_adaptors", + "//ortools/util:strong_integers", + "@com_google_absl//absl/log:check", + ], +) + cc_library( name = "cumulative", srcs = ["cumulative.cc"], @@ -1906,6 +1921,7 @@ cc_library( ":sat_solver", ":timetable", ":timetable_edgefinding", + ":timetable_horizontal_edgefinding", "//ortools/base", "//ortools/util:strong_integers", "@com_google_absl//absl/log:check", diff --git a/ortools/sat/cumulative.cc b/ortools/sat/cumulative.cc index ed1265277c..43dc5a590a 100644 --- a/ortools/sat/cumulative.cc +++ b/ortools/sat/cumulative.cc @@ -34,6 +34,7 @@ #include "ortools/sat/sat_solver.h" #include "ortools/sat/timetable.h" #include "ortools/sat/timetable_edgefinding.h" +#include "ortools/sat/timetable_horizontal_edgefinding.h" #include "ortools/util/strong_integers.h" namespace operations_research { @@ -244,6 +245,17 @@ std::function Cumulative( time_tabling->RegisterWith(watcher); model->TakeOwnership(time_tabling); + // Propagator responsible for applying the Horizontal Elastic Overload + // Checking rule. The propagator simply takes care of detecting conflicts + // without actually modifying the domain of the variables. + if (parameters.use_horizontal_overload_checking_in_cumulative()) { + HorizontallyElasticOverloadChecker* heoc = + new HorizontallyElasticOverloadChecker(capacity, helper, + demands_helper, model); + heoc->RegisterWith(watcher); + model->TakeOwnership(heoc); + } + + // Propagator responsible for applying the Overload Checking filtering rule. // It increases the minimum of the capacity variable.
if (parameters.use_overload_checker_in_cumulative()) { diff --git a/ortools/sat/cumulative_test.cc b/ortools/sat/cumulative_test.cc index e14f2c63ce..4e7e650d36 100644 --- a/ortools/sat/cumulative_test.cc +++ b/ortools/sat/cumulative_test.cc @@ -353,9 +353,10 @@ TEST(CumulativeTest, RegressionTest2) { // ======================================================================== // Param1: Number of tasks. -// Param3: Enable overload checking. -// Param4: Enable timetable edge finding. -typedef ::testing::tuple CumulativeTestParams; +// Param2: Enable overload checking. +// Param3: Enable timetable edge finding. +// Param4: Enable horizontal elastic overload checking. +typedef ::testing::tuple CumulativeTestParams; class RandomCumulativeTest : public ::testing::TestWithParam { @@ -369,6 +370,8 @@ class RandomCumulativeTest ::testing::get<1>(GetParam())); parameters.set_use_timetable_edge_finding_in_cumulative( ::testing::get<2>(GetParam())); + parameters.set_use_horizontal_overload_checking_in_cumulative( + ::testing::get<3>(GetParam())); return parameters; } }; @@ -409,12 +412,14 @@ TEST_P(SlowRandomCumulativeTest, FindAllOptionalTasks) { INSTANTIATE_TEST_SUITE_P( All, FastRandomCumulativeTest, ::testing::Combine(::testing::Range(3, DEBUG_MODE ? 4 : 6), - ::testing::Bool(), ::testing::Bool())); + ::testing::Bool(), ::testing::Bool(), + ::testing::Bool())); INSTANTIATE_TEST_SUITE_P( All, SlowRandomCumulativeTest, ::testing::Combine(::testing::Range(3, DEBUG_MODE ? 4 : 5), - ::testing::Bool(), ::testing::Bool())); + ::testing::Bool(), ::testing::Bool(), + ::testing::Bool())); } // namespace } // namespace sat diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 6e03c28e2d..e8293a8e3e 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -23,7 +23,7 @@ option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. 
// -// NEXT TAG: 302 +// NEXT TAG: 303 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -816,6 +816,14 @@ message SatParameters { // depending on the problem, turning this off may lead to a faster solution. optional bool use_overload_checker_in_cumulative = 78 [default = false]; + // When this is true, the cumulative constraint is reinforced with horizontal + // elastic overload checking, i.e., an additional level of reasoning based on + // horizontal elasticity and energy. This additional level supplements the + // default level of reasoning as well as timetable edge finding. + // + // This propagator is experimental. + optional bool use_horizontal_overload_checking_in_cumulative = 302 [default = false]; + // Enable a heuristic to solve cumulative constraints using a modified energy // constraint. We modify the usual energy definition by applying a // super-additive function (also called "conservative scale" or "dual-feasible diff --git a/ortools/sat/timetable_horizontal_edgefinding.cc b/ortools/sat/timetable_horizontal_edgefinding.cc new file mode 100644 index 0000000000..822f370505 --- /dev/null +++ b/ortools/sat/timetable_horizontal_edgefinding.cc @@ -0,0 +1,320 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/timetable_horizontal_edgefinding.h" + +#include +#include +#include + +#include "absl/log/check.h" +#include "ortools/base/iterator_adaptors.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/model.h" +#include "ortools/util/sort.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { + +HorizontallyElasticOverloadChecker::HorizontallyElasticOverloadChecker( + AffineExpression capacity, SchedulingConstraintHelper* helper, + SchedulingDemandHelper* demands, Model* model) + : num_tasks_(helper->NumTasks()), + capacity_(capacity), + helper_(helper), + demands_(demands), + integer_trail_(model->GetOrCreate()) { + task_types_.resize(num_tasks_); + + // Initialize the profile with dummy events for each task/event + a sentinel + // to simplify the propagator's logic. The events are updated each time + // OverloadCheckerPass is called. + // + // TODO: This datastructure contains everything we need to compute the + // "classic" profile used in Time-Tabling. 
+ for (int i = 0; i < num_tasks_; i++) { + if (helper_->IsPresent(i) && demands_->DemandMin(i) > 0) { + profile_events_.push_back({i, ProfileEventType::kStartMin, 0, 0}); + profile_events_.push_back({i, ProfileEventType::kStartMax, 0, 0}); + profile_events_.push_back({i, ProfileEventType::kEndMin, 0, 0}); + profile_events_.push_back({i, ProfileEventType::kEndMax, 0, 0}); + } + } + profile_events_.push_back( + {-1, ProfileEventType::kSentinel, kMaxIntegerValue, 0}); +} + +void HorizontallyElasticOverloadChecker::RegisterWith( + GenericLiteralWatcher* watcher) { + const int id = watcher->Register(this); + watcher->WatchUpperBound(capacity_, id); + helper_->WatchAllTasks(id, watcher); + for (int t = 0; t < num_tasks_; t++) { + watcher->WatchLowerBound(demands_->Demands()[t], id); + } + watcher->SetPropagatorPriority(id, 3); + watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id); +} + +bool HorizontallyElasticOverloadChecker::Propagate() { + if (!helper_->SynchronizeAndSetTimeDirection(true)) return false; + if (!OverloadCheckerPass()) return false; + if (!helper_->SynchronizeAndSetTimeDirection(false)) return false; + if (!OverloadCheckerPass()) return false; + return true; +} + +bool HorizontallyElasticOverloadChecker::OverloadCheckerPass() { + // Update the events and re-sort them incrementally. The updated events are + // used in all subsequent calls of ScheduleTasks to dynamically compute the + // profile. The events are valid for the entire function and do not need to + // be recomputed. + // + // Note that we do not iterate over the last event which should ALWAYS + // be the sentinel. 
+ for (int i = 0; i < profile_events_.size() - 1; ++i) { int t = profile_events_[i].task_id; if (!helper_->IsPresent(t)) { profile_events_[i].height = 0; // effectively ignore the event profile_events_[i].time = kMaxIntegerValue; // reduce sorting effort continue; } + profile_events_[i].height = demands_->DemandMin(t); switch (profile_events_[i].event_type) { case ProfileEventType::kStartMin: profile_events_[i].time = helper_->StartMin(t); break; case ProfileEventType::kStartMax: profile_events_[i].time = helper_->StartMax(t); break; case ProfileEventType::kEndMin: profile_events_[i].time = helper_->EndMin(t); break; case ProfileEventType::kEndMax: profile_events_[i].time = helper_->EndMax(t); break; default: break; } } IncrementalSort(profile_events_.begin(), profile_events_.end() - 1); + // Iterate on all the windows [-inf, window_end] where window_end is the end max of a task. IntegerValue window_end = kMinIntegerValue; for (const ProfileEvent event : profile_events_) { if (event.event_type != ProfileEventType::kEndMax || event.time <= window_end) { continue; } window_end = event.time; + const IntegerValue capacity = integer_trail_->UpperBound(capacity_); if (window_end < ScheduleTasks(window_end, capacity)) { AddScheduleTaskReason(window_end); return helper_->ReportConflict(); } } + return true; } + // ScheduleTasks returns a lower bound of the earliest time at which a group of tasks will complete. The group of tasks is all the tasks finishing before the end of the window + the fixed part of the tasks having a mandatory part that overlaps with the window. IntegerValue HorizontallyElasticOverloadChecker::ScheduleTasks( IntegerValue window_end, IntegerValue capacity) { // TODO: If we apply this only by increasing window_end, then there is no need to re-process the FULL tasks. Only the fixed-part and ignored might change type.
Specifically, FIXED-PART can become FULL while IGNORE can + // either become FIXED-PART or FULL. Said otherwise, FULL is the terminal + // state. for (int i = 0; i < num_tasks_; ++i) { if (helper_->EndMax(i) <= window_end) { task_types_[i] = TaskType::kFull; continue; } // If the task is external but has a compulsory part that starts before + // window_end, then we can process it partially. if (helper_->StartMax(i) < window_end && helper_->StartMax(i) < helper_->EndMin(i)) { task_types_[i] = TaskType::kFixedPart; continue; } // Otherwise, simply mark the task to be ignored during sweep. task_types_[i] = TaskType::kIgnore; } + int next_event = 0; IntegerValue time = profile_events_[0].time; + // Estimation of the earliest time at which all the processed tasks can be + // scheduled. IntegerValue new_window_end = kMinIntegerValue; + // Overload represents the accumulated quantity of energy that could not be + // consumed before time. IntegerValue overload = 0; + // Total demand required at time if all processed tasks were starting at + // their start min. IntegerValue demand_req = 0; + // Total demand required at time if all processed tasks that could overlap + // time were. This is used to avoid placing overload in places where no task + // would actually be. IntegerValue demand_max = 0; + while (time < window_end) { // Update the two profile lines demand_req and demand_max by processing + // all the events at time. How to process an event depends on its type.
+ while (profile_events_[next_event].time == time) { const ProfileEvent event = profile_events_[next_event]; switch (task_types_[event.task_id]) { case TaskType::kIgnore: break; // drop the event case TaskType::kFull: switch (event.event_type) { case ProfileEventType::kStartMin: demand_max += event.height; demand_req += event.height; break; case ProfileEventType::kEndMin: demand_req -= event.height; break; case ProfileEventType::kEndMax: demand_max -= event.height; break; default: break; } break; case TaskType::kFixedPart: switch (event.event_type) { case ProfileEventType::kStartMax: demand_max += event.height; demand_req += event.height; break; case ProfileEventType::kEndMin: demand_max -= event.height; demand_req -= event.height; break; default: break; } break; } next_event++; } + DCHECK_LE(demand_req, demand_max); DCHECK_GE(overload, 0); + // Should always be safe thanks to the sentinel. DCHECK_LT(next_event, profile_events_.size()); + IntegerValue next_time = profile_events_[next_event].time; + // The maximum amount of resource that could be consumed if all non-ignored + // tasks that could be scheduled at the current time were. const IntegerValue true_capa = std::min(demand_max, capacity); + // Indicate whether we're using some capacity at this time point. This is + // used to decide later on how to update new_window_end. const IntegerValue capa_used = std::min(demand_req + overload, true_capa); + // Amount of resource available ("space") at each time point until the next + // event to potentially place some of the accumulated overload. const IntegerValue space_per_time_unit = true_capa - demand_req; + // Update overload by either accumulating new overload (if the space is + // negative) or by potentially freeing some of the previously accumulated + // overload (if the space is positive).
+ if (space_per_time_unit < 0) { // accumulate overload overload -= space_per_time_unit * (next_time - time); } else if (space_per_time_unit > 0 && overload > 0) { // free overload // Maximum amount of overload that could be freed until the next event. const IntegerValue max_freeable_overload = (next_time - time) * space_per_time_unit; + if (max_freeable_overload < overload) { overload -= max_freeable_overload; } else { // Adjust next_time to indicate that the true "next event" in terms of // a change in resource consumption is happening before the next event // in the profile. This is important to guarantee that new_window_end // is properly adjusted below. // // This operation corresponds to: // next_time = time + ceil(overload/space_per_time_unit) next_time = time + (overload + space_per_time_unit - 1) / space_per_time_unit; overload = 0; } } + if (capa_used > 0) { // Note that next_time might be earlier than the time of the next event if // all the overload was consumed. See comment above. new_window_end = next_time; } + time = profile_events_[next_event].time; } + if (overload > 0) { return kMaxIntegerValue; } return new_window_end; } + void HorizontallyElasticOverloadChecker::AddScheduleTaskReason( IntegerValue window_end) { helper_->ClearReason(); + // Capacity of the resource. if (capacity_.var != kNoIntegerVariable) { helper_->MutableIntegerReason()->push_back( integer_trail_->UpperBoundAsLiteral(capacity_.var)); } + // TODO: There's an opportunity to further generalize the reason if + // demand_max and overload are set to 0 before the end of the window. + // This can happen if the resource consumption has "humps" though it + // is unclear whether this pattern is likely in practice or not.
+ for (int t = 0; t < num_tasks_; ++t) { + switch (task_types_[t]) { + case TaskType::kFull: + helper_->AddStartMinReason(t, helper_->StartMin(t)); + helper_->AddEndMaxReason(t, helper_->EndMax(t)); + break; + case TaskType::kFixedPart: + helper_->AddEndMinReason(t, std::min(helper_->EndMin(t), window_end)); + helper_->AddStartMaxReason(t, helper_->StartMax(t)); + break; + case TaskType::kIgnore: + continue; + } + + helper_->AddPresenceReason(t); + helper_->AddSizeMinReason(t); + demands_->AddDemandMinReason(t); + } +} + +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/timetable_horizontal_edgefinding.h b/ortools/sat/timetable_horizontal_edgefinding.h new file mode 100644 index 0000000000..527ef4c22a --- /dev/null +++ b/ortools/sat/timetable_horizontal_edgefinding.h @@ -0,0 +1,107 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef OR_TOOLS_SAT_TIMETABLE_HORIZONTAL_EDGEFINDING_H_ +#define OR_TOOLS_SAT_TIMETABLE_HORIZONTAL_EDGEFINDING_H_ + +#include + +#include "ortools/sat/integer.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { + +// HorizontallyElasticOverloadChecker implements the improved quadratic +// horizontally elastic + timetable overload checker filtering rule presented +// in Roger Kameugne et al., "Improved timetable edge finder rule for +// cumulative constraint with profile". +class HorizontallyElasticOverloadChecker : public PropagatorInterface { + public: + HorizontallyElasticOverloadChecker(AffineExpression capacity, + SchedulingConstraintHelper* helper, + SchedulingDemandHelper* demands, Model* model); + + // This type is neither copyable nor movable. + HorizontallyElasticOverloadChecker(const HorizontallyElasticOverloadChecker&) = delete; + HorizontallyElasticOverloadChecker& operator=(const HorizontallyElasticOverloadChecker&) = delete; + + bool Propagate() final; + + void RegisterWith(GenericLiteralWatcher* watcher); + + private: + enum class ProfileEventType { + kStartMin, + kStartMax, + kEndMin, + kEndMax, + kSentinel, + }; + + enum class TaskType { + kFull, + kFixedPart, + kIgnore, + }; + + // ProfileEvent represents an event to construct the horizontal elastic + // profile. + struct ProfileEvent { + /* const */ int task_id; + /* const */ ProfileEventType event_type; + IntegerValue time; + IntegerValue height; + + ProfileEvent(int task_id, ProfileEventType event_type, IntegerValue time, IntegerValue height) + : task_id(task_id), event_type(event_type), time(time), height(height) {} + + bool operator<(const ProfileEvent& other) const { + return time < other.time; + } + }; + + // Performs a single pass of the Horizontal Elastic Overload Checker rule + // to detect potential conflicts.
This same function can be used forward + // and backward by calling the SynchronizeAndSetTimeDirection method first. + bool OverloadCheckerPass(); + + // Returns the end min for the task group in the given window according + // to the horizontally elastic rule + timetable. + IntegerValue ScheduleTasks(IntegerValue window_end, IntegerValue capacity); + + void AddScheduleTaskReason(IntegerValue window_end); + + const int num_tasks_; + const AffineExpression capacity_; + SchedulingConstraintHelper* helper_; + SchedulingDemandHelper* demands_; + IntegerTrail* integer_trail_; + + // Pre-allocated vector indicating how tasks should be processed by + // ScheduleTasks and AddScheduleTaskReason. + std::vector task_types_; + + // Pre-allocated vector to contain the profile. The profile cannot + // contain more than 4*n + 1 events: one for each start/end min/max + // event + one sentinel. + std::vector profile_events_; +}; + +} // namespace sat +} // namespace operations_research + +#endif  // OR_TOOLS_SAT_TIMETABLE_HORIZONTAL_EDGEFINDING_H_