Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[WIP] Add the horizontal elastic timetable overload checking rule to the cumulative constraint. #4401

Draft
wants to merge 12 commits into
base: main
Choose a base branch
from
16 changes: 16 additions & 0 deletions ortools/sat/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -1879,6 +1879,21 @@ cc_library(
],
)

# Propagator implementing the horizontal elastic overload checking rule for
# the cumulative constraint (timetable_horizontal_edgefinding.{h,cc}).
# NOTE(review): the .cc file includes "ortools/util/sort.h" — confirm whether
# a corresponding "//ortools/util:sort" entry is needed in deps.
cc_library(
    name = "timetable_horizontal_edgefinding",
    srcs = ["timetable_horizontal_edgefinding.cc"],
    hdrs = ["timetable_horizontal_edgefinding.h"],
    deps = [
        ":integer",
        ":intervals",
        ":model",
        ":sat_base",
        "//ortools/base:iterator_adaptors",
        "//ortools/util:strong_integers",
        "@com_google_absl//absl/log:check",
    ],
)

cc_library(
name = "cumulative",
srcs = ["cumulative.cc"],
Expand All @@ -1898,6 +1913,7 @@ cc_library(
":sat_solver",
":timetable",
":timetable_edgefinding",
":timetable_horizontal_edgefinding",
"//ortools/base",
"//ortools/util:strong_integers",
"@com_google_absl//absl/log:check",
Expand Down
12 changes: 12 additions & 0 deletions ortools/sat/cumulative.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
#include "ortools/sat/sat_solver.h"
#include "ortools/sat/timetable.h"
#include "ortools/sat/timetable_edgefinding.h"
#include "ortools/sat/timetable_horizontal_edgefinding.h"
#include "ortools/util/strong_integers.h"

namespace operations_research {
Expand Down Expand Up @@ -244,6 +245,17 @@ std::function<void(Model*)> Cumulative(
time_tabling->RegisterWith(watcher);
model->TakeOwnership(time_tabling);

// Propagator responsible for applying the Horizontal Elastic Overload
// Checking rule. The propagator simply takes care of detecting conflicts
// without actually modifying the domain of the variables.
if (parameters.use_horizontal_overload_checking_in_cumulative()) {
HorizontallyElasticOverloadChecker* heoc =
new HorizontallyElasticOverloadChecker(capacity, helper,
demands_helper, model);
heoc->RegisterWith(watcher);
model->TakeOwnership(heoc);
}

// Propagator responsible for applying the Overload Checking filtering rule.
// It increases the minimum of the capacity variable.
if (parameters.use_overload_checker_in_cumulative()) {
Expand Down
15 changes: 10 additions & 5 deletions ortools/sat/cumulative_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -353,9 +353,10 @@ TEST(CumulativeTest, RegressionTest2) {
// ========================================================================

// Param1: Number of tasks.
// Param3: Enable overload checking.
// Param4: Enable timetable edge finding.
typedef ::testing::tuple<int, bool, bool> CumulativeTestParams;
// Param2: Enable overload checking.
// Param3: Enable timetable edge finding.
// Param4: Enable horizontal elastic overload checking.
typedef ::testing::tuple<int, bool, bool, bool> CumulativeTestParams;

class RandomCumulativeTest
: public ::testing::TestWithParam<CumulativeTestParams> {
Expand All @@ -369,6 +370,8 @@ class RandomCumulativeTest
::testing::get<1>(GetParam()));
parameters.set_use_timetable_edge_finding_in_cumulative(
::testing::get<2>(GetParam()));
parameters.set_use_horizontal_overload_checking_in_cumulative(
::testing::get<3>(GetParam()));
return parameters;
}
};
Expand Down Expand Up @@ -409,12 +412,14 @@ TEST_P(SlowRandomCumulativeTest, FindAllOptionalTasks) {
INSTANTIATE_TEST_SUITE_P(
All, FastRandomCumulativeTest,
::testing::Combine(::testing::Range(3, DEBUG_MODE ? 4 : 6),
::testing::Bool(), ::testing::Bool()));
::testing::Bool(), ::testing::Bool(),
::testing::Bool()));

INSTANTIATE_TEST_SUITE_P(
All, SlowRandomCumulativeTest,
::testing::Combine(::testing::Range(3, DEBUG_MODE ? 4 : 5),
::testing::Bool(), ::testing::Bool()));
::testing::Bool(), ::testing::Bool(),
::testing::Bool()));

} // namespace
} // namespace sat
Expand Down
290 changes: 290 additions & 0 deletions ortools/sat/timetable_horizontal_edgefinding.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,290 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/sat/timetable_horizontal_edgefinding.h"

#include <algorithm>
#include <cmath>
#include <vector>

#include "absl/log/check.h"
#include "ortools/base/iterator_adaptors.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/intervals.h"
#include "ortools/sat/model.h"
#include "ortools/util/sort.h"
#include "ortools/util/strong_integers.h"

namespace operations_research {
namespace sat {

// Builds the checker for the given cumulative resource. The helpers and the
// model are borrowed (not owned) and must outlive this propagator.
HorizontallyElasticOverloadChecker::HorizontallyElasticOverloadChecker(
    AffineExpression capacity, SchedulingConstraintHelper* helper,
    SchedulingDemandHelper* demands, Model* model)
    : num_tasks_(helper->NumTasks()),
      capacity_(capacity),
      helper_(helper),
      demands_(demands),
      integer_trail_(model->GetOrCreate<IntegerTrail>()) {
  // One classification slot per task, overwritten by each ScheduleTasks call.
  task_types_.resize(num_tasks_);
  // Each task contributes at most 4 events (start min/max, end min/max),
  // plus one sentinel event — see OverloadCheckerPass.
  profile_events_.reserve(4 * num_tasks_ + 1);
}

// Subscribes this propagator to the events that can make it find a conflict:
// a decrease of the capacity upper bound, any change to a task interval, or
// an increase of a demand lower bound.
void HorizontallyElasticOverloadChecker::RegisterWith(
    GenericLiteralWatcher* watcher) {
  const int id = watcher->Register(this);

  watcher->WatchUpperBound(capacity_, id);
  helper_->WatchAllTasks(id, watcher);
  for (const auto& demand : demands_->Demands()) {
    watcher->WatchLowerBound(demand, id);
  }

  watcher->SetPropagatorPriority(id, 3);
  watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id);
}

bool HorizontallyElasticOverloadChecker::Propagate() {
if (!helper_->SynchronizeAndSetTimeDirection(true)) return false;
if (!OverloadCheckerPass()) return false;
if (!helper_->SynchronizeAndSetTimeDirection(false)) return false;
if (!OverloadCheckerPass()) return false;
return true;
}

bool HorizontallyElasticOverloadChecker::OverloadCheckerPass() {
// Prepate the profile events which will be used during ScheduleTasks to
// dynamically compute the profile. The events are valid for the entire
// function and do not need to be recomputed.
//
// TODO: This datastructure contains everything we need to compute the
// "classic" profile used in Time-Tabling.
profile_events_.clear();
for (int i = 0; i < num_tasks_; i++) {
if (helper_->IsPresent(i) && demands_->DemandMin(i) > 0) {
profile_events_.emplace_back(i, helper_->StartMin(i),
demands_->DemandMin(i),
ProfileEventType::kStartMin);
profile_events_.emplace_back(i, helper_->StartMax(i),
demands_->DemandMin(i),
ProfileEventType::kStartMax);
profile_events_.emplace_back(i, helper_->EndMin(i),
demands_->DemandMin(i),
ProfileEventType::kEndMin);
profile_events_.emplace_back(i, helper_->EndMax(i),
demands_->DemandMin(i),
ProfileEventType::kEndMax);
}
}
profile_events_.emplace_back(-1, kMaxIntegerValue, IntegerValue(0),
ProfileEventType::kSentinel);
IncrementalSort(profile_events_.begin(), profile_events_.end());
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think that if you are not updating profile_events_ incrementally there is no point on sorting it with this method. I think plain std::sort would be cleaner.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You're right, changed the code to truly build the profile incrementally by updating the events rather than building it from scratch each time.

I don't think this drives much benefits yet but it opens the door for further incremental optimization using reversibles similarly to what is done in time-table.


// Iterate on all the windows [-inf, window_end] where window_end is the end
// max of a task.
IntegerValue window_end = kMinIntegerValue;
for (const ProfileEvent event : profile_events_) {
if (event.event_type != ProfileEventType::kEndMax ||
event.time <= window_end) {
continue;
}
window_end = event.time;

const IntegerValue capacity = integer_trail_->UpperBound(capacity_);
if (window_end < ScheduleTasks(window_end, capacity)) {
AddScheduleTaskReason(window_end);
return helper_->ReportConflict();
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If end_min is lower than the result of ScheduleTasks, can't you push a tighter end_min?

Copy link
Author

@rhartert rhartert Oct 20, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good question, I guess we could use ScheduleTasks(window_end, capacity) - 1 but I would have to think a little more about it.

That being said, I don't think this propagator as it stands (compared to its extensions that update the start/end vars) is worth adding to OR-tools. I'd rather prefer to optimize the explanation of these versions.

}
}

return true;
}

// ScheduleTasks returns a lower bound of the earliest time at which a group
// of tasks will complete. The group of tasks is all the tasks finishing before
// the end of the window + the fixed part of the tasks having a mandatory part
// that overlaps with the window. Returns kMaxIntegerValue if some energy
// cannot be scheduled at all before window_end.
IntegerValue HorizontallyElasticOverloadChecker::ScheduleTasks(
    IntegerValue window_end, IntegerValue capacity) {
  // Classify how each task participates in the window [-inf, window_end].
  //
  // TODO: If we apply this only by increasing window_end, then there is no
  // need to re-process the FULL tasks. Only the fixed-part and ignored might
  // change type. Specifically, FIXED-PART can become FULL while IGNORE can
  // either become FIXED-PART or FULL. Said otherwise, FULL is the terminal
  // state.
  for (int i = 0; i < num_tasks_; ++i) {
    if (helper_->EndMax(i) <= window_end) {
      // The task must entirely complete before the end of the window.
      task_types_[i] = TaskType::kFull;
      continue;
    }
    // If the task is external but has a compulsory part that starts before
    // window_end, then we can process it partially.
    if (helper_->StartMax(i) < window_end &&
        helper_->StartMax(i) < helper_->EndMin(i)) {
      task_types_[i] = TaskType::kFixedPart;
      continue;
    }
    // Otherwise, simply mark the task to be ignored during sweep.
    task_types_[i] = TaskType::kIgnore;
  }

  int next_event = 0;
  IntegerValue time = profile_events_[0].time;

  // Estimation of the earliest time at which all the processed tasks can be
  // scheduled.
  IntegerValue new_window_end = kMinIntegerValue;

  // Overload represents the accumulated quantity of energy that could not be
  // consumed before time.
  IntegerValue overload = 0;

  // Total demand required at time if all processed tasks were starting at
  // their start min.
  IntegerValue demand_req = 0;

  // Total demand required at time if all processed tasks that could overlap
  // time were. This is used to avoid placing overload in places where no task
  // would actually be.
  IntegerValue demand_max = 0;

  while (time < window_end) {
    // Aggregate the changes of all events happening at time. How to process
    // an event depends on the type of its task ("full" vs "fixed-part").
    IntegerValue delta_max = 0;
    IntegerValue delta_req = 0;
    while (profile_events_[next_event].time == time) {
      const ProfileEvent event = profile_events_[next_event];
      switch (task_types_[event.task_id]) {
        case TaskType::kIgnore:
          break;  // drop the event
        case TaskType::kFull:
          switch (event.event_type) {
            case ProfileEventType::kStartMin:
              delta_max += event.height;
              delta_req += event.height;
              break;
            case ProfileEventType::kEndMin:
              delta_req -= event.height;
              break;
            case ProfileEventType::kEndMax:
              delta_max -= event.height;
              break;
            default:
              break;
          }
          break;
        case TaskType::kFixedPart:
          // Only the fixed part [start_max, end_min) of the task counts.
          switch (event.event_type) {
            case ProfileEventType::kStartMax:
              delta_max += event.height;
              delta_req += event.height;
              break;
            case ProfileEventType::kEndMin:
              delta_req -= event.height;
              delta_max -= event.height;
              break;
            default:
              break;
          }
          break;
      }
      next_event++;
    }

    // Should always be safe thanks to the sentinel.
    DCHECK_LT(next_event, profile_events_.size());

    IntegerValue next_time = profile_events_[next_event].time;

    demand_max += delta_max;
    demand_req += delta_req;

    DCHECK_LE(demand_req, demand_max);
    DCHECK_GE(overload, 0);

    // The maximum amount of resource that could be consumed if all non-ignored
    // tasks that could be scheduled at the current time were.
    const IntegerValue true_capa = std::min(demand_max, capacity);

    // Indicate whether we're using some capacity at this time point. This is
    // used to decide later on how to update new_window_end.
    const IntegerValue capa_used = std::min(demand_req + overload, true_capa);

    // Resource exceeding the true capacity (if positive) or still available
    // to place some overload from previous time points (if negative).
    const IntegerValue overload_delta = demand_req - true_capa;

    // Length of the interval [time, next_time) the deltas above apply to.
    const IntegerValue length = next_time - time;

    if (overload_delta > 0) {  // adding overload
      overload += length * overload_delta;
    } else if (overload_delta < 0 && overload > 0) {  // remove overload
      const IntegerValue used = std::min(-overload_delta, overload);
      const IntegerValue removable = used * length;
      if (removable < overload) {
        overload -= removable;
      } else {
        // Adjust next_time to indicate that the true "next event" in terms of
        // a change in resource consumption is happening before the next event
        // in the profile. This is important to guarantee that new_window_end
        // is properly adjusted below.
        next_time = time + (overload + used - 1) / used;  // ceil(overload/used)
        overload = 0;
      }
    }

    if (capa_used > 0) {
      // Note that next_time might be earlier than the time of the next event
      // if all the overload was consumed. See comment above.
      new_window_end = next_time;
    }

    time = profile_events_[next_event].time;
  }

  // Energy left over at window_end can never be scheduled inside the window.
  if (overload > 0) {
    return kMaxIntegerValue;
  }
  return new_window_end;
}

// Builds the explanation for a conflict detected on the window ending at
// window_end (the parameter is currently unused by the explanation itself).
// The reason gathers the capacity upper bound plus, for every non-ignored
// task, its start-min, end-max, presence, size-min and demand-min.
void HorizontallyElasticOverloadChecker::AddScheduleTaskReason(
    IntegerValue window_end) {
  helper_->ClearReason();

  // Upper bound of the capacity of the resource, unless it is fixed.
  if (capacity_.var != kNoIntegerVariable) {
    helper_->MutableIntegerReason()->push_back(
        integer_trail_->UpperBoundAsLiteral(capacity_.var));
  }

  for (int t = 0; t < num_tasks_; ++t) {
    if (task_types_[t] == TaskType::kIgnore) continue;

    // kFull and kFixedPart tasks currently share the same explanation.
    // TODO: improve the explanation of kFixedPart tasks.
    helper_->AddStartMinReason(t, helper_->StartMin(t));
    helper_->AddEndMaxReason(t, helper_->EndMax(t));

    helper_->AddPresenceReason(t);
    helper_->AddSizeMinReason(t);
    demands_->AddDemandMinReason(t);
  }
}

} // namespace sat
} // namespace operations_research
Loading