[WIP] Add the horizontal elastic timetable overload checking rule to the cumulative constraint. #4401
base: main
@@ -0,0 +1,290 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/sat/timetable_horizontal_edgefinding.h"

#include <algorithm>
#include <cmath>
#include <vector>

#include "absl/log/check.h"
#include "ortools/base/iterator_adaptors.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/intervals.h"
#include "ortools/sat/model.h"
#include "ortools/util/sort.h"
#include "ortools/util/strong_integers.h"

namespace operations_research {
namespace sat {

HorizontallyElasticOverloadChecker::HorizontallyElasticOverloadChecker(
    AffineExpression capacity, SchedulingConstraintHelper* helper,
    SchedulingDemandHelper* demands, Model* model)
    : num_tasks_(helper->NumTasks()),
      capacity_(capacity),
      helper_(helper),
      demands_(demands),
      integer_trail_(model->GetOrCreate<IntegerTrail>()) {
  task_types_.resize(num_tasks_);
  profile_events_.reserve(4 * num_tasks_ + 1);
}

void HorizontallyElasticOverloadChecker::RegisterWith(
    GenericLiteralWatcher* watcher) {
  const int id = watcher->Register(this);
  watcher->WatchUpperBound(capacity_, id);
  helper_->WatchAllTasks(id, watcher);
  for (int t = 0; t < num_tasks_; t++) {
    watcher->WatchLowerBound(demands_->Demands()[t], id);
  }
  watcher->SetPropagatorPriority(id, 3);
  watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id);
}

bool HorizontallyElasticOverloadChecker::Propagate() {
  if (!helper_->SynchronizeAndSetTimeDirection(true)) return false;
  if (!OverloadCheckerPass()) return false;
  if (!helper_->SynchronizeAndSetTimeDirection(false)) return false;
  if (!OverloadCheckerPass()) return false;
  return true;
}
bool HorizontallyElasticOverloadChecker::OverloadCheckerPass() {
  // Prepare the profile events which will be used during ScheduleTasks to
  // dynamically compute the profile. The events are valid for the entire
  // function and do not need to be recomputed.
  //
  // TODO: This data structure contains everything we need to compute the
  // "classic" profile used in Time-Tabling.
  profile_events_.clear();
  for (int i = 0; i < num_tasks_; i++) {
    if (helper_->IsPresent(i) && demands_->DemandMin(i) > 0) {
      profile_events_.emplace_back(i, helper_->StartMin(i),
                                   demands_->DemandMin(i),
                                   ProfileEventType::kStartMin);
      profile_events_.emplace_back(i, helper_->StartMax(i),
                                   demands_->DemandMin(i),
                                   ProfileEventType::kStartMax);
      profile_events_.emplace_back(i, helper_->EndMin(i),
                                   demands_->DemandMin(i),
                                   ProfileEventType::kEndMin);
      profile_events_.emplace_back(i, helper_->EndMax(i),
                                   demands_->DemandMin(i),
                                   ProfileEventType::kEndMax);
    }
  }
  profile_events_.emplace_back(-1, kMaxIntegerValue, IntegerValue(0),
                               ProfileEventType::kSentinel);
  IncrementalSort(profile_events_.begin(), profile_events_.end());

  // Iterate on all the windows [-inf, window_end] where window_end is the end
  // max of a task.
  IntegerValue window_end = kMinIntegerValue;
  for (const ProfileEvent event : profile_events_) {
    if (event.event_type != ProfileEventType::kEndMax ||
        event.time <= window_end) {
      continue;
    }
    window_end = event.time;

    const IntegerValue capacity = integer_trail_->UpperBound(capacity_);
    if (window_end < ScheduleTasks(window_end, capacity)) {
      AddScheduleTaskReason(window_end);
      return helper_->ReportConflict();

Review comment: If end_min is lower than the result of ScheduleTasks, can't you push a tighter end_min?

Reply: Good question, I guess we could use […]. That being said, I don't think this propagator as it stands (compared to its extensions that update the start/end vars) is worth adding to OR-tools. I'd rather prefer to optimize the explanation of these versions.

    }
  }

  return true;
}

// ScheduleTasks returns a lower bound on the earliest time at which a group
// of tasks can complete. The group contains all the tasks finishing before the
// end of the window, plus the fixed parts of the tasks whose mandatory part
// overlaps the window.
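// Illustrative example (numbers invented, not part of the original comment):
// with capacity 2 and two tasks of demand 2 and size 2 that must both end
// before window_end = 3, the two tasks cannot overlap each other, so the
// group cannot complete before time 4 > 3 and the caller reports a conflict.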
IntegerValue HorizontallyElasticOverloadChecker::ScheduleTasks(
    IntegerValue window_end, IntegerValue capacity) {
  // TODO: If we apply this only by increasing window_end, then there is no
  // need to re-process the FULL tasks. Only the fixed-part and ignored might
  // change type. Specifically, FIXED-PART can become FULL while IGNORE can
  // either become FIXED-PART or FULL. Said otherwise, FULL is the terminal
  // state.
  for (int i = 0; i < num_tasks_; ++i) {
    if (helper_->EndMax(i) <= window_end) {
      task_types_[i] = TaskType::kFull;
      continue;
    }
    // If the task is external but has a compulsory part that starts before
    // window_end, then we can process it partially.
    if (helper_->StartMax(i) < window_end &&
        helper_->StartMax(i) < helper_->EndMin(i)) {
      task_types_[i] = TaskType::kFixedPart;
      continue;
    }
    // Otherwise, simply mark the task to be ignored during the sweep.
    task_types_[i] = TaskType::kIgnore;
  }

  int next_event = 0;
  IntegerValue time = profile_events_[0].time;

  // Estimation of the earliest time at which all the processed tasks can be
  // scheduled.
  IntegerValue new_window_end = kMinIntegerValue;

  // Overload represents the accumulated quantity of energy that could not be
  // consumed before time.
  IntegerValue overload = 0;

  // Total demand required at time if all processed tasks were starting at
  // their start min.
  IntegerValue demand_req = 0;

  // Total demand required at time if all processed tasks that could overlap
  // time were. This is used to avoid placing overload in places where no task
  // would actually be.
  IntegerValue demand_max = 0;

  while (time < window_end) {
    // Aggregate the changes of all events happening at time. How to process
    // an event depends on its type ("full" vs "fixed-part").
    IntegerValue delta_max = 0;
    IntegerValue delta_req = 0;
    while (profile_events_[next_event].time == time) {
      const ProfileEvent event = profile_events_[next_event];
      switch (task_types_[event.task_id]) {
        case TaskType::kIgnore:
          break;  // drop the event
        case TaskType::kFull:
          switch (event.event_type) {
            case ProfileEventType::kStartMin:
              delta_max += event.height;

Review comment: nit: I think changing demand_req and demand_max directly without the deltas might be simpler to understand.

Reply: Agreed, done 👍

(A sketch of this direct-update variant is shown after the event loop below.)

              delta_req += event.height;
              break;
            case ProfileEventType::kEndMin:
              delta_req -= event.height;
              break;
            case ProfileEventType::kEndMax:
              delta_max -= event.height;
              break;
            default:
              break;
          }
          break;
        case TaskType::kFixedPart:
          switch (event.event_type) {
            case ProfileEventType::kStartMax:
              delta_max += event.height;
              delta_req += event.height;
              break;
            case ProfileEventType::kEndMin:
              delta_req -= event.height;
              delta_max -= event.height;
              break;
            default:
              break;
          }
          break;
      }
      next_event++;
    }
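    // Sketch (illustration only, not the committed code): following the nit
    // in the thread above, the deltas could be dropped and demand_req /
    // demand_max updated directly inside the event loop; the
    // "demand_max += delta_max;" and "demand_req += delta_req;" lines below
    // would then disappear. The kFull case would read roughly:
    //
    //   case TaskType::kFull:
    //     switch (event.event_type) {
    //       case ProfileEventType::kStartMin:
    //         demand_max += event.height;
    //         demand_req += event.height;
    //         break;
    //       case ProfileEventType::kEndMin:
    //         demand_req -= event.height;
    //         break;
    //       case ProfileEventType::kEndMax:
    //         demand_max -= event.height;
    //         break;
    //       default:
    //         break;
    //     }
    //     break;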

    // Should always be safe thanks to the sentinel.
    DCHECK_LT(next_event, profile_events_.size());

    IntegerValue next_time = profile_events_[next_event].time;
    const IntegerValue length = next_time - time;

Review comment: nit: please move this definition closer to where it is used.

Reply: Given that […]

    demand_max += delta_max;
    demand_req += delta_req;

    DCHECK_LE(demand_req, demand_max);
    DCHECK_GE(overload, 0);

    // The maximum amount of resource that could be consumed if all non-ignored
    // tasks that could be scheduled at the current time were.
    const IntegerValue true_capa = std::min(demand_max, capacity);

    // Indicates whether we're using some capacity at this time point. This is
    // used to decide later on how to update new_window_end.
    const IntegerValue capa_used = std::min(demand_req + overload, true_capa);

    // Amount of resource available to potentially place some overload from
    // previous time points.
    const IntegerValue overload_delta = demand_req - true_capa;

    if (overload_delta > 0) {  // adding overload

Review comment: nit: I think this logic would be easier to read if the variable naming made clear what is the overload/removable/used for a time unit and what is the equivalent for the time window.

Reply: I wasn't super happy with these names either; what do you think about the new ones? I've also slightly simplified the logic as there's no need for the […]

      overload += length * overload_delta;
    } else if (overload_delta < 0 && overload > 0) {  // remove overload
      const IntegerValue used = std::min(-overload_delta, overload);
      const IntegerValue removable = used * length;
      if (removable < overload) {
        overload -= removable;
      } else {
        // Adjust next_time to indicate that the true "next event" in terms of
        // a change in resource consumption is happening before the next event
        // in the profile. This is important to guarantee that new_window_end
        // is properly adjusted below.
        next_time = time + (overload + used - 1) / used;  // ceil(overload/used)
        overload = 0;
      }
    }

Review comment: I wonder if these formulas from the paper could be simplified much further.

WDYT? The paper looks like it is scheduling each time point by hand, does this approach miss something?
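An illustrative, standalone example of the per-segment arithmetic above (plain int64_t stands in for IntegerValue and the numbers are invented): with capacity 3, demand_req 5, demand_max 6 and a segment of length 4, two units of energy per time unit cannot run, so 8 units of overload are carried into later segments, where the removal branch absorbs them at a rate of min(-overload_delta, overload) per time unit.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  // One sweep segment: capacity = 3, demand_req = 5, demand_max = 6,
  // length = 4, no overload carried in from earlier segments.
  const int64_t capacity = 3, demand_req = 5, demand_max = 6, length = 4;
  int64_t overload = 0;

  const int64_t true_capa = std::min(demand_max, capacity);     // 3
  const int64_t overload_delta = demand_req - true_capa;        // 5 - 3 = 2
  if (overload_delta > 0) overload += length * overload_delta;  // 4 * 2 = 8

  std::cout << "overload carried past this segment: " << overload << "\n";
  return 0;
}
```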

    if (capa_used > 0) {
      // Note that next_time might be earlier than the time of the next event
      // if all the overload was consumed. See comment above.
      new_window_end = next_time;
    }

    time = profile_events_[next_event].time;
  }

  if (overload > 0) {
    return kMaxIntegerValue;
  }
  return new_window_end;
}

void HorizontallyElasticOverloadChecker::AddScheduleTaskReason(
    IntegerValue window_end) {
  helper_->ClearReason();

  // Capacity of the resource.
  if (capacity_.var != kNoIntegerVariable) {
    helper_->MutableIntegerReason()->push_back(
        integer_trail_->UpperBoundAsLiteral(capacity_.var));
  }

  for (int t = 0; t < num_tasks_; ++t) {
    switch (task_types_[t]) {
      case TaskType::kFull:
        helper_->AddStartMinReason(t, helper_->StartMin(t));
        helper_->AddEndMaxReason(t, helper_->EndMax(t));
        break;
      case TaskType::kFixedPart:
        // TODO: improve.
        helper_->AddStartMinReason(t, helper_->StartMin(t));
        helper_->AddEndMaxReason(t, helper_->EndMax(t));
        break;
      case TaskType::kIgnore:
        continue;
    }

    helper_->AddPresenceReason(t);
    helper_->AddSizeMinReason(t);
    demands_->AddDemandMinReason(t);
  }
}

}  // namespace sat
}  // namespace operations_research
Review comment: I think that if you are not updating profile_events_ incrementally there is no point in sorting it with this method. I think plain std::sort would be cleaner.

Reply: You're right, I changed the code to truly build the profile incrementally by updating the events rather than building it from scratch each time. I don't think this brings much benefit yet, but it opens the door for further incremental optimization using reversibles, similarly to what is done in time-table.
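A rough sketch of what updating the events in place could look like, reusing only the fields and helper calls visible in the diff above (task_id, time, height, event_type, StartMin/StartMax/EndMin/EndMax, DemandMin). This is only an illustration of the idea, not the code from the later commits, and it ignores tasks whose presence status changed since the events were created:

```cpp
// Hedged sketch: refresh the stored events from the current bounds, then
// re-sort. IncrementalSort (from ortools/util/sort.h, already included above)
// is cheap when the order changed little since the previous call.
for (ProfileEvent& event : profile_events_) {
  if (event.task_id < 0) continue;  // Skip the sentinel.
  switch (event.event_type) {
    case ProfileEventType::kStartMin:
      event.time = helper_->StartMin(event.task_id);
      break;
    case ProfileEventType::kStartMax:
      event.time = helper_->StartMax(event.task_id);
      break;
    case ProfileEventType::kEndMin:
      event.time = helper_->EndMin(event.task_id);
      break;
    case ProfileEventType::kEndMax:
      event.time = helper_->EndMax(event.task_id);
      break;
    default:
      break;
  }
  event.height = demands_->DemandMin(event.task_id);
}
IncrementalSort(profile_events_.begin(), profile_events_.end());
```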