diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index fea1b866304..1b6d0fe9993 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -504,11 +504,16 @@ void BinaryImplicationGraph::Resize(int num_variables) { } // TODO(user): Not all of the solver knows about representative literal, do -// use them here and in AddBinaryClauseDuringSearch() to maintains invariant? -// Explore this when we start cleaning our clauses using equivalence during -// search. We can easily do it for every conflict we learn instead of here. -void BinaryImplicationGraph::AddBinaryClause(Literal a, Literal b) { +// use them here to maintains invariant? Explore this when we start cleaning our +// clauses using equivalence during search. We can easily do it for every +// conflict we learn instead of here. +bool BinaryImplicationGraph::AddBinaryClause(Literal a, Literal b) { SCOPED_TIME_STAT(&stats_); + + // Tricky: If this is the first clause, the propagator will be added and + // assumed to be in a "propagated" state. This makes sure this is the case. + if (IsEmpty()) propagation_trail_index_ = trail_->Index(); + if (drat_proof_handler_ != nullptr) { // TODO(user): Like this we will duplicate all binary clause from the // problem. However this leads to a simpler API (since we don't need to @@ -516,38 +521,34 @@ void BinaryImplicationGraph::AddBinaryClause(Literal a, Literal b) { // proof for testing anyway. drat_proof_handler_->AddClause({a, b}); } + estimated_sizes_[a.NegatedIndex()]++; estimated_sizes_[b.NegatedIndex()]++; implications_[a.NegatedIndex()].push_back(b); implications_[b.NegatedIndex()].push_back(a); is_dag_ = false; num_implications_ += 2; -} - -bool BinaryImplicationGraph::AddBinaryClauseDuringSearch(Literal a, Literal b) { - SCOPED_TIME_STAT(&stats_); - - // Tricky: If this is the first clause, the propagator will be added and - // assumed to be in a "propagated" state. This makes sure this is the case. - if (IsEmpty()) propagation_trail_index_ = trail_->Index(); - - AddBinaryClause(a, b); const auto& assignment = trail_->Assignment(); - if (assignment.LiteralIsFalse(a)) { - if (assignment.LiteralIsAssigned(b)) { - if (assignment.LiteralIsFalse(b)) return false; - } else { - reasons_[trail_->Index()] = a; - trail_->Enqueue(b, propagator_id_); - } - } else if (assignment.LiteralIsFalse(b)) { - if (!assignment.LiteralIsAssigned(a)) { - reasons_[trail_->Index()] = b; - trail_->Enqueue(a, propagator_id_); + if (trail_->CurrentDecisionLevel() == 0) { + CHECK(!assignment.LiteralIsAssigned(a)); + CHECK(!assignment.LiteralIsAssigned(b)); + } else { + if (assignment.LiteralIsFalse(a)) { + if (assignment.LiteralIsAssigned(b)) { + if (assignment.LiteralIsFalse(b)) return false; + } else { + reasons_[trail_->Index()] = a; + trail_->Enqueue(b, propagator_id_); + } + } else if (assignment.LiteralIsFalse(b)) { + if (!assignment.LiteralIsAssigned(a)) { + reasons_[trail_->Index()] = b; + trail_->Enqueue(a, propagator_id_); + } } } - is_dag_ = false; + return true; } diff --git a/ortools/sat/clause.h b/ortools/sat/clause.h index 125a9c8bab1..7195d947302 100644 --- a/ortools/sat/clause.h +++ b/ortools/sat/clause.h @@ -488,18 +488,19 @@ class BinaryImplicationGraph : public SatPropagator { // Adds the binary clause (a OR b), which is the same as (not a => b). // Note that it is also equivalent to (not b => a). 
- void AddBinaryClause(Literal a, Literal b); - void AddImplication(Literal a, Literal b) { + // + // Preconditions: + // - If we are at the root node, then none of the literals should be assigned. + // This is CHECKed and is there to track inefficiency, as we do not need a + // clause in this case. + // - If we are at a positive decision level, we will propagate something if + // we can. However, if both literals are false, we will just return false + // and do nothing. In all other cases, we will return true. + bool AddBinaryClause(Literal a, Literal b); + bool AddImplication(Literal a, Literal b) { return AddBinaryClause(a.Negated(), b); } - // Same as AddBinaryClause() but enqueues a possible unit propagation. Note - // that if the binary clause propagates, it must do so at the last level, this - // is DCHECKed. - // - // Return false and do nothing if both a and b are currently false. - bool AddBinaryClauseDuringSearch(Literal a, Literal b); - // An at most one constraint of size n is a compact way to encode n * (n - 1) // implications. This must only be called at level zero. // diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index 5819ea924e7..8adcd6bf5e2 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -1535,16 +1535,7 @@ void LoadLinMaxConstraint(const ConstraintProto& ct, Model* m) { void LoadNoOverlapConstraint(const ConstraintProto& ct, Model* m) { auto* mapping = m->GetOrCreate(); - auto* params = m->GetOrCreate(); - const int num_intervals = ct.no_overlap().intervals_size(); - if (num_intervals <= - params->max_size_to_create_precedence_literals_in_disjunctive() && - params->use_strong_propagation_in_disjunctive()) { - AddDisjunctiveWithBooleanPrecedences( - mapping->Intervals(ct.no_overlap().intervals()), m); - } else { - m->Add(Disjunctive(mapping->Intervals(ct.no_overlap().intervals()))); - } + AddDisjunctive(mapping->Intervals(ct.no_overlap().intervals()), m); } void LoadNoOverlap2dConstraint(const ConstraintProto& ct, Model* m) { diff --git a/ortools/sat/cp_model_presolve.h b/ortools/sat/cp_model_presolve.h index 50fdef29354..9fcf4b0430c 100644 --- a/ortools/sat/cp_model_presolve.h +++ b/ortools/sat/cp_model_presolve.h @@ -16,6 +16,7 @@ #include #include +#include #include #include diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index 59954e023be..5de6dea7838 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -342,7 +342,17 @@ std::function ConstructUserSearchStrategy( std::function ConstructHeuristicSearchStrategy( const CpModelProto& cp_model_proto, Model* model) { if (ModelHasSchedulingConstraints(cp_model_proto)) { - return SchedulingSearchHeuristic(model); + if (model->GetOrCreate() + ->use_dynamic_precedence_in_disjunctive()) { + // We combine the two, because the precedences only work for disjunctives, + // and not if we only have cumulative constraints. + return SequentialSearch({SchedulingPrecedenceSearchHeuristic(model), + SchedulingSearchHeuristic(model)}); + } else { + // Precedence-based search seems better, but the other one leads to faster + // solutions.
+ return SchedulingSearchHeuristic(model); + } } return PseudoCost(model); } @@ -616,6 +626,7 @@ absl::flat_hash_map GetNamedParameters( strategies["auto"] = new_params; new_params.set_search_branching(SatParameters::FIXED_SEARCH); + new_params.set_use_dynamic_precedence_in_disjunctive(false); strategies["fixed"] = new_params; } diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 1883cc4329f..d86560922bb 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -126,6 +126,11 @@ ABSL_FLAG(bool, cp_model_dump_models, false, "format to 'FLAGS_cp_model_dump_prefix'{model|presolved_model|" "mapping_model}.pb.txt."); +ABSL_FLAG( + bool, cp_model_export_model, false, + "DEBUG ONLY. When set to true, SolveCpModel() will dump its input model " + "protos in text format to 'FLAGS_cp_model_dump_prefix'{name}.pb.txt."); + ABSL_FLAG(bool, cp_model_dump_text_proto, true, "DEBUG ONLY, dump models in text proto instead of binary proto."); @@ -194,7 +199,7 @@ std::string Summarize(absl::string_view input) { } template -void DumpModelProto(const M& proto, const std::string& name) { +void DumpModelProto(const M& proto, absl::string_view name) { std::string filename; if (absl::GetFlag(FLAGS_cp_model_dump_text_proto)) { filename = absl::StrCat(absl::GetFlag(FLAGS_cp_model_dump_prefix), name, @@ -1736,7 +1741,7 @@ void LoadCpModel(const CpModelProto& model_proto, Model* model) { // Report the initial objective variable bounds. auto* integer_trail = model->GetOrCreate(); shared_response_manager->UpdateInnerObjectiveBounds( - absl::StrCat(model->Name(), " initial_propagation"), + absl::StrCat(model->Name(), " (initial_propagation)"), integer_trail->LowerBound(objective_var), integer_trail->UpperBound(objective_var)); @@ -1858,6 +1863,13 @@ void SolveLoadedCpModel(const CpModelProto& model_proto, Model* model) { } }; + // Make sure we are not at a positive level. + if (!model->GetOrCreate()->ResetToLevelZero()) { + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + model->Name()); + return; + } + // Reconfigure search heuristic if it was changed.
ConfigureSearchHeuristics(model); @@ -2788,8 +2800,8 @@ class ObjectiveShavingSolver : public SubSolver { private: std::string Info() { - return absl::StrCat(name(), " #vars=", local_proto_.variables().size(), - " #csts=", local_proto_.constraints().size()); + return absl::StrCat(name(), " (vars=", local_proto_.variables().size(), + " csts=", local_proto_.constraints().size(), ")"); } bool ResetModel() { @@ -3082,7 +3094,7 @@ class LnsSolver : public SubSolver { static_cast(generator_->num_fully_solved_calls()) / static_cast(num_calls); const std::string lns_info = absl::StrFormat( - "%s(d=%0.2f s=%i t=%0.2f p=%0.2f stall=%d h=%s)", source_info, + "%s (d=%0.2f s=%i t=%0.2f p=%0.2f stall=%d h=%s)", source_info, data.difficulty, task_id, data.deterministic_limit, fully_solved_proportion, stall, search_info); @@ -3991,6 +4003,13 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { if (absl::GetFlag(FLAGS_cp_model_dump_models)) { DumpModelProto(model_proto, "model"); } + if (absl::GetFlag(FLAGS_cp_model_export_model)) { + if (model_proto.name().empty()) { + DumpModelProto(model_proto, "unnamed_model"); + } else { + DumpModelProto(model_proto, model_proto.name()); + } + } #endif // __PORTABLE_PLATFORM__ #if !defined(__PORTABLE_PLATFORM__) diff --git a/ortools/sat/cumulative.cc b/ortools/sat/cumulative.cc index d18b52afed8..75dc829da67 100644 --- a/ortools/sat/cumulative.cc +++ b/ortools/sat/cumulative.cc @@ -143,7 +143,7 @@ std::function Cumulative( // // TODO(user): A better place for stuff like this could be in the // presolver so that it is easier to disable and play with alternatives. - if (in_disjunction.size() > 1) model->Add(Disjunctive(in_disjunction)); + if (in_disjunction.size() > 1) AddDisjunctive(in_disjunction, model); if (in_disjunction.size() == vars.size()) return; } diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index e5afafcf1c8..63c616bc176 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -283,12 +283,8 @@ NonOverlappingRectanglesDisjunctivePropagator:: x_(x->NumTasks(), model), watcher_(model->GetOrCreate()), overload_checker_(&x_), - forward_detectable_precedences_( - true, model->GetOrCreate(), - model->GetOrCreate(), &x_), - backward_detectable_precedences_( - false, model->GetOrCreate(), - model->GetOrCreate(), &x_), + forward_detectable_precedences_(true, &x_), + backward_detectable_precedences_(false, &x_), forward_not_last_(true, &x_), backward_not_last_(false, &x_), forward_edge_finding_(true, &x_), diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index b75af997af7..9b25f46c26b 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -37,183 +37,122 @@ namespace operations_research { namespace sat { -std::function Disjunctive( - const std::vector& intervals) { - return [=](Model* model) { - bool is_all_different = true; - IntervalsRepository* repository = model->GetOrCreate(); - for (const IntervalVariable var : intervals) { - if (repository->IsOptional(var) || repository->MinSize(var) != 1 || - repository->MaxSize(var) != 1) { - is_all_different = false; - break; - } +void AddDisjunctive(const std::vector& intervals, + Model* model) { + // Depending on the parameters, create all pair of conditional precedences. + // TODO(user): create them dynamically instead? 
+ const auto& params = *model->GetOrCreate(); + if (intervals.size() <= + params.max_size_to_create_precedence_literals_in_disjunctive() && + params.use_strong_propagation_in_disjunctive()) { + AddDisjunctiveWithBooleanPrecedencesOnly(intervals, model); + } + + bool is_all_different = true; + IntervalsRepository* repository = model->GetOrCreate(); + for (const IntervalVariable var : intervals) { + if (repository->IsOptional(var) || repository->MinSize(var) != 1 || + repository->MaxSize(var) != 1) { + is_all_different = false; + break; } - if (is_all_different) { - std::vector starts; - starts.reserve(intervals.size()); - for (const IntervalVariable interval : intervals) { - starts.push_back(repository->Start(interval)); - } - model->Add(AllDifferentOnBounds(starts)); - return; + } + if (is_all_different) { + std::vector starts; + starts.reserve(intervals.size()); + for (const IntervalVariable interval : intervals) { + starts.push_back(repository->Start(interval)); } + model->Add(AllDifferentOnBounds(starts)); + return; + } - auto* watcher = model->GetOrCreate(); - const auto& sat_parameters = *model->GetOrCreate(); - if (intervals.size() > 2 && sat_parameters.use_combined_no_overlap()) { - model->GetOrCreate>()->AddNoOverlap(intervals); - model->GetOrCreate>()->AddNoOverlap(intervals); - return; - } + auto* watcher = model->GetOrCreate(); + if (intervals.size() > 2 && params.use_combined_no_overlap()) { + model->GetOrCreate>()->AddNoOverlap(intervals); + model->GetOrCreate>()->AddNoOverlap(intervals); + return; + } - SchedulingConstraintHelper* helper = - new SchedulingConstraintHelper(intervals, model); - model->TakeOwnership(helper); - - // Experiments to use the timetable only to propagate the disjunctive. - if (/*DISABLES_CODE*/ (false)) { - const AffineExpression one(IntegerValue(1)); - std::vector demands(intervals.size(), one); - SchedulingDemandHelper* demands_helper = - model->GetOrCreate()->GetOrCreateDemandHelper( - helper, demands); - - TimeTablingPerTask* timetable = - new TimeTablingPerTask(one, helper, demands_helper, model); - timetable->RegisterWith(watcher); - model->TakeOwnership(timetable); - return; - } + SchedulingConstraintHelper* helper = repository->GetOrCreateHelper( + intervals, /*register_as_disjunctive_helper=*/true); + + // Experiments to use the timetable only to propagate the disjunctive. + if (/*DISABLES_CODE*/ (false)) { + const AffineExpression one(IntegerValue(1)); + std::vector demands(intervals.size(), one); + SchedulingDemandHelper* demands_helper = + repository->GetOrCreateDemandHelper(helper, demands); + + TimeTablingPerTask* timetable = + new TimeTablingPerTask(one, helper, demands_helper, model); + timetable->RegisterWith(watcher); + model->TakeOwnership(timetable); + return; + } - if (intervals.size() == 2) { - DisjunctiveWithTwoItems* propagator = new DisjunctiveWithTwoItems(helper); - propagator->RegisterWith(watcher); - model->TakeOwnership(propagator); - } else { - // We decided to create the propagators in this particular order, but it - // shouldn't matter much because of the different priorities used. - { - // Only one direction is needed by this one. 
- DisjunctiveOverloadChecker* overload_checker = - new DisjunctiveOverloadChecker(helper); - const int id = overload_checker->RegisterWith(watcher); - watcher->SetPropagatorPriority(id, 1); - model->TakeOwnership(overload_checker); - } - for (const bool time_direction : {true, false}) { - DisjunctiveDetectablePrecedences* detectable_precedences = - new DisjunctiveDetectablePrecedences( - time_direction, model->GetOrCreate(), - model->GetOrCreate(), helper); - const int id = detectable_precedences->RegisterWith(watcher); - watcher->SetPropagatorPriority(id, 2); - model->TakeOwnership(detectable_precedences); - } - for (const bool time_direction : {true, false}) { - DisjunctiveNotLast* not_last = - new DisjunctiveNotLast(time_direction, helper); - const int id = not_last->RegisterWith(watcher); - watcher->SetPropagatorPriority(id, 3); - model->TakeOwnership(not_last); - } - for (const bool time_direction : {true, false}) { - DisjunctiveEdgeFinding* edge_finding = - new DisjunctiveEdgeFinding(time_direction, helper); - const int id = edge_finding->RegisterWith(watcher); - watcher->SetPropagatorPriority(id, 4); - model->TakeOwnership(edge_finding); - } + if (intervals.size() == 2) { + DisjunctiveWithTwoItems* propagator = new DisjunctiveWithTwoItems(helper); + propagator->RegisterWith(watcher); + model->TakeOwnership(propagator); + } else { + // We decided to create the propagators in this particular order, but it + // shouldn't matter much because of the different priorities used. + { + // Only one direction is needed by this one. + DisjunctiveOverloadChecker* overload_checker = + new DisjunctiveOverloadChecker(helper); + const int id = overload_checker->RegisterWith(watcher); + watcher->SetPropagatorPriority(id, 1); + model->TakeOwnership(overload_checker); } - - // Note that we keep this one even when there is just two intervals. This is - // because it might push a variable that is after both of the intervals - // using the fact that they are in disjunction. 
- if (sat_parameters.use_precedences_in_disjunctive_constraint() && - !sat_parameters.use_combined_no_overlap()) { - for (const bool time_direction : {true, false}) { - DisjunctivePrecedences* precedences = new DisjunctivePrecedences( - time_direction, helper, model->GetOrCreate(), - model->GetOrCreate()); - const int id = precedences->RegisterWith(watcher); - watcher->SetPropagatorPriority(id, 5); - model->TakeOwnership(precedences); - } + for (const bool time_direction : {true, false}) { + DisjunctiveDetectablePrecedences* detectable_precedences = + new DisjunctiveDetectablePrecedences(time_direction, helper); + const int id = detectable_precedences->RegisterWith(watcher); + watcher->SetPropagatorPriority(id, 2); + model->TakeOwnership(detectable_precedences); } - }; -} - -void AddDisjunctiveWithBooleanPrecedencesOnly( - const std::vector& intervals, Model* model) { - SatSolver* sat_solver = model->GetOrCreate(); - IntegerTrail* integer_trail = model->GetOrCreate(); - IntervalsRepository* repository = model->GetOrCreate(); - std::vector enforcement_literals; - for (int i = 1; i < intervals.size(); ++i) { - enforcement_literals.clear(); - const AffineExpression start_i = repository->Start(intervals[i]); - const AffineExpression end_i = repository->End(intervals[i]); - if (repository->IsOptional(intervals[i])) { - enforcement_literals.push_back(repository->PresenceLiteral(intervals[i])); + for (const bool time_direction : {true, false}) { + DisjunctiveNotLast* not_last = + new DisjunctiveNotLast(time_direction, helper); + const int id = not_last->RegisterWith(watcher); + watcher->SetPropagatorPriority(id, 3); + model->TakeOwnership(not_last); } - const int enforcement_literals_size = enforcement_literals.size(); - - for (int j = 0; j < i; ++j) { - enforcement_literals.resize(enforcement_literals_size); - const AffineExpression start_j = repository->Start(intervals[j]); - const AffineExpression end_j = repository->End(intervals[j]); - if (repository->IsOptional(intervals[j])) { - enforcement_literals.push_back( - repository->PresenceLiteral(intervals[j])); - } + for (const bool time_direction : {true, false}) { + DisjunctiveEdgeFinding* edge_finding = + new DisjunctiveEdgeFinding(time_direction, helper); + const int id = edge_finding->RegisterWith(watcher); + watcher->SetPropagatorPriority(id, 4); + model->TakeOwnership(edge_finding); + } + } - DCHECK_LE(enforcement_literals.size(), 2); - - if (integer_trail->UpperBound(start_i) < - integer_trail->LowerBound(end_j)) { - // task_i is always before task_j. - AddConditionalAffinePrecedence(enforcement_literals, end_i, start_j, - model); - } else if (integer_trail->UpperBound(start_j) < - integer_trail->LowerBound(end_i)) { - // task_j is always before task_i. - AddConditionalAffinePrecedence(enforcement_literals, end_j, start_i, - model); - } else { - // TODO(user): Cache boolean_var. - const BooleanVariable boolean_var = sat_solver->NewBooleanVariable(); - const Literal i_before_j = Literal(boolean_var, true); - enforcement_literals.push_back(i_before_j); - AddConditionalAffinePrecedence(enforcement_literals, end_i, start_j, - model); - DCHECK_LE(enforcement_literals.size(), 3); - enforcement_literals.pop_back(); - enforcement_literals.push_back(i_before_j.Negated()); - AddConditionalAffinePrecedence(enforcement_literals, end_j, start_i, - model); - DCHECK_LE(enforcement_literals.size(), 3); - enforcement_literals.pop_back(); - - // Force the value of boolean_var in case the precedence is not - // active. 
This avoids duplicate solutions when enumerating all - // possible solutions. - if (repository->IsOptional(intervals[i])) { - model->Add(Implication( - repository->PresenceLiteral(intervals[i]).Negated(), i_before_j)); - } - if (repository->IsOptional(intervals[j])) { - model->Add(Implication( - repository->PresenceLiteral(intervals[j]).Negated(), i_before_j)); - } - } + // Note that we keep this one even when there is just two intervals. This is + // because it might push a variable that is after both of the intervals + // using the fact that they are in disjunction. + if (params.use_precedences_in_disjunctive_constraint() && + !params.use_combined_no_overlap()) { + for (const bool time_direction : {true, false}) { + DisjunctivePrecedences* precedences = new DisjunctivePrecedences( + time_direction, helper, model->GetOrCreate(), + model->GetOrCreate()); + const int id = precedences->RegisterWith(watcher); + watcher->SetPropagatorPriority(id, 5); + model->TakeOwnership(precedences); } } } -void AddDisjunctiveWithBooleanPrecedences( +void AddDisjunctiveWithBooleanPrecedencesOnly( const std::vector& intervals, Model* model) { - AddDisjunctiveWithBooleanPrecedencesOnly(intervals, model); - model->Add(Disjunctive(intervals)); + auto* repo = model->GetOrCreate(); + for (int i = 0; i < intervals.size(); ++i) { + for (int j = i + 1; j < intervals.size(); ++j) { + repo->CreateDisjunctivePrecedenceLiteral(intervals[i], intervals[j]); + } + } } void TaskSet::AddEntry(const Entry& e) { @@ -870,65 +809,66 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow() { // We need: // - StartMax(ct) < EndMin(t) for the detectable precedence. // - StartMin(ct) >= window_start for the value of task_set_end_min. - const AffineExpression after = helper_->Starts()[t]; const IntegerValue end_min_if_present = helper_->ShiftedStartMin(t) + helper_->SizeMin(t); const IntegerValue window_start = sorted_tasks[critical_index].start_min; IntegerValue min_slack = kMaxIntegerValue; - bool all_statically_before = true; + bool all_already_before = true; + IntegerValue energy_of_task_before = 0; for (int i = critical_index; i < sorted_tasks.size(); ++i) { const int ct = sorted_tasks[i].task; DCHECK_NE(ct, t); helper_->AddPresenceReason(ct); - helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min, - window_start); + + // Heuristic, if some tasks are known to be after the first one, + // we just add the min-size as a reason. + if (i > critical_index && helper_->GetCurrentMinDistanceBetweenTasks( + sorted_tasks[critical_index].task, ct, + /*add_reason_if_after=*/true) >= 0) { + helper_->AddSizeMinReason(ct); + } else { + helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min, + window_start); + } // We only need the reason for being before if we don't already have // a static precedence between the tasks. 
- const AffineExpression before = helper_->Ends()[ct]; - const IntegerValue needed = before.constant - after.constant; - const IntegerValue known = - precedence_relations_->GetOffset(before.var, after.var); - if (known < needed || before.coeff != 1 || after.coeff != 1) { - all_statically_before = false; - helper_->AddStartMaxReason(ct, end_min_if_present - 1); + const IntegerValue dist = helper_->GetCurrentMinDistanceBetweenTasks( + ct, t, /*add_reason_if_after=*/true); + if (dist >= 0) { + energy_of_task_before += sorted_tasks[i].size_min; + min_slack = std::min(min_slack, dist); } else { - min_slack = std::min(min_slack, known - needed); + all_already_before = false; + helper_->AddStartMaxReason(ct, end_min_if_present - 1); } } // We only need the end-min of t if not all the task are already known // to be before. IntegerValue new_start_min = task_set_end_min; - if (all_statically_before) { - // We can actually push further ! + if (all_already_before) { + // We can actually push further! + // And we don't need any other reason except the precedences. new_start_min += min_slack; } else { helper_->AddEndMinReason(t, end_min_if_present); } + // In some situations, we can push the task further. + // TODO(user): We can also reduce the reason in this case. + if (min_slack != kMaxIntegerValue && + window_start + energy_of_task_before + min_slack > new_start_min) { + new_start_min = window_start + energy_of_task_before + min_slack; + } + // If we detect precedences at level zero, lets add them to the // precedence propagator. The hope is that this leads to faster // propagation with better reason if they ever trigger. if (helper_->CurrentDecisionLevel() == 0 && helper_->IsPresent(t)) { - if (after.coeff == 1 && after.var != kNoIntegerVariable) { - for (int i = critical_index; i < sorted_tasks.size(); ++i) { - const AffineExpression before = - helper_->Ends()[sorted_tasks[i].task]; - if (before.coeff == 1 && before.var != kNoIntegerVariable) { - const IntegerValue offset = before.constant - after.constant; - precedence_relations_->UpdateOffset(before.var, after.var, - offset); - if (precedences_->AddPrecedenceWithOffsetIfNew( - before.var, after.var, offset)) { - VLOG(2) << t << " after #" - << sorted_tasks.size() - critical_index << " " - << before.DebugString() << " " << after.DebugString() - << " " << offset; - } - } - } + for (int i = critical_index; i < sorted_tasks.size(); ++i) { + helper_->AddLevelZeroPrecedence(sorted_tasks[i].task, t); } } @@ -1237,7 +1177,10 @@ bool DisjunctiveNotLast::PropagateSubwindow() { const int ct = sorted_tasks[i].task; if (t == ct) continue; const IntegerValue start_max = helper_->StartMax(ct); - if (start_max > largest_ct_start_max) { + + // If we already know that t is after ct, we can have a tighter start-max. + if (start_max > largest_ct_start_max && + helper_->GetCurrentMinDistanceBetweenTasks(ct, t) < 0) { largest_ct_start_max = start_max; } } @@ -1256,7 +1199,10 @@ bool DisjunctiveNotLast::PropagateSubwindow() { helper_->AddPresenceReason(ct); helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min, window_start); - helper_->AddStartMaxReason(ct, largest_ct_start_max); + if (helper_->GetCurrentMinDistanceBetweenTasks( + ct, t, /*add_reason_if_after=*/true) < 0) { + helper_->AddStartMaxReason(ct, largest_ct_start_max); + } } // Add the reason for t, we only need the start-max.
@@ -1431,21 +1377,41 @@ bool DisjunctiveEdgeFinding::PropagateSubwindow(IntegerValue window_end_min) { const int critical_event = theta_tree_.GetMaxEventWithEnvelopeGreaterThan(non_gray_end_min - 1); - const int first_event = - std::min(critical_event, critical_event_with_gray); - const int second_event = - std::max(critical_event, critical_event_with_gray); + + // Even if we need fewer tasks to explain the overload, because we are + // going to explain the full non_gray_end_min, we can relax the + // explanation. + if (critical_event_with_gray > critical_event) { + critical_event_with_gray = critical_event; + } + + const int first_event = critical_event_with_gray; + const int second_event = critical_event; const IntegerValue first_start = window_[first_event].time; const IntegerValue second_start = window_[second_event].time; // window_end is chosen to be has big as possible and still have an // overload if the gray task is not last. - const IntegerValue window_end = - non_gray_end_max + event_size_[gray_event] - available_energy - 1; + // + // If gray_task has a variable duration, it is possible that it must + // be last just because its end-min is large. + bool use_energy_reason = true; + IntegerValue window_end; + if (helper_->EndMin(gray_task) > non_gray_end_max) { + // This is actually a form of detectable precedence. + // + // TODO(user): We could relax the reason more, but this happens really + // rarely it seems. + use_energy_reason = false; + window_end = helper_->EndMin(gray_task) - 1; + } else { + window_end = non_gray_end_min + event_size_[gray_event] - 1; + } CHECK_GE(window_end, non_gray_end_max); // The non-gray part of the explanation as detailed above. helper_->ClearReason(); + bool one_before = false; for (int event = first_event; event < window_size; event++) { const int task = window_[event].task_index; if (is_gray_[task]) continue; @@ -1453,13 +1419,39 @@ bool DisjunctiveEdgeFinding::PropagateSubwindow(IntegerValue window_end_min) { helper_->AddEnergyAfterReason( task, event_size_[event], event >= second_event ? second_start : first_start); - helper_->AddEndMaxReason(task, window_end); + + const IntegerValue dist = helper_->GetCurrentMinDistanceBetweenTasks( + task, gray_task, /*add_reason_if_after=*/true); + if (dist >= 0) { + one_before = true; + } else { + helper_->AddEndMaxReason(task, window_end); + } } // Add the reason for the gray_task (we don't need the end-max or // presence reason). - helper_->AddEnergyAfterReason(gray_task, event_size_[gray_event], - window_[critical_event_with_gray].time); + if (one_before) { + helper_->AddSizeMinReason(gray_task); + } else if (use_energy_reason) { + helper_->AddEnergyAfterReason(gray_task, event_size_[gray_event], + window_[critical_event_with_gray].time); + } else { + helper_->AddEndMinReason(gray_task, helper_->EndMin(gray_task)); + } + + // If we detect precedences at level zero, let's add them to the + // precedence propagator. The hope is that this leads to faster + // propagation with better reason if they ever trigger. + if (helper_->CurrentDecisionLevel() == 0 && + helper_->IsPresent(gray_task)) { + for (int i = first_event; i < window_size; ++i) { + const int task = window_[i].task_index; + if (!is_gray_[task]) { + helper_->AddLevelZeroPrecedence(task, gray_task); + } + } + } // Enqueue the new start-min for gray_task.
// diff --git a/ortools/sat/disjunctive.h b/ortools/sat/disjunctive.h index 01336de205e..7c5e3cf3409 100644 --- a/ortools/sat/disjunctive.h +++ b/ortools/sat/disjunctive.h @@ -36,8 +36,8 @@ namespace sat { // // TODO(user): This is not completely true for empty intervals (start == end). // Make sure such intervals are ignored by the constraint. -std::function Disjunctive( - const std::vector& intervals); +void AddDisjunctive(const std::vector& intervals, + Model* model); // Creates Boolean variables for all the possible precedences of the form (task // i is before task j) and forces that, for each couple of task (i,j), either i @@ -45,10 +45,6 @@ std::function Disjunctive( void AddDisjunctiveWithBooleanPrecedencesOnly( const std::vector& intervals, Model* model); -// Same as Disjunctive() + DisjunctiveWithBooleanPrecedencesOnly(). -void AddDisjunctiveWithBooleanPrecedences( - const std::vector& intervals, Model* model); - // Helper class to compute the end-min of a set of tasks given their start-min // and size-min. In Petr Vilim's PhD "Global Constraints in Scheduling", // this corresponds to his Theta-tree except that we use a O(n) implementation @@ -159,12 +155,8 @@ class DisjunctiveOverloadChecker : public PropagatorInterface { class DisjunctiveDetectablePrecedences : public PropagatorInterface { public: DisjunctiveDetectablePrecedences(bool time_direction, - PrecedenceRelations* relations, - PrecedencesPropagator* precedences, SchedulingConstraintHelper* helper) : time_direction_(time_direction), - precedence_relations_(relations), - precedences_(precedences), helper_(helper), task_set_(helper->NumTasks()) {} bool Propagate() final; @@ -180,8 +172,6 @@ class DisjunctiveDetectablePrecedences : public PropagatorInterface { std::vector to_propagate_; const bool time_direction_; - PrecedenceRelations* precedence_relations_; - PrecedencesPropagator* precedences_; SchedulingConstraintHelper* helper_; TaskSet task_set_; }; diff --git a/ortools/sat/docs/scheduling.md b/ortools/sat/docs/scheduling.md index aacd2233ee0..b61a8e98301 100644 --- a/ortools/sat/docs/scheduling.md +++ b/ortools/sat/docs/scheduling.md @@ -655,6 +655,155 @@ A cumulative constraint takes a list of intervals, and a list of demands, and a capacity. It enforces that at any time point, the sum of demands of tasks active at that time point is less than a given capacity. +Modeling a varying profile can be done using fixed (interval, demand) pairs to occupy +the capacity between the actual profile and its max capacity.
+ +### Python code + +```python +#!/usr/bin/env python3 +"""Solve a simple scheduling problem with a variable work load.""" +import io + +import pandas as pd + +from ortools.sat.python import cp_model + + +def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]: + """Creates the two dataframes that describes the model.""" + + capacity_str: str = """ + start_hour capacity + 0 0 + 2 0 + 4 1 + 6 3 + 8 6 + 10 12 + 12 8 + 14 12 + 16 10 + 18 4 + 20 2 + 22 0 + """ + + tasks_str: str = """ + name duration load priority + t1 60 3 2 + t2 180 2 1 + t3 240 5 3 + t4 90 4 2 + t5 120 3 1 + t6 300 3 3 + t7 120 1 2 + t8 100 5 2 + t9 110 2 1 + t10 300 5 3 + t11 90 4 2 + t12 120 3 1 + t13 250 3 3 + t14 120 1 2 + t15 40 5 3 + t16 70 4 2 + t17 90 8 1 + t18 40 3 3 + t19 120 5 2 + t20 60 3 2 + t21 180 2 1 + t22 240 5 3 + t23 90 4 2 + t24 120 3 1 + t25 300 3 3 + t26 120 1 2 + t27 100 5 2 + t28 110 2 1 + t29 300 5 3 + t30 90 4 2 + """ + + capacity_df = pd.read_table(io.StringIO(capacity_str), sep=r"\s+") + tasks_df = pd.read_table(io.StringIO(tasks_str), index_col=0, sep=r"\s+") + return capacity_df, tasks_df + + +def main(): + """Create the model and solves it.""" + capacity_df, tasks_df = create_data_model() + + # Create the model. + model = cp_model.CpModel() + + # Get the max capacity from the capacity dataframe. + max_capacity = capacity_df.capacity.max() + print(f"Max capacity = {max_capacity}") + print(f"#tasks = {len(tasks_df)}") + + minutes_per_period: int = 120 + horizon: int = 24 * 60 + + # Variables + starts = model.NewIntVarSeries( + name="starts", lower_bounds=0, upper_bounds=horizon, index=tasks_df.index + ) + performed = model.NewBoolVarSeries(name="performed", index=tasks_df.index) + + intervals = model.NewOptionalFixedSizeIntervalVarSeries( + name="intervals", + index=tasks_df.index, + starts=starts, + sizes=tasks_df.duration, + performed_literals=performed, + ) + + # Set up the profile. We use fixed (intervals, demands) to fill in the space + # between the actual load profile and the max capacity. + time_period_intervals = model.NewFixedSizeIntervalVarSeries( + name="time_period_intervals", + index=capacity_df.index, + starts=capacity_df.start_hour * minutes_per_period, + sizes=minutes_per_period, + ) + time_period_heights = max_capacity - capacity_df.capacity + + # Cumulative constraint. + model.AddCumulative( + intervals.to_list() + time_period_intervals.to_list(), + tasks_df.load.to_list() + time_period_heights.to_list(), + max_capacity, + ) + + # Objective: maximize the value of performed intervals. + # 1 is the max priority. + max_priority = max(tasks_df.priority) + model.Maximize(sum(performed * (max_priority + 1 - tasks_df.priority))) + + # Create the solver and solve the model. 
+ solver = cp_model.CpSolver() + solver.parameters.log_search_progress = True + solver.parameters.num_workers = 8 + solver.parameters.max_time_in_seconds = 30.0 + status = solver.Solve(model) + + if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE: + start_values = solver.Values(starts) + performed_values = solver.BooleanValues(performed) + for task in tasks_df.index: + if performed_values[task]: + print(f"task {task} starts at {start_values[task]}") + else: + print(f"task {task} is not performed") + elif status == cp_model.INFEASIBLE: + print("The problem is infeasible") + else: + print("Something is wrong, check the status and the log of the solve") + + +if __name__ == "__main__": + main() +``` + ## Alternative resources for one interval ## Ranking tasks in a disjunctive resource diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index 9246ae28a8a..9dfd6b227b6 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -706,7 +706,7 @@ bool IntegerTrail::Propagate(Trail* trail) { if (level > integer_search_levels_.size()) { integer_search_levels_.push_back(integer_trail_.size()); reason_decision_levels_.push_back(literals_reason_starts_.size()); - CHECK_EQ(trail->CurrentDecisionLevel(), integer_search_levels_.size()); + CHECK_EQ(level, integer_search_levels_.size()); } // This is required because when loading a model it is possible that we add diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index 416783b29e5..82ee73d8d34 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -392,7 +392,19 @@ std::function SchedulingSearchHeuristic( model->GetOrCreate()->search_random_variable_pool_size()); auto* random = model->GetOrCreate(); - return [repo, heuristic, trail, integer_trail, randomization_size, random]() { + // To avoid to scan already fixed intervals, we use a simple reversible int. + auto* rev_int_repo = model->GetOrCreate(); + const int num_intervals = repo->NumIntervals(); + int rev_fixed = 0; + int rev_is_in_dive = 0; + std::vector intervals(num_intervals); + std::vector cached_start_mins(num_intervals); + for (IntervalVariable i(0); i < num_intervals; ++i) { + intervals[i.value()] = i; + } + + // Note(user): only the model is captured for no reason. + return [=]() mutable { struct ToSchedule { // Variable to fix. LiteralIndex presence = kNoLiteralIndex; @@ -413,56 +425,97 @@ std::function SchedulingSearchHeuristic( std::tie(other.start_min, other.start_max, other.size_min, other.noise); } + + // Generating random noise can take time, so we use this function to + // delay it. + bool MightBeBetter(const ToSchedule& other) const { + return std::tie(start_min, start_max) <= + std::tie(other.start_min, other.start_max); + } }; std::vector top_decisions; + top_decisions.reserve(randomization_size); top_decisions.resize(1); + // Save rev_fixed before we modify it. + rev_int_repo->SaveState(&rev_fixed); + // TODO(user): we should also precompute fixed precedences and only fix // interval that have all their predecessors fixed. 
- const int num_intervals = repo->NumIntervals(); - for (IntervalVariable i(0); i < num_intervals; ++i) { - if (repo->IsAbsent(i)) continue; - if (!repo->IsPresent(i) || !integer_trail->IsFixed(repo->Start(i)) || - !integer_trail->IsFixed(repo->End(i))) { - IntegerValue start_min = integer_trail->LowerBound(repo->Start(i)); - IntegerValue start_max = integer_trail->UpperBound(repo->Start(i)); - if (repo->IsOptional(i)) { - // For task whose presence is still unknown, our propagators should - // have propagated the minimum time as if it was present. So this - // should reflect the earliest time at which this interval can be - // scheduled. - start_min = std::max(start_min, - integer_trail->ConditionalLowerBound( - repo->PresenceLiteral(i), repo->Start(i))); - start_max = std::min(start_max, - integer_trail->ConditionalUpperBound( - repo->PresenceLiteral(i), repo->Start(i))); - } + for (int i = rev_fixed; i < num_intervals; ++i) { + const ToSchedule& worst = top_decisions.back(); + if (rev_is_in_dive == 1 && cached_start_mins[i] > worst.start_min) { + continue; + } + + const IntervalVariable interval = intervals[i]; + if (repo->IsAbsent(interval)) { + std::swap(intervals[i], intervals[rev_fixed]); + std::swap(cached_start_mins[i], cached_start_mins[rev_fixed]); + ++rev_fixed; + continue; + } + const AffineExpression start = repo->Start(interval); + const AffineExpression end = repo->End(interval); + if (repo->IsPresent(interval) && integer_trail->IsFixed(start) && + integer_trail->IsFixed(end)) { + std::swap(intervals[i], intervals[rev_fixed]); + std::swap(cached_start_mins[i], cached_start_mins[rev_fixed]); + ++rev_fixed; + continue; + } + + ToSchedule candidate; + if (repo->IsOptional(interval)) { + // For task whose presence is still unknown, our propagators should + // have propagated the minimum time as if it was present. So this + // should reflect the earliest time at which this interval can be + // scheduled. + const Literal lit = repo->PresenceLiteral(interval); + candidate.start_min = integer_trail->ConditionalLowerBound(lit, start); + candidate.start_max = integer_trail->ConditionalUpperBound(lit, start); + } else { + candidate.start_min = integer_trail->LowerBound(start); + candidate.start_max = integer_trail->UpperBound(start); + } + cached_start_mins[i] = candidate.start_min; + if (top_decisions.size() < randomization_size || + candidate.MightBeBetter(worst)) { + // Finish filling candidate. + // // For variable size, we compute the min size once the start is fixed // to time. This is needed to never pick the "artificial" makespan // interval at the end in priority compared to intervals that still // need to be scheduled. - const IntegerValue size_min = - std::max(integer_trail->LowerBound(repo->Size(i)), - integer_trail->LowerBound(repo->End(i)) - start_min); - const ToSchedule& worst = top_decisions.back(); - const ToSchedule candidate( - {repo->IsOptional(i) ? repo->PresenceLiteral(i).Index() - : kNoLiteralIndex, - repo->Start(i), repo->End(i), start_min, start_max, size_min, - absl::Uniform(*random, 0.0, 1.0)}); - if (top_decisions.size() < randomization_size || candidate < worst) { - if (top_decisions.size() == randomization_size) { - top_decisions.pop_back(); - } - top_decisions.push_back(candidate); - if (top_decisions.size() > 1) { - std::sort(top_decisions.begin(), top_decisions.end()); - } + candidate.start = start; + candidate.end = end; + candidate.presence = repo->IsOptional(interval) + ? 
repo->PresenceLiteral(interval).Index() + : kNoLiteralIndex; + candidate.size_min = + std::max(integer_trail->LowerBound(repo->Size(interval)), + integer_trail->LowerBound(end) - candidate.start_min); + candidate.noise = absl::Uniform(*random, 0.0, 1.0); + + if (top_decisions.size() == randomization_size) { + // Do not replace if we have a strict inequality now. + if (worst < candidate) continue; + top_decisions.pop_back(); + } + top_decisions.push_back(candidate); + if (top_decisions.size() > 1) { + std::sort(top_decisions.begin(), top_decisions.end()); } } } + + // Setup rev_is_in_dive to be 1 only if there was no backtrack since the + // previous call. + rev_is_in_dive = 0; + rev_int_repo->SaveState(&rev_is_in_dive); + rev_is_in_dive = 1; + const ToSchedule best = top_decisions.size() == 1 ? top_decisions.front() @@ -535,6 +588,83 @@ std::function SchedulingSearchHeuristic( }; } +namespace { + +bool PrecedenceIsBetter(SchedulingConstraintHelper* helper, int a, + SchedulingConstraintHelper* other_helper, int other_a) { + return std::make_tuple(helper->StartMin(a), helper->StartMax(a), + helper->SizeMin(a)) < + std::make_tuple(other_helper->StartMin(other_a), + other_helper->StartMax(other_a), + other_helper->SizeMin(other_a)); +} + +} // namespace + +// The algo goes as follow: +// - For each disjunctive, consider the intervals by start time, consider +// adding the first precedence between overlapping interval. +// - Take the smallest start time amongst all disjunctive. +std::function SchedulingPrecedenceSearchHeuristic( + Model* model) { + auto* repo = model->GetOrCreate(); + return [repo]() { + SchedulingConstraintHelper* best_helper = nullptr; + int best_before; + int best_after; + for (SchedulingConstraintHelper* helper : repo->AllDisjunctiveHelpers()) { + if (!helper->SynchronizeAndSetTimeDirection(true)) { + return BooleanOrIntegerLiteral(); + } + + // TODO(user): tie break by size/start-max + // TODO(user): Use conditional lower bounds? note that in automatic search + // all precedence will be fixed before this is called though. In fixed + // search maybe we should use the other SchedulingSearchHeuristic(). + int a = -1; + for (auto [b, time] : helper->TaskByIncreasingStartMin()) { + if (helper->IsAbsent(b)) continue; + if (a == -1 || helper->EndMin(a) <= helper->StartMin(b)) { + a = b; + continue; + } + + // Swap (a,b) if they have the same start_min. + if (PrecedenceIsBetter(helper, b, helper, a)) { + std::swap(a, b); + + // Corner case in case b can fit before a (size zero) + if (helper->EndMin(a) <= helper->StartMin(b)) { + a = b; + continue; + } + } + + // TODO(Fdid): Also compare the second part of the precedence in + // PrecedenceIsBetter() and not just the interval before? 
+ if (best_helper == nullptr || + PrecedenceIsBetter(helper, a, best_helper, best_before)) { + best_helper = helper; + best_before = a; + best_after = b; + } + break; + } + } + + if (best_helper != nullptr) { + VLOG(2) << "New precedence: " << best_helper->TaskDebugString(best_before) + << " " << best_helper->TaskDebugString(best_after); + const IntervalVariable a = best_helper->IntervalVariables()[best_before]; + const IntervalVariable b = best_helper->IntervalVariables()[best_after]; + repo->CreateDisjunctivePrecedenceLiteral(a, b); + return BooleanOrIntegerLiteral(repo->GetPrecedenceLiteral(a, b)); + } + + return BooleanOrIntegerLiteral(); + }; +} + std::function RandomizeOnRestartHeuristic( bool lns_mode, Model* model) { SatSolver* sat_solver = model->GetOrCreate(); @@ -1439,12 +1569,12 @@ void ContinuousProber::LogStatistics() { shared_response_manager_->LogMessageWithThrottling( "Probe", absl::StrCat( - "#iterations:", iteration_, - " #literals fixed/probed:", prober_->num_new_literals_fixed(), "/", - num_literals_probed_, " #bounds shaved/tried:", num_bounds_shaved_, - "/", num_bounds_tried_, " #new_integer_bounds:", + " (iterations=", iteration_, + " literals fixed/probed=", prober_->num_new_literals_fixed(), "/", + num_literals_probed_, " bounds shaved/tried=", num_bounds_shaved_, + "/", num_bounds_tried_, " new_integer_bounds=", shared_bounds_manager_->NumBoundsExported("probing"), - ", #new_binary_clauses:", prober_->num_new_binary_clauses())); + ", new_binary_clause=", prober_->num_new_binary_clauses(), ")")); } } diff --git a/ortools/sat/integer_search.h b/ortools/sat/integer_search.h index 1d5b3b30b86..e5b0de7f08a 100644 --- a/ortools/sat/integer_search.h +++ b/ortools/sat/integer_search.h @@ -226,6 +226,12 @@ std::function PseudoCost(Model* model); std::function SchedulingSearchHeuristic( Model* model); +// Compared to SchedulingSearchHeuristic(), this one takes decisions on precedences +// between tasks, lazily creating a precedence Boolean for the tasks in +// disjunction. +std::function SchedulingPrecedenceSearchHeuristic( + Model* model); + // Returns true if the number of variables in the linearized part represent // a large enough proportion of all the problem variables.
bool LinearizedPartIsLarge(Model* model); diff --git a/ortools/sat/intervals.cc b/ortools/sat/intervals.cc index 57dcabcb92e..be796085f98 100644 --- a/ortools/sat/intervals.cc +++ b/ortools/sat/intervals.cc @@ -81,44 +81,79 @@ IntervalVariable IntervalsRepository::CreateInterval(AffineExpression start, return i; } -SchedulingConstraintHelper::SchedulingConstraintHelper( - const std::vector& tasks, Model* model) - : trail_(model->GetOrCreate()), - integer_trail_(model->GetOrCreate()), - precedences_(model->GetOrCreate()) { - starts_.clear(); - ends_.clear(); - minus_ends_.clear(); - minus_starts_.clear(); - sizes_.clear(); - reason_for_presence_.clear(); +void IntervalsRepository::CreateDisjunctivePrecedenceLiteral( + IntervalVariable a, IntervalVariable b) { + const auto it = disjunctive_precedences_.find({a, b}); + if (it != disjunctive_precedences_.end()) return; - auto* repository = model->GetOrCreate(); - for (const IntervalVariable i : tasks) { - if (repository->IsOptional(i)) { - reason_for_presence_.push_back(repository->PresenceLiteral(i).Index()); - } else { - reason_for_presence_.push_back(kNoLiteralIndex); + std::vector enforcement_literals; + if (IsOptional(a)) enforcement_literals.push_back(PresenceLiteral(a)); + if (IsOptional(b)) enforcement_literals.push_back(PresenceLiteral(b)); + if (sat_solver_->CurrentDecisionLevel() == 0) { + int new_size = 0; + for (const Literal l : enforcement_literals) { + // We can ignore always absent interval, and skip the literal of the + // interval that are now always present. + if (assignment_.LiteralIsTrue(l)) continue; + if (assignment_.LiteralIsFalse(l)) return; + enforcement_literals[new_size++] = l; } - sizes_.push_back(repository->Size(i)); - starts_.push_back(repository->Start(i)); - ends_.push_back(repository->End(i)); - minus_starts_.push_back(repository->Start(i).Negated()); - minus_ends_.push_back(repository->End(i).Negated()); + enforcement_literals.resize(new_size); } - RegisterWith(model->GetOrCreate()); - InitSortedVectors(); - if (!SynchronizeAndSetTimeDirection(true)) { - model->GetOrCreate()->NotifyThatModelIsUnsat(); + const AffineExpression start_a = Start(a); + const AffineExpression end_a = End(a); + const AffineExpression start_b = Start(b); + const AffineExpression end_b = End(b); + + // task_a is always before task_b ? + if (integer_trail_->UpperBound(start_a) < integer_trail_->LowerBound(end_b)) { + AddConditionalAffinePrecedence(enforcement_literals, end_a, start_b, + model_); + return; } + + // task_b is always before task_a ? + if (integer_trail_->UpperBound(start_b) < integer_trail_->LowerBound(end_a)) { + AddConditionalAffinePrecedence(enforcement_literals, end_b, start_a, + model_); + return; + } + + // Create a new literal. + const BooleanVariable boolean_var = sat_solver_->NewBooleanVariable(); + const Literal a_before_b = Literal(boolean_var, true); + disjunctive_precedences_.insert({{a, b}, a_before_b}); + disjunctive_precedences_.insert({{b, a}, a_before_b.Negated()}); + + enforcement_literals.push_back(a_before_b); + AddConditionalAffinePrecedence(enforcement_literals, end_a, start_b, model_); + enforcement_literals.pop_back(); + + enforcement_literals.push_back(a_before_b.Negated()); + AddConditionalAffinePrecedence(enforcement_literals, end_b, start_a, model_); + enforcement_literals.pop_back(); + + // Force the value of boolean_var in case the precedence is not active. This + // avoids duplicate solutions when enumerating all possible solutions. 
+ for (const Literal l : enforcement_literals) { + implications_->AddBinaryClause(l, a_before_b); + } +} + +LiteralIndex IntervalsRepository::GetPrecedenceLiteral( + IntervalVariable a, IntervalVariable b) const { + const auto it = disjunctive_precedences_.find({a, b}); + if (it != disjunctive_precedences_.end()) return it->second.Index(); + return kNoLiteralIndex; } // TODO(user): Ideally we should sort the vector of variables, but right now // we cannot since we often use this with a parallel vector of demands. So this // "sorting" should happen in the presolver so we can share as much as possible. SchedulingConstraintHelper* IntervalsRepository::GetOrCreateHelper( - const std::vector& variables) { + const std::vector& variables, + bool register_as_disjunctive_helper) { const auto it = helper_repository_.find(variables); if (it != helper_repository_.end()) return it->second; @@ -126,6 +161,9 @@ SchedulingConstraintHelper* IntervalsRepository::GetOrCreateHelper( new SchedulingConstraintHelper(variables, model_); helper_repository_[variables] = helper; model_->TakeOwnership(helper); + if (register_as_disjunctive_helper) { + disjunctive_helpers_.push_back(helper); + } return helper; } @@ -150,10 +188,46 @@ void IntervalsRepository::InitAllDecomposedEnergies() { } } +SchedulingConstraintHelper::SchedulingConstraintHelper( + const std::vector& tasks, Model* model) + : trail_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()), + precedence_relations_(model->GetOrCreate()), + precedences_(model->GetOrCreate()), + interval_variables_(tasks) { + starts_.clear(); + ends_.clear(); + minus_ends_.clear(); + minus_starts_.clear(); + sizes_.clear(); + reason_for_presence_.clear(); + + auto* repository = model->GetOrCreate(); + for (const IntervalVariable i : tasks) { + if (repository->IsOptional(i)) { + reason_for_presence_.push_back(repository->PresenceLiteral(i).Index()); + } else { + reason_for_presence_.push_back(kNoLiteralIndex); + } + sizes_.push_back(repository->Size(i)); + starts_.push_back(repository->Start(i)); + ends_.push_back(repository->End(i)); + minus_starts_.push_back(repository->Start(i).Negated()); + minus_ends_.push_back(repository->End(i).Negated()); + } + + RegisterWith(model->GetOrCreate()); + InitSortedVectors(); + if (!SynchronizeAndSetTimeDirection(true)) { + model->GetOrCreate()->NotifyThatModelIsUnsat(); + } +} + SchedulingConstraintHelper::SchedulingConstraintHelper(int num_tasks, Model* model) : trail_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), + precedence_relations_(model->GetOrCreate()), precedences_(model->GetOrCreate()) { starts_.resize(num_tasks); CHECK_EQ(NumTasks(), num_tasks); @@ -278,6 +352,7 @@ bool SchedulingConstraintHelper::ResetFromSubset( current_time_direction_ = other.current_time_direction_; const int num_tasks = tasks.size(); + interval_variables_.resize(num_tasks); starts_.resize(num_tasks); ends_.resize(num_tasks); minus_ends_.resize(num_tasks); @@ -286,6 +361,7 @@ bool SchedulingConstraintHelper::ResetFromSubset( reason_for_presence_.resize(num_tasks); for (int i = 0; i < num_tasks; ++i) { const int t = tasks[i]; + interval_variables_[i] = other.interval_variables_[t]; starts_[i] = other.starts_[t]; ends_[i] = other.ends_[t]; minus_ends_[i] = other.minus_ends_[t]; @@ -370,6 +446,51 @@ bool SchedulingConstraintHelper::SynchronizeAndSetTimeDirection( return true; } +// TODO(user): be more precise when we know a and b are in disjunction. +// we really just need start_b > start_a, or even >= if duration is non-zero. 
+IntegerValue SchedulingConstraintHelper::GetCurrentMinDistanceBetweenTasks( + int a, int b, bool add_reason_if_after) { + const AffineExpression before = ends_[a]; + const AffineExpression after = starts_[b]; + if (before.var == kNoIntegerVariable) return kMinIntegerValue; + if (after.var == kNoIntegerVariable) return kMinIntegerValue; + + const IntegerValue needed = before.constant - after.constant; + const IntegerValue static_known = + precedence_relations_->GetOffset(before.var, after.var); + + const std::pair dynamic_known = + precedences_->GetConditionalOffset(before.var, after.var); + + const IntegerValue best = std::max(static_known, dynamic_known.second); + if (best == kMinIntegerValue) return kMinIntegerValue; + + if (add_reason_if_after && dynamic_known.second > static_known && + dynamic_known.second >= needed) { + literal_reason_.push_back(dynamic_known.first.Negated()); + } + return best - needed; +} + +void SchedulingConstraintHelper::AddLevelZeroPrecedence(int a, int b) { + CHECK_EQ(trail_->CurrentDecisionLevel(), 0); + if (!IsPresent(a)) return; + if (!IsPresent(b)) return; + const AffineExpression before = ends_[a]; + const AffineExpression after = starts_[b]; + if (after.coeff != 1) return; + if (before.coeff != 1) return; + if (after.var == kNoIntegerVariable) return; + if (before.var == kNoIntegerVariable) return; + const IntegerValue offset = before.constant - after.constant; + precedence_relations_->UpdateOffset(before.var, after.var, offset); + if (precedences_->AddPrecedenceWithOffsetIfNew(before.var, after.var, + offset)) { + VLOG(2) << "new relation " << TaskDebugString(a) + << " <= " << TaskDebugString(b); + } +} + const std::vector& SchedulingConstraintHelper::TaskByIncreasingStartMin() { const int num_tasks = NumTasks(); diff --git a/ortools/sat/intervals.h b/ortools/sat/intervals.h index bf008072246..5428e10c198 100644 --- a/ortools/sat/intervals.h +++ b/ortools/sat/intervals.h @@ -58,6 +58,8 @@ class IntervalsRepository { explicit IntervalsRepository(Model* model) : model_(model), assignment_(model->GetOrCreate()->Assignment()), + sat_solver_(model->GetOrCreate()), + implications_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()) {} // This type is neither copyable nor movable. @@ -78,8 +80,8 @@ class IntervalsRepository { LiteralIndex is_present); IntervalVariable CreateInterval(AffineExpression start, AffineExpression end, AffineExpression size, - LiteralIndex is_present, - bool add_linear_relation); + LiteralIndex is_present = kNoLiteralIndex, + bool add_linear_relation = false); // Returns whether or not a interval is optional and the associated literal. bool IsOptional(IntervalVariable i) const { @@ -152,8 +154,13 @@ class IntervalsRepository { // Returns a SchedulingConstraintHelper corresponding to the given variables. // Note that the order of interval in the helper will be the same. + // + // It is possible to indicate that this correspond to a disjunctive constraint + // by setting the Boolean to true. This is used by our scheduling heuristic + // based on precedences. SchedulingConstraintHelper* GetOrCreateHelper( - const std::vector& variables); + const std::vector& variables, + bool register_as_disjunctive_helper = false); // Returns a SchedulingDemandHelper corresponding to the given helper and // demands. Note that the order of interval in the helper and the order of @@ -165,10 +172,32 @@ class IntervalsRepository { // Calls InitDecomposedEnergies on all SchedulingDemandHelper created. 
void InitAllDecomposedEnergies(); + // Assuming a and b cannot overlap if they are present, this create a new + // literal such that: + // - literal & presences => a is before b. + // - not(literal) & presences => b is before a. + // - not present => literal @ true for disallowing multiple solutions. + // + // If such literal already exists this returns it. + void CreateDisjunctivePrecedenceLiteral(IntervalVariable a, + IntervalVariable b); + + // Returns the precedence literal from GetOrCreateDisjunctivePrecedence() if + // it exists or kNoLiteralIndex otherwise. + LiteralIndex GetPrecedenceLiteral(IntervalVariable a, + IntervalVariable b) const; + + const std::vector& AllDisjunctiveHelpers() + const { + return disjunctive_helpers_; + } + private: // External classes needed. Model* model_; const VariablesAssignment& assignment_; + SatSolver* sat_solver_; + BinaryImplicationGraph* implications_; IntegerTrail* integer_trail_; // Literal indicating if the tasks is executed. Tasks that are always executed @@ -189,6 +218,13 @@ class IntervalsRepository { std::pair>, SchedulingDemandHelper*> demand_helper_repository_; + + // Disjunctive precedences. + absl::flat_hash_map, Literal> + disjunctive_precedences_; + + // Disjunctive helpers_. + std::vector disjunctive_helpers_; }; // An helper struct to sort task by time. This is used by the @@ -304,6 +340,14 @@ class SchedulingConstraintHelper : public PropagatorInterface, bool IsPresent(int t) const; bool IsAbsent(int t) const; + // Return a value so that End(a) + dist <= Start(b). + // Returns kMinInterValue if we don't have any such relation. + IntegerValue GetCurrentMinDistanceBetweenTasks( + int a, int b, bool add_reason_if_after = false); + + // Add a new level zero precedence between two tasks. + void AddLevelZeroPrecedence(int a, int b); + // Return the minimum overlap of interval i with the time window [start..end]. // // Note: this is different from the mandatory part of an interval. @@ -390,6 +434,9 @@ class SchedulingConstraintHelper : public PropagatorInterface, IntegerLiteral lit); // Returns the underlying affine expressions. + const std::vector& IntervalVariables() const { + return interval_variables_; + } const std::vector& Starts() const { return starts_; } const std::vector& Ends() const { return ends_; } const std::vector& Sizes() const { return sizes_; } @@ -458,6 +505,7 @@ class SchedulingConstraintHelper : public PropagatorInterface, Trail* trail_; IntegerTrail* integer_trail_; + PrecedenceRelations* precedence_relations_; PrecedencesPropagator* precedences_; // The current direction of time, true for forward, false for backward. @@ -465,6 +513,7 @@ class SchedulingConstraintHelper : public PropagatorInterface, // All the underlying variables of the tasks. // The vectors are indexed by the task index t. 
+ std::vector interval_variables_; std::vector starts_; std::vector ends_; std::vector sizes_; diff --git a/ortools/sat/lb_tree_search.cc b/ortools/sat/lb_tree_search.cc index 8f2509c725c..9b7dab93f58 100644 --- a/ortools/sat/lb_tree_search.cc +++ b/ortools/sat/lb_tree_search.cc @@ -196,9 +196,9 @@ void LbTreeSearch::MarkSubtreeAsDeleted(NodeIndex root) { std::string LbTreeSearch::SmallProgressString() const { return absl::StrCat( - "#nodes:", num_nodes_in_tree_, "/", nodes_.size(), - " #rc:", num_rc_detected_, " #decisions:", num_decisions_taken_, - " #@root:", num_back_to_root_node_, " #restarts:", num_full_restarts_); + "nodes=", num_nodes_in_tree_, "/", nodes_.size(), + " rc=", num_rc_detected_, " decisions=", num_decisions_taken_, + " @root=", num_back_to_root_node_, " restarts=", num_full_restarts_); } SatSolver::Status LbTreeSearch::Search( @@ -296,8 +296,8 @@ SatSolver::Status LbTreeSearch::Search( const IntegerValue bound = nodes_[current_branch_[0]].MinObjective(); if (bound > current_objective_lb_) { shared_response_->UpdateInnerObjectiveBounds( - absl::StrCat("lb_tree_search ", SmallProgressString()), bound, - integer_trail_->LevelZeroUpperBound(objective_var_)); + absl::StrCat("lb_tree_search (", SmallProgressString(), ") "), + bound, integer_trail_->LevelZeroUpperBound(objective_var_)); current_objective_lb_ = bound; if (VLOG_IS_ON(3)) DebugDisplayTree(current_branch_[0]); } @@ -329,7 +329,8 @@ SatSolver::Status LbTreeSearch::Search( if (num_decisions_taken_ >= num_decisions_taken_at_last_restart_ + kNumDecisionsBeforeInitialRestarts && num_full_restarts_ < kMaxNumInitialRestarts) { - VLOG(2) << "lb_tree_search initial_restart " << SmallProgressString(); + VLOG(2) << "lb_tree_search (initial_restart " << SmallProgressString() + << ")"; if (!FullRestart()) return sat_solver_->UnsatStatus(); } @@ -491,8 +492,8 @@ SatSolver::Status LbTreeSearch::Search( } if (VLOG_IS_ON(1)) { - shared_response_->LogMessageWithThrottling("TreeS", - SmallProgressString()); + shared_response_->LogMessageWithThrottling( + "TreeS", absl::StrCat(" (", SmallProgressString(), ")")); } if (n < nodes_.size()) { diff --git a/ortools/sat/optimization.cc b/ortools/sat/optimization.cc index 9648af312d5..b547f983741 100644 --- a/ortools/sat/optimization.cc +++ b/ortools/sat/optimization.cc @@ -755,8 +755,7 @@ SatSolver::Status CoreBasedOptimizer::OptimizeWithSatEncoding( const int num_bools = sat_solver_->NumVariables(); const int num_fixed = sat_solver_->NumFixedVariables(); model_->GetOrCreate()->UpdateInnerObjectiveBounds( - absl::StrFormat("bool_core num_cores:%d [%s] assumptions:%u " - "depth:%d fixed_bools:%d/%d", + absl::StrFormat("bool_core (num_cores=%d [%s] a=%u d=%d fixed=%d/%d)", iter, previous_core_info, encoder.nodes().size(), max_depth, num_fixed, num_bools), new_obj_lb, integer_trail_->LevelZeroUpperBound(objective_var_)); @@ -993,8 +992,8 @@ void CoreBasedOptimizer::PresolveObjectiveWithAtMostOne( if (overall_lb_increase > 0) { // Report new bounds right away with extra information. 
model_->GetOrCreate()->UpdateInnerObjectiveBounds( - absl::StrFormat("am1_presolve num_literals:%d num_am1:%d " - "increase:%lld work_done:%lld", + absl::StrFormat("am1_presolve (num_literals=%d num_am1=%d " + "increase=%lld work_done=%lld)", (int)candidates.size(), num_at_most_ones, overall_lb_increase.value(), implications_->WorkDone()), IntegerValue(offset->value()), diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index e94a22a66fa..0af8a610526 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -278,6 +278,7 @@ bool PrecedencesPropagator::Propagate() { literal_to_new_impacted_arcs_[literal.Index()]) { if (--arc_counts_[arc_index] == 0) { const ArcInfo& arc = arcs_[arc_index]; + AddToConditionalRelations(arc); impacted_arcs_[arc.tail_var].push_back(arc_index); } } @@ -335,6 +336,36 @@ bool PrecedencesPropagator::PropagateOutgoingArcs(IntegerVariable var) { return true; } +// TODO(user): Add as fixed precedence if we fix at level zero. +void PrecedencesPropagator::AddToConditionalRelations(const ArcInfo& arc) { + if (arc.presence_literals.size() != 1) return; + + // We currently do not handle variable size in the reasons. + // TODO(user): we could easily take a level zero ArcOffset() instead, or + // add this to the reason though. + if (arc.offset_var != kNoIntegerVariable) return; + const std::pair key = {arc.tail_var, + arc.head_var}; + const IntegerValue offset = ArcOffset(arc); + + // We only insert if it is not already present! + conditional_relations_.insert({key, {arc.presence_literals[0], offset}}); +} + +void PrecedencesPropagator::RemoveFromConditionalRelations(const ArcInfo& arc) { + if (arc.presence_literals.size() != 1) return; + if (arc.offset_var != kNoIntegerVariable) return; + const std::pair key = {arc.tail_var, + arc.head_var}; + const auto it = conditional_relations_.find(key); + if (it == conditional_relations_.end()) return; + if (it->second.first != arc.presence_literals[0]) return; + + // It is okay if we erase a wrong one on untrail, what is important is not to + // forget to erase one we added. + conditional_relations_.erase(it); +} + void PrecedencesPropagator::Untrail(const Trail& trail, int trail_index) { if (propagation_trail_index_ > trail_index) { // This means that we already propagated all there is to propagate @@ -349,6 +380,7 @@ void PrecedencesPropagator::Untrail(const Trail& trail, int trail_index) { literal_to_new_impacted_arcs_[literal.Index()]) { if (arc_counts_[arc_index]++ == 0) { const ArcInfo& arc = arcs_[arc_index]; + RemoveFromConditionalRelations(arc); impacted_arcs_[arc.tail_var].pop_back(); } } @@ -493,7 +525,6 @@ void PrecedencesPropagator::AdjustSizeFor(IntegerVariable i) { void PrecedencesPropagator::AddArc( IntegerVariable tail, IntegerVariable head, IntegerValue offset, IntegerVariable offset_var, absl::Span presence_literals) { - DCHECK_EQ(trail_->CurrentDecisionLevel(), 0); AdjustSizeFor(tail); AdjustSizeFor(head); if (offset_var != kNoIntegerVariable) AdjustSizeFor(offset_var); @@ -518,16 +549,19 @@ void PrecedencesPropagator::AddArc( integer_trail_->IsIgnoredLiteral(offset_var).Negated()); } gtl::STLSortAndRemoveDuplicates(&enforcement_literals); - int new_size = 0; - for (const Literal l : enforcement_literals) { - if (trail_->Assignment().LiteralIsTrue(Literal(l))) { - continue; // At true, ignore this literal. - } else if (trail_->Assignment().LiteralIsFalse(Literal(l))) { - return; // At false, ignore completely this arc. 
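// Why the filtering below is now restricted to decision level zero: at a
// positive level a literal that is currently assigned may become unassigned
// again on backtrack, so the arc must keep its full enforcement list; the
// arc_counts_ initialization added at the end of AddArc() compensates for
// enforcement literals that are already true when the arc is created.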
+ + if (trail_->CurrentDecisionLevel() == 0) { + int new_size = 0; + for (const Literal l : enforcement_literals) { + if (trail_->Assignment().LiteralIsTrue(Literal(l))) { + continue; // At true, ignore this literal. + } else if (trail_->Assignment().LiteralIsFalse(Literal(l))) { + return; // At false, ignore completely this arc. + } + enforcement_literals[new_size++] = l; } - enforcement_literals[new_size++] = l; + enforcement_literals.resize(new_size); } - enforcement_literals.resize(new_size); } if (head == tail) { @@ -543,8 +577,8 @@ void PrecedencesPropagator::AddArc( // Remove the offset_var if it is fixed. // TODO(user): We should also handle the case where tail or head is fixed. if (offset_var != kNoIntegerVariable) { - const IntegerValue lb = integer_trail_->LowerBound(offset_var); - if (lb == integer_trail_->UpperBound(offset_var)) { + const IntegerValue lb = integer_trail_->LevelZeroLowerBound(offset_var); + if (lb == integer_trail_->LevelZeroUpperBound(offset_var)) { offset += lb; offset_var = kNoIntegerVariable; } @@ -628,7 +662,18 @@ void PrecedencesPropagator::AddArc( literal_to_new_impacted_arcs_[l.Index()].push_back(arc_index); } } - arc_counts_.push_back(presence_literals.size()); + + if (trail_->CurrentDecisionLevel() == 0) { + arc_counts_.push_back(presence_literals.size()); + } else { + arc_counts_.push_back(0); + for (const Literal l : presence_literals) { + if (!trail_->Assignment().LiteralIsTrue(l)) { + ++arc_counts_.back(); + } + } + CHECK(presence_literals.empty() || arc_counts_.back() > 0); + } } } diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 747dce55d52..c56bb9e17c0 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -243,6 +243,20 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // so that we can use it all the time. int AddGreaterThanAtLeastOneOfConstraints(Model* model); + // If known, return an offset such that we have a + offset <= b. + // Note that this only cover the case where this was conditionned by a single + // literal. + // + // TODO(user): Support list of literals, it isn't that much harder. + std::pair GetConditionalOffset(IntegerVariable a, + IntegerVariable b) { + const auto it = conditional_relations_.find({a, b}); + if (it == conditional_relations_.end()) { + return {Literal(), kMinIntegerValue}; + } + return it->second; + } + private: DEFINE_STRONG_INDEX_TYPE(ArcIndex); DEFINE_STRONG_INDEX_TYPE(OptionalArcIndex); @@ -326,6 +340,10 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // This is only meant to be used in a DCHECK() and is not optimized. bool NoPropagationLeft(const Trail& trail) const; + // Update conditional_relations_. + void AddToConditionalRelations(const ArcInfo& arc); + void RemoveFromConditionalRelations(const ArcInfo& arc); + // External class needed to get the IntegerVariable lower bounds and Enqueue // new ones. Trail* trail_; @@ -399,6 +417,14 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // Temp vector used by the tree traversal in DisassembleSubtree(). std::vector tmp_vector_; + // When a literal => X + offset <= Y become true, we add it here if X and Y + // do not already have a conditial relation. We also remove it on untrail. + // This is especially useful when we create all the literal between pair of + // interval for a disjunctive constraint. + absl::flat_hash_map, + std::pair> + conditional_relations_; + // Stats. 
int64_t num_cycles_ = 0; int64_t num_pushes_ = 0; diff --git a/ortools/sat/presolve_util.h b/ortools/sat/presolve_util.h index 6a0e80f3d03..822087cf319 100644 --- a/ortools/sat/presolve_util.h +++ b/ortools/sat/presolve_util.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index b3d0b704ded..3f127dfebc6 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -1204,8 +1204,8 @@ def NewIntVarSeries( f" upper_bound={upper_bounds} for variable set={name}" ) - lower_bounds = _ConvertToSeriesAndValidateIndex(lower_bounds, index) - upper_bounds = _ConvertToSeriesAndValidateIndex(upper_bounds, index) + lower_bounds = _ConvertToIntegralSeriesAndValidateIndex(lower_bounds, index) + upper_bounds = _ConvertToIntegralSeriesAndValidateIndex(upper_bounds, index) return pd.Series( index=index, data=[ @@ -2010,6 +2010,58 @@ def NewIntervalVar( raise TypeError("cp_model.NewIntervalVar: end must be affine or constant.") return IntervalVar(self.__model, start_expr, size_expr, end_expr, None, name) + def NewIntervalVarSeries( + self, + name: str, + index: pd.Index, + starts: Union[LinearExprT, pd.Series], + sizes: Union[LinearExprT, pd.Series], + ends: Union[LinearExprT, pd.Series], + ) -> pd.Series: + """Creates a series of interval variables with the given name. + + Args: + name (str): Required. The name of the variable set. + index (pd.Index): Required. The index to use for the variable set. + starts (Union[LinearExprT, pd.Series]): The start of each interval in the + set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + sizes (Union[LinearExprT, pd.Series]): The size of each interval in the + set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + ends (Union[LinearExprT, pd.Series]): The ends of each interval in the + set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + + Returns: + pd.Series: The interval variable set indexed by its corresponding + dimensions. + + Raises: + TypeError: if the `index` is invalid (e.g. a `DataFrame`). + ValueError: if the `name` is not a valid identifier or already exists. + ValueError: if the all the indexes do not match. + """ + if not isinstance(index, pd.Index): + raise TypeError("Non-index object is used as index") + if not name.isidentifier(): + raise ValueError("name={} is not a valid identifier".format(name)) + + starts = _ConvertToLinearExprSeriesAndValidateIndex(starts, index) + sizes = _ConvertToLinearExprSeriesAndValidateIndex(sizes, index) + ends = _ConvertToLinearExprSeriesAndValidateIndex(ends, index) + interval_array = [] + for i in index: + interval_array.append( + self.NewIntervalVar( + start=starts[i], + size=sizes[i], + end=ends[i], + name=f"{name}[{i}]", + ) + ) + return pd.Series(index=index, data=interval_array) + def NewFixedSizeIntervalVar( self, start: LinearExprT, size: IntegralT, name: str ) -> IntervalVar: @@ -2037,6 +2089,52 @@ def NewFixedSizeIntervalVar( ) return IntervalVar(self.__model, start_expr, size_expr, end_expr, None, name) + def NewFixedSizeIntervalVarSeries( + self, + name: str, + index: pd.Index, + starts: Union[LinearExprT, pd.Series], + sizes: Union[IntegralT, pd.Series], + ) -> pd.Series: + """Creates a series of interval variables with the given name. + + Args: + name (str): Required. The name of the variable set. + index (pd.Index): Required. 
The index to use for the variable set. + starts (Union[LinearExprT, pd.Series]): The start of each interval in the + set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + sizes (Union[IntegralT, pd.Series]): The fixed size of each interval in + the set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + + Returns: + pd.Series: The interval variable set indexed by its corresponding + dimensions. + + Raises: + TypeError: if the `index` is invalid (e.g. a `DataFrame`). + ValueError: if the `name` is not a valid identifier or already exists. + ValueError: if the all the indexes do not match. + """ + if not isinstance(index, pd.Index): + raise TypeError("Non-index object is used as index") + if not name.isidentifier(): + raise ValueError("name={} is not a valid identifier".format(name)) + + starts = _ConvertToLinearExprSeriesAndValidateIndex(starts, index) + sizes = _ConvertToIntegralSeriesAndValidateIndex(sizes, index) + interval_array = [] + for i in index: + interval_array.append( + self.NewFixedSizeIntervalVar( + start=starts[i], + size=sizes[i], + name=f"{name}[{i}]", + ) + ) + return pd.Series(index=index, data=interval_array) + def NewOptionalIntervalVar( self, start: LinearExprT, @@ -2088,6 +2186,64 @@ def NewOptionalIntervalVar( self.__model, start_expr, size_expr, end_expr, is_present_index, name ) + def NewOptionalIntervalVarSeries( + self, + name: str, + index: pd.Index, + starts: Union[LinearExprT, pd.Series], + sizes: Union[LinearExprT, pd.Series], + ends: Union[LinearExprT, pd.Series], + performed_literals: Union[LiteralT, pd.Series], + ) -> pd.Series: + """Creates a series of interval variables with the given name. + + Args: + name (str): Required. The name of the variable set. + index (pd.Index): Required. The index to use for the variable set. + starts (Union[LinearExprT, pd.Series]): The start of each interval in the + set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + sizes (Union[LinearExprT, pd.Series]): The size of each interval in the + set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + ends (Union[LinearExprT, pd.Series]): The ends of each interval in the + set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + + Returns: + pd.Series: The interval variable set indexed by its corresponding + dimensions. + + Raises: + TypeError: if the `index` is invalid (e.g. a `DataFrame`). + ValueError: if the `name` is not a valid identifier or already exists. + ValueError: if the all the indexes do not match. 
+ """ + if not isinstance(index, pd.Index): + raise TypeError("Non-index object is used as index") + if not name.isidentifier(): + raise ValueError("name={} is not a valid identifier".format(name)) + + starts = _ConvertToLinearExprSeriesAndValidateIndex(starts, index) + sizes = _ConvertToLinearExprSeriesAndValidateIndex(sizes, index) + ends = _ConvertToLinearExprSeriesAndValidateIndex(ends, index) + performed_literals = _ConvertToLiteralSeriesAndValidateIndex( + performed_literals, index + ) + + interval_array = [] + for i in index: + interval_array.append( + self.NewOptionalIntervalVar( + start=starts[i], + size=sizes[i], + end=ends[i], + is_present=performed_literals[i], + name=f"{name}[{i}]", + ) + ) + return pd.Series(index=index, data=interval_array) + def NewOptionalFixedSizeIntervalVar( self, start: LinearExprT, size: IntegralT, is_present: LiteralT, name: str ) -> IntervalVar: @@ -2120,6 +2276,60 @@ def NewOptionalFixedSizeIntervalVar( self.__model, start_expr, size_expr, end_expr, is_present_index, name ) + def NewOptionalFixedSizeIntervalVarSeries( + self, + name: str, + index: pd.Index, + starts: Union[LinearExprT, pd.Series], + sizes: Union[IntegralT, pd.Series], + performed_literals: Union[LiteralT, pd.Series], + ) -> pd.Series: + """Creates a series of interval variables with the given name. + + Args: + name (str): Required. The name of the variable set. + index (pd.Index): Required. The index to use for the variable set. + starts (Union[LinearExprT, pd.Series]): The start of each interval in the + set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + sizes (Union[IntegralT, pd.Series]): The fixed size of each interval in + the set. If a `pd.Series` is passed in, it will be based on the + corresponding values of the pd.Series. + performed_literals (Union[LiteralT, pd.Series]): The performed literal of + each interval in the set. If a `pd.Series` is passed in, it will be + based on the corresponding values of the pd.Series. + + Returns: + pd.Series: The interval variable set indexed by its corresponding + dimensions. + + Raises: + TypeError: if the `index` is invalid (e.g. a `DataFrame`). + ValueError: if the `name` is not a valid identifier or already exists. + ValueError: if the all the indexes do not match. + """ + if not isinstance(index, pd.Index): + raise TypeError("Non-index object is used as index") + if not name.isidentifier(): + raise ValueError("name={} is not a valid identifier".format(name)) + + starts = _ConvertToLinearExprSeriesAndValidateIndex(starts, index) + sizes = _ConvertToIntegralSeriesAndValidateIndex(sizes, index) + performed_literals = _ConvertToLiteralSeriesAndValidateIndex( + performed_literals, index + ) + interval_array = [] + for i in index: + interval_array.append( + self.NewOptionalFixedSizeIntervalVar( + start=starts[i], + size=sizes[i], + is_present=performed_literals[i], + name=f"{name}[{i}]", + ) + ) + return pd.Series(index=index, data=interval_array) + def AddNoOverlap(self, interval_vars: Iterable[IntervalVar]) -> Constraint: """Adds NoOverlap(interval_vars). @@ -2975,7 +3185,7 @@ def _AttributeSeries( ) -def _ConvertToSeriesAndValidateIndex( +def _ConvertToIntegralSeriesAndValidateIndex( value_or_series: Union[IntegralT, pd.Series], index: pd.Index ) -> pd.Series: """Returns a pd.Series of the given index with the corresponding values. 
@@ -3001,3 +3211,59 @@ def _ConvertToSeriesAndValidateIndex( else: raise TypeError("invalid type={}".format(type(value_or_series))) return result + + +def _ConvertToLinearExprSeriesAndValidateIndex( + value_or_series: Union[LinearExprT, pd.Series], index: pd.Index +) -> pd.Series: + """Returns a pd.Series of the given index with the corresponding values. + + Args: + value_or_series: the values to be converted (if applicable). + index: the index of the resulting pd.Series. + + Returns: + pd.Series: The set of values with the given index. + + Raises: + TypeError: If the type of `value_or_series` is not recognized. + ValueError: If the index does not match. + """ + if cmh.is_integral(value_or_series): + result = pd.Series(data=value_or_series, index=index) + elif isinstance(value_or_series, pd.Series): + if value_or_series.index.equals(index): + result = value_or_series + else: + raise ValueError("index does not match") + else: + raise TypeError("invalid type={}".format(type(value_or_series))) + return result + + +def _ConvertToLiteralSeriesAndValidateIndex( + value_or_series: Union[LiteralT, pd.Series], index: pd.Index +) -> pd.Series: + """Returns a pd.Series of the given index with the corresponding values. + + Args: + value_or_series: the values to be converted (if applicable). + index: the index of the resulting pd.Series. + + Returns: + pd.Series: The set of values with the given index. + + Raises: + TypeError: If the type of `value_or_series` is not recognized. + ValueError: If the index does not match. + """ + if cmh.is_integral(value_or_series): + result = pd.Series(data=value_or_series, index=index) + elif isinstance(value_or_series, pd.Series): + if value_or_series.index.equals(index): + result = value_or_series + else: + raise ValueError("index does not match") + else: + raise TypeError("invalid type={}".format(type(value_or_series))) + return result diff --git a/ortools/sat/samples/BUILD.bazel b/ortools/sat/samples/BUILD.bazel index bb50f979917..04105b0a5de 100644 --- a/ortools/sat/samples/BUILD.bazel +++ b/ortools/sat/samples/BUILD.bazel @@ -39,6 +39,8 @@ code_sample_cc_py(name = "cp_is_fun_sat") code_sample_cc_py(name = "cp_sat_example") +code_sample_py(name = "cumulative_variable_profile_sample_sat") + code_sample_cc_py(name = "earliness_tardiness_cost_sample_sat") code_sample_cc_py(name = "interval_sample_sat") diff --git a/ortools/sat/samples/assignment_sat.py b/ortools/sat/samples/assignment_sat.py index 51147821af6..4f84089722c 100644 --- a/ortools/sat/samples/assignment_sat.py +++ b/ortools/sat/samples/assignment_sat.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Solve a simple assignment problem.""" +"""Solve a simple assignment problem with CP-SAT.""" # [START program] # [START import] import io diff --git a/ortools/sat/samples/bin_packing_sat.py b/ortools/sat/samples/bin_packing_sat.py index a1be1932b51..2e6d761dd74 100644 --- a/ortools/sat/samples/bin_packing_sat.py +++ b/ortools/sat/samples/bin_packing_sat.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Solve a simple bin packing problem using a MIP solver.""" +"""Solve a simple bin packing problem using CP-SAT.""" # [START program] # [START import] import io @@ -25,7 +25,7 @@ # [START program_part1] # [START data_model] -def create_data_model(): +def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]: """Create the data for the example.""" items_str = """ diff --git a/ortools/sat/samples/cumulative_variable_profile_sample_sat.py b/ortools/sat/samples/cumulative_variable_profile_sample_sat.py new file mode 100644 index 00000000000..eeeeb19aaf3 --- /dev/null +++ b/ortools/sat/samples/cumulative_variable_profile_sample_sat.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# Copyright 2010-2022 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Solve a simple scheduling problem with a variable work load.""" +# [START program] +# [START import] +import io + +import pandas as pd + +from ortools.sat.python import cp_model +# [END import] + + +# [START program_part1] +# [START data_model] +def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]: + """Creates the two dataframes that describes the model.""" + + capacity_str: str = """ + start_hour capacity + 0 0 + 2 0 + 4 1 + 6 3 + 8 6 + 10 12 + 12 8 + 14 12 + 16 10 + 18 4 + 20 2 + 22 0 + """ + + tasks_str: str = """ + name duration load priority + t1 60 3 2 + t2 180 2 1 + t3 240 5 3 + t4 90 4 2 + t5 120 3 1 + t6 300 3 3 + t7 120 1 2 + t8 100 5 2 + t9 110 2 1 + t10 300 5 3 + t11 90 4 2 + t12 120 3 1 + t13 250 3 3 + t14 120 1 2 + t15 40 5 3 + t16 70 4 2 + t17 90 8 1 + t18 40 3 3 + t19 120 5 2 + t20 60 3 2 + t21 180 2 1 + t22 240 5 3 + t23 90 4 2 + t24 120 3 1 + t25 300 3 3 + t26 120 1 2 + t27 100 5 2 + t28 110 2 1 + t29 300 5 3 + t30 90 4 2 + """ + + capacity_df = pd.read_table(io.StringIO(capacity_str), sep=r"\s+") + tasks_df = pd.read_table(io.StringIO(tasks_str), index_col=0, sep=r"\s+") + return capacity_df, tasks_df + # [END data_model] + + +def main(): + """Create the model and solves it.""" + # [START data] + capacity_df, tasks_df = create_data_model() + # [END data] + # [END program_part1] + + # [START model] + # Create the model. + model = cp_model.CpModel() + # [END model] + + # Get the max capacity from the capacity dataframe. + max_capacity = capacity_df.capacity.max() + print(f"Max capacity = {max_capacity}") + print(f"#tasks = {len(tasks_df)}") + + minutes_per_period: int = 120 + horizon: int = 24 * 60 + + # [START program_part2] + # [START variables] + # Variables + starts = model.NewIntVarSeries( + name="starts", lower_bounds=0, upper_bounds=horizon, index=tasks_df.index + ) + performed = model.NewBoolVarSeries(name="performed", index=tasks_df.index) + + intervals = model.NewOptionalFixedSizeIntervalVarSeries( + name="intervals", + index=tasks_df.index, + starts=starts, + sizes=tasks_df.duration, + performed_literals=performed, + ) + # [END variables] + + # [START constraints] + # Set up the profile. We use fixed (intervals, demands) to fill in the space + # between the actual load profile and the max capacity. 
+ time_period_intervals = model.NewFixedSizeIntervalVarSeries( + name="time_period_intervals", + index=capacity_df.index, + starts=capacity_df.start_hour * minutes_per_period, + sizes=minutes_per_period, + ) + time_period_heights = max_capacity - capacity_df.capacity + + # Cumulative constraint. + model.AddCumulative( + intervals.to_list() + time_period_intervals.to_list(), + tasks_df.load.to_list() + time_period_heights.to_list(), + max_capacity, + ) + # [END constraints] + + # [START objective] + # Objective: maximize the value of performed intervals. + # 1 is the max priority. + max_priority = max(tasks_df.priority) + model.Maximize(sum(performed * (max_priority + 1 - tasks_df.priority))) + # [END objective] + + # [START solve] + # Create the solver and solve the model. + solver = cp_model.CpSolver() + solver.parameters.log_search_progress = True + solver.parameters.num_workers = 8 + solver.parameters.max_time_in_seconds = 30.0 + status = solver.Solve(model) + # [END solve] + + # [START print_solution] + if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE: + start_values = solver.Values(starts) + performed_values = solver.BooleanValues(performed) + for task in tasks_df.index: + if performed_values[task]: + print(f"task {task} starts at {start_values[task]}") + else: + print(f"task {task} is not performed") + elif status == cp_model.INFEASIBLE: + print("The problem is infeasible") + else: + print("Something is wrong, check the status and the log of the solve") + # [END print_solution] + + +if __name__ == "__main__": + main() +# [END program_part2] +# [END program] diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index e30ba2498e4..8e5ec224162 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -23,7 +23,7 @@ option csharp_namespace = "Google.OrTools.Sat"; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 263 +// NEXT TAG: 264 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -743,6 +743,11 @@ message SatParameters { // Enable stronger and more expensive propagation on no_overlap constraint. optional bool use_strong_propagation_in_disjunctive = 230 [default = false]; + // Whether we try to branch on decision "interval A before interval B" rather + // than on intervals bounds. This usually works better, but slow down a bit + // the time to find first solutions. + optional bool use_dynamic_precedence_in_disjunctive = 263 [default = false]; + // When this is true, the cumulative constraint is reinforced with overload // checking, i.e., an additional level of reasoning based on energy. 
This // additional level supplements the default level of reasoning as well as diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index 343c41fea26..23a383cf789 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -158,6 +158,12 @@ bool SatSolver::SetModelUnsat() { bool SatSolver::AddClauseDuringSearch(absl::Span literals) { if (model_is_unsat_) return false; + + // Let filter clauses if we are at level zero + if (trail_->CurrentDecisionLevel() == 0) { + return AddProblemClause(literals, /*is_safe=*/false); + } + const int index = trail_->Index(); if (literals.empty()) return SetModelUnsat(); if (literals.size() == 1) return AddUnitClause(literals[0]); @@ -165,8 +171,7 @@ bool SatSolver::AddClauseDuringSearch(absl::Span literals) { // TODO(user): We generate in some corner cases clauses with // literals[0].Variable() == literals[1].Variable(). Avoid doing that and // adding such binary clauses to the graph? - if (!binary_implication_graph_->AddBinaryClauseDuringSearch(literals[0], - literals[1])) { + if (!binary_implication_graph_->AddBinaryClause(literals[0], literals[1])) { CHECK_EQ(CurrentDecisionLevel(), 0); return SetModelUnsat(); } @@ -204,15 +209,18 @@ bool SatSolver::AddTernaryClause(Literal a, Literal b, Literal c) { bool SatSolver::AddProblemClause(absl::Span literals, bool is_safe) { SCOPED_TIME_STAT(&stats_); - CHECK_EQ(CurrentDecisionLevel(), 0); if (model_is_unsat_) return false; // Filter already assigned literals. - literals_scratchpad_.clear(); - for (const Literal l : literals) { - if (trail_->Assignment().LiteralIsTrue(l)) return true; - if (trail_->Assignment().LiteralIsFalse(l)) continue; - literals_scratchpad_.push_back(l); + if (CurrentDecisionLevel() == 0) { + literals_scratchpad_.clear(); + for (const Literal l : literals) { + if (trail_->Assignment().LiteralIsTrue(l)) return true; + if (trail_->Assignment().LiteralIsFalse(l)) continue; + literals_scratchpad_.push_back(l); + } + } else { + literals_scratchpad_.assign(literals.begin(), literals.end()); } if (!is_safe) { @@ -238,8 +246,7 @@ bool SatSolver::AddProblemClause(absl::Span literals, bool SatSolver::AddProblemClauseInternal(absl::Span literals) { SCOPED_TIME_STAT(&stats_); - if (DEBUG_MODE) { - CHECK_EQ(CurrentDecisionLevel(), 0); + if (DEBUG_MODE && CurrentDecisionLevel() == 0) { for (const Literal l : literals) { CHECK(!trail_->Assignment().LiteralIsAssigned(l)); } @@ -420,8 +427,7 @@ int SatSolver::AddLearnedClauseAndEnqueueUnitPropagation( if (shared_binary_clauses_callback_ != nullptr) { shared_binary_clauses_callback_(literals[0], literals[1]); } - CHECK(binary_implication_graph_->AddBinaryClauseDuringSearch(literals[0], - literals[1])); + CHECK(binary_implication_graph_->AddBinaryClause(literals[0], literals[1])); return /*lbd=*/2; } @@ -502,7 +508,10 @@ void SatSolver::AddBinaryClauseInternal(Literal a, Literal b, shared_binary_clauses_callback_(a, b); } - binary_implication_graph_->AddBinaryClause(a, b); + if (!binary_implication_graph_->AddBinaryClause(a, b)) { + CHECK_EQ(CurrentDecisionLevel(), 0); + SetModelUnsat(); + } } bool SatSolver::ClauseIsValidUnderDebugAssignment( diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index 7be1bcdc24f..366517d99aa 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -642,8 +642,8 @@ void SharedResponseManager::NewSolution( if (model != nullptr) { const int64_t num_bool = model->Get()->NumVariables(); const int64_t num_fixed = model->Get()->NumFixedVariables(); - 
absl::StrAppend(&solution_message, " fixed_bools:", num_fixed, "/", - num_bool); + absl::StrAppend(&solution_message, " (fixed_bools=", num_fixed, "/", + num_bool, ")"); } if (objective_or_null_ != nullptr) {