feat: add largek gain cache (inaccurate gain cache that does not always iterate over all blocks)
DanielSeemaier committed May 7, 2024
1 parent b1cd139 commit ac74341
Showing 5 changed files with 34 additions and 5 deletions.
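
The commit wires the new strategy through the whole stack: a LARGE_K value in GainCacheStrategy, the string name "largek" for option parsing and printing, a "largek-fm" preset, and a create_largek_fm_context() factory. A minimal usage sketch follows; the include path and the kaminpar::shm namespace are assumptions based on the file layout, not something this diff shows.

#include "kaminpar-shm/kaminpar.h" // declares Context, GainCacheStrategy, create_largek_fm_context()

int main() {
  using namespace kaminpar::shm; // assumed namespace

  // Build the new preset directly ...
  Context ctx = create_largek_fm_context();
  (void)ctx; // silence unused-variable warnings in this sketch

  // ... or, equivalently, select it by name via the new "largek-fm" branch in presets.cc:
  // Context ctx = create_context_by_preset_name("largek-fm");

  // Either way, FM refinement runs with the inexact large-k gain cache:
  // ctx.refinement.kway_fm.gain_cache_strategy == GainCacheStrategy::LARGE_K
  return 0;
}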
7 changes: 5 additions & 2 deletions kaminpar-shm/context_io.cc
@@ -240,6 +240,7 @@ std::unordered_map<std::string, GainCacheStrategy> get_gain_cache_strategies() {
return {
{"sparse", GainCacheStrategy::SPARSE},
{"dense", GainCacheStrategy::DENSE},
{"largek", GainCacheStrategy::LARGE_K},
{"on-the-fly", GainCacheStrategy::ON_THE_FLY},
{"hybrid", GainCacheStrategy::HYBRID},
{"tracing", GainCacheStrategy::TRACING},
@@ -252,6 +253,8 @@ std::ostream &operator<<(std::ostream &out, const GainCacheStrategy strategy) {
return out << "sparse";
case GainCacheStrategy::DENSE:
return out << "dense";
case GainCacheStrategy::LARGE_K:
return out << "largek";
case GainCacheStrategy::ON_THE_FLY:
return out << "on-the-fly";
case GainCacheStrategy::HYBRID:
@@ -567,8 +570,8 @@ void print(const Context &ctx, std::ostream &out) {
out << "Execution mode: " << ctx.parallel.num_threads << "\n";
out << "Seed: " << Random::get_seed() << "\n";
out << "Graph: " << ctx.debug.graph_name
<< " [node ordering: " << ctx.node_ordering << "]"
<< " [edge ordering: " << ctx.edge_ordering << "]\n";
<< " [node ordering: " << ctx.node_ordering << "]" << " [edge ordering: " << ctx.edge_ordering
<< "]\n";
print(ctx.partition, out);
cio::print_delimiter("Graph Compression", '-');
print(ctx.compression, out);
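These two hunks keep parsing and printing symmetric: get_gain_cache_strategies() is the table the string form "largek" is looked up in, and operator<< prints the same name back when the configuration is reported. A round-trip sketch, assuming both symbols are visible to the caller (the header exposing them is not part of this diff):

#include <iostream>
#include <string>
// Assumed visible: GainCacheStrategy, get_gain_cache_strategies(), and the operator<< from context_io.cc.

void print_largek_strategy() {
  const auto strategies = get_gain_cache_strategies();
  const GainCacheStrategy strategy = strategies.at("largek"); // == GainCacheStrategy::LARGE_K
  std::cout << strategy << "\n";                              // prints "largek"
}
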
2 changes: 2 additions & 0 deletions kaminpar-shm/kaminpar.h
@@ -202,6 +202,7 @@ enum class FMStoppingRule {
enum class GainCacheStrategy {
SPARSE,
DENSE,
LARGE_K,
ON_THE_FLY,
HYBRID,
TRACING,
@@ -434,6 +435,7 @@ Context create_default_context();
Context create_memory_context();
Context create_fast_context();
Context create_largek_context();
Context create_largek_fm_context();
Context create_strong_context();
Context create_jet_context();
Context create_noref_context();
17 changes: 17 additions & 0 deletions kaminpar-shm/presets.cc
@@ -23,6 +23,8 @@ Context create_context_by_preset_name(const std::string &name) {
return create_fast_context();
} else if (name == "largek") {
return create_largek_context();
} else if (name == "largek-fm") {
return create_largek_fm_context();
} else if (name == "strong" || name == "fm") {
return create_strong_context();
} else if (name == "jet") {
@@ -233,6 +235,21 @@ Context create_largek_context() {
return ctx;
}

Context create_largek_fm_context() {
Context ctx = create_largek_context();

ctx.refinement.algorithms = {
RefinementAlgorithm::GREEDY_BALANCER,
RefinementAlgorithm::LEGACY_LABEL_PROPAGATION,
RefinementAlgorithm::KWAY_FM,
RefinementAlgorithm::GREEDY_BALANCER,
};

ctx.refinement.kway_fm.gain_cache_strategy = GainCacheStrategy::LARGE_K;

return ctx;
}

Context create_strong_context() {
Context ctx = create_default_context();

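create_largek_fm_context() layers FM refinement on top of the existing largek preset: the refinement pipeline becomes balancer, legacy label propagation, k-way FM, balancer again, and the FM refiner is pointed at the inexact large-k gain cache. The same setup could be applied by hand to any base context; a sketch using only the field names visible in this diff:

// Assumed visible: Context, RefinementAlgorithm, GainCacheStrategy, and the preset factories.
Context make_custom_largek_fm_context() {
  Context ctx = create_largek_context(); // any base preset would do

  ctx.refinement.algorithms = {
      RefinementAlgorithm::GREEDY_BALANCER,
      RefinementAlgorithm::LEGACY_LABEL_PROPAGATION,
      RefinementAlgorithm::KWAY_FM,
      RefinementAlgorithm::GREEDY_BALANCER,
  };
  ctx.refinement.kway_fm.gain_cache_strategy = GainCacheStrategy::LARGE_K;

  return ctx;
}
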
3 changes: 3 additions & 0 deletions kaminpar-shm/refinement/fm/fm_refiner.cc
@@ -38,6 +38,9 @@ std::unique_ptr<Refiner> create_fm_refiner(const Context &ctx) {
case GainCacheStrategy::DENSE:
return std::make_unique<FMRefiner<DenseGainCache<true>>>(ctx);

case GainCacheStrategy::LARGE_K:
return std::make_unique<FMRefiner<DenseGainCache<false>>>(ctx);

#ifdef KAMINPAR_EXPERIMENTAL
case GainCacheStrategy::SPARSE:
return std::make_unique<FMRefiner<SparseGainCache<true>>>(ctx);
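The refiner factory reuses the dense gain cache for the new strategy and only flips its boolean template parameter: DenseGainCache<true> for the exact dense strategy, DenseGainCache<false> for largek. Judging from the trait discussed in dense_gain_cache.h below, the parameter appears to control whether the cache iterates over non-adjacent blocks. The dispatch pattern itself, reduced to a self-contained toy with made-up names:

#include <memory>

enum class ToyStrategy { DENSE, LARGE_K };

// Stand-ins for DenseGainCache / FMRefiner, illustrating the compile-time selection only.
template <bool kIteratesNonadjacentBlocks> struct ToyCache {};

struct ToyRefinerBase {
  virtual ~ToyRefinerBase() = default;
};
template <typename Cache> struct ToyRefiner : ToyRefinerBase {};

std::unique_ptr<ToyRefinerBase> make_toy_refiner(const ToyStrategy strategy) {
  switch (strategy) {
  case ToyStrategy::DENSE:
    return std::make_unique<ToyRefiner<ToyCache<true>>>(); // exact: visits all blocks
  case ToyStrategy::LARGE_K:
    return std::make_unique<ToyRefiner<ToyCache<false>>>(); // inexact: skips non-adjacent blocks
  }
  return nullptr;
}
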
10 changes: 7 additions & 3 deletions kaminpar-shm/refinement/gains/dense_gain_cache.h
@@ -565,11 +565,15 @@ template <typename _DeltaPartitionedGraph, typename _GainCache> class DenseDelta
using DeltaPartitionedGraph = _DeltaPartitionedGraph;
using GainCache = _GainCache;

// Delta gain caches can only be used with GainCaches that iterate over all blocks, since there
// might be new connections to non-adjacent blocks in the delta graph.
static_assert(GainCache::kIteratesNonadjacentBlocks);
// Delta gain caches should only be used with GainCaches that iterate over all blocks, since there
// might be new connections to non-adjacent blocks in the delta graph. These connections might be
// missed if the gain cache does not iterate over all blocks.
constexpr static bool kIteratesExactGains = GainCache::kIteratesExactGains;

// We do not strictly enforce this to offer an easy large k FM variation.
// @todo can we implement a correct delta gain cache that does not iterate over all blocks?
// static_assert(GainCache::kIteratesNonadjacentBlocks);

DenseDeltaGainCache(const GainCache &gain_cache, const DeltaPartitionedGraph &d_graph)
: _k(d_graph.k()),
_gain_cache(gain_cache) {}
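Relaxing the static_assert is what makes the DenseDeltaGainCache plus non-iterating gain cache combination legal at all: in the delta graph a node can become connected to a block it was not adjacent to before, and a gain cache that never looks at non-adjacent blocks simply never reports that connection, so its answers turn into an approximation. A self-contained toy (not KaMinPar code) that shows the failure mode the comment describes:

#include <cassert>
#include <unordered_map>

// Toy model of one node's connectivity: block id -> edge weight towards that block.
// With kIteratesNonadjacentBlocks == true, an update may create entries for previously
// non-adjacent blocks; with false, updates only touch blocks that are already tracked,
// so a connection that appears only in the delta graph is silently missed.
template <bool kIteratesNonadjacentBlocks> struct ToyNodeGains {
  std::unordered_map<int, int> weight_to_block;

  void add_connection(const int block, const int weight) {
    if constexpr (kIteratesNonadjacentBlocks) {
      weight_to_block[block] += weight; // creates the entry if the block is new
    } else {
      auto it = weight_to_block.find(block);
      if (it != weight_to_block.end()) {
        it->second += weight; // existing blocks only: a newly adjacent block is dropped
      }
    }
  }

  int gain_towards(const int block) const {
    const auto it = weight_to_block.find(block);
    return it == weight_to_block.end() ? 0 : it->second;
  }
};

int main() {
  ToyNodeGains<true> exact;
  ToyNodeGains<false> inexact;
  exact.add_connection(3, 2);   // block 3 becomes adjacent only now
  inexact.add_connection(3, 2);
  assert(exact.gain_towards(3) == 2);   // new connection is visible
  assert(inexact.gain_towards(3) == 0); // new connection was missed
}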
