fix: track blocks that are only adjacent in the delta gain cache when using the large-k gain cache
DanielSeemaier committed May 7, 2024
1 parent ac74341 commit b604744
Showing 4 changed files with 114 additions and 14 deletions.
6 changes: 3 additions & 3 deletions CMakeLists.txt
@@ -303,6 +303,9 @@ message(" dist::BlockWeight = std::int64_t")
## Search and fetch dependencies ##
################################################################################

# Google Sparsehash
find_package(Sparsehash REQUIRED)

if (KAMINPAR_BUILD_WITH_CCACHE)
find_program(CCACHE_PROGRAM ccache)
if (CCACHE_PROGRAM)
@@ -326,9 +329,6 @@ if (KAMINPAR_BUILD_DISTRIBUTED)
message(WARNING "MPI not available: cannot build the distributed partitioner")
set(KAMINPAR_BUILD_DISTRIBUTED OFF)
endif ()

# Google Sparsehash
find_package(Sparsehash REQUIRED)
endif ()

if (KAMINPAR_ASSERTION_LEVEL STREQUAL "none")
2 changes: 1 addition & 1 deletion kaminpar-shm/CMakeLists.txt
@@ -3,7 +3,7 @@ file(GLOB_RECURSE KAMINPAR_SHM_SOURCE_FILES CONFIGURE_DEPENDS

add_library(kaminpar_shm ${KAMINPAR_SHM_SOURCE_FILES})
target_include_directories(kaminpar_shm PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../")
target_link_libraries(kaminpar_shm PUBLIC kaminpar_common)
target_link_libraries(kaminpar_shm PUBLIC kaminpar_common Sparsehash::Sparsehash)

# If we can find Mt-KaHyPar, make it available as an option for refinement
if (KAMINPAR_BUILD_WITH_MTKAHYPAR)
4 changes: 2 additions & 2 deletions kaminpar-shm/refinement/fm/fm_refiner.cc
@@ -36,10 +36,10 @@ SET_STATISTICS_FROM_GLOBAL();
std::unique_ptr<Refiner> create_fm_refiner(const Context &ctx) {
switch (ctx.refinement.kway_fm.gain_cache_strategy) {
case GainCacheStrategy::DENSE:
return std::make_unique<FMRefiner<DenseGainCache<true>>>(ctx);
return std::make_unique<FMRefiner<DenseGainCache<true, DenseDeltaGainCache>>>(ctx);

case GainCacheStrategy::LARGE_K:
return std::make_unique<FMRefiner<DenseGainCache<false>>>(ctx);
return std::make_unique<FMRefiner<DenseGainCache<false, LargeKDenseDeltaGainCache>>>(ctx);

#ifdef KAMINPAR_EXPERIMENTAL
case GainCacheStrategy::SPARSE:
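The switch above now selects both halves of the gain cache pairing at once: the boolean argument controls whether the static cache iterates over non-adjacent blocks, and the new template-template argument names the delta gain cache to pair with it. Below is a minimal sketch of that wiring with simplified stand-in types; DenseGainCacheSketch, DenseStrategy and LargeKStrategy are illustrative names, not identifiers from the repository.

#include <type_traits>

// Simplified stand-ins for the two delta gain cache flavours (the real classes
// take the delta partitioned graph and the owning gain cache as arguments).
template <typename DeltaGraph, typename GainCache> class DenseDeltaGainCache {};
template <typename DeltaGraph, typename GainCache> class LargeKDenseDeltaGainCache {};

// Sketch of the new DenseGainCache signature: the delta cache is injected via a
// template-template parameter instead of being hard-coded.
template <
    bool iterate_nonadjacent_blocks,
    template <typename, typename> typename DeltaGainCache>
class DenseGainCacheSketch {
public:
  // The static cache exports its matching delta cache type, mirroring the
  // "using DeltaCache = DeltaGainCache<DeltaPartitionedGraph, Self>;" line in
  // dense_gain_cache.h below.
  template <typename DeltaGraph>
  using DeltaCache = DeltaGainCache<DeltaGraph, DenseGainCacheSketch>;
};

// Mirrors create_fm_refiner(): DENSE pairs the all-blocks cache with
// DenseDeltaGainCache, LARGE_K pairs the adjacent-blocks-only cache with
// LargeKDenseDeltaGainCache.
using DenseStrategy = DenseGainCacheSketch<true, DenseDeltaGainCache>;
using LargeKStrategy = DenseGainCacheSketch<false, LargeKDenseDeltaGainCache>;

static_assert(std::is_same_v<
    DenseStrategy::DeltaCache<int>,
    DenseDeltaGainCache<int, DenseStrategy>>);
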
116 changes: 108 additions & 8 deletions kaminpar-shm/refinement/gains/dense_gain_cache.h
@@ -20,6 +20,7 @@
******************************************************************************/
#pragma once

#include <google/dense_hash_map>
#include <limits>

#include <tbb/enumerable_thread_specific.h>
@@ -40,14 +41,17 @@

namespace kaminpar::shm {
template <typename DeltaPartitionedGraph, typename GainCache> class DenseDeltaGainCache;
template <typename DeltaPartitionedGraph, typename GainCache> class LargeKDenseDeltaGainCache;

template <bool iterate_nonadjacent_blocks = true, bool iterate_exact_gains = false>
template <
bool iterate_nonadjacent_blocks = true,
template <typename, typename> typename DeltaGainCache = DenseDeltaGainCache,
bool iterate_exact_gains = false>
class DenseGainCache {
SET_DEBUG(false);
SET_STATISTICS_FROM_GLOBAL();

using Self = DenseGainCache<iterate_nonadjacent_blocks, iterate_exact_gains>;
template <typename, typename> friend class DenseDeltaGainCache;
using Self = DenseGainCache<iterate_nonadjacent_blocks, DeltaGainCache, iterate_exact_gains>;

// Abuse MSB bit in the _weighted_degrees[] array for locking
constexpr static UnsignedEdgeWeight kWeightedDegreeLock =
@@ -85,7 +89,7 @@ class DenseGainCache {

public:
template <typename DeltaPartitionedGraph>
using DeltaCache = DenseDeltaGainCache<DeltaPartitionedGraph, Self>;
using DeltaCache = DeltaGainCache<DeltaPartitionedGraph, Self>;

// gains() will iterate over all blocks, including those not adjacent to the node.
constexpr static bool kIteratesNonadjacentBlocks = iterate_nonadjacent_blocks;
@@ -569,10 +573,7 @@ template <typename _DeltaPartitionedGraph, typename _GainCache> class DenseDelta
// might be new connections to non-adjacent blocks in the delta graph. These connections might be
// missed if the gain cache does not iterate over all blocks.
constexpr static bool kIteratesExactGains = GainCache::kIteratesExactGains;

// We do not strictly enforce this to offer an easy large k FM variation.
// @todo can we implement a correct delta gain cache that does not iterate over all blocks?
// static_assert(GainCache::kIteratesNonadjacentBlocks);
static_assert(GainCache::kIteratesNonadjacentBlocks);

DenseDeltaGainCache(const GainCache &gain_cache, const DeltaPartitionedGraph &d_graph)
: _k(d_graph.k()),
@@ -636,4 +637,103 @@ template <typename _DeltaPartitionedGraph, typename _GainCache> class DenseDelta
const GainCache &_gain_cache;
DynamicFlatMap<std::size_t, EdgeWeight> _gain_cache_delta;
};

template <typename _DeltaPartitionedGraph, typename _GainCache> class LargeKDenseDeltaGainCache {
public:
using DeltaPartitionedGraph = _DeltaPartitionedGraph;
using GainCache = _GainCache;

constexpr static bool kIteratesExactGains = GainCache::kIteratesExactGains;

// If k is large, iterating over all blocks becomes very expensive -- this delta gain cache should
// only be used with a gain cache that iterates over adjacent blocks.
static_assert(!GainCache::kIteratesNonadjacentBlocks);

LargeKDenseDeltaGainCache(const GainCache &gain_cache, const DeltaPartitionedGraph &d_graph)
: _k(d_graph.k()),
_gain_cache(gain_cache) {
_adjacent_blocks_delta.set_empty_key(kInvalidNodeID);
_adjacent_blocks_delta.set_deleted_key(kInvalidNodeID - 1);
}

[[nodiscard]] KAMINPAR_INLINE EdgeWeight conn(const NodeID node, const BlockID block) const {
return _gain_cache.conn(node, block) + conn_delta(node, block);
}

[[nodiscard]] KAMINPAR_INLINE EdgeWeight
gain(const NodeID node, const BlockID from, const BlockID to) const {
return _gain_cache.gain(node, from, to) + conn_delta(node, to) - conn_delta(node, from);
}

[[nodiscard]] KAMINPAR_INLINE std::pair<EdgeWeight, EdgeWeight>
gain(const NodeID node, const BlockID b_node, const std::pair<BlockID, BlockID> &targets) {
return {gain(node, b_node, targets.first), gain(node, b_node, targets.second)};
}

template <typename Lambda>
KAMINPAR_INLINE void gains(const NodeID node, const BlockID from, Lambda &&lambda) const {
const EdgeWeight conn_from_delta = kIteratesExactGains ? conn_delta(node, from) : 0;

_gain_cache.gains(node, from, [&](const BlockID to, auto &&gain) {
lambda(to, [&] { return gain() + conn_delta(node, to) - conn_from_delta; });
});

const auto it = _adjacent_blocks_delta.find(node);
if (it != _adjacent_blocks_delta.end()) {
for (const BlockID to : it->second) {
if (to != from) {
lambda(to, [&] {
return _gain_cache.gain(node, from, to) + conn_delta(node, to) - conn_from_delta;
});
}
}
}
}

KAMINPAR_INLINE void move(
const DeltaPartitionedGraph &d_graph,
const NodeID u,
const BlockID block_from,
const BlockID block_to
) {
for (const auto &[e, v] : d_graph.neighbors(u)) {
const EdgeWeight weight = d_graph.edge_weight(e);
_gain_cache_delta[index(v, block_from)] -= weight;

if (_gain_cache.conn(v, block_to) == 0 && conn_delta(v, block_to) == 0) {
auto &additional_blocks = _adjacent_blocks_delta[v];
if (std::find(additional_blocks.begin(), additional_blocks.end(), block_to) ==
additional_blocks.end()) {
additional_blocks.push_back(block_to);
}
}

_gain_cache_delta[index(v, block_to)] += weight;
}
}

KAMINPAR_INLINE void clear() {
_gain_cache_delta.clear();
_adjacent_blocks_delta.clear();
}

private:
[[nodiscard]] KAMINPAR_INLINE std::size_t index(const NodeID node, const BlockID block) const {
// Note: this increases running times substantially due to the shifts
// return index_sparse(node, block);

return 1ull * node * _k + block;
}

[[nodiscard]] KAMINPAR_INLINE EdgeWeight
conn_delta(const NodeID node, const BlockID block) const {
const auto it = _gain_cache_delta.get_if_contained(index(node, block));
return it != _gain_cache_delta.end() ? *it : 0;
}

BlockID _k;
const GainCache &_gain_cache;
DynamicFlatMap<std::size_t, EdgeWeight> _gain_cache_delta;
google::dense_hash_map<NodeID, std::vector<BlockID>> _adjacent_blocks_delta;
};
} // namespace kaminpar::shm
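
The new LargeKDenseDeltaGainCache pairs with a static gain cache that only reports blocks adjacent to a node in the unmodified partition, so moves recorded in the delta can create adjacencies the static cache never sees. To avoid missing those blocks, the class keeps a per-node list of blocks that became adjacent only through delta moves (_adjacent_blocks_delta) and visits them in gains() after delegating to the static cache. The following self-contained sketch isolates just that bookkeeping; it uses std::unordered_map instead of google::dense_hash_map, omits the connection-weight delta, and its names (AdjacentBlocksDelta, note_new_adjacency, for_each_extra_block) are illustrative rather than taken from the repository.

#include <algorithm>
#include <cstdint>
#include <unordered_map>
#include <vector>

using NodeID = std::uint32_t;
using BlockID = std::uint32_t;

// Minimal sketch of the "blocks adjacent only in the delta" bookkeeping.
class AdjacentBlocksDelta {
public:
  // Called when a delta move gives neighbor v its first connection to block_to,
  // i.e., when both the static gain cache and the delta report zero connection.
  void note_new_adjacency(const NodeID v, const BlockID block_to) {
    auto &blocks = _delta[v];
    if (std::find(blocks.begin(), blocks.end(), block_to) == blocks.end()) {
      blocks.push_back(block_to);
    }
  }

  // Called from gains(): after the static cache has visited the blocks adjacent
  // in the unmodified partition, also visit the blocks recorded here.
  template <typename Lambda>
  void for_each_extra_block(const NodeID node, const BlockID from, Lambda &&lambda) const {
    const auto it = _delta.find(node);
    if (it == _delta.end()) {
      return;
    }
    for (const BlockID to : it->second) {
      if (to != from) {
        lambda(to);
      }
    }
  }

  void clear() {
    _delta.clear();
  }

private:
  std::unordered_map<NodeID, std::vector<BlockID>> _delta;
};

In the actual move() above, a block is recorded for a neighbor v only when both _gain_cache.conn(v, block_to) and conn_delta(v, block_to) are zero, so the list stays restricted to adjacencies that genuinely did not exist before the delta.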
