refactor(shm-fm): remove dependency on Sparsehash
DanielSeemaier committed Mar 12, 2024
1 parent 3737b97 commit 981858a
Showing 3 changed files with 33 additions and 28 deletions.
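Background: google::dense_hash_map from Sparsehash requires reserving a sentinel key via set_empty_key() before the first insertion, which is why the constructor in delta_partitioned_graph.h (below) contained the set_empty_key(kInvalidBlockID) and set_empty_key(kInvalidNodeID) calls. Replacing Sparsehash with the in-tree DynamicFlatMap removes both the external dependency and this initialization step. A minimal stand-alone illustration of the old requirement, assuming Sparsehash is installed:

#include <google/dense_hash_map>

int main() {
  google::dense_hash_map<int, int> map;
  map.set_empty_key(-1); // mandatory: a key that will never be inserted
  map[42] = 1;           // inserting before set_empty_key() asserts at runtime
  return 0;
}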
9 changes: 8 additions & 1 deletion kaminpar-common/datastructures/dynamic_map.h
@@ -4,7 +4,6 @@
 #include <cstdint>
 #include <cstdlib>
 #include <cstring>
-#include <iostream>
 #include <limits>
 #include <memory>
 
@@ -147,6 +146,14 @@ class DynamicFlatMap final : public DynamicMapBase<Key, Value, DynamicFlatMap<Ke
 
   ~DynamicFlatMap() = default;
 
+  template <typename Lambda> void for_each(Lambda &&lambda) {
+    for (std::size_t i = 0; i < _capacity; ++i) {
+      if (_elements[i].timestamp == _timestamp) {
+        lambda(_elements[i].key, _elements[i].value);
+      }
+    }
+  }
+
 private:
   [[nodiscard]] std::size_t size_in_bytes_impl() const {
     return _capacity * sizeof(MapElement);
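For reference, a minimal usage sketch of the new DynamicFlatMap::for_each(). This is a hypothetical example, not part of the commit; it assumes the map lives in the kaminpar namespace and supports operator[] insertion like the other kaminpar-common maps:

#include <cstdint>

#include "kaminpar-common/datastructures/dynamic_map.h"

std::int64_t sum_deltas() {
  kaminpar::DynamicFlatMap<std::uint32_t, std::int64_t> deltas;
  deltas[3] += 10; // assumed: operator[] default-constructs the value on first access
  deltas[7] -= 4;

  // for_each() visits exactly the slots whose timestamp matches the current
  // generation, i.e., every (key, value) pair inserted since the last clear().
  std::int64_t total = 0;
  deltas.for_each([&](const std::uint32_t /* key */, const std::int64_t value) {
    total += value;
  });
  return total;
}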
46 changes: 22 additions & 24 deletions kaminpar-shm/datastructures/delta_partitioned_graph.h
@@ -7,14 +7,11 @@
 ******************************************************************************/
 #pragma once
 
-#include <google/dense_hash_map>
 #include <type_traits>
 
 #include "kaminpar-shm/datastructures/graph.h"
 #include "kaminpar-shm/datastructures/graph_delegate.h"
 #include "kaminpar-shm/datastructures/partitioned_graph.h"
 #include "kaminpar-shm/kaminpar.h"
 
+#include "kaminpar-common/datastructures/dynamic_map.h"
 #include "kaminpar-common/datastructures/scalable_vector.h"
 #include "kaminpar-common/ranges.h"
 
@@ -37,14 +34,9 @@ class GenericDeltaPartitionedGraph : public GraphDelegate {
   GenericDeltaPartitionedGraph(const PartitionedGraph *p_graph)
       : GraphDelegate(&p_graph->graph()),
         _p_graph(p_graph) {
-    if constexpr (compact_block_weight_delta) {
-      _block_weights_delta.set_empty_key(kInvalidBlockID);
-    } else {
+    if constexpr (!compact_block_weight_delta) {
       _block_weights_delta.resize(_p_graph->k());
     }
-    if constexpr (allow_read_after_move) {
-      _partition_delta.set_empty_key(kInvalidNodeID);
-    }
   }
 
   [[nodiscard]] const PartitionedGraph &p_graph() const {
@@ -65,12 +57,8 @@ class GenericDeltaPartitionedGraph : public GraphDelegate {
 
   [[nodiscard]] inline BlockID block(const NodeID node) const {
     if constexpr (allow_read_after_move) {
-      const auto it = _partition_delta.find(node);
-      if (it != _partition_delta.end()) {
-        return it->second;
-      }
-
-      return _p_graph->block(node);
+      const auto *it = _partition_delta.get_if_contained(node);
+      return (it != _partition_delta.end()) ? *it : _p_graph->block(node);
     } else {
       KASSERT(
           std::find_if(
@@ -120,9 +108,9 @@ class GenericDeltaPartitionedGraph : public GraphDelegate {
     BlockWeight delta = 0;
 
     if constexpr (compact_block_weight_delta) {
-      const auto it = _block_weights_delta.find(block);
+      const auto *it = _block_weights_delta.get_if_contained(block);
       if (it != _block_weights_delta.end()) {
-        delta = it->second;
+        delta = *it;
       }
     } else {
       delta = _block_weights_delta[block];
@@ -131,8 +119,18 @@
     return _p_graph->block_weight(block) + delta;
   }
 
-  const auto &delta() const {
-    return _partition_delta;
+  template <typename Lambda> void for_each(Lambda &&lambda) {
+    if constexpr (allow_read_after_move) {
+      _partition_delta.for_each(std::forward<Lambda>(lambda));
+    } else {
+      for (const auto &[moved_node, moved_to] : _partition_delta) {
+        lambda(moved_node, moved_to);
+      }
+    }
+  }
+
+  std::size_t size() const {
+    return _partition_delta.size();
   }
 
   void clear() {
@@ -151,16 +149,16 @@
   // Depending on the configuration, use a hash map to be memory efficient,
   // otherwise store the block weight deltas in vector (i.e., O(P * k) memory).
   std::conditional_t<
-      compact_block_weight_delta,
-      google::dense_hash_map<BlockID, BlockWeight>,
+      compact_block_weight_delta, // false
+      DynamicFlatMap<BlockID, BlockWeight>,
       scalable_vector<BlockWeight>>
       _block_weights_delta;
 
   // If we need random access to the partition delta, use a hash map. Otherwise,
   // we can just store the moves in a vector.
   std::conditional_t<
-      allow_read_after_move,
-      google::dense_hash_map<NodeID, BlockID>,
+      allow_read_after_move, // true
+      DynamicFlatMap<NodeID, BlockID>,
       std::vector<DeltaEntry>>
       _partition_delta;
 };
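The two members above use a compile-time container-selection pattern: std::conditional_t picks the backing container from a boolean template parameter, so only the chosen branch exists in a given instantiation. A self-contained sketch of the same idea with standard-library stand-ins (std::unordered_map in place of DynamicFlatMap, hypothetical type aliases):

#include <cstdint>
#include <type_traits>
#include <unordered_map>
#include <vector>

using BlockID = std::uint32_t;
using BlockWeight = std::int64_t;

template <bool compact> struct BlockWeightDeltas {
  // compact == true:  hash map, memory proportional to the number of touched blocks;
  // compact == false: dense vector of size k, O(1) unchecked access.
  std::conditional_t<
      compact,
      std::unordered_map<BlockID, BlockWeight>, // stand-in for DynamicFlatMap
      std::vector<BlockWeight>>
      _deltas;
};

// Both instantiations coexist; the choice is resolved at compile time.
BlockWeightDeltas<true> compact_deltas;
BlockWeightDeltas<false> dense_deltas;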
6 changes: 3 additions & 3 deletions kaminpar-shm/refinement/fm/fm_refiner.cc
@@ -348,7 +348,7 @@ EdgeWeight LocalizedFMRefiner<GainCache, DeltaPartitionedGraph>::run_batch() {
 _shared.node_tracker.set(node, NodeTracker::MOVED_GLOBALLY);
 IFSTATS(++stats.num_committed_moves);
 
-for (const auto &[moved_node, moved_to] : _d_graph.delta()) {
+_d_graph.for_each([&](const NodeID moved_node, const BlockID moved_to) {
   const BlockID moved_from = _p_graph.block(moved_node);
 
   // The order of the moves in the delta graph is not necessarily correct (depending on
@@ -367,7 +367,7 @@ EdgeWeight LocalizedFMRefiner<GainCache, DeltaPartitionedGraph>::run_batch() {
   _shared.node_tracker.set(moved_node, NodeTracker::MOVED_GLOBALLY);
   _p_graph.set_block(moved_node, moved_to);
   IFSTATS(++stats.num_committed_moves);
-}
+});
 
 if (_record_applied_moves) {
   _applied_moves.push_back(fm::AppliedMove{
@@ -436,7 +436,7 @@ EdgeWeight LocalizedFMRefiner<GainCache, DeltaPartitionedGraph>::run_batch() {
 
 // Unlock all nodes that were touched but not moved, or nodes that were only moved in the
 // thread-local delta graph
-IFSTATS(stats.num_discarded_moves += _d_graph.delta().size());
+IFSTATS(stats.num_discarded_moves += _d_graph.size());
 for (const NodeID touched_node : _touched_nodes) {
   unlock_touched_node(touched_node);
 }
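The call-site rewrite above reflects the move from an iterable container to a visitor API: google::dense_hash_map exposes begin()/end() and thus supported the old range-for over _d_graph.delta(), while the new DynamicFlatMap is consumed through for_each(). A simplified stand-in (hypothetical types, not the real refiner code) showing the resulting call style:

#include <cstdint>
#include <utility>
#include <vector>

using NodeID = std::uint32_t;
using BlockID = std::uint32_t;

struct DeltaGraphStandIn {
  std::vector<std::pair<NodeID, BlockID>> moves;

  // Visitor-style iteration: the container drives the loop and invokes the
  // callback once per recorded (node, block) move.
  template <typename Lambda> void for_each(Lambda &&lambda) const {
    for (const auto &[node, block] : moves) {
      lambda(node, block);
    }
  }
};

void commit_moves(const DeltaGraphStandIn &d_graph, std::vector<BlockID> &partition) {
  d_graph.for_each([&](const NodeID moved_node, const BlockID moved_to) {
    partition[moved_node] = moved_to; // apply each move to the global partition
  });
}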
