Skip to content

Commit

Permalink
feat(bipartitioning): free memory used for bipartitioning directly after the last use (#28)
Browse files Browse the repository at this point in the history
  • Loading branch information
dsalwasser authored Nov 13, 2024
1 parent 03b9618 commit cf86741
Show file tree
Hide file tree
Showing 4 changed files with 48 additions and 24 deletions.
11 changes: 9 additions & 2 deletions kaminpar-shm/graphutils/subgraph_extractor.h
Original file line number Diff line number Diff line change
Expand Up @@ -66,10 +66,10 @@ struct SubgraphMemory {
const bool is_node_weighted = true,
const bool is_edge_weighted = true
) {
resize2(n, k, m, is_node_weighted ? n : 0, is_edge_weighted ? m : 0);
weighted_resize(n, k, m, is_node_weighted ? n : 0, is_edge_weighted ? m : 0);
}

void resize2(
void weighted_resize(
const NodeID n,
const BlockID k,
const EdgeID m,
Expand All @@ -82,6 +82,13 @@ struct SubgraphMemory {
edge_weights.resize(m_weights);
}

// Releases all memory held by this SubgraphMemory so it can be reclaimed
// as soon as bipartitioning no longer needs it. Afterwards the struct is
// empty (see empty()) and must be resized again before reuse.
void free() {
  // The four arrays are independent buffers; release each in turn.
  edge_weights.free();
  node_weights.free();
  edges.free();
  nodes.free();
}

// Returns true if no memory is currently allocated. The `nodes` array is
// used as the indicator: resize() populates it and free() releases it,
// so its emptiness tracks the whole struct's allocation state.
[[nodiscard]] bool empty() const {
return nodes.empty();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,10 @@ class InitialBipartitionerWorkerPool {
pool.push_back(std::move(initial_partitioner));
}

// Frees the per-thread pools of initial bipartitioners by clearing the
// enumerable_thread_specific container; each thread's vector (and the
// bipartitioners it owns) is destroyed. Called once bipartitioning is
// finished to release the memory early.
void free() {
_pool_ets.clear();
}

private:
const Context &_ctx;
tbb::enumerable_thread_specific<std::vector<InitialMultilevelBipartitioner>> _pool_ets;
Expand Down
52 changes: 32 additions & 20 deletions kaminpar-shm/partitioning/deep/deep_multilevel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

#include "kaminpar-common/console_io.h"
#include "kaminpar-common/heap_profiler.h"
#include "kaminpar-common/logger.h"
#include "kaminpar-common/timer.h"

namespace kaminpar::shm {
Expand Down Expand Up @@ -52,23 +53,7 @@ PartitionedGraph DeepMultilevelPartitioner::partition() {
PartitionedGraph p_graph = initial_partition(c_graph);

SCOPED_HEAP_PROFILER("Uncoarsening");
bool refined = false;
p_graph = uncoarsen(std::move(p_graph), refined);
if (!refined || p_graph.k() < _input_ctx.partition.k) {
SCOPED_HEAP_PROFILER("Toplevel");

LOG;
LOG << "Toplevel:";
LOG << " Number of nodes: " << p_graph.n() << " | Number of edges: " << p_graph.m();

if (!refined) {
refine(p_graph);
}
if (p_graph.k() < _input_ctx.partition.k) {
extend_partition(p_graph, _input_ctx.partition.k);
refine(p_graph);
}
}
p_graph = uncoarsen(std::move(p_graph));

return p_graph;
}
Expand Down Expand Up @@ -127,14 +112,23 @@ void DeepMultilevelPartitioner::extend_partition(PartitionedGraph &p_graph, cons
);
}

if (_last_initial_partitioning_level == _coarsener->level()) {
SCOPED_TIMER("Deallocation");
_subgraph_memory.free();
_extraction_mem_pool_ets.clear();
_tmp_extraction_mem_pool_ets.clear();
_bipartitioner_pool.free();
}

if (_print_metrics) {
SCOPED_TIMER("Partition metrics");
LOG << " Cut: " << metrics::edge_cut(p_graph);
LOG << " Imbalance: " << metrics::imbalance(p_graph);
}
}

PartitionedGraph DeepMultilevelPartitioner::uncoarsen(PartitionedGraph p_graph, bool &refined) {
PartitionedGraph DeepMultilevelPartitioner::uncoarsen(PartitionedGraph p_graph) {
bool refined = false;
while (!_coarsener->empty()) {
SCOPED_HEAP_PROFILER("Level", std::to_string(_coarsener->level() - 1));

Expand All @@ -160,6 +154,22 @@ PartitionedGraph DeepMultilevelPartitioner::uncoarsen(PartitionedGraph p_graph,
}
}

if (!refined || p_graph.k() < _input_ctx.partition.k) {
SCOPED_HEAP_PROFILER("Toplevel");

LOG;
LOG << "Toplevel:";
LOG << " Number of nodes: " << p_graph.n() << " | Number of edges: " << p_graph.m();

if (!refined) {
refine(p_graph);
}
if (p_graph.k() < _input_ctx.partition.k) {
extend_partition(p_graph, _input_ctx.partition.k);
refine(p_graph);
}
}

return p_graph;
}

Expand All @@ -172,7 +182,7 @@ const Graph *DeepMultilevelPartitioner::coarsen() {
NodeWeight prev_c_graph_total_node_weight = c_graph->total_node_weight();

bool shrunk = true;
bool search_subgraph_memory_size = !_input_ctx.partitioning.use_lazy_subgraph_memory;
bool search_subgraph_memory_size = true;

while (shrunk && c_graph->n() > initial_partitioning_threshold()) {
// If requested, dump graph before each coarsening step + after coarsening
Expand All @@ -199,6 +209,8 @@ const Graph *DeepMultilevelPartitioner::coarsen() {
partitioning::compute_k_for_n(c_graph->n(), _input_ctx) < _input_ctx.partition.k) {
search_subgraph_memory_size = false;

_last_initial_partitioning_level = _coarsener->level() - 1;

_subgraph_memory_n = prev_c_graph_n;
_subgraph_memory_m = prev_c_graph_m;

Expand Down Expand Up @@ -269,7 +281,7 @@ PartitionedGraph DeepMultilevelPartitioner::initial_partition(const Graph *graph
SCOPED_HEAP_PROFILER("SubgraphMemory resize");
SCOPED_TIMER("Allocation");

_subgraph_memory.resize2(
_subgraph_memory.weighted_resize(
_subgraph_memory_n,
_input_ctx.partition.k,
_subgraph_memory_m,
Expand Down
5 changes: 3 additions & 2 deletions kaminpar-shm/partitioning/deep/deep_multilevel.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ class DeepMultilevelPartitioner : public Partitioner {
PartitionedGraph partition() final;

private:
PartitionedGraph uncoarsen(PartitionedGraph p_graph, bool &refined);
PartitionedGraph uncoarsen(PartitionedGraph p_graph);

inline PartitionedGraph uncoarsen_once(PartitionedGraph p_graph);

Expand All @@ -55,10 +55,11 @@ class DeepMultilevelPartitioner : public Partitioner {
std::unique_ptr<Coarsener> _coarsener;
std::unique_ptr<Refiner> _refiner;

graph::SubgraphMemory _subgraph_memory;
std::size_t _last_initial_partitioning_level;
NodeID _subgraph_memory_n, _subgraph_memory_n_weights;
EdgeID _subgraph_memory_m, _subgraph_memory_m_weights;

graph::SubgraphMemory _subgraph_memory;
partitioning::SubgraphMemoryEts _extraction_mem_pool_ets;
partitioning::TemporarySubgraphMemoryEts _tmp_extraction_mem_pool_ets;
InitialBipartitionerWorkerPool _bipartitioner_pool;
Expand Down

0 comments on commit cf86741

Please sign in to comment.