CArena: Implement alloc_in_place
First, we check whether CArena has free space adjacent to the given pointer
that can be used to extend it into the specified size range. If that is
unsuccessful, we fall back to the normal alloc function to allocate memory.

When in-place allocation succeeds during vector resizing, we avoid the cost
of copying data to a new allocation address.
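
For context, a caller such as a growable buffer can use the new interface roughly as sketched below. The Buffer struct and grow function are hypothetical illustrations, not part of this commit; only CArena::alloc_in_place and CArena::free are the real entry points.

#include <AMReX_CArena.H>
#include <cstring>
#include <utility>

// Hypothetical caller: grow a raw byte buffer managed by a CArena,
// skipping the copy when the block can be extended in-place.
struct Buffer {
    char*       data     = nullptr;
    std::size_t size     = 0;  // bytes currently in use
    std::size_t capacity = 0;  // bytes allocated
};

void grow (amrex::CArena& arena, Buffer& buf,
           std::size_t szmin, std::size_t szmax)
{
    // Ask for at least szmin bytes, ideally szmax bytes.
    auto [p, actual] = arena.alloc_in_place(buf.data, szmin, szmax);
    if (p == buf.data) {
        // Extended in-place: the existing data stay where they are.
        buf.capacity = actual;
    } else {
        // A fresh block was returned: copy the live bytes and free the old one.
        if (buf.data) {
            std::memcpy(p, buf.data, buf.size);
            arena.free(buf.data);
        }
        buf.data     = static_cast<char*>(p);
        buf.capacity = actual;
    }
}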
WeiqunZhang committed Jul 18, 2023
1 parent 10b6cb2 commit aa63ff5
Showing 2 changed files with 91 additions and 1 deletion.
14 changes: 14 additions & 0 deletions Src/Base/AMReX_CArena.H
@@ -47,6 +47,18 @@ public:
//! Allocate some memory.
[[nodiscard]] void* alloc (std::size_t nbytes) final;

/**
* \brief Try to allocate in-place by extending the capacity of the given pointer.
*/
[[nodiscard]] std::pair<void*,std::size_t>
alloc_in_place (void* pt, std::size_t szmin, std::size_t szmax) final;

/**
* \brief Try to shrink in-place.
*/
[[nodiscard]] void*
shrink_in_place (void* pt, std::size_t sz) final;

/**
* \brief Free up allocated memory. Merge neighboring free memory chunks
* into largest possible chunk.
@@ -87,6 +99,8 @@ public:

protected:

void* alloc_protected (std::size_t nbytes);

std::size_t freeUnused_protected () final;

//! The nodes in our free list and block list.
78 changes: 77 additions & 1 deletion Src/Base/AMReX_CArena.cpp
@@ -42,9 +42,13 @@ void*
CArena::alloc (std::size_t nbytes)
{
std::lock_guard<std::mutex> lock(carena_mutex);

nbytes = Arena::align(nbytes == 0 ? 1 : nbytes);
return alloc_protected(nbytes);
}

void*
CArena::alloc_protected (std::size_t nbytes)
{
MemStat* stat = nullptr;
#ifdef AMREX_TINY_PROFILING
if (m_do_profiling) {
@@ -127,6 +131,78 @@ CArena::alloc (std::size_t nbytes)
return vp;
}

std::pair<void*,std::size_t>
CArena::alloc_in_place (void* pt, std::size_t szmin, std::size_t szmax)
{
std::lock_guard<std::mutex> lock(carena_mutex);

std::size_t nbytes_max = Arena::align(szmax == 0 ? 1 : szmax);

if (pt != nullptr) { // Try to allocate in-place first
auto busy_it = m_busylist.find(Node(pt,nullptr,0));
AMREX_ALWAYS_ASSERT(busy_it != m_busylist.end());
AMREX_ASSERT(m_freelist.find(*busy_it) == m_freelist.end());

if (busy_it->size() >= szmax) {
return std::make_pair(pt, busy_it->size());
}

void* next_block = (char*)pt + busy_it->size();
auto next_it = m_freelist.find(Node(next_block,nullptr,0));
if (next_it != m_freelist.end() && busy_it->coalescable(*next_it)) {
std::size_t total_size = busy_it->size() + next_it->size();
if (total_size >= szmax) {
// Must use nbytes_max instead of szmax for alignment.
std::size_t new_size = std::min(total_size, nbytes_max);
std::size_t left_size = total_size - new_size;
if (left_size <= 64) {
m_freelist.erase(next_it);
new_size = total_size;
} else {
auto& free_node = const_cast<Node&>(*next_it);
free_node.block((char*)pt + new_size);
free_node.size(left_size);
}
std::size_t extra_size = new_size - busy_it->size();
#ifdef AMREX_TINY_PROFILING
if (m_do_profiling) {
// xxxxx TODO: need to store the return value in *busy_it
TinyProfiler::memory_alloc(extra_size, m_profiling_stats);
}
#endif
m_actually_used += extra_size;
const_cast<Node&>(*busy_it).size(new_size);
return std::make_pair(pt, new_size);
} else if (total_size >= szmin) {
m_freelist.erase(next_it);
std::size_t extra_size = total_size - busy_it->size();
#ifdef AMREX_TINY_PROFILING
if (m_do_profiling) {
// xxxxx TODO: need to store the return value in *busy_it
TinyProfiler::memory_alloc(extra_size, m_profiling_stats);
}
#endif
m_actually_used += extra_size;
const_cast<Node&>(*busy_it).size(total_size);
return std::make_pair(pt, total_size);
}
}

if (busy_it->size() >= szmin) {
return std::make_pair(pt, busy_it->size());
}
}

void* newp = alloc_protected(nbytes_max);
return std::make_pair(newp, nbytes_max);
}

void*
CArena::shrink_in_place (void* /*pt*/, std::size_t sz)
{
return alloc(sz); // xxxxx TODO
}

void
CArena::free (void* vp)
{
