appendallocator.cc
// Copyright (c) 2012-2014 Konstantin Isakov <[email protected]> and ZBackup contributors, see CONTRIBUTORS
// Part of ZBackup. Licensed under GNU GPLv2 or later + OpenSSL, see LICENSE

#include <stdlib.h>
#include <new>

#include "appendallocator.hh"

AppendAllocator::AppendAllocator( unsigned blockSize_, unsigned granularity ):
  alignMask( granularity - 1 ),
  // We may decide to enlarge the block to make sure it is a multiple of
  // granularity. An improperly sized block would just waste the leftover
  // bytes
  blockSize( ( blockSize_ + alignMask ) & ~alignMask ), leftInBlock( -1 )
{
}
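
// Note (added, not in the original source): the alignMask arithmetic relies
// on granularity being a power of two. ( x + alignMask ) & ~alignMask rounds
// x up to the next multiple of granularity only in that case, e.g. with
// granularity 16 ( alignMask 15 ), a size of 5 becomes ( 5 + 15 ) & ~15 == 16.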

char * AppendAllocator::allocateBytes( unsigned size )
{
  // For zero-sized allocations, we always return a non-zero pointer. To do
  // that, we need to make sure we have one to return
  if ( !size && !blocks.empty() )
    return nextAvailable;

  if ( leftInBlock < (int) size )
  {
    // Need a new block
    unsigned toAllocate = ( size <= blockSize ? blockSize : size );

    char * p = (char *) malloc( toAllocate );
    if ( !p )
      throw std::bad_alloc();

    blocks.push_back( Record( p, nextAvailable, leftInBlock ) );
    leftInBlock = (int) toAllocate;
    nextAvailable = p;
  }

  // We may need to take more than was asked for to preserve granularity
  int toTake = (int) ( ( size + alignMask ) & ~alignMask );

  char * result = nextAvailable;
  nextAvailable += toTake;
  // leftInBlock can become negative here, as toTake can actually be larger
  // than the space left due to the added alignment
  leftInBlock -= toTake;

  return result;
}
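
// Note (added): each Record stores the previous block's nextAvailable and
// leftInBlock, so returnBytes() below can unwind the most recent
// allocations in LIFO order even across block boundaries.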

void AppendAllocator::returnBytes( unsigned size )
{
  if ( !size )
    return;

  // If we are pointing to the start of the block, we need to free it and go
  // back to the previous one
  if ( nextAvailable == blocks.back().data )
  {
    if ( blocks.size() == 1 )
      throw std::bad_alloc();

    free( blocks.back().data );
    leftInBlock = blocks.back().prevLeftInBlock;
    nextAvailable = blocks.back().prevNextAvailable;
    blocks.pop_back();
  }

  unsigned toTake = ( size + alignMask ) & ~alignMask;

  // There must be enough used bytes in the block
  if ( nextAvailable - blocks.back().data < (int) toTake )
    throw std::bad_alloc();

  nextAvailable -= toTake;
  leftInBlock += toTake;
}

void AppendAllocator::clear()
{
  for ( unsigned x = blocks.size(); x--; )
    free( blocks[ x ].data );
  blocks.clear();
  leftInBlock = -1;
}

AppendAllocator::~AppendAllocator()
{
  clear();
}
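
A minimal usage sketch (not part of the original file), assuming the interface declared in appendallocator.hh matches the definitions above: allocateBytes(), returnBytes() and clear(). Allocations come back aligned to the requested granularity; returnBytes() hands back only the most recent allocations, LIFO-style.

#include <cstring>
#include <cstdio>
#include "appendallocator.hh"

int main()
{
  AppendAllocator a( 65536, 4 ); // 64 KB blocks, 4-byte granularity

  // Carve two strings out of the same block; each lands on a 4-byte boundary
  char * s = a.allocateBytes( 6 );
  memcpy( s, "hello", 6 );

  char * t = a.allocateBytes( 6 );
  memcpy( t, "world", 6 );
  printf( "%s %s\n", s, t );

  a.returnBytes( 6 ); // give back the most recent allocation ("world")
  a.clear();          // frees every remaining block at once
  return 0;
}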