From 58934b69ee2c513f2e0c9578bca4c66bbe76c558 Mon Sep 17 00:00:00 2001 From: abxh Date: Sun, 13 Oct 2024 22:02:12 +0000 Subject: [PATCH] deploy: b50d6c287fea5d5e357aba233fef05673fd5e5a3 --- annotated.html | 5 +- arena_8h.html | 71 +- arena_8h_source.html | 257 ++--- classes.html | 8 +- dir_97aefd0d527b934f1d99a682da8fe6a9.html | 2 + doxygen_crawl.html | 24 +- examples.html | 1 - fhashtable_8h.html | 4 +- fhashtable_8h_source.html | 924 +++++++++--------- files.html | 5 +- fpqueue_8h_source.html | 2 +- fqueue_8h_source.html | 2 +- freelist_8h.html | 12 +- freelist_8h_source.html | 564 +++++------ fstack_8h_source.html | 2 +- functions.html | 9 +- functions_vars.html | 9 +- globals.html | 8 +- globals_func.html | 8 +- menudata.js | 6 +- pool_8h_source.html | 189 ++++ rbtree_8h_source.html | 2 +- search/all_1.js | 7 +- search/all_12.js | 5 +- search/all_3.js | 2 +- search/all_f.js | 8 +- search/classes_3.js | 3 +- search/classes_4.js | 4 + search/classes_5.js | 4 + search/functions_0.js | 2 +- search/functions_8.js | 5 + search/searchdata.js | 6 +- search/variables_1.js | 5 +- search/variables_2.js | 9 +- search/variables_3.js | 7 +- search/variables_4.js | 3 +- search/variables_5.js | 2 +- search/variables_6.js | 2 +- search/variables_7.js | 2 +- search/variables_8.js | 2 +- search/variables_9.js | 4 +- search/variables_a.js | 5 +- search/variables_b.js | 3 +- search/variables_c.js | 3 +- search/variables_d.js | 5 + structpool-members.html | 104 ++ structpool.html | 119 +++ ...tml => structpool__free__node-members.html | 15 +- structpool__free__node.html | 110 +++ structtemp__arena__state-members.html | 103 ++ structtemp__arena__state.html | 126 +++ 51 files changed, 1860 insertions(+), 929 deletions(-) create mode 100644 pool_8h_source.html create mode 100644 search/classes_4.js create mode 100644 search/classes_5.js create mode 100644 search/functions_8.js create mode 100644 search/variables_d.js create mode 100644 structpool-members.html create mode 100644 structpool.html rename examples_2freelist_2chars_example_8c-example.html => structpool__free__node-members.html (85%) create mode 100644 structpool__free__node.html create mode 100644 structtemp__arena__state-members.html create mode 100644 structtemp__arena__state.html diff --git a/annotated.html b/annotated.html index 9e10860d..7686672c 100644 --- a/annotated.html +++ b/annotated.html @@ -99,7 +99,10 @@  Cfreelist_headerFreelist header definition. This lies at the front of every block  CfstackGenerated stack struct type for a given VALUE_TYPE  Clist_nodeIntrusive list node structure - Crbtree_nodeGenerated red-black tree node struct type for a given KEY_TYPE + Cpool + Cpool_free_node + Crbtree_nodeGenerated red-black tree node struct type for a given KEY_TYPE + Ctemp_arena_stateTempory arena state struct diff --git a/arena_8h.html b/arena_8h.html index 13efb7a7..ef4f94ca 100644 --- a/arena_8h.html +++ b/arena_8h.html @@ -114,12 +114,21 @@ struct  arena  Arena data struct. More...
  +struct  temp_arena_state + Temporary arena state struct. More...
+  - - - + + + + + + + + + @@ -271,8 +280,8 @@

-

◆ arena_init()

+ +

◆ arena_init()

@@ -293,7 +302,7 @@

- +

Functions

static void arena_init (struct arena *self, const size_t len, unsigned char backing_buf[len])
 Initialize the arena.
 
struct temp_arena_state temp_arena_memory_begin (struct arena *arena_ptr)
 Store the arena state temporarily.
 
void temp_arena_memory_end (struct temp_arena_state temp)
 Restore the arena state.
 
static void arena_init (struct arena *self, const size_t len, unsigned char *backing_buf)
 Initialize the arena.
 
static void arena_deallocate_all (struct arena *self)
 Deallocate all allocations in the arena.
 
unsigned char backing_buf[len] )
unsigned char * backing_buf )
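For orientation, a minimal usage sketch of the updated signature (a hedged example: the buffer size is arbitrary and a C11 toolchain is assumed). arena_init pads the start of backing_buf up to alignof(max_align_t), so a few leading bytes of the buffer may be consumed as padding:

#include "arena.h"

static unsigned char backing_buf[1024];

int main(void)
{
    struct arena arena;
    arena_init(&arena, sizeof(backing_buf), backing_buf);

    /* arena_allocate returns a zeroed, max_align_t-aligned chunk, or NULL when out of space */
    int *xs = (int *)arena_allocate(&arena, 10 * sizeof(int));
    (void)xs;

    arena_deallocate_all(&arena); /* resets both offsets; the whole buffer is reusable */
    return 0;
}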
@@ -433,6 +442,56 @@

+

◆ temp_arena_memory_begin()

+ +
+
+ + + + + + + +
struct temp_arena_state temp_arena_memory_begin (struct arena * arena_ptr)
+
+ +

Store the arena state temporarily.

+
Parameters
+ + +
[in] arena_ptr The arena whose state to store.
+
+
+ +
+
+ +

◆ temp_arena_memory_end()

+ +
+
+ + + + + + + +
void temp_arena_memory_end (struct temp_arena_state temp)
+
+ +

Restore the arena state.

+
Parameters
+ + +
[in] temp Stored arena state.
+
+
+
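Taken together, the two calls give scoped scratch allocation. A short hedged sketch (arena set up as in the arena_init example above); everything allocated between begin and end is discarded in one step:

struct temp_arena_state temp = temp_arena_memory_begin(&arena);

char *scratch = (char *)arena_allocate(&arena, 256); /* temporary workspace */
/* ... use scratch ... */

temp_arena_memory_end(temp); /* restores prev_offset/curr_offset; scratch must not be used afterwards */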
diff --git a/arena_8h_source.html b/arena_8h_source.html index 9d6a5b3c..48b95d07 100644 --- a/arena_8h_source.html +++ b/arena_8h_source.html @@ -124,133 +124,162 @@
41};
42
-
-
50static inline void arena_init(struct arena *self, const size_t len, unsigned char backing_buf[len])
-
51{
-
52 assert(self);
-
53 assert(backing_buf);
-
54
-
55 const uintptr_t padding = calc_alignment_padding(alignof(max_align_t), (uintptr_t)backing_buf);
-
56
-
57 assert(len >= padding);
-
58
-
59 self->buf_ptr = &backing_buf[padding];
-
60 self->buf_len = len - padding;
-
61 self->curr_offset = 0;
-
62 self->prev_offset = 0;
-
63}
+
+
46struct temp_arena_state {
+
47 struct arena *arena_ptr;
+
48 size_t prev_offset;
+
49 size_t curr_offset;
+
50};
-
64
-
-
70static inline void arena_deallocate_all(struct arena *self)
-
71{
-
72 assert(self);
-
73
-
74 self->curr_offset = 0;
-
75 self->prev_offset = 0;
-
76}
+
51
+
+
57struct temp_arena_state temp_arena_memory_begin(struct arena *arena_ptr)
+
58{
+
59 struct temp_arena_state temp;
+
60 temp.arena_ptr = arena_ptr;
+
61 temp.prev_offset = arena_ptr->prev_offset;
+
62 temp.curr_offset = arena_ptr->curr_offset;
+
63 return temp;
+
64}
-
77
-
-
88static inline void *arena_allocate_aligned(struct arena *self, const size_t alignment, const size_t size)
-
89{
-
90 assert(self);
-
91
-
92 void *ptr = (void *)&self->buf_ptr[self->curr_offset];
-
93
-
94 size_t space_left = self->buf_len - (size_t)self->curr_offset;
-
95
-
96 const bool has_space_left = align(alignment, size, &ptr, &space_left);
-
97 if (!has_space_left) {
-
98 return NULL;
-
99 }
-
100
-
101 const uintptr_t relative_offset = (uintptr_t)((unsigned char *)ptr - &self->buf_ptr[0]);
-
102
-
103 self->prev_offset = relative_offset;
-
104 self->curr_offset = relative_offset + size;
-
105
-
106 memset(ptr, 0, size);
-
107
-
108 return ptr;
-
109}
+
65
+
+
71void temp_arena_memory_end(struct temp_arena_state temp)
+
72{
+
73 temp.arena_ptr->prev_offset = temp.prev_offset;
+
74 temp.arena_ptr->curr_offset = temp.curr_offset;
+
75}
+
+
76
+
+
84static inline void arena_init(struct arena *self, const size_t len, unsigned char *backing_buf)
+
85{
+
86 assert(self);
+
87 assert(backing_buf);
+
88
+
89 const uintptr_t padding = calc_alignment_padding(alignof(max_align_t), (uintptr_t)backing_buf);
+
90
+
91 assert(len >= padding);
+
92
+
93 self->buf_ptr = &backing_buf[padding];
+
94 self->buf_len = len - padding;
+
95 self->curr_offset = 0;
+
96 self->prev_offset = 0;
+
97}
-
110
-
-
120static inline void *arena_allocate(struct arena *self, const size_t size)
-
121{
-
122 assert(self);
-
123
-
124 return arena_allocate_aligned(self, alignof(max_align_t), size);
-
125}
+
98
+
+
104static inline void arena_deallocate_all(struct arena *self)
+
105{
+
106 assert(self);
+
107
+
108 self->curr_offset = 0;
+
109 self->prev_offset = 0;
+
110}
-
126
-
128static inline void *internal_arena_try_optimizing_w_prev_offset(struct arena *self, unsigned char *old_ptr,
-
129 const size_t old_size, const size_t new_size)
-
130{
-
131 if (&self->buf_ptr[self->prev_offset] != old_ptr) {
+
111
+
+
122static inline void *arena_allocate_aligned(struct arena *self, const size_t alignment, const size_t size)
+
123{
+
124 assert(self);
+
125
+
126 void *ptr = (void *)&self->buf_ptr[self->curr_offset];
+
127
+
128 size_t space_left = self->buf_len - (size_t)self->curr_offset;
+
129
+
130 const bool has_space_left = align(alignment, size, &ptr, &space_left);
+
131 if (!has_space_left) {
132 return NULL;
133 }
134
-
135 self->curr_offset = self->prev_offset + new_size;
+
135 const uintptr_t relative_offset = (uintptr_t)((unsigned char *)ptr - &self->buf_ptr[0]);
136
-
137 if (new_size > old_size) {
-
138 const size_t diff = new_size - old_size;
+
137 self->prev_offset = relative_offset;
+
138 self->curr_offset = relative_offset + size;
139
-
140 memset(&self->buf_ptr[self->curr_offset], 0, diff);
-
141 }
-
142
-
143 return old_ptr;
-
144}
-
146
-
-
160static inline void *arena_reallocate_aligned(struct arena *self, void *old_ptr_, const size_t alignment,
-
161 const size_t old_size, const size_t new_size)
-
162{
-
163 assert(self);
-
164 assert(is_pow2(alignment));
-
165
-
166 unsigned char *old_ptr = (unsigned char *)old_ptr_;
-
167
-
168 const bool misc_input = old_ptr == NULL || old_size == 0 || new_size == 0;
-
169 const bool inside_arena_buf = &self->buf_ptr[0] <= old_ptr && old_ptr <= &self->buf_ptr[self->buf_len - 1];
-
170 if (misc_input || !inside_arena_buf) {
-
171 return NULL;
-
172 }
+
140 memset(ptr, 0, size);
+
141
+
142 return ptr;
+
143}
+
+
144
+
+
154static inline void *arena_allocate(struct arena *self, const size_t size)
+
155{
+
156 assert(self);
+
157
+
158 return arena_allocate_aligned(self, alignof(max_align_t), size);
+
159}
+
+
160
+
162static inline void *internal_arena_try_optimizing_w_prev_offset(struct arena *self, unsigned char *old_ptr,
+
163 const size_t old_size, const size_t new_size)
+
164{
+
165 if (&self->buf_ptr[self->prev_offset] != old_ptr) {
+
166 return NULL;
+
167 }
+
168
+
169 self->curr_offset = self->prev_offset + new_size;
+
170
+
171 if (new_size > old_size) {
+
172 const size_t diff = new_size - old_size;
173
-
174 const bool has_optimized_w_prev_buf =
-
175 internal_arena_try_optimizing_w_prev_offset(self, old_ptr, old_size, new_size);
-
176 if (has_optimized_w_prev_buf) {
-
177 return old_ptr;
-
178 }
-
179
-
180 const size_t copy_size = old_size < new_size ? old_size : new_size;
-
181
-
182 void *new_mem = arena_allocate_aligned(self, alignment, new_size);
-
183
-
184 memmove(new_mem, old_ptr, copy_size);
-
185
-
186 return new_mem;
-
187}
+
174 memset(&self->buf_ptr[self->curr_offset], 0, diff);
+
175 }
+
176
+
177 return old_ptr;
+
178}
+
180
+
+
194static inline void *arena_reallocate_aligned(struct arena *self, void *old_ptr_, const size_t alignment,
+
195 const size_t old_size, const size_t new_size)
+
196{
+
197 assert(self);
+
198 assert(is_pow2(alignment));
+
199
+
200 unsigned char *old_ptr = (unsigned char *)old_ptr_;
+
201
+
202 const bool misc_input = old_ptr == NULL || old_size == 0 || new_size == 0;
+
203 const bool inside_arena_buf = &self->buf_ptr[0] <= old_ptr && old_ptr <= &self->buf_ptr[self->buf_len - 1];
+
204 if (misc_input || !inside_arena_buf) {
+
205 return NULL;
+
206 }
+
207
+
208 const bool has_optimized_w_prev_buf =
+
209 internal_arena_try_optimizing_w_prev_offset(self, old_ptr, old_size, new_size);
+
210 if (has_optimized_w_prev_buf) {
+
211 return old_ptr;
+
212 }
+
213
+
214 const size_t copy_size = old_size < new_size ? old_size : new_size;
+
215
+
216 void *new_mem = arena_allocate_aligned(self, alignment, new_size);
+
217
+
218 memmove(new_mem, old_ptr, copy_size);
+
219
+
220 return new_mem;
+
221}
-
188
-
-
200static inline void *arena_reallocate(struct arena *self, void *old_ptr, const size_t old_size, const size_t new_size)
-
201{
-
202 assert(self);
-
203
-
204 return arena_reallocate_aligned(self, old_ptr, alignof(max_align_t), old_size, new_size);
-
205}
+
222
+
+
234static inline void *arena_reallocate(struct arena *self, void *old_ptr, const size_t old_size, const size_t new_size)
+
235{
+
236 assert(self);
+
237
+
238 return arena_reallocate_aligned(self, old_ptr, alignof(max_align_t), old_size, new_size);
+
239}
Align memory.
static uintptr_t calc_alignment_padding(const size_t alignment, const uintptr_t ptr)
Calculate the alignment padding required to align a pointer.
Definition align.h:91
static void * align(const size_t alignment, const size_t size, void **ptr_ptr, size_t *space_ptr)
Align pointer to the next alignment boundary.
Definition align.h:43
-
static void arena_deallocate_all(struct arena *self)
Deallocate all allocations in the arena.
Definition arena.h:70
-
static void * arena_reallocate(struct arena *self, void *old_ptr, const size_t old_size, const size_t new_size)
Reallocate a previously allocated chunk in the arena.
Definition arena.h:200
-
static void * arena_allocate_aligned(struct arena *self, const size_t alignment, const size_t size)
Get the pointer to a chunk of the arena. With specific alignment.
Definition arena.h:88
-
static void arena_init(struct arena *self, const size_t len, unsigned char backing_buf[len])
Initialize the arena.
Definition arena.h:50
-
static void * arena_reallocate_aligned(struct arena *self, void *old_ptr_, const size_t alignment, const size_t old_size, const size_t new_size)
Reallocate a previously allocated chunk in the arena. With specific aligment.
Definition arena.h:160
-
static void * arena_allocate(struct arena *self, const size_t size)
Get the pointer to a chunk of the arena.
Definition arena.h:120
+
static void arena_deallocate_all(struct arena *self)
Deallocate all allocations in the arena.
Definition arena.h:104
+
static void * arena_reallocate(struct arena *self, void *old_ptr, const size_t old_size, const size_t new_size)
Reallocate a previously allocated chunk in the arena.
Definition arena.h:234
+
static void * arena_allocate_aligned(struct arena *self, const size_t alignment, const size_t size)
Get the pointer to a chunk of the arena. With specific alignment.
Definition arena.h:122
+
static void * arena_reallocate_aligned(struct arena *self, void *old_ptr_, const size_t alignment, const size_t old_size, const size_t new_size)
Reallocate a previously allocated chunk in the arena. With specific alignment.
Definition arena.h:194
+
void temp_arena_memory_end(struct temp_arena_state temp)
Restore the arena state.
Definition arena.h:71
+
static void arena_init(struct arena *self, const size_t len, unsigned char *backing_buf)
Initialize the arena.
Definition arena.h:84
+
struct temp_arena_state temp_arena_memory_begin(struct arena *arena_ptr)
Store the arena state temporarily.
Definition arena.h:57
+
static void * arena_allocate(struct arena *self, const size_t size)
Get the pointer to a chunk of the arena.
Definition arena.h:154
Check if a number is a power of two.
static size_t is_pow2(const size_t x)
Check if a number is a power of two.
Definition is_pow2.h:34
Arena data struct.
Definition arena.h:36
@@ -258,6 +287,10 @@
size_t prev_offset
Previous offset relative to buf_ptr.
Definition arena.h:38
size_t buf_len
Underlying buffer length.
Definition arena.h:37
size_t curr_offset
Current offset relative to buf_ptr.
Definition arena.h:39
+
Temporary arena state struct.
Definition arena.h:46
+
size_t curr_offset
arena original curr offset
Definition arena.h:49
+
struct arena * arena_ptr
arena pointer
Definition arena.h:47
+
size_t prev_offset
arena original prev offset
Definition arena.h:48
-
A | F | L | R
+
A | F | L | P | R | T
diff --git a/dir_97aefd0d527b934f1d99a682da8fe6a9.html b/dir_97aefd0d527b934f1d99a682da8fe6a9.html index 3a0ffd19..f5482b0d 100644 --- a/dir_97aefd0d527b934f1d99a682da8fe6a9.html +++ b/dir_97aefd0d527b934f1d99a682da8fe6a9.html @@ -133,6 +133,8 @@  paste.h  C preprocessor macros for pasting tokens together.
  + pool.h rbtree.h  Intrusive red-black tree.
  diff --git a/doxygen_crawl.html b/doxygen_crawl.html index 3e43bbb8..0270272e 100644 --- a/doxygen_crawl.html +++ b/doxygen_crawl.html @@ -8,7 +8,6 @@ - @@ -24,6 +23,7 @@ + @@ -61,8 +61,14 @@ + + + + + + @@ -81,6 +87,7 @@ + @@ -95,6 +102,7 @@ + @@ -110,6 +118,8 @@ + + @@ -139,15 +149,16 @@ - + + + - @@ -263,6 +274,7 @@ + @@ -323,11 +335,17 @@ + + + + + + diff --git a/examples.html b/examples.html index 1046c849..23403a1a 100644 --- a/examples.html +++ b/examples.html @@ -88,7 +88,6 @@
Here is a list of all examples:
diff --git a/fhashtable_8h.html b/fhashtable_8h.html index e30e28f6..4bc68575 100644 --- a/fhashtable_8h.html +++ b/fhashtable_8h.html @@ -240,7 +240,7 @@

for ((index) = 0; (index) < (self)->capacity; (index)++) \
&& ((key_) = (self)->slots[(index)].key, (value_) = (self)->slots[(index)].value, true))
-
#define FHASHTABLE_EMPTY_SLOT_OFFSET
Offset constant used to flag empty slots.
Definition fhashtable.h:62
+
#define FHASHTABLE_EMPTY_SLOT_OFFSET
Offset constant used to flag empty slots.
Definition fhashtable.h:61

Iterate over the non-empty slots in the hashtable in arbitrary order.

Warning
Modifying the hashtable under the iteration may result in errors.
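A read-only iteration sketch (hedged: table_ptr is a hypothetical instance of a generated table; see the instantiation sketch after the HASH_FUNCTION notes below):

uint32_t index;
int key;
int value;
fhashtable_for_each(table_ptr, index, key, value)
{
    printf("%d -> %d\n", key, value); /* reading is fine; inserting or deleting here is not */
}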
@@ -271,7 +271,7 @@

Value:
(murmur3_32((uint8_t *)&(key), sizeof(KEY_TYPE), 0))
-
#define KEY_TYPE
The key type. This must be manually defined before including this header file.
Definition fhashtable.h:107
+
#define KEY_TYPE
The key type. This must be manually defined before including this header file.
Definition fhashtable.h:106
static uint32_t murmur3_32(const uint8_t *key_ptr, const uint32_t len, const uint32_t seed)
Get the Murmur3 (32-bit) hash of a string of bytes.
Definition murmurhash.h:54

Used to compute indices of keys. This must be manually defined before including this header file.
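To make the requirements concrete, a hedged instantiation sketch (the name intmap is hypothetical; the five macros are exactly the ones the header #error-checks for). Including fhashtable.h afterwards generates struct intmap together with intmap_create, intmap_insert, intmap_contains_key, intmap_destroy and the other functions by token pasting:

#define NAME intmap
#define KEY_TYPE int
#define VALUE_TYPE int
#define KEY_IS_EQUAL(a, b) ((a) == (b))
#define HASH_FUNCTION(key) (murmur3_32((uint8_t *)&(key), sizeof(KEY_TYPE), 0))
#include "fhashtable.h"

/* ... */
struct intmap *table_ptr = intmap_create(64); /* requested capacity is rounded up to a power of two */
if (table_ptr) {
    intmap_insert(table_ptr, 1, 100);
    intmap_destroy(table_ptr);
}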

diff --git a/fhashtable_8h_source.html b/fhashtable_8h_source.html index 37662e3d..325b63b1 100644 --- a/fhashtable_8h_source.html +++ b/fhashtable_8h_source.html @@ -102,481 +102,481 @@
9 * See the file LICENSE included with this distribution for more
10 * information. */
11
-
42// macro definitions: {{{
-
43
-
44#ifndef FHASHTABLE_H
-
45#define FHASHTABLE_H
-
46
-
47#include "fnvhash.h" // fnvhash_32, fnvhash_32_str
-
48#include "is_pow2.h" // is_pow2
-
49#include "murmurhash.h" // murmur_32
-
50#include "paste.h" // PASTE, XPASTE, JOIN
-
51#include "round_up_pow2.h" // round_up_pow2_32
-
52
-
53#include <assert.h>
-
54#include <stdint.h>
-
55#include <stdio.h>
-
56#include <stdlib.h>
-
57
-
62#define FHASHTABLE_EMPTY_SLOT_OFFSET (UINT32_MAX)
-
63
-
-
77#define fhashtable_for_each(self, index, key_, value_) \
-
78 for ((index) = 0; (index) < (self)->capacity; (index)++) \
-
79 if ((self)->slots[(index)].offset != FHASHTABLE_EMPTY_SLOT_OFFSET \
-
80 && ((key_) = (self)->slots[(index)].key, (value_) = (self)->slots[(index)].value, true))
+
41// macro definitions: {{{
+
42
+
43#ifndef FHASHTABLE_H
+
44#define FHASHTABLE_H
+
45
+
46#include "fnvhash.h" // fnvhash_32, fnvhash_32_str
+
47#include "is_pow2.h" // is_pow2
+
48#include "murmurhash.h" // murmur_32
+
49#include "paste.h" // PASTE, XPASTE, JOIN
+
50#include "round_up_pow2.h" // round_up_pow2_32
+
51
+
52#include <assert.h>
+
53#include <stdint.h>
+
54#include <stdio.h>
+
55#include <stdlib.h>
+
56
+
61#define FHASHTABLE_EMPTY_SLOT_OFFSET (UINT32_MAX)
+
62
+
+
76#define fhashtable_for_each(self, index, key_, value_) \
+
77 for ((index) = 0; (index) < (self)->capacity; (index)++) \
+
78 if ((self)->slots[(index)].offset != FHASHTABLE_EMPTY_SLOT_OFFSET \
+
79 && ((key_) = (self)->slots[(index)].key, (value_) = (self)->slots[(index)].value, true))
-
81
-
82#endif // FHASHTABLE_H
-
83
-
91#ifndef NAME
-
92#error "Must define NAME."
-
93#define NAME fhashtable
-
94#else
-
95#define FHASHTABLE_NAME NAME
-
96#endif
-
97
-
105#ifndef KEY_TYPE
-
106#error "Must define KEY_TYPE."
-
107#define KEY_TYPE int
-
108#endif
-
109
-
117#ifndef VALUE_TYPE
-
118#error "Must define VALUE_TYPE."
-
119#define VALUE_TYPE int
-
120#endif
-
121
-
139#ifndef KEY_IS_EQUAL
-
140#error "Must define KEY_IS_EQUAL."
-
141#define KEY_IS_EQUAL(a, b) ((a) == (b))
-
142#endif
-
143
-
154#ifndef HASH_FUNCTION
-
155#error "Must define HASH_FUNCTION."
-
156#define HASH_FUNCTION(key) (murmur3_32((uint8_t *)&(key), sizeof(KEY_TYPE), 0))
-
157#endif
-
158
-
160#define FHASHTABLE_TYPE struct FHASHTABLE_NAME
-
161#define FHASHTABLE_SLOT_TYPE struct JOIN(FHASHTABLE_NAME, slot)
-
162#define FHASHTABLE_SLOT JOIN(FHASHTABLE_NAME, slot)
-
163#define FHASHTABLE_INIT JOIN(FHASHTABLE_NAME, init)
-
164#define FHASHTABLE_IS_FULL JOIN(FHASHTABLE_NAME, is_full)
-
165#define FHASHTABLE_CONTAINS_KEY JOIN(FHASHTABLE_NAME, contains_key)
-
166#define FHASHTABLE_CALC_SIZEOF JOIN(FHASHTABLE_NAME, calc_sizeof)
-
167#define FHASHTABLE_SWAP_SLOTS JOIN(internal, JOIN(FHASHTABLE_NAME, swap_slots))
-
168#define FHASHTABLE_BACKSHIFT JOIN(internal, JOIN(FHASHTABLE_NAME, backshift))
-
170
-
171// }}}
-
172
-
173// type definitions: {{{
-
174
-
-
179struct JOIN(FHASHTABLE_NAME, slot) {
-
180 uint32_t offset;
- - -
183};
+
80
+
81#endif // FHASHTABLE_H
+
82
+
90#ifndef NAME
+
91#error "Must define NAME."
+
92#define NAME fhashtable
+
93#else
+
94#define FHASHTABLE_NAME NAME
+
95#endif
+
96
+
104#ifndef KEY_TYPE
+
105#error "Must define KEY_TYPE."
+
106#define KEY_TYPE int
+
107#endif
+
108
+
116#ifndef VALUE_TYPE
+
117#error "Must define VALUE_TYPE."
+
118#define VALUE_TYPE int
+
119#endif
+
120
+
138#ifndef KEY_IS_EQUAL
+
139#error "Must define KEY_IS_EQUAL."
+
140#define KEY_IS_EQUAL(a, b) ((a) == (b))
+
141#endif
+
142
+
153#ifndef HASH_FUNCTION
+
154#error "Must define HASH_FUNCTION."
+
155#define HASH_FUNCTION(key) (murmur3_32((uint8_t *)&(key), sizeof(KEY_TYPE), 0))
+
156#endif
+
157
+
159#define FHASHTABLE_TYPE struct FHASHTABLE_NAME
+
160#define FHASHTABLE_SLOT_TYPE struct JOIN(FHASHTABLE_NAME, slot)
+
161#define FHASHTABLE_SLOT JOIN(FHASHTABLE_NAME, slot)
+
162#define FHASHTABLE_INIT JOIN(FHASHTABLE_NAME, init)
+
163#define FHASHTABLE_IS_FULL JOIN(FHASHTABLE_NAME, is_full)
+
164#define FHASHTABLE_CONTAINS_KEY JOIN(FHASHTABLE_NAME, contains_key)
+
165#define FHASHTABLE_CALC_SIZEOF JOIN(FHASHTABLE_NAME, calc_sizeof)
+
166#define FHASHTABLE_SWAP_SLOTS JOIN(internal, JOIN(FHASHTABLE_NAME, swap_slots))
+
167#define FHASHTABLE_BACKSHIFT JOIN(internal, JOIN(FHASHTABLE_NAME, backshift))
+
169
+
170// }}}
+
171
+
172// type definitions: {{{
+
173
+
+
178struct JOIN(FHASHTABLE_NAME, slot) {
+
179 uint32_t offset;
+ + +
182};
-
184
-
-
189struct FHASHTABLE_NAME {
-
190 uint32_t count;
-
191 uint32_t capacity;
-
192 FHASHTABLE_SLOT_TYPE slots[];
-
193};
+
183
+
+
188struct FHASHTABLE_NAME {
+
189 uint32_t count;
+
190 uint32_t capacity;
+
191 FHASHTABLE_SLOT_TYPE slots[];
+
192};
-
194
-
195// }}}
-
196
-
197// function definitions: {{{
-
198
-
-
210static inline bool JOIN(FHASHTABLE_NAME, calc_sizeof)(uint32_t *capacity_ptr, uint32_t *size_ptr)
-
211{
-
212 assert(capacity_ptr);
-
213 assert(size_ptr);
-
214
-
215 if (*capacity_ptr == 0 || *capacity_ptr > UINT32_MAX / 4) {
-
216 return false;
-
217 }
-
218
-
219 *capacity_ptr = round_up_pow2_32(*capacity_ptr);
-
220
-
221 *size_ptr = (uint32_t)(offsetof(FHASHTABLE_TYPE, slots) + *capacity_ptr * sizeof(FHASHTABLE_SLOT_TYPE));
-
222
-
223 return true;
-
224}
+
193
+
194// }}}
+
195
+
196// function definitions: {{{
+
197
+
+
209static inline bool JOIN(FHASHTABLE_NAME, calc_sizeof)(uint32_t *capacity_ptr, uint32_t *size_ptr)
+
210{
+
211 assert(capacity_ptr);
+
212 assert(size_ptr);
+
213
+
214 if (*capacity_ptr == 0 || *capacity_ptr > UINT32_MAX / 4) {
+
215 return false;
+
216 }
+
217
+
218 *capacity_ptr = round_up_pow2_32(*capacity_ptr);
+
219
+
220 *size_ptr = (uint32_t)(offsetof(FHASHTABLE_TYPE, slots) + *capacity_ptr * sizeof(FHASHTABLE_SLOT_TYPE));
+
221
+
222 return true;
+
223}
-
225
-
-
232static inline FHASHTABLE_TYPE *JOIN(FHASHTABLE_NAME, init)(FHASHTABLE_TYPE *self, const uint32_t pow2_capacity)
-
233{
-
234 assert(self);
-
235 assert(is_pow2(pow2_capacity));
-
236
-
237 self->count = 0;
-
238 self->capacity = pow2_capacity;
-
239
-
240 for (uint32_t i = 0; i < self->capacity; i++) {
-
241 self->slots[i].offset = FHASHTABLE_EMPTY_SLOT_OFFSET;
-
242 }
-
243
-
244 return self;
-
245}
+
224
+
+
231static inline FHASHTABLE_TYPE *JOIN(FHASHTABLE_NAME, init)(FHASHTABLE_TYPE *self, const uint32_t pow2_capacity)
+
232{
+
233 assert(self);
+
234 assert(is_pow2(pow2_capacity));
+
235
+
236 self->count = 0;
+
237 self->capacity = pow2_capacity;
+
238
+
239 for (uint32_t i = 0; i < self->capacity; i++) {
+
240 self->slots[i].offset = FHASHTABLE_EMPTY_SLOT_OFFSET;
+
241 }
+
242
+
243 return self;
+
244}
-
246
-
-
258static inline FHASHTABLE_TYPE *JOIN(FHASHTABLE_NAME, create)(uint32_t capacity)
-
259{
-
260 uint32_t size = 0;
-
261 if (!FHASHTABLE_CALC_SIZEOF(&capacity, &size)) {
-
262 return NULL;
-
263 }
-
264
-
265 FHASHTABLE_TYPE *self = (FHASHTABLE_TYPE *)calloc(1, size);
-
266
-
267 if (!self) {
-
268 return NULL;
-
269 }
-
270
-
271 FHASHTABLE_INIT(self, capacity);
-
272
-
273 return self;
-
274}
+
245
+
+
257static inline FHASHTABLE_TYPE *JOIN(FHASHTABLE_NAME, create)(uint32_t capacity)
+
258{
+
259 uint32_t size = 0;
+
260 if (!FHASHTABLE_CALC_SIZEOF(&capacity, &size)) {
+
261 return NULL;
+
262 }
+
263
+
264 FHASHTABLE_TYPE *self = (FHASHTABLE_TYPE *)calloc(1, size);
+
265
+
266 if (!self) {
+
267 return NULL;
+
268 }
+
269
+
270 FHASHTABLE_INIT(self, capacity);
+
271
+
272 return self;
+
273}
-
275
-
-
284static inline void JOIN(FHASHTABLE_NAME, destroy)(FHASHTABLE_TYPE *self)
-
285{
-
286 assert(self);
-
287
-
288 free(self);
-
289}
+
274
+
+
283static inline void JOIN(FHASHTABLE_NAME, destroy)(FHASHTABLE_TYPE *self)
+
284{
+
285 assert(self);
+
286
+
287 free(self);
+
288}
-
290
-
-
298static inline bool JOIN(FHASHTABLE_NAME, is_empty)(const FHASHTABLE_TYPE *self)
-
299{
-
300 assert(self != NULL);
-
301
-
302 return self->count == 0;
-
303}
+
289
+
+
297static inline bool JOIN(FHASHTABLE_NAME, is_empty)(const FHASHTABLE_TYPE *self)
+
298{
+
299 assert(self != NULL);
+
300
+
301 return self->count == 0;
+
302}
-
304
-
-
312static inline bool JOIN(FHASHTABLE_NAME, is_full)(const FHASHTABLE_TYPE *self)
-
313{
-
314 assert(self != NULL);
-
315
-
316 return self->count == self->capacity;
-
317}
+
303
+
+
311static inline bool JOIN(FHASHTABLE_NAME, is_full)(const FHASHTABLE_TYPE *self)
+
312{
+
313 assert(self != NULL);
+
314
+
315 return self->count == self->capacity;
+
316}
-
318
-
-
327static inline bool JOIN(FHASHTABLE_NAME, contains_key)(const FHASHTABLE_TYPE *self, const KEY_TYPE key)
-
328{
-
329 assert(self != NULL);
-
330
-
331 const uint32_t key_hash = HASH_FUNCTION(key);
-
332 const uint32_t index_mask = self->capacity - 1;
-
333
-
334 uint32_t index = key_hash & index_mask;
-
335 uint32_t max_possible_offset = 0;
-
336
-
337 while (true) {
-
338 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
-
339
-
340 const bool below_max = max_possible_offset <= self->slots[index].offset;
-
341
-
342 if (!(not_empty && below_max)) {
-
343 break;
-
344 }
-
345
-
346 if (KEY_IS_EQUAL(self->slots[index].key, key)) {
-
347 return true;
-
348 }
-
349
-
350 index++;
-
351 index &= index_mask;
-
352 max_possible_offset++;
-
353 }
-
354 return false;
-
355}
+
317
+
+
326static inline bool JOIN(FHASHTABLE_NAME, contains_key)(const FHASHTABLE_TYPE *self, const KEY_TYPE key)
+
327{
+
328 assert(self != NULL);
+
329
+
330 const uint32_t key_hash = HASH_FUNCTION(key);
+
331 const uint32_t index_mask = self->capacity - 1;
+
332
+
333 uint32_t index = key_hash & index_mask;
+
334 uint32_t max_possible_offset = 0;
+
335
+
336 while (true) {
+
337 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
+
338
+
339 const bool below_max = max_possible_offset <= self->slots[index].offset;
+
340
+
341 if (!(not_empty && below_max)) {
+
342 break;
+
343 }
+
344
+
345 if (KEY_IS_EQUAL(self->slots[index].key, key)) {
+
346 return true;
+
347 }
+
348
+
349 index++;
+
350 index &= index_mask;
+
351 max_possible_offset++;
+
352 }
+
353 return false;
+
354}
-
356
-
-
370static inline VALUE_TYPE *JOIN(FHASHTABLE_NAME, get_value_mut)(FHASHTABLE_TYPE *self, const KEY_TYPE key)
-
371{
-
372 assert(self != NULL);
-
373
-
374 const uint32_t key_hash = HASH_FUNCTION(key);
-
375 const uint32_t index_mask = self->capacity - 1;
-
376
-
377 uint32_t index = key_hash & index_mask;
-
378 uint32_t max_possible_offset = 0;
-
379
-
380 while (true) {
-
381 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
-
382
-
383 const bool below_max = max_possible_offset <= self->slots[index].offset;
-
384
-
385 if (!(not_empty && below_max)) {
-
386 break;
-
387 }
-
388
-
389 if (KEY_IS_EQUAL(self->slots[index].key, key)) {
-
390 return &self->slots[index].value;
-
391 }
-
392
-
393 index++;
-
394 index &= index_mask;
-
395 max_possible_offset++;
-
396 }
-
397 return NULL;
-
398}
+
355
+
+
369static inline VALUE_TYPE *JOIN(FHASHTABLE_NAME, get_value_mut)(FHASHTABLE_TYPE *self, const KEY_TYPE key)
+
370{
+
371 assert(self != NULL);
+
372
+
373 const uint32_t key_hash = HASH_FUNCTION(key);
+
374 const uint32_t index_mask = self->capacity - 1;
+
375
+
376 uint32_t index = key_hash & index_mask;
+
377 uint32_t max_possible_offset = 0;
+
378
+
379 while (true) {
+
380 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
+
381
+
382 const bool below_max = max_possible_offset <= self->slots[index].offset;
+
383
+
384 if (!(not_empty && below_max)) {
+
385 break;
+
386 }
+
387
+
388 if (KEY_IS_EQUAL(self->slots[index].key, key)) {
+
389 return &self->slots[index].value;
+
390 }
+
391
+
392 index++;
+
393 index &= index_mask;
+
394 max_possible_offset++;
+
395 }
+
396 return NULL;
+
397}
-
399
-
-
412static inline VALUE_TYPE JOIN(FHASHTABLE_NAME, get_value)(const FHASHTABLE_TYPE *self, const KEY_TYPE key,
-
413 VALUE_TYPE default_value)
-
414{
-
415 assert(self != NULL);
-
416
-
417 const uint32_t key_hash = HASH_FUNCTION(key);
-
418 const uint32_t index_mask = self->capacity - 1;
-
419
-
420 uint32_t index = key_hash & index_mask;
-
421 uint32_t max_possible_offset = 0;
-
422
-
423 while (true) {
-
424 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
-
425
-
426 const bool below_max = max_possible_offset <= self->slots[index].offset;
-
427
-
428 if (!(not_empty && below_max)) {
-
429 break;
-
430 }
-
431
-
432 if (KEY_IS_EQUAL(self->slots[index].key, key)) {
-
433 return self->slots[index].value;
-
434 }
-
435
-
436 index++;
-
437 index &= index_mask;
-
438 max_possible_offset++;
-
439 }
-
440 return default_value;
-
441}
+
398
+
+
411static inline VALUE_TYPE JOIN(FHASHTABLE_NAME, get_value)(const FHASHTABLE_TYPE *self, const KEY_TYPE key,
+
412 VALUE_TYPE default_value)
+
413{
+
414 assert(self != NULL);
+
415
+
416 const uint32_t key_hash = HASH_FUNCTION(key);
+
417 const uint32_t index_mask = self->capacity - 1;
+
418
+
419 uint32_t index = key_hash & index_mask;
+
420 uint32_t max_possible_offset = 0;
+
421
+
422 while (true) {
+
423 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
+
424
+
425 const bool below_max = max_possible_offset <= self->slots[index].offset;
+
426
+
427 if (!(not_empty && below_max)) {
+
428 break;
+
429 }
+
430
+
431 if (KEY_IS_EQUAL(self->slots[index].key, key)) {
+
432 return self->slots[index].value;
+
433 }
+
434
+
435 index++;
+
436 index &= index_mask;
+
437 max_possible_offset++;
+
438 }
+
439 return default_value;
+
440}
-
442
-
-
456static inline VALUE_TYPE *JOIN(FHASHTABLE_NAME, search)(FHASHTABLE_TYPE *self, const KEY_TYPE key)
-
457{
-
458 return JOIN(FHASHTABLE_NAME, get_value_mut)(self, key);
-
459}
+
441
+
+
455static inline VALUE_TYPE *JOIN(FHASHTABLE_NAME, search)(FHASHTABLE_TYPE *self, const KEY_TYPE key)
+
456{
+
457 return JOIN(FHASHTABLE_NAME, get_value_mut)(self, key);
+
458}
-
460
-
462static inline void JOIN(internal, JOIN(FHASHTABLE_NAME, swap_slots))(FHASHTABLE_SLOT_TYPE *a, FHASHTABLE_SLOT_TYPE *b)
-
463{
-
464 FHASHTABLE_SLOT_TYPE temp = *a;
-
465 *a = *b;
-
466 *b = temp;
-
467}
-
469
-
-
478static inline void JOIN(FHASHTABLE_NAME, insert)(FHASHTABLE_TYPE *self, KEY_TYPE key, VALUE_TYPE value)
-
479{
-
480 assert(self != NULL);
-
481 assert(FHASHTABLE_CONTAINS_KEY(self, key) == false);
-
482
-
483 const uint32_t index_mask = self->capacity - 1;
-
484 const uint32_t key_hash = HASH_FUNCTION(key);
-
485
-
486 uint32_t index = key_hash & index_mask;
-
487 FHASHTABLE_SLOT_TYPE current_slot = {.offset = 0, .key = key, .value = value};
-
488
-
489 while (true) {
-
490 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
-
491
-
492 if (!not_empty) {
-
493 break;
-
494 }
-
495
-
496 if (current_slot.offset > self->slots[index].offset) {
-
497 FHASHTABLE_SWAP_SLOTS(&self->slots[index], &current_slot);
-
498 }
-
499
-
500 index++;
-
501 index &= index_mask;
-
502 current_slot.offset++;
-
503 }
-
504 self->slots[index] = current_slot;
-
505 self->count++;
-
506}
+
459
+
461static inline void JOIN(internal, JOIN(FHASHTABLE_NAME, swap_slots))(FHASHTABLE_SLOT_TYPE *a, FHASHTABLE_SLOT_TYPE *b)
+
462{
+
463 FHASHTABLE_SLOT_TYPE temp = *a;
+
464 *a = *b;
+
465 *b = temp;
+
466}
+
468
+
+
477static inline void JOIN(FHASHTABLE_NAME, insert)(FHASHTABLE_TYPE *self, KEY_TYPE key, VALUE_TYPE value)
+
478{
+
479 assert(self != NULL);
+
480 assert(FHASHTABLE_CONTAINS_KEY(self, key) == false);
+
481
+
482 const uint32_t index_mask = self->capacity - 1;
+
483 const uint32_t key_hash = HASH_FUNCTION(key);
+
484
+
485 uint32_t index = key_hash & index_mask;
+
486 FHASHTABLE_SLOT_TYPE current_slot = {.offset = 0, .key = key, .value = value};
+
487
+
488 while (true) {
+
489 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
+
490
+
491 if (!not_empty) {
+
492 break;
+
493 }
+
494
+
495 if (current_slot.offset > self->slots[index].offset) {
+
496 FHASHTABLE_SWAP_SLOTS(&self->slots[index], &current_slot);
+
497 }
+
498
+
499 index++;
+
500 index &= index_mask;
+
501 current_slot.offset++;
+
502 }
+
503 self->slots[index] = current_slot;
+
504 self->count++;
+
505}
-
507
-
-
516static inline void JOIN(FHASHTABLE_NAME, update)(FHASHTABLE_TYPE *self, KEY_TYPE key, VALUE_TYPE value)
-
517{
-
518 assert(self != NULL);
-
519
-
520 const uint32_t index_mask = self->capacity - 1;
-
521 const uint32_t key_hash = HASH_FUNCTION(key);
-
522
-
523 uint32_t index = key_hash & index_mask;
-
524 FHASHTABLE_SLOT_TYPE current_slot = {.offset = 0, .key = key, .value = value};
-
525
-
526 while (true) {
-
527 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
-
528
-
529 if (!not_empty) {
-
530 break;
-
531 }
-
532
-
533 const bool offset_is_same = current_slot.offset == self->slots[index].offset;
-
534
-
535 const bool key_is_equal = KEY_IS_EQUAL(current_slot.key, self->slots[index].key);
-
536
-
537 if (offset_is_same && key_is_equal) {
-
538 self->slots[index].value = current_slot.value;
-
539 return;
-
540 }
-
541
-
542 if (current_slot.offset > self->slots[index].offset) {
-
543 FHASHTABLE_SWAP_SLOTS(&current_slot, &self->slots[index]);
-
544 }
-
545
-
546 index++;
-
547 index &= index_mask;
-
548 current_slot.offset++;
-
549 }
-
550
-
551 self->slots[index] = current_slot;
-
552 self->count++;
-
553}
+
506
+
+
515static inline void JOIN(FHASHTABLE_NAME, update)(FHASHTABLE_TYPE *self, KEY_TYPE key, VALUE_TYPE value)
+
516{
+
517 assert(self != NULL);
+
518
+
519 const uint32_t index_mask = self->capacity - 1;
+
520 const uint32_t key_hash = HASH_FUNCTION(key);
+
521
+
522 uint32_t index = key_hash & index_mask;
+
523 FHASHTABLE_SLOT_TYPE current_slot = {.offset = 0, .key = key, .value = value};
+
524
+
525 while (true) {
+
526 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
+
527
+
528 if (!not_empty) {
+
529 break;
+
530 }
+
531
+
532 const bool offset_is_same = current_slot.offset == self->slots[index].offset;
+
533
+
534 const bool key_is_equal = KEY_IS_EQUAL(current_slot.key, self->slots[index].key);
+
535
+
536 if (offset_is_same && key_is_equal) {
+
537 self->slots[index].value = current_slot.value;
+
538 return;
+
539 }
+
540
+
541 if (current_slot.offset > self->slots[index].offset) {
+
542 FHASHTABLE_SWAP_SLOTS(&current_slot, &self->slots[index]);
+
543 }
+
544
+
545 index++;
+
546 index &= index_mask;
+
547 current_slot.offset++;
+
548 }
+
549
+
550 self->slots[index] = current_slot;
+
551 self->count++;
+
552}
-
554
-
556static inline void JOIN(internal, JOIN(FHASHTABLE_NAME, backshift))(FHASHTABLE_TYPE *self, const uint32_t index_mask,
-
557 uint32_t index)
-
558{
-
559 assert(self);
-
560
-
561 uint32_t next_index = (index + 1) & index_mask;
-
562
-
563 while (true) {
-
564 const bool not_empty = self->slots[next_index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
-
565
-
566 const bool offset_is_non_zero = self->slots[next_index].offset > 0;
-
567
-
568 if (!(not_empty && offset_is_non_zero)) {
-
569 break;
-
570 }
-
571
-
572 self->slots[index] = self->slots[next_index];
-
573 self->slots[index].offset--;
-
574
-
575 self->slots[next_index].offset = FHASHTABLE_EMPTY_SLOT_OFFSET;
-
576
-
577 index = next_index;
-
578 next_index = (index + 1) & index_mask;
-
579 }
-
580}
-
582
-
-
592static inline bool JOIN(FHASHTABLE_NAME, delete)(FHASHTABLE_TYPE *self, const KEY_TYPE key)
-
593{
-
594 assert(self != NULL);
-
595
-
596 const uint32_t index_mask = self->capacity - 1;
-
597 const uint32_t key_hash = HASH_FUNCTION(key);
-
598
-
599 uint32_t index = key_hash & index_mask;
-
600 uint32_t max_possible_offset = 0;
-
601
-
602 while (true) {
-
603 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
-
604
-
605 const bool below_max = max_possible_offset <= self->slots[index].offset;
-
606
-
607 if (!(not_empty && below_max)) {
-
608 break;
-
609 }
-
610
-
611 const bool key_is_equal = KEY_IS_EQUAL(key, self->slots[index].key);
-
612
-
613 if (!key_is_equal) {
-
614 index = (index + 1) & index_mask;
-
615 max_possible_offset++;
-
616 continue;
-
617 }
-
618
-
619 self->slots[index].offset = FHASHTABLE_EMPTY_SLOT_OFFSET;
-
620 self->count--;
-
621
-
622 FHASHTABLE_BACKSHIFT(self, index_mask, index);
-
623
-
624 return true;
-
625 }
-
626 return false;
-
627}
+
553
+
555static inline void JOIN(internal, JOIN(FHASHTABLE_NAME, backshift))(FHASHTABLE_TYPE *self, const uint32_t index_mask,
+
556 uint32_t index)
+
557{
+
558 assert(self);
+
559
+
560 uint32_t next_index = (index + 1) & index_mask;
+
561
+
562 while (true) {
+
563 const bool not_empty = self->slots[next_index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
+
564
+
565 const bool offset_is_non_zero = self->slots[next_index].offset > 0;
+
566
+
567 if (!(not_empty && offset_is_non_zero)) {
+
568 break;
+
569 }
+
570
+
571 self->slots[index] = self->slots[next_index];
+
572 self->slots[index].offset--;
+
573
+
574 self->slots[next_index].offset = FHASHTABLE_EMPTY_SLOT_OFFSET;
+
575
+
576 index = next_index;
+
577 next_index = (index + 1) & index_mask;
+
578 }
+
579}
+
581
+
+
591static inline bool JOIN(FHASHTABLE_NAME, delete)(FHASHTABLE_TYPE *self, const KEY_TYPE key)
+
592{
+
593 assert(self != NULL);
+
594
+
595 const uint32_t index_mask = self->capacity - 1;
+
596 const uint32_t key_hash = HASH_FUNCTION(key);
+
597
+
598 uint32_t index = key_hash & index_mask;
+
599 uint32_t max_possible_offset = 0;
+
600
+
601 while (true) {
+
602 const bool not_empty = self->slots[index].offset != FHASHTABLE_EMPTY_SLOT_OFFSET;
+
603
+
604 const bool below_max = max_possible_offset <= self->slots[index].offset;
+
605
+
606 if (!(not_empty && below_max)) {
+
607 break;
+
608 }
+
609
+
610 const bool key_is_equal = KEY_IS_EQUAL(key, self->slots[index].key);
+
611
+
612 if (!key_is_equal) {
+
613 index = (index + 1) & index_mask;
+
614 max_possible_offset++;
+
615 continue;
+
616 }
+
617
+
618 self->slots[index].offset = FHASHTABLE_EMPTY_SLOT_OFFSET;
+
619 self->count--;
+
620
+
621 FHASHTABLE_BACKSHIFT(self, index_mask, index);
+
622
+
623 return true;
+
624 }
+
625 return false;
+
626}
-
628
-
-
634static inline void JOIN(FHASHTABLE_NAME, clear)(FHASHTABLE_TYPE *self)
-
635{
-
636 assert(self != NULL);
-
637
-
638 for (uint32_t i = 0; i < self->capacity; i++) {
-
639 self->slots[i].offset = FHASHTABLE_EMPTY_SLOT_OFFSET;
-
640 }
-
641 self->count = 0;
-
642}
+
627
+
+
633static inline void JOIN(FHASHTABLE_NAME, clear)(FHASHTABLE_TYPE *self)
+
634{
+
635 assert(self != NULL);
+
636
+
637 for (uint32_t i = 0; i < self->capacity; i++) {
+
638 self->slots[i].offset = FHASHTABLE_EMPTY_SLOT_OFFSET;
+
639 }
+
640 self->count = 0;
+
641}
-
643
-
-
650static inline void JOIN(FHASHTABLE_NAME, copy)(FHASHTABLE_TYPE *restrict dest_ptr,
-
651 const FHASHTABLE_TYPE *restrict src_ptr)
-
652{
-
653 assert(src_ptr != NULL);
-
654 assert(dest_ptr != NULL);
-
655 assert(src_ptr->capacity <= dest_ptr->capacity);
-
656 assert(dest_ptr->count == 0);
-
657
-
658 for (uint32_t i = 0; i < src_ptr->capacity; i++) {
-
659 dest_ptr->slots[i] = src_ptr->slots[i];
-
660 }
-
661
-
662 dest_ptr->count = src_ptr->count;
-
663}
+
642
+
+
649static inline void JOIN(FHASHTABLE_NAME, copy)(FHASHTABLE_TYPE *restrict dest_ptr,
+
650 const FHASHTABLE_TYPE *restrict src_ptr)
+
651{
+
652 assert(src_ptr != NULL);
+
653 assert(dest_ptr != NULL);
+
654 assert(src_ptr->capacity <= dest_ptr->capacity);
+
655 assert(dest_ptr->count == 0);
+
656
+
657 for (uint32_t i = 0; i < src_ptr->capacity; i++) {
+
658 dest_ptr->slots[i] = src_ptr->slots[i];
+
659 }
+
660
+
661 dest_ptr->count = src_ptr->count;
+
662}
-
664
-
665// }}}
-
666
-
667// macro undefs: {{{
-
668
-
669#undef NAME
-
670#undef KEY_TYPE
-
671#undef VALUE_TYPE
-
672#undef KEY_IS_EQUAL
-
673#undef HASH_FUNCTION
-
674
-
675#undef FHASHTABLE_TYPE
-
676#undef FHASHTABLE_SLOT_TYPE
-
677#undef FHASHTABLE_INIT
-
678#undef FHASHTABLE_IS_FULL
-
679#undef FHASHTABLE_CONTAINS_KEY
-
680#undef FHASHTABLE_CALC_SIZEOF
-
681#undef FHASHTABLE_SWAP_SLOTS
-
682#undef FHASHTABLE_BACKSHIFT
-
683
-
684// }}}
-
685
-
686// vim: ft=c fdm=marker
-
#define HASH_FUNCTION(key)
Used to compute indicies of keys. This must be manually defined before including this header file.
Definition fhashtable.h:156
-
#define KEY_IS_EQUAL(a, b)
Used to compare two keys This must be manually defined before including this header file.
Definition fhashtable.h:141
-
#define FHASHTABLE_EMPTY_SLOT_OFFSET
Offset constant used to flag empty slots.
Definition fhashtable.h:62
-
#define VALUE_TYPE
The value type. This must be manually defined before including this header file.
Definition fhashtable.h:119
-
#define KEY_TYPE
The key type. This must be manually defined before including this header file.
Definition fhashtable.h:107
+
663
+
664// }}}
+
665
+
666// macro undefs: {{{
+
667
+
668#undef NAME
+
669#undef KEY_TYPE
+
670#undef VALUE_TYPE
+
671#undef KEY_IS_EQUAL
+
672#undef HASH_FUNCTION
+
673
+
674#undef FHASHTABLE_TYPE
+
675#undef FHASHTABLE_SLOT_TYPE
+
676#undef FHASHTABLE_INIT
+
677#undef FHASHTABLE_IS_FULL
+
678#undef FHASHTABLE_CONTAINS_KEY
+
679#undef FHASHTABLE_CALC_SIZEOF
+
680#undef FHASHTABLE_SWAP_SLOTS
+
681#undef FHASHTABLE_BACKSHIFT
+
682
+
683// }}}
+
684
+
685// vim: ft=c fdm=marker
+
#define HASH_FUNCTION(key)
Used to compute indices of keys. This must be manually defined before including this header file.
Definition fhashtable.h:155
+
#define KEY_IS_EQUAL(a, b)
Used to compare two keys. This must be manually defined before including this header file.
Definition fhashtable.h:140
+
#define FHASHTABLE_EMPTY_SLOT_OFFSET
Offset constant used to flag empty slots.
Definition fhashtable.h:61
+
#define VALUE_TYPE
The value type. This must be manually defined before including this header file.
Definition fhashtable.h:118
+
#define KEY_TYPE
The key type. This must be manually defined before including this header file.
Definition fhashtable.h:106
FNV-1a hashing function.
Check if a number is a power of two.
static size_t is_pow2(const size_t x)
Check if a number is a power of two.
Definition is_pow2.h:34
@@ -585,11 +585,11 @@
#define JOIN(a, b)
First expand tokens, then paste them together with a _ in between.
Definition paste.h:35
Round up to the next power of two.
static uint32_t round_up_pow2_32(uint32_t x)
Definition round_up_pow2.h:39
-
uint32_t offset
Offset from the ideal slot index.
Definition fhashtable.h:180
-
VALUE_TYPE value
The value in this slot.
Definition fhashtable.h:182
-
KEY_TYPE key
The key in this slot.
Definition fhashtable.h:181
-
uint32_t count
Number of non-empty slots.
Definition fhashtable.h:190
-
uint32_t capacity
Number of slots.
Definition fhashtable.h:191
+
uint32_t offset
Offset from the ideal slot index.
Definition fhashtable.h:179
+
VALUE_TYPE value
The value in this slot.
Definition fhashtable.h:181
+
KEY_TYPE key
The key in this slot.
Definition fhashtable.h:180
+
uint32_t count
Number of non-empty slots.
Definition fhashtable.h:189
+
uint32_t capacity
Number of slots.
Definition fhashtable.h:190
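The listing above implements robin-hood open addressing: every slot records its offset from the ideal (hashed) index, insert swaps the in-flight slot in whenever it is poorer (larger offset) than the resident one, and delete backshifts the following run instead of leaving tombstones. A small hypothetical trace over an 8-slot table, entries labelled by their ideal index:

/* insert A (ideal 3): slots[3] = A, offset 0
 * insert B (ideal 3): tie at slots[3] (0 > 0 is false), so B probes on -> slots[4] = B, offset 1
 * insert C (ideal 4): at slots[4], C.offset 0 is not > B.offset 1, so C probes on -> slots[5] = C, offset 1
 * delete B:           slots[4] is emptied, then backshift slides C into slots[4] with offset 0 */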
diff --git a/fpqueue_8h_source.html b/fpqueue_8h_source.html index e4fc24db..fe0076fe 100644 --- a/fpqueue_8h_source.html +++ b/fpqueue_8h_source.html @@ -402,7 +402,7 @@
435// }}}
436
437// vim: ft=c fdm=marker
-
#define VALUE_TYPE
The value type. This must be manually defined before including this header file.
Definition fhashtable.h:119
+
#define VALUE_TYPE
The value type. This must be manually defined before including this header file.
Definition fhashtable.h:118
#define fpqueue_parent(index)
Given an element index, get the index of the parent.
Definition fpqueue.h:59
#define fpqueue_left_child(index)
Given an element index, get the index of the left child.
Definition fpqueue.h:43
#define fpqueue_right_child(index)
Given an element index, get the index of the right child.
Definition fpqueue.h:51
diff --git a/fqueue_8h_source.html b/fqueue_8h_source.html index 2ffa0e7a..52db44a7 100644 --- a/fqueue_8h_source.html +++ b/fqueue_8h_source.html @@ -369,7 +369,7 @@
398// }}}
399
400// vim: ft=c fdm=marker
-
#define VALUE_TYPE
The value type. This must be manually defined before including this header file.
Definition fhashtable.h:119
+
#define VALUE_TYPE
The value type. This must be manually defined before including this header file.
Definition fhashtable.h:118
#define VALUE_TYPE
Queue value type. This must be manually defined before including this header file.
Definition fqueue.h:90
Check if a number is a power of two.
static size_t is_pow2(const size_t x)
Check if a number is a power of two.
Definition is_pow2.h:34
diff --git a/freelist_8h.html b/freelist_8h.html index a4a1bb07..ffb6203b 100644 --- a/freelist_8h.html +++ b/freelist_8h.html @@ -139,18 +139,18 @@ static void freelist_deallocate_all (struct freelist *self)   - -static void freelist_init (struct freelist *self, const size_t len, unsigned char backing_buf[len]) -  - -static void * internal_freelist_init_block (struct freelist *self, char *block_ptr, struct freelist_header *next, const size_t prev_size, const size_t block_size, const size_t remaining_size) -  + +static void freelist_init (struct freelist *self, const size_t len, unsigned char *backing_buf) +  static void * freelist_allocate (struct freelist *self, const size_t requested_size)   static void freelist_deallocate (struct freelist *self, void *ptr)   + +static void * freelist_reallocate (struct freelist *self, void *ptr, const size_t new_size) + 

Detailed Description

Best-fit free list allocator.
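A hedged usage sketch (buffer size arbitrary): the allocator carves best-fit blocks out of a single backing buffer, keeps freed blocks in a red-black tree keyed by block size, and coalesces neighbouring free blocks on deallocation. The freed flag travels in the low bit of the stored previous-block size, which is safe because block sizes are kept aligned and therefore even:

#include "freelist.h"

static unsigned char backing_buf[4096];

int main(void)
{
    struct freelist fl;
    freelist_init(&fl, sizeof(backing_buf), backing_buf);

    void *a = freelist_allocate(&fl, 128); /* NULL when no fitting free block exists */
    void *b = freelist_allocate(&fl, 64);
    if (a && b) {
        freelist_deallocate(&fl, a); /* block is coalesced with free neighbours and reindexed */
    }
    freelist_deallocate_all(&fl); /* reset the whole buffer at once */
    return 0;
}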

diff --git a/freelist_8h_source.html b/freelist_8h_source.html index 370f806f..f714f372 100644 --- a/freelist_8h_source.html +++ b/freelist_8h_source.html @@ -102,304 +102,310 @@
9 * See the file LICENSE included with this distribution for more
10 * information. */
11
-
29#pragma once
-
30
-
31#include <stdalign.h>
-
32#include <stdbool.h>
-
33#include <stddef.h>
-
34#include <stdint.h>
-
35#include <string.h>
-
36
-
37#include "align.h" // align, calc_alignment_padding, CALC_ALIGNMENT_PADDING
-
38
-
- - - -
46 char : CALC_ALIGNMENT_PADDING(alignof(max_align_t), 2 * sizeof(size_t));
-
47};
+
24#pragma once
+
25
+
26#include <stdalign.h>
+
27#include <stdbool.h>
+
28#include <stddef.h>
+
29#include <stdint.h>
+
30#include <string.h>
+
31
+
32#include "align.h" // align, calc_alignment_padding, CALC_ALIGNMENT_PADDING
+
33
+
+ + + +
41 char : CALC_ALIGNMENT_PADDING(alignof(max_align_t), 2 * sizeof(size_t));
+
42};
-
48
-
50#define NAME freetree
-
51#define KEY_TYPE struct freelist_header
-
52#define KEY_IS_STRICTLY_LESS(a, b) ((a).curr_block_size < (b).curr_block_size)
-
53#define KEY_MEMBER_IS_FIRST
-
54#define ALLOW_DUPLICATES
-
56#include "rbtree.h"
-
57
-
-
61struct freelist {
-
62 size_t buf_len;
-
63 size_t buf_used;
- -
65 struct freetree_node *rb_rootptr;
-
66 unsigned char *buf_ptr;
-
67};
+
43
+
45#define NAME freetree
+
46#define KEY_TYPE struct freelist_header
+
47#define KEY_IS_STRICTLY_LESS(a, b) ((a).curr_block_size < (b).curr_block_size)
+
48#define KEY_MEMBER_IS_FIRST
+
49#define ALLOW_DUPLICATES
+
51#include "rbtree.h"
+
52
+
+
56struct freelist {
+
57 size_t buf_len;
+
58 size_t buf_used;
+
59 struct freelist_header *head;
+
60 struct freetree_node *rb_rootptr;
+
61 unsigned char *buf_ptr;
+
62};
-
68
-
70static inline struct freetree_node *internal_freetree_search_best_block(struct freetree_node **rootptr_ptr,
-
71 const size_t block_size);
-
72
-
73static inline void internal_freelist_coalescence(struct freelist *self, struct freelist_header *header);
-
75
-
76/* Create a freelist header from given params */
-
77static inline struct freelist_header freelist_header_from(const bool is_freed, const size_t prev_size,
-
78 const size_t curr_size)
-
79{
-
80 return (struct freelist_header){.__prev_block_size_w_freed_bit = prev_size + (is_freed ? 1 : 0),
-
81 .curr_block_size = curr_size};
-
82}
-
83
-
84/* Get the prev size from header */
-
85static inline size_t freelist_header_prev_size(const struct freelist_header *header)
-
86{
-
87 return (header->__prev_block_size_w_freed_bit & ~(size_t)1);
-
88}
-
89
-
90/* Check if the memory block under header is freed */
-
91static inline bool freelist_header_is_freed(const struct freelist_header *header)
-
92{
-
93 return header->__prev_block_size_w_freed_bit & 1;
-
94}
+
63
+
65static inline struct freetree_node *internal_freetree_search_best_block(struct freetree_node **rootptr_ptr,
+
66 const size_t block_size);
+
67
+
68static inline void internal_freelist_coalescence(struct freelist *self, struct freelist_header *header);
+
70
+
71/* Create a freelist header from given params */
+
72static inline struct freelist_header freelist_header_from(const bool is_freed, const size_t prev_size,
+
73 const size_t curr_size)
+
74{
+
75 return (struct freelist_header){.__prev_block_size_w_freed_bit = prev_size + (is_freed ? 1 : 0),
+
76 .curr_block_size = curr_size};
+
77}
+
78
+
79/* Get the prev size from header */
+
80static inline size_t freelist_header_prev_size(const struct freelist_header *header)
+
81{
+
82 return (header->__prev_block_size_w_freed_bit & ~(size_t)1);
+
83}
+
84
+
85/* Check if the memory block under header is freed */
+
86static inline bool freelist_header_is_freed(const struct freelist_header *header)
+
87{
+
88 return header->__prev_block_size_w_freed_bit & 1;
+
89}
+
90
+
91/* Get the pointer to previous header */
+
92static inline struct freelist_header *freelist_header_prev(struct freelist_header *header)
+
93{
+
94 const size_t prev_size = freelist_header_prev_size(header);
95
-
96/* Get the pointer to previous header */
-
97static inline struct freelist_header *freelist_header_prev(struct freelist_header *header)
-
98{
-
99 const size_t prev_size = freelist_header_prev_size(header);
-
100
-
101 return prev_size == 0 ? NULL : (struct freelist_header *)((char *)header - prev_size);
-
102}
-
103
-
104/* Get the pointer to next header */
-
105static inline struct freelist_header *freelist_header_next(struct freelist_header *header,
-
106 const struct freelist *freelist_ptr)
-
107{
-
108 const char *next = (char *)header + header->curr_block_size;
-
109 const bool is_end = next >= (char *)freelist_ptr->buf_ptr + freelist_ptr->buf_len;
-
110
-
111 return is_end ? NULL : (struct freelist_header *)next;
-
112}
-
113
-
114/* Check if the freelist header *should* be in the freetree */
-
115static inline bool freelist_header_should_be_in_freetree(const struct freelist_header *header)
-
116{
-
117 return freelist_header_is_freed(header) && header->curr_block_size >= sizeof(struct freetree_node);
-
118}
+
96 return prev_size == 0 ? NULL : (struct freelist_header *)((char *)header - prev_size);
+
97}
+
98
+
99/* Get the pointer to next header */
+
100static inline struct freelist_header *freelist_header_next(struct freelist_header *header,
+
101 const struct freelist *freelist_ptr)
+
102{
+
103 const char *next = (char *)header + header->curr_block_size;
+
104 const bool is_end = next >= (char *)freelist_ptr->buf_ptr + freelist_ptr->buf_len;
+
105
+
106 return is_end ? NULL : (struct freelist_header *)next;
+
107}
+
108
+
109/* Check if the freelist header *should* be in the freetree */
+
110static inline bool freelist_header_should_be_in_freetree(const struct freelist_header *header)
+
111{
+
112 return freelist_header_is_freed(header) && header->curr_block_size >= sizeof(struct freetree_node);
+
113}
+
114
+
115/* Deallocate everything contained in the freelist */
+
116static inline void freelist_deallocate_all(struct freelist *self)
+
117{
+
118 assert(self);
119
-
120/* Deallocate everything contained in the freelist */
-
121static inline void freelist_deallocate_all(struct freelist *self)
-
122{
-
123 assert(self);
-
124
-
125 struct freetree_node *node = (struct freetree_node *)self->buf_ptr;
-
126
-
127 freetree_node_init(node, freelist_header_from(true, 0, self->buf_len));
+
120 struct freetree_node *node = (struct freetree_node *)self->buf_ptr;
+
121
+
122 freetree_node_init(node, freelist_header_from(true, 0, self->buf_len));
+
123
+
124 self->buf_used = self->buf_len - node->key.curr_block_size;
+
125 self->rb_rootptr = node;
+
126 self->head = &node->key;
+
127}
128
-
129 self->buf_used = self->buf_len - node->key.curr_block_size;
-
130 self->rb_rootptr = node;
-
131 self->head = &node->key;
-
132}
-
133
-
134/* Initialize the freelist */
-
135static inline void freelist_init(struct freelist *self, const size_t len, unsigned char backing_buf[len])
-
136{
-
137 assert(self);
-
138 assert(backing_buf);
-
139
-
140 const uintptr_t padding = calc_alignment_padding(alignof(struct freelist_header), (uintptr_t)&backing_buf[0]);
+
129/* Initialize the freelist */
+
130static inline void freelist_init(struct freelist *self, const size_t len, unsigned char *backing_buf)
+
131{
+
132 assert(self);
+
133 assert(backing_buf);
+
134
+
135 const uintptr_t padding = calc_alignment_padding(alignof(struct freelist_header), (uintptr_t)&backing_buf[0]);
+
136
+
137 assert(len >= sizeof(struct freetree_node) + padding);
+
138
+
139 self->buf_ptr = &backing_buf[padding];
+
140 self->buf_len = len - padding;
141
-
142 assert(len >= sizeof(struct freetree_node) + padding);
-
143
-
144 self->buf_ptr = &backing_buf[padding];
-
145 self->buf_len = len - padding;
-
146
-
147 freelist_deallocate_all(self);
-
148}
-
149
-
150static inline void *internal_freelist_init_block(struct freelist *self, char *block_ptr, struct freelist_header *next,
-
151 const size_t prev_size, const size_t block_size,
-
152 const size_t remaining_size)
-
153{
-
154 if (remaining_size > 0) {
-
155 struct freelist_header *remaining_header = (struct freelist_header *)(block_ptr + block_size);
-
156 *remaining_header = freelist_header_from(true, block_size, remaining_size);
+
142 freelist_deallocate_all(self);
+
143}
+
144
+
146static inline void *internal_freelist_init_block(struct freelist *self, char *block_ptr, struct freelist_header *next,
+
147 const size_t prev_size, const size_t block_size,
+
148 const size_t remaining_size)
+
149{
+
150 if (remaining_size > 0) {
+
151 struct freelist_header *remaining_header = (struct freelist_header *)(block_ptr + block_size);
+
152 *remaining_header = freelist_header_from(true, block_size, remaining_size);
+
153
+
154 if (next) {
+
155 *next = freelist_header_from(freelist_header_is_freed(next), remaining_size, next->curr_block_size);
+
156 }
157
-
158 if (next) {
-
159 *next = freelist_header_from(freelist_header_is_freed(next), remaining_size, next->curr_block_size);
-
160 }
-
161
-
162 if (freelist_header_should_be_in_freetree(remaining_header)) {
-
163 struct freetree_node *new_node = (struct freetree_node *)remaining_header;
-
164 freetree_node_init(new_node, *remaining_header);
-
165 freetree_insert_node(&self->rb_rootptr, new_node);
-
166 }
-
167 }
-
168 struct freelist_header *curr_header = (struct freelist_header *)block_ptr;
-
169 *curr_header = freelist_header_from(false, prev_size, block_size);
-
170
-
171 return (void *)((char *)curr_header + sizeof(struct freelist_header));
-
172}
-
173
-
174/* Get the pointer to a block of the freelist.*/
-
175static inline void *freelist_allocate(struct freelist *self, const size_t requested_size)
-
176{
-
177 assert(self);
-
178 assert(requested_size != 0);
+
158 if (freelist_header_should_be_in_freetree(remaining_header)) {
+
159 struct freetree_node *new_node = (struct freetree_node *)remaining_header;
+
160 freetree_node_init(new_node, *remaining_header);
+
161 freetree_insert_node(&self->rb_rootptr, new_node);
+
162 }
+
163 }
+
164 struct freelist_header *curr_header = (struct freelist_header *)block_ptr;
+
165 *curr_header = freelist_header_from(false, prev_size, block_size);
+
166
+
167 return (void *)((char *)curr_header + sizeof(struct freelist_header));
+
168}
+
169
+
170static inline size_t internal_freelist_calc_block_size(size_t requested_size)
+
171{
+
172 size_t block_size = sizeof(struct freelist_header) + requested_size;
+
173 block_size = block_size >= sizeof(struct freetree_node) ? block_size : sizeof(struct freetree_node);
+
174 block_size = block_size + calc_alignment_padding(alignof(struct freetree_node), block_size);
+
175
+
176 return block_size;
+
177}
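Worked figures for the clamping and padding above, assuming a 64-bit target where sizeof(struct freelist_header) == 16, sizeof(struct freetree_node) == 48, and alignof(struct freetree_node) == 8 (assumed values, not taken from the header):

/* requested_size = 10  -> 16 + 10  = 26  -> clamped to 48 -> padded to 48  */
/* requested_size = 100 -> 16 + 100 = 116 -> stays 116     -> padded to 120 */
/* every block is thus large enough to be recycled as a freetree_node once freed */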
179
-
180 size_t block_size = sizeof(struct freelist_header) + requested_size;
-
181 block_size = block_size >= sizeof(struct freetree_node) ? block_size : sizeof(struct freetree_node);
-
182 block_size = block_size + calc_alignment_padding(alignof(struct freetree_node), block_size);
-
183
-
184 struct freetree_node *node = internal_freetree_search_best_block(&self->rb_rootptr, block_size);
+
180/* Allocate a block from the freelist and return a pointer to its payload. */
+
181static inline void *freelist_allocate(struct freelist *self, const size_t requested_size)
+
182{
+
183 assert(self);
+
184 assert(requested_size != 0);
185
-
186 if (node == NULL) {
-
187 return NULL;
-
188 }
+
186 const size_t block_size = internal_freelist_calc_block_size(requested_size);
+
187
+
188 struct freetree_node *node = internal_freetree_search_best_block(&self->rb_rootptr, block_size);
189
-
190 freetree_delete_node(&self->rb_rootptr, node);
-
191
-
192 self->buf_used += block_size;
+
190 if (node == NULL) {
+
191 return NULL;
+
192 }
193
-
194 return internal_freelist_init_block(self, (char *)node, freelist_header_next(&node->key, self),
-
195 freelist_header_prev_size(&node->key), block_size,
-
196 node->key.curr_block_size - block_size);
-
197}
-
198
-
199/* Deallocate a block from the freelist for further use. */
-
200static inline void freelist_deallocate(struct freelist *self, void *ptr)
-
201{
-
202 struct freelist_header *header = (struct freelist_header *)((char *)ptr - sizeof(struct freelist_header));
-
203
-
204 assert(!freelist_header_is_freed(header) && "double free detected!");
-
205
-
206 self->buf_used -= header->curr_block_size;
+
194 freetree_delete_node(&self->rb_rootptr, node);
+
195
+
196 self->buf_used += block_size;
+
197
+
198 return internal_freelist_init_block(self, (char *)node, freelist_header_next(&node->key, self),
+
199 freelist_header_prev_size(&node->key), block_size,
+
200 node->key.curr_block_size - block_size);
+
201}
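A usage sketch; freelist_allocate returns NULL when no free block can hold the padded block size, so the result must always be checked:

void example_allocate(struct freelist *fl)
{
    int *xs = freelist_allocate(fl, 16 * sizeof(int));
    if (!xs) {
        return; /* no sufficiently large free block */
    }
    xs[0] = 42;
}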
+
202
+
203/* Return a block to the freelist so it can be reused. */
+
204static inline void freelist_deallocate(struct freelist *self, void *ptr)
+
205{
+
206 assert(self->buf_ptr <= (unsigned char *)ptr && (unsigned char *)ptr < &self->buf_ptr[self->buf_len]);
207
-
208 internal_freelist_coalescence(self, header);
-
209}
-
210
-
211/* Reallocate a block and grow / shrink the block. */
-
212/*
-
213static inline void *freelist_reallocate(struct freelist *self, void *ptr, const size_t new_size)
-
214{
-
215 assert(self);
-
216 assert(new_size != 0);
-
217
-
218 struct freelist_header *header = (struct freelist_header *)((char *)ptr - sizeof(struct freelist_header));
-
219 const size_t old_size = header->curr_block_size - sizeof(struct freelist_header);
-
220
-
221 assert(freelist_header_is_freed(header) && "reallocating freed block!");
-
222
-
223 if (new_size <= old_size) {
-
224 size_t block_size = sizeof(struct freelist_header) + new_size;
-
225 block_size = block_size >= sizeof(struct freetree_node) ? block_size : sizeof(struct freetree_node);
-
226 block_size = block_size + calc_alignment_padding(alignof(max_align_t), block_size);
-
227
-
228 if (block_size == header->curr_block_size) {
-
229 return ptr;
-
230 }
-
231 else {
-
232 return internal_freelist_init_block(self, (char *)header, freelist_header_next(header, self),
-
233 freelist_header_prev_size(header), block_size,
-
234 header->curr_block_size - block_size);
-
235 }
-
236 }
-
237
-
238 size_t bytes_acc = header->curr_block_size;
-
239 struct freelist_header *next = freelist_header_next(header, self);
-
240 while (bytes_acc < new_size && next != NULL && !freelist_header_is_freed(next)) {
-
241 bytes_acc += next->curr_block_size;
-
242 next = freelist_header_next(next, self);
-
243 }
-
244
-
245 if (bytes_acc >= new_size) {
-
246 return internal_freelist_init_block(self, (char *)header, freelist_header_next(header, self),
-
247 freelist_header_prev_size(header),
-
248 new_size + sizeof(struct freelist_header), header->curr_block_size -);
-
249 }
-
250
-
251 void *ptr_new = freelist_allocate(self, new_size);
-
252 if (!ptr_new) {
-
253 return NULL;
-
254 }
-
255 memcpy(ptr_new, ptr, header->curr_block_size - sizeof(struct freelist_header));
-
256 freelist_deallocate(self, ptr);
-
257 return ptr_new;
-
258}
-
259*/
+
208 struct freelist_header *header = (struct freelist_header *)((char *)ptr - sizeof(struct freelist_header));
+
209
+
210 assert(!freelist_header_is_freed(header) && "double free detected!");
+
211
+
212 self->buf_used -= header->curr_block_size;
+
213
+
214 internal_freelist_coalescence(self, header);
+
215}
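A round-trip sketch; in debug builds the assertions above catch pointers outside the backing buffer as well as double frees:

void example_roundtrip(struct freelist *fl)
{
    void *p = freelist_allocate(fl, 64);
    if (!p) {
        return;
    }
    freelist_deallocate(fl, p);
    /* freelist_deallocate(fl, p); -- would trip the double-free assertion */
}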
+
216
+
217/* Reallocate a block, growing or shrinking it in place when possible. */
+
218static inline void *freelist_reallocate(struct freelist *self, void *ptr, const size_t new_size)
+
219{
+
220 assert(self);
+
221 assert(new_size != 0);
+
222
+
223 assert(self->buf_ptr <= (unsigned char *)ptr && (unsigned char *)ptr < &self->buf_ptr[self->buf_len]);
+
224
+
225 struct freelist_header *header = (struct freelist_header *)((char *)ptr - sizeof(struct freelist_header));
+
226 const size_t prev_size = freelist_header_prev_size(header);
+
227 const size_t old_size = header->curr_block_size - sizeof(struct freelist_header);
+
228
+
229 assert(!freelist_header_is_freed(header) && "reallocating freed block!");
+
230
+
231 if (new_size <= old_size) {
+
232 const size_t block_size = internal_freelist_calc_block_size(new_size);
+
233
+
234 if (block_size == header->curr_block_size) {
+
235 return ptr;
+
236 }
+
237 return internal_freelist_init_block(self, (char *)header, freelist_header_next(header, self), prev_size,
+
238 block_size, header->curr_block_size - block_size);
+
239 }
+
240
+
241 size_t bytes_acc = header->curr_block_size;
+
242 struct freelist_header *next = freelist_header_next(header, self);
+
243 while (bytes_acc < new_size && next != NULL && !freelist_header_is_freed(next)) {
+
244 bytes_acc += next->curr_block_size;
+
245 next = freelist_header_next(next, self);
+
246 }
+
247
+
248 if (bytes_acc >= new_size) {
+
249 return internal_freelist_init_block(self, (char *)header, next, prev_size, new_size, bytes_acc - new_size);
+
250 }
+
251
+
252 void *ptr_new = freelist_allocate(self, new_size);
+
253 if (!ptr_new) {
+
254 return NULL;
+
255 }
+
256
+
257 memcpy(ptr_new, ptr, old_size);
+
258
+
259 freelist_deallocate(self, ptr);
260
-
262static inline struct freetree_node *internal_freetree_search_best_block(struct freetree_node **rootptr_ptr,
-
263 const size_t block_size)
-
264{
-
265 assert(rootptr_ptr != NULL);
-
266
-
267 struct freetree_node *prev_ptr = NULL;
-
268 struct freetree_node *curr_ptr = *rootptr_ptr;
+
261 return ptr_new;
+
262}
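A sketch of the realloc-style contract: on failure the call returns NULL and the original block stays valid, so the old pointer must be kept until the call succeeds; on success the block may have moved, so only the returned pointer may be used:

void example_grow(struct freelist *fl)
{
    char *buf = freelist_allocate(fl, 32);
    if (!buf) {
        return;
    }
    char *bigger = freelist_reallocate(fl, buf, 128);
    if (!bigger) {
        freelist_deallocate(fl, buf); /* buf is still ours on failure */
        return;
    }
    bigger[0] = 'x'; /* buf must no longer be used */
    freelist_deallocate(fl, bigger);
}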
+
263
+
265static inline struct freetree_node *internal_freetree_search_best_block(struct freetree_node **rootptr_ptr,
+
266 const size_t block_size)
+
267{
+
268 assert(rootptr_ptr != NULL);
269
-
270 while (curr_ptr != NULL) {
-
271 if (block_size > curr_ptr->key.curr_block_size) {
-
272 curr_ptr = curr_ptr->right_ptr;
-
273 }
-
274 else {
-
275 prev_ptr = curr_ptr;
-
276 curr_ptr = curr_ptr->left_ptr;
-
277 }
-
278 }
-
279 return prev_ptr;
-
280}
-
281
-
282static inline void internal_freelist_coalescence(struct freelist *self, struct freelist_header *header)
-
283{
-
284 assert(self);
-
285 assert(header);
-
286
-
287 struct freelist_header *prev = freelist_header_prev(header);
-
288 struct freelist_header *next = freelist_header_next(header, self);
+
270 struct freetree_node *prev_ptr = NULL;
+
271 struct freetree_node *curr_ptr = *rootptr_ptr;
+
272
+
273 while (curr_ptr != NULL) {
+
274 if (block_size > curr_ptr->key.curr_block_size) {
+
275 curr_ptr = curr_ptr->right_ptr;
+
276 }
+
277 else {
+
278 prev_ptr = curr_ptr;
+
279 curr_ptr = curr_ptr->left_ptr;
+
280 }
+
281 }
+
282 return prev_ptr;
+
283}
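This is a best-fit descent: nodes that are too small are skipped to the right, and the smallest node that still fits is remembered while descending left. For example, with freed blocks of 32, 64, and 128 bytes in the tree, a request needing 48 bytes lands on the 64-byte node, and a request needing 256 bytes returns NULL.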
+
284
+
285static inline void internal_freelist_coalescence(struct freelist *self, struct freelist_header *header)
+
286{
+
287 assert(self);
+
288 assert(header);
289
-
290 struct freelist_header header_new = *header;
-
291 struct freelist_header *header_addr = header;
-
292 struct freelist_header *header_next = next;
-
293
-
294 if (prev && freelist_header_is_freed(prev)) {
-
295 header_new.curr_block_size += prev->curr_block_size;
-
296 header_new.__prev_block_size_w_freed_bit = prev->__prev_block_size_w_freed_bit & ~(uintptr_t)1;
-
297 header_addr = prev;
-
298
-
299 if (freelist_header_should_be_in_freetree(prev)) {
-
300 freetree_delete_node(&self->rb_rootptr, (struct freetree_node *)prev);
-
301 }
-
302 }
-
303 if (next && freelist_header_is_freed(next)) {
-
304 header_new.curr_block_size += next->curr_block_size;
-
305 header_next = freelist_header_next(next, self);
-
306
-
307 if (freelist_header_should_be_in_freetree(next)) {
-
308 freetree_delete_node(&self->rb_rootptr, (struct freetree_node *)next);
-
309 }
-
310 }
-
311 if (header_next) {
-
312 *header_next = freelist_header_from(freelist_header_is_freed(header_next), header_new.curr_block_size,
-
313 header_next->curr_block_size);
-
314 }
-
315 header_new.__prev_block_size_w_freed_bit |= 1;
-
316 assert(freelist_header_should_be_in_freetree(&header_new));
-
317
-
318 struct freetree_node *node_addr = (struct freetree_node *)header_addr;
-
319 freetree_node_init(node_addr, header_new);
-
320 freetree_insert_node(&self->rb_rootptr, node_addr);
-
321}
+
290 struct freelist_header *prev = freelist_header_prev(header);
+
291 struct freelist_header *next = freelist_header_next(header, self);
+
292
+
293 struct freelist_header header_new = *header;
+
294 struct freelist_header *header_addr = header;
+
295 struct freelist_header *header_next = next;
+
296
+
297 if (prev && freelist_header_is_freed(prev)) {
+
298 header_new.curr_block_size += prev->curr_block_size;
+
299 header_new.__prev_block_size_w_freed_bit = prev->__prev_block_size_w_freed_bit & ~(uintptr_t)1;
+
300 header_addr = prev;
+
301
+
302 if (freelist_header_should_be_in_freetree(prev)) {
+
303 freetree_delete_node(&self->rb_rootptr, (struct freetree_node *)prev);
+
304 }
+
305 }
+
306 if (next && freelist_header_is_freed(next)) {
+
307 header_new.curr_block_size += next->curr_block_size;
+
308 header_next = freelist_header_next(next, self);
+
309
+
310 if (freelist_header_should_be_in_freetree(next)) {
+
311 freetree_delete_node(&self->rb_rootptr, (struct freetree_node *)next);
+
312 }
+
313 }
+
314 if (header_next) {
+
315 *header_next = freelist_header_from(freelist_header_is_freed(header_next), header_new.curr_block_size,
+
316 header_next->curr_block_size);
+
317 }
+
318 header_new.__prev_block_size_w_freed_bit |= 1;
+
319 assert(freelist_header_should_be_in_freetree(&header_new));
+
320
+
321 struct freetree_node *node_addr = (struct freetree_node *)header_addr;
+
322 freetree_node_init(node_addr, header_new);
+
323 freetree_insert_node(&self->rb_rootptr, node_addr);
+
324}
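A sketch of what coalescence buys, assuming a fresh freelist so the two blocks are carved adjacently from the same initial free region:

void example_coalesce(struct freelist *fl)
{
    void *a = freelist_allocate(fl, 64);
    void *b = freelist_allocate(fl, 64);
    if (!a || !b) {
        return;
    }
    freelist_deallocate(fl, a);
    freelist_deallocate(fl, b); /* merges with its freed neighbour a */
    /* the merged region can now serve a request larger than either block */
    void *big = freelist_allocate(fl, 128);
    if (big) {
        freelist_deallocate(fl, big);
    }
}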
Align memory.
static uintptr_t calc_alignment_padding(const size_t alignment, const uintptr_t ptr)
Calculate the alignment padding required to align a pointer.
Definition align.h:91
#define CALC_ALIGNMENT_PADDING(alignment, ptr)
compile time calc_alignment_padding.
Definition align.h:105
Intrusive red-black tree.
-
Freelist header definition. This lies at the front of every block.
Definition freelist.h:42
-
size_t __prev_block_size_w_freed_bit
Definition freelist.h:43
-
size_t curr_block_size
Current block size.
Definition freelist.h:45
-
Freelist struct definition.
Definition freelist.h:61
-
struct freetree_node * rb_rootptr
Header of freetree (freed memory blocks)
Definition freelist.h:65
-
unsigned char * buf_ptr
Underlying buffer.
Definition freelist.h:66
-
size_t buf_used
Number of bytes used of buffer.
Definition freelist.h:63
-
struct freelist_header * head
Header of freelist headers (all memory blocks)
Definition freelist.h:64
-
size_t buf_len
Buffer length.
Definition freelist.h:62
+
Freelist header definition. This lies at the front of every block.
Definition freelist.h:37
+
size_t __prev_block_size_w_freed_bit
Definition freelist.h:38
+
size_t curr_block_size
Current block size.
Definition freelist.h:40
+
Freelist struct definition.
Definition freelist.h:56
+
struct freetree_node * rb_rootptr
Header of freetree (freed memory blocks)
Definition freelist.h:60
+
unsigned char * buf_ptr
Underlying buffer.
Definition freelist.h:61
+
size_t buf_used
Number of bytes used of buffer.
Definition freelist.h:58
+
struct freelist_header * head
Header of freelist headers (all memory blocks)
Definition freelist.h:59
+
size_t buf_len
Buffer length.
Definition freelist.h:57
diff --git a/structpool__free__node.html b/structpool__free__node.html
new file mode 100644
--- /dev/null
+++ b/structpool__free__node.html
+data-structures-c: pool_free_node Struct Reference
pool_free_node Struct Reference
+
+

+Public Attributes

+struct pool_free_nodenext_ptr
 
+
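The struct is this small because a pool's free slots store their own bookkeeping: each unused slot is reinterpreted as a pool_free_node whose next_ptr threads to the next free slot. A minimal sketch of that intrusive pattern; the push/pop helpers are illustrative and not part of pool.h:

static void push_free(struct pool_free_node **head, void *slot)
{
    struct pool_free_node *node = slot; /* reuse the slot's own bytes */
    node->next_ptr = *head;
    *head = node;
}

static void *pop_free(struct pool_free_node **head)
{
    struct pool_free_node *node = *head;
    if (!node) {
        return NULL; /* pool exhausted */
    }
    *head = node->next_ptr;
    return node;
}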
The documentation for this struct was generated from the following file: +
diff --git a/structtemp__arena__state-members.html b/structtemp__arena__state-members.html
new file mode 100644
index 00000000..277485b2
--- /dev/null
+++ b/structtemp__arena__state-members.html
@@ -0,0 +1,103 @@
+data-structures-c: Member List
temp_arena_state Member List
This is the complete list of members for temp_arena_state, including all inherited members.
arena_ptrtemp_arena_state
curr_offsettemp_arena_state
prev_offsettemp_arena_state
diff --git a/structtemp__arena__state.html b/structtemp__arena__state.html
new file mode 100644
index 00000000..42e6fe0e
--- /dev/null
+++ b/structtemp__arena__state.html
@@ -0,0 +1,126 @@
+data-structures-c: temp_arena_state Struct Reference
temp_arena_state Struct Reference
Temporary arena state struct. More...

#include <arena.h>

Public Attributes

struct arena * arena_ptr
    arena pointer

size_t prev_offset
    the arena's prev_offset at the time the state was saved

size_t curr_offset
    the arena's curr_offset at the time the state was saved
 
+

Detailed Description

+

Temporary arena state struct.
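A usage sketch of the begin/end pair declared in arena.h; arena_allocate stands in for the arena's allocation routine, whose exact name is not shown here:

void example_scratch(struct arena *a)
{
    struct temp_arena_state t = temp_arena_memory_begin(a);

    void *scratch = arena_allocate(a, 256); /* hypothetical allocation call */
    (void)scratch;

    temp_arena_memory_end(t); /* rewinds prev_offset and curr_offset */
}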

+

The documentation for this struct was generated from the following file:
arena.h