-
Notifications
You must be signed in to change notification settings - Fork 21
/
cache.c
223 lines (187 loc) · 6.15 KB
/
cache.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ptypes.h"
#include "cache.h"
#include "sieve.h"
#include "constants.h" /* _MPU_FILL_EXTRA_N and _MPU_INITIAL_CACHE_SIZE */
#include "threadlock.h"
/*
* These functions are used internally by the .c and .xs files.
* They handle a cached primary set of primes, as well as a segment
* area for use by all the functions that want to do segmented operation.
*
* We must be thread-safe, and we want to allow a good deal of concurrency.
* It is imperative these be used correctly. After calling the get method,
* use the sieve or segment, then release. You MUST call release before you
* return or croak. You ought to release as soon as you're done using the
* sieve or segment.
*/
/* Set once (by prime_precalc) before any locking is attempted; guards the
 * mutex/lock objects declared below. */
static int mutex_init = 0;
MUTEX_DECL(segment);
READ_WRITE_LOCK_DECL(primary_cache);

/* The primary cache: a mod-30 wheel sieve (30 numbers per byte, per the
 * comment on get_prime_cache) valid through prime_cache_size. */
static unsigned char* prime_cache_sieve = 0;
static UV prime_cache_size = 0;
/* Erase the primary cache and fill up to n. */
/* Note: You must have a write lock before calling this! */
static void _erase_and_fill_prime_cache(UV n) {
  UV padded_n;

  /* Pad the request (so nearby follow-up requests won't force an immediate
   * re-sieve) and round down to a multiple of 30 — one wheel byte covers 30
   * numbers.  Clamp to UV_MAX when adding the padding would overflow. */
  if (n >= (UV_MAX-_MPU_FILL_EXTRA_N))
    padded_n = UV_MAX;
  else
    padded_n = ((n + _MPU_FILL_EXTRA_N)/30)*30;

  /* If new size isn't larger or smaller, then we're done. */
  if (prime_cache_size == padded_n)
    return;

  /* Drop the old sieve before building the replacement. */
  if (prime_cache_sieve != 0)
    Safefree(prime_cache_sieve);
  prime_cache_sieve = 0;
  prime_cache_size = 0;

  /* n == 0 means "just erase"; otherwise sieve out to the padded size. */
  if (n > 0) {
    prime_cache_sieve = sieve_erat30(padded_n);
    MPUassert(prime_cache_sieve != 0, "sieve returned null");
    prime_cache_size = padded_n;
  }
}
/*
 * Get the size and a pointer to the cached prime sieve.
 * Returns the maximum sieved value available.
 * Allocates and sieves if needed.
 *
 * The sieve holds 30 numbers per byte, using a mod-30 wheel.
 */
UV get_prime_cache(UV n, const unsigned char** sieve)
{
#ifdef USE_ITHREADS
  /* sieve == 0: caller only wants the cache grown to cover n.  No read
   * lock is held on return in this case, so no release is needed. */
  if (sieve == 0) {
    if (prime_cache_size < n) {
      WRITE_LOCK_START(primary_cache);
      _erase_and_fill_prime_cache(n);
      WRITE_LOCK_END(primary_cache);
    }
    return prime_cache_size;
  }

  /* This could be done more efficiently if we converted a write lock to a
   * reader after doing the expansion.  But I think this solution is less
   * error prone (though could lead to starvation in pathological cases).
   */
  READ_LOCK_START(primary_cache);
  while (prime_cache_size < n) {
    /* The cache isn't big enough.  Expand it. */
    READ_LOCK_END(primary_cache);
    /* thread reminder: the world can change right here */
    WRITE_LOCK_START(primary_cache);
    /* Re-check under the write lock: another thread may have grown it. */
    if (prime_cache_size < n)
      _erase_and_fill_prime_cache(n);
    WRITE_LOCK_END(primary_cache);
    /* thread reminder: the world can change right here */
    READ_LOCK_START(primary_cache);
    /* Loop again: the cache could have shrunk while we held no lock. */
  }
  MPUassert(prime_cache_size >= n, "prime cache is too small!");

  /* Success: hand out the sieve.  The read lock is still held; the caller
   * must call release_prime_cache() when done. */
  *sieve = prime_cache_sieve;
  return prime_cache_size;
#else
  /* Single-threaded: grow if needed, no locking. */
  if (prime_cache_size < n)
    _erase_and_fill_prime_cache(n);
  MPUassert(prime_cache_size >= n, "prime cache is too small!");
  if (sieve != 0)
    *sieve = prime_cache_sieve;
  return prime_cache_size;
#endif
}
#ifdef USE_ITHREADS
/* Release the read lock acquired by a successful get_prime_cache(n, &sieve).
 * Must be called before returning or croaking (see file header comment). */
void release_prime_cache(const unsigned char* mem) {
  (void)mem; /* We don't currently care about the pointer */
  READ_LOCK_END(primary_cache);
}
#endif
/* The segment everyone is trying to share */
/* NOTE(review): sized just under 32KB — presumably the -16 leaves slack for
 * allocator overhead so the block fits a round allocation; confirm. */
#define PRIMARY_SEGMENT_CHUNK_SIZE  UVCONST(32*1024-16)
static unsigned char* prime_segment = 0;
/* 1 when no thread currently holds the primary segment (segment_mutex). */
static int prime_segment_is_available = 1;

/* If that's in use, malloc a new one of this size */
#define SECONDARY_SEGMENT_CHUNK_SIZE  UVCONST(32*1024-16)
/* Hand out a scratch segment and store its byte size in *size.  The shared
 * primary segment is used when free; otherwise a private secondary buffer
 * is allocated.  Pair every call with release_prime_segment(). */
unsigned char* get_prime_segment(UV *size) {
  unsigned char* mem;
  int got_primary = 0;

  MPUassert(size != 0, "get_prime_segment given null size pointer");
  MPUassert(mutex_init == 1, "segment mutex has not been initialized");

  /* Try to claim the shared primary segment.  Only the flag flip needs
   * the mutex; allocation happens after unlocking. */
  MUTEX_LOCK(&segment_mutex);
  got_primary = prime_segment_is_available;
  if (got_primary)
    prime_segment_is_available = 0;
  MUTEX_UNLOCK(&segment_mutex);

  if (got_primary) {
    /* We own the primary segment now; allocate it lazily on first use. */
    if (prime_segment == 0)
      New(0, prime_segment, PRIMARY_SEGMENT_CHUNK_SIZE, unsigned char);
    mem = prime_segment;
    *size = PRIMARY_SEGMENT_CHUNK_SIZE;
  } else {
    /* Primary is busy: give the caller a private allocation instead. */
    New(0, mem, SECONDARY_SEGMENT_CHUNK_SIZE, unsigned char);
    *size = SECONDARY_SEGMENT_CHUNK_SIZE;
  }

  MPUassert(mem != 0, "get_prime_segment allocation failure");
  return mem;
}
/* Return a segment obtained from get_prime_segment().  The shared primary
 * segment is kept and marked available again; a secondary buffer is freed. */
void release_prime_segment(unsigned char* mem) {
  int is_primary;

  MUTEX_LOCK(&segment_mutex);
  is_primary = (mem == prime_segment);
  if (is_primary)
    prime_segment_is_available = 1;
  MUTEX_UNLOCK(&segment_mutex);

  /* Free outside the lock, and only for private (secondary) buffers. */
  if (!is_primary && mem != 0)
    Safefree(mem);
}
/* Initialize the locking machinery (first call only) and pre-sieve the
 * primary cache out to n (or to a default size when n == 0). */
void prime_precalc(UV n)
{
  /* First call: create the mutexes/condvar before anything can lock them.
   * NOTE(review): primary_cache_mutex and primary_cache_turn presumably come
   * from READ_WRITE_LOCK_DECL(primary_cache) above — confirm in threadlock.h. */
  if (!mutex_init) {
    MUTEX_INIT(&segment_mutex);
    MUTEX_INIT(&primary_cache_mutex);
    COND_INIT(&primary_cache_turn);
    mutex_init = 1;
  }

  /* On initialization, make a few primes (30k per 1k memory) */
  if (n == 0)
    n = _MPU_INITIAL_CACHE_SIZE;

  get_prime_cache(n, 0);   /* Sieve to n */

  /* TODO: should we prealloc the segment here? */
}
/* Free as much cached memory as safely possible: release the shared segment
 * if no thread holds it, and shrink the primary cache back to its initial
 * size.  Safe to call repeatedly; a no-op before prime_precalc(). */
void prime_memfree(void)
{
  unsigned char* old_segment = 0;

  /* This can happen in global destructor, and PL_dirty has porting issues */
  /* MPUassert(mutex_init == 1, "cache mutexes have not been initialized"); */
  if (mutex_init == 0) return;

  /* Detach the primary segment under the lock, but call Safefree outside it.
   * (The original code did this via a pointless local swap variable, and had
   * a stray line-continuation backslash after the '{' — both removed.) */
  MUTEX_LOCK(&segment_mutex);
  /* Don't free if another thread is using it */
  if ( (prime_segment != 0) && (prime_segment_is_available) ) {
    old_segment = prime_segment;
    prime_segment = 0;
  }
  MUTEX_UNLOCK(&segment_mutex);
  if (old_segment) Safefree(old_segment);

  WRITE_LOCK_START(primary_cache);
  /* Put primary cache back to initial state */
  _erase_and_fill_prime_cache(_MPU_INITIAL_CACHE_SIZE);
  WRITE_LOCK_END(primary_cache);
}
/* Final teardown at shutdown: destroy the locks and free everything.
 * Deliberately lock-free — all other threads are assumed gone. */
void _prime_memfreeall(void)
{
  /* No locks. We're shutting everything down. */
  if (mutex_init) {
    mutex_init = 0;
    MUTEX_DESTROY(&segment_mutex);
    MUTEX_DESTROY(&primary_cache_mutex);
    COND_DESTROY(&primary_cache_turn);
  }

  if (prime_cache_sieve) Safefree(prime_cache_sieve);
  prime_cache_sieve = 0;
  prime_cache_size = 0;

  if (prime_segment) Safefree(prime_segment);
  prime_segment = 0;
}