File Coverage

cache.c
Criterion     Covered   Total       %
statement          70      71    98.5
branch             39      50    78.0
condition         n/a
subroutine        n/a
pod               n/a
total             109     121    90.0


line stmt bran cond sub pod time code
1             #include <stdio.h>
2             #include <stdlib.h>
3             #include <string.h>
4              
5             #include "ptypes.h"
6             #include "cache.h"
7             #include "sieve.h"
8             #include "constants.h" /* _MPU_FILL_EXTRA_N and _MPU_INITIAL_CACHE_SIZE */
9              
10             #include "threadlock.h"
11              
12             /*
13             * These functions are used internally by the .c and .xs files.
14             * They handle a cached primary set of primes, as well as a segment
15             * area for use by all the functions that want to do segmented operations.
16             *
17             * We must be thread-safe, and we want to allow a good deal of concurrency.
18             * It is imperative these be used correctly. After calling the get method,
19             * use the sieve or segment, then release. You MUST call release before you
20             * return or croak. You ought to release as soon as you're done using the
21             * sieve or segment.
22             */
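
As a minimal sketch of the get/use/release protocol described in the comment above: the hypothetical helper below (example_caller is not part of cache.c) calls only functions defined or declared by this module, and assumes that in non-threaded builds release_prime_cache is still safe to call (e.g. as a no-op macro supplied by cache.h).

    /* Hedged sketch of the intended calling pattern, not part of cache.c. */
    static void example_caller(UV limit) {
      const unsigned char* sieve;
      unsigned char* segment;
      UV seg_size, sieved_to;

      /* 1. Primary sieve: sieved at least up to 'limit' on return. */
      sieved_to = get_prime_cache(limit, &sieve);
      /* ... read-only use of 'sieve' for values <= sieved_to ... */
      (void) sieved_to;
      release_prime_cache(sieve);   /* must happen before returning or croaking */

      /* 2. Shared (or private fallback) segment buffer. */
      segment = get_prime_segment(&seg_size);
      /* ... use 'segment' as seg_size bytes of scratch space ... */
      release_prime_segment(segment);
    }
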
23              
24             static int mutex_init = 0;
25             MUTEX_DECL(segment);
26             READ_WRITE_LOCK_DECL(primary_cache);
27              
28             static unsigned char* prime_cache_sieve = 0;
29             static UV prime_cache_size = 0;
30              
31             /* Erase the primary cache and fill up to n. */
32             /* Note: You must have a write lock before calling this! */
33 109           static void _erase_and_fill_prime_cache(UV n) {
34             UV padded_n;
35              
36 109 50         if (n >= (UV_MAX-_MPU_FILL_EXTRA_N))
37 0           padded_n = UV_MAX;
38             else
39 109           padded_n = ((n + _MPU_FILL_EXTRA_N)/30)*30;
40              
41             /* If the new size is unchanged, we're done. */
42 109 100         if (prime_cache_size == padded_n)
43 1           return;
44              
45 108 100         if (prime_cache_sieve != 0)
46 36           Safefree(prime_cache_sieve);
47 108           prime_cache_sieve = 0;
48 108           prime_cache_size = 0;
49              
50 108 50         if (n > 0) {
51 108           prime_cache_sieve = sieve_erat30(padded_n);
52 108 50         MPUassert(prime_cache_sieve != 0, "sieve returned null");
53 108           prime_cache_size = padded_n;
54             }
55             }
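
For intuition about the padding arithmetic above: the request is grown by a safety margin and then rounded down to a multiple of 30, since the mod-30 sieve stores 30 numbers per byte. A worked example, assuming a purely hypothetical _MPU_FILL_EXTRA_N of 10000 (the real value is defined in constants.h and not shown here):

    /* Hypothetical example, assuming _MPU_FILL_EXTRA_N were 10000:
     *   n        = 123456
     *   padded_n = ((123456 + 10000) / 30) * 30 = 133440
     * so the cache would hold 133440 / 30 = 4448 bytes after the fill.
     */
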
56              
57             /*
58             * Get the size and a pointer to the cached prime sieve.
59             * Returns the maximum sieved value available.
60             * Allocates and sieves if needed.
61             *
62             * The sieve holds 30 numbers per byte, using a mod-30 wheel.
63             */
64 258166           UV get_prime_cache(UV n, const unsigned char** sieve)
65             {
66             #ifdef USE_ITHREADS
67             if (sieve == 0) {
68             if (prime_cache_size < n) {
69             WRITE_LOCK_START(primary_cache);
70             _erase_and_fill_prime_cache(n);
71             WRITE_LOCK_END(primary_cache);
72             }
73             return prime_cache_size;
74             }
75              
76             /* This could be done more efficiently if we converted a write lock to a
77             * reader after doing the expansion. But I think this solution is less
78             * error-prone (though it could lead to starvation in pathological cases).
79             */
80             READ_LOCK_START(primary_cache);
81             while (prime_cache_size < n) {
82             /* The cache isn't big enough. Expand it. */
83             READ_LOCK_END(primary_cache);
84             /* thread reminder: the world can change right here */
85             WRITE_LOCK_START(primary_cache);
86             if (prime_cache_size < n)
87             _erase_and_fill_prime_cache(n);
88             WRITE_LOCK_END(primary_cache);
89             /* thread reminder: the world can change right here */
90             READ_LOCK_START(primary_cache);
91             }
92             MPUassert(prime_cache_size >= n, "prime cache is too small!");
93              
94             *sieve = prime_cache_sieve;
95             return prime_cache_size;
96             #else
97 258166 100         if (prime_cache_size < n)
98 99           _erase_and_fill_prime_cache(n);
99 258166 50         MPUassert(prime_cache_size >= n, "prime cache is too small!");
100 258166 100         if (sieve != 0)
101 123095           *sieve = prime_cache_sieve;
102 258166           return prime_cache_size;
103             #endif
104             }
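
The "30 numbers per byte" packing used by get_prime_cache works because, once multiples of 2, 3, and 5 are excluded, only eight residues mod 30 (1, 7, 11, 13, 17, 19, 23, 29) can be prime, so each byte holds one bit per candidate in a block of 30. The exact bit assignment lives in sieve.h and is not shown here; the sketch below (mod30_bit is a hypothetical name) illustrates the indexing idea only and is not the module's actual lookup code.

    /* Illustrative only: the eight candidates coprime to 30 in each block. */
    static const unsigned char wheel30[8] = {1, 7, 11, 13, 17, 19, 23, 29};

    /* Byte and bit that would describe 'n' under one possible mod-30 packing. */
    static int mod30_bit(UV n, UV* byte_index) {
      UV r = n % 30;
      int b;
      *byte_index = n / 30;            /* one byte covers 30 consecutive numbers */
      for (b = 0; b < 8; b++)
        if (wheel30[b] == r) return b; /* bit position of this candidate */
      return -1;                       /* n is divisible by 2, 3, or 5 */
    }
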
105              
106             #ifdef USE_ITHREADS
107             void release_prime_cache(const unsigned char* mem) {
108             (void)mem; /* We don't currently care about the pointer */
109             READ_LOCK_END(primary_cache);
110             }
111             #endif
112              
113              
114              
115             /* The segment everyone is trying to share */
116             #define PRIMARY_SEGMENT_CHUNK_SIZE UVCONST(32*1024-16)
117             static unsigned char* prime_segment = 0;
118             static int prime_segment_is_available = 1;
119             /* If that's in use, malloc a new one of this size */
120             #define SECONDARY_SEGMENT_CHUNK_SIZE UVCONST(32*1024-16)
121              
122 4504           unsigned char* get_prime_segment(UV *size) {
123             unsigned char* mem;
124 4504           int use_prime_segment = 0;
125              
126 4504 50         MPUassert(size != 0, "get_prime_segment given null size pointer");
127 4504 50         MPUassert(mutex_init == 1, "segment mutex has not been initialized");
128              
129             MUTEX_LOCK(&segment_mutex);
130 4504 100         if (prime_segment_is_available) {
131 4445           prime_segment_is_available = 0;
132 4445           use_prime_segment = 1;
133             }
134             MUTEX_UNLOCK(&segment_mutex);
135              
136 4504 100         if (use_prime_segment) {
137 4445 100         if (prime_segment == 0)
138 16           New(0, prime_segment, PRIMARY_SEGMENT_CHUNK_SIZE, unsigned char);
139 4445           *size = PRIMARY_SEGMENT_CHUNK_SIZE;
140 4445           mem = prime_segment;
141             } else {
142 59           New(0, mem, SECONDARY_SEGMENT_CHUNK_SIZE, unsigned char);
143 59           *size = SECONDARY_SEGMENT_CHUNK_SIZE;
144             }
145 4504 50         MPUassert(mem != 0, "get_prime_segment allocation failure");
146              
147 4504           return mem;
148             }
149              
150 4504           void release_prime_segment(unsigned char* mem) {
151             MUTEX_LOCK(&segment_mutex);
152 4504 100         if (mem == prime_segment) {
153 4445           prime_segment_is_available = 1;
154 4445           mem = 0;
155             }
156             MUTEX_UNLOCK(&segment_mutex);
157 4504 100         if (mem)
158 59           Safefree(mem);
159 4504           }
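
Taken together, get_prime_segment and release_prime_segment implement one shared scratch buffer with a malloc fallback: a caller that finds the primary segment available takes it, any other concurrent caller gets a freshly allocated secondary buffer, and release either marks the shared buffer available again or frees the private one. A minimal sketch of a caller (segment_user is a hypothetical name), using only the functions above:

    /* Sketch: the same code works whether it gets the shared or a private buffer. */
    static void segment_user(void) {
      UV size;
      unsigned char* seg = get_prime_segment(&size);
      /* The first concurrent caller typically receives the shared prime_segment  */
      /* buffer; later callers receive a private New()-allocated one.             */
      /* ... 'size' bytes of scratch space for segmented sieving ...              */
      release_prime_segment(seg);      /* re-shares the buffer or frees it */
    }
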
160              
161              
162              
163 110           void prime_precalc(UV n)
164             {
165 110 100         if (!mutex_init) {
166             MUTEX_INIT(&segment_mutex);
167             MUTEX_INIT(&primary_cache_mutex);
168             COND_INIT(&primary_cache_turn);
169 72           mutex_init = 1;
170             }
171              
172             /* On initialization, make a few primes (30k per 1k memory) */
173 110 100         if (n == 0)
174 72           n = _MPU_INITIAL_CACHE_SIZE;
175 110           get_prime_cache(n, 0); /* Sieve to n */
176              
177             /* TODO: should we prealloc the segment here? */
178 110           }
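
The "(30k per 1k memory)" note follows from the mod-30 packing: each kilobyte of sieve covers 30 x 1024 = 30,720 integers. A hedged sketch of how a caller might warm the cache up front (warm_cache is a hypothetical name; the target value is arbitrary):

    /* Sketch: pre-sieve once at startup so later lookups avoid refills. */
    static void warm_cache(void) {
      /* 30,000,000 / 30 = 1,000,000 bytes of sieve, i.e. roughly 1 MB.  */
      prime_precalc(UVCONST(30000000));
    }
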
179              
180              
181 10           void prime_memfree(void)
182             {
183 10           unsigned char* old_segment = 0;
184              
185             /* This can happen in global destructor, and PL_dirty has porting issues */
186             /* MPUassert(mutex_init == 1, "cache mutexes have not been initialized"); */
187 10 50         if (mutex_init == 0) return;
188              
189             MUTEX_LOCK(&segment_mutex);
190             /* Don't free if another thread is using it */
191 10 100 50         if ( (prime_segment != 0) && (prime_segment_is_available) ) {
192 1           unsigned char* new_segment = old_segment;
193 1           old_segment = prime_segment;
194 1           prime_segment = new_segment; /* Exchanged old_segment / prime_segment */
195             }
196             MUTEX_UNLOCK(&segment_mutex);
197 10 100         if (old_segment) Safefree(old_segment);
198              
199             WRITE_LOCK_START(primary_cache);
200             /* Put primary cache back to initial state */
201 10           _erase_and_fill_prime_cache(_MPU_INITIAL_CACHE_SIZE);
202             WRITE_LOCK_END(primary_cache);
203             }
204              
205              
206 72           void _prime_memfreeall(void)
207             {
208             /* No locks. We're shutting everything down. */
209 72 50         if (mutex_init) {
210 72           mutex_init = 0;
211             MUTEX_DESTROY(&segment_mutex);
212             MUTEX_DESTROY(&primary_cache_mutex);
213             COND_DESTROY(&primary_cache_turn);
214             }
215 72 50         if (prime_cache_sieve != 0)
216 72           Safefree(prime_cache_sieve);
217 72           prime_cache_sieve = 0;
218 72           prime_cache_size = 0;
219              
220 72 100         if (prime_segment != 0)
221 15           Safefree(prime_segment);
222 72           prime_segment = 0;
223 72           }
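
For completeness, a sketch of the lifecycle these maintenance functions support, under the assumption that _prime_memfreeall is called exactly once at global destruction (as its "no locks" comment implies). The helper name lifecycle_example is hypothetical.

    /* Sketch of the intended lifecycle, not part of cache.c. */
    static void lifecycle_example(void) {
      prime_precalc(UVCONST(1000000));  /* initialize mutexes, pre-sieve      */
      /* ... normal use via get_prime_cache / get_prime_segment ...           */
      prime_memfree();                  /* shrink back to the initial cache   */
    }
    /* _prime_memfreeall() then runs once at global destruction, after which  */
    /* no further calls into this module should be made.                      */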