/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr_general.h"
#include "apr_rmm.h"
#include "apr_errno.h"
#include "apr_lib.h"
#include "apr_strings.h"

/* The RMM region is made up of two doubly-linked lists of blocks; the
 * list of used blocks, and the list of free blocks (either list may
 * be empty). The base pointer, rmm->base, points at the beginning of
 * the shmem region in use. Each block is addressable by an
 * apr_rmm_off_t value, which represents the offset from the base
 * pointer. The term "address" is used here to mean such a value; an
 * "offset from rmm->base".
 *
 * The RMM region contains exactly one "rmm_hdr_block_t" structure,
 * the "header block", which is always stored at the base pointer.
 * The firstused field in this structure is the address of the first
 * block in the "used blocks" list; the firstfree field is the address
 * of the first block in the "free blocks" list.
 *
 * Each block is prefixed by an "rmm_block_t" structure, followed by
 * the caller-usable region represented by the block. The next and
 * prev fields of the structure are zero if the block is at the end or
 * beginning of the linked list respectively, or otherwise hold the
 * address of the next and previous blocks in the list. ("Address 0",
 * i.e. rmm->base, is *not* a valid address for a block, since the
 * header block is always stored at that address.)
 *
 * At creation, the RMM region is initialized to hold a single block
 * on the free list representing the entire available shm segment
 * (minus the header block); subsequent allocation and deallocation of
 * blocks involves splitting blocks and coalescing adjacent blocks,
 * and switching them between the free and used lists as
 * appropriate. */
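
/* Illustrative layout (added commentary, not part of the original source),
 * assuming a single allocated block followed by the remaining free space.
 * All "addresses" are offsets from rmm->base:
 *
 *   offset 0             RMM_HDR_BLOCK_SIZE        +RMM_BLOCK_SIZE
 *   +--------------------+----------------+--------------------+-----------
 *   | rmm_hdr_block_t    | rmm_block_t    | caller-usable data | next block
 *   | (the header block) | (block header) | (offset returned   |    ...
 *   |                    |                |  to the caller)    |
 *   +--------------------+----------------+--------------------+-----------
 *
 * The offset a caller receives from apr_rmm_malloc() is the block's address
 * plus RMM_BLOCK_SIZE, so the caller-usable region starts exactly there.
 */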

typedef struct rmm_block_t {
    apr_size_t size;
    apr_rmm_off_t prev;
    apr_rmm_off_t next;
} rmm_block_t;

/* Always at our apr_rmm_off(0):
 */
typedef struct rmm_hdr_block_t {
    apr_size_t abssize;
    apr_rmm_off_t /* rmm_block_t */ firstused;
    apr_rmm_off_t /* rmm_block_t */ firstfree;
} rmm_hdr_block_t;

#define RMM_HDR_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
#define RMM_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_block_t)))
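
/* Note (added commentary, not part of the original source): APR_ALIGN_DEFAULT()
 * rounds a size up to the platform's default alignment boundary (typically 8
 * bytes), so both sizes above are multiples of that boundary. For example,
 * assuming an 8-byte boundary, APR_ALIGN_DEFAULT(1) == 8 and a 100-byte
 * request becomes APR_ALIGN_DEFAULT(100) == 104 before apr_rmm_malloc()
 * adds RMM_BLOCK_SIZE for the block header.
 */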

struct apr_rmm_t {
    apr_pool_t *p;
    rmm_hdr_block_t *base;
    apr_size_t size;
    apr_anylock_t lock;
};

static apr_rmm_off_t find_block_by_offset(apr_rmm_t *rmm, apr_rmm_off_t next,
                                          apr_rmm_off_t find, int includes)
{
    apr_rmm_off_t prev = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (find == next)
            return next;

        /* Overshot? */
        if (find < next)
            return includes ? prev : 0;

        prev = next;
        next = blk->next;
    }
    return includes ? prev : 0;
}

static apr_rmm_off_t find_block_of_size(apr_rmm_t *rmm, apr_size_t size)
{
    apr_rmm_off_t next = rmm->base->firstfree;
    apr_rmm_off_t best = 0;
    apr_rmm_off_t bestsize = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (blk->size == size)
            return next;

        if (blk->size >= size) {
            /* XXX: sub-optimal algorithm
             * We need the most thorough best-fit logic, since we can
             * never grow our rmm, we are SOL when we hit the wall.
             */
            if (!bestsize || (blk->size < bestsize)) {
                bestsize = blk->size;
                best = next;
            }
        }

        next = blk->next;
    }

    if (bestsize > RMM_BLOCK_SIZE + size) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
        struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);

        new->size = blk->size - size;
        new->next = blk->next;
        new->prev = best;

        blk->size = size;
        blk->next = best + size;

        if (new->next) {
            blk = (rmm_block_t*)((char*)rmm->base + new->next);
            blk->prev = best + size;
        }
    }

    return best;
}
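
/* Worked example (added commentary, not part of the original source): sizes
 * here include the RMM_BLOCK_SIZE header that apr_rmm_malloc() adds to the
 * request. If the best free block found is 256 bytes and the request needs
 * 96, then 256 > RMM_BLOCK_SIZE + 96, so the block is split: the first 96
 * bytes stay at "best" and are returned, and a new 160-byte free block is
 * created at best + 96 and linked in its place. When the fit is too tight
 * to leave room for another block header, the whole block is handed out.
 */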

static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
{
    struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + this);

    /* close the gap */
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        prev->next = blk->next;
    }
    else {
        if (free) {
            rmm->base->firstused = blk->next;
        }
        else {
            rmm->base->firstfree = blk->next;
        }
    }
    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        next->prev = blk->prev;
    }

    /* now find it in the other list, pushing it to the head if required */
    if (free) {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstfree, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstfree;
            rmm->base->firstfree = this;
        }
    }
    else {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstused, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstused;
            rmm->base->firstused = this;
        }
    }

    /* and open it up */
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (free && (blk->prev + prev->size == this)) {
            /* Collapse us into our predecessor */
            prev->size += blk->size;
            this = blk->prev;
            blk = prev;
        }
        else {
            blk->next = prev->next;
            prev->next = this;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (free && (this + blk->size == blk->next)) {
            /* Collapse us into our successor */
            blk->size += next->size;
            blk->next = next->next;
            if (blk->next) {
                next = (rmm_block_t*)((char*)rmm->base + blk->next);
                next->prev = this;
            }
        }
        else {
            next->prev = this;
        }
    }
}

APU_DECLARE(apr_status_t) apr_rmm_init(apr_rmm_t **rmm, apr_anylock_t *lock,
                                       void *base, apr_size_t size,
                                       apr_pool_t *p)
{
    apr_status_t rv;
    rmm_block_t *blk;
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }
    if ((rv = APR_ANYLOCK_LOCK(lock)) != APR_SUCCESS)
        return rv;

    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = size;
    (*rmm)->lock = *lock;

    (*rmm)->base->abssize = size;
    (*rmm)->base->firstused = 0;
    (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;

    blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);

    blk->size = size - (*rmm)->base->firstfree;
    blk->prev = 0;
    blk->next = 0;

    return APR_ANYLOCK_UNLOCK(lock);
}

APU_DECLARE(apr_status_t) apr_rmm_destroy(apr_rmm_t *rmm)
{
    apr_status_t rv;
    rmm_block_t *blk;

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    /* Blast it all --- no going back :) */
    if (rmm->base->firstused) {
        apr_rmm_off_t this = rmm->base->firstused;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstused = 0;
    }
    if (rmm->base->firstfree) {
        apr_rmm_off_t this = rmm->base->firstfree;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstfree = 0;
    }
    rmm->base->abssize = 0;
    rmm->size = 0;

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(apr_status_t) apr_rmm_attach(apr_rmm_t **rmm, apr_anylock_t *lock,
                                         void *base, apr_pool_t *p)
{
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }

    /* sanity would be good here */
    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = (*rmm)->base->abssize;
    (*rmm)->lock = *lock;
    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_rmm_detach(apr_rmm_t *rmm)
{
    /* A noop until we introduce locked/refcounts */
    return APR_SUCCESS;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_malloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_rmm_off_t this;

    reqsize = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, reqsize);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}
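
/* Usage sketch (added commentary, not part of the original source): the
 * return value is an offset rather than a pointer, so it remains valid in
 * every process attached to the same segment; callers convert on demand:
 *
 *     apr_rmm_off_t off = apr_rmm_malloc(rmm, 100);
 *     if (off) {
 *         char *p = apr_rmm_addr_get(rmm, off);
 *         ...
 *         apr_rmm_free(rmm, off);
 *     }
 *
 * A zero offset means no sufficiently large free block was available.
 */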

APU_DECLARE(apr_rmm_off_t) apr_rmm_calloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_rmm_off_t this;

    reqsize = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, reqsize);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
        memset((char*)rmm->base + this, 0, reqsize - RMM_BLOCK_SIZE);
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_realloc(apr_rmm_t *rmm, void *entity,
                                           apr_size_t reqsize)
{
    apr_rmm_off_t this;
    apr_rmm_off_t old;
    struct rmm_block_t *blk;
    apr_size_t oldsize;

    if (!entity) {
        return apr_rmm_malloc(rmm, reqsize);
    }

    reqsize = APR_ALIGN_DEFAULT(reqsize);
    old = apr_rmm_offset_get(rmm, entity);

    if ((this = apr_rmm_malloc(rmm, reqsize)) == 0) {
        return 0;
    }

    blk = (rmm_block_t*)((char*)rmm->base + old - RMM_BLOCK_SIZE);
    oldsize = blk->size;

    memcpy(apr_rmm_addr_get(rmm, this),
           apr_rmm_addr_get(rmm, old), oldsize < reqsize ? oldsize : reqsize);
    apr_rmm_free(rmm, old);

    return this;
}

APU_DECLARE(apr_status_t) apr_rmm_free(apr_rmm_t *rmm, apr_rmm_off_t this)
{
    apr_status_t rv;
    struct rmm_block_t *blk;

    /* A little sanity check is always healthy, especially here.
     * If we really cared, we could make this compile-time
     */
    if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
        return APR_EINVAL;
    }

    this -= RMM_BLOCK_SIZE;

    blk = (rmm_block_t*)((char*)rmm->base + this);

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (prev->next != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }
    else {
        if (rmm->base->firstused != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (next->prev != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    /* Ok, it remained [apparently] sane, so unlink it
     */
    move_block(rmm, this, 1);

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(void *) apr_rmm_addr_get(apr_rmm_t *rmm, apr_rmm_off_t entity)
{
    /* debug-sanity checking here would be good
     */
    return (void*)((char*)rmm->base + entity);
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_offset_get(apr_rmm_t *rmm, void* entity)
{
    /* debug, or always, sanity checking here would be good
     * since the primitive is apr_rmm_off_t, I don't mind penalizing
     * inverse conversions for safety, unless someone can prove that
     * there is no choice in some cases.
     */
    return ((char*)entity - (char*)rmm->base);
}

APU_DECLARE(apr_size_t) apr_rmm_overhead_get(int n)
{
    /* overhead per block is at most APR_ALIGN_DEFAULT(1) wasted bytes
     * for alignment overhead, plus the size of the rmm_block_t
     * structure. */
    return RMM_HDR_BLOCK_SIZE + n * (RMM_BLOCK_SIZE + APR_ALIGN_DEFAULT(1));
}
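
/* Usage sketch (added commentary, not part of the original source): a caller
 * typically sizes a shared segment with apr_rmm_overhead_get(), initializes
 * the allocator over it in one process, and attaches from the others.  The
 * shm setup below uses APR's apr_shm API for illustration; variable names
 * are hypothetical and error checking is omitted:
 *
 *     apr_size_t size = apr_rmm_overhead_get(nblocks) + nblocks * blksize;
 *     apr_shm_create(&shm, size, NULL, pool);
 *     apr_rmm_init(&rmm, NULL, apr_shm_baseaddr_get(shm), size, pool);
 *
 *     ... and in a process that attaches later ...
 *     apr_rmm_attach(&rmm, NULL, apr_shm_baseaddr_get(shm), pool);
 *     off = apr_rmm_malloc(rmm, blksize);
 *     data = apr_rmm_addr_get(rmm, off);
 */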