/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr_general.h"
#include "apr_rmm.h"
#include "apr_errno.h"
#include "apr_strings.h"

/* The RMM region is made up of two doubly-linked lists of blocks; the
 * list of used blocks, and the list of free blocks (either list may
 * be empty).  The base pointer, rmm->base, points at the beginning of
 * the shmem region in use.  Each block is addressable by an
 * apr_rmm_off_t value, which represents the offset from the base
 * pointer.  The term "address" is used here to mean such a value; an
 * "offset from rmm->base".
 *
 * The RMM region contains exactly one "rmm_hdr_block_t" structure,
 * the "header block", which is always stored at the base pointer.
 * The firstused field in this structure is the address of the first
 * block in the "used blocks" list; the firstfree field is the address
 * of the first block in the "free blocks" list.
 *
 * Each block is prefixed by an "rmm_block_t" structure, followed by
 * the caller-usable region represented by the block.  The next and
 * prev fields of the structure are zero if the block is at the end or
 * beginning of the linked-list respectively, or otherwise hold the
 * address of the next and previous blocks in the list.  ("address 0",
 * i.e. rmm->base, is *not* a valid address for a block, since the
 * header block is always stored at that address).
 *
 * At creation, the RMM region is initialized to hold a single block
 * on the free list representing the entire available shm segment
 * (minus header block); subsequent allocation and deallocation of
 * blocks involves splitting blocks and coalescing adjacent blocks,
 * and switching them between the free and used lists as appropriate.
 */

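/* Illustrative usage sketch (not part of the implementation, hence kept
 * under #if 0): how a caller might carve an RMM out of a shared memory
 * segment.  The apr_shm_* calls (which would also need "apr_shm.h"), the
 * 8192-byte payload budget, the 64-block estimate and the helper name
 * rmm_usage_sketch are assumptions for the example only; the apr_rmm_*
 * calls are the ones implemented in this file.
 */
#if 0
static apr_status_t rmm_usage_sketch(apr_pool_t *p)
{
    apr_shm_t *shm;
    apr_rmm_t *rmm;
    apr_rmm_off_t off;
    apr_status_t rv;
    apr_size_t size = APR_ALIGN_DEFAULT(8192) + apr_rmm_overhead_get(64);

    if ((rv = apr_shm_create(&shm, size, NULL, p)) != APR_SUCCESS)
        return rv;

    /* A NULL lock means "no locking"; pass a real apr_anylock_t when the
     * region is shared between processes or threads. */
    if ((rv = apr_rmm_init(&rmm, NULL, apr_shm_baseaddr_get(shm), size, p))
        != APR_SUCCESS)
        return rv;

    /* Allocations are offsets into the region, not pointers. */
    off = apr_rmm_malloc(rmm, 128);
    if (off) {
        char *data = apr_rmm_addr_get(rmm, off);
        memset(data, 0, 128);
        apr_rmm_free(rmm, off);
    }

    return apr_rmm_destroy(rmm);
}
#endif
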
typedef struct rmm_block_t {
    apr_size_t size;
    apr_rmm_off_t prev;
    apr_rmm_off_t next;
} rmm_block_t;

/* Always at our apr_rmm_off(0):
 */
typedef struct rmm_hdr_block_t {
    apr_size_t abssize;
    apr_rmm_off_t /* rmm_block_t */ firstused;
    apr_rmm_off_t /* rmm_block_t */ firstfree;
} rmm_hdr_block_t;

#define RMM_HDR_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
#define RMM_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_block_t)))

struct apr_rmm_t {
    apr_pool_t *p;
    rmm_hdr_block_t *base;
    apr_size_t size;
    apr_anylock_t lock;
};

static apr_rmm_off_t find_block_by_offset(apr_rmm_t *rmm, apr_rmm_off_t next,
                                          apr_rmm_off_t find, int includes)
{
    apr_rmm_off_t prev = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (find == next)
            return next;

        /* Overshot? */
        if (find < next)
            return includes ? prev : 0;

        prev = next;
        next = blk->next;
    }
    return includes ? prev : 0;
}

static apr_rmm_off_t find_block_of_size(apr_rmm_t *rmm, apr_size_t size)
{
    apr_rmm_off_t next = rmm->base->firstfree;
    apr_rmm_off_t best = 0;
    apr_rmm_off_t bestsize = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (blk->size == size)
            return next;

        if (blk->size >= size) {
            /* XXX: sub-optimal algorithm
             * We need the most thorough best-fit logic, since we can
             * never grow our rmm, we are SOL when we hit the wall.
             */
            if (!bestsize || (blk->size < bestsize)) {
                bestsize = blk->size;
                best = next;
            }
        }

        next = blk->next;
    }

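    /* Illustrative numbers for the split below (comment added for clarity;
     * the concrete value of RMM_BLOCK_SIZE is an assumption, 24 bytes on a
     * typical LP64 build): if the best free block holds 200 bytes and the
     * (already RMM_BLOCK_SIZE-inclusive) request is 64 bytes, then
     * 200 > 24 + 64, so the block is split: the first 64 bytes stay at
     * `best` and a new 136-byte free block is linked in at best + size.
     */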
    if (bestsize > RMM_BLOCK_SIZE + size) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
        struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);

        new->size = blk->size - size;
        new->next = blk->next;
        new->prev = best;

        blk->size = size;
        blk->next = best + size;

        if (new->next) {
            blk = (rmm_block_t*)((char*)rmm->base + new->next);
            blk->prev = best + size;
        }
    }

    return best;
}

static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
{
    struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + this);

    /* close the gap */
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        prev->next = blk->next;
    }
    else {
        if (free) {
            rmm->base->firstused = blk->next;
        }
        else {
            rmm->base->firstfree = blk->next;
        }
    }
    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        next->prev = blk->prev;
    }

    /* now find it in the other list, pushing it to the head if required */
    if (free) {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstfree, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstfree;
            rmm->base->firstfree = this;
        }
    }
    else {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstused, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstused;
            rmm->base->firstused = this;
        }
    }

    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (free && (blk->prev + prev->size == this)) {
            /* Collapse us into our predecessor */
            prev->size += blk->size;
            this = blk->prev;
            blk = prev;
        }
        else {
            blk->next = prev->next;
            prev->next = this;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (free && (this + blk->size == blk->next)) {
            /* Collapse us into our successor */
            blk->size += next->size;
            blk->next = next->next;
            if (blk->next) {
                next = (rmm_block_t*)((char*)rmm->base + blk->next);
                next->prev = this;
            }
        }
        else {
            next->prev = this;
        }
    }
}

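/* Illustrative walk-through of the coalescing above, with made-up offsets:
 * when a used block at offset 120 of size 80 is moved to the free list
 * (free == 1) and its predecessor on that list is a free block at offset 40
 * of size 80, then 40 + 80 == 120 and the two collapse into one 160-byte
 * free block at offset 40; the successor check can then merge a free block
 * that starts at offset 40 + 160 == 200 in the same call.
 */
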
APU_DECLARE(apr_status_t) apr_rmm_init(apr_rmm_t **rmm, apr_anylock_t *lock,
                                       void *base, apr_size_t size,
                                       apr_pool_t *p)
{
    apr_status_t rv;
    rmm_block_t *blk;
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }
    if ((rv = APR_ANYLOCK_LOCK(lock)) != APR_SUCCESS)
        return rv;

    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = size;
    (*rmm)->lock = *lock;

    (*rmm)->base->abssize = size;
    (*rmm)->base->firstused = 0;
    (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;

    blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);

    blk->size = size - (*rmm)->base->firstfree;
    blk->prev = 0;
    blk->next = 0;

    return APR_ANYLOCK_UNLOCK(lock);
}

APU_DECLARE(apr_status_t) apr_rmm_destroy(apr_rmm_t *rmm)
{
    apr_status_t rv;
    rmm_block_t *blk;

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    /* Blast it all --- no going back :) */
    if (rmm->base->firstused) {
        apr_rmm_off_t this = rmm->base->firstused;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstused = 0;
    }
    if (rmm->base->firstfree) {
        apr_rmm_off_t this = rmm->base->firstfree;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstfree = 0;
    }
    rmm->base->abssize = 0;
    rmm->size = 0;

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(apr_status_t) apr_rmm_attach(apr_rmm_t **rmm, apr_anylock_t *lock,
                                         void *base, apr_pool_t *p)
{
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }

    /* sanity would be good here */
    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = (*rmm)->base->abssize;
    (*rmm)->lock = *lock;
    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_rmm_detach(apr_rmm_t *rmm)
{
    /* A noop until we introduce locked/refcounts */
    return APR_SUCCESS;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_malloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_rmm_off_t this;

    reqsize = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, reqsize);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_calloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_rmm_off_t this;

    reqsize = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, reqsize);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
        memset((char*)rmm->base + this, 0, reqsize - RMM_BLOCK_SIZE);
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_realloc(apr_rmm_t *rmm, void *entity,
                                           apr_size_t reqsize)
{
    apr_rmm_off_t this;
    apr_rmm_off_t old;
    struct rmm_block_t *blk;
    apr_size_t oldsize;

    if (!entity) {
        return apr_rmm_malloc(rmm, reqsize);
    }

    reqsize = APR_ALIGN_DEFAULT(reqsize);
    old = apr_rmm_offset_get(rmm, entity);

    if ((this = apr_rmm_malloc(rmm, reqsize)) == 0) {
        return 0;
    }

    blk = (rmm_block_t*)((char*)rmm->base + old - RMM_BLOCK_SIZE);
    oldsize = blk->size;

    memcpy(apr_rmm_addr_get(rmm, this),
           apr_rmm_addr_get(rmm, old), oldsize < reqsize ? oldsize : reqsize);
    apr_rmm_free(rmm, old);

    return this;
}

APU_DECLARE(apr_status_t) apr_rmm_free(apr_rmm_t *rmm, apr_rmm_off_t this)
{
    apr_status_t rv;
    struct rmm_block_t *blk;

    /* A little sanity check is always healthy, especially here.
     * If we really cared, we could make this compile-time
     * optional.
     */
    if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
        return APR_EINVAL;
    }

    this -= RMM_BLOCK_SIZE;

    blk = (rmm_block_t*)((char*)rmm->base + this);

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (prev->next != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }
    else {
        if (rmm->base->firstused != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (next->prev != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    /* Ok, it remained [apparently] sane, so unlink it
     */
    move_block(rmm, this, 1);

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(void *) apr_rmm_addr_get(apr_rmm_t *rmm, apr_rmm_off_t entity)
{
    /* debug-sanity checking here would be good
     */
    return (void*)((char*)rmm->base + entity);
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_offset_get(apr_rmm_t *rmm, void* entity)
{
    /* debug, or always, sanity checking here would be good
     * since the primitive is apr_rmm_off_t, I don't mind penalizing
     * inverse conversions for safety, unless someone can prove that
     * there is no choice in some cases.
     */
    return ((char*)entity - (char*)rmm->base);
}

APU_DECLARE(apr_size_t) apr_rmm_overhead_get(int n)
{
    /* overhead per block is at most APR_ALIGN_DEFAULT(1) wasted bytes
     * for alignment overhead, plus the size of the rmm_block_t
     * structure. */
    return RMM_HDR_BLOCK_SIZE + n * (RMM_BLOCK_SIZE + APR_ALIGN_DEFAULT(1));
}
