/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
 * This file is part of the LibreOffice project.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * This file incorporates work covered by the following license notice:
 *
 *   Licensed to the Apache Software Foundation (ASF) under one or more
 *   contributor license agreements. See the NOTICE file distributed
 *   with this work for additional information regarding copyright
 *   ownership. The ASF licenses this file to you under the Apache
 *   License, Version 2.0 (the "License"); you may not use this file
 *   except in compliance with the License. You may obtain a copy of
 *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
 */

#include "alloc_arena.hxx"

#include "alloc_impl.hxx"
#include "internal/rtllifecycle.h"
#include "sal/macros.h"
#include "osl/diagnose.h"

#include <cassert>
#include <string.h>
#include <stdio.h>
extern AllocMode alloc_mode;

/* ================================================================= *
 *
 * arena internals.
 *
 * ================================================================= */

/** g_arena_list
 *  @internal
 */
struct rtl_arena_list_st
{
    rtl_memory_lock_type m_lock;
    rtl_arena_type       m_arena_head;
};

static rtl_arena_list_st g_arena_list;

/** gp_arena_arena
 *  provided for arena_type allocations, and hash_table resizing.
 *  @internal
 */
static rtl_arena_type * gp_arena_arena = 0;

/** gp_machdep_arena
 *
 *  Low level virtual memory (pseudo) arena
 *  (platform dependent implementation)
 *
 *  @internal
 */
static rtl_arena_type * gp_machdep_arena = 0;

/** gp_default_arena
 */
rtl_arena_type * gp_default_arena = 0;

namespace
{

void *
SAL_CALL rtl_machdep_alloc (
    rtl_arena_type * pArena,
    sal_Size *       pSize
);

void
SAL_CALL rtl_machdep_free (
    rtl_arena_type * pArena,
    void *           pAddr,
    sal_Size         nSize
);

sal_Size
rtl_machdep_pagesize();
/* ================================================================= */

/** rtl_arena_segment_constructor()
 */
int
rtl_arena_segment_constructor (void * obj)
{
    rtl_arena_segment_type * segment = (rtl_arena_segment_type*)(obj);

    QUEUE_START_NAMED(segment, s);
    QUEUE_START_NAMED(segment, f);

    return (1);
}

/** rtl_arena_segment_destructor()
 */
void
rtl_arena_segment_destructor (void * obj)
{
    rtl_arena_segment_type * segment = static_cast< rtl_arena_segment_type * >(
        obj);
    assert(QUEUE_STARTED_NAMED(segment, s));
    assert(QUEUE_STARTED_NAMED(segment, f));
    (void) segment; // avoid warnings
}

/* ================================================================= */
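/* A note on the queue naming used throughout this file: every
 * rtl_arena_segment_type sits on two intrusive doubly linked lists,
 * selected by the QUEUE_*_NAMED suffix:
 *
 *   's' (m_snext/m_sprev): the address-ordered segment list of a span,
 *       plus the segment reserve and reserve-span lists;
 *   'f' (m_fnext/m_fprev): the per-size freelists; for segments in use,
 *       m_fnext is reused as the hash-chain link (see
 *       rtl_arena_hash_insert() below).
 *
 * Illustrative sketch only (assuming QUEUE_START_NAMED() leaves a node
 * self-linked, which is what QUEUE_STARTED_NAMED() checks above):
 *
 *   rtl_arena_segment_type seg;
 *   QUEUE_START_NAMED(&seg, s);  // seg.m_snext == seg.m_sprev == &seg
 *   QUEUE_START_NAMED(&seg, f);  // seg.m_fnext == seg.m_fprev == &seg
 */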
/** rtl_arena_segment_populate()
 *
 *  @precond arena->m_lock acquired.
 */
bool
rtl_arena_segment_populate (
    rtl_arena_type * arena
)
{
    rtl_arena_segment_type *span;
    sal_Size                size = rtl_machdep_pagesize();

    span = static_cast< rtl_arena_segment_type * >(
        rtl_machdep_alloc(gp_machdep_arena, &size));
    if (span != 0)
    {
        rtl_arena_segment_type *first, *last, *head;
        sal_Size                count = size / sizeof(rtl_arena_segment_type);

        /* insert onto reserve span list */
        QUEUE_INSERT_TAIL_NAMED(&(arena->m_segment_reserve_span_head), span, s);
        QUEUE_START_NAMED(span, f);
        span->m_addr = (sal_uIntPtr)(span);
        span->m_size = size;
        span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;

        /* insert onto reserve list */
        head = &(arena->m_segment_reserve_head);
        for (first = span + 1, last = span + count; first < last; ++first)
        {
            QUEUE_INSERT_TAIL_NAMED(head, first, s);
            QUEUE_START_NAMED(first, f);
            first->m_addr = 0;
            first->m_size = 0;
            first->m_type = 0;
        }
    }
    return (span != 0);
}
/** rtl_arena_segment_get()
 *
 *  @precond arena->m_lock acquired.
 *  @precond (*ppSegment == 0)
 */
inline void
rtl_arena_segment_get (
    rtl_arena_type *          arena,
    rtl_arena_segment_type ** ppSegment
)
{
    rtl_arena_segment_type * head;

    assert(*ppSegment == 0);

    head = &(arena->m_segment_reserve_head);
    if ((head->m_snext != head) || rtl_arena_segment_populate (arena))
    {
        (*ppSegment) = head->m_snext;
        QUEUE_REMOVE_NAMED((*ppSegment), s);
    }
}
/** rtl_arena_segment_put()
 *
 *  @precond  arena->m_lock acquired.
 *  @postcond (*ppSegment == 0)
 */
inline void
rtl_arena_segment_put (
    rtl_arena_type *          arena,
    rtl_arena_segment_type ** ppSegment
)
{
    rtl_arena_segment_type * head;

    assert(QUEUE_STARTED_NAMED((*ppSegment), s));
    assert(QUEUE_STARTED_NAMED((*ppSegment), f));

    (*ppSegment)->m_addr = 0;
    (*ppSegment)->m_size = 0;

    assert((*ppSegment)->m_type != RTL_ARENA_SEGMENT_TYPE_HEAD);
    (*ppSegment)->m_type = 0;

    /* keep as reserve */
    head = &(arena->m_segment_reserve_head);
    QUEUE_INSERT_HEAD_NAMED(head, (*ppSegment), s);

    /* clear */
    (*ppSegment) = 0;
}
/** rtl_arena_freelist_insert()
 *
 *  @precond arena->m_lock acquired.
 */
inline void
rtl_arena_freelist_insert (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type * head;

    head = &(arena->m_freelist_head[highbit(segment->m_size) - 1]);
    QUEUE_INSERT_TAIL_NAMED(head, segment, f);

    arena->m_freelist_bitmap |= head->m_size;
}
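/* The freelist bitmap tracks which power-of-two freelists are non-empty:
 * m_freelist_head[i] collects segments with 2^i <= m_size < 2^(i+1), and its
 * own m_size field is pre-set to 2^i in rtl_arena_constructor(), so OR-ing it
 * in here (resp. XOR-ing it out in rtl_arena_freelist_remove()) toggles
 * exactly bit i.
 *
 * Illustrative example (sizes assumed, and assuming highbit() is 1-based as
 * the "- 1" above suggests): a segment of 12288 bytes has
 * highbit(12288) == 14, lands on m_freelist_head[13], and sets bitmap
 * bit 13 (0x2000).
 */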
/** rtl_arena_freelist_remove()
 *
 *  @precond arena->m_lock acquired.
 */
inline void
rtl_arena_freelist_remove (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    if ((segment->m_fnext->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD) &&
        (segment->m_fprev->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD)    )
    {
        rtl_arena_segment_type * head;

        head = segment->m_fprev;
        assert(arena->m_freelist_bitmap & head->m_size);
        arena->m_freelist_bitmap ^= head->m_size;
    }
    QUEUE_REMOVE_NAMED(segment, f);
}

/* ================================================================= */
/** RTL_ARENA_HASH_INDEX()
 */
#define RTL_ARENA_HASH_INDEX_IMPL(a, s, q, m) \
    ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_ARENA_HASH_INDEX(arena, addr) \
    RTL_ARENA_HASH_INDEX_IMPL((addr), (arena)->m_hash_shift, (arena)->m_quantum_shift, ((arena)->m_hash_size - 1))
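/* Worked instantiation of the macro above (all numbers are illustrative
 * assumptions, not values read from a real arena): with m_hash_shift == 6
 * (a 64-bucket table), m_quantum_shift == 4 (quantum 16) and
 * m_hash_size - 1 == 63,
 *
 *   RTL_ARENA_HASH_INDEX(arena, addr)
 *     == ((addr + (addr >> 6) + (addr >> 12)) >> 4) & 63
 *
 * i.e. shifted copies of the address are folded together, the quantum bits
 * are discarded, and the result is masked to the table size.
 */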
/** rtl_arena_hash_rescale()
 *
 *  @precond arena->m_lock released.
 */
void
rtl_arena_hash_rescale (
    rtl_arena_type * arena,
    sal_Size         new_size
)
{
    assert(new_size != 0);

    rtl_arena_segment_type ** new_table;
    sal_Size                  new_bytes;

    new_bytes = new_size * sizeof(rtl_arena_segment_type*);
    new_table = (rtl_arena_segment_type **)rtl_arena_alloc (gp_arena_arena, &new_bytes);

    if (new_table != 0)
    {
        rtl_arena_segment_type ** old_table;
        sal_Size                  old_size, i;

        memset (new_table, 0, new_bytes);

        RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));

        old_table = arena->m_hash_table;
        old_size  = arena->m_hash_size;

        // SAL_INFO(
        //  "sal.rtl",
        //  "rtl_arena_hash_rescale(" << arena->m_name << "): nseg: "
        //      << (arena->m_stats.m_alloc - arena->m_stats.m_free) << " (ave: "
        //      << ((arena->m_stats.m_alloc - arena->m_stats.m_free)
        //          >> arena->m_hash_shift)
        //      << "), frees: " << arena->m_stats.m_free << " [old_size: "
        //      << old_size << ", new_size: " << new_size << ']');

        arena->m_hash_table = new_table;
        arena->m_hash_size  = new_size;
        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;

        for (i = 0; i < old_size; i++)
        {
            rtl_arena_segment_type * curr = old_table[i];
            while (curr != 0)
            {
                rtl_arena_segment_type  * next = curr->m_fnext;
                rtl_arena_segment_type ** head;

                // coverity[negative_shift]
                head = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, curr->m_addr)]);
                curr->m_fnext = (*head);
                (*head) = curr;

                curr = next;
            }
            old_table[i] = 0;
        }

        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

        if (old_table != arena->m_hash_table_0)
        {
            sal_Size old_bytes = old_size * sizeof(rtl_arena_segment_type*);
            rtl_arena_free (gp_arena_arena, old_table, old_bytes);
        }
    }
}
/** rtl_arena_hash_insert()
 *  ...and update stats.
 */
inline void
rtl_arena_hash_insert (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type ** ppSegment;

    ppSegment = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, segment->m_addr)]);

    segment->m_fnext = (*ppSegment);
    (*ppSegment) = segment;

    arena->m_stats.m_alloc     += 1;
    arena->m_stats.m_mem_alloc += segment->m_size;
}
/** rtl_arena_hash_remove()
 *  ...and update stats.
 */
rtl_arena_segment_type *
rtl_arena_hash_remove (
    rtl_arena_type * arena,
    sal_uIntPtr      addr,
    sal_Size         size
)
{
    rtl_arena_segment_type *segment, **segpp;
    sal_Size lookups = 0;

    segpp = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, addr)]);
    while ((segment = *segpp) != 0)
    {
        if (segment->m_addr == addr)
        {
            *segpp = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;
            break;
        }

        /* update lookup miss stats */
        lookups += 1;
        segpp = &(segment->m_fnext);
    }

    assert(segment != 0); // bad free
    if (segment != 0)
    {
        assert(segment->m_size == size);
        (void) size; // avoid warnings

        arena->m_stats.m_free      += 1;
        arena->m_stats.m_mem_alloc -= segment->m_size;

        if (lookups > 1)
        {
            sal_Size nseg = (sal_Size)(arena->m_stats.m_alloc - arena->m_stats.m_free);
            if (nseg > 4 * arena->m_hash_size)
            {
                if (!(arena->m_flags & RTL_ARENA_FLAG_RESCALE))
                {
                    sal_Size ave = nseg >> arena->m_hash_shift;
                    assert(ave != 0);
                    sal_Size new_size = arena->m_hash_size << (highbit(ave) - 1);

                    arena->m_flags |= RTL_ARENA_FLAG_RESCALE;
                    RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
                    rtl_arena_hash_rescale (arena, new_size);
                    RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
                    arena->m_flags &= ~RTL_ARENA_FLAG_RESCALE;
                }
            }
        }
    }

    return (segment);
}
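/* Rescale trigger, with assumed numbers: for m_hash_size == 64
 * (m_hash_shift == 6) and 300 live segments, a free that needs more than one
 * probe sees nseg == 300 > 4 * 64, ave == 300 >> 6 == 4, and grows the table
 * to 64 << (highbit(4) - 1) == 256 buckets. RTL_ARENA_FLAG_RESCALE keeps a
 * concurrent free from starting a second rescale while the lock is dropped.
 */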
/* ================================================================= */
/** rtl_arena_segment_alloc()
 *  allocate (and remove) segment from freelist
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 */
bool
rtl_arena_segment_alloc (
    rtl_arena_type *          arena,
    sal_Size                  size,
    rtl_arena_segment_type ** ppSegment
)
{
    int index = 0;

    assert(*ppSegment == 0);
    if (!RTL_MEMORY_ISP2(size))
    {
        int msb = highbit(size);
        if (RTL_ARENA_FREELIST_SIZE == sal::static_int_cast< size_t >(msb))
        {
            /* highest possible freelist: fall back to first fit */
            rtl_arena_segment_type *head, *segment;

            head = &(arena->m_freelist_head[msb - 1]);
            for (segment = head->m_fnext; segment != head; segment = segment->m_fnext)
            {
                if (segment->m_size >= size)
                {
                    /* allocate first fit segment */
                    (*ppSegment) = segment;
                    break;
                }
            }
            goto dequeue_and_leave;
        }

        /* roundup to next power of 2 */
        size = (((sal_Size)1) << msb);
    }

    index = lowbit(RTL_MEMORY_P2ALIGN(arena->m_freelist_bitmap, size));
    if (index > 0)
    {
        /* instant fit: allocate first free segment */
        rtl_arena_segment_type *head;

        head = &(arena->m_freelist_head[index - 1]);
        (*ppSegment) = head->m_fnext;
        assert((*ppSegment) != head);
    }

dequeue_and_leave:
    if (*ppSegment != 0)
    {
        /* remove from freelist */
        rtl_arena_freelist_remove (arena, (*ppSegment));
    }
    return (*ppSegment != 0);
}
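/* Sizing policy, by example (request sizes assumed): a power-of-two request
 * such as 8192 is served "instant fit" straight from the first freelist whose
 * bit survives RTL_MEMORY_P2ALIGN(m_freelist_bitmap, size), because every
 * segment on that list is at least that large. A request of 12288 is rounded
 * up to 16384 and served the same way. Only requests whose msb already falls
 * into the last freelist cannot be rounded up further and are scanned
 * first-fit instead.
 */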
/** rtl_arena_segment_create()
 *  import new (span) segment from source arena
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 */
int
rtl_arena_segment_create (
    rtl_arena_type *          arena,
    sal_Size                  size,
    rtl_arena_segment_type ** ppSegment
)
{
    assert((*ppSegment) == 0);
    if (arena->m_source_alloc != 0)
    {
        rtl_arena_segment_get (arena, ppSegment);
        if (*ppSegment != 0)
        {
            rtl_arena_segment_type * span = 0;
            rtl_arena_segment_get (arena, &span);
            if (span != 0)
            {
                /* import new span from source arena */
                RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

                span->m_size = size;
                span->m_addr = (sal_uIntPtr)(arena->m_source_alloc)(
                    arena->m_source_arena, &(span->m_size));

                RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
                if (span->m_addr != 0)
                {
                    /* insert onto segment list, update stats */
                    span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;
                    QUEUE_INSERT_HEAD_NAMED(&(arena->m_segment_head), span, s);
                    arena->m_stats.m_mem_total += span->m_size;

                    (*ppSegment)->m_addr = span->m_addr;
                    (*ppSegment)->m_size = span->m_size;
                    (*ppSegment)->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
                    QUEUE_INSERT_HEAD_NAMED(span, (*ppSegment), s);

                    /* report success */
                    return (1);
                }
                rtl_arena_segment_put (arena, &span);
            }
            rtl_arena_segment_put (arena, ppSegment);
        }
    }
    return (0);
}
/** rtl_arena_segment_coalesce()
 *  mark as free and join with adjacent free segment(s)
 *
 *  @precond arena->m_lock acquired
 *  @precond segment marked 'used'
 */
void
rtl_arena_segment_coalesce (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type *next, *prev;

    /* mark segment free */
    assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_USED);
    segment->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;

    /* try to merge w/ next segment */
    next = segment->m_snext;
    if (next->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
    {
        assert(segment->m_addr + segment->m_size == next->m_addr);
        segment->m_size += next->m_size;

        /* remove from freelist */
        rtl_arena_freelist_remove (arena, next);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(next, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &next);
    }

    /* try to merge w/ prev segment */
    prev = segment->m_sprev;
    if (prev->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
    {
        assert(prev->m_addr + prev->m_size == segment->m_addr);
        segment->m_addr  = prev->m_addr;
        segment->m_size += prev->m_size;

        /* remove from freelist */
        rtl_arena_freelist_remove (arena, prev);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(prev, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &prev);
    }
}

/* ================================================================= */
/** rtl_arena_constructor()
 */
void
rtl_arena_constructor (void * obj)
{
    rtl_arena_type * arena = (rtl_arena_type*)(obj);
    rtl_arena_segment_type * head;
    size_t i;

    memset (arena, 0, sizeof(rtl_arena_type));

    QUEUE_START_NAMED(arena, arena_);

    (void) RTL_MEMORY_LOCK_INIT(&(arena->m_lock));

    head = &(arena->m_segment_reserve_span_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    head = &(arena->m_segment_reserve_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    head = &(arena->m_segment_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
    {
        head = &(arena->m_freelist_head[i]);
        rtl_arena_segment_constructor (head);

        head->m_size = (((sal_Size)1) << i);
        head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;
    }

    arena->m_hash_table = arena->m_hash_table_0;
    arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
    arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
}
/** rtl_arena_destructor()
 */
void
rtl_arena_destructor (void * obj)
{
    rtl_arena_type * arena = (rtl_arena_type*)(obj);
    rtl_arena_segment_type * head;
    size_t i;

    assert(QUEUE_STARTED_NAMED(arena, arena_));

    RTL_MEMORY_LOCK_DESTROY(&(arena->m_lock));

    head = &(arena->m_segment_reserve_span_head);
    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    head = &(arena->m_segment_reserve_head);
    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    head = &(arena->m_segment_head);
    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
    {
        head = &(arena->m_freelist_head[i]);

        assert(head->m_size == (((sal_Size)1) << i));
        assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);

        rtl_arena_segment_destructor (head);
    }

    assert(arena->m_hash_table == arena->m_hash_table_0);
    assert(arena->m_hash_size  == RTL_ARENA_HASH_SIZE);
    assert(
        arena->m_hash_shift ==
        sal::static_int_cast< unsigned >(highbit(arena->m_hash_size) - 1));
}

/* ================================================================= */
/** rtl_arena_activate()
 */
rtl_arena_type *
rtl_arena_activate (
    rtl_arena_type *   arena,
    const char *       name,
    sal_Size           quantum,
    sal_Size           quantum_cache_max,
    rtl_arena_type *   source_arena,
    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
    void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size)
)
{
    assert(arena != 0);
    if (arena != 0)
    {
        (void) snprintf (arena->m_name, sizeof(arena->m_name), "%s", name);

        if (!RTL_MEMORY_ISP2(quantum))
        {
            /* roundup to next power of 2 */
            quantum = (((sal_Size)1) << highbit(quantum));
        }
        quantum_cache_max = RTL_MEMORY_P2ROUNDUP(quantum_cache_max, quantum);

        arena->m_quantum = quantum;
        arena->m_quantum_shift = highbit(arena->m_quantum) - 1;
        arena->m_qcache_max = quantum_cache_max;

        arena->m_source_arena = source_arena;
        arena->m_source_alloc = source_alloc;
        arena->m_source_free  = source_free;

        if (arena->m_qcache_max > 0)
        {
            char namebuf[RTL_ARENA_NAME_LENGTH + 1];
            int  i, n = (arena->m_qcache_max >> arena->m_quantum_shift);

            sal_Size size = n * sizeof(rtl_cache_type*);
            arena->m_qcache_ptr = (rtl_cache_type**)rtl_arena_alloc (gp_arena_arena, &size);
            if (!(arena->m_qcache_ptr))
            {
                /* out of memory */
                return (0);
            }
            for (i = 1; i <= n; i++)
            {
                size = i * arena->m_quantum;
                (void) snprintf (namebuf, sizeof(namebuf), "%s_%" SAL_PRIuUINTPTR, arena->m_name, size);
                arena->m_qcache_ptr[i - 1] = rtl_cache_create(namebuf, size, 0, NULL, NULL, NULL, NULL, arena, RTL_CACHE_FLAG_QUANTUMCACHE);
            }
        }

        /* insert into arena list */
        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
        QUEUE_INSERT_TAIL_NAMED(&(g_arena_list.m_arena_head), arena, arena_);
        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
    }
    return (arena);
}
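/* Quantum caching, sketched with assumed parameters: an arena activated with
 * quantum == 16 and quantum_cache_max == 64 gets n == 64 >> 4 == 4 caches
 * named "<name>_16" .. "<name>_64" (sizes 16, 32, 48, 64). rtl_arena_alloc()
 * then serves any request of at most 64 bytes from
 * m_qcache_ptr[(size >> m_quantum_shift) - 1] instead of the segment list.
 */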
/** rtl_arena_deactivate()
 */
void
rtl_arena_deactivate (
    rtl_arena_type * arena
)
{
    rtl_arena_segment_type * head, * segment;

    /* remove from arena list */
    RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
    QUEUE_REMOVE_NAMED(arena, arena_);
    RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));

    /* cleanup quantum cache(s) */
    if ((arena->m_qcache_max > 0) && (arena->m_qcache_ptr != 0))
    {
        int i, n = (arena->m_qcache_max >> arena->m_quantum_shift);
        for (i = 1; i <= n; i++)
        {
            if (arena->m_qcache_ptr[i - 1] != 0)
            {
                rtl_cache_destroy (arena->m_qcache_ptr[i - 1]);
                arena->m_qcache_ptr[i - 1] = 0;
            }
        }
        rtl_arena_free (
            gp_arena_arena,
            arena->m_qcache_ptr,
            n * sizeof(rtl_cache_type*));

        arena->m_qcache_ptr = 0;
    }

    /* check for leaked segments */
    // SAL_INFO(
    //  "sal.rtl",
    //  "rtl_arena_deactivate(" << arena->m_name << "): allocs: "
    //      << arena->m_stats.m_alloc << ", frees: " << arena->m_stats.m_free
    //      << "; total: " << arena->m_stats.m_mem_total << ", used: "
    //      << arena->m_stats.m_mem_alloc);
    if (arena->m_stats.m_alloc > arena->m_stats.m_free)
    {
        sal_Size i, n;

        // SAL_INFO(
        //  "sal.rtl",
        //  "rtl_arena_deactivate(" << arena->m_name << "): cleaning up "
        //      << (arena->m_stats.m_alloc - arena->m_stats.m_free)
        //      << " leaked segment(s) [" << arena->m_stats.m_mem_alloc
        //      << " bytes]");

        /* cleanup still used segment(s) */
        for (i = 0, n = arena->m_hash_size; i < n; i++)
        {
            while ((segment = arena->m_hash_table[i]) != 0)
            {
                /* pop from hash table */
                arena->m_hash_table[i] = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;

                /* coalesce w/ adjacent free segment(s) */
                rtl_arena_segment_coalesce (arena, segment);

                /* insert onto freelist */
                rtl_arena_freelist_insert (arena, segment);
            }
        }
    }

    /* cleanup hash table */
    if (arena->m_hash_table != arena->m_hash_table_0)
    {
        rtl_arena_free (
            gp_arena_arena,
            arena->m_hash_table,
            arena->m_hash_size * sizeof(rtl_arena_segment_type*));

        arena->m_hash_table = arena->m_hash_table_0;
        arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
    }

    /* cleanup segment list */
    head = &(arena->m_segment_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        if (segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
        {
            /* remove from freelist */
            rtl_arena_freelist_remove (arena, segment);
        }
        else
        {
            /* can have only free and span segments here */
            assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);
        }

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &segment);
    }

    /* cleanup segment reserve list */
    head = &(arena->m_segment_reserve_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);
    }

    /* cleanup segment reserve span(s) */
    head = &(arena->m_segment_reserve_span_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        /* can have only span segments here */
        assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);

        /* return span to g_machdep_arena */
        rtl_machdep_free (gp_machdep_arena, (void*)(segment->m_addr), segment->m_size);
    }
}

} //namespace
/* ================================================================= *
 *
 * arena implementation.
 *
 * ================================================================= */

/** rtl_arena_create()
 */
rtl_arena_type *
SAL_CALL rtl_arena_create (
    const char *       name,
    sal_Size           quantum,
    sal_Size           quantum_cache_max,
    rtl_arena_type *   source_arena,
    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
    void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size),
    SAL_UNUSED_PARAMETER int
) SAL_THROW_EXTERN_C()
{
    rtl_arena_type * result = 0;
    sal_Size         size   = sizeof(rtl_arena_type);

try_alloc:
    result = (rtl_arena_type*)rtl_arena_alloc (gp_arena_arena, &size);
    if (result != 0)
    {
        rtl_arena_type * arena = result;
        rtl_arena_constructor (arena);

        if (!source_arena)
        {
            assert(gp_default_arena != 0);
            source_arena = gp_default_arena;
        }

        result = rtl_arena_activate (
            arena,
            name,
            quantum,
            quantum_cache_max,
            source_arena,
            source_alloc,
            source_free
        );

        if (result == 0)
        {
            rtl_arena_deactivate (arena);
            rtl_arena_destructor (arena);
            rtl_arena_free (gp_arena_arena, arena, size);
        }
    }
    else if (gp_arena_arena == 0)
    {
        ensureArenaSingleton();
        if (gp_arena_arena)
        {
            /* try again */
            goto try_alloc;
        }
    }
    return (result);
}
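/* Minimal client-side usage sketch (illustrative only; the pool name and
 * quantum are made up, and error handling is omitted). Passing a null source
 * arena falls back to gp_default_arena; rtl_arena_alloc/rtl_arena_free serve
 * as the import/release callbacks, the same wiring rtl_arena_init() uses
 * below for gp_arena_arena:
 *
 *   rtl_arena_type * pool = rtl_arena_create (
 *       "example_pool", 16, 0,
 *       NULL,               // source arena: NULL => gp_default_arena
 *       rtl_arena_alloc,    // import spans from the source arena
 *       rtl_arena_free,     // return spans to the source arena
 *       0);
 *
 *   sal_Size size = 100;                       // rounded up to the quantum
 *   void * p = rtl_arena_alloc (pool, &size);  // size now holds the real size
 *   // ... use p ...
 *   rtl_arena_free (pool, p, size);            // pass the reported size back
 *   rtl_arena_destroy (pool);
 */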
/** rtl_arena_destroy()
 */
void
SAL_CALL rtl_arena_destroy (
    rtl_arena_type * arena
) SAL_THROW_EXTERN_C()
{
    if (arena != 0)
    {
        rtl_arena_deactivate (arena);
        rtl_arena_destructor (arena);
        rtl_arena_free (gp_arena_arena, arena, sizeof(rtl_arena_type));
    }
}
/** rtl_arena_alloc()
 */
void *
SAL_CALL rtl_arena_alloc (
    rtl_arena_type * arena,
    sal_Size *       pSize
) SAL_THROW_EXTERN_C()
{
    void * addr = 0;

    if ((arena != 0) && (pSize != 0))
    {
        sal_Size size;

        if (alloc_mode == AMode_SYSTEM)
            return rtl_allocateMemory(*pSize);

        size = RTL_MEMORY_ALIGN((*pSize), arena->m_quantum);
        if (size > arena->m_qcache_max)
        {
            /* allocate from segment list */
            rtl_arena_segment_type *segment = 0;

            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
            if (rtl_arena_segment_alloc (arena, size, &segment) ||
                rtl_arena_segment_create(arena, size, &segment)    )
            {
                /* shrink to fit */
                sal_Size oversize;

                /* mark segment used */
                assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE);
                segment->m_type = RTL_ARENA_SEGMENT_TYPE_USED;

                /* resize */
                assert(segment->m_size >= size);
                oversize = segment->m_size - size;
                if ((oversize >= arena->m_quantum) && (oversize >= arena->m_qcache_max))
                {
                    rtl_arena_segment_type * remainder = 0;
                    rtl_arena_segment_get (arena, &remainder);
                    if (remainder != 0)
                    {
                        segment->m_size = size;

                        remainder->m_addr = segment->m_addr + segment->m_size;
                        remainder->m_size = oversize;
                        remainder->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
                        QUEUE_INSERT_HEAD_NAMED(segment, remainder, s);

                        rtl_arena_freelist_insert (arena, remainder);
                    }
                }

                rtl_arena_hash_insert (arena, segment);

                (*pSize) = segment->m_size;
                addr = (void*)(segment->m_addr);
            }
            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
        }
        else if (size > 0)
        {
            /* allocate from quantum cache(s) */
            int index = (size >> arena->m_quantum_shift) - 1;
            assert(arena->m_qcache_ptr[index] != 0);

            addr = rtl_cache_alloc (arena->m_qcache_ptr[index]);
            if (addr != 0)
                (*pSize) = size;
        }
    }
    return (addr);
}
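/* Shrink-to-fit, with assumed numbers: a 5000-byte request against a
 * quantum-16 arena is aligned to 5008 bytes; if the instant-fit lookup hands
 * back an 8192-byte segment, the 3184-byte tail is split off as a new FREE
 * segment and returned to its freelist, and *pSize reports 5008 to the
 * caller.
 */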
/** rtl_arena_free()
 */
void
SAL_CALL rtl_arena_free (
    rtl_arena_type * arena,
    void *           addr,
    sal_Size         size
) SAL_THROW_EXTERN_C()
{
    if (arena != 0)
    {
        if (alloc_mode == AMode_SYSTEM)
        {
            rtl_freeMemory(addr);
            return;
        }

        size = RTL_MEMORY_ALIGN(size, arena->m_quantum);
        if (size > arena->m_qcache_max)
        {
            /* free to segment list */
            rtl_arena_segment_type * segment;

            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));

            segment = rtl_arena_hash_remove (arena, (sal_uIntPtr)(addr), size);
            if (segment != 0)
            {
                rtl_arena_segment_type *next, *prev;

                /* coalesce w/ adjacent free segment(s) */
                rtl_arena_segment_coalesce (arena, segment);

                /* determine (new) next and prev segment */
                next = segment->m_snext, prev = segment->m_sprev;

                /* entire span free when prev is a span, and next is either a span or a list head */
                if (((prev->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)) &&
                    ((next->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN) ||
                     (next->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD))    )
                {
                    assert(
                        prev->m_addr == segment->m_addr
                        && prev->m_size == segment->m_size);

                    if (arena->m_source_free)
                    {
                        addr = (void*)(prev->m_addr);
                        size = prev->m_size;

                        /* remove from segment list */
                        QUEUE_REMOVE_NAMED(segment, s);

                        /* release segment descriptor */
                        rtl_arena_segment_put (arena, &segment);

                        /* remove from segment list */
                        QUEUE_REMOVE_NAMED(prev, s);

                        /* release (span) segment descriptor */
                        rtl_arena_segment_put (arena, &prev);

                        /* update stats, return span to source arena */
                        arena->m_stats.m_mem_total -= size;
                        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

                        (arena->m_source_free)(arena->m_source_arena, addr, size);
                        return;
                    }
                }

                /* insert onto freelist */
                rtl_arena_freelist_insert (arena, segment);
            }

            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
        }
        else if (size > 0)
        {
            /* free to quantum cache(s) */
            int index = (size >> arena->m_quantum_shift) - 1;
            assert(arena->m_qcache_ptr[index] != 0);

            rtl_cache_free (arena->m_qcache_ptr[index], addr);
        }
    }
}
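/* Span reclamation, illustrated with assumed sizes: once the last allocation
 * inside a 64KB span imported from the source arena is freed, coalescing
 * collapses the span's contents into one FREE segment congruent with its
 * SPAN descriptor (same m_addr and m_size), and the whole 64KB block is
 * handed back via m_source_free instead of staying on a freelist.
 */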
/* ================================================================= *
 *
 * machdep internals.
 *
 * ================================================================= */

#if defined(SAL_UNX)
#include <sys/mman.h>
#elif defined(SAL_W32)
#define MAP_FAILED 0
#endif /* SAL_UNX || SAL_W32 */

namespace
{

/** rtl_machdep_alloc()
 */
void *
SAL_CALL rtl_machdep_alloc (
    rtl_arena_type * pArena,
    sal_Size *       pSize
)
{
    void *   addr;
    sal_Size size = (*pSize);

    assert(pArena == gp_machdep_arena);

#if defined(SOLARIS) && defined(SPARC)
    /* see @ mmap(2) man pages */
    size += (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
    if (size > (4 << 20))
        size = RTL_MEMORY_P2ROUNDUP(size, (4 << 20));
    else if (size > (512 << 10))
        size = RTL_MEMORY_P2ROUNDUP(size, (512 << 10));
    else
        size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
    size -= (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
#else
    /* default allocation granularity */
    if (pArena->m_quantum < (64 << 10))
    {
        size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
    }
    else
    {
        size = RTL_MEMORY_P2ROUNDUP(size, pArena->m_quantum);
    }
#endif

#if defined(SAL_UNX)
    addr = mmap (NULL, (size_t)(size), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#elif defined(SAL_W32)
    addr = VirtualAlloc (NULL, (SIZE_T)(size), MEM_COMMIT, PAGE_READWRITE);
#endif /* (SAL_UNX || SAL_W32) */

    if (addr != MAP_FAILED)
    {
        pArena->m_stats.m_alloc += 1;
        pArena->m_stats.m_mem_total += size;
        pArena->m_stats.m_mem_alloc += size;

        (*pSize) = size;
        return (addr);
    }
    return (NULL);
}

/** rtl_machdep_free()
 */
void
SAL_CALL rtl_machdep_free (
    rtl_arena_type * pArena,
    void *           pAddr,
    sal_Size         nSize
)
{
    assert(pArena == gp_machdep_arena);

    pArena->m_stats.m_free += 1;
    pArena->m_stats.m_mem_total -= nSize;
    pArena->m_stats.m_mem_alloc -= nSize;

#if defined(SAL_UNX)
    (void) munmap(pAddr, nSize);
#elif defined(SAL_W32)
    (void) VirtualFree ((LPVOID)(pAddr), (SIZE_T)(0), MEM_RELEASE);
#endif /* (SAL_UNX || SAL_W32) */
}

sal_Size
rtl_machdep_pagesize()
{
#if defined(SAL_UNX)
#if defined(FREEBSD) || defined(NETBSD) || defined(DRAGONFLY)
    return ((sal_Size)getpagesize());
#else  /* POSIX */
    return ((sal_Size)sysconf(_SC_PAGESIZE));
#endif /* xBSD || POSIX */
#elif defined(SAL_W32)
    SYSTEM_INFO info;
    GetSystemInfo (&info);
    return ((sal_Size)(info.dwPageSize));
#endif /* (SAL_UNX || SAL_W32) */
}

} //namespace
/* ================================================================= *
 *
 * arena initialization.
 *
 * ================================================================= */

void
rtl_arena_init()
{
    {
        /* list of arenas */
        RTL_MEMORY_LOCK_INIT(&(g_arena_list.m_lock));
        rtl_arena_constructor (&(g_arena_list.m_arena_head));
    }
    {
        /* machdep (pseudo) arena */
        static rtl_arena_type g_machdep_arena;

        assert(gp_machdep_arena == 0);
        rtl_arena_constructor (&g_machdep_arena);

        gp_machdep_arena = rtl_arena_activate (
            &g_machdep_arena,
            "rtl_machdep_arena",
            rtl_machdep_pagesize(),
            0,       /* no quantum caching */
            0, 0, 0  /* no source */
        );
        assert(gp_machdep_arena != 0);
    }
    {
        /* default arena */
        static rtl_arena_type g_default_arena;

        assert(gp_default_arena == 0);
        rtl_arena_constructor (&g_default_arena);

        gp_default_arena = rtl_arena_activate (
            &g_default_arena,
            "rtl_default_arena",
            rtl_machdep_pagesize(),
            0,                 /* no quantum caching */
            gp_machdep_arena,  /* source */
            rtl_machdep_alloc,
            rtl_machdep_free
        );
        assert(gp_default_arena != 0);
    }
    {
        /* arena internal arena */
        static rtl_arena_type g_arena_arena;

        assert(gp_arena_arena == 0);
        rtl_arena_constructor (&g_arena_arena);

        gp_arena_arena = rtl_arena_activate (
            &g_arena_arena,
            "rtl_arena_internal_arena",
            64,                /* quantum */
            0,                 /* no quantum caching */
            gp_default_arena,  /* source */
            rtl_arena_alloc,
            rtl_arena_free
        );
        assert(gp_arena_arena != 0);
    }
    // SAL_INFO("sal.rtl", "rtl_arena_init completed");
}
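/* Bootstrap hierarchy set up above (each arena imports spans from its source):
 *
 *   gp_machdep_arena  - pseudo arena over mmap()/VirtualAlloc() pages
 *   gp_default_arena  - sourced from gp_machdep_arena via rtl_machdep_alloc/free
 *   gp_arena_arena    - sourced from gp_default_arena, quantum 64; backs
 *                       rtl_arena_type objects, quantum-cache pointer tables
 *                       and resized hash tables.
 */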
/* ================================================================= */

void
rtl_arena_fini()
{
    if (gp_arena_arena != 0)
    {
        rtl_arena_type * arena, * head;

        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
        head = &(g_arena_list.m_arena_head);

        for (arena = head->m_arena_next; arena != head; arena = arena->m_arena_next)
        {
            // SAL_INFO(
            //  "sal.rtl",
            //  "rtl_arena_fini(" << arena->m_name << "): allocs: "
            //      << arena->m_stats.m_alloc << ", frees: "
            //      << arena->m_stats.m_free << "; total: "
            //      << arena->m_stats.m_mem_total << ", used: "
            //      << arena->m_stats.m_mem_alloc);
        }
        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
    }
    // SAL_INFO("sal.rtl", "rtl_arena_fini completed");
}

/* ================================================================= */

/* vim:set shiftwidth=4 softtabstop=4 expandtab: */