/* GLIB sliced memory - fast concurrent memory chunk allocator
 * Copyright (C) 2005 Tim Janik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* MT safe */
#include "config.h"
#include "glibconfig.h"

#if defined(HAVE_POSIX_MEMALIGN) && !defined(_XOPEN_SOURCE)
#define _XOPEN_SOURCE 600       /* posix_memalign() */
#endif
#include <stdlib.h>             /* posix_memalign() */
#include <string.h>
#include <errno.h>

#ifdef G_OS_UNIX
#include <unistd.h>             /* sysconf() */
#endif
#ifdef G_OS_WIN32
#include <windows.h>
#include <process.h>
#endif

#include <stdio.h>              /* fputs */

#include "gslice.h"

#include "gmain.h"
#include "gmem.h"               /* gslice.h */
#include "gstrfuncs.h"
#include "gutils.h"
#include "gtrashstack.h"
#include "gtestutils.h"
#include "gthread.h"
#include "glib_trace.h"
#include "gprintf.h"

#include "gvalgrind.h"

/**
 * SECTION:memory_slices
 * @title: Memory Slices
 * @short_description: efficient way to allocate groups of equal-sized
 *     chunks of memory
 *
 * Memory slices provide a space-efficient and multi-processing scalable
 * way to allocate equal-sized pieces of memory, just like the original
 * #GMemChunks (from GLib <= 2.8), while avoiding their excessive
 * memory-waste, scalability and performance problems.
 *
 * To achieve these goals, the slice allocator uses a sophisticated,
 * layered design that has been inspired by Bonwick's slab allocator
 * ([Bonwick94](http://citeseer.ist.psu.edu/bonwick94slab.html)
 * Jeff Bonwick, The slab allocator: An object-caching kernel
 * memory allocator. USENIX 1994, and
 * [Bonwick01](http://citeseer.ist.psu.edu/bonwick01magazines.html)
 * Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 * slab allocator to many CPUs and arbitrary resources. USENIX 2001).
 *
 * It uses posix_memalign() to optimize allocations of many equally-sized
 * chunks, and has per-thread free lists (the so-called magazine layer)
 * to quickly satisfy allocation requests of already known structure sizes.
 * This is accompanied by extra caching logic to keep freed memory around
 * for some time before returning it to the system. Memory that is unused
 * due to alignment constraints is used for cache colorization (random
 * distribution of chunk addresses) to improve CPU cache utilization. The
 * caching layer of the slice allocator adapts itself to high lock contention
 * to improve scalability.
 *
 * The slice allocator can allocate blocks as small as two pointers, and
 * unlike malloc(), it does not reserve extra space per block. For large block
 * sizes, g_slice_new() and g_slice_alloc() will automatically delegate to the
 * system malloc() implementation. For newly written code it is recommended
 * to use the new `g_slice` API instead of g_malloc() and
 * friends, as long as objects are not resized during their lifetime and the
 * object size used at allocation time is still available when freeing.
 *
 * Here is an example for using the slice allocator:
 * |[<!-- language="C" -->
 * gchar *mem[10000];
 * gint i, j;
 *
 * // Allocate 10000 blocks.
 * for (i = 0; i < 10000; i++)
 *   {
 *     mem[i] = g_slice_alloc (50);
 *
 *     // Fill in the memory with some junk.
 *     for (j = 0; j < 50; j++)
 *       mem[i][j] = i * j;
 *   }
 *
 * // Now free all of the blocks.
 * for (i = 0; i < 10000; i++)
 *   g_slice_free1 (50, mem[i]);
 * ]|
 *
 * And here is an example for using the slice allocator
 * with data structures:
 * |[<!-- language="C" -->
 * GRealArray *array;
 *
 * // Allocate one block, using the g_slice_new() macro.
 * array = g_slice_new (GRealArray);
 *
 * // We can now use array just like a normal pointer to a structure.
 * array->data            = NULL;
 * array->len             = 0;
 * array->alloc           = 0;
 * array->zero_terminated = (zero_terminated ? 1 : 0);
 * array->clear           = (clear ? 1 : 0);
 * array->elt_size        = elt_size;
 *
 * // We can free the block, so it can be reused.
 * g_slice_free (GRealArray, array);
 * ]|
 */

/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
 * allocator and magazine extensions as outlined in:
 * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
 *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
 * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 *   slab allocator to many CPUs and arbitrary resources.
 *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
 * the layers are:
 * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
 *   of recently freed and soon to be allocated chunks is maintained per thread.
 *   this way, most alloc/free requests can be quickly satisfied from per-thread
 *   free lists which only require one g_private_get() call to retrieve the
 *   thread handle.
 * - the magazine cache. allocating and freeing chunks to/from threads only
 *   occurs at magazine sizes from a global depot of magazines. the depot
 *   maintains a 15 second working set of allocated magazines, so full
 *   magazines are not allocated and released too often.
 *   the chunk size dependent magazine sizes automatically adapt (within limits,
 *   see [3]) to lock contention to properly scale performance across a variety
 *   of SMP systems.
 * - the slab allocator. this allocator allocates slabs (blocks of memory) close
 *   to the system page size or multiples thereof which have to be page aligned.
 *   the blocks are divided into smaller chunks which are used to satisfy
 *   allocations from the upper layers. the space provided by the remainder of
 *   the chunk size division is used for cache colorization (random distribution
 *   of chunk addresses) to improve processor cache utilization. multiple slabs
 *   with the same chunk size are kept in a partially sorted ring to allow O(1)
 *   freeing and allocation of chunks (as long as the allocation of an entirely
 *   new slab can be avoided).
 * - the page allocator. on most modern systems, posix_memalign(3) or
 *   memalign(3) should be available, so this is used to allocate blocks with
 *   system page size based alignments and sizes or multiples thereof.
 *   if no memalign variant is provided, valloc() is used instead and
 *   block sizes are limited to the system page size (no multiples thereof).
 *   as a fallback, on systems without even valloc(), a malloc(3)-based page
 *   allocator with alloc-only behaviour is used.
 *
 * NOTES:
 * [1] some systems memalign(3) implementations may rely on boundary tagging for
 *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
 *     we reserve 2 * sizeof (void*) per block size for the systems memalign(3),
 *     specified in NATIVE_MALLOC_PADDING.
 * [2] using the slab allocator alone already provides for a fast and efficient
 *     allocator, it doesn't properly scale beyond single-threaded uses though.
 *     also, the slab allocator implements eager free(3)-ing, i.e. does not
 *     provide any form of caching or working set maintenance. so if used alone,
 *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
 *     at certain thresholds.
 * [3] magazine sizes are bound by an implementation specific minimum size and
 *     a chunk size specific maximum to limit magazine storage sizes to roughly
 *     16KB.
 * [4] allocating ca. 8 chunks per block/page keeps a good balance between
 *     external and internal fragmentation (<= 12.5%). [Bonwick94]
 */
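
/* a rough illustration of note [4], assuming a 4096-byte page, ~48 bytes of
 * SlabInfo at the page tail and P2ALIGNMENT-aligned 496-byte chunks: the
 * remaining 4048 bytes hold 8 chunks (3968 bytes), leaving 80 bytes (~2% of
 * the page) unused. since slabs always hold >= 8 chunks, such tail waste is
 * bounded by one chunk, i.e. <= 1/8 = 12.5% of the usable space.
 */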

/* --- macros and constants --- */
#define LARGEALIGNMENT          (256)
#define P2ALIGNMENT             (2 * sizeof (gsize))                            /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
#define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
#define NATIVE_MALLOC_PADDING   P2ALIGNMENT                                     /* per-page padding left for native malloc(3) see [1] */
#define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
#define MAX_MAGAZINE_SIZE       (256)                                           /* see [3] and allocator_get_magazine_threshold() for this */
#define MIN_MAGAZINE_SIZE       (4)
#define MAX_STAMP_COUNTER       (7)                                             /* distributes the load of gettimeofday() */
#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8)    /* we want at least 8 chunks per page, see [4] */
#define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
#define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1)                     /* asize must be P2ALIGNMENT aligned */
#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
#define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)

/* optimized version of ALIGN (size, P2ALIGNMENT) */
#if     GLIB_SIZEOF_SIZE_T * 2 == 8  /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0x7) & ~(gsize) 0x7)
#elif   GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0xf) & ~(gsize) 0xf)
#else
#define P2ALIGN(size)   ALIGN (size, P2ALIGNMENT)
#endif
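
/* a worked example, assuming a 64-bit system (GLIB_SIZEOF_SIZE_T == 8, so
 * P2ALIGNMENT == 16): P2ALIGN (50) == (50 + 0xf) & ~0xf == 64, the request is
 * served from slab index SLAB_INDEX (allocator, 64) == 64 / 16 - 1 == 3, and
 * SLAB_CHUNK_SIZE (allocator, 3) == (3 + 1) * 16 == 64 maps back to the
 * rounded-up chunk size.
 */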

/* special helpers to avoid gmessage.c dependency */
static void mem_error (const char *format, ...) G_GNUC_PRINTF (1,2);
#define mem_assert(cond)    do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)

/* --- structures --- */
typedef struct _ChunkLink      ChunkLink;
typedef struct _SlabInfo       SlabInfo;
typedef struct _CachedMagazine CachedMagazine;
struct _ChunkLink {
  ChunkLink *next;
  ChunkLink *data;
};
struct _SlabInfo {
  ChunkLink *chunks;
  guint n_allocated;
  SlabInfo *next, *prev;
};
typedef struct {
  ChunkLink *chunks;
  gsize      count;                     /* approximate chunks list length */
} Magazine;
typedef struct {
  Magazine   *magazine1;                /* array of MAX_SLAB_INDEX (allocator) */
  Magazine   *magazine2;                /* array of MAX_SLAB_INDEX (allocator) */
} ThreadMemory;
typedef struct {
  gboolean always_malloc;
  gboolean bypass_magazines;
  gboolean debug_blocks;
  gsize    working_set_msecs;
  guint    color_increment;
} SliceConfig;
typedef struct {
  /* const after initialization */
  gsize         min_page_size, max_page_size;
  SliceConfig   config;
  gsize         max_slab_chunk_size_for_magazine_cache;
  /* magazine cache */
  GMutex        magazine_mutex;
  ChunkLink   **magazines;                /* array of MAX_SLAB_INDEX (allocator) */
  guint        *contention_counters;      /* array of MAX_SLAB_INDEX (allocator) */
  gint          mutex_counter;
  guint         stamp_counter;
  guint         last_stamp;
  /* slab allocator */
  GMutex        slab_mutex;
  SlabInfo    **slab_stack;               /* array of MAX_SLAB_INDEX (allocator) */
  guint         color_accu;
} Allocator;

/* --- g-slice prototypes --- */
static gpointer     slab_allocator_alloc_chunk       (gsize      chunk_size);
static void         slab_allocator_free_chunk        (gsize      chunk_size,
                                                      gpointer   mem);
static void         private_thread_memory_cleanup    (gpointer   data);
static gpointer     allocator_memalign               (gsize      alignment,
                                                      gsize      memsize);
static void         allocator_memfree                (gsize      memsize,
                                                      gpointer   mem);
static inline void  magazine_cache_update_stamp      (void);
static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
                                                      guint      ix);

/* --- g-slice memory checker --- */
static void     smc_notify_alloc  (void   *pointer,
                                   size_t  size);
static int      smc_notify_free   (void   *pointer,
                                   size_t  size);

/* --- variables --- */
static GPrivate    private_thread_memory = G_PRIVATE_INIT (private_thread_memory_cleanup);
static gsize       sys_page_size = 0;
static Allocator   allocator[1] = { { 0, }, };
static SliceConfig slice_config = {
  FALSE,        /* always_malloc */
  FALSE,        /* bypass_magazines */
  FALSE,        /* debug_blocks */
  15 * 1000,    /* working_set_msecs */
  1,            /* color increment, alt: 0x7fffffff */
};
static GMutex      smc_tree_mutex; /* mutex for G_SLICE=debug-blocks */

/* --- auxiliary functions --- */
void
g_slice_set_config (GSliceConfig ckey,
                    gint64       value)
{
  g_return_if_fail (sys_page_size == 0);
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      slice_config.always_malloc = value != 0;
      break;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      slice_config.bypass_magazines = value != 0;
      break;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      slice_config.working_set_msecs = value;
      break;
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      slice_config.color_increment = value;
      break;
    default: ;
    }
}

gint64
g_slice_get_config (GSliceConfig ckey)
{
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      return slice_config.always_malloc;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      return slice_config.bypass_magazines;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      return slice_config.working_set_msecs;
    case G_SLICE_CONFIG_CHUNK_SIZES:
      return MAX_SLAB_INDEX (allocator);
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      return slice_config.color_increment;
    default:
      return 0;
    }
}

gint64*
g_slice_get_config_state (GSliceConfig ckey,
                          gint64       address,
                          guint       *n_values)
{
  guint i = 0;
  g_return_val_if_fail (n_values != NULL, NULL);
  *n_values = 0;
  switch (ckey)
    {
      gint64 array[64];
    case G_SLICE_CONFIG_CONTENTION_COUNTER:
      array[i++] = SLAB_CHUNK_SIZE (allocator, address);
      array[i++] = allocator->contention_counters[address];
      array[i++] = allocator_get_magazine_threshold (allocator, address);
      *n_values = i;
      return g_memdup (array, sizeof (array[0]) * *n_values);
    default:
      return NULL;
    }
}

static void
slice_config_init (SliceConfig *config)
{
  const gchar *val;

  *config = slice_config;

  val = getenv ("G_SLICE");
  if (val != NULL)
    {
      gint flags;
      const GDebugKey keys[] = {
        { "always-malloc", 1 << 0 },
        { "debug-blocks",  1 << 1 },
      };

      flags = g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
      if (flags & (1 << 0))
        config->always_malloc = TRUE;
      if (flags & (1 << 1))
        config->debug_blocks = TRUE;
    }
  else
    {
      /* G_SLICE was not specified, so check if valgrind is running and
       * disable ourselves if it is.
       *
       * This way it's possible to force gslice to be enabled under
       * valgrind just by setting G_SLICE to the empty string.
       */
#ifdef ENABLE_VALGRIND
      if (RUNNING_ON_VALGRIND)
        config->always_malloc = TRUE;
#endif
    }
}
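
/* e.g. both debug keys parsed above can be combined at runtime without a
 * rebuild:
 *   $ G_SLICE=always-malloc,debug-blocks ./my-program
 */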

static void
g_slice_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  mem_assert (sys_page_size == 0);
  mem_assert (MIN_MAGAZINE_SIZE >= 4);

#ifdef G_OS_WIN32
  {
    SYSTEM_INFO system_info;
    GetSystemInfo (&system_info);
    sys_page_size = system_info.dwPageSize;
  }
#else
  sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
#endif
  mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
  mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
  slice_config_init (&allocator->config);
  allocator->min_page_size = sys_page_size;
#if HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN
  /* allow allocation of pages up to 8KB (with 8KB alignment).
   * this is useful because many medium to large sized structures
   * fit less than 8 times (see [4]) into 4KB pages.
   * we allow very small page sizes here, to reduce wastage in
   * threads if only small allocations are required (this does
   * bear the risk of increasing allocation times and fragmentation
   * though).
   */
  allocator->min_page_size = MAX (allocator->min_page_size, 4096);
  allocator->max_page_size = MAX (allocator->min_page_size, 8192);
  allocator->min_page_size = MIN (allocator->min_page_size, 128);
#else
  /* we can only align to system page size */
  allocator->max_page_size = sys_page_size;
#endif
  if (allocator->config.always_malloc)
    {
      allocator->contention_counters = NULL;
      allocator->magazines = NULL;
      allocator->slab_stack = NULL;
    }
  else
    {
      allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
      allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
      allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
    }

  allocator->mutex_counter = 0;
  allocator->stamp_counter = MAX_STAMP_COUNTER;     /* force initial update */
  allocator->last_stamp = 0;
  allocator->color_accu = 0;
  magazine_cache_update_stamp();
  /* values cached for performance reasons */
  allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
  if (allocator->config.always_malloc || allocator->config.bypass_magazines)
    allocator->max_slab_chunk_size_for_magazine_cache = 0;      /* non-optimized cases */
}

static inline guint
allocator_categorize (gsize aligned_chunk_size)
{
  /* speed up the likely path */
  if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
    return 1;           /* use magazine cache */

  if (!allocator->config.always_malloc &&
      aligned_chunk_size &&
      aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
    {
      if (allocator->config.bypass_magazines)
        return 2;       /* use slab allocator, see [2] */
      return 1;         /* use magazine cache */
    }
  return 0;             /* use malloc() */
}

static inline void
g_mutex_lock_a (GMutex *mutex,
                guint  *contention_counter)
{
  gboolean contention = FALSE;
  if (!g_mutex_trylock (mutex))
    {
      g_mutex_lock (mutex);
      contention = TRUE;
    }
  if (contention)
    {
      allocator->mutex_counter++;
      if (allocator->mutex_counter >= 1)        /* quickly adapt to contention */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
        }
    }
  else /* !contention */
    {
      allocator->mutex_counter--;
      if (allocator->mutex_counter < -11)       /* moderately recover magazine sizes */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MAX (*contention_counter, 1) - 1;
        }
    }
}

static inline ThreadMemory*
thread_memory_from_self (void)
{
  ThreadMemory *tmem = g_private_get (&private_thread_memory);
  if (G_UNLIKELY (!tmem))
    {
      static GMutex init_mutex;
      guint n_magazines;

      g_mutex_lock (&init_mutex);
      if G_UNLIKELY (sys_page_size == 0)
        g_slice_init_nomessage ();
      g_mutex_unlock (&init_mutex);

      n_magazines = MAX_SLAB_INDEX (allocator);
      tmem = g_malloc0 (sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
      tmem->magazine1 = (Magazine*) (tmem + 1);
      tmem->magazine2 = &tmem->magazine1[n_magazines];
      g_private_set (&private_thread_memory, tmem);
    }
  return tmem;
}

static inline ChunkLink*
magazine_chain_pop_head (ChunkLink **magazine_chunks)
{
  /* magazine chains are linked via ChunkLink->next.
   * each ChunkLink->data of the toplevel chain may point to a subchain,
   * linked via ChunkLink->next. ChunkLink->data of the subchains just
   * contains uninitialized junk.
   */
  ChunkLink *chunk = (*magazine_chunks)->data;
  if (G_UNLIKELY (chunk))
    {
      /* allocating from freed list */
      (*magazine_chunks)->data = chunk->next;
    }
  else
    {
      chunk = *magazine_chunks;
      *magazine_chunks = chunk->next;
    }
  return chunk;
}

#if 0 /* useful for debugging */
static guint
magazine_count (ChunkLink *head)
{
  guint count = 0;
  if (!head)
    return 0;
  while (head)
    {
      ChunkLink *child;
      count += 1;
      for (child = head->data; child; child = child->next)
        count += 1;
      head = head->next;
    }
  return count;
}
#endif

static inline gsize
allocator_get_magazine_threshold (Allocator *allocator,
                                  guint      ix)
{
  /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
   * which is required by the implementation. also, for moderately sized chunks
   * (say >= 64 bytes), magazine sizes shouldn't be much smaller than the number
   * of chunks available per page/2 to avoid excessive traffic in the magazine
   * cache for small to medium sized structures.
   * the upper bound of the magazine size is effectively provided by
   * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
   * the content of a single magazine doesn't exceed ca. 16KB.
   */
  gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
  guint threshold = MAX (MIN_MAGAZINE_SIZE, allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
  guint contention_counter = allocator->contention_counters[ix];
  if (G_UNLIKELY (contention_counter))  /* single CPU bias */
    {
      /* adapt contention counter thresholds to chunk sizes */
      contention_counter = contention_counter * 64 / chunk_size;
      threshold = MAX (threshold, contention_counter);
    }
  return threshold;
}
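
/* a worked example, assuming max_page_size == 8192: for 64-byte chunks the
 * uncontended threshold is MAX (4, 8192 / MAX (5 * 64, 5 * 32)) == 25, i.e.
 * one magazine holds roughly 25 * 64 == 1600 bytes, well below the ~16KB
 * target mentioned above.
 */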

/* --- magazine cache --- */
static inline void
magazine_cache_update_stamp (void)
{
  if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
    {
      GTimeVal tv;
      g_get_current_time (&tv);
      allocator->last_stamp = tv.tv_sec * 1000 + tv.tv_usec / 1000; /* milliseconds */
      allocator->stamp_counter = 0;
    }
  else
    allocator->stamp_counter++;
}

static inline ChunkLink*
magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
{
  ChunkLink *chunk1;
  ChunkLink *chunk2;
  ChunkLink *chunk3;
  ChunkLink *chunk4;
  /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
  /* ensure a magazine with at least 4 unused data pointers */
  chunk1 = magazine_chain_pop_head (&magazine_chunks);
  chunk2 = magazine_chain_pop_head (&magazine_chunks);
  chunk3 = magazine_chain_pop_head (&magazine_chunks);
  chunk4 = magazine_chain_pop_head (&magazine_chunks);
  chunk4->next = magazine_chunks;
  chunk3->next = chunk4;
  chunk2->next = chunk3;
  chunk1->next = chunk2;
  return chunk1;
}

/* access the first 4 fields of a specially prepared magazine chain */
#define magazine_chain_prev(mc)         ((mc)->data)
#define magazine_chain_stamp(mc)        ((mc)->next->data)
#define magazine_chain_uint_stamp(mc)   GPOINTER_TO_UINT ((mc)->next->data)
#define magazine_chain_next(mc)         ((mc)->next->next->data)
#define magazine_chain_count(mc)        ((mc)->next->next->next->data)
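
/* layout of a prepared magazine chain, as set up by
 * magazine_chain_prepare_fields() (links reached via ->next):
 *   mc->data                     prev pointer of the magazine cache ring
 *   mc->next->data               time stamp (GUINT_TO_POINTER, milliseconds)
 *   mc->next->next->data         next pointer of the magazine cache ring
 *   mc->next->next->next->data   chunk count of this magazine
 * the data fields of all further links hold ordinary free-chunk subchains.
 */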

static void
magazine_cache_trim (Allocator *allocator,
                     guint      ix,
                     guint      stamp)
{
  /* g_mutex_lock (allocator->mutex); done by caller */
  /* trim magazine cache from tail */
  ChunkLink *current = magazine_chain_prev (allocator->magazines[ix]);
  ChunkLink *trash = NULL;
  while (ABS (stamp - magazine_chain_uint_stamp (current)) >= allocator->config.working_set_msecs)
    {
      /* unlink */
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      /* clear special fields, put on trash stack */
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      magazine_chain_prev (current) = trash;
      trash = current;
      /* fixup list head if required */
      if (current == allocator->magazines[ix])
        {
          allocator->magazines[ix] = NULL;
          break;
        }
      current = prev;
    }
  g_mutex_unlock (&allocator->magazine_mutex);
  /* free trash */
  if (trash)
    {
      const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      g_mutex_lock (&allocator->slab_mutex);
      while (trash)
        {
          current = trash;
          trash = magazine_chain_prev (current);
          magazine_chain_prev (current) = NULL; /* clear special field */
          while (current)
            {
              ChunkLink *chunk = magazine_chain_pop_head (&current);
              slab_allocator_free_chunk (chunk_size, chunk);
            }
        }
      g_mutex_unlock (&allocator->slab_mutex);
    }
}

static void
magazine_cache_push_magazine (guint      ix,
                              ChunkLink *magazine_chunks,
                              gsize      count) /* must be >= MIN_MAGAZINE_SIZE */
{
  ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
  ChunkLink *next, *prev;
  g_mutex_lock (&allocator->magazine_mutex);
  /* add magazine at head */
  next = allocator->magazines[ix];
  if (next)
    prev = magazine_chain_prev (next);
  else
    next = prev = current;
  magazine_chain_next (prev) = current;
  magazine_chain_prev (next) = current;
  magazine_chain_prev (current) = prev;
  magazine_chain_next (current) = next;
  magazine_chain_count (current) = (gpointer) count;
  /* stamp magazine */
  magazine_cache_update_stamp();
  magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
  allocator->magazines[ix] = current;
  /* free old magazines beyond a certain threshold */
  magazine_cache_trim (allocator, ix, allocator->last_stamp);
  /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
}

static ChunkLink*
magazine_cache_pop_magazine (guint  ix,
                             gsize *countp)
{
  g_mutex_lock_a (&allocator->magazine_mutex, &allocator->contention_counters[ix]);
  if (!allocator->magazines[ix])
    {
      guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
      gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      ChunkLink *chunk, *head;
      g_mutex_unlock (&allocator->magazine_mutex);
      g_mutex_lock (&allocator->slab_mutex);
      head = slab_allocator_alloc_chunk (chunk_size);
      head->data = NULL;
      chunk = head;
      for (i = 1; i < magazine_threshold; i++)
        {
          chunk->next = slab_allocator_alloc_chunk (chunk_size);
          chunk = chunk->next;
          chunk->data = NULL;
        }
      chunk->next = NULL;
      g_mutex_unlock (&allocator->slab_mutex);
      *countp = i;
      return head;
    }
  else
    {
      ChunkLink *current = allocator->magazines[ix];
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      /* unlink */
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      allocator->magazines[ix] = next == current ? NULL : next;
      g_mutex_unlock (&allocator->magazine_mutex);
      /* clear special fields and hand out */
      *countp = (gsize) magazine_chain_count (current);
      magazine_chain_prev (current) = NULL;
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      return current;
    }
}

/* --- thread magazines --- */
static void
private_thread_memory_cleanup (gpointer data)
{
  ThreadMemory *tmem = data;
  const guint n_magazines = MAX_SLAB_INDEX (allocator);
  guint ix;
  for (ix = 0; ix < n_magazines; ix++)
    {
      Magazine *mags[2];
      guint j;
      mags[0] = &tmem->magazine1[ix];
      mags[1] = &tmem->magazine2[ix];
      for (j = 0; j < 2; j++)
        {
          Magazine *mag = mags[j];
          if (mag->count >= MIN_MAGAZINE_SIZE)
            magazine_cache_push_magazine (ix, mag->chunks, mag->count);
          else
            {
              const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
              g_mutex_lock (&allocator->slab_mutex);
              while (mag->chunks)
                {
                  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
                  slab_allocator_free_chunk (chunk_size, chunk);
                }
              g_mutex_unlock (&allocator->slab_mutex);
            }
        }
    }
  g_free (tmem);
}

static void
thread_memory_magazine1_reload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
  mag->count = 0;
  mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
}

static void
thread_memory_magazine2_unload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine2[ix];
  magazine_cache_push_magazine (ix, mag->chunks, mag->count);
  mag->chunks = NULL;
  mag->count = 0;
}

static inline void
thread_memory_swap_magazines (ThreadMemory *tmem,
                              guint         ix)
{
  Magazine xmag = tmem->magazine1[ix];
  tmem->magazine1[ix] = tmem->magazine2[ix];
  tmem->magazine2[ix] = xmag;
}

static inline gboolean
thread_memory_magazine1_is_empty (ThreadMemory *tmem,
                                  guint         ix)
{
  return tmem->magazine1[ix].chunks == NULL;
}

static inline gboolean
thread_memory_magazine2_is_full (ThreadMemory *tmem,
                                 guint         ix)
{
  return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
}

static inline gpointer
thread_memory_magazine1_alloc (ThreadMemory *tmem,
                               guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
  if (G_LIKELY (mag->count > 0))
    mag->count--;
  return chunk;
}

static inline void
thread_memory_magazine2_free (ThreadMemory *tmem,
                              guint         ix,
                              gpointer      mem)
{
  Magazine *mag = &tmem->magazine2[ix];
  ChunkLink *chunk = mem;
  chunk->data = NULL;
  chunk->next = mag->chunks;
  mag->chunks = chunk;
  mag->count++;
}

/* --- API functions --- */

/**
 * g_slice_new:
 * @type: the type to allocate, typically a structure name
 *
 * A convenience macro to allocate a block of memory from the
 * slice allocator.
 *
 * It calls g_slice_alloc() with `sizeof (@type)` and casts the
 * returned pointer to a pointer of the given type, avoiding a type
 * cast in the source code. Note that the underlying slice allocation
 * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * This can never return %NULL as the minimum allocation size from
 * `sizeof (@type)` is 1 byte.
 *
 * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
 *     to @type
 *
 * Since: 2.10
 */

/**
 * g_slice_new0:
 * @type: the type to allocate, typically a structure name
 *
 * A convenience macro to allocate a block of memory from the
 * slice allocator and set the memory to 0.
 *
 * It calls g_slice_alloc0() with `sizeof (@type)`
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
 * Note that the underlying slice allocation mechanism can
 * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * This can never return %NULL as the minimum allocation size from
 * `sizeof (@type)` is 1 byte.
 *
 * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
 *     to @type
 *
 * Since: 2.10
 */

/**
 * g_slice_dup:
 * @type: the type to duplicate, typically a structure name
 * @mem: (not nullable): the memory to copy into the allocated block
 *
 * A convenience macro to duplicate a block of memory using
 * the slice allocator.
 *
 * It calls g_slice_copy() with `sizeof (@type)`
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
 * Note that the underlying slice allocation mechanism can
 * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * This can never return %NULL.
 *
 * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
 *     to @type
 *
 * Since: 2.14
 */

/**
 * g_slice_free:
 * @type: the type of the block to free, typically a structure name
 * @mem: a pointer to the block to free
 *
 * A convenience macro to free a block of memory that has
 * been allocated from the slice allocator.
 *
 * It calls g_slice_free1() using `sizeof (@type)`
 * as the block size.
 * Note that the exact release behaviour can be changed with the
 * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
 * [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem is %NULL, this macro does nothing.
 *
 * Since: 2.10
 */

/**
 * g_slice_free_chain:
 * @type: the type of the @mem_chain blocks
 * @mem_chain: a pointer to the first block of the chain
 * @next: the field name of the next pointer in @type
 *
 * Frees a linked list of memory blocks of structure type @type.
 *
 * The memory blocks must be equal-sized, allocated via
 * g_slice_alloc() or g_slice_alloc0() and linked together by
 * a @next pointer (similar to #GSList). The name of the
 * @next field in @type is passed as third argument.
 * Note that the exact release behaviour can be changed with the
 * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
 * [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem_chain is %NULL, this function does nothing.
 *
 * Since: 2.10
 */

/**
 * g_slice_alloc:
 * @block_size: the number of bytes to allocate
 *
 * Allocates a block of memory from the slice allocator.
 *
 * The block address handed out can be expected to be aligned
 * to at least 1 * sizeof (void*), though in general slices
 * are 2 * sizeof (void*) bytes aligned; if a malloc() fallback
 * implementation is used instead, the alignment may be reduced in a
 * libc dependent fashion.
 * Note that the underlying slice allocation mechanism can
 * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * Returns: a pointer to the allocated memory block, which will be %NULL
 *     if and only if @block_size is 0
 *
 * Since: 2.10
 */
gpointer
g_slice_alloc (gsize mem_size)
{
  ThreadMemory *tmem;
  gsize chunk_size;
  gpointer mem;
  guint acat;

  /* This gets the private structure for this thread.  If the private
   * structure does not yet exist, it is created.
   *
   * This has a side effect of causing GSlice to be initialised, so it
   * must come first.
   */
  tmem = thread_memory_from_self ();

  chunk_size = P2ALIGN (mem_size);
  acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* allocate through magazine layer */
    {
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
            thread_memory_magazine1_reload (tmem, ix);
        }
      mem = thread_memory_magazine1_alloc (tmem, ix);
    }
  else if (acat == 2)           /* allocate through slab allocator */
    {
      g_mutex_lock (&allocator->slab_mutex);
      mem = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else                          /* delegate to system malloc */
    mem = g_malloc (mem_size);
  if (G_UNLIKELY (allocator->config.debug_blocks))
    smc_notify_alloc (mem, mem_size);

  TRACE (GLIB_SLICE_ALLOC((void*)mem, mem_size));

  return mem;
}

/**
 * g_slice_alloc0:
 * @block_size: the number of bytes to allocate
 *
 * Allocates a block of memory via g_slice_alloc() and initializes
 * the returned memory to 0. Note that the underlying slice allocation
 * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * Returns: a pointer to the allocated block, which will be %NULL if and only
 *     if @block_size is 0
 *
 * Since: 2.10
 */
gpointer
g_slice_alloc0 (gsize mem_size)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memset (mem, 0, mem_size);
  return mem;
}

/**
 * g_slice_copy:
 * @block_size: the number of bytes to allocate
 * @mem_block: the memory to copy
 *
 * Allocates a block of memory from the slice allocator
 * and copies @block_size bytes into it from @mem_block.
 *
 * @mem_block must be non-%NULL if @block_size is non-zero.
 *
 * Returns: a pointer to the allocated memory block, which will be %NULL if and
 *     only if @block_size is 0
 *
 * Since: 2.14
 */
gpointer
g_slice_copy (gsize         mem_size,
              gconstpointer mem_block)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memcpy (mem, mem_block, mem_size);
  return mem;
}

/**
 * g_slice_free1:
 * @block_size: the size of the block
 * @mem_block: a pointer to the block to free
 *
 * Frees a block of memory.
 *
 * The memory must have been allocated via g_slice_alloc() or
 * g_slice_alloc0() and the @block_size has to match the size
 * specified upon allocation. Note that the exact release behaviour
 * can be changed with the [`G_DEBUG=gc-friendly`][G_DEBUG] environment
 * variable, also see [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem_block is %NULL, this function does nothing.
 *
 * Since: 2.10
 */
void
g_slice_free1 (gsize    mem_size,
               gpointer mem_block)
{
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_UNLIKELY (!mem_block))
    return;
  if (G_UNLIKELY (allocator->config.debug_blocks) &&
      !smc_notify_free (mem_block, mem_size))
    abort();
  if (G_LIKELY (acat == 1))     /* release through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            thread_memory_magazine2_unload (tmem, ix);
        }
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      thread_memory_magazine2_free (tmem, ix, mem_block);
    }
  else if (acat == 2)           /* release through slab allocator */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      g_mutex_lock (&allocator->slab_mutex);
      slab_allocator_free_chunk (chunk_size, mem_block);
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else                          /* delegate to system malloc */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, mem_size);
      g_free (mem_block);
    }
  TRACE (GLIB_SLICE_FREE((void*)mem_block, mem_size));
}

/**
 * g_slice_free_chain_with_offset:
 * @block_size: the size of the blocks
 * @mem_chain: a pointer to the first block of the chain
 * @next_offset: the offset of the @next field in the blocks
 *
 * Frees a linked list of memory blocks of equal size @block_size.
 *
 * The memory blocks must be equal-sized, allocated via
 * g_slice_alloc() or g_slice_alloc0() and linked together by a
 * @next pointer (similar to #GSList). The offset of the @next
 * field in each block is passed as third argument.
 * Note that the exact release behaviour can be changed with the
 * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
 * [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem_chain is %NULL, this function does nothing.
 *
 * Since: 2.10
 */
void
g_slice_free_chain_with_offset (gsize    mem_size,
                                gpointer mem_chain,
                                gsize    next_offset)
{
  gpointer slice = mem_chain;
  /* while the thread magazines and the magazine cache are implemented so that
   * they can easily be extended to allow for free lists containing more free
   * lists for the first level nodes, which would allow O(1) freeing in this
   * function, the benefit of such an extension is questionable, because:
   * - the magazine size counts will become mere lower bounds which confuses
   *   the code adapting to lock contention;
   * - freeing a single node to the thread magazines is very fast, so this
   *   O(list_length) operation is multiplied by a fairly small factor;
   * - memory usage histograms on larger applications seem to indicate that
   *   the amount of released multi node lists is negligible in comparison
   *   to single node releases.
   * - the major performance bottleneck, namely g_private_get() or
   *   g_mutex_lock()/g_mutex_unlock() has already been moved out of the
   *   inner loop for freeing chained slices.
   */
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* release through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (allocator->config.debug_blocks) &&
              !smc_notify_free (current, mem_size))
            abort();
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            {
              thread_memory_swap_magazines (tmem, ix);
              if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
                thread_memory_magazine2_unload (tmem, ix);
            }
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          thread_memory_magazine2_free (tmem, ix, current);
        }
    }
  else if (acat == 2)           /* release through slab allocator */
    {
      g_mutex_lock (&allocator->slab_mutex);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (allocator->config.debug_blocks) &&
              !smc_notify_free (current, mem_size))
            abort();
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          slab_allocator_free_chunk (chunk_size, current);
        }
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else                          /* delegate to system malloc */
    while (slice)
      {
        guint8 *current = slice;
        slice = *(gpointer*) (current + next_offset);
        if (G_UNLIKELY (allocator->config.debug_blocks) &&
            !smc_notify_free (current, mem_size))
          abort();
        if (G_UNLIKELY (g_mem_gc_friendly))
          memset (current, 0, mem_size);
        g_free (current);
      }
}

/* --- single page allocator --- */
static void
allocator_slab_stack_push (Allocator *allocator,
                           guint      ix,
                           SlabInfo  *sinfo)
{
  /* insert slab at slab ring head */
  if (!allocator->slab_stack[ix])
    {
      sinfo->next = sinfo;
      sinfo->prev = sinfo;
    }
  else
    {
      SlabInfo *next = allocator->slab_stack[ix], *prev = next->prev;
      next->prev = sinfo;
      prev->next = sinfo;
      sinfo->next = next;
      sinfo->prev = prev;
    }
  allocator->slab_stack[ix] = sinfo;
}

static gsize
allocator_aligned_page_size (Allocator *allocator,
                             gsize      n_bytes)
{
  gsize val = 1 << g_bit_storage (n_bytes - 1);
  val = MAX (val, allocator->min_page_size);
  return val;
}
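
/* e.g. n_bytes == 5000: g_bit_storage (4999) == 13, so val == 1 << 13 == 8192,
 * clamped from below by min_page_size.
 */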

static void
allocator_add_slab (Allocator *allocator,
                    guint      ix,
                    gsize      chunk_size)
{
  ChunkLink *chunk;
  SlabInfo *sinfo;
  gsize addr, padding, n_chunks, color = 0;
  gsize page_size;
  int errsv;
  gpointer aligned_memory;
  guint8 *mem;
  guint i;

  page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  /* allocate 1 page for the chunks and the slab */
  aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
  errsv = errno;
  mem = aligned_memory;

  if (!mem)
    {
      const gchar *syserr = strerror (errsv);
      mem_error ("failed to allocate %u bytes (alignment: %u): %s\n",
                 (guint) (page_size - NATIVE_MALLOC_PADDING), (guint) page_size, syserr);
    }
  /* mask page address */
  addr = ((gsize) mem / page_size) * page_size;
  /* assert alignment */
  mem_assert (aligned_memory == (gpointer) addr);
  /* basic slab info setup */
  sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
  sinfo->n_allocated = 0;
  sinfo->chunks = NULL;
  /* figure cache colorization */
  n_chunks = ((guint8*) sinfo - mem) / chunk_size;
  padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
  if (padding)
    {
      color = (allocator->color_accu * P2ALIGNMENT) % padding;
      allocator->color_accu += allocator->config.color_increment;
    }
  /* add chunks to free list */
  chunk = (ChunkLink*) (mem + color);
  sinfo->chunks = chunk;
  for (i = 0; i < n_chunks - 1; i++)
    {
      chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
      chunk = chunk->next;
    }
  chunk->next = NULL;   /* last chunk */
  /* add slab to slab ring */
  allocator_slab_stack_push (allocator, ix, sinfo);
}

static gpointer
slab_allocator_alloc_chunk (gsize chunk_size)
{
  ChunkLink *chunk;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  /* ensure non-empty slab */
  if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
    allocator_add_slab (allocator, ix, chunk_size);
  /* allocate chunk */
  chunk = allocator->slab_stack[ix]->chunks;
  allocator->slab_stack[ix]->chunks = chunk->next;
  allocator->slab_stack[ix]->n_allocated++;
  /* rotate empty slabs */
  if (!allocator->slab_stack[ix]->chunks)
    allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
  return chunk;
}

static void
slab_allocator_free_chunk (gsize    chunk_size,
                           gpointer mem)
{
  ChunkLink *chunk;
  gboolean was_empty;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  gsize addr = ((gsize) mem / page_size) * page_size;
  /* mask page address */
  guint8 *page = (guint8*) addr;
  SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
  /* assert valid chunk count */
  mem_assert (sinfo->n_allocated > 0);
  /* add chunk to free list */
  was_empty = sinfo->chunks == NULL;
  chunk = (ChunkLink*) mem;
  chunk->next = sinfo->chunks;
  sinfo->chunks = chunk;
  sinfo->n_allocated--;
  /* keep slab ring partially sorted, empty slabs at end */
  if (was_empty)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* insert slab at head */
      allocator_slab_stack_push (allocator, ix, sinfo);
    }
  /* eagerly free complete unused slabs */
  if (!sinfo->n_allocated)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* free slab */
      allocator_memfree (page_size, page);
    }
}

/* --- memalign implementation --- */
#ifdef HAVE_MALLOC_H
#include <malloc.h>             /* memalign() */
#endif

/* from config.h:
 * define HAVE_POSIX_MEMALIGN 1     // if free(posix_memalign(3)) works, <stdlib.h>
 * define HAVE_MEMALIGN       1     // if free(memalign(3)) works, <malloc.h>
 * define HAVE_VALLOC         1     // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
 * if none is provided, we implement malloc(3)-based alloc-only page alignment
 */

#if !(HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
static GTrashStack *compat_valloc_trash = NULL;
#endif

static gpointer
allocator_memalign (gsize alignment,
                    gsize memsize)
{
  gpointer aligned_memory = NULL;
  gint err = ENOMEM;
#if     HAVE_POSIX_MEMALIGN
  err = posix_memalign (&aligned_memory, alignment, memsize);
#elif   HAVE_MEMALIGN
  errno = 0;
  aligned_memory = memalign (alignment, memsize);
  err = errno;
#elif   HAVE_VALLOC
  errno = 0;
  aligned_memory = valloc (memsize);
  err = errno;
#else
  /* simplistic non-freeing page allocator */
  mem_assert (alignment == sys_page_size);
  mem_assert (memsize <= sys_page_size);
  if (!compat_valloc_trash)
    {
      const guint n_pages = 16;
      guint8 *mem = malloc (n_pages * sys_page_size);
      err = errno;
      if (mem)
        {
          gint i = n_pages;
          guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
          if (amem != mem)
            i--;        /* mem wasn't page aligned */
          G_GNUC_BEGIN_IGNORE_DEPRECATIONS
          while (--i >= 0)
            g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
          G_GNUC_END_IGNORE_DEPRECATIONS
        }
    }
  G_GNUC_BEGIN_IGNORE_DEPRECATIONS
  aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
  G_GNUC_END_IGNORE_DEPRECATIONS
#endif
  if (!aligned_memory)
    errno = err;
  return aligned_memory;
}

static void
allocator_memfree (gsize    memsize,
                   gpointer mem)
{
#if     HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
  free (mem);
#else
  mem_assert (memsize <= sys_page_size);
  G_GNUC_BEGIN_IGNORE_DEPRECATIONS
  g_trash_stack_push (&compat_valloc_trash, mem);
  G_GNUC_END_IGNORE_DEPRECATIONS
#endif
}

static void
mem_error (const char *format,
           ...)
{
  const char *pname;
  va_list args;
  /* at least, put out "MEMORY-ERROR", in case we segfault during the rest of the function */
  fputs ("\n***MEMORY-ERROR***: ", stderr);
  pname = g_get_prgname();
  g_fprintf (stderr, "%s[%ld]: GSlice: ", pname ? pname : "", (long)getpid());
  va_start (args, format);
  g_vfprintf (stderr, format, args);
  va_end (args);
  fputs ("\n", stderr);
  abort();
  _exit (1);
}

/* --- g-slice memory checker tree --- */
typedef size_t SmcKType;                /* key type */
typedef size_t SmcVType;                /* value type */
typedef struct {
  SmcKType key;
  SmcVType value;
} SmcEntry;
static void             smc_tree_insert      (SmcKType  key,
                                              SmcVType  value);
static gboolean         smc_tree_lookup      (SmcKType  key,
                                              SmcVType *value_p);
static gboolean         smc_tree_remove      (SmcKType  key);

/* --- g-slice memory checker implementation --- */
static void
smc_notify_alloc (void   *pointer,
                  size_t  size)
{
  size_t address = (size_t) pointer;
  if (pointer)
    smc_tree_insert (address, size);
}

#if 0
static void
smc_notify_ignore (void *pointer)
{
  size_t address = (size_t) pointer;
  if (pointer)
    smc_tree_remove (address);
}
#endif

static int
smc_notify_free (void   *pointer,
                 size_t  size)
{
  size_t address = (size_t) pointer;
  SmcVType real_size;
  gboolean found_one;

  if (!pointer)
    return 1; /* ignore */
  found_one = smc_tree_lookup (address, &real_size);
  if (!found_one)
    {
      g_fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
      return 0;
    }
  if (real_size != size && (real_size || size))
    {
      g_fprintf (stderr, "GSlice: MemChecker: attempt to release block with invalid size: %p size=%" G_GSIZE_FORMAT " invalid-size=%" G_GSIZE_FORMAT "\n", pointer, real_size, size);
      return 0;
    }
  if (!smc_tree_remove (address))
    {
      g_fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
      return 0;
    }
  return 1; /* all fine */
}

/* --- g-slice memory checker tree implementation --- */
#define SMC_TRUNK_COUNT     (4093 /* 16381 */)          /* prime, to distribute trunk collisions (big, allocated just once) */
#define SMC_BRANCH_COUNT    (511)                       /* prime, to distribute branch collisions */
#define SMC_TRUNK_EXTENT    (SMC_BRANCH_COUNT * 2039)   /* key address space per trunk, should distribute uniformly across BRANCH_COUNT */
#define SMC_TRUNK_HASH(k)   ((k / SMC_TRUNK_EXTENT) % SMC_TRUNK_COUNT)  /* generate new trunk hash per megabyte (roughly) */
#define SMC_BRANCH_HASH(k)  (k % SMC_BRANCH_COUNT)
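
/* e.g. for a block address used as key: SMC_TRUNK_EXTENT == 511 * 2039 ==
 * 1041929 (~1MB), so the trunk index is (key / 1041929) % 4093 and the branch
 * index is key % 511; addresses within the same ~1MB region share a trunk but
 * spread across branches.
 */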

typedef struct {
  SmcEntry    *entries;
  unsigned int n_entries;
} SmcBranch;

static SmcBranch     **smc_tree_root = NULL;

static void
smc_tree_abort (int errval)
{
  const char *syserr = strerror (errval);
  mem_error ("MemChecker: failure in debugging tree: %s", syserr);
}

static inline SmcEntry*
smc_tree_branch_grow_L (SmcBranch   *branch,
                        unsigned int index)
{
  unsigned int old_size = branch->n_entries * sizeof (branch->entries[0]);
  unsigned int new_size = old_size + sizeof (branch->entries[0]);
  SmcEntry *entry;
  mem_assert (index <= branch->n_entries);
  branch->entries = (SmcEntry*) realloc (branch->entries, new_size);
  if (!branch->entries)
    smc_tree_abort (errno);
  entry = branch->entries + index;
  memmove (entry + 1, entry, (branch->n_entries - index) * sizeof (entry[0]));
  branch->n_entries += 1;
  return entry;
}

static inline SmcEntry*
smc_tree_branch_lookup_nearest_L (SmcBranch *branch,
                                  SmcKType   key)
{
  unsigned int n_nodes = branch->n_entries, offs = 0;
  SmcEntry *check = branch->entries;
  int cmp = 0;
  while (offs < n_nodes)
    {
      unsigned int i = (offs + n_nodes) >> 1;
      check = branch->entries + i;
      cmp = key < check->key ? -1 : key != check->key;
      if (cmp == 0)
        return check;                   /* return exact match */
      else if (cmp < 0)
        n_nodes = i;
      else /* (cmp > 0) */
        offs = i + 1;
    }
  /* check points at last mismatch, cmp > 0 indicates greater key */
  return cmp > 0 ? check + 1 : check;   /* return insertion position for inexact match */
}

static void
smc_tree_insert (SmcKType key,
                 SmcVType value)
{
  unsigned int ix0, ix1;
  SmcEntry *entry;

  g_mutex_lock (&smc_tree_mutex);
  ix0 = SMC_TRUNK_HASH (key);
  ix1 = SMC_BRANCH_HASH (key);
  if (!smc_tree_root)
    {
      smc_tree_root = calloc (SMC_TRUNK_COUNT, sizeof (smc_tree_root[0]));
      if (!smc_tree_root)
        smc_tree_abort (errno);
    }
  if (!smc_tree_root[ix0])
    {
      smc_tree_root[ix0] = calloc (SMC_BRANCH_COUNT, sizeof (smc_tree_root[0][0]));
      if (!smc_tree_root[ix0])
        smc_tree_abort (errno);
    }
  entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
  if (!entry ||                                                                       /* need create */
      entry >= smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries || /* need append */
      entry->key != key)                                                              /* need insert */
    entry = smc_tree_branch_grow_L (&smc_tree_root[ix0][ix1], entry - smc_tree_root[ix0][ix1].entries);
  entry->key = key;
  entry->value = value;
  g_mutex_unlock (&smc_tree_mutex);
}

static gboolean
smc_tree_lookup (SmcKType  key,
                 SmcVType *value_p)
{
  SmcEntry *entry = NULL;
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  *value_p = 0;
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
    {
      entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
      if (entry &&
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          entry->key == key)
        {
          found_one = TRUE;
          *value_p = entry->value;
        }
    }
  g_mutex_unlock (&smc_tree_mutex);
  return found_one;
}

static gboolean
smc_tree_remove (SmcKType key)
{
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
    {
      SmcEntry *entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
      if (entry &&
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          entry->key == key)
        {
          unsigned int i = entry - smc_tree_root[ix0][ix1].entries;
          smc_tree_root[ix0][ix1].n_entries -= 1;
          memmove (entry, entry + 1, (smc_tree_root[ix0][ix1].n_entries - i) * sizeof (entry[0]));
          if (!smc_tree_root[ix0][ix1].n_entries)
            {
              /* avoid useless pressure on the memory system */
              free (smc_tree_root[ix0][ix1].entries);
              smc_tree_root[ix0][ix1].entries = NULL;
            }
          found_one = TRUE;
        }
    }
  g_mutex_unlock (&smc_tree_mutex);
  return found_one;
}

#ifdef G_ENABLE_DEBUG
void
g_slice_debug_tree_statistics (void)
{
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root)
    {
      unsigned int i, j, t = 0, o = 0, b = 0, su = 0, ex = 0, en = 4294967295u;
      double tf, bf;
      for (i = 0; i < SMC_TRUNK_COUNT; i++)
        if (smc_tree_root[i])
          {
            t++;
            for (j = 0; j < SMC_BRANCH_COUNT; j++)
              if (smc_tree_root[i][j].n_entries)
                {
                  b++;
                  su += smc_tree_root[i][j].n_entries;
                  en = MIN (en, smc_tree_root[i][j].n_entries);
                  ex = MAX (ex, smc_tree_root[i][j].n_entries);
                }
              else if (smc_tree_root[i][j].entries)
                o++; /* formerly used, now empty */
          }
      en = b ? en : 0;
      tf = MAX (t, 1.0); /* max(1) to be a valid divisor */
      bf = MAX (b, 1.0); /* max(1) to be a valid divisor */
      g_fprintf (stderr, "GSlice: MemChecker: %u trunks, %u branches, %u old branches\n", t, b, o);
      g_fprintf (stderr, "GSlice: MemChecker: %f branches per trunk, %.2f%% utilization\n",
                 b / tf,
                 100.0 - (SMC_BRANCH_COUNT - b / tf) / (0.01 * SMC_BRANCH_COUNT));
      g_fprintf (stderr, "GSlice: MemChecker: %f entries per branch, %u minimum, %u maximum\n",
                 su / bf, en, ex);
    }
  else
    g_fprintf (stderr, "GSlice: MemChecker: root=NULL\n");
  g_mutex_unlock (&smc_tree_mutex);
}

/* sample statistics (beast + GSlice + 24h scripted core & GUI activity):
 *  PID %CPU %MEM    VSZ    RSS COMMAND
 * 8887 30.3 45.8 456068 414856 beast-0.7.1 empty.bse
 * $ cat /proc/8887/statm # total-program-size resident-set-size shared-pages text/code data/stack library dirty-pages
 * 114017 103714 2354 344 0 108676 0
 * $ cat /proc/8887/status
 * Name:    beast-0.7.1
 * VmSize:   456068 kB
 * VmLck:         0 kB
 * VmRSS:    414856 kB
 * VmData:   434620 kB
 * VmStk:        84 kB
 * VmExe:      1376 kB
 * VmLib:     13036 kB
 * VmPTE:       456 kB
 * Threads:       3
 * (gdb) print g_slice_debug_tree_statistics ()
 * GSlice: MemChecker: 422 trunks, 213068 branches, 0 old branches
 * GSlice: MemChecker: 504.900474 branches per trunk, 98.81% utilization
 * GSlice: MemChecker: 4.965039 entries per branch, 1 minimum, 37 maximum
 */
#endif /* G_ENABLE_DEBUG */