From a56821893c2f23eed656c94086ba7814c4a73e31 Mon Sep 17 00:00:00 2001
From: Natanael Copa <ncopa@alpinelinux.org>
Date: Wed, 5 Oct 2022 13:05:58 +0200
Subject: [PATCH] gslice: remove slice allocator

Keep the API for ABI compatibility.
fixes https://gitlab.gnome.org/GNOME/glib/-/issues/1079
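
What remains of the public entry points is a thin wrapper around the
system allocator. As a sketch of the resulting behaviour (assuming
<glib.h>; the TRACE() probes kept by the real hunks below are omitted
here, and the hunks are authoritative):

  gpointer
  g_slice_alloc (gsize mem_size)
  {
    /* every slice allocation now comes straight from g_malloc() */
    return g_malloc (mem_size);
  }

  void
  g_slice_free1 (gsize mem_size,
                 gpointer mem_block)
  {
    if (G_UNLIKELY (!mem_block))
      return;
    /* G_DEBUG=gc-friendly is still honoured */
    if (G_UNLIKELY (g_mem_gc_friendly))
      memset (mem_block, 0, mem_size);
    g_free (mem_block);
  }

g_slice_set_config() and g_slice_get_config() are kept as no-op stubs
so existing callers keep compiling and linking.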
---
 glib/gslice.c | 1560 +------------------------------------------------
 1 file changed, 20 insertions(+), 1540 deletions(-)

diff --git a/glib/gslice.c b/glib/gslice.c
index 36fa0841f..bf4f099b3 100644
--- a/glib/gslice.c
+++ b/glib/gslice.c
@@ -21,320 +21,23 @@
 #include "config.h"
 #include "glibconfig.h"
-#if defined(HAVE_POSIX_MEMALIGN) && !defined(_XOPEN_SOURCE)
-#define _XOPEN_SOURCE 600 /* posix_memalign() */
-#endif
-#include <stdlib.h> /* posix_memalign() */
 #include <string.h>
-#include <errno.h>
-#ifdef G_OS_UNIX
-#include <unistd.h> /* sysconf() */
-#endif
-#ifdef G_OS_WIN32
-#include <windows.h>
-#include <process.h>
-#endif
-#include <stdio.h> /* fputs */
 #include "gslice.h"
-#include "gmain.h"
 #include "gmem.h" /* gslice.h */
-#include "gstrfuncs.h"
-#include "gutils.h"
-#include "gtrashstack.h"
-#include "gtestutils.h"
-#include "gthread.h"
-#include "gthreadprivate.h"
 #include "glib_trace.h"
-#include "gprintf.h"
-#include "gvalgrind.h"
-/**
- * SECTION:memory_slices
- * @title: Memory Slices
- * @short_description: efficient way to allocate groups of equal-sized
- * chunks of memory
- *
- * Memory slices provide a space-efficient and multi-processing scalable
- * way to allocate equal-sized pieces of memory, just like the original
- * #GMemChunks (from GLib 2.8), while avoiding their excessive
- * memory-waste, scalability and performance problems.
- *
- * To achieve these goals, the slice allocator uses a sophisticated,
- * layered design that has been inspired by Bonwick's slab allocator
- * ([Bonwick94](http://citeseer.ist.psu.edu/bonwick94slab.html)
- * Jeff Bonwick, The slab allocator: An object-caching kernel
- * memory allocator. USENIX 1994, and
- * [Bonwick01](http://citeseer.ist.psu.edu/bonwick01magazines.html)
- * Bonwick and Jonathan Adams, Magazines and vmem: Extending the
- * slab allocator to many cpu's and arbitrary resources. USENIX 2001)
- *
- * It uses posix_memalign() to optimize allocations of many equally-sized
- * chunks, and has per-thread free lists (the so-called magazine layer)
- * to quickly satisfy allocation requests of already known structure sizes.
- * This is accompanied by extra caching logic to keep freed memory around
- * for some time before returning it to the system. Memory that is unused
- * due to alignment constraints is used for cache colorization (random
- * distribution of chunk addresses) to improve CPU cache utilization. The
- * caching layer of the slice allocator adapts itself to high lock contention
- * to improve scalability.
- *
- * The slice allocator can allocate blocks as small as two pointers, and
- * unlike malloc(), it does not reserve extra space per block. For large block
- * sizes, g_slice_new() and g_slice_alloc() will automatically delegate to the
- * system malloc() implementation. For newly written code it is recommended
- * to use the new `g_slice` API instead of g_malloc() and
- * friends, as long as objects are not resized during their lifetime and the
- * object size used at allocation time is still available when freeing.
- *
- * Here is an example for using the slice allocator:
- * |[<!-- language="C" -->
- * gchar *mem[10000];
- * gint i;
- *
- * // Allocate 10000 blocks.
- * for (i = 0; i < 10000; i++)
- * {
- * mem[i] = g_slice_alloc (50);
- *
- * // Fill in the memory with some junk.
- * for (j = 0; j < 50; j++)
- * mem[i][j] = i * j;
- * }
- * // Now free all of the blocks.
- * for (i = 0; i < 10000; i++)
- * g_slice_free1 (50, mem[i]);
- * ]|
- * And here is an example for using the using the slice allocator
- * with data structures:
- * |[<!-- language="C" -->
- * GRealArray *array;
- * // Allocate one block, using the g_slice_new() macro.
- * array = g_slice_new (GRealArray);
- * // We can now use array just like a normal pointer to a structure.
- * array->data = NULL;
- * array->len = 0;
- * array->alloc = 0;
- * array->zero_terminated = (zero_terminated ? 1 : 0);
- * array->clear = (clear ? 1 : 0);
- * array->elt_size = elt_size;
- * // We can free the block, so it can be reused.
- * g_slice_free (GRealArray, array);
- * ]|
- */
-/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
- * allocator and magazine extensions as outlined in:
- * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
- * memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
- * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
- * slab allocator to many cpu's and arbitrary resources.
- * USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
- * the layers are:
- * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
- * of recently freed and soon to be allocated chunks is maintained per thread.
- * this way, most alloc/free requests can be quickly satisfied from per-thread
- * free lists which only require one g_private_get() call to retrieve the
- * thread handle.
- * - the magazine cache. allocating and freeing chunks to/from threads only
- * occurs at magazine sizes from a global depot of magazines. the depot
- * maintaines a 15 second working set of allocated magazines, so full
- * magazines are not allocated and released too often.
- * the chunk size dependent magazine sizes automatically adapt (within limits,
- * see [3]) to lock contention to properly scale performance across a variety
- * of SMP systems.
- * - the slab allocator. this allocator allocates slabs (blocks of memory) close
- * to the system page size or multiples thereof which have to be page aligned.
- * the blocks are divided into smaller chunks which are used to satisfy
- * allocations from the upper layers. the space provided by the reminder of
- * the chunk size division is used for cache colorization (random distribution
- * of chunk addresses) to improve processor cache utilization. multiple slabs
- * with the same chunk size are kept in a partially sorted ring to allow O(1)
- * freeing and allocation of chunks (as long as the allocation of an entirely
- * new slab can be avoided).
- * - the page allocator. on most modern systems, posix_memalign(3) or
- * memalign(3) should be available, so this is used to allocate blocks with
- * system page size based alignments and sizes or multiples thereof.
- * if no memalign variant is provided, valloc() is used instead and
- * block sizes are limited to the system page size (no multiples thereof).
- * as a fallback, on system without even valloc(), a malloc(3)-based page
- * allocator with alloc-only behaviour is used.
- * NOTES:
- * [1] some systems memalign(3) implementations may rely on boundary tagging for
- * the handed out memory chunks. to avoid excessive page-wise fragmentation,
- * we reserve 2 * sizeof (void*) per block size for the systems memalign(3),
- * specified in NATIVE_MALLOC_PADDING.
- * [2] using the slab allocator alone already provides for a fast and efficient
- * allocator, it doesn't properly scale beyond single-threaded uses though.
- * also, the slab allocator implements eager free(3)-ing, i.e. does not
- * provide any form of caching or working set maintenance. so if used alone,
- * it's vulnerable to trashing for sequences of balanced (alloc, free) pairs
- * at certain thresholds.
- * [3] magazine sizes are bound by an implementation specific minimum size and
- * a chunk size specific maximum to limit magazine storage sizes to roughly
- * 16KB.
- * [4] allocating ca. 8 chunks per block/page keeps a good balance between
- * external and internal fragmentation (<= 12.5%). [Bonwick94]
- */
-/* --- macros and constants --- */
-#define LARGEALIGNMENT (256)
-#define P2ALIGNMENT (2 * sizeof (gsize)) /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
-#define ALIGN(size, base) ((base) * (gsize) (((size) + (base) - 1) / (base)))
-#define NATIVE_MALLOC_PADDING P2ALIGNMENT /* per-page padding left for native malloc(3) see [1] */
-#define SLAB_INFO_SIZE P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
-#define MAX_MAGAZINE_SIZE (256) /* see [3] and allocator_get_magazine_threshold() for this */
-#define MIN_MAGAZINE_SIZE (4)
-#define MAX_STAMP_COUNTER (7) /* distributes the load of gettimeofday() */
-#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8) /* we want at last 8 chunks per page, see [4] */
-#define MAX_SLAB_INDEX(al) (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
-#define SLAB_INDEX(al, asize) ((asize) / P2ALIGNMENT - 1) /* asize must be P2ALIGNMENT aligned */
-#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
-#define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)
-/* optimized version of ALIGN (size, P2ALIGNMENT) */
-#if GLIB_SIZEOF_SIZE_T * 2 == 8 /* P2ALIGNMENT */
-#define P2ALIGN(size) (((size) + 0x7) & ~(gsize) 0x7)
-#elif GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
-#define P2ALIGN(size) (((size) + 0xf) & ~(gsize) 0xf)
-#else
-#define P2ALIGN(size) ALIGN (size, P2ALIGNMENT)
-#endif
-/* special helpers to avoid gmessage.c dependency */
-static void mem_error (const char *format, ...) G_GNUC_PRINTF (1,2);
-#define mem_assert(cond) do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)
-/* --- structures --- */
-typedef struct _ChunkLink ChunkLink;
-typedef struct _SlabInfo SlabInfo;
-typedef struct _CachedMagazine CachedMagazine;
-struct _ChunkLink {
- ChunkLink *next;
- ChunkLink *data;
-struct _SlabInfo {
- ChunkLink *chunks;
- guint n_allocated;
- SlabInfo *next, *prev;
-typedef struct {
- ChunkLink *chunks;
- gsize count; /* approximative chunks list length */
-} Magazine;
-typedef struct {
- Magazine *magazine1; /* array of MAX_SLAB_INDEX (allocator) */
- Magazine *magazine2; /* array of MAX_SLAB_INDEX (allocator) */
-} ThreadMemory;
-typedef struct {
- gboolean always_malloc;
- gboolean bypass_magazines;
- gboolean debug_blocks;
- gsize working_set_msecs;
- guint color_increment;
-} SliceConfig;
-typedef struct {
- /* const after initialization */
- gsize min_page_size, max_page_size;
- SliceConfig config;
- gsize max_slab_chunk_size_for_magazine_cache;
- /* magazine cache */
- GMutex magazine_mutex;
- ChunkLink **magazines; /* array of MAX_SLAB_INDEX (allocator) */
- guint *contention_counters; /* array of MAX_SLAB_INDEX (allocator) */
- gint mutex_counter;
- guint stamp_counter;
- guint last_stamp;
- /* slab allocator */
- GMutex slab_mutex;
- SlabInfo **slab_stack; /* array of MAX_SLAB_INDEX (allocator) */
- guint color_accu;
-} Allocator;
-/* --- g-slice prototypes --- */
-static gpointer slab_allocator_alloc_chunk (gsize chunk_size);
-static void slab_allocator_free_chunk (gsize chunk_size,
- gpointer mem);
-static void private_thread_memory_cleanup (gpointer data);
-static gpointer allocator_memalign (gsize alignment,
- gsize memsize);
-static void allocator_memfree (gsize memsize,
- gpointer mem);
-static inline void magazine_cache_update_stamp (void);
-static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
- guint ix);
-/* --- g-slice memory checker --- */
-static void smc_notify_alloc (void *pointer,
- size_t size);
-static int smc_notify_free (void *pointer,
- size_t size);
-/* --- variables --- */
-static GPrivate private_thread_memory = G_PRIVATE_INIT (private_thread_memory_cleanup);
-static gsize sys_page_size = 0;
-static Allocator allocator[1] = { { 0, }, };
-static SliceConfig slice_config = {
- FALSE, /* always_malloc */
- FALSE, /* bypass_magazines */
- FALSE, /* debug_blocks */
- 15 * 1000, /* working_set_msecs */
- 1, /* color increment, alt: 0x7fffffff */
-static GMutex smc_tree_mutex; /* mutex for G_SLICE=debug-blocks */
 /* --- auxiliary functions --- */
 void
 g_slice_set_config (GSliceConfig ckey,
 gint64 value)
- g_return_if_fail (sys_page_size == 0);
- switch (ckey)
- case G_SLICE_CONFIG_ALWAYS_MALLOC:
- slice_config.always_malloc = value != 0;
- break;
- case G_SLICE_CONFIG_BYPASS_MAGAZINES:
- slice_config.bypass_magazines = value != 0;
- break;
- case G_SLICE_CONFIG_WORKING_SET_MSECS:
- slice_config.working_set_msecs = value;
- break;
- case G_SLICE_CONFIG_COLOR_INCREMENT:
- slice_config.color_increment = value;
- break;
- default: ;
+{ }
 gint64
 g_slice_get_config (GSliceConfig ckey)
- switch (ckey)
- case G_SLICE_CONFIG_ALWAYS_MALLOC:
- return slice_config.always_malloc;
- case G_SLICE_CONFIG_BYPASS_MAGAZINES:
- return slice_config.bypass_magazines;
- case G_SLICE_CONFIG_WORKING_SET_MSECS:
- return slice_config.working_set_msecs;
- case G_SLICE_CONFIG_CHUNK_SIZES:
- return MAX_SLAB_INDEX (allocator);
- case G_SLICE_CONFIG_COLOR_INCREMENT:
- return slice_config.color_increment;
- default:
- return 0;
+ return 0;
 gint64*
@@ -342,566 +45,7 @@ g_slice_get_config_state (GSliceConfig ckey,
 gint64 address,
 guint *n_values)
- guint i = 0;
- g_return_val_if_fail (n_values != NULL, NULL);
- *n_values = 0;
- switch (ckey)
- gint64 array[64];
- case G_SLICE_CONFIG_CONTENTION_COUNTER:
- array[i++] = SLAB_CHUNK_SIZE (allocator, address);
- array[i++] = allocator->contention_counters[address];
- array[i++] = allocator_get_magazine_threshold (allocator, address);
- *n_values = i;
- return g_memdup2 (array, sizeof (array[0]) * *n_values);
- default:
- return NULL;
-static void
-slice_config_init (SliceConfig *config)
- const gchar *val;
- gchar *val_allocated = NULL;
- *config = slice_config;
- /* Note that the empty string (`G_SLICE=""`) is treated differently from the
- * envvar being unset. In the latter case, we also check whether running under
- * valgrind. */
-#ifndef G_OS_WIN32
- val = g_getenv ("G_SLICE");
-#else
- /* The win32 implementation of g_getenv() has to do UTF-8 ↔ UTF-16 conversions
- * which use the slice allocator, leading to deadlock. Use a simple in-place
- * implementation here instead.
- * Ignore references to other environment variables: only support values which
- * are a combination of always-malloc and debug-blocks. */
- wchar_t wvalue[128]; /* at least big enough for `always-malloc,debug-blocks` */
- gsize len;
- len = GetEnvironmentVariableW (L"G_SLICE", wvalue, G_N_ELEMENTS (wvalue));
- if (len == 0)
- if (GetLastError () == ERROR_ENVVAR_NOT_FOUND)
- val = NULL;
- else
- val = "";
- else if (len >= G_N_ELEMENTS (wvalue))
- /* @wvalue isn’t big enough. Give up. */
- g_warning ("Unsupported G_SLICE value");
- val = NULL;
- else
- /* it’s safe to use g_utf16_to_utf8() here as it only allocates using
- * malloc() rather than GSlice */
- val = val_allocated = g_utf16_to_utf8 (wvalue, -1, NULL, NULL, NULL);
-#endif /* G_OS_WIN32 */
- if (val != NULL)
- gint flags;
- const GDebugKey keys[] = {
- { "always-malloc", 1 << 0 },
- { "debug-blocks", 1 << 1 },
- };
- flags = g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
- if (flags & (1 << 0))
- config->always_malloc = TRUE;
- if (flags & (1 << 1))
- config->debug_blocks = TRUE;
- else
- /* G_SLICE was not specified, so check if valgrind is running and
- * disable ourselves if it is.
- * This way it's possible to force gslice to be enabled under
- * valgrind just by setting G_SLICE to the empty string.
- */
-#ifdef ENABLE_VALGRIND
- if (RUNNING_ON_VALGRIND)
- config->always_malloc = TRUE;
-#endif
- g_free (val_allocated);
-static void
-g_slice_init_nomessage (void)
- /* we may not use g_error() or friends here */
- mem_assert (sys_page_size == 0);
- mem_assert (MIN_MAGAZINE_SIZE >= 4);
-#ifdef G_OS_WIN32
- SYSTEM_INFO system_info;
- GetSystemInfo (&system_info);
- sys_page_size = system_info.dwPageSize;
-#else
- sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
-#endif
- mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
- mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
- slice_config_init (&allocator->config);
- allocator->min_page_size = sys_page_size;
-#if HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN
- /* allow allocation of pages up to 8KB (with 8KB alignment).
- * this is useful because many medium to large sized structures
- * fit less than 8 times (see [4]) into 4KB pages.
- * we allow very small page sizes here, to reduce wastage in
- * threads if only small allocations are required (this does
- * bear the risk of increasing allocation times and fragmentation
- * though).
- */
- allocator->min_page_size = MAX (allocator->min_page_size, 4096);
- allocator->max_page_size = MAX (allocator->min_page_size, 8192);
- allocator->min_page_size = MIN (allocator->min_page_size, 128);
-#else
- /* we can only align to system page size */
- allocator->max_page_size = sys_page_size;
-#endif
- if (allocator->config.always_malloc)
- allocator->contention_counters = NULL;
- allocator->magazines = NULL;
- allocator->slab_stack = NULL;
- else
- allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
- allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
- allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
- allocator->mutex_counter = 0;
- allocator->stamp_counter = MAX_STAMP_COUNTER; /* force initial update */
- allocator->last_stamp = 0;
- allocator->color_accu = 0;
- magazine_cache_update_stamp();
- /* values cached for performance reasons */
- allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
- if (allocator->config.always_malloc || allocator->config.bypass_magazines)
- allocator->max_slab_chunk_size_for_magazine_cache = 0; /* non-optimized cases */
-static inline guint
-allocator_categorize (gsize aligned_chunk_size)
- /* speed up the likely path */
- if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
- return 1; /* use magazine cache */
- if (!allocator->config.always_malloc &&
- aligned_chunk_size &&
- aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
- if (allocator->config.bypass_magazines)
- return 2; /* use slab allocator, see [2] */
- return 1; /* use magazine cache */
- return 0; /* use malloc() */
-static inline void
-g_mutex_lock_a (GMutex *mutex,
- guint *contention_counter)
- gboolean contention = FALSE;
- if (!g_mutex_trylock (mutex))
- g_mutex_lock (mutex);
- contention = TRUE;
- if (contention)
- allocator->mutex_counter++;
- if (allocator->mutex_counter >= 1) /* quickly adapt to contention */
- allocator->mutex_counter = 0;
- *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
- else /* !contention */
- allocator->mutex_counter--;
- if (allocator->mutex_counter < -11) /* moderately recover magazine sizes */
- allocator->mutex_counter = 0;
- *contention_counter = MAX (*contention_counter, 1) - 1;
-static inline ThreadMemory*
-thread_memory_from_self (void)
- ThreadMemory *tmem = g_private_get (&private_thread_memory);
- if (G_UNLIKELY (!tmem))
- static GMutex init_mutex;
- guint n_magazines;
- g_mutex_lock (&init_mutex);
- if G_UNLIKELY (sys_page_size == 0)
- g_slice_init_nomessage ();
- g_mutex_unlock (&init_mutex);
- n_magazines = MAX_SLAB_INDEX (allocator);
- tmem = g_private_set_alloc0 (&private_thread_memory, sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
- tmem->magazine1 = (Magazine*) (tmem + 1);
- tmem->magazine2 = &tmem->magazine1[n_magazines];
- return tmem;
-static inline ChunkLink*
-magazine_chain_pop_head (ChunkLink **magazine_chunks)
- /* magazine chains are linked via ChunkLink->next.
- * each ChunkLink->data of the toplevel chain may point to a subchain,
- * linked via ChunkLink->next. ChunkLink->data of the subchains just
- * contains uninitialized junk.
- */
- ChunkLink *chunk = (*magazine_chunks)->data;
- if (G_UNLIKELY (chunk))
- /* allocating from freed list */
- (*magazine_chunks)->data = chunk->next;
- else
- chunk = *magazine_chunks;
- *magazine_chunks = chunk->next;
- return chunk;
-#if 0 /* useful for debugging */
-static guint
-magazine_count (ChunkLink *head)
- guint count = 0;
- if (!head)
- return 0;
- while (head)
- ChunkLink *child = head->data;
- count += 1;
- for (child = head->data; child; child = child->next)
- count += 1;
- head = head->next;
- return count;
-#endif
-static inline gsize
-allocator_get_magazine_threshold (Allocator *local_allocator,
- guint ix)
- /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
- * which is required by the implementation. also, for moderately sized chunks
- * (say >= 64 bytes), magazine sizes shouldn't be much smaller then the number
- * of chunks available per page/2 to avoid excessive traffic in the magazine
- * cache for small to medium sized structures.
- * the upper bound of the magazine size is effectively provided by
- * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
- * the content of a single magazine doesn't exceed ca. 16KB.
- */
- gsize chunk_size = SLAB_CHUNK_SIZE (local_allocator, ix);
- guint threshold = MAX (MIN_MAGAZINE_SIZE, local_allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
- guint contention_counter = local_allocator->contention_counters[ix];
- if (G_UNLIKELY (contention_counter)) /* single CPU bias */
- /* adapt contention counter thresholds to chunk sizes */
- contention_counter = contention_counter * 64 / chunk_size;
- threshold = MAX (threshold, contention_counter);
- return threshold;
-/* --- magazine cache --- */
-static inline void
-magazine_cache_update_stamp (void)
- if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
- gint64 now_us = g_get_real_time ();
- allocator->last_stamp = now_us / 1000; /* milli seconds */
- allocator->stamp_counter = 0;
- else
- allocator->stamp_counter++;
-static inline ChunkLink*
-magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
- ChunkLink *chunk1;
- ChunkLink *chunk2;
- ChunkLink *chunk3;
- ChunkLink *chunk4;
- /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
- /* ensure a magazine with at least 4 unused data pointers */
- chunk1 = magazine_chain_pop_head (&magazine_chunks);
- chunk2 = magazine_chain_pop_head (&magazine_chunks);
- chunk3 = magazine_chain_pop_head (&magazine_chunks);
- chunk4 = magazine_chain_pop_head (&magazine_chunks);
- chunk4->next = magazine_chunks;
- chunk3->next = chunk4;
- chunk2->next = chunk3;
- chunk1->next = chunk2;
- return chunk1;
-/* access the first 3 fields of a specially prepared magazine chain */
-#define magazine_chain_prev(mc) ((mc)->data)
-#define magazine_chain_stamp(mc) ((mc)->next->data)
-#define magazine_chain_uint_stamp(mc) GPOINTER_TO_UINT ((mc)->next->data)
-#define magazine_chain_next(mc) ((mc)->next->next->data)
-#define magazine_chain_count(mc) ((mc)->next->next->next->data)
-static void
-magazine_cache_trim (Allocator *local_allocator,
- guint ix,
- guint stamp)
- /* g_mutex_lock (local_allocator->mutex); done by caller */
- /* trim magazine cache from tail */
- ChunkLink *current = magazine_chain_prev (local_allocator->magazines[ix]);
- ChunkLink *trash = NULL;
- while (!G_APPROX_VALUE (stamp, magazine_chain_uint_stamp (current),
- local_allocator->config.working_set_msecs))
- /* unlink */
- ChunkLink *prev = magazine_chain_prev (current);
- ChunkLink *next = magazine_chain_next (current);
- magazine_chain_next (prev) = next;
- magazine_chain_prev (next) = prev;
- /* clear special fields, put on trash stack */
- magazine_chain_next (current) = NULL;
- magazine_chain_count (current) = NULL;
- magazine_chain_stamp (current) = NULL;
- magazine_chain_prev (current) = trash;
- trash = current;
- /* fixup list head if required */
- if (current == local_allocator->magazines[ix])
- local_allocator->magazines[ix] = NULL;
- break;
- current = prev;
- g_mutex_unlock (&local_allocator->magazine_mutex);
- /* free trash */
- if (trash)
- const gsize chunk_size = SLAB_CHUNK_SIZE (local_allocator, ix);
- g_mutex_lock (&local_allocator->slab_mutex);
- while (trash)
- current = trash;
- trash = magazine_chain_prev (current);
- magazine_chain_prev (current) = NULL; /* clear special field */
- while (current)
- ChunkLink *chunk = magazine_chain_pop_head (&current);
- slab_allocator_free_chunk (chunk_size, chunk);
- g_mutex_unlock (&local_allocator->slab_mutex);
-static void
-magazine_cache_push_magazine (guint ix,
- ChunkLink *magazine_chunks,
- gsize count) /* must be >= MIN_MAGAZINE_SIZE */
- ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
- ChunkLink *next, *prev;
- g_mutex_lock (&allocator->magazine_mutex);
- /* add magazine at head */
- next = allocator->magazines[ix];
- if (next)
- prev = magazine_chain_prev (next);
- else
- next = prev = current;
- magazine_chain_next (prev) = current;
- magazine_chain_prev (next) = current;
- magazine_chain_prev (current) = prev;
- magazine_chain_next (current) = next;
- magazine_chain_count (current) = (gpointer) count;
- /* stamp magazine */
- magazine_cache_update_stamp();
- magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
- allocator->magazines[ix] = current;
- /* free old magazines beyond a certain threshold */
- magazine_cache_trim (allocator, ix, allocator->last_stamp);
- /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
-static ChunkLink*
-magazine_cache_pop_magazine (guint ix,
- gsize *countp)
- g_mutex_lock_a (&allocator->magazine_mutex, &allocator->contention_counters[ix]);
- if (!allocator->magazines[ix])
- guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
- gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
- ChunkLink *chunk, *head;
- g_mutex_unlock (&allocator->magazine_mutex);
- g_mutex_lock (&allocator->slab_mutex);
- head = slab_allocator_alloc_chunk (chunk_size);
- head->data = NULL;
- chunk = head;
- for (i = 1; i < magazine_threshold; i++)
- chunk->next = slab_allocator_alloc_chunk (chunk_size);
- chunk = chunk->next;
- chunk->data = NULL;
- chunk->next = NULL;
- g_mutex_unlock (&allocator->slab_mutex);
- *countp = i;
- return head;
- else
- ChunkLink *current = allocator->magazines[ix];
- ChunkLink *prev = magazine_chain_prev (current);
- ChunkLink *next = magazine_chain_next (current);
- /* unlink */
- magazine_chain_next (prev) = next;
- magazine_chain_prev (next) = prev;
- allocator->magazines[ix] = next == current ? NULL : next;
- g_mutex_unlock (&allocator->magazine_mutex);
- /* clear special fields and hand out */
- *countp = (gsize) magazine_chain_count (current);
- magazine_chain_prev (current) = NULL;
- magazine_chain_next (current) = NULL;
- magazine_chain_count (current) = NULL;
- magazine_chain_stamp (current) = NULL;
- return current;
-/* --- thread magazines --- */
-static void
-private_thread_memory_cleanup (gpointer data)
- ThreadMemory *tmem = data;
- const guint n_magazines = MAX_SLAB_INDEX (allocator);
- guint ix;
- for (ix = 0; ix < n_magazines; ix++)
- Magazine *mags[2];
- guint j;
- mags[0] = &tmem->magazine1[ix];
- mags[1] = &tmem->magazine2[ix];
- for (j = 0; j < 2; j++)
- Magazine *mag = mags[j];
- if (mag->count >= MIN_MAGAZINE_SIZE)
- magazine_cache_push_magazine (ix, mag->chunks, mag->count);
- else
- const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
- g_mutex_lock (&allocator->slab_mutex);
- while (mag->chunks)
- ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
- slab_allocator_free_chunk (chunk_size, chunk);
- g_mutex_unlock (&allocator->slab_mutex);
- g_free (tmem);
-static void
-thread_memory_magazine1_reload (ThreadMemory *tmem,
- guint ix)
- Magazine *mag = &tmem->magazine1[ix];
- mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
- mag->count = 0;
- mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
-static void
-thread_memory_magazine2_unload (ThreadMemory *tmem,
- guint ix)
- Magazine *mag = &tmem->magazine2[ix];
- magazine_cache_push_magazine (ix, mag->chunks, mag->count);
- mag->chunks = NULL;
- mag->count = 0;
-static inline void
-thread_memory_swap_magazines (ThreadMemory *tmem,
- guint ix)
- Magazine xmag = tmem->magazine1[ix];
- tmem->magazine1[ix] = tmem->magazine2[ix];
- tmem->magazine2[ix] = xmag;
-static inline gboolean
-thread_memory_magazine1_is_empty (ThreadMemory *tmem,
- guint ix)
- return tmem->magazine1[ix].chunks == NULL;
-static inline gboolean
-thread_memory_magazine2_is_full (ThreadMemory *tmem,
- guint ix)
- return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
-static inline gpointer
-thread_memory_magazine1_alloc (ThreadMemory *tmem,
- guint ix)
- Magazine *mag = &tmem->magazine1[ix];
- ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
- if (G_LIKELY (mag->count > 0))
- mag->count--;
- return chunk;
-static inline void
-thread_memory_magazine2_free (ThreadMemory *tmem,
- guint ix,
- gpointer mem)
- Magazine *mag = &tmem->magazine2[ix];
- ChunkLink *chunk = mem;
- chunk->data = NULL;
- chunk->next = mag->chunks;
- mag->chunks = chunk;
- mag->count++;
+ return NULL;
 /* --- API functions --- */
@@ -915,9 +59,7 @@ thread_memory_magazine2_free (ThreadMemory *tmem,
 * It calls g_slice_alloc() with `sizeof (@type)` and casts the
 * returned pointer to a pointer of the given type, avoiding a type
- * cast in the source code. Note that the underlying slice allocation
- * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
- * environment variable.
+ * cast in the source code.
 * This can never return %NULL as the minimum allocation size from
 * `sizeof (@type)` is 1 byte.
@@ -938,9 +80,6 @@ thread_memory_magazine2_free (ThreadMemory *tmem,
 * It calls g_slice_alloc0() with `sizeof (@type)`
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
- * Note that the underlying slice allocation mechanism can
- * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
- * environment variable.
 * This can never return %NULL as the minimum allocation size from
 * `sizeof (@type)` is 1 byte.
@@ -962,9 +101,6 @@ thread_memory_magazine2_free (ThreadMemory *tmem,
 * It calls g_slice_copy() with `sizeof (@type)`
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
- * Note that the underlying slice allocation mechanism can
- * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
- * environment variable.
 * This can never return %NULL.
@@ -985,8 +121,7 @@ thread_memory_magazine2_free (ThreadMemory *tmem,
 * It calls g_slice_free1() using `sizeof (type)`
 * as the block size.
 * Note that the exact release behaviour can be changed with the
- * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
- * [`G_SLICE`][G_SLICE] for related debugging options.
+ * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable.
 * If @mem is %NULL, this macro does nothing.
@@ -1006,8 +141,7 @@ thread_memory_magazine2_free (ThreadMemory *tmem,
 * a @next pointer (similar to #GSList). The name of the
 * @next field in @type is passed as third argument.
 * Note that the exact release behaviour can be changed with the
- * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
- * [`G_SLICE`][G_SLICE] for related debugging options.
+ * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable.
 * If @mem_chain is %NULL, this function does nothing.
@@ -1018,17 +152,7 @@ thread_memory_magazine2_free (ThreadMemory *tmem,
 * g_slice_alloc:
 * @block_size: the number of bytes to allocate
- * Allocates a block of memory from the slice allocator.
- * The block address handed out can be expected to be aligned
- * to at least `1 * sizeof (void*)`, though in general slices
- * are `2 * sizeof (void*)` bytes aligned; if a `malloc()`
- * fallback implementation is used instead, the alignment may
- * be reduced in a libc dependent fashion.
- * Note that the underlying slice allocation mechanism can
- * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
- * environment variable.
+ * Allocates a block of memory from the libc allocator.
 * Returns: a pointer to the allocated memory block, which will
 * be %NULL if and only if @mem_size is 0
@@ -1038,43 +162,9 @@ thread_memory_magazine2_free (ThreadMemory *tmem,
 gpointer
 g_slice_alloc (gsize mem_size)
- ThreadMemory *tmem;
- gsize chunk_size;
 gpointer mem;
- guint acat;
- /* This gets the private structure for this thread. If the private
- * structure does not yet exist, it is created.
- * This has a side effect of causing GSlice to be initialised, so it
- * must come first.
- */
- tmem = thread_memory_from_self ();
- chunk_size = P2ALIGN (mem_size);
- acat = allocator_categorize (chunk_size);
- if (G_LIKELY (acat == 1)) /* allocate through magazine layer */
- guint ix = SLAB_INDEX (allocator, chunk_size);
- if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
- thread_memory_swap_magazines (tmem, ix);
- if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
- thread_memory_magazine1_reload (tmem, ix);
- mem = thread_memory_magazine1_alloc (tmem, ix);
- else if (acat == 2) /* allocate through slab allocator */
- g_mutex_lock (&allocator->slab_mutex);
- mem = slab_allocator_alloc_chunk (chunk_size);
- g_mutex_unlock (&allocator->slab_mutex);
- else /* delegate to system malloc */
- mem = g_malloc (mem_size);
- if (G_UNLIKELY (allocator->config.debug_blocks))
- smc_notify_alloc (mem, mem_size);
+ mem = g_malloc (mem_size);
 TRACE (GLIB_SLICE_ALLOC((void*)mem, mem_size));
 return mem;
@@ -1085,9 +175,7 @@ g_slice_alloc (gsize mem_size)
 * @block_size: the number of bytes to allocate
 * Allocates a block of memory via g_slice_alloc() and initializes
- * the returned memory to 0. Note that the underlying slice allocation
- * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
- * environment variable.
+ * the returned memory to 0.
 * Returns: a pointer to the allocated block, which will be %NULL if and only
 * if @mem_size is 0
@@ -1139,7 +227,7 @@ g_slice_copy (gsize mem_size,
 * g_slice_alloc0() and the @block_size has to match the size
 * specified upon allocation. Note that the exact release behaviour
 * can be changed with the [`G_DEBUG=gc-friendly`][G_DEBUG] environment
- * variable, also see [`G_SLICE`][G_SLICE] for related debugging options.
+ * variable.
 * If @mem_block is %NULL, this function does nothing.
@@ -1149,41 +237,11 @@ void
 g_slice_free1 (gsize mem_size,
 gpointer mem_block)
- gsize chunk_size = P2ALIGN (mem_size);
- guint acat = allocator_categorize (chunk_size);
 if (G_UNLIKELY (!mem_block))
 return;
- if (G_UNLIKELY (allocator->config.debug_blocks) &&
- !smc_notify_free (mem_block, mem_size))
- abort();
- if (G_LIKELY (acat == 1)) /* allocate through magazine layer */
- ThreadMemory *tmem = thread_memory_from_self();
- guint ix = SLAB_INDEX (allocator, chunk_size);
- if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
- thread_memory_swap_magazines (tmem, ix);
- if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
- thread_memory_magazine2_unload (tmem, ix);
- if (G_UNLIKELY (g_mem_gc_friendly))
- memset (mem_block, 0, chunk_size);
- thread_memory_magazine2_free (tmem, ix, mem_block);
- else if (acat == 2) /* allocate through slab allocator */
- if (G_UNLIKELY (g_mem_gc_friendly))
- memset (mem_block, 0, chunk_size);
- g_mutex_lock (&allocator->slab_mutex);
- slab_allocator_free_chunk (chunk_size, mem_block);
- g_mutex_unlock (&allocator->slab_mutex);
- else /* delegate to system malloc */
- if (G_UNLIKELY (g_mem_gc_friendly))
- memset (mem_block, 0, mem_size);
- g_free (mem_block);
+ if (G_UNLIKELY (g_mem_gc_friendly))
+ memset (mem_block, 0, mem_size);
+ g_free (mem_block);
 TRACE (GLIB_SLICE_FREE((void*)mem_block, mem_size));
@@ -1200,8 +258,7 @@ g_slice_free1 (gsize mem_size,
 * @next pointer (similar to #GSList). The offset of the @next
 * field in each block is passed as third argument.
 * Note that the exact release behaviour can be changed with the
- * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
- * [`G_SLICE`][G_SLICE] for related debugging options.
+ * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable.
 * If @mem_chain is %NULL, this function does nothing.
@@ -1213,589 +270,12 @@ g_slice_free_chain_with_offset (gsize mem_size,
 gsize next_offset)
 gpointer slice = mem_chain;
- /* while the thread magazines and the magazine cache are implemented so that
- * they can easily be extended to allow for free lists containing more free
- * lists for the first level nodes, which would allow O(1) freeing in this
- * function, the benefit of such an extension is questionable, because:
- * - the magazine size counts will become mere lower bounds which confuses
- * the code adapting to lock contention;
- * - freeing a single node to the thread magazines is very fast, so this
- * O(list_length) operation is multiplied by a fairly small factor;
- * - memory usage histograms on larger applications seem to indicate that
- * the amount of released multi node lists is negligible in comparison
- * to single node releases.
- * - the major performance bottle neck, namely g_private_get() or
- * g_mutex_lock()/g_mutex_unlock() has already been moved out of the
- * inner loop for freeing chained slices.
- */
- gsize chunk_size = P2ALIGN (mem_size);
- guint acat = allocator_categorize (chunk_size);
- if (G_LIKELY (acat == 1)) /* allocate through magazine layer */
- ThreadMemory *tmem = thread_memory_from_self();
- guint ix = SLAB_INDEX (allocator, chunk_size);
- while (slice)
- guint8 *current = slice;
- slice = *(gpointer*) (current + next_offset);
- if (G_UNLIKELY (allocator->config.debug_blocks) &&
- !smc_notify_free (current, mem_size))
- abort();
- if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
- thread_memory_swap_magazines (tmem, ix);
- if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
- thread_memory_magazine2_unload (tmem, ix);
- if (G_UNLIKELY (g_mem_gc_friendly))
- memset (current, 0, chunk_size);
- thread_memory_magazine2_free (tmem, ix, current);
- else if (acat == 2) /* allocate through slab allocator */
- g_mutex_lock (&allocator->slab_mutex);
- while (slice)
- guint8 *current = slice;
- slice = *(gpointer*) (current + next_offset);
- if (G_UNLIKELY (allocator->config.debug_blocks) &&
- !smc_notify_free (current, mem_size))
- abort();
- if (G_UNLIKELY (g_mem_gc_friendly))
- memset (current, 0, chunk_size);
- slab_allocator_free_chunk (chunk_size, current);
- g_mutex_unlock (&allocator->slab_mutex);
- else /* delegate to system malloc */
- while (slice)
- guint8 *current = slice;
- slice = *(gpointer*) (current + next_offset);
- if (G_UNLIKELY (allocator->config.debug_blocks) &&
- !smc_notify_free (current, mem_size))
- abort();
- if (G_UNLIKELY (g_mem_gc_friendly))
- memset (current, 0, mem_size);
- g_free (current);
-/* --- single page allocator --- */
-static void
-allocator_slab_stack_push (Allocator *local_allocator,
- guint ix,
- SlabInfo *sinfo)
- /* insert slab at slab ring head */
- if (!local_allocator->slab_stack[ix])
- sinfo->next = sinfo;
- sinfo->prev = sinfo;
- else
- SlabInfo *next = local_allocator->slab_stack[ix], *prev = next->prev;
- next->prev = sinfo;
- prev->next = sinfo;
- sinfo->next = next;
- sinfo->prev = prev;
- local_allocator->slab_stack[ix] = sinfo;
-static gsize
-allocator_aligned_page_size (Allocator *local_allocator,
- gsize n_bytes)
- gsize val = (gsize) 1 << g_bit_storage (n_bytes - 1);
- val = MAX (val, local_allocator->min_page_size);
- return val;
-static void
-allocator_add_slab (Allocator *local_allocator,
- guint ix,
- gsize chunk_size)
- ChunkLink *chunk;
- SlabInfo *sinfo;
- gsize addr, padding, n_chunks, color = 0;
- gsize page_size;
- int errsv;
- gpointer aligned_memory;
- guint8 *mem;
- guint i;
- page_size = allocator_aligned_page_size (local_allocator, SLAB_BPAGE_SIZE (local_allocator, chunk_size));
- /* allocate 1 page for the chunks and the slab */
- aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
- errsv = errno;
- mem = aligned_memory;
- if (!mem)
- const gchar *syserr = strerror (errsv);
- mem_error ("failed to allocate %u bytes (alignment: %u): %s\n",
- (guint) (page_size - NATIVE_MALLOC_PADDING), (guint) page_size, syserr);
- /* mask page address */
- addr = ((gsize) mem / page_size) * page_size;
- /* assert alignment */
- mem_assert (aligned_memory == (gpointer) addr);
- /* basic slab info setup */
- sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
- sinfo->n_allocated = 0;
- sinfo->chunks = NULL;
- /* figure cache colorization */
- n_chunks = ((guint8*) sinfo - mem) / chunk_size;
- padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
- if (padding)
- color = (local_allocator->color_accu * P2ALIGNMENT) % padding;
- local_allocator->color_accu += local_allocator->config.color_increment;
- /* add chunks to free list */
- chunk = (ChunkLink*) (mem + color);
- sinfo->chunks = chunk;
- for (i = 0; i < n_chunks - 1; i++)
- chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
- chunk = chunk->next;
- chunk->next = NULL; /* last chunk */
- /* add slab to slab ring */
- allocator_slab_stack_push (local_allocator, ix, sinfo);
-static gpointer
-slab_allocator_alloc_chunk (gsize chunk_size)
- ChunkLink *chunk;
- guint ix = SLAB_INDEX (allocator, chunk_size);
- /* ensure non-empty slab */
- if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
- allocator_add_slab (allocator, ix, chunk_size);
- /* allocate chunk */
- chunk = allocator->slab_stack[ix]->chunks;
- allocator->slab_stack[ix]->chunks = chunk->next;
- allocator->slab_stack[ix]->n_allocated++;
- /* rotate empty slabs */
- if (!allocator->slab_stack[ix]->chunks)
- allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
- return chunk;
-static void
-slab_allocator_free_chunk (gsize chunk_size,
- gpointer mem)
- ChunkLink *chunk;
- gboolean was_empty;
- guint ix = SLAB_INDEX (allocator, chunk_size);
- gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
- gsize addr = ((gsize) mem / page_size) * page_size;
- /* mask page address */
- guint8 *page = (guint8*) addr;
- SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
- /* assert valid chunk count */
- mem_assert (sinfo->n_allocated > 0);
- /* add chunk to free list */
- was_empty = sinfo->chunks == NULL;
- chunk = (ChunkLink*) mem;
- chunk->next = sinfo->chunks;
- sinfo->chunks = chunk;
- sinfo->n_allocated--;
- /* keep slab ring partially sorted, empty slabs at end */
- if (was_empty)
- /* unlink slab */
- SlabInfo *next = sinfo->next, *prev = sinfo->prev;
- next->prev = prev;
- prev->next = next;
- if (allocator->slab_stack[ix] == sinfo)
- allocator->slab_stack[ix] = next == sinfo ? NULL : next;
- /* insert slab at head */
- allocator_slab_stack_push (allocator, ix, sinfo);
- /* eagerly free complete unused slabs */
- if (!sinfo->n_allocated)
- /* unlink slab */
- SlabInfo *next = sinfo->next, *prev = sinfo->prev;
- next->prev = prev;
- prev->next = next;
- if (allocator->slab_stack[ix] == sinfo)
- allocator->slab_stack[ix] = next == sinfo ? NULL : next;
- /* free slab */
- allocator_memfree (page_size, page);
-/* --- memalign implementation --- */
-#ifdef HAVE_MALLOC_H
-#include <malloc.h> /* memalign() */
-#endif
-/* from config.h:
- * define HAVE_POSIX_MEMALIGN 1 // if free(posix_memalign(3)) works, <stdlib.h>
- * define HAVE_MEMALIGN 1 // if free(memalign(3)) works, <malloc.h>
- * define HAVE_VALLOC 1 // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
- * if none is provided, we implement malloc(3)-based alloc-only page alignment
- */
-#if !(HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
-G_GNUC_BEGIN_IGNORE_DEPRECATIONS
-static GTrashStack *compat_valloc_trash = NULL;
-G_GNUC_END_IGNORE_DEPRECATIONS
-#endif
-static gpointer
-allocator_memalign (gsize alignment,
- gsize memsize)
- gpointer aligned_memory = NULL;
- gint err = ENOMEM;
-#if HAVE_POSIX_MEMALIGN
- err = posix_memalign (&aligned_memory, alignment, memsize);
-#elif HAVE_MEMALIGN
- errno = 0;
- aligned_memory = memalign (alignment, memsize);
- err = errno;
-#elif HAVE_VALLOC
- errno = 0;
- aligned_memory = valloc (memsize);
- err = errno;
-#else
- /* simplistic non-freeing page allocator */
- mem_assert (alignment == sys_page_size);
- mem_assert (memsize <= sys_page_size);
- if (!compat_valloc_trash)
- const guint n_pages = 16;
- guint8 *mem = malloc (n_pages * sys_page_size);
- err = errno;
- if (mem)
- gint i = n_pages;
- guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
- if (amem != mem)
- i--; /* mem wasn't page aligned */
- G_GNUC_BEGIN_IGNORE_DEPRECATIONS
- while (--i >= 0)
- g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
- G_GNUC_END_IGNORE_DEPRECATIONS
- G_GNUC_BEGIN_IGNORE_DEPRECATIONS
- aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
- G_GNUC_END_IGNORE_DEPRECATIONS
-#endif
- if (!aligned_memory)
- errno = err;
- return aligned_memory;
-static void
-allocator_memfree (gsize memsize,
- gpointer mem)
-#if HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
- free (mem);
-#else
- mem_assert (memsize <= sys_page_size);
- G_GNUC_BEGIN_IGNORE_DEPRECATIONS
- g_trash_stack_push (&compat_valloc_trash, mem);
- G_GNUC_END_IGNORE_DEPRECATIONS
-#endif
-static void
-mem_error (const char *format,
- ...)
- const char *pname;
- va_list args;
- /* at least, put out "MEMORY-ERROR", in case we segfault during the rest of the function */
- fputs ("\n***MEMORY-ERROR***: ", stderr);
- pname = g_get_prgname();
- g_fprintf (stderr, "%s[%ld]: GSlice: ", pname ? pname : "", (long)getpid());
- va_start (args, format);
- g_vfprintf (stderr, format, args);
- va_end (args);
- fputs ("\n", stderr);
- abort();
- _exit (1);
-/* --- g-slice memory checker tree --- */
-typedef size_t SmcKType; /* key type */
-typedef size_t SmcVType; /* value type */
-typedef struct {
- SmcKType key;
- SmcVType value;
-} SmcEntry;
-static void smc_tree_insert (SmcKType key,
- SmcVType value);
-static gboolean smc_tree_lookup (SmcKType key,
- SmcVType *value_p);
-static gboolean smc_tree_remove (SmcKType key);
-/* --- g-slice memory checker implementation --- */
-static void
-smc_notify_alloc (void *pointer,
- size_t size)
- size_t address = (size_t) pointer;
- if (pointer)
- smc_tree_insert (address, size);
-#if 0
-static void
-smc_notify_ignore (void *pointer)
- size_t address = (size_t) pointer;
- if (pointer)
- smc_tree_remove (address);
-#endif
-static int
-smc_notify_free (void *pointer,
- size_t size)
- size_t address = (size_t) pointer;
- SmcVType real_size;
- gboolean found_one;
- if (!pointer)
- return 1; /* ignore */
- found_one = smc_tree_lookup (address, &real_size);
- if (!found_one)
- g_fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
- return 0;
- if (real_size != size && (real_size || size))
- g_fprintf (stderr, "GSlice: MemChecker: attempt to release block with invalid size: %p size=%" G_GSIZE_FORMAT " invalid-size=%" G_GSIZE_FORMAT "\n", pointer, real_size, size);
- return 0;
- if (!smc_tree_remove (address))
- g_fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
- return 0;
- return 1; /* all fine */
-/* --- g-slice memory checker tree implementation --- */
-#define SMC_TRUNK_COUNT (4093 /* 16381 */) /* prime, to distribute trunk collisions (big, allocated just once) */
-#define SMC_BRANCH_COUNT (511) /* prime, to distribute branch collisions */
-#define SMC_TRUNK_EXTENT (SMC_BRANCH_COUNT * 2039) /* key address space per trunk, should distribute uniformly across BRANCH_COUNT */
-#define SMC_TRUNK_HASH(k) ((k / SMC_TRUNK_EXTENT) % SMC_TRUNK_COUNT) /* generate new trunk hash per megabyte (roughly) */
-#define SMC_BRANCH_HASH(k) (k % SMC_BRANCH_COUNT)
-typedef struct {
- SmcEntry *entries;
- unsigned int n_entries;
-} SmcBranch;
-static SmcBranch **smc_tree_root = NULL;
-static void
-smc_tree_abort (int errval)
- const char *syserr = strerror (errval);
- mem_error ("MemChecker: failure in debugging tree: %s", syserr);
-static inline SmcEntry*
-smc_tree_branch_grow_L (SmcBranch *branch,
- unsigned int index)
- unsigned int old_size = branch->n_entries * sizeof (branch->entries[0]);
- unsigned int new_size = old_size + sizeof (branch->entries[0]);
- SmcEntry *entry;
- mem_assert (index <= branch->n_entries);
- branch->entries = (SmcEntry*) realloc (branch->entries, new_size);
- if (!branch->entries)
- smc_tree_abort (errno);
- entry = branch->entries + index;
- memmove (entry + 1, entry, (branch->n_entries - index) * sizeof (entry[0]));
- branch->n_entries += 1;
- return entry;
-static inline SmcEntry*
-smc_tree_branch_lookup_nearest_L (SmcBranch *branch,
- SmcKType key)
- unsigned int n_nodes = branch->n_entries, offs = 0;
- SmcEntry *check = branch->entries;
- int cmp = 0;
- while (offs < n_nodes)
- unsigned int i = (offs + n_nodes) >> 1;
- check = branch->entries + i;
- cmp = key < check->key ? -1 : key != check->key;
- if (cmp == 0)
- return check; /* return exact match */
- else if (cmp < 0)
- n_nodes = i;
- else /* (cmp > 0) */
- offs = i + 1;
- /* check points at last mismatch, cmp > 0 indicates greater key */
- return cmp > 0 ? check + 1 : check; /* return insertion position for inexact match */
-static void
-smc_tree_insert (SmcKType key,
- SmcVType value)
- unsigned int ix0, ix1;
- SmcEntry *entry;
- g_mutex_lock (&smc_tree_mutex);
- ix0 = SMC_TRUNK_HASH (key);
- ix1 = SMC_BRANCH_HASH (key);
- if (!smc_tree_root)
- smc_tree_root = calloc (SMC_TRUNK_COUNT, sizeof (smc_tree_root[0]));
- if (!smc_tree_root)
- smc_tree_abort (errno);
- if (!smc_tree_root[ix0])
+ while (slice)
- smc_tree_root[ix0] = calloc (SMC_BRANCH_COUNT, sizeof (smc_tree_root[0][0]));
- if (!smc_tree_root[ix0])
- smc_tree_abort (errno);
- entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
- if (!entry || /* need create */
- entry >= smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries || /* need append */
- entry->key != key) /* need insert */
- entry = smc_tree_branch_grow_L (&smc_tree_root[ix0][ix1], entry - smc_tree_root[ix0][ix1].entries);
- entry->key = key;
- entry->value = value;
- g_mutex_unlock (&smc_tree_mutex);
-static gboolean
-smc_tree_lookup (SmcKType key,
- SmcVType *value_p)
- SmcEntry *entry = NULL;
- unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
- gboolean found_one = FALSE;
- *value_p = 0;
- g_mutex_lock (&smc_tree_mutex);
- if (smc_tree_root && smc_tree_root[ix0])
- entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
- if (entry &&
- entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
- entry->key == key)
- found_one = TRUE;
- *value_p = entry->value;
- g_mutex_unlock (&smc_tree_mutex);
- return found_one;
-static gboolean
-smc_tree_remove (SmcKType key)
- unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
- gboolean found_one = FALSE;
- g_mutex_lock (&smc_tree_mutex);
- if (smc_tree_root && smc_tree_root[ix0])
- SmcEntry *entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
- if (entry &&
- entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
- entry->key == key)
- unsigned int i = entry - smc_tree_root[ix0][ix1].entries;
- smc_tree_root[ix0][ix1].n_entries -= 1;
- memmove (entry, entry + 1, (smc_tree_root[ix0][ix1].n_entries - i) * sizeof (entry[0]));
- if (!smc_tree_root[ix0][ix1].n_entries)
- /* avoid useless pressure on the memory system */
- free (smc_tree_root[ix0][ix1].entries);
- smc_tree_root[ix0][ix1].entries = NULL;
- found_one = TRUE;
- g_mutex_unlock (&smc_tree_mutex);
- return found_one;
-#ifdef G_ENABLE_DEBUG
-void
-g_slice_debug_tree_statistics (void)
- g_mutex_lock (&smc_tree_mutex);
- if (smc_tree_root)
- unsigned int i, j, t = 0, o = 0, b = 0, su = 0, ex = 0, en = 4294967295u;
- double tf, bf;
- for (i = 0; i < SMC_TRUNK_COUNT; i++)
- if (smc_tree_root[i])
- t++;
- for (j = 0; j < SMC_BRANCH_COUNT; j++)
- if (smc_tree_root[i][j].n_entries)
- b++;
- su += smc_tree_root[i][j].n_entries;
- en = MIN (en, smc_tree_root[i][j].n_entries);
- ex = MAX (ex, smc_tree_root[i][j].n_entries);
- else if (smc_tree_root[i][j].entries)
- o++; /* formerly used, now empty */
- en = b ? en : 0;
- tf = MAX (t, 1.0); /* max(1) to be a valid divisor */
- bf = MAX (b, 1.0); /* max(1) to be a valid divisor */
- g_fprintf (stderr, "GSlice: MemChecker: %u trunks, %u branches, %u old branches\n", t, b, o);
- g_fprintf (stderr, "GSlice: MemChecker: %f branches per trunk, %.2f%% utilization\n",
- b / tf,
- 100.0 - (SMC_BRANCH_COUNT - b / tf) / (0.01 * SMC_BRANCH_COUNT));
- g_fprintf (stderr, "GSlice: MemChecker: %f entries per branch, %u minimum, %u maximum\n",
- su / bf, en, ex);
+ guint8 *current = slice;
+ slice = *(gpointer*) (current + next_offset);
+ if (G_UNLIKELY (g_mem_gc_friendly))
+ memset (current, 0, mem_size);
+ g_free (current);
- else
- g_fprintf (stderr, "GSlice: MemChecker: root=NULL\n");
- g_mutex_unlock (&smc_tree_mutex);
- /* sample statistics (beast + GSLice + 24h scripted core & GUI activity):
- * PID %CPU %MEM VSZ RSS COMMAND
- * 8887 30.3 45.8 456068 414856 beast-0.7.1 empty.bse
- * $ cat /proc/8887/statm # total-program-size resident-set-size shared-pages text/code data/stack library dirty-pages
- * 114017 103714 2354 344 0 108676 0
- * $ cat /proc/8887/status
- * Name: beast-0.7.1
- * VmSize: 456068 kB
- * VmLck: 0 kB
- * VmRSS: 414856 kB
- * VmData: 434620 kB
- * VmStk: 84 kB
- * VmExe: 1376 kB
- * VmLib: 13036 kB
- * VmPTE: 456 kB
- * Threads: 3
- * (gdb) print g_slice_debug_tree_statistics ()
- * GSlice: MemChecker: 422 trunks, 213068 branches, 0 old branches
- * GSlice: MemChecker: 504.900474 branches per trunk, 98.81% utilization
- * GSlice: MemChecker: 4.965039 entries per branch, 1 minimum, 37 maximum
- */
-#endif /* G_ENABLE_DEBUG */
-- 
2.38.1