lib/talloc/talloc.c
1 /*
2 Samba Unix SMB/CIFS implementation.
4 Samba trivial allocation library - new interface
6 NOTE: Please read talloc_guide.txt for full documentation
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
13 ** under the LGPL
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
30 inspired by http://swapped.cc/halloc/
33 #include "replace.h"
34 #include "talloc.h"
36 #ifdef HAVE_SYS_AUXV_H
37 #include <sys/auxv.h>
38 #endif
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
42 #endif
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
46 #endif
48 /* Special macros that are no-ops except when run under Valgrind on
49 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51 /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
54 #include <valgrind.h>
55 #endif
57 /* use this to force every realloc to change the pointer, to stress test
58 code that might not cope */
59 #define ALWAYS_REALLOC 0
62 #define MAX_TALLOC_SIZE 0x10000000
64 #define TALLOC_FLAG_FREE 0x01
65 #define TALLOC_FLAG_LOOP 0x02
66 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
67 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
70 * Bits above this are random, used to make it harder to fake talloc
71 * headers during an attack. Try not to change this without good reason.
73 #define TALLOC_FLAG_MASK 0x0F
75 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
77 #define TALLOC_MAGIC_BASE 0xe814ec70
78 #define TALLOC_MAGIC_NON_RANDOM ( \
79 ~TALLOC_FLAG_MASK & ( \
80 TALLOC_MAGIC_BASE + \
81 (TALLOC_BUILD_VERSION_MAJOR << 24) + \
82 (TALLOC_BUILD_VERSION_MINOR << 16) + \
83 (TALLOC_BUILD_VERSION_RELEASE << 8)))
84 static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
86 /* by default we abort when given a bad pointer (such as when talloc_free() is called
87 on a pointer that came from malloc()) */
88 #ifndef TALLOC_ABORT
89 #define TALLOC_ABORT(reason) abort()
90 #endif
92 #ifndef discard_const_p
93 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
94 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
95 #else
96 # define discard_const_p(type, ptr) ((type *)(ptr))
97 #endif
98 #endif
100 /* these macros gain us a few percent of speed on gcc */
101 #if (__GNUC__ >= 3)
102 /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
103 as its first argument */
104 #ifndef likely
105 #define likely(x) __builtin_expect(!!(x), 1)
106 #endif
107 #ifndef unlikely
108 #define unlikely(x) __builtin_expect(!!(x), 0)
109 #endif
110 #else
111 #ifndef likely
112 #define likely(x) (x)
113 #endif
114 #ifndef unlikely
115 #define unlikely(x) (x)
116 #endif
117 #endif
119 /* this null_context is only used if talloc_enable_leak_report() or
120 talloc_enable_leak_report_full() is called, otherwise it remains
121 NULL
123 static void *null_context;
124 static bool talloc_report_null;
125 static bool talloc_report_null_full;
126 static void *autofree_context;
128 static void talloc_setup_atexit(void);
130 /* used to enable fill of memory on free, which can be useful for
131 * catching use after free errors when valgrind is too slow
133 static struct {
134 bool initialised;
135 bool enabled;
136 uint8_t fill_value;
137 } talloc_fill;
139 #define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
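/*
 * Usage sketch (not part of talloc.c, kept out of the build with #if 0):
 * how a test harness might turn on fill-on-free.  The value is read
 * lazily via getenv()/strtoul() on the first free, so it has to be set
 * before any talloc_free() call.  The function name and fill byte are
 * hypothetical.
 */
#if 0
static void example_enable_free_fill(void)
{
        /* every freed talloc chunk will be memset() to 0xfd */
        setenv(TALLOC_FILL_ENV, "0xfd", 1);
}
#endif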
142 * do not wipe the header, to allow the
143 * double-free logic to still work
145 #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
146 if (unlikely(talloc_fill.enabled)) { \
147 size_t _flen = (_tc)->size; \
148 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
149 memset(_fptr, talloc_fill.fill_value, _flen); \
151 } while (0)
153 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
154 /* Mark the whole chunk as not accessible */
155 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
156 size_t _flen = TC_HDR_SIZE + (_tc)->size; \
157 char *_fptr = (char *)(_tc); \
158 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
159 } while(0)
160 #else
161 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
162 #endif
164 #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
165 TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
166 TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
167 } while (0)
169 #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
170 if (unlikely(talloc_fill.enabled)) { \
171 size_t _flen = (_tc)->size - (_new_size); \
172 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
173 _fptr += (_new_size); \
174 memset(_fptr, talloc_fill.fill_value, _flen); \
176 } while (0)
178 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
179 /* Mark the unused bytes not accessible */
180 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
181 size_t _flen = (_tc)->size - (_new_size); \
182 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
183 _fptr += (_new_size); \
184 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
185 } while (0)
186 #else
187 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
188 #endif
190 #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
191 TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
192 TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
193 } while (0)
195 #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
196 if (unlikely(talloc_fill.enabled)) { \
197 size_t _flen = (_tc)->size - (_new_size); \
198 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
199 _fptr += (_new_size); \
200 memset(_fptr, talloc_fill.fill_value, _flen); \
202 } while (0)
204 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
205 /* Mark the unused bytes as undefined */
206 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
207 size_t _flen = (_tc)->size - (_new_size); \
208 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
209 _fptr += (_new_size); \
210 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
211 } while (0)
212 #else
213 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
214 #endif
216 #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
217 TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
218 TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
219 } while (0)
221 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
222 /* Mark the new bytes as undefined */
223 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
224 size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
225 size_t _new_used = TC_HDR_SIZE + (_new_size); \
226 size_t _flen = _new_used - _old_used; \
227 char *_fptr = _old_used + (char *)(_tc); \
228 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
229 } while (0)
230 #else
231 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
232 #endif
234 #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
235 TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
236 } while (0)
238 struct talloc_reference_handle {
239 struct talloc_reference_handle *next, *prev;
240 void *ptr;
241 const char *location;
244 struct talloc_memlimit {
245 struct talloc_chunk *parent;
246 struct talloc_memlimit *upper;
247 size_t max_size;
248 size_t cur_size;
251 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
252 static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
253 size_t size);
254 static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
255 size_t size);
256 static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
258 static inline void _tc_set_name_const(struct talloc_chunk *tc,
259 const char *name);
260 static struct talloc_chunk *_vasprintf_tc(const void *t,
261 const char *fmt,
262 va_list ap);
264 typedef int (*talloc_destructor_t)(void *);
266 struct talloc_pool_hdr;
268 struct talloc_chunk {
270 * flags includes the talloc magic, which is randomised to
271 * make overwrite attacks harder
273 unsigned flags;
276 * If you have a logical tree like:
278 * <parent>
279 * / | \
280 * / | \
281 * / | \
282 * <child 1> <child 2> <child 3>
284 * The actual talloc tree is:
286 * <parent>
288 * <child 1> - <child 2> - <child 3>
290 * The children are linked with next/prev pointers, and
291 * child 1 is linked to the parent with parent/child
292 * pointers.
295 struct talloc_chunk *next, *prev;
296 struct talloc_chunk *parent, *child;
297 struct talloc_reference_handle *refs;
298 talloc_destructor_t destructor;
299 const char *name;
300 size_t size;
303 * limit semantics:
304 * if 'limit' is set it means all *new* children of the context will
305 * be limited to a total aggregate size of max_size for memory
306 * allocations.
307 * cur_size is used to keep track of the current use
309 struct talloc_memlimit *limit;
312 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
313 * is a pointer to the struct talloc_chunk of the pool that it was
314 * allocated from. This way children can quickly find the pool to chew
315 * from.
317 struct talloc_pool_hdr *pool;
320 union talloc_chunk_cast_u {
321 uint8_t *ptr;
322 struct talloc_chunk *chunk;
325 /* 16 byte alignment seems to keep everyone happy */
326 #define TC_ALIGN16(s) (((s)+15)&~15)
327 #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
328 #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
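/*
 * Usage sketch (not part of talloc.c): how the logical tree described in
 * the struct talloc_chunk comment above looks from the caller's side.
 * Children hang off their context via the parent/child/next/prev links,
 * and talloc_parent() walks back to the owning context.  All names here
 * are hypothetical.
 */
#if 0
static void example_tree(void)
{
        void *parent = talloc_new(NULL);
        char *child1 = talloc_strdup(parent, "one");
        char *child2 = talloc_strdup(parent, "two");

        /* both children report the same owning context */
        if (talloc_parent(child1) == parent && talloc_parent(child2) == parent) {
                fprintf(stderr, "child1 and child2 hang off the same parent\n");
        }

        /* freeing the parent walks the child list and frees both strings */
        talloc_free(parent);
}
#endif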
330 _PUBLIC_ int talloc_version_major(void)
332 return TALLOC_VERSION_MAJOR;
335 _PUBLIC_ int talloc_version_minor(void)
337 return TALLOC_VERSION_MINOR;
340 _PUBLIC_ int talloc_test_get_magic(void)
342 return talloc_magic;
345 static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
346 const char *location)
349 * Mark this memory as free, and also over-stamp the talloc
350 * magic with the old-style magic.
352 * Why? This tries to avoid a memory read use-after-free from
353 * disclosing our talloc magic, which would then allow an
354 * attacker to prepare a valid header and so run a destructor.
357 tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
358 | (tc->flags & TALLOC_FLAG_MASK);
360 /* we mark the freed memory with where we called the free
361 * from. This means on a double free error we can report where
362 * the first free came from
364 if (location) {
365 tc->name = location;
369 static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
372 * Mark this memory as not free.
374 * Why? This is memory either in a pool (and so available for
375 * talloc's re-use) or memory being handed to realloc(). We need to mark
376 * the memory as free() before any realloc() call as we can't
377 * write to the memory after that.
379 * We put back the normal magic instead of the 'not random'
380 * magic.
383 tc->flags = talloc_magic |
384 ((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
387 static void (*talloc_log_fn)(const char *message);
389 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
391 talloc_log_fn = log_fn;
394 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
395 void talloc_lib_init(void) __attribute__((constructor));
396 void talloc_lib_init(void)
398 uint32_t random_value;
399 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
400 uint8_t *p;
402 * Use the kernel-provided random values used for
403 * ASLR. This won't change per-exec, which is ideal for us
405 p = (uint8_t *) getauxval(AT_RANDOM);
406 if (p) {
408 * We get 16 bytes from getauxval. By calling rand(),
409 * a totally insecure PRNG, but one that will
410 * deterministically have a different value when called
411 * twice, we ensure that if two talloc-like libraries
412 * are somehow loaded in the same address space, they
413 * will choose different bytes and so keep the
414 * protection against collision of multiple talloc
415 * libs.
417 * This protection is important because the effects of
418 * passing a talloc pointer from one to the other may
419 * be very hard to determine.
421 int offset = rand() % (16 - sizeof(random_value));
422 memcpy(&random_value, p + offset, sizeof(random_value));
423 } else
424 #endif
427 * Otherwise, hope the location we are loaded in
428 * memory is randomised by someone else
430 random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
432 talloc_magic = random_value & ~TALLOC_FLAG_MASK;
434 #else
435 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
436 #endif
438 static void talloc_lib_atexit(void)
440 TALLOC_FREE(autofree_context);
442 if (talloc_total_size(null_context) == 0) {
443 return;
446 if (talloc_report_null_full) {
447 talloc_report_full(null_context, stderr);
448 } else if (talloc_report_null) {
449 talloc_report(null_context, stderr);
453 static void talloc_setup_atexit(void)
455 static bool done;
457 if (done) {
458 return;
461 atexit(talloc_lib_atexit);
462 done = true;
465 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
466 static void talloc_log(const char *fmt, ...)
468 va_list ap;
469 char *message;
471 if (!talloc_log_fn) {
472 return;
475 va_start(ap, fmt);
476 message = talloc_vasprintf(NULL, fmt, ap);
477 va_end(ap);
479 talloc_log_fn(message);
480 talloc_free(message);
483 static void talloc_log_stderr(const char *message)
485 fprintf(stderr, "%s", message);
488 _PUBLIC_ void talloc_set_log_stderr(void)
490 talloc_set_log_fn(talloc_log_stderr);
493 static void (*talloc_abort_fn)(const char *reason);
495 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
497 talloc_abort_fn = abort_fn;
500 static void talloc_abort(const char *reason)
502 talloc_log("%s\n", reason);
504 if (!talloc_abort_fn) {
505 TALLOC_ABORT(reason);
508 talloc_abort_fn(reason);
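/*
 * Usage sketch (not part of talloc.c): routing talloc diagnostics into an
 * application's own logging and intercepting aborts on bad magic values.
 * The my_* names are hypothetical.
 */
#if 0
static void my_talloc_log(const char *message)
{
        fprintf(stderr, "talloc: %s", message);
}

static void my_talloc_abort(const char *reason)
{
        /* an application might dump extra state here before exiting */
        fprintf(stderr, "talloc panic: %s\n", reason);
        abort();
}

static void example_install_handlers(void)
{
        talloc_set_log_fn(my_talloc_log);
        talloc_set_abort_fn(my_talloc_abort);
}
#endif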
511 static void talloc_abort_access_after_free(void)
513 talloc_abort("Bad talloc magic value - access after free");
516 static void talloc_abort_unknown_value(void)
518 talloc_abort("Bad talloc magic value - unknown value");
521 /* panic if we get a bad magic value */
522 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
524 const char *pp = (const char *)ptr;
525 struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
526 if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
527 if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
528 == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
529 talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
530 talloc_abort_access_after_free();
531 return NULL;
534 talloc_abort_unknown_value();
535 return NULL;
537 return tc;
540 /* hook into the front of the list */
541 #define _TLIST_ADD(list, p) \
542 do { \
543 if (!(list)) { \
544 (list) = (p); \
545 (p)->next = (p)->prev = NULL; \
546 } else { \
547 (list)->prev = (p); \
548 (p)->next = (list); \
549 (p)->prev = NULL; \
550 (list) = (p); \
552 } while (0)
554 /* remove an element from a list - element doesn't have to be in list. */
555 #define _TLIST_REMOVE(list, p) \
556 do { \
557 if ((p) == (list)) { \
558 (list) = (p)->next; \
559 if (list) (list)->prev = NULL; \
560 } else { \
561 if ((p)->prev) (p)->prev->next = (p)->next; \
562 if ((p)->next) (p)->next->prev = (p)->prev; \
564 if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
565 } while (0)
569 return the parent chunk of a pointer
571 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
573 struct talloc_chunk *tc;
575 if (unlikely(ptr == NULL)) {
576 return NULL;
579 tc = talloc_chunk_from_ptr(ptr);
580 while (tc->prev) tc=tc->prev;
582 return tc->parent;
585 _PUBLIC_ void *talloc_parent(const void *ptr)
587 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
588 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
592 find the parent's name
594 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
596 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
597 return tc? tc->name : NULL;
601 A pool carries an in-pool object count in the first 16
602 bytes. This is done to support talloc_steal() to a parent outside of the
603 pool. The count includes the pool itself, so a talloc_free() on a pool will
604 only destroy the pool if the count has dropped to zero. A talloc_free() of a
605 pool member will reduce the count, and eventually also call free(3) on the
606 pool memory.
608 The object count is not put into "struct talloc_chunk" because it is only
609 relevant for talloc pools and the alignment to 16 bytes would increase the
610 memory footprint of each talloc chunk by those 16 bytes.
613 struct talloc_pool_hdr {
614 void *end;
615 unsigned int object_count;
616 size_t poolsize;
619 union talloc_pool_hdr_cast_u {
620 uint8_t *ptr;
621 struct talloc_pool_hdr *hdr;
624 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
626 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
628 union talloc_chunk_cast_u tcc = { .chunk = c };
629 union talloc_pool_hdr_cast_u tphc = { tcc.ptr - TP_HDR_SIZE };
630 return tphc.hdr;
633 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
635 union talloc_pool_hdr_cast_u tphc = { .hdr = h };
636 union talloc_chunk_cast_u tcc = { .ptr = tphc.ptr + TP_HDR_SIZE };
637 return tcc.chunk;
640 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
642 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
643 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
646 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
648 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
651 /* If tc is inside a pool, this gives the next neighbour. */
652 static inline void *tc_next_chunk(struct talloc_chunk *tc)
654 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
657 static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
659 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
660 return tc_next_chunk(tc);
663 /* Mark the whole remaining pool as not accessible */
664 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
666 size_t flen = tc_pool_space_left(pool_hdr);
668 if (unlikely(talloc_fill.enabled)) {
669 memset(pool_hdr->end, talloc_fill.fill_value, flen);
672 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
673 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
674 #endif
678 Allocate from a pool
681 static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
682 size_t size, size_t prefix_len)
684 struct talloc_pool_hdr *pool_hdr = NULL;
685 union talloc_chunk_cast_u tcc;
686 size_t space_left;
687 struct talloc_chunk *result;
688 size_t chunk_size;
690 if (parent == NULL) {
691 return NULL;
694 if (parent->flags & TALLOC_FLAG_POOL) {
695 pool_hdr = talloc_pool_from_chunk(parent);
697 else if (parent->flags & TALLOC_FLAG_POOLMEM) {
698 pool_hdr = parent->pool;
701 if (pool_hdr == NULL) {
702 return NULL;
705 space_left = tc_pool_space_left(pool_hdr);
708 * Align size to 16 bytes
710 chunk_size = TC_ALIGN16(size + prefix_len);
712 if (space_left < chunk_size) {
713 return NULL;
716 tcc = (union talloc_chunk_cast_u) {
717 .ptr = ((uint8_t *)pool_hdr->end) + prefix_len
719 result = tcc.chunk;
721 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
722 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
723 #endif
725 pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
727 result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
728 result->pool = pool_hdr;
730 pool_hdr->object_count++;
732 return result;
736 Allocate a bit of memory as a child of an existing pointer
738 static inline void *__talloc_with_prefix(const void *context,
739 size_t size,
740 size_t prefix_len,
741 struct talloc_chunk **tc_ret)
743 struct talloc_chunk *tc = NULL;
744 struct talloc_memlimit *limit = NULL;
745 size_t total_len = TC_HDR_SIZE + size + prefix_len;
746 struct talloc_chunk *parent = NULL;
748 if (unlikely(context == NULL)) {
749 context = null_context;
752 if (unlikely(size >= MAX_TALLOC_SIZE)) {
753 return NULL;
756 if (unlikely(total_len < TC_HDR_SIZE)) {
757 return NULL;
760 if (likely(context != NULL)) {
761 parent = talloc_chunk_from_ptr(context);
763 if (parent->limit != NULL) {
764 limit = parent->limit;
767 tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
770 if (tc == NULL) {
771 uint8_t *ptr = NULL;
772 union talloc_chunk_cast_u tcc;
775 * Only do the memlimit check/update on actual allocation.
777 if (!talloc_memlimit_check(limit, total_len)) {
778 errno = ENOMEM;
779 return NULL;
782 ptr = malloc(total_len);
783 if (unlikely(ptr == NULL)) {
784 return NULL;
786 tcc = (union talloc_chunk_cast_u) { .ptr = ptr + prefix_len };
787 tc = tcc.chunk;
788 tc->flags = talloc_magic;
789 tc->pool = NULL;
791 talloc_memlimit_grow(limit, total_len);
794 tc->limit = limit;
795 tc->size = size;
796 tc->destructor = NULL;
797 tc->child = NULL;
798 tc->name = NULL;
799 tc->refs = NULL;
801 if (likely(context != NULL)) {
802 if (parent->child) {
803 parent->child->parent = NULL;
804 tc->next = parent->child;
805 tc->next->prev = tc;
806 } else {
807 tc->next = NULL;
809 tc->parent = parent;
810 tc->prev = NULL;
811 parent->child = tc;
812 } else {
813 tc->next = tc->prev = tc->parent = NULL;
816 *tc_ret = tc;
817 return TC_PTR_FROM_CHUNK(tc);
820 static inline void *__talloc(const void *context,
821 size_t size,
822 struct talloc_chunk **tc)
824 return __talloc_with_prefix(context, size, 0, tc);
828 * Create a talloc pool
831 static inline void *_talloc_pool(const void *context, size_t size)
833 struct talloc_chunk *tc;
834 struct talloc_pool_hdr *pool_hdr;
835 void *result;
837 result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
839 if (unlikely(result == NULL)) {
840 return NULL;
843 pool_hdr = talloc_pool_from_chunk(tc);
845 tc->flags |= TALLOC_FLAG_POOL;
846 tc->size = 0;
848 pool_hdr->object_count = 1;
849 pool_hdr->end = result;
850 pool_hdr->poolsize = size;
852 tc_invalidate_pool(pool_hdr);
854 return result;
857 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
859 return _talloc_pool(context, size);
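/*
 * Usage sketch (not part of talloc.c): a pool pre-allocates one slab and
 * serves child allocations from it without further malloc() calls, which
 * suits request-scoped allocation patterns; if the slab is exhausted,
 * talloc transparently falls back to plain malloc().  The size and names
 * below are hypothetical.
 */
#if 0
static void example_pool(void)
{
        /* one malloc() for the whole slab */
        TALLOC_CTX *pool = talloc_pool(NULL, 8192);

        /* served from the 8192-byte slab, no extra malloc() */
        char *a = talloc_array(pool, char, 100);
        char *b = talloc_strdup(pool, "hello");

        /* frees the slab together with everything allocated inside it */
        talloc_free(pool);
        (void)a;
        (void)b;
}
#endif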
863 * Create a talloc pool correctly sized for a basic size plus
864 * a number of subobjects whose total size is given. Essentially
865 * a custom allocator for talloc to reduce fragmentation.
868 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
869 size_t type_size,
870 const char *type_name,
871 unsigned num_subobjects,
872 size_t total_subobjects_size)
874 size_t poolsize, subobjects_slack, tmp;
875 struct talloc_chunk *tc;
876 struct talloc_pool_hdr *pool_hdr;
877 void *ret;
879 poolsize = type_size + total_subobjects_size;
881 if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
882 goto overflow;
885 if (num_subobjects == UINT_MAX) {
886 goto overflow;
888 num_subobjects += 1; /* the object body itself */
891 * Alignment can increase the pool size by at most 15 bytes per object
892 * plus alignment for the object itself
894 subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
895 if (subobjects_slack < num_subobjects) {
896 goto overflow;
899 tmp = poolsize + subobjects_slack;
900 if ((tmp < poolsize) || (tmp < subobjects_slack)) {
901 goto overflow;
903 poolsize = tmp;
905 ret = _talloc_pool(ctx, poolsize);
906 if (ret == NULL) {
907 return NULL;
910 tc = talloc_chunk_from_ptr(ret);
911 tc->size = type_size;
913 pool_hdr = talloc_pool_from_chunk(tc);
915 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
916 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
917 #endif
919 pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
921 _tc_set_name_const(tc, type_name);
922 return ret;
924 overflow:
925 return NULL;
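/*
 * Usage sketch (not part of talloc.c): talloc_pooled_object(), the public
 * macro wrapping _talloc_pooled_object(), sizes a pool for one object plus
 * a known number of child allocations so the whole group costs a single
 * malloc().  The struct and sizes below are hypothetical.
 */
#if 0
struct example_request {
        const char *method;
        const char *path;
};

static struct example_request *example_request_create(TALLOC_CTX *mem_ctx)
{
        /* space for the object plus 2 strings of at most 64 bytes in total */
        struct example_request *req = talloc_pooled_object(
                mem_ctx, struct example_request, 2, 64);

        if (req == NULL) {
                return NULL;
        }

        /* these child allocations are served from the same pool */
        req->method = talloc_strdup(req, "GET");
        req->path = talloc_strdup(req, "/index.html");

        return req;
}
#endif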
929 setup a destructor to be called on free of a pointer
930 the destructor should return 0 on success, or -1 on failure.
931 if the destructor fails then the free fails, and the memory can
932 continue to be used
934 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
936 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
937 tc->destructor = destructor;
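/*
 * Usage sketch (not part of talloc.c): the talloc_set_destructor() macro
 * wraps this function and is the usual way to tie an external resource to
 * the lifetime of a talloc chunk.  The example_file type is hypothetical.
 */
#if 0
struct example_file {
        FILE *fh;
};

static int example_file_destructor(struct example_file *f)
{
        if (f->fh != NULL) {
                fclose(f->fh);
                f->fh = NULL;
        }
        return 0;       /* 0 lets the free proceed, -1 would refuse it */
}

static struct example_file *example_file_open(TALLOC_CTX *mem_ctx,
                                              const char *path)
{
        struct example_file *f = talloc_zero(mem_ctx, struct example_file);

        if (f == NULL) {
                return NULL;
        }
        f->fh = fopen(path, "r");
        talloc_set_destructor(f, example_file_destructor);

        /* talloc_free(f) will now fclose() the handle automatically */
        return f;
}
#endif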
941 increase the reference count on a piece of memory.
943 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
945 if (unlikely(!talloc_reference(null_context, ptr))) {
946 return -1;
948 return 0;
952 helper for talloc_reference()
954 this is referenced by a function pointer and should not be inline
956 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
958 struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
959 _TLIST_REMOVE(ptr_tc->refs, handle);
960 return 0;
964 more efficient way to add a name to a pointer - the name must point to a
965 true string constant
967 static inline void _tc_set_name_const(struct talloc_chunk *tc,
968 const char *name)
970 tc->name = name;
974 internal talloc_named_const()
976 static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
978 void *ptr;
979 struct talloc_chunk *tc;
981 ptr = __talloc(context, size, &tc);
982 if (unlikely(ptr == NULL)) {
983 return NULL;
986 _tc_set_name_const(tc, name);
988 return ptr;
992 make a secondary reference to a pointer, hanging off the given context.
993 the pointer remains valid until both the original caller and this given
994 context are freed.
996 the major use for this is when two different structures need to reference the
997 same underlying data, and you want to be able to free the two instances separately,
998 and in either order
1000 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
1002 struct talloc_chunk *tc;
1003 struct talloc_reference_handle *handle;
1004 if (unlikely(ptr == NULL)) return NULL;
1006 tc = talloc_chunk_from_ptr(ptr);
1007 handle = (struct talloc_reference_handle *)_talloc_named_const(context,
1008 sizeof(struct talloc_reference_handle),
1009 TALLOC_MAGIC_REFERENCE);
1010 if (unlikely(handle == NULL)) return NULL;
1012 /* note that we hang the destructor off the handle, not the
1013 main context as that allows the caller to still setup their
1014 own destructor on the context if they want to */
1015 talloc_set_destructor(handle, talloc_reference_destructor);
1016 handle->ptr = discard_const_p(void, ptr);
1017 handle->location = location;
1018 _TLIST_ADD(tc->refs, handle);
1019 return handle->ptr;
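/*
 * Usage sketch (not part of talloc.c): two owners sharing one buffer via
 * the talloc_reference() macro that wraps this function, released in
 * either order.  The names below are hypothetical.
 */
#if 0
static void example_shared_buffer(void)
{
        void *owner_a = talloc_new(NULL);
        void *owner_b = talloc_new(NULL);
        char *buf = talloc_strdup(owner_a, "shared data");

        /* owner_b now also keeps buf alive */
        (void)talloc_reference(owner_b, buf);

        /* buf survives this: the reference from owner_b still holds it */
        talloc_free(owner_a);

        /* last holder goes away, buf is really freed */
        talloc_free(owner_b);
}
#endif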
1022 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
1024 static inline void _tc_free_poolmem(struct talloc_chunk *tc,
1025 const char *location)
1027 struct talloc_pool_hdr *pool;
1028 struct talloc_chunk *pool_tc;
1029 void *next_tc;
1031 pool = tc->pool;
1032 pool_tc = talloc_chunk_from_pool(pool);
1033 next_tc = tc_next_chunk(tc);
1035 _talloc_chunk_set_free(tc, location);
1037 TC_INVALIDATE_FULL_CHUNK(tc);
1039 if (unlikely(pool->object_count == 0)) {
1040 talloc_abort("Pool object count zero!");
1041 return;
1044 pool->object_count--;
1046 if (unlikely(pool->object_count == 1
1047 && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
1049 * if there is just one object left in the pool
1050 * and the pool chunk's flags do not have TALLOC_FLAG_FREE,
1051 * it means this is the pool itself and
1052 * the rest is available for new objects
1053 * again.
1055 pool->end = tc_pool_first_chunk(pool);
1056 tc_invalidate_pool(pool);
1057 return;
1060 if (unlikely(pool->object_count == 0)) {
1062 * we mark the freed memory with where we called the free
1063 * from. This means on a double free error we can report where
1064 * the first free came from
1066 pool_tc->name = location;
1068 if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
1069 _tc_free_poolmem(pool_tc, location);
1070 } else {
1072 * The tc_memlimit_update_on_free()
1073 * call takes into account the
1074 * prefix TP_HDR_SIZE allocated before
1075 * the pool talloc_chunk.
1077 tc_memlimit_update_on_free(pool_tc);
1078 TC_INVALIDATE_FULL_CHUNK(pool_tc);
1079 free(pool);
1081 return;
1084 if (pool->end == next_tc) {
1086 * if pool->end still points to the end of
1087 * 'tc' (which is stored in the 'next_tc' variable),
1088 * we can reclaim the memory of 'tc'.
1090 pool->end = tc;
1091 return;
1095 * Do nothing. The memory is just "wasted", waiting for the pool
1096 * itself to be freed.
1100 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1101 void *ptr,
1102 const char *location);
1104 static inline int _talloc_free_internal(void *ptr, const char *location);
1107 internal free call that takes a struct talloc_chunk *.
1109 static inline int _tc_free_internal(struct talloc_chunk *tc,
1110 const char *location)
1112 void *ptr_to_free;
1113 void *ptr = TC_PTR_FROM_CHUNK(tc);
1115 if (unlikely(tc->refs)) {
1116 int is_child;
1117 /* check if this is a reference from a child or
1118 * grandchild back to its parent or grandparent
1120 * in that case we need to remove the reference and
1121 * call another instance of talloc_free() on the current
1122 * pointer.
1124 is_child = talloc_is_parent(tc->refs, ptr);
1125 _talloc_free_internal(tc->refs, location);
1126 if (is_child) {
1127 return _talloc_free_internal(ptr, location);
1129 return -1;
1132 if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1133 /* we have a free loop - stop looping */
1134 return 0;
1137 if (unlikely(tc->destructor)) {
1138 talloc_destructor_t d = tc->destructor;
1141 * Protect the destructor against some overwrite
1142 * attacks, by explicitly checking it has the right
1143 * magic here.
1145 if (talloc_chunk_from_ptr(ptr) != tc) {
1147 * This can't actually happen, the
1148 * call itself will panic.
1150 TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1153 if (d == (talloc_destructor_t)-1) {
1154 return -1;
1156 tc->destructor = (talloc_destructor_t)-1;
1157 if (d(ptr) == -1) {
1159 * Only replace the destructor pointer if
1160 * calling the destructor didn't modify it.
1162 if (tc->destructor == (talloc_destructor_t)-1) {
1163 tc->destructor = d;
1165 return -1;
1167 tc->destructor = NULL;
1170 if (tc->parent) {
1171 _TLIST_REMOVE(tc->parent->child, tc);
1172 if (tc->parent->child) {
1173 tc->parent->child->parent = tc->parent;
1175 } else {
1176 if (tc->prev) tc->prev->next = tc->next;
1177 if (tc->next) tc->next->prev = tc->prev;
1178 tc->prev = tc->next = NULL;
1181 tc->flags |= TALLOC_FLAG_LOOP;
1183 _tc_free_children_internal(tc, ptr, location);
1185 _talloc_chunk_set_free(tc, location);
1187 if (tc->flags & TALLOC_FLAG_POOL) {
1188 struct talloc_pool_hdr *pool;
1190 pool = talloc_pool_from_chunk(tc);
1192 if (unlikely(pool->object_count == 0)) {
1193 talloc_abort("Pool object count zero!");
1194 return 0;
1197 pool->object_count--;
1199 if (likely(pool->object_count != 0)) {
1200 return 0;
1204 * With object_count==0, a pool becomes a normal piece of
1205 * memory to free. If it's allocated inside a pool, it needs
1206 * to be freed as poolmem, else it needs to be just freed.
1208 ptr_to_free = pool;
1209 } else {
1210 ptr_to_free = tc;
1213 if (tc->flags & TALLOC_FLAG_POOLMEM) {
1214 _tc_free_poolmem(tc, location);
1215 return 0;
1218 tc_memlimit_update_on_free(tc);
1220 TC_INVALIDATE_FULL_CHUNK(tc);
1221 free(ptr_to_free);
1222 return 0;
1226 internal talloc_free call
1228 static inline int _talloc_free_internal(void *ptr, const char *location)
1230 struct talloc_chunk *tc;
1232 if (unlikely(ptr == NULL)) {
1233 return -1;
1236 /* possibly initialise the talloc fill value */
1237 if (unlikely(!talloc_fill.initialised)) {
1238 const char *fill = getenv(TALLOC_FILL_ENV);
1239 if (fill != NULL) {
1240 talloc_fill.enabled = true;
1241 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1243 talloc_fill.initialised = true;
1246 tc = talloc_chunk_from_ptr(ptr);
1247 return _tc_free_internal(tc, location);
1250 static inline size_t _talloc_total_limit_size(const void *ptr,
1251 struct talloc_memlimit *old_limit,
1252 struct talloc_memlimit *new_limit);
1255 move a lump of memory from one talloc context to another. Return the
1256 ptr on success, or NULL if it could not be transferred.
1257 passing NULL as ptr will always return NULL with no side effects.
1259 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1261 struct talloc_chunk *tc, *new_tc;
1262 size_t ctx_size = 0;
1264 if (unlikely(!ptr)) {
1265 return NULL;
1268 if (unlikely(new_ctx == NULL)) {
1269 new_ctx = null_context;
1272 tc = talloc_chunk_from_ptr(ptr);
1274 if (tc->limit != NULL) {
1276 ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1278 /* Decrement the memory limit from the source .. */
1279 talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1281 if (tc->limit->parent == tc) {
1282 tc->limit->upper = NULL;
1283 } else {
1284 tc->limit = NULL;
1288 if (unlikely(new_ctx == NULL)) {
1289 if (tc->parent) {
1290 _TLIST_REMOVE(tc->parent->child, tc);
1291 if (tc->parent->child) {
1292 tc->parent->child->parent = tc->parent;
1294 } else {
1295 if (tc->prev) tc->prev->next = tc->next;
1296 if (tc->next) tc->next->prev = tc->prev;
1299 tc->parent = tc->next = tc->prev = NULL;
1300 return discard_const_p(void, ptr);
1303 new_tc = talloc_chunk_from_ptr(new_ctx);
1305 if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1306 return discard_const_p(void, ptr);
1309 if (tc->parent) {
1310 _TLIST_REMOVE(tc->parent->child, tc);
1311 if (tc->parent->child) {
1312 tc->parent->child->parent = tc->parent;
1314 } else {
1315 if (tc->prev) tc->prev->next = tc->next;
1316 if (tc->next) tc->next->prev = tc->prev;
1317 tc->prev = tc->next = NULL;
1320 tc->parent = new_tc;
1321 if (new_tc->child) new_tc->child->parent = NULL;
1322 _TLIST_ADD(new_tc->child, tc);
1324 if (tc->limit || new_tc->limit) {
1325 ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1326 new_tc->limit);
1327 /* .. and increment it in the destination. */
1328 if (new_tc->limit) {
1329 talloc_memlimit_grow(new_tc->limit, ctx_size);
1333 return discard_const_p(void, ptr);
1337 move a lump of memory from one talloc context to another. Return the
1338 ptr on success, or NULL if it could not be transferred.
1339 passing NULL as ptr will always return NULL with no side effects.
1341 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1343 struct talloc_chunk *tc;
1345 if (unlikely(ptr == NULL)) {
1346 return NULL;
1349 tc = talloc_chunk_from_ptr(ptr);
1351 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1352 struct talloc_reference_handle *h;
1354 talloc_log("WARNING: talloc_steal with references at %s\n",
1355 location);
1357 for (h=tc->refs; h; h=h->next) {
1358 talloc_log("\treference at %s\n",
1359 h->location);
1363 #if 0
1364 /* this test is probably too expensive to have on in the
1365 normal build, but it is useful for debugging */
1366 if (talloc_is_parent(new_ctx, ptr)) {
1367 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1369 #endif
1371 return _talloc_steal_internal(new_ctx, ptr);
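/*
 * Usage sketch (not part of talloc.c): the talloc_steal() macro wrapping
 * this function moves a result from a scratch context to the caller's
 * long-lived context before the scratch memory is thrown away.  The names
 * below are hypothetical and error checking is omitted for brevity.
 */
#if 0
static char *example_build_string(TALLOC_CTX *mem_ctx)
{
        TALLOC_CTX *tmp = talloc_new(mem_ctx);
        char *scratch = talloc_strdup(tmp, "intermediate");
        char *result = talloc_asprintf(tmp, "result from %s", scratch);

        /* hand ownership of the result to the caller's context */
        result = talloc_steal(mem_ctx, result);

        /* drop the scratch context and everything still on it */
        talloc_free(tmp);
        return result;
}
#endif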
1375 this is like a talloc_steal(), but you must supply the old
1376 parent. This resolves the ambiguity in a talloc_steal() which is
1377 called on a context that has more than one parent (via references)
1379 The old parent can be either a reference or a parent
1381 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1383 struct talloc_chunk *tc;
1384 struct talloc_reference_handle *h;
1386 if (unlikely(ptr == NULL)) {
1387 return NULL;
1390 if (old_parent == talloc_parent(ptr)) {
1391 return _talloc_steal_internal(new_parent, ptr);
1394 tc = talloc_chunk_from_ptr(ptr);
1395 for (h=tc->refs;h;h=h->next) {
1396 if (talloc_parent(h) == old_parent) {
1397 if (_talloc_steal_internal(new_parent, h) != h) {
1398 return NULL;
1400 return discard_const_p(void, ptr);
1404 /* it wasn't a parent */
1405 return NULL;
1409 remove a secondary reference to a pointer. This undoes what
1410 talloc_reference() has done. The context and pointer arguments
1411 must match those given to a talloc_reference()
1413 static inline int talloc_unreference(const void *context, const void *ptr)
1415 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1416 struct talloc_reference_handle *h;
1418 if (unlikely(context == NULL)) {
1419 context = null_context;
1422 for (h=tc->refs;h;h=h->next) {
1423 struct talloc_chunk *p = talloc_parent_chunk(h);
1424 if (p == NULL) {
1425 if (context == NULL) break;
1426 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1427 break;
1430 if (h == NULL) {
1431 return -1;
1434 return _talloc_free_internal(h, __location__);
1438 remove a specific parent context from a pointer. This is a more
1439 controlled variant of talloc_free()
1442 /* coverity[ -tainted_data_sink : arg-1 ] */
1443 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1445 struct talloc_chunk *tc_p, *new_p, *tc_c;
1446 void *new_parent;
1448 if (ptr == NULL) {
1449 return -1;
1452 if (context == NULL) {
1453 context = null_context;
1456 if (talloc_unreference(context, ptr) == 0) {
1457 return 0;
1460 if (context != NULL) {
1461 tc_c = talloc_chunk_from_ptr(context);
1462 } else {
1463 tc_c = NULL;
1465 if (tc_c != talloc_parent_chunk(ptr)) {
1466 return -1;
1469 tc_p = talloc_chunk_from_ptr(ptr);
1471 if (tc_p->refs == NULL) {
1472 return _talloc_free_internal(ptr, __location__);
1475 new_p = talloc_parent_chunk(tc_p->refs);
1476 if (new_p) {
1477 new_parent = TC_PTR_FROM_CHUNK(new_p);
1478 } else {
1479 new_parent = NULL;
1482 if (talloc_unreference(new_parent, ptr) != 0) {
1483 return -1;
1486 _talloc_steal_internal(new_parent, ptr);
1488 return 0;
1492 add a name to an existing pointer - va_list version
1494 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1495 const char *fmt,
1496 va_list ap) PRINTF_ATTRIBUTE(2,0);
1498 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1499 const char *fmt,
1500 va_list ap)
1502 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1503 fmt,
1504 ap);
1505 if (likely(name_tc)) {
1506 tc->name = TC_PTR_FROM_CHUNK(name_tc);
1507 _tc_set_name_const(name_tc, ".name");
1508 } else {
1509 tc->name = NULL;
1511 return tc->name;
1515 add a name to an existing pointer
1517 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1519 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1520 const char *name;
1521 va_list ap;
1522 va_start(ap, fmt);
1523 name = tc_set_name_v(tc, fmt, ap);
1524 va_end(ap);
1525 return name;
1530 create a named talloc pointer. Any talloc pointer can be named, and
1531 talloc_named() operates just like talloc() except that it allows you
1532 to name the pointer.
1534 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1536 va_list ap;
1537 void *ptr;
1538 const char *name;
1539 struct talloc_chunk *tc;
1541 ptr = __talloc(context, size, &tc);
1542 if (unlikely(ptr == NULL)) return NULL;
1544 va_start(ap, fmt);
1545 name = tc_set_name_v(tc, fmt, ap);
1546 va_end(ap);
1548 if (unlikely(name == NULL)) {
1549 _talloc_free_internal(ptr, __location__);
1550 return NULL;
1553 return ptr;
1557 return the name of a talloc ptr, or "UNNAMED"
1559 static inline const char *__talloc_get_name(const void *ptr)
1561 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1562 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1563 return ".reference";
1565 if (likely(tc->name)) {
1566 return tc->name;
1568 return "UNNAMED";
1571 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1573 return __talloc_get_name(ptr);
1577 check if a pointer has the given name. If it does, return the pointer,
1578 otherwise return NULL
1580 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1582 const char *pname;
1583 if (unlikely(ptr == NULL)) return NULL;
1584 pname = __talloc_get_name(ptr);
1585 if (likely(pname == name || strcmp(pname, name) == 0)) {
1586 return discard_const_p(void, ptr);
1588 return NULL;
1591 static void talloc_abort_type_mismatch(const char *location,
1592 const char *name,
1593 const char *expected)
1595 const char *reason;
1597 reason = talloc_asprintf(NULL,
1598 "%s: Type mismatch: name[%s] expected[%s]",
1599 location,
1600 name?name:"NULL",
1601 expected);
1602 if (!reason) {
1603 reason = "Type mismatch";
1606 talloc_abort(reason);
1609 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1611 const char *pname;
1613 if (unlikely(ptr == NULL)) {
1614 talloc_abort_type_mismatch(location, NULL, name);
1615 return NULL;
1618 pname = __talloc_get_name(ptr);
1619 if (likely(pname == name || strcmp(pname, name) == 0)) {
1620 return discard_const_p(void, ptr);
1623 talloc_abort_type_mismatch(location, pname, name);
1624 return NULL;
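/*
 * Usage sketch (not part of talloc.c): the talloc_get_type_abort() macro
 * wrapping this function recovers a typed pointer from an opaque void *,
 * such as a callback's private data, and aborts loudly on a name mismatch
 * instead of silently using the wrong type.  The callback shape below is
 * hypothetical.
 */
#if 0
struct example_state {
        int counter;
};

static void example_callback(void *private_data)
{
        struct example_state *state =
                talloc_get_type_abort(private_data, struct example_state);

        state->counter++;
}

static void example_register(TALLOC_CTX *mem_ctx)
{
        struct example_state *state =
                talloc_zero(mem_ctx, struct example_state);

        /* hand 'state' to some API as opaque private data */
        example_callback(state);
}
#endif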
1628 this is for compatibility with older versions of talloc
1630 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1632 va_list ap;
1633 void *ptr;
1634 const char *name;
1635 struct talloc_chunk *tc;
1637 ptr = __talloc(NULL, 0, &tc);
1638 if (unlikely(ptr == NULL)) return NULL;
1640 va_start(ap, fmt);
1641 name = tc_set_name_v(tc, fmt, ap);
1642 va_end(ap);
1644 if (unlikely(name == NULL)) {
1645 _talloc_free_internal(ptr, __location__);
1646 return NULL;
1649 return ptr;
1652 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1653 void *ptr,
1654 const char *location)
1656 while (tc->child) {
1657 /* we need to work out who will own an abandoned child
1658 if it cannot be freed. In priority order, the first
1659 choice is the owner of any remaining reference to this
1660 pointer, the second choice is our parent, and the
1661 final choice is the null context. */
1662 void *child = TC_PTR_FROM_CHUNK(tc->child);
1663 const void *new_parent = null_context;
1664 if (unlikely(tc->child->refs)) {
1665 struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1666 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1668 if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1669 if (talloc_parent_chunk(child) != tc) {
1671 * Destructor already reparented this child.
1672 * No further reparenting needed.
1674 continue;
1676 if (new_parent == null_context) {
1677 struct talloc_chunk *p = talloc_parent_chunk(ptr);
1678 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1680 _talloc_steal_internal(new_parent, child);
1686 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1687 should probably not be used in new code. It's in here to keep the talloc
1688 code consistent across Samba 3 and 4.
1690 _PUBLIC_ void talloc_free_children(void *ptr)
1692 struct talloc_chunk *tc_name = NULL;
1693 struct talloc_chunk *tc;
1695 if (unlikely(ptr == NULL)) {
1696 return;
1699 tc = talloc_chunk_from_ptr(ptr);
1701 /* we do not want to free the context name if it is a child .. */
1702 if (likely(tc->child)) {
1703 for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1704 if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1706 if (tc_name) {
1707 _TLIST_REMOVE(tc->child, tc_name);
1708 if (tc->child) {
1709 tc->child->parent = tc;
1714 _tc_free_children_internal(tc, ptr, __location__);
1716 /* .. so we put it back after all other children have been freed */
1717 if (tc_name) {
1718 if (tc->child) {
1719 tc->child->parent = NULL;
1721 tc_name->parent = tc;
1722 _TLIST_ADD(tc->child, tc_name);
1727 Allocate a bit of memory as a child of an existing pointer
1729 _PUBLIC_ void *_talloc(const void *context, size_t size)
1731 struct talloc_chunk *tc;
1732 return __talloc(context, size, &tc);
1736 externally callable talloc_set_name_const()
1738 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1740 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1744 create a named talloc pointer. Any talloc pointer can be named, and
1745 talloc_named() operates just like talloc() except that it allows you
1746 to name the pointer.
1748 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1750 return _talloc_named_const(context, size, name);
1754 free a talloc pointer. This also frees all child pointers of this
1755 pointer recursively
1757 return 0 if the memory is actually freed, otherwise -1. The memory
1758 will not be freed if the ref_count is > 1 or the destructor (if
1759 any) returns non-zero
1761 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1763 struct talloc_chunk *tc;
1765 if (unlikely(ptr == NULL)) {
1766 return -1;
1769 tc = talloc_chunk_from_ptr(ptr);
1771 if (unlikely(tc->refs != NULL)) {
1772 struct talloc_reference_handle *h;
1774 if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1775 /* in this case we do know which parent should
1776 get this pointer, as there is really only
1777 one parent */
1778 return talloc_unlink(null_context, ptr);
1781 talloc_log("ERROR: talloc_free with references at %s\n",
1782 location);
1784 for (h=tc->refs; h; h=h->next) {
1785 talloc_log("\treference at %s\n",
1786 h->location);
1788 return -1;
1791 return _talloc_free_internal(ptr, location);
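/*
 * Usage sketch (not part of talloc.c): talloc_free() via the public macro,
 * plus the TALLOC_FREE() helper which also NULLs the pointer so that a
 * later call becomes a harmless no-op.  Names below are hypothetical.
 */
#if 0
static void example_free(void)
{
        char *str = talloc_strdup(NULL, "temporary");

        if (talloc_free(str) != 0) {
                /* -1 means a destructor refused or references remain */
        }

        str = talloc_strdup(NULL, "another");
        TALLOC_FREE(str);       /* frees and sets str = NULL */
        TALLOC_FREE(str);       /* second call does nothing, str is NULL */
}
#endif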
1797 A talloc version of realloc. The context argument is only used if
1798 ptr is NULL
1800 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1802 struct talloc_chunk *tc;
1803 void *new_ptr;
1804 bool malloced = false;
1805 struct talloc_pool_hdr *pool_hdr = NULL;
1806 size_t old_size = 0;
1807 size_t new_size = 0;
1809 /* size zero is equivalent to free() */
1810 if (unlikely(size == 0)) {
1811 talloc_unlink(context, ptr);
1812 return NULL;
1815 if (unlikely(size >= MAX_TALLOC_SIZE)) {
1816 return NULL;
1819 /* realloc(NULL) is equivalent to malloc() */
1820 if (ptr == NULL) {
1821 return _talloc_named_const(context, size, name);
1824 tc = talloc_chunk_from_ptr(ptr);
1826 /* don't allow realloc on referenced pointers */
1827 if (unlikely(tc->refs)) {
1828 return NULL;
1831 /* don't let anybody try to realloc a talloc_pool */
1832 if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
1833 return NULL;
1836 if (tc->limit && (size > tc->size)) {
1837 if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1838 errno = ENOMEM;
1839 return NULL;
1843 /* handle realloc inside a talloc_pool */
1844 if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1845 pool_hdr = tc->pool;
1848 #if (ALWAYS_REALLOC == 0)
1849 /* don't shrink if we have less than 1k to gain */
1850 if (size < tc->size && tc->limit == NULL) {
1851 if (pool_hdr) {
1852 void *next_tc = tc_next_chunk(tc);
1853 TC_INVALIDATE_SHRINK_CHUNK(tc, size);
1854 tc->size = size;
1855 if (next_tc == pool_hdr->end) {
1856 /* note: tc->size has changed, so this works */
1857 pool_hdr->end = tc_next_chunk(tc);
1859 return ptr;
1860 } else if ((tc->size - size) < 1024) {
1862 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1863 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1864 * after each realloc call, which slows down
1865 * testing a lot :-(.
1867 * That is why we only mark memory as undefined here.
1869 TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1871 /* do not shrink if we have less than 1k to gain */
1872 tc->size = size;
1873 return ptr;
1875 } else if (tc->size == size) {
1877 * do not change the pointer if it is exactly
1878 * the same size.
1880 return ptr;
1882 #endif
1885 * by resetting magic we catch users of the old memory
1887 * We mark this memory as free, and also over-stamp the talloc
1888 * magic with the old-style magic.
1890 * Why? This tries to avoid a memory read use-after-free from
1891 * disclosing our talloc magic, which would then allow an
1892 * attacker to prepare a valid header and so run a destructor.
1894 * What else? We have to re-stamp back a valid normal magic
1895 * on this memory once realloc() is done, as it will have done
1896 * a memcpy() into the new valid memory. We can't do this in
1897 * reverse as that would be a real use-after-free.
1899 _talloc_chunk_set_free(tc, NULL);
1901 #if ALWAYS_REALLOC
1902 if (pool_hdr) {
1903 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1904 pool_hdr->object_count--;
1906 if (new_ptr == NULL) {
1907 new_ptr = malloc(TC_HDR_SIZE+size);
1908 malloced = true;
1909 new_size = size;
1912 if (new_ptr) {
1913 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1914 TC_INVALIDATE_FULL_CHUNK(tc);
1916 } else {
1917 /* We're doing malloc then free here, so record the difference. */
1918 old_size = tc->size;
1919 new_size = size;
1920 new_ptr = malloc(size + TC_HDR_SIZE);
1921 if (new_ptr) {
1922 memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
1923 free(tc);
1926 #else
1927 if (pool_hdr) {
1928 struct talloc_chunk *pool_tc;
1929 void *next_tc = tc_next_chunk(tc);
1930 size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1931 size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1932 size_t space_needed;
1933 size_t space_left;
1934 unsigned int chunk_count = pool_hdr->object_count;
1936 pool_tc = talloc_chunk_from_pool(pool_hdr);
1937 if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1938 chunk_count -= 1;
1941 if (chunk_count == 1) {
1943 * optimize for the case where 'tc' is the only
1944 * chunk in the pool.
1946 char *start = tc_pool_first_chunk(pool_hdr);
1947 space_needed = new_chunk_size;
1948 space_left = (char *)tc_pool_end(pool_hdr) - start;
1950 if (space_left >= space_needed) {
1951 size_t old_used = TC_HDR_SIZE + tc->size;
1952 size_t new_used = TC_HDR_SIZE + size;
1953 new_ptr = start;
1955 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1958 * The area from
1959 * start -> tc may have
1960 * been freed and thus been marked as
1961 * VALGRIND_MEM_NOACCESS. Set it to
1962 * VALGRIND_MEM_UNDEFINED so we can
1963 * copy into it without valgrind errors.
1964 * We can't just mark
1965 * new_ptr -> new_ptr + old_used
1966 * as this may overlap on top of tc,
1967 * (which is why we use memmove, not
1968 * memcpy below) hence the MIN.
1970 size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1971 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1973 #endif
1975 memmove(new_ptr, tc, old_used);
1977 tc = (struct talloc_chunk *)new_ptr;
1978 TC_UNDEFINE_GROW_CHUNK(tc, size);
1981 * first we do not align the pool pointer
1982 * because we want to invalidate the padding
1983 * too.
1985 pool_hdr->end = new_used + (char *)new_ptr;
1986 tc_invalidate_pool(pool_hdr);
1988 /* now the aligned pointer */
1989 pool_hdr->end = new_chunk_size + (char *)new_ptr;
1990 goto got_new_ptr;
1993 next_tc = NULL;
1996 if (new_chunk_size == old_chunk_size) {
1997 TC_UNDEFINE_GROW_CHUNK(tc, size);
1998 _talloc_chunk_set_not_free(tc);
1999 tc->size = size;
2000 return ptr;
2003 if (next_tc == pool_hdr->end) {
2005 * optimize for the case where 'tc' is the last
2006 * chunk in the pool.
2008 space_needed = new_chunk_size - old_chunk_size;
2009 space_left = tc_pool_space_left(pool_hdr);
2011 if (space_left >= space_needed) {
2012 TC_UNDEFINE_GROW_CHUNK(tc, size);
2013 _talloc_chunk_set_not_free(tc);
2014 tc->size = size;
2015 pool_hdr->end = tc_next_chunk(tc);
2016 return ptr;
2020 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
2022 if (new_ptr == NULL) {
2023 new_ptr = malloc(TC_HDR_SIZE+size);
2024 malloced = true;
2025 new_size = size;
2028 if (new_ptr) {
2029 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
2031 _tc_free_poolmem(tc, __location__ "_talloc_realloc");
2034 else {
2035 /* We're doing realloc here, so record the difference. */
2036 old_size = tc->size;
2037 new_size = size;
2038 new_ptr = realloc(tc, size + TC_HDR_SIZE);
2040 got_new_ptr:
2041 #endif
2042 if (unlikely(!new_ptr)) {
2044 * Ok, this is a strange spot. We have to put back
2045 * the old talloc_magic and any flags, except the
2046 * TALLOC_FLAG_FREE as this was not free'ed by the
2047 * realloc() call after all
2049 _talloc_chunk_set_not_free(tc);
2050 return NULL;
2054 * tc is now the new value from realloc(), the old memory we
2055 * can't access any more and was preemptively marked as
2056 * TALLOC_FLAG_FREE before the call. Now we mark it as not
2057 * free again
2059 tc = (struct talloc_chunk *)new_ptr;
2060 _talloc_chunk_set_not_free(tc);
2061 if (malloced) {
2062 tc->flags &= ~TALLOC_FLAG_POOLMEM;
2064 if (tc->parent) {
2065 tc->parent->child = tc;
2067 if (tc->child) {
2068 tc->child->parent = tc;
2071 if (tc->prev) {
2072 tc->prev->next = tc;
2074 if (tc->next) {
2075 tc->next->prev = tc;
2078 if (new_size > old_size) {
2079 talloc_memlimit_grow(tc->limit, new_size - old_size);
2080 } else if (new_size < old_size) {
2081 talloc_memlimit_shrink(tc->limit, old_size - new_size);
2084 tc->size = size;
2085 _tc_set_name_const(tc, name);
2087 return TC_PTR_FROM_CHUNK(tc);
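/*
 * Usage sketch (not part of talloc.c): growing an array with the
 * talloc_realloc() macro that wraps this function.  The element type and
 * sizes below are hypothetical.
 */
#if 0
static int *example_grow(TALLOC_CTX *mem_ctx)
{
        size_t i;
        int *numbers = talloc_array(mem_ctx, int, 4);
        int *tmp;

        if (numbers == NULL) {
                return NULL;
        }

        /* grow from 4 to 16 elements, existing contents are preserved */
        tmp = talloc_realloc(mem_ctx, numbers, int, 16);
        if (tmp == NULL) {
                /* on failure the old array is untouched and still valid */
                talloc_free(numbers);
                return NULL;
        }
        numbers = tmp;

        for (i = 0; i < 16; i++) {
                numbers[i] = (int)i;
        }

        /* a count of 0 behaves like talloc_free(numbers) */
        return numbers;
}
#endif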
2091 a wrapper around talloc_steal() for situations where you are moving a pointer
2092 between two structures, and want the old pointer to be set to NULL
2094 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2096 const void **pptr = discard_const_p(const void *,_pptr);
2097 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
2098 (*pptr) = NULL;
2099 return ret;
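/*
 * Usage sketch (not part of talloc.c): the talloc_move() macro wrapping
 * this function transfers ownership and NULLs the source pointer, so two
 * structures never end up both pointing at the same child.  The structs
 * below are hypothetical and error checking is omitted.
 */
#if 0
struct example_src {
        char *payload;
};

struct example_dst {
        char *payload;
};

static void example_hand_over(TALLOC_CTX *mem_ctx)
{
        struct example_src *src = talloc_zero(mem_ctx, struct example_src);
        struct example_dst *dst = talloc_zero(mem_ctx, struct example_dst);

        src->payload = talloc_strdup(src, "payload data");

        /* dst->payload now owns the memory and src->payload is NULL */
        dst->payload = talloc_move(dst, &src->payload);
}
#endif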
2102 enum talloc_mem_count_type {
2103 TOTAL_MEM_SIZE,
2104 TOTAL_MEM_BLOCKS,
2105 TOTAL_MEM_LIMIT,
2108 static inline size_t _talloc_total_mem_internal(const void *ptr,
2109 enum talloc_mem_count_type type,
2110 struct talloc_memlimit *old_limit,
2111 struct talloc_memlimit *new_limit)
2113 size_t total = 0;
2114 struct talloc_chunk *c, *tc;
2116 if (ptr == NULL) {
2117 ptr = null_context;
2119 if (ptr == NULL) {
2120 return 0;
2123 tc = talloc_chunk_from_ptr(ptr);
2125 if (old_limit || new_limit) {
2126 if (tc->limit && tc->limit->upper == old_limit) {
2127 tc->limit->upper = new_limit;
2131 /* optimize in the memlimits case */
2132 if (type == TOTAL_MEM_LIMIT &&
2133 tc->limit != NULL &&
2134 tc->limit != old_limit &&
2135 tc->limit->parent == tc) {
2136 return tc->limit->cur_size;
2139 if (tc->flags & TALLOC_FLAG_LOOP) {
2140 return 0;
2143 tc->flags |= TALLOC_FLAG_LOOP;
2145 if (old_limit || new_limit) {
2146 if (old_limit == tc->limit) {
2147 tc->limit = new_limit;
2151 switch (type) {
2152 case TOTAL_MEM_SIZE:
2153 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2154 total = tc->size;
2156 break;
2157 case TOTAL_MEM_BLOCKS:
2158 total++;
2159 break;
2160 case TOTAL_MEM_LIMIT:
2161 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2163 * Don't count memory allocated from a pool
2164 * when calculating limits. Only count the
2165 * pool itself.
2167 if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2168 if (tc->flags & TALLOC_FLAG_POOL) {
2170 * If this is a pool, the allocated
2171 * size is in the pool header, and
2172 * remember to add in the prefix
2173 * length.
2175 struct talloc_pool_hdr *pool_hdr
2176 = talloc_pool_from_chunk(tc);
2177 total = pool_hdr->poolsize +
2178 TC_HDR_SIZE +
2179 TP_HDR_SIZE;
2180 } else {
2181 total = tc->size + TC_HDR_SIZE;
2185 break;
2187 for (c = tc->child; c; c = c->next) {
2188 total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2189 old_limit, new_limit);
2192 tc->flags &= ~TALLOC_FLAG_LOOP;
2194 return total;
2198 return the total size of a talloc pool (subtree)
2200 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2202 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2206 return the total number of blocks in a talloc pool (subtree)
2208 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2210 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2214 return the number of external references to a pointer
2216 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2218 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2219 struct talloc_reference_handle *h;
2220 size_t ret = 0;
2222 for (h=tc->refs;h;h=h->next) {
2223 ret++;
2225 return ret;
2229 report on memory usage by all children of a pointer, giving a full tree view
2231 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2232 void (*callback)(const void *ptr,
2233 int depth, int max_depth,
2234 int is_ref,
2235 void *private_data),
2236 void *private_data)
2238 struct talloc_chunk *c, *tc;
2240 if (ptr == NULL) {
2241 ptr = null_context;
2243 if (ptr == NULL) return;
2245 tc = talloc_chunk_from_ptr(ptr);
2247 if (tc->flags & TALLOC_FLAG_LOOP) {
2248 return;
2251 callback(ptr, depth, max_depth, 0, private_data);
2253 if (max_depth >= 0 && depth >= max_depth) {
2254 return;
2257 tc->flags |= TALLOC_FLAG_LOOP;
2258 for (c=tc->child;c;c=c->next) {
2259 if (c->name == TALLOC_MAGIC_REFERENCE) {
2260 struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2261 callback(h->ptr, depth + 1, max_depth, 1, private_data);
2262 } else {
2263 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2266 tc->flags &= ~TALLOC_FLAG_LOOP;
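/*
  Illustrative sketch (not part of the library): a minimal callback for
  talloc_report_depth_cb() that counts chunks; references arrive with
  is_ref != 0, mirroring the loop above.

	static void count_cb(const void *ptr, int depth, int max_depth,
			     int is_ref, void *private_data)
	{
		size_t *n = (size_t *)private_data;
		if (!is_ref) {
			(*n)++;
		}
	}

	size_t n = 0;
	talloc_report_depth_cb(ctx, 0, -1, count_cb, &n);  // -1 means unlimited depth
*/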
2269 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2271 const char *name = __talloc_get_name(ptr);
2272 struct talloc_chunk *tc;
2273 FILE *f = (FILE *)_f;
2275 if (is_ref) {
2276 fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2277 return;
2280 tc = talloc_chunk_from_ptr(ptr);
2281 if (tc->limit && tc->limit->parent == tc) {
2282 fprintf(f, "%*s%-30s is a memlimit context"
2283 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2284 depth*4, "",
2285 name,
2286 (unsigned long)tc->limit->max_size,
2287 (unsigned long)tc->limit->cur_size);
2290 if (depth == 0) {
2291 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2292 (max_depth < 0 ? "full " :""), name,
2293 (unsigned long)talloc_total_size(ptr),
2294 (unsigned long)talloc_total_blocks(ptr));
2295 return;
2298 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2299 depth*4, "",
2300 name,
2301 (unsigned long)talloc_total_size(ptr),
2302 (unsigned long)talloc_total_blocks(ptr),
2303 (int)talloc_reference_count(ptr), ptr);
2305 #if 0
2306 fprintf(f, "content: ");
2307 if (talloc_total_size(ptr)) {
2308 int tot = talloc_total_size(ptr);
2309 int i;
2311 for (i = 0; i < tot; i++) {
2312 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2313 fprintf(f, "%c", ((char *)ptr)[i]);
2314 } else {
2315 fprintf(f, "~%02x", ((char *)ptr)[i]);
2319 fprintf(f, "\n");
2320 #endif
2324 report on memory usage by all children of a pointer, writing a tree view (down to max_depth) to the given file
2326 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2328 if (f) {
2329 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2330 fflush(f);
2335 report on memory usage by all children of a pointer, giving a full tree view
2337 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2339 talloc_report_depth_file(ptr, 0, -1, f);
2343 report on memory usage by a pointer and its direct children
2345 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2347 talloc_report_depth_file(ptr, 0, 1, f);
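/*
  Illustrative sketch (not part of the library): the two convenience wrappers
  above differ only in depth; talloc_report() stops at the direct children,
  talloc_report_full() walks the whole subtree.

	talloc_report(ctx, stderr);       // context plus one level of children
	talloc_report_full(ctx, stderr);  // complete tree, unlimited depth
*/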
2351 enable tracking of the NULL context
2353 _PUBLIC_ void talloc_enable_null_tracking(void)
2355 if (null_context == NULL) {
2356 null_context = _talloc_named_const(NULL, 0, "null_context");
2357 if (autofree_context != NULL) {
2358 talloc_reparent(NULL, null_context, autofree_context);
2364 enable tracking of the NULL context, without moving the autofree context
2365 into the NULL context. This is needed for the talloc testsuite
2367 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2369 if (null_context == NULL) {
2370 null_context = _talloc_named_const(NULL, 0, "null_context");
2375 disable tracking of the NULL context
2377 _PUBLIC_ void talloc_disable_null_tracking(void)
2379 if (null_context != NULL) {
2380 /* we have to move any children onto the real NULL
2381 context */
2382 struct talloc_chunk *tc, *tc2;
2383 tc = talloc_chunk_from_ptr(null_context);
2384 for (tc2 = tc->child; tc2; tc2=tc2->next) {
2385 if (tc2->parent == tc) tc2->parent = NULL;
2386 if (tc2->prev == tc) tc2->prev = NULL;
2388 for (tc2 = tc->next; tc2; tc2=tc2->next) {
2389 if (tc2->parent == tc) tc2->parent = NULL;
2390 if (tc2->prev == tc) tc2->prev = NULL;
2392 tc->child = NULL;
2393 tc->next = NULL;
2395 talloc_free(null_context);
2396 null_context = NULL;
2400 enable leak reporting on exit
2402 _PUBLIC_ void talloc_enable_leak_report(void)
2404 talloc_enable_null_tracking();
2405 talloc_report_null = true;
2406 talloc_setup_atexit();
2410 enable full leak reporting on exit
2412 _PUBLIC_ void talloc_enable_leak_report_full(void)
2414 talloc_enable_null_tracking();
2415 talloc_report_null_full = true;
2416 talloc_setup_atexit();
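/*
  Illustrative sketch (not part of the library): enabling leak reporting
  early in main() so anything still parented to the NULL context is dumped
  to stderr when the process exits.

	int main(void)
	{
		talloc_enable_leak_report_full();
		(void)talloc_strdup(NULL, "leaked on purpose");
		return 0;  // the leak report is printed by the atexit handler
	}
*/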
2420 talloc and zero memory.
2422 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2424 void *p = _talloc_named_const(ctx, size, name);
2426 if (p) {
2427 memset(p, '\0', size);
2430 return p;
2434 memdup with a talloc.
2436 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2438 void *newp = NULL;
2440 if (likely(size > 0) && unlikely(p == NULL)) {
2441 return NULL;
2444 newp = _talloc_named_const(t, size, name);
2445 if (likely(newp != NULL) && likely(size > 0)) {
2446 memcpy(newp, p, size);
2449 return newp;
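/*
  Illustrative sketch (not part of the library): the typed wrappers that end
  up in the two helpers above; "struct header" is a hypothetical type and
  ctx stands for any existing talloc context.

	struct header { size_t len; unsigned flags; };

	struct header *h   = talloc_zero(ctx, struct header);     // all fields zero
	struct header *dup = talloc_memdup(ctx, h, sizeof(*h));   // byte-for-byte copy
*/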
2452 static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
2454 char *ret;
2455 struct talloc_chunk *tc;
2457 ret = (char *)__talloc(t, len + 1, &tc);
2458 if (unlikely(!ret)) return NULL;
2460 memcpy(ret, p, len);
2461 ret[len] = 0;
2463 _tc_set_name_const(tc, ret);
2464 return ret;
2468 strdup with a talloc
2470 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2472 if (unlikely(!p)) return NULL;
2473 return __talloc_strlendup(t, p, strlen(p));
2477 strndup with a talloc
2479 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2481 if (unlikely(!p)) return NULL;
2482 return __talloc_strlendup(t, p, strnlen(p, n));
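/*
  Illustrative sketch (not part of the library): both helpers name the new
  chunk after its contents, which is what the talloc reports show.

	char *full  = talloc_strdup(ctx, "hello world");
	char *first = talloc_strndup(ctx, "hello world", 5);  // "hello"
*/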
2485 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2486 const char *a, size_t alen)
2488 char *ret;
2490 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2491 if (unlikely(!ret)) return NULL;
2493 /* append the string and the trailing \0 */
2494 memcpy(&ret[slen], a, alen);
2495 ret[slen+alen] = 0;
2497 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2498 return ret;
2502 * Appends at the end of the string.
2504 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2506 if (unlikely(!s)) {
2507 return talloc_strdup(NULL, a);
2510 if (unlikely(!a)) {
2511 return s;
2514 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2518 * Appends at the end of the talloc'ed buffer,
2519 * not the end of the string.
2521 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2523 size_t slen;
2525 if (unlikely(!s)) {
2526 return talloc_strdup(NULL, a);
2529 if (unlikely(!a)) {
2530 return s;
2533 slen = talloc_get_size(s);
2534 if (likely(slen > 0)) {
2535 slen--;
2538 return __talloc_strlendup_append(s, slen, a, strlen(a));
2542 * Appends at the end of the string.
2544 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2546 if (unlikely(!s)) {
2547 return talloc_strndup(NULL, a, n);
2550 if (unlikely(!a)) {
2551 return s;
2554 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2558 * Appends at the end of the talloc'ed buffer,
2559 * not the end of the string.
2561 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2563 size_t slen;
2565 if (unlikely(!s)) {
2566 return talloc_strndup(NULL, a, n);
2569 if (unlikely(!a)) {
2570 return s;
2573 slen = talloc_get_size(s);
2574 if (likely(slen > 0)) {
2575 slen--;
2578 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
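/*
  Illustrative sketch (not part of the library): the difference between the
  _append and _append_buffer variants.  _append writes at strlen(s), so an
  embedded NUL hides the tail; _append_buffer writes at talloc_get_size(s)-1,
  i.e. over the buffer's final byte, which is assumed to be the trailing NUL.

	char *a = talloc_strdup(ctx, "abc");   // 4-byte buffer "abc\0"
	a[1] = '\0';
	a = talloc_strdup_append(a, "X");      // result: "aX"

	char *b = talloc_strdup(ctx, "abc");
	b[1] = '\0';
	b = talloc_strdup_append_buffer(b, "X");  // buffer: 'a' '\0' 'c' 'X' '\0'
*/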
2581 #ifndef HAVE_VA_COPY
2582 #ifdef HAVE___VA_COPY
2583 #define va_copy(dest, src) __va_copy(dest, src)
2584 #else
2585 #define va_copy(dest, src) (dest) = (src)
2586 #endif
2587 #endif
2589 static struct talloc_chunk *_vasprintf_tc(const void *t,
2590 const char *fmt,
2591 va_list ap) PRINTF_ATTRIBUTE(2,0);
2593 static struct talloc_chunk *_vasprintf_tc(const void *t,
2594 const char *fmt,
2595 va_list ap)
2597 int vlen;
2598 size_t len;
2599 char *ret;
2600 va_list ap2;
2601 struct talloc_chunk *tc;
2602 char buf[1024];
2604 /* this call looks strange, but it makes it work on older Solaris boxes */
2605 va_copy(ap2, ap);
2606 vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
2607 va_end(ap2);
2608 if (unlikely(vlen < 0)) {
2609 return NULL;
2611 len = vlen;
2612 if (unlikely(len + 1 < len)) {
2613 return NULL;
2616 ret = (char *)__talloc(t, len+1, &tc);
2617 if (unlikely(!ret)) return NULL;
2619 if (len < sizeof(buf)) {
2620 memcpy(ret, buf, len+1);
2621 } else {
2622 va_copy(ap2, ap);
2623 vsnprintf(ret, len+1, fmt, ap2);
2624 va_end(ap2);
2627 _tc_set_name_const(tc, ret);
2628 return tc;
2631 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2633 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2634 if (tc == NULL) {
2635 return NULL;
2637 return TC_PTR_FROM_CHUNK(tc);
2642 Perform string formatting, and return a pointer to newly allocated
2643 memory holding the result, parented to the given talloc context.
2645 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2647 va_list ap;
2648 char *ret;
2650 va_start(ap, fmt);
2651 ret = talloc_vasprintf(t, fmt, ap);
2652 va_end(ap);
2653 return ret;
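/*
  Illustrative sketch (not part of the library): formatted allocation; the
  result is a child of the supplied context and is named after its contents.

	char *path = talloc_asprintf(ctx, "%s/%u", "/var/run/app", 42u);
	if (path == NULL) {
		// allocation or formatting failed
	}
*/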
2656 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2657 const char *fmt, va_list ap)
2658 PRINTF_ATTRIBUTE(3,0);
2660 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2661 const char *fmt, va_list ap)
2663 ssize_t alen;
2664 va_list ap2;
2665 char c;
2667 va_copy(ap2, ap);
2668 alen = vsnprintf(&c, 1, fmt, ap2);
2669 va_end(ap2);
2671 if (alen <= 0) {
2672 /* Either the vsnprintf failed or the format resulted in
2673 * no characters being formatted. In the former case, we
2674 * ought to return NULL, in the latter we ought to return
2675 * the original string. Most current callers of this
2676 * function expect it to never return NULL.
2678 return s;
2681 s = talloc_realloc(NULL, s, char, slen + alen + 1);
2682 if (!s) return NULL;
2684 va_copy(ap2, ap);
2685 vsnprintf(s + slen, alen + 1, fmt, ap2);
2686 va_end(ap2);
2688 _tc_set_name_const(talloc_chunk_from_ptr(s), s);
2689 return s;
2693 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2694 * and return @p s, which may have moved. Good for gradually
2695 * accumulating output into a string buffer. Appends at the end
2696 * of the string.
2698 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2700 if (unlikely(!s)) {
2701 return talloc_vasprintf(NULL, fmt, ap);
2704 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2708 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2709 * and return @p s, which may have moved. Always appends at the
2710 * end of the talloc'ed buffer, not the end of the string.
2712 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2714 size_t slen;
2716 if (unlikely(!s)) {
2717 return talloc_vasprintf(NULL, fmt, ap);
2720 slen = talloc_get_size(s);
2721 if (likely(slen > 0)) {
2722 slen--;
2725 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2729 Realloc @p s to append the formatted result of @p fmt and return @p
2730 s, which may have moved. Good for gradually accumulating output
2731 into a string buffer.
2733 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2735 va_list ap;
2737 va_start(ap, fmt);
2738 s = talloc_vasprintf_append(s, fmt, ap);
2739 va_end(ap);
2740 return s;
2744 Realloc @p s to append the formatted result of @p fmt and return @p
2745 s, which may have moved. Good for gradually accumulating output
2746 into a buffer.
2748 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2750 va_list ap;
2752 va_start(ap, fmt);
2753 s = talloc_vasprintf_append_buffer(s, fmt, ap);
2754 va_end(ap);
2755 return s;
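/*
  Illustrative sketch (not part of the library): growing a string
  incrementally.  Every call may reallocate, so always assign the result
  back and stop on NULL.

	char *out = talloc_strdup(ctx, "");
	int i;
	for (i = 0; out != NULL && i < 3; i++) {
		out = talloc_asprintf_append(out, "%d,", i);
	}
	// out == "0,1,2," on success
*/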
2759 alloc an array, checking for integer overflow in the array size
2761 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2763 if (count >= MAX_TALLOC_SIZE/el_size) {
2764 return NULL;
2766 return _talloc_named_const(ctx, el_size * count, name);
2770 alloc a zeroed array, checking for integer overflow in the array size
2772 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2774 if (count >= MAX_TALLOC_SIZE/el_size) {
2775 return NULL;
2777 return _talloc_zero(ctx, el_size * count, name);
2781 realloc an array, checking for integer overflow in the array size
2783 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2785 if (count >= MAX_TALLOC_SIZE/el_size) {
2786 return NULL;
2788 return _talloc_realloc(ctx, ptr, el_size * count, name);
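/*
  Illustrative sketch (not part of the library): the typed array macros that
  land in the three checked helpers above.  A count that would overflow
  MAX_TALLOC_SIZE makes them return NULL instead of wrapping.

	unsigned *v = talloc_array(ctx, unsigned, 1024);  // 1024 elements
	v = talloc_realloc(ctx, v, unsigned, 2048);       // grow, still checked
	char *huge = talloc_array(ctx, char, 1U << 28);   // NULL: hits the size cap
*/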
2792 a function version of talloc_realloc(), so it can be passed as a function pointer
2793 to libraries that want a realloc-style allocator (a single realloc function can
2794 express malloc, realloc and free, which is why this one hook is enough)
2796 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2798 return _talloc_realloc(context, ptr, size, NULL);
2802 static int talloc_autofree_destructor(void *ptr)
2804 autofree_context = NULL;
2805 return 0;
2809 return a context which will be auto-freed on exit
2810 this is useful for reducing the noise in leak reports
2812 _PUBLIC_ void *talloc_autofree_context(void)
2814 if (autofree_context == NULL) {
2815 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2816 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2817 talloc_setup_atexit();
2819 return autofree_context;
2822 _PUBLIC_ size_t talloc_get_size(const void *context)
2824 struct talloc_chunk *tc;
2826 if (context == NULL) {
2827 return 0;
2830 tc = talloc_chunk_from_ptr(context);
2832 return tc->size;
2836 find a parent of this context that has the given name, if any
2838 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2840 struct talloc_chunk *tc;
2842 if (context == NULL) {
2843 return NULL;
2846 tc = talloc_chunk_from_ptr(context);
2847 while (tc) {
2848 if (tc->name && strcmp(tc->name, name) == 0) {
2849 return TC_PTR_FROM_CHUNK(tc);
2851 while (tc && tc->prev) tc = tc->prev;
2852 if (tc) {
2853 tc = tc->parent;
2856 return NULL;
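/*
  Illustrative sketch (not part of the library): walking up the tree to find
  a named ancestor; the context names are hypothetical.

	TALLOC_CTX *conn = talloc_named_const(NULL, 0, "connection");
	TALLOC_CTX *req  = talloc_new(conn);
	char *buf        = talloc_strdup(req, "payload");

	void *found = talloc_find_parent_byname(buf, "connection");
	// found == conn
*/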
2860 show the parentage of a context
2862 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2864 struct talloc_chunk *tc;
2866 if (context == NULL) {
2867 fprintf(file, "talloc no parents for NULL\n");
2868 return;
2871 tc = talloc_chunk_from_ptr(context);
2872 fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2873 while (tc) {
2874 fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2875 while (tc && tc->prev) tc = tc->prev;
2876 if (tc) {
2877 tc = tc->parent;
2880 fflush(file);
2884 return 1 if ptr is a parent of context
2886 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2888 struct talloc_chunk *tc;
2890 if (context == NULL) {
2891 return 0;
2894 tc = talloc_chunk_from_ptr(context);
2895 while (tc) {
2896 if (depth <= 0) {
2897 return 0;
2899 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2900 while (tc && tc->prev) tc = tc->prev;
2901 if (tc) {
2902 tc = tc->parent;
2903 depth--;
2906 return 0;
2910 return 1 if ptr is a parent of context
2912 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2914 return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2918 return the total size of memory used by this context and all children
2920 static inline size_t _talloc_total_limit_size(const void *ptr,
2921 struct talloc_memlimit *old_limit,
2922 struct talloc_memlimit *new_limit)
2924 return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2925 old_limit, new_limit);
2928 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2930 struct talloc_memlimit *l;
2932 for (l = limit; l != NULL; l = l->upper) {
2933 if (l->max_size != 0 &&
2934 ((l->max_size <= l->cur_size) ||
2935 (l->max_size - l->cur_size < size))) {
2936 return false;
2940 return true;
2944 Update memory limits when freeing a talloc_chunk.
2946 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2948 size_t limit_shrink_size;
2950 if (!tc->limit) {
2951 return;
2955 * Pool entries don't count. Only the pools
2956 * themselves are counted as part of the memory
2957 * limits. Note that this also takes care of
2958 * nested pools which have both flags
2959 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2961 if (tc->flags & TALLOC_FLAG_POOLMEM) {
2962 return;
2966 * If we are part of a memory-limited context hierarchy
2967 * we need to subtract the memory used from the counters.
2970 limit_shrink_size = tc->size+TC_HDR_SIZE;
2973 * If we're deallocating a pool, take into
2974 * account the prefix size added for the pool.
2977 if (tc->flags & TALLOC_FLAG_POOL) {
2978 limit_shrink_size += TP_HDR_SIZE;
2981 talloc_memlimit_shrink(tc->limit, limit_shrink_size);
2983 if (tc->limit->parent == tc) {
2984 free(tc->limit);
2987 tc->limit = NULL;
2991 Increase memory limit accounting after a malloc/realloc.
2993 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2994 size_t size)
2996 struct talloc_memlimit *l;
2998 for (l = limit; l != NULL; l = l->upper) {
2999 size_t new_cur_size = l->cur_size + size;
3000 if (new_cur_size < l->cur_size) {
3001 talloc_abort("logic error in talloc_memlimit_grow\n");
3002 return;
3004 l->cur_size = new_cur_size;
3009 Decrease memory limit accounting after a free/realloc.
3011 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
3012 size_t size)
3014 struct talloc_memlimit *l;
3016 for (l = limit; l != NULL; l = l->upper) {
3017 if (l->cur_size < size) {
3018 talloc_abort("logic error in talloc_memlimit_shrink\n");
3019 return;
3021 l->cur_size = l->cur_size - size;
3025 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
3027 struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
3028 struct talloc_memlimit *orig_limit;
3029 struct talloc_memlimit *limit = NULL;
3031 if (tc->limit && tc->limit->parent == tc) {
3032 tc->limit->max_size = max_size;
3033 return 0;
3035 orig_limit = tc->limit;
3037 limit = malloc(sizeof(struct talloc_memlimit));
3038 if (limit == NULL) {
3039 return 1;
3041 limit->parent = tc;
3042 limit->max_size = max_size;
3043 limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
3045 if (orig_limit) {
3046 limit->upper = orig_limit;
3047 } else {
3048 limit->upper = NULL;
3051 return 0;
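/*
  Illustrative sketch (not part of the library): capping a subtree.  Once the
  accumulated size (chunk headers included) would exceed the limit, further
  allocations under that context return NULL; other contexts are unaffected.

	TALLOC_CTX *limited = talloc_new(NULL);
	if (talloc_set_memlimit(limited, 64 * 1024) != 0) {
		// out of memory while installing the limit
	}
	char *ok  = talloc_size(limited, 1024);        // fits under the 64k limit
	char *bad = talloc_size(limited, 128 * 1024);  // NULL: would exceed the limit
*/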