/* Samba.git: lib/talloc/talloc.c (blob 073a3e50d4b327d0c444e248e8ed7f325badaf7c) */
/*
   Samba Unix SMB/CIFS implementation.

   Samba trivial allocation library - new interface

   NOTE: Please read talloc_guide.txt for full documentation

   Copyright (C) Andrew Tridgell 2004
   Copyright (C) Stefan Metzmacher 2006

     ** NOTE! The following LGPL license applies to the talloc
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

/*
  inspired by http://swapped.cc/halloc/
*/
33 #include "replace.h"
34 #include "talloc.h"
36 #ifdef HAVE_SYS_AUXV_H
37 #include <sys/auxv.h>
38 #endif
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
42 #endif
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
46 #endif
48 /* Special macros that are no-ops except when run under Valgrind on
49 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51 /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
54 #include <valgrind.h>
55 #endif
57 /* use this to force every realloc to change the pointer, to stress test
58 code that might not cope */
59 #define ALWAYS_REALLOC 0
62 #define MAX_TALLOC_SIZE 0x10000000
64 #define TALLOC_FLAG_FREE 0x01
65 #define TALLOC_FLAG_LOOP 0x02
66 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
67 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
70 * Bits above this are random, used to make it harder to fake talloc
71 * headers during an attack. Try not to change this without good reason.
73 #define TALLOC_FLAG_MASK 0x0F
75 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
77 #define TALLOC_MAGIC_BASE 0xe814ec70
78 #define TALLOC_MAGIC_NON_RANDOM ( \
79 ~TALLOC_FLAG_MASK & ( \
80 TALLOC_MAGIC_BASE + \
81 (TALLOC_BUILD_VERSION_MAJOR << 24) + \
82 (TALLOC_BUILD_VERSION_MINOR << 16) + \
83 (TALLOC_BUILD_VERSION_RELEASE << 8)))
84 static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
86 /* by default we abort when given a bad pointer (such as when talloc_free() is called
87 on a pointer that came from malloc() */
88 #ifndef TALLOC_ABORT
89 #define TALLOC_ABORT(reason) abort()
90 #endif
92 #ifndef discard_const_p
93 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
94 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
95 #else
96 # define discard_const_p(type, ptr) ((type *)(ptr))
97 #endif
98 #endif
/* these macros gain us a few percent of speed on gcc */
#if (__GNUC__ >= 3)
/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
   as its first argument */
#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#else
/* without gcc branch hints these degrade to plain expressions */
#ifndef likely
#define likely(x) (x)
#endif
#ifndef unlikely
#define unlikely(x) (x)
#endif
#endif
/* this null_context is only used if talloc_enable_leak_report() or
   talloc_enable_leak_report_full() is called, otherwise it remains
   NULL
*/
static void *null_context;
static bool talloc_report_null;
static bool talloc_report_null_full;
static void *autofree_context;

static void talloc_setup_atexit(void);

/* used to enable fill of memory on free, which can be useful for
 * catching use after free errors when valgrind is too slow
 */
static struct {
	bool initialised;	/* env var has been consulted */
	bool enabled;		/* fill-on-free is active */
	uint8_t fill_value;	/* byte written over freed memory */
} talloc_fill;

/* environment variable that enables fill-on-free (value = fill byte) */
#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
/*
 * do not wipe the header, to allow the
 * double-free logic to still work
 */
#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size; \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the whole chunk as not accessable */
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
	size_t _flen = TC_HDR_SIZE + (_tc)->size; \
	char *_fptr = (char *)(_tc); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while(0)
#else
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
#endif

#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
	TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
	TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
} while (0)

/* fill only the bytes a shrinking chunk gives back */
#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the unused bytes not accessable */
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while (0)
#else
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

/* like the SHRINK variants, but the memory stays owned (undefined, not
 * inaccessible) */
#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the unused bytes as undefined */
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the new bytes as undefined */
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
	size_t _new_used = TC_HDR_SIZE + (_new_size); \
	size_t _flen = _new_used - _old_used; \
	char *_fptr = _old_used + (char *)(_tc); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)
/* one secondary reference created by talloc_reference(); lives as a
 * talloc child of the referencing context */
struct talloc_reference_handle {
	struct talloc_reference_handle *next, *prev;
	void *ptr;		/* the referenced object */
	const char *location;	/* source location of the reference */
};

/* aggregate memory limit attached to a context subtree */
struct talloc_memlimit {
	struct talloc_chunk *parent;
	struct talloc_memlimit *upper;
	size_t max_size;
	size_t cur_size;
};

static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
				size_t size);
static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
				size_t size);
static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);

static inline void _tc_set_name_const(struct talloc_chunk *tc,
				const char *name);
static struct talloc_chunk *_vasprintf_tc(const void *t,
				const char *fmt,
				va_list ap);
typedef int (*talloc_destructor_t)(void *);

struct talloc_pool_hdr;

/* header placed immediately before every talloc allocation */
struct talloc_chunk {
	/*
	 * flags includes the talloc magic, which is randomised to
	 * make overwrite attacks harder
	 */
	unsigned flags;

	/*
	 * If you have a logical tree like:
	 *
	 *           <parent>
	 *           /   |   \
	 *          /    |    \
	 *         /     |     \
	 * <child 1> <child 2> <child 3>
	 *
	 * The actual talloc tree is:
	 *
	 *  <parent>
	 *     |
	 *  <child 1> - <child 2> - <child 3>
	 *
	 * The children are linked with next/prev pointers, and
	 * child 1 is linked to the parent with parent/child
	 * pointers.
	 */
	struct talloc_chunk *next, *prev;
	struct talloc_chunk *parent, *child;
	struct talloc_reference_handle *refs;
	talloc_destructor_t destructor;
	const char *name;
	size_t size;

	/*
	 * limit semantics:
	 * if 'limit' is set it means all *new* children of the context will
	 * be limited to a total aggregate size of max_size for memory
	 * allocations.
	 * cur_size is used to keep track of the current use
	 */
	struct talloc_memlimit *limit;

	/*
	 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
	 * is a pointer to the struct talloc_chunk of the pool that it was
	 * allocated from. This way children can quickly find the pool to chew
	 * from.
	 */
	struct talloc_pool_hdr *pool;
};

/* 16 byte alignment seems to keep everyone happy */
#define TC_ALIGN16(s) (((s)+15)&~15)
#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
325 _PUBLIC_ int talloc_version_major(void)
327 return TALLOC_VERSION_MAJOR;
330 _PUBLIC_ int talloc_version_minor(void)
332 return TALLOC_VERSION_MINOR;
335 _PUBLIC_ int talloc_test_get_magic(void)
337 return talloc_magic;
340 static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
341 const char *location)
344 * Mark this memory as free, and also over-stamp the talloc
345 * magic with the old-style magic.
347 * Why? This tries to avoid a memory read use-after-free from
348 * disclosing our talloc magic, which would then allow an
349 * attacker to prepare a valid header and so run a destructor.
352 tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
353 | (tc->flags & TALLOC_FLAG_MASK);
355 /* we mark the freed memory with where we called the free
356 * from. This means on a double free error we can report where
357 * the first free came from
359 if (location) {
360 tc->name = location;
364 static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
367 * Mark this memory as not free.
369 * Why? This is memory either in a pool (and so available for
370 * talloc's re-use or after the realloc(). We need to mark
371 * the memory as free() before any realloc() call as we can't
372 * write to the memory after that.
374 * We put back the normal magic instead of the 'not random'
375 * magic.
378 tc->flags = talloc_magic |
379 ((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
382 static void (*talloc_log_fn)(const char *message);
384 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
386 talloc_log_fn = log_fn;
#ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
/* library constructor: randomise talloc_magic before any allocation,
 * hardening talloc headers against being forged by an attacker */
void talloc_lib_init(void) __attribute__((constructor));
void talloc_lib_init(void)
{
	uint32_t random_value;
#if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
	uint8_t *p;
	/*
	 * Use the kernel-provided random values used for
	 * ASLR. This won't change per-exec, which is ideal for us
	 */
	p = (uint8_t *) getauxval(AT_RANDOM);
	if (p) {
		/*
		 * We get 16 bytes from getauxval. By calling rand(),
		 * a totally insecure PRNG, but one that will
		 * deterministically have a different value when called
		 * twice, we ensure that if two talloc-like libraries
		 * are somehow loaded in the same address space, that
		 * because we choose different bytes, we will keep the
		 * protection against collision of multiple talloc
		 * libs.
		 *
		 * This protection is important because the effects of
		 * passing a talloc pointer from one to the other may
		 * be very hard to determine.
		 */
		int offset = rand() % (16 - sizeof(random_value));
		memcpy(&random_value, p + offset, sizeof(random_value));
	} else
#endif
	{
		/*
		 * Otherwise, hope the location we are loaded in
		 * memory is randomised by someone else
		 */
		random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
	}
	/* keep the low flag bits clear for TALLOC_FLAG_* use */
	talloc_magic = random_value & ~TALLOC_FLAG_MASK;
}
#else
#warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
#endif
433 static void talloc_lib_atexit(void)
435 TALLOC_FREE(autofree_context);
437 if (talloc_total_size(null_context) == 0) {
438 return;
441 if (talloc_report_null_full) {
442 talloc_report_full(null_context, stderr);
443 } else if (talloc_report_null) {
444 talloc_report(null_context, stderr);
448 static void talloc_setup_atexit(void)
450 static bool done;
452 if (done) {
453 return;
456 atexit(talloc_lib_atexit);
457 done = true;
460 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
461 static void talloc_log(const char *fmt, ...)
463 va_list ap;
464 char *message;
466 if (!talloc_log_fn) {
467 return;
470 va_start(ap, fmt);
471 message = talloc_vasprintf(NULL, fmt, ap);
472 va_end(ap);
474 talloc_log_fn(message);
475 talloc_free(message);
478 static void talloc_log_stderr(const char *message)
480 fprintf(stderr, "%s", message);
483 _PUBLIC_ void talloc_set_log_stderr(void)
485 talloc_set_log_fn(talloc_log_stderr);
488 static void (*talloc_abort_fn)(const char *reason);
490 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
492 talloc_abort_fn = abort_fn;
495 static void talloc_abort(const char *reason)
497 talloc_log("%s\n", reason);
499 if (!talloc_abort_fn) {
500 TALLOC_ABORT(reason);
503 talloc_abort_fn(reason);
506 static void talloc_abort_access_after_free(void)
508 talloc_abort("Bad talloc magic value - access after free");
511 static void talloc_abort_unknown_value(void)
513 talloc_abort("Bad talloc magic value - unknown value");
516 /* panic if we get a bad magic value */
517 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
519 const char *pp = (const char *)ptr;
520 struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
521 if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
522 if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
523 == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
524 talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
525 talloc_abort_access_after_free();
526 return NULL;
529 talloc_abort_unknown_value();
530 return NULL;
532 return tc;
/* hook into the front of the list */
#define _TLIST_ADD(list, p) \
do { \
	if (!(list)) { \
		(list) = (p); \
		(p)->next = (p)->prev = NULL; \
	} else { \
		(list)->prev = (p); \
		(p)->next = (list); \
		(p)->prev = NULL; \
		(list) = (p); \
	} \
} while (0)

/* remove an element from a list - element doesn't have to be in list. */
#define _TLIST_REMOVE(list, p) \
do { \
	if ((p) == (list)) { \
		(list) = (p)->next; \
		if (list) (list)->prev = NULL; \
	} else { \
		if ((p)->prev) (p)->prev->next = (p)->next; \
		if ((p)->next) (p)->next->prev = (p)->prev; \
	} \
	if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
} while (0)
564 return the parent chunk of a pointer
566 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
568 struct talloc_chunk *tc;
570 if (unlikely(ptr == NULL)) {
571 return NULL;
574 tc = talloc_chunk_from_ptr(ptr);
575 while (tc->prev) tc=tc->prev;
577 return tc->parent;
580 _PUBLIC_ void *talloc_parent(const void *ptr)
582 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
583 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
587 find parents name
589 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
591 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
592 return tc? tc->name : NULL;
/*
  A pool carries an in-pool object count in the first 16 bytes.
  This is done to support talloc_steal() to a parent outside of the
  pool. The count includes the pool itself, so a talloc_free() on a pool will
  only destroy the pool if the count has dropped to zero. A talloc_free() of a
  pool member will reduce the count, and eventually also call free(3) on the
  pool memory.

  The object count is not put into "struct talloc_chunk" because it is only
  relevant for talloc pools and the alignment to 16 bytes would increase the
  memory footprint of each talloc chunk by those 16 bytes.
*/

struct talloc_pool_hdr {
	void *end;			/* first unused byte in the pool */
	unsigned int object_count;	/* live objects incl. the pool itself */
	size_t poolsize;		/* usable bytes in the pool */
};

#define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
616 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
618 return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
621 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
623 return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
626 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
628 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
629 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
632 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
634 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
637 /* If tc is inside a pool, this gives the next neighbour. */
638 static inline void *tc_next_chunk(struct talloc_chunk *tc)
640 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
643 static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
645 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
646 return tc_next_chunk(tc);
649 /* Mark the whole remaining pool as not accessable */
650 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
652 size_t flen = tc_pool_space_left(pool_hdr);
654 if (unlikely(talloc_fill.enabled)) {
655 memset(pool_hdr->end, talloc_fill.fill_value, flen);
658 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
659 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
660 #endif
664 Allocate from a pool
667 static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
668 size_t size, size_t prefix_len)
670 struct talloc_pool_hdr *pool_hdr = NULL;
671 size_t space_left;
672 struct talloc_chunk *result;
673 size_t chunk_size;
675 if (parent == NULL) {
676 return NULL;
679 if (parent->flags & TALLOC_FLAG_POOL) {
680 pool_hdr = talloc_pool_from_chunk(parent);
682 else if (parent->flags & TALLOC_FLAG_POOLMEM) {
683 pool_hdr = parent->pool;
686 if (pool_hdr == NULL) {
687 return NULL;
690 space_left = tc_pool_space_left(pool_hdr);
693 * Align size to 16 bytes
695 chunk_size = TC_ALIGN16(size + prefix_len);
697 if (space_left < chunk_size) {
698 return NULL;
701 result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
703 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
704 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
705 #endif
707 pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
709 result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
710 result->pool = pool_hdr;
712 pool_hdr->object_count++;
714 return result;
718 Allocate a bit of memory as a child of an existing pointer
720 static inline void *__talloc_with_prefix(const void *context,
721 size_t size,
722 size_t prefix_len,
723 struct talloc_chunk **tc_ret)
725 struct talloc_chunk *tc = NULL;
726 struct talloc_memlimit *limit = NULL;
727 size_t total_len = TC_HDR_SIZE + size + prefix_len;
728 struct talloc_chunk *parent = NULL;
730 if (unlikely(context == NULL)) {
731 context = null_context;
734 if (unlikely(size >= MAX_TALLOC_SIZE)) {
735 return NULL;
738 if (unlikely(total_len < TC_HDR_SIZE)) {
739 return NULL;
742 if (likely(context != NULL)) {
743 parent = talloc_chunk_from_ptr(context);
745 if (parent->limit != NULL) {
746 limit = parent->limit;
749 tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
752 if (tc == NULL) {
753 char *ptr;
756 * Only do the memlimit check/update on actual allocation.
758 if (!talloc_memlimit_check(limit, total_len)) {
759 errno = ENOMEM;
760 return NULL;
763 ptr = malloc(total_len);
764 if (unlikely(ptr == NULL)) {
765 return NULL;
767 tc = (struct talloc_chunk *)(ptr + prefix_len);
768 tc->flags = talloc_magic;
769 tc->pool = NULL;
771 talloc_memlimit_grow(limit, total_len);
774 tc->limit = limit;
775 tc->size = size;
776 tc->destructor = NULL;
777 tc->child = NULL;
778 tc->name = NULL;
779 tc->refs = NULL;
781 if (likely(context != NULL)) {
782 if (parent->child) {
783 parent->child->parent = NULL;
784 tc->next = parent->child;
785 tc->next->prev = tc;
786 } else {
787 tc->next = NULL;
789 tc->parent = parent;
790 tc->prev = NULL;
791 parent->child = tc;
792 } else {
793 tc->next = tc->prev = tc->parent = NULL;
796 *tc_ret = tc;
797 return TC_PTR_FROM_CHUNK(tc);
800 static inline void *__talloc(const void *context,
801 size_t size,
802 struct talloc_chunk **tc)
804 return __talloc_with_prefix(context, size, 0, tc);
808 * Create a talloc pool
811 static inline void *_talloc_pool(const void *context, size_t size)
813 struct talloc_chunk *tc;
814 struct talloc_pool_hdr *pool_hdr;
815 void *result;
817 result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
819 if (unlikely(result == NULL)) {
820 return NULL;
823 pool_hdr = talloc_pool_from_chunk(tc);
825 tc->flags |= TALLOC_FLAG_POOL;
826 tc->size = 0;
828 pool_hdr->object_count = 1;
829 pool_hdr->end = result;
830 pool_hdr->poolsize = size;
832 tc_invalidate_pool(pool_hdr);
834 return result;
837 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
839 return _talloc_pool(context, size);
843 * Create a talloc pool correctly sized for a basic size plus
844 * a number of subobjects whose total size is given. Essentially
845 * a custom allocator for talloc to reduce fragmentation.
848 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
849 size_t type_size,
850 const char *type_name,
851 unsigned num_subobjects,
852 size_t total_subobjects_size)
854 size_t poolsize, subobjects_slack, tmp;
855 struct talloc_chunk *tc;
856 struct talloc_pool_hdr *pool_hdr;
857 void *ret;
859 poolsize = type_size + total_subobjects_size;
861 if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
862 goto overflow;
865 if (num_subobjects == UINT_MAX) {
866 goto overflow;
868 num_subobjects += 1; /* the object body itself */
871 * Alignment can increase the pool size by at most 15 bytes per object
872 * plus alignment for the object itself
874 subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
875 if (subobjects_slack < num_subobjects) {
876 goto overflow;
879 tmp = poolsize + subobjects_slack;
880 if ((tmp < poolsize) || (tmp < subobjects_slack)) {
881 goto overflow;
883 poolsize = tmp;
885 ret = _talloc_pool(ctx, poolsize);
886 if (ret == NULL) {
887 return NULL;
890 tc = talloc_chunk_from_ptr(ret);
891 tc->size = type_size;
893 pool_hdr = talloc_pool_from_chunk(tc);
895 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
896 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
897 #endif
899 pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
901 _tc_set_name_const(tc, type_name);
902 return ret;
904 overflow:
905 return NULL;
909 setup a destructor to be called on free of a pointer
910 the destructor should return 0 on success, or -1 on failure.
911 if the destructor fails then the free is failed, and the memory can
912 be continued to be used
914 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
916 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
917 tc->destructor = destructor;
921 increase the reference count on a piece of memory.
923 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
925 if (unlikely(!talloc_reference(null_context, ptr))) {
926 return -1;
928 return 0;
932 helper for talloc_reference()
934 this is referenced by a function pointer and should not be inline
936 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
938 struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
939 _TLIST_REMOVE(ptr_tc->refs, handle);
940 return 0;
944 more efficient way to add a name to a pointer - the name must point to a
945 true string constant
947 static inline void _tc_set_name_const(struct talloc_chunk *tc,
948 const char *name)
950 tc->name = name;
954 internal talloc_named_const()
956 static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
958 void *ptr;
959 struct talloc_chunk *tc;
961 ptr = __talloc(context, size, &tc);
962 if (unlikely(ptr == NULL)) {
963 return NULL;
966 _tc_set_name_const(tc, name);
968 return ptr;
972 make a secondary reference to a pointer, hanging off the given context.
973 the pointer remains valid until both the original caller and this given
974 context are freed.
976 the major use for this is when two different structures need to reference the
977 same underlying data, and you want to be able to free the two instances separately,
978 and in either order
980 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
982 struct talloc_chunk *tc;
983 struct talloc_reference_handle *handle;
984 if (unlikely(ptr == NULL)) return NULL;
986 tc = talloc_chunk_from_ptr(ptr);
987 handle = (struct talloc_reference_handle *)_talloc_named_const(context,
988 sizeof(struct talloc_reference_handle),
989 TALLOC_MAGIC_REFERENCE);
990 if (unlikely(handle == NULL)) return NULL;
992 /* note that we hang the destructor off the handle, not the
993 main context as that allows the caller to still setup their
994 own destructor on the context if they want to */
995 talloc_set_destructor(handle, talloc_reference_destructor);
996 handle->ptr = discard_const_p(void, ptr);
997 handle->location = location;
998 _TLIST_ADD(tc->refs, handle);
999 return handle->ptr;
1002 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
1004 static inline void _tc_free_poolmem(struct talloc_chunk *tc,
1005 const char *location)
1007 struct talloc_pool_hdr *pool;
1008 struct talloc_chunk *pool_tc;
1009 void *next_tc;
1011 pool = tc->pool;
1012 pool_tc = talloc_chunk_from_pool(pool);
1013 next_tc = tc_next_chunk(tc);
1015 _talloc_chunk_set_free(tc, location);
1017 TC_INVALIDATE_FULL_CHUNK(tc);
1019 if (unlikely(pool->object_count == 0)) {
1020 talloc_abort("Pool object count zero!");
1021 return;
1024 pool->object_count--;
1026 if (unlikely(pool->object_count == 1
1027 && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
1029 * if there is just one object left in the pool
1030 * and pool->flags does not have TALLOC_FLAG_FREE,
1031 * it means this is the pool itself and
1032 * the rest is available for new objects
1033 * again.
1035 pool->end = tc_pool_first_chunk(pool);
1036 tc_invalidate_pool(pool);
1037 return;
1040 if (unlikely(pool->object_count == 0)) {
1042 * we mark the freed memory with where we called the free
1043 * from. This means on a double free error we can report where
1044 * the first free came from
1046 pool_tc->name = location;
1048 if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
1049 _tc_free_poolmem(pool_tc, location);
1050 } else {
1052 * The tc_memlimit_update_on_free()
1053 * call takes into account the
1054 * prefix TP_HDR_SIZE allocated before
1055 * the pool talloc_chunk.
1057 tc_memlimit_update_on_free(pool_tc);
1058 TC_INVALIDATE_FULL_CHUNK(pool_tc);
1059 free(pool);
1061 return;
1064 if (pool->end == next_tc) {
1066 * if pool->pool still points to end of
1067 * 'tc' (which is stored in the 'next_tc' variable),
1068 * we can reclaim the memory of 'tc'.
1070 pool->end = tc;
1071 return;
1075 * Do nothing. The memory is just "wasted", waiting for the pool
1076 * itself to be freed.
1080 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1081 void *ptr,
1082 const char *location);
1084 static inline int _talloc_free_internal(void *ptr, const char *location);
1087 internal free call that takes a struct talloc_chunk *.
1089 static inline int _tc_free_internal(struct talloc_chunk *tc,
1090 const char *location)
1092 void *ptr_to_free;
1093 void *ptr = TC_PTR_FROM_CHUNK(tc);
1095 if (unlikely(tc->refs)) {
1096 int is_child;
1097 /* check if this is a reference from a child or
1098 * grandchild back to it's parent or grandparent
1100 * in that case we need to remove the reference and
1101 * call another instance of talloc_free() on the current
1102 * pointer.
1104 is_child = talloc_is_parent(tc->refs, ptr);
1105 _talloc_free_internal(tc->refs, location);
1106 if (is_child) {
1107 return _talloc_free_internal(ptr, location);
1109 return -1;
1112 if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1113 /* we have a free loop - stop looping */
1114 return 0;
1117 if (unlikely(tc->destructor)) {
1118 talloc_destructor_t d = tc->destructor;
1121 * Protect the destructor against some overwrite
1122 * attacks, by explicitly checking it has the right
1123 * magic here.
1125 if (talloc_chunk_from_ptr(ptr) != tc) {
1127 * This can't actually happen, the
1128 * call itself will panic.
1130 TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1133 if (d == (talloc_destructor_t)-1) {
1134 return -1;
1136 tc->destructor = (talloc_destructor_t)-1;
1137 if (d(ptr) == -1) {
1139 * Only replace the destructor pointer if
1140 * calling the destructor didn't modify it.
1142 if (tc->destructor == (talloc_destructor_t)-1) {
1143 tc->destructor = d;
1145 return -1;
1147 tc->destructor = NULL;
1150 if (tc->parent) {
1151 _TLIST_REMOVE(tc->parent->child, tc);
1152 if (tc->parent->child) {
1153 tc->parent->child->parent = tc->parent;
1155 } else {
1156 if (tc->prev) tc->prev->next = tc->next;
1157 if (tc->next) tc->next->prev = tc->prev;
1158 tc->prev = tc->next = NULL;
1161 tc->flags |= TALLOC_FLAG_LOOP;
1163 _tc_free_children_internal(tc, ptr, location);
1165 _talloc_chunk_set_free(tc, location);
1167 if (tc->flags & TALLOC_FLAG_POOL) {
1168 struct talloc_pool_hdr *pool;
1170 pool = talloc_pool_from_chunk(tc);
1172 if (unlikely(pool->object_count == 0)) {
1173 talloc_abort("Pool object count zero!");
1174 return 0;
1177 pool->object_count--;
1179 if (likely(pool->object_count != 0)) {
1180 return 0;
1184 * With object_count==0, a pool becomes a normal piece of
1185 * memory to free. If it's allocated inside a pool, it needs
1186 * to be freed as poolmem, else it needs to be just freed.
1188 ptr_to_free = pool;
1189 } else {
1190 ptr_to_free = tc;
1193 if (tc->flags & TALLOC_FLAG_POOLMEM) {
1194 _tc_free_poolmem(tc, location);
1195 return 0;
1198 tc_memlimit_update_on_free(tc);
1200 TC_INVALIDATE_FULL_CHUNK(tc);
1201 free(ptr_to_free);
1202 return 0;
1206 internal talloc_free call
1208 static inline int _talloc_free_internal(void *ptr, const char *location)
1210 struct talloc_chunk *tc;
1212 if (unlikely(ptr == NULL)) {
1213 return -1;
1216 /* possibly initialised the talloc fill value */
1217 if (unlikely(!talloc_fill.initialised)) {
1218 const char *fill = getenv(TALLOC_FILL_ENV);
1219 if (fill != NULL) {
1220 talloc_fill.enabled = true;
1221 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1223 talloc_fill.initialised = true;
1226 tc = talloc_chunk_from_ptr(ptr);
1227 return _tc_free_internal(tc, location);
1230 static inline size_t _talloc_total_limit_size(const void *ptr,
1231 struct talloc_memlimit *old_limit,
1232 struct talloc_memlimit *new_limit);
/*
   move a lump of memory from one talloc context to another return the
   ptr on success, or NULL if it could not be transferred.
   passing NULL as ptr will always return NULL with no side effects.
*/
static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
{
	struct talloc_chunk *tc, *new_tc;
	size_t ctx_size = 0;

	if (unlikely(!ptr)) {
		return NULL;
	}

	if (unlikely(new_ctx == NULL)) {
		new_ctx = null_context;
	}

	tc = talloc_chunk_from_ptr(ptr);

	/* if the chunk is under a memory limit, the whole subtree's
	   accounted size moves with it */
	if (tc->limit != NULL) {

		ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);

		/* Decrement the memory limit from the source .. */
		talloc_memlimit_shrink(tc->limit->upper, ctx_size);

		if (tc->limit->parent == tc) {
			/* tc owns this limit: keep it but detach it from
			   the old upper limit chain */
			tc->limit->upper = NULL;
		} else {
			/* tc only inherited the limit: drop it */
			tc->limit = NULL;
		}
	}

	/* new_ctx may still be NULL when null tracking is disabled:
	   unhook tc from its parent/sibling list and leave it orphaned */
	if (unlikely(new_ctx == NULL)) {
		if (tc->parent) {
			_TLIST_REMOVE(tc->parent->child, tc);
			if (tc->parent->child) {
				/* the list head carries the parent back-pointer */
				tc->parent->child->parent = tc->parent;
			}
		} else {
			if (tc->prev) tc->prev->next = tc->next;
			if (tc->next) tc->next->prev = tc->prev;
		}

		tc->parent = tc->next = tc->prev = NULL;
		return discard_const_p(void, ptr);
	}

	new_tc = talloc_chunk_from_ptr(new_ctx);

	/* stealing onto itself or onto its current parent is a no-op */
	if (unlikely(tc == new_tc || tc->parent == new_tc)) {
		return discard_const_p(void, ptr);
	}

	if (tc->parent) {
		_TLIST_REMOVE(tc->parent->child, tc);
		if (tc->parent->child) {
			tc->parent->child->parent = tc->parent;
		}
	} else {
		if (tc->prev) tc->prev->next = tc->next;
		if (tc->next) tc->next->prev = tc->prev;
		tc->prev = tc->next = NULL;
	}

	tc->parent = new_tc;
	/* only the head of a child list holds the parent pointer */
	if (new_tc->child) new_tc->child->parent = NULL;
	_TLIST_ADD(new_tc->child, tc);

	if (tc->limit || new_tc->limit) {
		ctx_size = _talloc_total_limit_size(ptr, tc->limit,
						    new_tc->limit);
		/* .. and increment it in the destination. */
		if (new_tc->limit) {
			talloc_memlimit_grow(new_tc->limit, ctx_size);
		}
	}

	return discard_const_p(void, ptr);
}
1317 move a lump of memory from one talloc context to another return the
1318 ptr on success, or NULL if it could not be transferred.
1319 passing NULL as ptr will always return NULL with no side effects.
1321 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1323 struct talloc_chunk *tc;
1325 if (unlikely(ptr == NULL)) {
1326 return NULL;
1329 tc = talloc_chunk_from_ptr(ptr);
1331 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1332 struct talloc_reference_handle *h;
1334 talloc_log("WARNING: talloc_steal with references at %s\n",
1335 location);
1337 for (h=tc->refs; h; h=h->next) {
1338 talloc_log("\treference at %s\n",
1339 h->location);
1343 #if 0
1344 /* this test is probably too expensive to have on in the
1345 normal build, but it useful for debugging */
1346 if (talloc_is_parent(new_ctx, ptr)) {
1347 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1349 #endif
1351 return _talloc_steal_internal(new_ctx, ptr);
1355 this is like a talloc_steal(), but you must supply the old
1356 parent. This resolves the ambiguity in a talloc_steal() which is
1357 called on a context that has more than one parent (via references)
1359 The old parent can be either a reference or a parent
1361 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1363 struct talloc_chunk *tc;
1364 struct talloc_reference_handle *h;
1366 if (unlikely(ptr == NULL)) {
1367 return NULL;
1370 if (old_parent == talloc_parent(ptr)) {
1371 return _talloc_steal_internal(new_parent, ptr);
1374 tc = talloc_chunk_from_ptr(ptr);
1375 for (h=tc->refs;h;h=h->next) {
1376 if (talloc_parent(h) == old_parent) {
1377 if (_talloc_steal_internal(new_parent, h) != h) {
1378 return NULL;
1380 return discard_const_p(void, ptr);
1384 /* it wasn't a parent */
1385 return NULL;
/*
  remove a secondary reference to a pointer. This undo's what
  talloc_reference() has done. The context and pointer arguments
  must match those given to a talloc_reference()
*/
static inline int talloc_unreference(const void *context, const void *ptr)
{
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
	struct talloc_reference_handle *h;

	if (unlikely(context == NULL)) {
		context = null_context;
	}

	/* walk the reference handles looking for one parented by 'context' */
	for (h=tc->refs;h;h=h->next) {
		struct talloc_chunk *p = talloc_parent_chunk(h);
		if (p == NULL) {
			/* an orphaned handle only matches a NULL context */
			if (context == NULL) break;
		} else if (TC_PTR_FROM_CHUNK(p) == context) {
			break;
		}
	}
	if (h == NULL) {
		/* no reference from this context was found */
		return -1;
	}

	/* freeing the handle is what removes the reference */
	return _talloc_free_internal(h, __location__);
}
/*
  remove a specific parent context from a pointer. This is a more
  controlled variant of talloc_free()
*/
_PUBLIC_ int talloc_unlink(const void *context, void *ptr)
{
	struct talloc_chunk *tc_p, *new_p, *tc_c;
	void *new_parent;

	if (ptr == NULL) {
		return -1;
	}

	if (context == NULL) {
		context = null_context;
	}

	/* if context only held a reference, dropping it is enough */
	if (talloc_unreference(context, ptr) == 0) {
		return 0;
	}

	/* otherwise context must be the actual parent of ptr */
	if (context != NULL) {
		tc_c = talloc_chunk_from_ptr(context);
	} else {
		tc_c = NULL;
	}
	if (tc_c != talloc_parent_chunk(ptr)) {
		return -1;
	}

	tc_p = talloc_chunk_from_ptr(ptr);

	/* no references remain: this is a plain free */
	if (tc_p->refs == NULL) {
		return _talloc_free_internal(ptr, __location__);
	}

	/* promote the holder of the first remaining reference to be
	   the new parent */
	new_p = talloc_parent_chunk(tc_p->refs);
	if (new_p) {
		new_parent = TC_PTR_FROM_CHUNK(new_p);
	} else {
		new_parent = NULL;
	}

	if (talloc_unreference(new_parent, ptr) != 0) {
		return -1;
	}

	_talloc_steal_internal(new_parent, ptr);

	return 0;
}
1470 add a name to an existing pointer - va_list version
1472 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1473 const char *fmt,
1474 va_list ap) PRINTF_ATTRIBUTE(2,0);
1476 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1477 const char *fmt,
1478 va_list ap)
1480 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1481 fmt,
1482 ap);
1483 if (likely(name_tc)) {
1484 tc->name = TC_PTR_FROM_CHUNK(name_tc);
1485 _tc_set_name_const(name_tc, ".name");
1486 } else {
1487 tc->name = NULL;
1489 return tc->name;
1493 add a name to an existing pointer
1495 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1497 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1498 const char *name;
1499 va_list ap;
1500 va_start(ap, fmt);
1501 name = tc_set_name_v(tc, fmt, ap);
1502 va_end(ap);
1503 return name;
1508 create a named talloc pointer. Any talloc pointer can be named, and
1509 talloc_named() operates just like talloc() except that it allows you
1510 to name the pointer.
1512 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1514 va_list ap;
1515 void *ptr;
1516 const char *name;
1517 struct talloc_chunk *tc;
1519 ptr = __talloc(context, size, &tc);
1520 if (unlikely(ptr == NULL)) return NULL;
1522 va_start(ap, fmt);
1523 name = tc_set_name_v(tc, fmt, ap);
1524 va_end(ap);
1526 if (unlikely(name == NULL)) {
1527 _talloc_free_internal(ptr, __location__);
1528 return NULL;
1531 return ptr;
1535 return the name of a talloc ptr, or "UNNAMED"
1537 static inline const char *__talloc_get_name(const void *ptr)
1539 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1540 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1541 return ".reference";
1543 if (likely(tc->name)) {
1544 return tc->name;
1546 return "UNNAMED";
1549 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1551 return __talloc_get_name(ptr);
1555 check if a pointer has the given name. If it does, return the pointer,
1556 otherwise return NULL
1558 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1560 const char *pname;
1561 if (unlikely(ptr == NULL)) return NULL;
1562 pname = __talloc_get_name(ptr);
1563 if (likely(pname == name || strcmp(pname, name) == 0)) {
1564 return discard_const_p(void, ptr);
1566 return NULL;
1569 static void talloc_abort_type_mismatch(const char *location,
1570 const char *name,
1571 const char *expected)
1573 const char *reason;
1575 reason = talloc_asprintf(NULL,
1576 "%s: Type mismatch: name[%s] expected[%s]",
1577 location,
1578 name?name:"NULL",
1579 expected);
1580 if (!reason) {
1581 reason = "Type mismatch";
1584 talloc_abort(reason);
1587 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1589 const char *pname;
1591 if (unlikely(ptr == NULL)) {
1592 talloc_abort_type_mismatch(location, NULL, name);
1593 return NULL;
1596 pname = __talloc_get_name(ptr);
1597 if (likely(pname == name || strcmp(pname, name) == 0)) {
1598 return discard_const_p(void, ptr);
1601 talloc_abort_type_mismatch(location, pname, name);
1602 return NULL;
1606 this is for compatibility with older versions of talloc
1608 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1610 va_list ap;
1611 void *ptr;
1612 const char *name;
1613 struct talloc_chunk *tc;
1615 ptr = __talloc(NULL, 0, &tc);
1616 if (unlikely(ptr == NULL)) return NULL;
1618 va_start(ap, fmt);
1619 name = tc_set_name_v(tc, fmt, ap);
1620 va_end(ap);
1622 if (unlikely(name == NULL)) {
1623 _talloc_free_internal(ptr, __location__);
1624 return NULL;
1627 return ptr;
/*
  free every child of tc; a child whose destructor refuses the free is
  reparented instead of being freed
*/
static inline void _tc_free_children_internal(struct talloc_chunk *tc,
						  void *ptr,
						  const char *location)
{
	while (tc->child) {
		/* we need to work out who will own an abandoned child
		   if it cannot be freed. In priority order, the first
		   choice is owner of any remaining reference to this
		   pointer, the second choice is our parent, and the
		   final choice is the null context. */
		void *child = TC_PTR_FROM_CHUNK(tc->child);
		const void *new_parent = null_context;
		if (unlikely(tc->child->refs)) {
			struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
			if (p) new_parent = TC_PTR_FROM_CHUNK(p);
		}
		if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
			if (talloc_parent_chunk(child) != tc) {
				/*
				 * Destructor already reparented this child.
				 * No further reparenting needed.
				 */
				continue;
			}
			if (new_parent == null_context) {
				struct talloc_chunk *p = talloc_parent_chunk(ptr);
				if (p) new_parent = TC_PTR_FROM_CHUNK(p);
			}
			_talloc_steal_internal(new_parent, child);
		}
	}
}
/*
  this is a replacement for the Samba3 talloc_destroy_pool functionality. It
  should probably not be used in new code. It's in here to keep the talloc
  code consistent across Samba 3 and 4.
*/
_PUBLIC_ void talloc_free_children(void *ptr)
{
	struct talloc_chunk *tc_name = NULL;
	struct talloc_chunk *tc;

	if (unlikely(ptr == NULL)) {
		return;
	}

	tc = talloc_chunk_from_ptr(ptr);

	/* we do not want to free the context name if it is a child .. */
	if (likely(tc->child)) {
		/* find the child chunk that backs tc->name, if any */
		for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
			if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
		}
		if (tc_name) {
			/* temporarily unhook it so it survives the mass free */
			_TLIST_REMOVE(tc->child, tc_name);
			if (tc->child) {
				tc->child->parent = tc;
			}
		}
	}

	_tc_free_children_internal(tc, ptr, __location__);

	/* .. so we put it back after all other children have been freed */
	if (tc_name) {
		if (tc->child) {
			tc->child->parent = NULL;
		}
		tc_name->parent = tc;
		_TLIST_ADD(tc->child, tc_name);
	}
}
1705 Allocate a bit of memory as a child of an existing pointer
1707 _PUBLIC_ void *_talloc(const void *context, size_t size)
1709 struct talloc_chunk *tc;
1710 return __talloc(context, size, &tc);
1714 externally callable talloc_set_name_const()
1716 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1718 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1722 create a named talloc pointer. Any talloc pointer can be named, and
1723 talloc_named() operates just like talloc() except that it allows you
1724 to name the pointer.
1726 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1728 return _talloc_named_const(context, size, name);
/*
   free a talloc pointer. This also frees all child pointers of this
   pointer recursively

   return 0 if the memory is actually freed, otherwise -1. The memory
   will not be freed if the ref_count is > 1 or the destructor (if
   any) returns non-zero
*/
_PUBLIC_ int _talloc_free(void *ptr, const char *location)
{
	struct talloc_chunk *tc;

	if (unlikely(ptr == NULL)) {
		return -1;
	}

	tc = talloc_chunk_from_ptr(ptr);

	if (unlikely(tc->refs != NULL)) {
		struct talloc_reference_handle *h;

		if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
			/* in this case we do know which parent should
			   get this pointer, as there is really only
			   one parent */
			return talloc_unlink(null_context, ptr);
		}

		/* ambiguous: several owners exist, refuse and log them all */
		talloc_log("ERROR: talloc_free with references at %s\n",
			   location);

		for (h=tc->refs; h; h=h->next) {
			talloc_log("\treference at %s\n",
				   h->location);
		}
		return -1;
	}

	return _talloc_free_internal(ptr, location);
}
/*
  A talloc version of realloc. The context argument is only used if
  ptr is NULL
*/
_PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
{
	struct talloc_chunk *tc;
	void *new_ptr;
	bool malloced = false;
	struct talloc_pool_hdr *pool_hdr = NULL;
	size_t old_size = 0;
	size_t new_size = 0;

	/* size zero is equivalent to free() */
	if (unlikely(size == 0)) {
		talloc_unlink(context, ptr);
		return NULL;
	}

	if (unlikely(size >= MAX_TALLOC_SIZE)) {
		return NULL;
	}

	/* realloc(NULL) is equivalent to malloc() */
	if (ptr == NULL) {
		return _talloc_named_const(context, size, name);
	}

	tc = talloc_chunk_from_ptr(ptr);

	/* don't allow realloc on referenced pointers */
	if (unlikely(tc->refs)) {
		return NULL;
	}

	/* don't let anybody try to realloc a talloc_pool */
	if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
		return NULL;
	}

	/* growing: make sure the enclosing memory limit allows it */
	if (tc->limit && (size > tc->size)) {
		if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
			errno = ENOMEM;
			return NULL;
		}
	}

	/* handle realloc inside a talloc_pool */
	if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
		pool_hdr = tc->pool;
	}

#if (ALWAYS_REALLOC == 0)
	/* don't shrink if we have less than 1k to gain */
	if (size < tc->size && tc->limit == NULL) {
		if (pool_hdr) {
			void *next_tc = tc_next_chunk(tc);
			TC_INVALIDATE_SHRINK_CHUNK(tc, size);
			tc->size = size;
			if (next_tc == pool_hdr->end) {
				/* note: tc->size has changed, so this works */
				pool_hdr->end = tc_next_chunk(tc);
			}
			return ptr;
		} else if ((tc->size - size) < 1024) {
			/*
			 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
			 * we would need to call TC_UNDEFINE_GROW_CHUNK()
			 * after each realloc call, which slows down
			 * testing a lot :-(.
			 *
			 * That is why we only mark memory as undefined here.
			 */
			TC_UNDEFINE_SHRINK_CHUNK(tc, size);

			/* do not shrink if we have less than 1k to gain */
			tc->size = size;
			return ptr;
		}
	} else if (tc->size == size) {
		/*
		 * do not change the pointer if it is exactly
		 * the same size.
		 */
		return ptr;
	}
#endif

	/*
	 * by resetting magic we catch users of the old memory
	 *
	 * We mark this memory as free, and also over-stamp the talloc
	 * magic with the old-style magic.
	 *
	 * Why? This tries to avoid a memory read use-after-free from
	 * disclosing our talloc magic, which would then allow an
	 * attacker to prepare a valid header and so run a destructor.
	 *
	 * What else? We have to re-stamp back a valid normal magic
	 * on this memory once realloc() is done, as it will have done
	 * a memcpy() into the new valid memory. We can't do this in
	 * reverse as that would be a real use-after-free.
	 */
	_talloc_chunk_set_free(tc, NULL);

#if ALWAYS_REALLOC
	/* stress-test mode: always move the allocation */
	if (pool_hdr) {
		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
		pool_hdr->object_count--;

		if (new_ptr == NULL) {
			new_ptr = malloc(TC_HDR_SIZE+size);
			malloced = true;
			new_size = size;
		}

		if (new_ptr) {
			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
			TC_INVALIDATE_FULL_CHUNK(tc);
		}
	} else {
		/* We're doing malloc then free here, so record the difference. */
		old_size = tc->size;
		new_size = size;
		new_ptr = malloc(size + TC_HDR_SIZE);
		if (new_ptr) {
			memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
			free(tc);
		}
	}
#else
	if (pool_hdr) {
		struct talloc_chunk *pool_tc;
		void *next_tc = tc_next_chunk(tc);
		size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
		size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
		size_t space_needed;
		size_t space_left;
		unsigned int chunk_count = pool_hdr->object_count;

		pool_tc = talloc_chunk_from_pool(pool_hdr);
		if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
			chunk_count -= 1;
		}

		if (chunk_count == 1) {
			/*
			 * optimize for the case where 'tc' is the only
			 * chunk in the pool.
			 */
			char *start = tc_pool_first_chunk(pool_hdr);
			space_needed = new_chunk_size;
			space_left = (char *)tc_pool_end(pool_hdr) - start;

			if (space_left >= space_needed) {
				size_t old_used = TC_HDR_SIZE + tc->size;
				size_t new_used = TC_HDR_SIZE + size;
				new_ptr = start;

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
				{
					/*
					 * The area from
					 * start -> tc may have
					 * been freed and thus been marked as
					 * VALGRIND_MEM_NOACCESS. Set it to
					 * VALGRIND_MEM_UNDEFINED so we can
					 * copy into it without valgrind errors.
					 * We can't just mark
					 * new_ptr -> new_ptr + old_used
					 * as this may overlap on top of tc,
					 * (which is why we use memmove, not
					 * memcpy below) hence the MIN.
					 */
					size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
					VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
				}
#endif

				memmove(new_ptr, tc, old_used);

				tc = (struct talloc_chunk *)new_ptr;
				TC_UNDEFINE_GROW_CHUNK(tc, size);

				/*
				 * first we do not align the pool pointer
				 * because we want to invalidate the padding
				 * too.
				 */
				pool_hdr->end = new_used + (char *)new_ptr;
				tc_invalidate_pool(pool_hdr);

				/* now the aligned pointer */
				pool_hdr->end = new_chunk_size + (char *)new_ptr;
				goto got_new_ptr;
			}

			next_tc = NULL;
		}

		if (new_chunk_size == old_chunk_size) {
			/* same rounded size: grow in place */
			TC_UNDEFINE_GROW_CHUNK(tc, size);
			_talloc_chunk_set_not_free(tc);
			tc->size = size;
			return ptr;
		}

		if (next_tc == pool_hdr->end) {
			/*
			 * optimize for the case where 'tc' is the last
			 * chunk in the pool.
			 */
			space_needed = new_chunk_size - old_chunk_size;
			space_left = tc_pool_space_left(pool_hdr);

			if (space_left >= space_needed) {
				TC_UNDEFINE_GROW_CHUNK(tc, size);
				_talloc_chunk_set_not_free(tc);
				tc->size = size;
				pool_hdr->end = tc_next_chunk(tc);
				return ptr;
			}
		}

		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);

		if (new_ptr == NULL) {
			/* pool exhausted: fall back to the heap */
			new_ptr = malloc(TC_HDR_SIZE+size);
			malloced = true;
			new_size = size;
		}

		if (new_ptr) {
			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);

			_tc_free_poolmem(tc, __location__ "_talloc_realloc");
		}
	}
	else {
		/* We're doing realloc here, so record the difference. */
		old_size = tc->size;
		new_size = size;
		new_ptr = realloc(tc, size + TC_HDR_SIZE);
	}
got_new_ptr:
#endif

	if (unlikely(!new_ptr)) {
		/*
		 * Ok, this is a strange spot. We have to put back
		 * the old talloc_magic and any flags, except the
		 * TALLOC_FLAG_FREE as this was not free'ed by the
		 * realloc() call after all
		 */
		_talloc_chunk_set_not_free(tc);
		return NULL;
	}

	/*
	 * tc is now the new value from realloc(), the old memory we
	 * can't access any more and was preemptively marked as
	 * TALLOC_FLAG_FREE before the call. Now we mark it as not
	 * free again
	 */
	tc = (struct talloc_chunk *)new_ptr;
	_talloc_chunk_set_not_free(tc);
	if (malloced) {
		tc->flags &= ~TALLOC_FLAG_POOLMEM;
	}
	/* the chunk moved: repair all neighbour links that point at it */
	if (tc->parent) {
		tc->parent->child = tc;
	}
	if (tc->child) {
		tc->child->parent = tc;
	}

	if (tc->prev) {
		tc->prev->next = tc;
	}
	if (tc->next) {
		tc->next->prev = tc;
	}

	if (new_size > old_size) {
		talloc_memlimit_grow(tc->limit, new_size - old_size);
	} else if (new_size < old_size) {
		talloc_memlimit_shrink(tc->limit, old_size - new_size);
	}

	tc->size = size;
	_tc_set_name_const(tc, name);

	return TC_PTR_FROM_CHUNK(tc);
}
2069 a wrapper around talloc_steal() for situations where you are moving a pointer
2070 between two structures, and want the old pointer to be set to NULL
2072 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2074 const void **pptr = discard_const_p(const void *,_pptr);
2075 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
2076 (*pptr) = NULL;
2077 return ret;
/* selects what _talloc_total_mem_internal() accumulates over a subtree */
enum talloc_mem_count_type {
	TOTAL_MEM_SIZE,		/* sum of chunk payload sizes */
	TOTAL_MEM_BLOCKS,	/* number of chunks */
	TOTAL_MEM_LIMIT,	/* bytes counted against a memory limit */
};
/*
  walk a talloc subtree accumulating a metric chosen by 'type'; also
  (optionally) rewires memlimit pointers from old_limit to new_limit
  while walking, which is how talloc_steal() re-accounts a subtree
*/
static inline size_t _talloc_total_mem_internal(const void *ptr,
					    enum talloc_mem_count_type type,
					    struct talloc_memlimit *old_limit,
					    struct talloc_memlimit *new_limit)
{
	size_t total = 0;
	struct talloc_chunk *c, *tc;

	if (ptr == NULL) {
		ptr = null_context;
	}
	if (ptr == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(ptr);

	if (old_limit || new_limit) {
		if (tc->limit && tc->limit->upper == old_limit) {
			tc->limit->upper = new_limit;
		}
	}

	/* optimize in the memlimits case */
	if (type == TOTAL_MEM_LIMIT &&
	    tc->limit != NULL &&
	    tc->limit != old_limit &&
	    tc->limit->parent == tc) {
		/* a nested limit context already tracks its own total */
		return tc->limit->cur_size;
	}

	/* LOOP flag guards against reference cycles during the walk */
	if (tc->flags & TALLOC_FLAG_LOOP) {
		return 0;
	}

	tc->flags |= TALLOC_FLAG_LOOP;

	if (old_limit || new_limit) {
		if (old_limit == tc->limit) {
			tc->limit = new_limit;
		}
	}

	switch (type) {
	case TOTAL_MEM_SIZE:
		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
			total = tc->size;
		}
		break;
	case TOTAL_MEM_BLOCKS:
		total++;
		break;
	case TOTAL_MEM_LIMIT:
		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
			/*
			 * Don't count memory allocated from a pool
			 * when calculating limits. Only count the
			 * pool itself.
			 */
			if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
				if (tc->flags & TALLOC_FLAG_POOL) {
					/*
					 * If this is a pool, the allocated
					 * size is in the pool header, and
					 * remember to add in the prefix
					 * length.
					 */
					struct talloc_pool_hdr *pool_hdr
							= talloc_pool_from_chunk(tc);
					total = pool_hdr->poolsize +
							TC_HDR_SIZE +
							TP_HDR_SIZE;
				} else {
					total = tc->size + TC_HDR_SIZE;
				}
			}
		}
		break;
	}
	for (c = tc->child; c; c = c->next) {
		total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
						    old_limit, new_limit);
	}

	tc->flags &= ~TALLOC_FLAG_LOOP;

	return total;
}
/*
  return the total size of a talloc pool (subtree)
*/
_PUBLIC_ size_t talloc_total_size(const void *ptr)
{
	return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
}

/*
  return the total number of blocks in a talloc pool (subtree)
*/
_PUBLIC_ size_t talloc_total_blocks(const void *ptr)
{
	return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
}
2192 return the number of external references to a pointer
2194 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2196 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2197 struct talloc_reference_handle *h;
2198 size_t ret = 0;
2200 for (h=tc->refs;h;h=h->next) {
2201 ret++;
2203 return ret;
/*
  report on memory usage by all children of a pointer, giving a full tree view
*/
_PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
			    void (*callback)(const void *ptr,
					     int depth, int max_depth,
					     int is_ref,
					     void *private_data),
			    void *private_data)
{
	struct talloc_chunk *c, *tc;

	if (ptr == NULL) {
		ptr = null_context;
	}
	if (ptr == NULL) return;

	tc = talloc_chunk_from_ptr(ptr);

	/* already being visited higher up the recursion: avoid cycles */
	if (tc->flags & TALLOC_FLAG_LOOP) {
		return;
	}

	callback(ptr, depth, max_depth, 0, private_data);

	/* max_depth < 0 means unlimited depth */
	if (max_depth >= 0 && depth >= max_depth) {
		return;
	}

	tc->flags |= TALLOC_FLAG_LOOP;
	for (c=tc->child;c;c=c->next) {
		if (c->name == TALLOC_MAGIC_REFERENCE) {
			/* report references without recursing into them */
			struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
			callback(h->ptr, depth + 1, max_depth, 1, private_data);
		} else {
			talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
		}
	}
	tc->flags &= ~TALLOC_FLAG_LOOP;
}
/* callback for talloc_report_depth_file(): print one node of the
   allocation tree to the FILE passed via _f */
static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
{
	const char *name = __talloc_get_name(ptr);
	struct talloc_chunk *tc;
	FILE *f = (FILE *)_f;

	if (is_ref) {
		fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
		return;
	}

	tc = talloc_chunk_from_ptr(ptr);
	if (tc->limit && tc->limit->parent == tc) {
		fprintf(f, "%*s%-30s is a memlimit context"
			" (max_size = %lu bytes, cur_size = %lu bytes)\n",
			depth*4, "",
			name,
			(unsigned long)tc->limit->max_size,
			(unsigned long)tc->limit->cur_size);
	}

	/* depth 0 is the report header line */
	if (depth == 0) {
		fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
			(max_depth < 0 ? "full " :""), name,
			(unsigned long)talloc_total_size(ptr),
			(unsigned long)talloc_total_blocks(ptr));
		return;
	}

	fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
		depth*4, "",
		name,
		(unsigned long)talloc_total_size(ptr),
		(unsigned long)talloc_total_blocks(ptr),
		(int)talloc_reference_count(ptr), ptr);

#if 0
	fprintf(f, "content: ");
	if (talloc_total_size(ptr)) {
		int tot = talloc_total_size(ptr);
		int i;

		for (i = 0; i < tot; i++) {
			if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
				fprintf(f, "%c", ((char *)ptr)[i]);
			} else {
				fprintf(f, "~%02x", ((char *)ptr)[i]);
			}
		}
	}
	fprintf(f, "\n");
#endif
}
2302 report on memory usage by all children of a pointer, giving a full tree view
2304 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2306 if (f) {
2307 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2308 fflush(f);
2313 report on memory usage by all children of a pointer, giving a full tree view
2315 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2317 talloc_report_depth_file(ptr, 0, -1, f);
2321 report on memory usage by all children of a pointer
2323 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2325 talloc_report_depth_file(ptr, 0, 1, f);
2329 enable tracking of the NULL context
2331 _PUBLIC_ void talloc_enable_null_tracking(void)
2333 if (null_context == NULL) {
2334 null_context = _talloc_named_const(NULL, 0, "null_context");
2335 if (autofree_context != NULL) {
2336 talloc_reparent(NULL, null_context, autofree_context);
2342 enable tracking of the NULL context, not moving the autofree context
2343 into the NULL context. This is needed for the talloc testsuite
2345 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2347 if (null_context == NULL) {
2348 null_context = _talloc_named_const(NULL, 0, "null_context");
/*
  disable tracking of the NULL context
*/
_PUBLIC_ void talloc_disable_null_tracking(void)
{
	if (null_context != NULL) {
		/* we have to move any children onto the real NULL
		   context */
		struct talloc_chunk *tc, *tc2;
		tc = talloc_chunk_from_ptr(null_context);
		/* detach every child (head list and sibling list) so the
		   free below only releases the tracking context itself */
		for (tc2 = tc->child; tc2; tc2=tc2->next) {
			if (tc2->parent == tc) tc2->parent = NULL;
			if (tc2->prev == tc) tc2->prev = NULL;
		}
		for (tc2 = tc->next; tc2; tc2=tc2->next) {
			if (tc2->parent == tc) tc2->parent = NULL;
			if (tc2->prev == tc) tc2->prev = NULL;
		}
		tc->child = NULL;
		tc->next = NULL;
	}
	talloc_free(null_context);
	null_context = NULL;
}
/*
  enable leak reporting on exit
*/
_PUBLIC_ void talloc_enable_leak_report(void)
{
	/* leak reporting requires the NULL context to be tracked */
	talloc_enable_null_tracking();
	talloc_report_null = true;
	talloc_setup_atexit();
}

/*
  enable full leak reporting on exit
*/
_PUBLIC_ void talloc_enable_leak_report_full(void)
{
	talloc_enable_null_tracking();
	talloc_report_null_full = true;
	talloc_setup_atexit();
}
2398 talloc and zero memory.
2400 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2402 void *p = _talloc_named_const(ctx, size, name);
2404 if (p) {
2405 memset(p, '\0', size);
2408 return p;
2412 memdup with a talloc.
2414 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2416 void *newp = NULL;
2418 if (likely(size > 0) && unlikely(p == NULL)) {
2419 return NULL;
2422 newp = _talloc_named_const(t, size, name);
2423 if (likely(newp != NULL) && likely(size > 0)) {
2424 memcpy(newp, p, size);
2427 return newp;
/* shared core of talloc_strdup()/talloc_strndup(): copy len bytes of p
   into a fresh NUL-terminated child of t, named after its contents */
static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
{
	char *dup = NULL;
	struct talloc_chunk *chunk = NULL;

	dup = (char *)__talloc(t, len + 1, &chunk);
	if (unlikely(dup == NULL)) {
		return NULL;
	}

	memcpy(dup, p, len);
	dup[len] = 0;

	_tc_set_name_const(chunk, dup);
	return dup;
}
2446 strdup with a talloc
2448 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2450 if (unlikely(!p)) return NULL;
2451 return __talloc_strlendup(t, p, strlen(p));
2455 strndup with a talloc
2457 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2459 if (unlikely(!p)) return NULL;
2460 return __talloc_strlendup(t, p, strnlen(p, n));
2463 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2464 const char *a, size_t alen)
2466 char *ret;
2468 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2469 if (unlikely(!ret)) return NULL;
2471 /* append the string and the trailing \0 */
2472 memcpy(&ret[slen], a, alen);
2473 ret[slen+alen] = 0;
2475 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2476 return ret;
2480 * Appends at the end of the string.
2482 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2484 if (unlikely(!s)) {
2485 return talloc_strdup(NULL, a);
2488 if (unlikely(!a)) {
2489 return s;
2492 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2496 * Appends at the end of the talloc'ed buffer,
2497 * not the end of the string.
2499 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2501 size_t slen;
2503 if (unlikely(!s)) {
2504 return talloc_strdup(NULL, a);
2507 if (unlikely(!a)) {
2508 return s;
2511 slen = talloc_get_size(s);
2512 if (likely(slen > 0)) {
2513 slen--;
2516 return __talloc_strlendup_append(s, slen, a, strlen(a));
2520 * Appends at the end of the string.
2522 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2524 if (unlikely(!s)) {
2525 return talloc_strndup(NULL, a, n);
2528 if (unlikely(!a)) {
2529 return s;
2532 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2536 * Appends at the end of the talloc'ed buffer,
2537 * not the end of the string.
2539 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2541 size_t slen;
2543 if (unlikely(!s)) {
2544 return talloc_strndup(NULL, a, n);
2547 if (unlikely(!a)) {
2548 return s;
2551 slen = talloc_get_size(s);
2552 if (likely(slen > 0)) {
2553 slen--;
2556 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
/* provide va_copy for pre-C99 compilers: prefer the compiler's
   __va_copy when available, otherwise fall back to plain assignment
   (sufficient only where va_list is a scalar type) */
#ifndef HAVE_VA_COPY
#ifdef HAVE___VA_COPY
#define va_copy(dest, src) __va_copy(dest, src)
#else
#define va_copy(dest, src) (dest) = (src)
#endif
#endif
/* format fmt/ap into a new talloc chunk under t and return the chunk
   header (NULL on formatting failure or out of memory); uses a probe
   vsnprintf() into a stack buffer to size the allocation */
static struct talloc_chunk *_vasprintf_tc(const void *t,
					  const char *fmt,
					  va_list ap) PRINTF_ATTRIBUTE(2,0);

static struct talloc_chunk *_vasprintf_tc(const void *t,
					  const char *fmt,
					  va_list ap)
{
	int vlen;
	size_t len;
	char *ret;
	va_list ap2;
	struct talloc_chunk *tc;
	char buf[1024];

	/* this call looks strange, but it makes it work on older solaris boxes */
	va_copy(ap2, ap);
	vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
	va_end(ap2);
	if (unlikely(vlen < 0)) {
		return NULL;
	}
	len = vlen;
	/* guard against size_t overflow in len + 1 */
	if (unlikely(len + 1 < len)) {
		return NULL;
	}

	ret = (char *)__talloc(t, len+1, &tc);
	if (unlikely(!ret)) return NULL;

	if (len < sizeof(buf)) {
		/* short result: already fully formatted in the stack buffer */
		memcpy(ret, buf, len+1);
	} else {
		/* long result: format again, directly into the allocation */
		va_copy(ap2, ap);
		vsnprintf(ret, len+1, fmt, ap2);
		va_end(ap2);
	}

	_tc_set_name_const(tc, ret);
	return tc;
}
2609 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2611 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2612 if (tc == NULL) {
2613 return NULL;
2615 return TC_PTR_FROM_CHUNK(tc);
2620 Perform string formatting, and return a pointer to newly allocated
2621 memory holding the result, inside a memory pool.
2623 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2625 va_list ap;
2626 char *ret;
2628 va_start(ap, fmt);
2629 ret = talloc_vasprintf(t, fmt, ap);
2630 va_end(ap);
2631 return ret;
2634 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2635 const char *fmt, va_list ap)
2636 PRINTF_ATTRIBUTE(3,0);
2638 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2639 const char *fmt, va_list ap)
2641 ssize_t alen;
2642 va_list ap2;
2643 char c;
2645 va_copy(ap2, ap);
2646 alen = vsnprintf(&c, 1, fmt, ap2);
2647 va_end(ap2);
2649 if (alen <= 0) {
2650 /* Either the vsnprintf failed or the format resulted in
2651 * no characters being formatted. In the former case, we
2652 * ought to return NULL, in the latter we ought to return
2653 * the original string. Most current callers of this
2654 * function expect it to never return NULL.
2656 return s;
2659 s = talloc_realloc(NULL, s, char, slen + alen + 1);
2660 if (!s) return NULL;
2662 va_copy(ap2, ap);
2663 vsnprintf(s + slen, alen + 1, fmt, ap2);
2664 va_end(ap2);
2666 _tc_set_name_const(talloc_chunk_from_ptr(s), s);
2667 return s;
2671 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2672 * and return @p s, which may have moved. Good for gradually
2673 * accumulating output into a string buffer. Appends at the end
2674 * of the string.
2676 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2678 if (unlikely(!s)) {
2679 return talloc_vasprintf(NULL, fmt, ap);
2682 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2686 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2687 * and return @p s, which may have moved. Always appends at the
2688 * end of the talloc'ed buffer, not the end of the string.
2690 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2692 size_t slen;
2694 if (unlikely(!s)) {
2695 return talloc_vasprintf(NULL, fmt, ap);
2698 slen = talloc_get_size(s);
2699 if (likely(slen > 0)) {
2700 slen--;
2703 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2707 Realloc @p s to append the formatted result of @p fmt and return @p
2708 s, which may have moved. Good for gradually accumulating output
2709 into a string buffer.
2711 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2713 va_list ap;
2715 va_start(ap, fmt);
2716 s = talloc_vasprintf_append(s, fmt, ap);
2717 va_end(ap);
2718 return s;
2722 Realloc @p s to append the formatted result of @p fmt and return @p
2723 s, which may have moved. Good for gradually accumulating output
2724 into a buffer.
2726 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2728 va_list ap;
2730 va_start(ap, fmt);
2731 s = talloc_vasprintf_append_buffer(s, fmt, ap);
2732 va_end(ap);
2733 return s;
2737 alloc an array, checking for integer overflow in the array size
2739 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2741 if (count >= MAX_TALLOC_SIZE/el_size) {
2742 return NULL;
2744 return _talloc_named_const(ctx, el_size * count, name);
2748 alloc an zero array, checking for integer overflow in the array size
2750 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2752 if (count >= MAX_TALLOC_SIZE/el_size) {
2753 return NULL;
2755 return _talloc_zero(ctx, el_size * count, name);
2759 realloc an array, checking for integer overflow in the array size
2761 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2763 if (count >= MAX_TALLOC_SIZE/el_size) {
2764 return NULL;
2766 return _talloc_realloc(ctx, ptr, el_size * count, name);
2770 a function version of talloc_realloc(), so it can be passed as a function pointer
2771 to libraries that want a realloc function (a realloc function encapsulates
2772 all the basic capabilities of an allocation library, which is why this is useful)
2774 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2776 return _talloc_realloc(context, ptr, size, NULL);
2780 static int talloc_autofree_destructor(void *ptr)
2782 autofree_context = NULL;
2783 return 0;
2787 return a context which will be auto-freed on exit
2788 this is useful for reducing the noise in leak reports
2790 _PUBLIC_ void *talloc_autofree_context(void)
2792 if (autofree_context == NULL) {
2793 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2794 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2795 talloc_setup_atexit();
2797 return autofree_context;
2800 _PUBLIC_ size_t talloc_get_size(const void *context)
2802 struct talloc_chunk *tc;
2804 if (context == NULL) {
2805 return 0;
2808 tc = talloc_chunk_from_ptr(context);
2810 return tc->size;
2814 find a parent of this context that has the given name, if any
2816 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2818 struct talloc_chunk *tc;
2820 if (context == NULL) {
2821 return NULL;
2824 tc = talloc_chunk_from_ptr(context);
2825 while (tc) {
2826 if (tc->name && strcmp(tc->name, name) == 0) {
2827 return TC_PTR_FROM_CHUNK(tc);
2829 while (tc && tc->prev) tc = tc->prev;
2830 if (tc) {
2831 tc = tc->parent;
2834 return NULL;
2838 show the parentage of a context
2840 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2842 struct talloc_chunk *tc;
2844 if (context == NULL) {
2845 fprintf(file, "talloc no parents for NULL\n");
2846 return;
2849 tc = talloc_chunk_from_ptr(context);
2850 fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2851 while (tc) {
2852 fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2853 while (tc && tc->prev) tc = tc->prev;
2854 if (tc) {
2855 tc = tc->parent;
2858 fflush(file);
2862 return 1 if ptr is a parent of context
2864 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2866 struct talloc_chunk *tc;
2868 if (context == NULL) {
2869 return 0;
2872 tc = talloc_chunk_from_ptr(context);
2873 while (tc) {
2874 if (depth <= 0) {
2875 return 0;
2877 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2878 while (tc && tc->prev) tc = tc->prev;
2879 if (tc) {
2880 tc = tc->parent;
2881 depth--;
2884 return 0;
2888 return 1 if ptr is a parent of context
2890 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2892 return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2896 return the total size of memory used by this context and all children
2898 static inline size_t _talloc_total_limit_size(const void *ptr,
2899 struct talloc_memlimit *old_limit,
2900 struct talloc_memlimit *new_limit)
2902 return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2903 old_limit, new_limit);
2906 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2908 struct talloc_memlimit *l;
2910 for (l = limit; l != NULL; l = l->upper) {
2911 if (l->max_size != 0 &&
2912 ((l->max_size <= l->cur_size) ||
2913 (l->max_size - l->cur_size < size))) {
2914 return false;
2918 return true;
2922 Update memory limits when freeing a talloc_chunk.
2924 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2926 size_t limit_shrink_size;
2928 if (!tc->limit) {
2929 return;
2933 * Pool entries don't count. Only the pools
2934 * themselves are counted as part of the memory
2935 * limits. Note that this also takes care of
2936 * nested pools which have both flags
2937 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2939 if (tc->flags & TALLOC_FLAG_POOLMEM) {
2940 return;
2944 * If we are part of a memory limited context hierarchy
2945 * we need to subtract the memory used from the counters
2948 limit_shrink_size = tc->size+TC_HDR_SIZE;
2951 * If we're deallocating a pool, take into
2952 * account the prefix size added for the pool.
2955 if (tc->flags & TALLOC_FLAG_POOL) {
2956 limit_shrink_size += TP_HDR_SIZE;
2959 talloc_memlimit_shrink(tc->limit, limit_shrink_size);
2961 if (tc->limit->parent == tc) {
2962 free(tc->limit);
2965 tc->limit = NULL;
2969 Increase memory limit accounting after a malloc/realloc.
2971 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2972 size_t size)
2974 struct talloc_memlimit *l;
2976 for (l = limit; l != NULL; l = l->upper) {
2977 size_t new_cur_size = l->cur_size + size;
2978 if (new_cur_size < l->cur_size) {
2979 talloc_abort("logic error in talloc_memlimit_grow\n");
2980 return;
2982 l->cur_size = new_cur_size;
2987 Decrease memory limit accounting after a free/realloc.
2989 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2990 size_t size)
2992 struct talloc_memlimit *l;
2994 for (l = limit; l != NULL; l = l->upper) {
2995 if (l->cur_size < size) {
2996 talloc_abort("logic error in talloc_memlimit_shrink\n");
2997 return;
2999 l->cur_size = l->cur_size - size;
3003 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
3005 struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
3006 struct talloc_memlimit *orig_limit;
3007 struct talloc_memlimit *limit = NULL;
3009 if (tc->limit && tc->limit->parent == tc) {
3010 tc->limit->max_size = max_size;
3011 return 0;
3013 orig_limit = tc->limit;
3015 limit = malloc(sizeof(struct talloc_memlimit));
3016 if (limit == NULL) {
3017 return 1;
3019 limit->parent = tc;
3020 limit->max_size = max_size;
3021 limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
3023 if (orig_limit) {
3024 limit->upper = orig_limit;
3025 } else {
3026 limit->upper = NULL;
3029 return 0;