/*
 * Source provenance (from the original listing):
 *   Samba git — lib/talloc/talloc.c
 *   blob cd159ef89c2eae848ca531cc210585133795b8e6
 *   (listed under commit: "winbind: use tevent_queue_wait_send/recv
 *    in wb_child_request_*()")
 */
/*
   Samba Unix SMB/CIFS implementation.

   Samba trivial allocation library - new interface

   NOTE: Please read talloc_guide.txt for full documentation

   Copyright (C) Andrew Tridgell 2004
   Copyright (C) Stefan Metzmacher 2006

     ** NOTE! The following LGPL license applies to the talloc
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.

   inspired by http://swapped.cc/halloc/
*/
33 #include "replace.h"
34 #include "talloc.h"
36 #ifdef HAVE_SYS_AUXV_H
37 #include <sys/auxv.h>
38 #endif
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
42 #endif
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
46 #endif
48 /* Special macros that are no-ops except when run under Valgrind on
49 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51 /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
54 #include <valgrind.h>
55 #endif
57 /* use this to force every realloc to change the pointer, to stress test
58 code that might not cope */
59 #define ALWAYS_REALLOC 0
62 #define MAX_TALLOC_SIZE 0x10000000
64 #define TALLOC_FLAG_FREE 0x01
65 #define TALLOC_FLAG_LOOP 0x02
66 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
67 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
70 * Bits above this are random, used to make it harder to fake talloc
71 * headers during an attack. Try not to change this without good reason.
73 #define TALLOC_FLAG_MASK 0x0F
75 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
77 #define TALLOC_MAGIC_BASE 0xe814ec70
78 #define TALLOC_MAGIC_NON_RANDOM ( \
79 ~TALLOC_FLAG_MASK & ( \
80 TALLOC_MAGIC_BASE + \
81 (TALLOC_BUILD_VERSION_MAJOR << 24) + \
82 (TALLOC_BUILD_VERSION_MINOR << 16) + \
83 (TALLOC_BUILD_VERSION_RELEASE << 8)))
84 static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
/* by default we abort when given a bad pointer (such as when talloc_free() is called
   on a pointer that came from malloc() */
#ifndef TALLOC_ABORT
#define TALLOC_ABORT(reason) abort()
#endif

/* Strip const from a pointer without a compiler warning, going via
 * intptr_t where available. */
#ifndef discard_const_p
#if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
# define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
#else
# define discard_const_p(type, ptr) ((type *)(ptr))
#endif
#endif

/* these macros gain us a few percent of speed on gcc */
#if (__GNUC__ >= 3)
/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
   as its first argument */
#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#else
#ifndef likely
#define likely(x) (x)
#endif
#ifndef unlikely
#define unlikely(x) (x)
#endif
#endif

/* this null_context is only used if talloc_enable_leak_report() or
   talloc_enable_leak_report_full() is called, otherwise it remains
   NULL
*/
static void *null_context;
static void *autofree_context;

/* used to enable fill of memory on free, which can be useful for
 * catching use after free errors when valgrind is too slow
 */
static struct {
	bool initialised;	/* lazily set on first free */
	bool enabled;		/* true when TALLOC_FREE_FILL is set */
	uint8_t fill_value;	/* byte written over freed memory */
} talloc_fill;

/* Environment variable consulted for the fill byte. */
#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
/*
 * Fill the user data of a freed chunk with the configured fill byte.
 * Do not wipe the header, to allow the double-free logic to still work.
 */
#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size; \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the whole chunk as not accessable */
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
	size_t _flen = TC_HDR_SIZE + (_tc)->size; \
	char *_fptr = (char *)(_tc); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while(0)
#else
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
#endif

/* Invalidate a fully-freed chunk: optional fill plus valgrind marking. */
#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
	TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
	TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
} while (0)

/* Fill the tail cut off by a shrinking realloc. */
#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the unused bytes not accessable */
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while (0)
#else
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

/* Like the SHRINK variants, but only marks the tail undefined (the
 * memory stays accessible, e.g. inside a pool). */
#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the unused bytes as undefined */
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the new bytes as undefined */
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
	size_t _new_used = TC_HDR_SIZE + (_new_size); \
	size_t _flen = _new_used - _old_used; \
	char *_fptr = _old_used + (char *)(_tc); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)
/* One secondary reference to a chunk; lives in the referrer's tree. */
struct talloc_reference_handle {
	struct talloc_reference_handle *next, *prev;
	void *ptr;		/* the referenced talloc pointer */
	const char *location;	/* source location that took the reference */
};

/* Aggregate memory limit attached to a context subtree. */
struct talloc_memlimit {
	struct talloc_chunk *parent;	/* chunk this limit was set on */
	struct talloc_memlimit *upper;	/* next enclosing limit, if any */
	size_t max_size;
	size_t cur_size;
};

static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
				size_t size);
static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
				size_t size);
static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);

static inline void _tc_set_name_const(struct talloc_chunk *tc,
				      const char *name);
static struct talloc_chunk *_vasprintf_tc(const void *t,
					  const char *fmt,
					  va_list ap);

typedef int (*talloc_destructor_t)(void *);

struct talloc_pool_hdr;

/* Header placed immediately before every talloc allocation. */
struct talloc_chunk {
	/*
	 * flags includes the talloc magic, which is randomised to
	 * make overwrite attacks harder
	 */
	unsigned flags;

	/*
	 * If you have a logical tree like:
	 *
	 *           <parent>
	 *           /   |   \
	 *          /    |    \
	 *         /     |     \
	 * <child 1> <child 2> <child 3>
	 *
	 * The actual talloc tree is:
	 *
	 *  <parent>
	 *     |
	 *  <child 1> - <child 2> - <child 3>
	 *
	 * The children are linked with next/prev pointers, and
	 * child 1 is linked to the parent with parent/child
	 * pointers.
	 */
	struct talloc_chunk *next, *prev;
	struct talloc_chunk *parent, *child;
	struct talloc_reference_handle *refs;
	talloc_destructor_t destructor;
	const char *name;
	size_t size;

	/*
	 * limit semantics:
	 * if 'limit' is set it means all *new* children of the context will
	 * be limited to a total aggregate size ox max_size for memory
	 * allocations.
	 * cur_size is used to keep track of the current use
	 */
	struct talloc_memlimit *limit;

	/*
	 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
	 * is a pointer to the struct talloc_chunk of the pool that it was
	 * allocated from. This way children can quickly find the pool to chew
	 * from.
	 */
	struct talloc_pool_hdr *pool;
};

/* 16 byte alignment seems to keep everyone happy */
#define TC_ALIGN16(s) (((s)+15)&~15)
#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
321 _PUBLIC_ int talloc_version_major(void)
323 return TALLOC_VERSION_MAJOR;
326 _PUBLIC_ int talloc_version_minor(void)
328 return TALLOC_VERSION_MINOR;
331 _PUBLIC_ int talloc_test_get_magic(void)
333 return talloc_magic;
336 static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
337 const char *location)
340 * Mark this memory as free, and also over-stamp the talloc
341 * magic with the old-style magic.
343 * Why? This tries to avoid a memory read use-after-free from
344 * disclosing our talloc magic, which would then allow an
345 * attacker to prepare a valid header and so run a destructor.
348 tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
349 | (tc->flags & TALLOC_FLAG_MASK);
351 /* we mark the freed memory with where we called the free
352 * from. This means on a double free error we can report where
353 * the first free came from
355 if (location) {
356 tc->name = location;
360 static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
363 * Mark this memory as not free.
365 * Why? This is memory either in a pool (and so available for
366 * talloc's re-use or after the realloc(). We need to mark
367 * the memory as free() before any realloc() call as we can't
368 * write to the memory after that.
370 * We put back the normal magic instead of the 'not random'
371 * magic.
374 tc->flags = talloc_magic |
375 ((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
378 static void (*talloc_log_fn)(const char *message);
380 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
382 talloc_log_fn = log_fn;
385 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
386 void talloc_lib_init(void) __attribute__((constructor));
387 void talloc_lib_init(void)
389 uint32_t random_value;
390 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
391 uint8_t *p;
393 * Use the kernel-provided random values used for
394 * ASLR. This won't change per-exec, which is ideal for us
396 p = (uint8_t *) getauxval(AT_RANDOM);
397 if (p) {
399 * We get 16 bytes from getauxval. By calling rand(),
400 * a totally insecure PRNG, but one that will
401 * deterministically have a different value when called
402 * twice, we ensure that if two talloc-like libraries
403 * are somehow loaded in the same address space, that
404 * because we choose different bytes, we will keep the
405 * protection against collision of multiple talloc
406 * libs.
408 * This protection is important because the effects of
409 * passing a talloc pointer from one to the other may
410 * be very hard to determine.
412 int offset = rand() % (16 - sizeof(random_value));
413 memcpy(&random_value, p + offset, sizeof(random_value));
414 } else
415 #endif
418 * Otherwise, hope the location we are loaded in
419 * memory is randomised by someone else
421 random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
423 talloc_magic = random_value & ~TALLOC_FLAG_MASK;
425 #else
426 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
427 #endif
429 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
430 static void talloc_log(const char *fmt, ...)
432 va_list ap;
433 char *message;
435 if (!talloc_log_fn) {
436 return;
439 va_start(ap, fmt);
440 message = talloc_vasprintf(NULL, fmt, ap);
441 va_end(ap);
443 talloc_log_fn(message);
444 talloc_free(message);
447 static void talloc_log_stderr(const char *message)
449 fprintf(stderr, "%s", message);
452 _PUBLIC_ void talloc_set_log_stderr(void)
454 talloc_set_log_fn(talloc_log_stderr);
457 static void (*talloc_abort_fn)(const char *reason);
459 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
461 talloc_abort_fn = abort_fn;
464 static void talloc_abort(const char *reason)
466 talloc_log("%s\n", reason);
468 if (!talloc_abort_fn) {
469 TALLOC_ABORT(reason);
472 talloc_abort_fn(reason);
475 static void talloc_abort_access_after_free(void)
477 talloc_abort("Bad talloc magic value - access after free");
480 static void talloc_abort_unknown_value(void)
482 talloc_abort("Bad talloc magic value - unknown value");
485 /* panic if we get a bad magic value */
486 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
488 const char *pp = (const char *)ptr;
489 struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
490 if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
491 if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
492 == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
493 talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
494 talloc_abort_access_after_free();
495 return NULL;
498 talloc_abort_unknown_value();
499 return NULL;
501 return tc;
/* hook into the front of the list */
#define _TLIST_ADD(list, p) \
do { \
	if (!(list)) { \
		(list) = (p); \
		(p)->next = (p)->prev = NULL; \
	} else { \
		(list)->prev = (p); \
		(p)->next = (list); \
		(p)->prev = NULL; \
		(list) = (p); \
	} \
} while (0)

/* remove an element from a list - element doesn't have to be in list. */
#define _TLIST_REMOVE(list, p) \
do { \
	if ((p) == (list)) { \
		(list) = (p)->next; \
		if (list) (list)->prev = NULL; \
	} else { \
		if ((p)->prev) (p)->prev->next = (p)->next; \
		if ((p)->next) (p)->next->prev = (p)->prev; \
	} \
	if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
} while (0)
533 return the parent chunk of a pointer
535 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
537 struct talloc_chunk *tc;
539 if (unlikely(ptr == NULL)) {
540 return NULL;
543 tc = talloc_chunk_from_ptr(ptr);
544 while (tc->prev) tc=tc->prev;
546 return tc->parent;
549 _PUBLIC_ void *talloc_parent(const void *ptr)
551 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
552 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
556 find parents name
558 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
560 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
561 return tc? tc->name : NULL;
565 A pool carries an in-pool object count count in the first 16 bytes.
566 bytes. This is done to support talloc_steal() to a parent outside of the
567 pool. The count includes the pool itself, so a talloc_free() on a pool will
568 only destroy the pool if the count has dropped to zero. A talloc_free() of a
569 pool member will reduce the count, and eventually also call free(3) on the
570 pool memory.
572 The object count is not put into "struct talloc_chunk" because it is only
573 relevant for talloc pools and the alignment to 16 bytes would increase the
574 memory footprint of each talloc chunk by those 16 bytes.
577 struct talloc_pool_hdr {
578 void *end;
579 unsigned int object_count;
580 size_t poolsize;
583 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
585 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
587 return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
590 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
592 return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
595 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
597 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
598 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
601 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
603 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
606 /* If tc is inside a pool, this gives the next neighbour. */
607 static inline void *tc_next_chunk(struct talloc_chunk *tc)
609 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
612 static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
614 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
615 return tc_next_chunk(tc);
618 /* Mark the whole remaining pool as not accessable */
619 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
621 size_t flen = tc_pool_space_left(pool_hdr);
623 if (unlikely(talloc_fill.enabled)) {
624 memset(pool_hdr->end, talloc_fill.fill_value, flen);
627 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
628 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
629 #endif
633 Allocate from a pool
636 static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
637 size_t size, size_t prefix_len)
639 struct talloc_pool_hdr *pool_hdr = NULL;
640 size_t space_left;
641 struct talloc_chunk *result;
642 size_t chunk_size;
644 if (parent == NULL) {
645 return NULL;
648 if (parent->flags & TALLOC_FLAG_POOL) {
649 pool_hdr = talloc_pool_from_chunk(parent);
651 else if (parent->flags & TALLOC_FLAG_POOLMEM) {
652 pool_hdr = parent->pool;
655 if (pool_hdr == NULL) {
656 return NULL;
659 space_left = tc_pool_space_left(pool_hdr);
662 * Align size to 16 bytes
664 chunk_size = TC_ALIGN16(size + prefix_len);
666 if (space_left < chunk_size) {
667 return NULL;
670 result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
672 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
673 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
674 #endif
676 pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
678 result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
679 result->pool = pool_hdr;
681 pool_hdr->object_count++;
683 return result;
687 Allocate a bit of memory as a child of an existing pointer
689 static inline void *__talloc_with_prefix(const void *context,
690 size_t size,
691 size_t prefix_len,
692 struct talloc_chunk **tc_ret)
694 struct talloc_chunk *tc = NULL;
695 struct talloc_memlimit *limit = NULL;
696 size_t total_len = TC_HDR_SIZE + size + prefix_len;
697 struct talloc_chunk *parent = NULL;
699 if (unlikely(context == NULL)) {
700 context = null_context;
703 if (unlikely(size >= MAX_TALLOC_SIZE)) {
704 return NULL;
707 if (unlikely(total_len < TC_HDR_SIZE)) {
708 return NULL;
711 if (likely(context != NULL)) {
712 parent = talloc_chunk_from_ptr(context);
714 if (parent->limit != NULL) {
715 limit = parent->limit;
718 tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
721 if (tc == NULL) {
722 char *ptr;
725 * Only do the memlimit check/update on actual allocation.
727 if (!talloc_memlimit_check(limit, total_len)) {
728 errno = ENOMEM;
729 return NULL;
732 ptr = malloc(total_len);
733 if (unlikely(ptr == NULL)) {
734 return NULL;
736 tc = (struct talloc_chunk *)(ptr + prefix_len);
737 tc->flags = talloc_magic;
738 tc->pool = NULL;
740 talloc_memlimit_grow(limit, total_len);
743 tc->limit = limit;
744 tc->size = size;
745 tc->destructor = NULL;
746 tc->child = NULL;
747 tc->name = NULL;
748 tc->refs = NULL;
750 if (likely(context != NULL)) {
751 if (parent->child) {
752 parent->child->parent = NULL;
753 tc->next = parent->child;
754 tc->next->prev = tc;
755 } else {
756 tc->next = NULL;
758 tc->parent = parent;
759 tc->prev = NULL;
760 parent->child = tc;
761 } else {
762 tc->next = tc->prev = tc->parent = NULL;
765 *tc_ret = tc;
766 return TC_PTR_FROM_CHUNK(tc);
769 static inline void *__talloc(const void *context,
770 size_t size,
771 struct talloc_chunk **tc)
773 return __talloc_with_prefix(context, size, 0, tc);
777 * Create a talloc pool
780 static inline void *_talloc_pool(const void *context, size_t size)
782 struct talloc_chunk *tc;
783 struct talloc_pool_hdr *pool_hdr;
784 void *result;
786 result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
788 if (unlikely(result == NULL)) {
789 return NULL;
792 pool_hdr = talloc_pool_from_chunk(tc);
794 tc->flags |= TALLOC_FLAG_POOL;
795 tc->size = 0;
797 pool_hdr->object_count = 1;
798 pool_hdr->end = result;
799 pool_hdr->poolsize = size;
801 tc_invalidate_pool(pool_hdr);
803 return result;
806 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
808 return _talloc_pool(context, size);
812 * Create a talloc pool correctly sized for a basic size plus
813 * a number of subobjects whose total size is given. Essentially
814 * a custom allocator for talloc to reduce fragmentation.
817 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
818 size_t type_size,
819 const char *type_name,
820 unsigned num_subobjects,
821 size_t total_subobjects_size)
823 size_t poolsize, subobjects_slack, tmp;
824 struct talloc_chunk *tc;
825 struct talloc_pool_hdr *pool_hdr;
826 void *ret;
828 poolsize = type_size + total_subobjects_size;
830 if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
831 goto overflow;
834 if (num_subobjects == UINT_MAX) {
835 goto overflow;
837 num_subobjects += 1; /* the object body itself */
840 * Alignment can increase the pool size by at most 15 bytes per object
841 * plus alignment for the object itself
843 subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
844 if (subobjects_slack < num_subobjects) {
845 goto overflow;
848 tmp = poolsize + subobjects_slack;
849 if ((tmp < poolsize) || (tmp < subobjects_slack)) {
850 goto overflow;
852 poolsize = tmp;
854 ret = _talloc_pool(ctx, poolsize);
855 if (ret == NULL) {
856 return NULL;
859 tc = talloc_chunk_from_ptr(ret);
860 tc->size = type_size;
862 pool_hdr = talloc_pool_from_chunk(tc);
864 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
865 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
866 #endif
868 pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
870 _tc_set_name_const(tc, type_name);
871 return ret;
873 overflow:
874 return NULL;
878 setup a destructor to be called on free of a pointer
879 the destructor should return 0 on success, or -1 on failure.
880 if the destructor fails then the free is failed, and the memory can
881 be continued to be used
883 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
885 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
886 tc->destructor = destructor;
890 increase the reference count on a piece of memory.
892 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
894 if (unlikely(!talloc_reference(null_context, ptr))) {
895 return -1;
897 return 0;
901 helper for talloc_reference()
903 this is referenced by a function pointer and should not be inline
905 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
907 struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
908 _TLIST_REMOVE(ptr_tc->refs, handle);
909 return 0;
913 more efficient way to add a name to a pointer - the name must point to a
914 true string constant
916 static inline void _tc_set_name_const(struct talloc_chunk *tc,
917 const char *name)
919 tc->name = name;
923 internal talloc_named_const()
925 static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
927 void *ptr;
928 struct talloc_chunk *tc;
930 ptr = __talloc(context, size, &tc);
931 if (unlikely(ptr == NULL)) {
932 return NULL;
935 _tc_set_name_const(tc, name);
937 return ptr;
941 make a secondary reference to a pointer, hanging off the given context.
942 the pointer remains valid until both the original caller and this given
943 context are freed.
945 the major use for this is when two different structures need to reference the
946 same underlying data, and you want to be able to free the two instances separately,
947 and in either order
949 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
951 struct talloc_chunk *tc;
952 struct talloc_reference_handle *handle;
953 if (unlikely(ptr == NULL)) return NULL;
955 tc = talloc_chunk_from_ptr(ptr);
956 handle = (struct talloc_reference_handle *)_talloc_named_const(context,
957 sizeof(struct talloc_reference_handle),
958 TALLOC_MAGIC_REFERENCE);
959 if (unlikely(handle == NULL)) return NULL;
961 /* note that we hang the destructor off the handle, not the
962 main context as that allows the caller to still setup their
963 own destructor on the context if they want to */
964 talloc_set_destructor(handle, talloc_reference_destructor);
965 handle->ptr = discard_const_p(void, ptr);
966 handle->location = location;
967 _TLIST_ADD(tc->refs, handle);
968 return handle->ptr;
971 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
973 static inline void _tc_free_poolmem(struct talloc_chunk *tc,
974 const char *location)
976 struct talloc_pool_hdr *pool;
977 struct talloc_chunk *pool_tc;
978 void *next_tc;
980 pool = tc->pool;
981 pool_tc = talloc_chunk_from_pool(pool);
982 next_tc = tc_next_chunk(tc);
984 _talloc_chunk_set_free(tc, location);
986 TC_INVALIDATE_FULL_CHUNK(tc);
988 if (unlikely(pool->object_count == 0)) {
989 talloc_abort("Pool object count zero!");
990 return;
993 pool->object_count--;
995 if (unlikely(pool->object_count == 1
996 && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
998 * if there is just one object left in the pool
999 * and pool->flags does not have TALLOC_FLAG_FREE,
1000 * it means this is the pool itself and
1001 * the rest is available for new objects
1002 * again.
1004 pool->end = tc_pool_first_chunk(pool);
1005 tc_invalidate_pool(pool);
1006 return;
1009 if (unlikely(pool->object_count == 0)) {
1011 * we mark the freed memory with where we called the free
1012 * from. This means on a double free error we can report where
1013 * the first free came from
1015 pool_tc->name = location;
1017 if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
1018 _tc_free_poolmem(pool_tc, location);
1019 } else {
1021 * The tc_memlimit_update_on_free()
1022 * call takes into account the
1023 * prefix TP_HDR_SIZE allocated before
1024 * the pool talloc_chunk.
1026 tc_memlimit_update_on_free(pool_tc);
1027 TC_INVALIDATE_FULL_CHUNK(pool_tc);
1028 free(pool);
1030 return;
1033 if (pool->end == next_tc) {
1035 * if pool->pool still points to end of
1036 * 'tc' (which is stored in the 'next_tc' variable),
1037 * we can reclaim the memory of 'tc'.
1039 pool->end = tc;
1040 return;
1044 * Do nothing. The memory is just "wasted", waiting for the pool
1045 * itself to be freed.
1049 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1050 void *ptr,
1051 const char *location);
1053 static inline int _talloc_free_internal(void *ptr, const char *location);
1056 internal free call that takes a struct talloc_chunk *.
1058 static inline int _tc_free_internal(struct talloc_chunk *tc,
1059 const char *location)
1061 void *ptr_to_free;
1062 void *ptr = TC_PTR_FROM_CHUNK(tc);
1064 if (unlikely(tc->refs)) {
1065 int is_child;
1066 /* check if this is a reference from a child or
1067 * grandchild back to it's parent or grandparent
1069 * in that case we need to remove the reference and
1070 * call another instance of talloc_free() on the current
1071 * pointer.
1073 is_child = talloc_is_parent(tc->refs, ptr);
1074 _talloc_free_internal(tc->refs, location);
1075 if (is_child) {
1076 return _talloc_free_internal(ptr, location);
1078 return -1;
1081 if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1082 /* we have a free loop - stop looping */
1083 return 0;
1086 if (unlikely(tc->destructor)) {
1087 talloc_destructor_t d = tc->destructor;
1090 * Protect the destructor against some overwrite
1091 * attacks, by explicitly checking it has the right
1092 * magic here.
1094 if (talloc_chunk_from_ptr(ptr) != tc) {
1096 * This can't actually happen, the
1097 * call itself will panic.
1099 TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1102 if (d == (talloc_destructor_t)-1) {
1103 return -1;
1105 tc->destructor = (talloc_destructor_t)-1;
1106 if (d(ptr) == -1) {
1108 * Only replace the destructor pointer if
1109 * calling the destructor didn't modify it.
1111 if (tc->destructor == (talloc_destructor_t)-1) {
1112 tc->destructor = d;
1114 return -1;
1116 tc->destructor = NULL;
1119 if (tc->parent) {
1120 _TLIST_REMOVE(tc->parent->child, tc);
1121 if (tc->parent->child) {
1122 tc->parent->child->parent = tc->parent;
1124 } else {
1125 if (tc->prev) tc->prev->next = tc->next;
1126 if (tc->next) tc->next->prev = tc->prev;
1127 tc->prev = tc->next = NULL;
1130 tc->flags |= TALLOC_FLAG_LOOP;
1132 _tc_free_children_internal(tc, ptr, location);
1134 _talloc_chunk_set_free(tc, location);
1136 if (tc->flags & TALLOC_FLAG_POOL) {
1137 struct talloc_pool_hdr *pool;
1139 pool = talloc_pool_from_chunk(tc);
1141 if (unlikely(pool->object_count == 0)) {
1142 talloc_abort("Pool object count zero!");
1143 return 0;
1146 pool->object_count--;
1148 if (likely(pool->object_count != 0)) {
1149 return 0;
1153 * With object_count==0, a pool becomes a normal piece of
1154 * memory to free. If it's allocated inside a pool, it needs
1155 * to be freed as poolmem, else it needs to be just freed.
1157 ptr_to_free = pool;
1158 } else {
1159 ptr_to_free = tc;
1162 if (tc->flags & TALLOC_FLAG_POOLMEM) {
1163 _tc_free_poolmem(tc, location);
1164 return 0;
1167 tc_memlimit_update_on_free(tc);
1169 TC_INVALIDATE_FULL_CHUNK(tc);
1170 free(ptr_to_free);
1171 return 0;
1175 internal talloc_free call
1177 static inline int _talloc_free_internal(void *ptr, const char *location)
1179 struct talloc_chunk *tc;
1181 if (unlikely(ptr == NULL)) {
1182 return -1;
1185 /* possibly initialised the talloc fill value */
1186 if (unlikely(!talloc_fill.initialised)) {
1187 const char *fill = getenv(TALLOC_FILL_ENV);
1188 if (fill != NULL) {
1189 talloc_fill.enabled = true;
1190 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1192 talloc_fill.initialised = true;
1195 tc = talloc_chunk_from_ptr(ptr);
1196 return _tc_free_internal(tc, location);
1199 static inline size_t _talloc_total_limit_size(const void *ptr,
1200 struct talloc_memlimit *old_limit,
1201 struct talloc_memlimit *new_limit);
1204 move a lump of memory from one talloc context to another return the
1205 ptr on success, or NULL if it could not be transferred.
1206 passing NULL as ptr will always return NULL with no side effects.
1208 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1210 struct talloc_chunk *tc, *new_tc;
1211 size_t ctx_size = 0;
1213 if (unlikely(!ptr)) {
1214 return NULL;
1217 if (unlikely(new_ctx == NULL)) {
1218 new_ctx = null_context;
1221 tc = talloc_chunk_from_ptr(ptr);
1223 if (tc->limit != NULL) {
1225 ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1227 /* Decrement the memory limit from the source .. */
1228 talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1230 if (tc->limit->parent == tc) {
1231 tc->limit->upper = NULL;
1232 } else {
1233 tc->limit = NULL;
1237 if (unlikely(new_ctx == NULL)) {
1238 if (tc->parent) {
1239 _TLIST_REMOVE(tc->parent->child, tc);
1240 if (tc->parent->child) {
1241 tc->parent->child->parent = tc->parent;
1243 } else {
1244 if (tc->prev) tc->prev->next = tc->next;
1245 if (tc->next) tc->next->prev = tc->prev;
1248 tc->parent = tc->next = tc->prev = NULL;
1249 return discard_const_p(void, ptr);
1252 new_tc = talloc_chunk_from_ptr(new_ctx);
1254 if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1255 return discard_const_p(void, ptr);
1258 if (tc->parent) {
1259 _TLIST_REMOVE(tc->parent->child, tc);
1260 if (tc->parent->child) {
1261 tc->parent->child->parent = tc->parent;
1263 } else {
1264 if (tc->prev) tc->prev->next = tc->next;
1265 if (tc->next) tc->next->prev = tc->prev;
1266 tc->prev = tc->next = NULL;
1269 tc->parent = new_tc;
1270 if (new_tc->child) new_tc->child->parent = NULL;
1271 _TLIST_ADD(new_tc->child, tc);
1273 if (tc->limit || new_tc->limit) {
1274 ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1275 new_tc->limit);
1276 /* .. and increment it in the destination. */
1277 if (new_tc->limit) {
1278 talloc_memlimit_grow(new_tc->limit, ctx_size);
1282 return discard_const_p(void, ptr);
1286 move a lump of memory from one talloc context to another return the
1287 ptr on success, or NULL if it could not be transferred.
1288 passing NULL as ptr will always return NULL with no side effects.
1290 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1292 struct talloc_chunk *tc;
1294 if (unlikely(ptr == NULL)) {
1295 return NULL;
1298 tc = talloc_chunk_from_ptr(ptr);
1300 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1301 struct talloc_reference_handle *h;
1303 talloc_log("WARNING: talloc_steal with references at %s\n",
1304 location);
1306 for (h=tc->refs; h; h=h->next) {
1307 talloc_log("\treference at %s\n",
1308 h->location);
1312 #if 0
1313 /* this test is probably too expensive to have on in the
1314 normal build, but it useful for debugging */
1315 if (talloc_is_parent(new_ctx, ptr)) {
1316 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1318 #endif
1320 return _talloc_steal_internal(new_ctx, ptr);
1324 this is like a talloc_steal(), but you must supply the old
1325 parent. This resolves the ambiguity in a talloc_steal() which is
1326 called on a context that has more than one parent (via references)
1328 The old parent can be either a reference or a parent
1330 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1332 struct talloc_chunk *tc;
1333 struct talloc_reference_handle *h;
1335 if (unlikely(ptr == NULL)) {
1336 return NULL;
1339 if (old_parent == talloc_parent(ptr)) {
1340 return _talloc_steal_internal(new_parent, ptr);
1343 tc = talloc_chunk_from_ptr(ptr);
1344 for (h=tc->refs;h;h=h->next) {
1345 if (talloc_parent(h) == old_parent) {
1346 if (_talloc_steal_internal(new_parent, h) != h) {
1347 return NULL;
1349 return discard_const_p(void, ptr);
1353 /* it wasn't a parent */
1354 return NULL;
1358 remove a secondary reference to a pointer. This undo's what
1359 talloc_reference() has done. The context and pointer arguments
1360 must match those given to a talloc_reference()
1362 static inline int talloc_unreference(const void *context, const void *ptr)
1364 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1365 struct talloc_reference_handle *h;
1367 if (unlikely(context == NULL)) {
1368 context = null_context;
1371 for (h=tc->refs;h;h=h->next) {
1372 struct talloc_chunk *p = talloc_parent_chunk(h);
1373 if (p == NULL) {
1374 if (context == NULL) break;
1375 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1376 break;
1379 if (h == NULL) {
1380 return -1;
1383 return _talloc_free_internal(h, __location__);
1387 remove a specific parent context from a pointer. This is a more
1388 controlled variant of talloc_free()
1390 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1392 struct talloc_chunk *tc_p, *new_p, *tc_c;
1393 void *new_parent;
1395 if (ptr == NULL) {
1396 return -1;
1399 if (context == NULL) {
1400 context = null_context;
1403 if (talloc_unreference(context, ptr) == 0) {
1404 return 0;
1407 if (context != NULL) {
1408 tc_c = talloc_chunk_from_ptr(context);
1409 } else {
1410 tc_c = NULL;
1412 if (tc_c != talloc_parent_chunk(ptr)) {
1413 return -1;
1416 tc_p = talloc_chunk_from_ptr(ptr);
1418 if (tc_p->refs == NULL) {
1419 return _talloc_free_internal(ptr, __location__);
1422 new_p = talloc_parent_chunk(tc_p->refs);
1423 if (new_p) {
1424 new_parent = TC_PTR_FROM_CHUNK(new_p);
1425 } else {
1426 new_parent = NULL;
1429 if (talloc_unreference(new_parent, ptr) != 0) {
1430 return -1;
1433 _talloc_steal_internal(new_parent, ptr);
1435 return 0;
1439 add a name to an existing pointer - va_list version
1441 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1442 const char *fmt,
1443 va_list ap) PRINTF_ATTRIBUTE(2,0);
1445 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1446 const char *fmt,
1447 va_list ap)
1449 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1450 fmt,
1451 ap);
1452 if (likely(name_tc)) {
1453 tc->name = TC_PTR_FROM_CHUNK(name_tc);
1454 _tc_set_name_const(name_tc, ".name");
1455 } else {
1456 tc->name = NULL;
1458 return tc->name;
1462 add a name to an existing pointer
1464 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1466 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1467 const char *name;
1468 va_list ap;
1469 va_start(ap, fmt);
1470 name = tc_set_name_v(tc, fmt, ap);
1471 va_end(ap);
1472 return name;
1477 create a named talloc pointer. Any talloc pointer can be named, and
1478 talloc_named() operates just like talloc() except that it allows you
1479 to name the pointer.
1481 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1483 va_list ap;
1484 void *ptr;
1485 const char *name;
1486 struct talloc_chunk *tc;
1488 ptr = __talloc(context, size, &tc);
1489 if (unlikely(ptr == NULL)) return NULL;
1491 va_start(ap, fmt);
1492 name = tc_set_name_v(tc, fmt, ap);
1493 va_end(ap);
1495 if (unlikely(name == NULL)) {
1496 _talloc_free_internal(ptr, __location__);
1497 return NULL;
1500 return ptr;
1504 return the name of a talloc ptr, or "UNNAMED"
1506 static inline const char *__talloc_get_name(const void *ptr)
1508 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1509 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1510 return ".reference";
1512 if (likely(tc->name)) {
1513 return tc->name;
1515 return "UNNAMED";
1518 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1520 return __talloc_get_name(ptr);
1524 check if a pointer has the given name. If it does, return the pointer,
1525 otherwise return NULL
1527 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1529 const char *pname;
1530 if (unlikely(ptr == NULL)) return NULL;
1531 pname = __talloc_get_name(ptr);
1532 if (likely(pname == name || strcmp(pname, name) == 0)) {
1533 return discard_const_p(void, ptr);
1535 return NULL;
1538 static void talloc_abort_type_mismatch(const char *location,
1539 const char *name,
1540 const char *expected)
1542 const char *reason;
1544 reason = talloc_asprintf(NULL,
1545 "%s: Type mismatch: name[%s] expected[%s]",
1546 location,
1547 name?name:"NULL",
1548 expected);
1549 if (!reason) {
1550 reason = "Type mismatch";
1553 talloc_abort(reason);
1556 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1558 const char *pname;
1560 if (unlikely(ptr == NULL)) {
1561 talloc_abort_type_mismatch(location, NULL, name);
1562 return NULL;
1565 pname = __talloc_get_name(ptr);
1566 if (likely(pname == name || strcmp(pname, name) == 0)) {
1567 return discard_const_p(void, ptr);
1570 talloc_abort_type_mismatch(location, pname, name);
1571 return NULL;
1575 this is for compatibility with older versions of talloc
1577 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1579 va_list ap;
1580 void *ptr;
1581 const char *name;
1582 struct talloc_chunk *tc;
1584 ptr = __talloc(NULL, 0, &tc);
1585 if (unlikely(ptr == NULL)) return NULL;
1587 va_start(ap, fmt);
1588 name = tc_set_name_v(tc, fmt, ap);
1589 va_end(ap);
1591 if (unlikely(name == NULL)) {
1592 _talloc_free_internal(ptr, __location__);
1593 return NULL;
1596 return ptr;
1599 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1600 void *ptr,
1601 const char *location)
1603 while (tc->child) {
1604 /* we need to work out who will own an abandoned child
1605 if it cannot be freed. In priority order, the first
1606 choice is owner of any remaining reference to this
1607 pointer, the second choice is our parent, and the
1608 final choice is the null context. */
1609 void *child = TC_PTR_FROM_CHUNK(tc->child);
1610 const void *new_parent = null_context;
1611 if (unlikely(tc->child->refs)) {
1612 struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1613 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1615 if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1616 if (talloc_parent_chunk(child) != tc) {
1618 * Destructor already reparented this child.
1619 * No further reparenting needed.
1621 continue;
1623 if (new_parent == null_context) {
1624 struct talloc_chunk *p = talloc_parent_chunk(ptr);
1625 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1627 _talloc_steal_internal(new_parent, child);
1633 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1634 should probably not be used in new code. It's in here to keep the talloc
1635 code consistent across Samba 3 and 4.
1637 _PUBLIC_ void talloc_free_children(void *ptr)
1639 struct talloc_chunk *tc_name = NULL;
1640 struct talloc_chunk *tc;
1642 if (unlikely(ptr == NULL)) {
1643 return;
1646 tc = talloc_chunk_from_ptr(ptr);
1648 /* we do not want to free the context name if it is a child .. */
1649 if (likely(tc->child)) {
1650 for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1651 if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1653 if (tc_name) {
1654 _TLIST_REMOVE(tc->child, tc_name);
1655 if (tc->child) {
1656 tc->child->parent = tc;
1661 _tc_free_children_internal(tc, ptr, __location__);
1663 /* .. so we put it back after all other children have been freed */
1664 if (tc_name) {
1665 if (tc->child) {
1666 tc->child->parent = NULL;
1668 tc_name->parent = tc;
1669 _TLIST_ADD(tc->child, tc_name);
1674 Allocate a bit of memory as a child of an existing pointer
1676 _PUBLIC_ void *_talloc(const void *context, size_t size)
1678 struct talloc_chunk *tc;
1679 return __talloc(context, size, &tc);
1683 externally callable talloc_set_name_const()
1685 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1687 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1691 create a named talloc pointer. Any talloc pointer can be named, and
1692 talloc_named() operates just like talloc() except that it allows you
1693 to name the pointer.
1695 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1697 return _talloc_named_const(context, size, name);
1701 free a talloc pointer. This also frees all child pointers of this
1702 pointer recursively
1704 return 0 if the memory is actually freed, otherwise -1. The memory
1705 will not be freed if the ref_count is > 1 or the destructor (if
1706 any) returns non-zero
1708 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1710 struct talloc_chunk *tc;
1712 if (unlikely(ptr == NULL)) {
1713 return -1;
1716 tc = talloc_chunk_from_ptr(ptr);
1718 if (unlikely(tc->refs != NULL)) {
1719 struct talloc_reference_handle *h;
1721 if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1722 /* in this case we do know which parent should
1723 get this pointer, as there is really only
1724 one parent */
1725 return talloc_unlink(null_context, ptr);
1728 talloc_log("ERROR: talloc_free with references at %s\n",
1729 location);
1731 for (h=tc->refs; h; h=h->next) {
1732 talloc_log("\treference at %s\n",
1733 h->location);
1735 return -1;
1738 return _talloc_free_internal(ptr, location);
1744 A talloc version of realloc. The context argument is only used if
1745 ptr is NULL
1747 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1749 struct talloc_chunk *tc;
1750 void *new_ptr;
1751 bool malloced = false;
1752 struct talloc_pool_hdr *pool_hdr = NULL;
1753 size_t old_size = 0;
1754 size_t new_size = 0;
1756 /* size zero is equivalent to free() */
1757 if (unlikely(size == 0)) {
1758 talloc_unlink(context, ptr);
1759 return NULL;
1762 if (unlikely(size >= MAX_TALLOC_SIZE)) {
1763 return NULL;
1766 /* realloc(NULL) is equivalent to malloc() */
1767 if (ptr == NULL) {
1768 return _talloc_named_const(context, size, name);
1771 tc = talloc_chunk_from_ptr(ptr);
1773 /* don't allow realloc on referenced pointers */
1774 if (unlikely(tc->refs)) {
1775 return NULL;
1778 /* don't let anybody try to realloc a talloc_pool */
1779 if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
1780 return NULL;
1783 if (tc->limit && (size > tc->size)) {
1784 if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1785 errno = ENOMEM;
1786 return NULL;
1790 /* handle realloc inside a talloc_pool */
1791 if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1792 pool_hdr = tc->pool;
1795 #if (ALWAYS_REALLOC == 0)
1796 /* don't shrink if we have less than 1k to gain */
1797 if (size < tc->size && tc->limit == NULL) {
1798 if (pool_hdr) {
1799 void *next_tc = tc_next_chunk(tc);
1800 TC_INVALIDATE_SHRINK_CHUNK(tc, size);
1801 tc->size = size;
1802 if (next_tc == pool_hdr->end) {
1803 /* note: tc->size has changed, so this works */
1804 pool_hdr->end = tc_next_chunk(tc);
1806 return ptr;
1807 } else if ((tc->size - size) < 1024) {
1809 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1810 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1811 * after each realloc call, which slows down
1812 * testing a lot :-(.
1814 * That is why we only mark memory as undefined here.
1816 TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1818 /* do not shrink if we have less than 1k to gain */
1819 tc->size = size;
1820 return ptr;
1822 } else if (tc->size == size) {
1824 * do not change the pointer if it is exactly
1825 * the same size.
1827 return ptr;
1829 #endif
1832 * by resetting magic we catch users of the old memory
1834 * We mark this memory as free, and also over-stamp the talloc
1835 * magic with the old-style magic.
1837 * Why? This tries to avoid a memory read use-after-free from
1838 * disclosing our talloc magic, which would then allow an
1839 * attacker to prepare a valid header and so run a destructor.
1841 * What else? We have to re-stamp back a valid normal magic
1842 * on this memory once realloc() is done, as it will have done
1843 * a memcpy() into the new valid memory. We can't do this in
1844 * reverse as that would be a real use-after-free.
1846 _talloc_chunk_set_free(tc, NULL);
1848 #if ALWAYS_REALLOC
1849 if (pool_hdr) {
1850 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1851 pool_hdr->object_count--;
1853 if (new_ptr == NULL) {
1854 new_ptr = malloc(TC_HDR_SIZE+size);
1855 malloced = true;
1856 new_size = size;
1859 if (new_ptr) {
1860 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1861 TC_INVALIDATE_FULL_CHUNK(tc);
1863 } else {
1864 /* We're doing malloc then free here, so record the difference. */
1865 old_size = tc->size;
1866 new_size = size;
1867 new_ptr = malloc(size + TC_HDR_SIZE);
1868 if (new_ptr) {
1869 memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
1870 free(tc);
1873 #else
1874 if (pool_hdr) {
1875 struct talloc_chunk *pool_tc;
1876 void *next_tc = tc_next_chunk(tc);
1877 size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1878 size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1879 size_t space_needed;
1880 size_t space_left;
1881 unsigned int chunk_count = pool_hdr->object_count;
1883 pool_tc = talloc_chunk_from_pool(pool_hdr);
1884 if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1885 chunk_count -= 1;
1888 if (chunk_count == 1) {
1890 * optimize for the case where 'tc' is the only
1891 * chunk in the pool.
1893 char *start = tc_pool_first_chunk(pool_hdr);
1894 space_needed = new_chunk_size;
1895 space_left = (char *)tc_pool_end(pool_hdr) - start;
1897 if (space_left >= space_needed) {
1898 size_t old_used = TC_HDR_SIZE + tc->size;
1899 size_t new_used = TC_HDR_SIZE + size;
1900 new_ptr = start;
1902 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1905 * The area from
1906 * start -> tc may have
1907 * been freed and thus been marked as
1908 * VALGRIND_MEM_NOACCESS. Set it to
1909 * VALGRIND_MEM_UNDEFINED so we can
1910 * copy into it without valgrind errors.
1911 * We can't just mark
1912 * new_ptr -> new_ptr + old_used
1913 * as this may overlap on top of tc,
1914 * (which is why we use memmove, not
1915 * memcpy below) hence the MIN.
1917 size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1918 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1920 #endif
1922 memmove(new_ptr, tc, old_used);
1924 tc = (struct talloc_chunk *)new_ptr;
1925 TC_UNDEFINE_GROW_CHUNK(tc, size);
1928 * first we do not align the pool pointer
1929 * because we want to invalidate the padding
1930 * too.
1932 pool_hdr->end = new_used + (char *)new_ptr;
1933 tc_invalidate_pool(pool_hdr);
1935 /* now the aligned pointer */
1936 pool_hdr->end = new_chunk_size + (char *)new_ptr;
1937 goto got_new_ptr;
1940 next_tc = NULL;
1943 if (new_chunk_size == old_chunk_size) {
1944 TC_UNDEFINE_GROW_CHUNK(tc, size);
1945 _talloc_chunk_set_not_free(tc);
1946 tc->size = size;
1947 return ptr;
1950 if (next_tc == pool_hdr->end) {
1952 * optimize for the case where 'tc' is the last
1953 * chunk in the pool.
1955 space_needed = new_chunk_size - old_chunk_size;
1956 space_left = tc_pool_space_left(pool_hdr);
1958 if (space_left >= space_needed) {
1959 TC_UNDEFINE_GROW_CHUNK(tc, size);
1960 _talloc_chunk_set_not_free(tc);
1961 tc->size = size;
1962 pool_hdr->end = tc_next_chunk(tc);
1963 return ptr;
1967 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1969 if (new_ptr == NULL) {
1970 new_ptr = malloc(TC_HDR_SIZE+size);
1971 malloced = true;
1972 new_size = size;
1975 if (new_ptr) {
1976 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1978 _tc_free_poolmem(tc, __location__ "_talloc_realloc");
1981 else {
1982 /* We're doing realloc here, so record the difference. */
1983 old_size = tc->size;
1984 new_size = size;
1985 new_ptr = realloc(tc, size + TC_HDR_SIZE);
1987 got_new_ptr:
1988 #endif
1989 if (unlikely(!new_ptr)) {
1991 * Ok, this is a strange spot. We have to put back
1992 * the old talloc_magic and any flags, except the
1993 * TALLOC_FLAG_FREE as this was not free'ed by the
1994 * realloc() call after all
1996 _talloc_chunk_set_not_free(tc);
1997 return NULL;
2001 * tc is now the new value from realloc(), the old memory we
2002 * can't access any more and was preemptively marked as
2003 * TALLOC_FLAG_FREE before the call. Now we mark it as not
2004 * free again
2006 tc = (struct talloc_chunk *)new_ptr;
2007 _talloc_chunk_set_not_free(tc);
2008 if (malloced) {
2009 tc->flags &= ~TALLOC_FLAG_POOLMEM;
2011 if (tc->parent) {
2012 tc->parent->child = tc;
2014 if (tc->child) {
2015 tc->child->parent = tc;
2018 if (tc->prev) {
2019 tc->prev->next = tc;
2021 if (tc->next) {
2022 tc->next->prev = tc;
2025 if (new_size > old_size) {
2026 talloc_memlimit_grow(tc->limit, new_size - old_size);
2027 } else if (new_size < old_size) {
2028 talloc_memlimit_shrink(tc->limit, old_size - new_size);
2031 tc->size = size;
2032 _tc_set_name_const(tc, name);
2034 return TC_PTR_FROM_CHUNK(tc);
2038 a wrapper around talloc_steal() for situations where you are moving a pointer
2039 between two structures, and want the old pointer to be set to NULL
2041 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2043 const void **pptr = discard_const_p(const void *,_pptr);
2044 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
2045 (*pptr) = NULL;
2046 return ret;
2049 enum talloc_mem_count_type {
2050 TOTAL_MEM_SIZE,
2051 TOTAL_MEM_BLOCKS,
2052 TOTAL_MEM_LIMIT,
2055 static inline size_t _talloc_total_mem_internal(const void *ptr,
2056 enum talloc_mem_count_type type,
2057 struct talloc_memlimit *old_limit,
2058 struct talloc_memlimit *new_limit)
2060 size_t total = 0;
2061 struct talloc_chunk *c, *tc;
2063 if (ptr == NULL) {
2064 ptr = null_context;
2066 if (ptr == NULL) {
2067 return 0;
2070 tc = talloc_chunk_from_ptr(ptr);
2072 if (old_limit || new_limit) {
2073 if (tc->limit && tc->limit->upper == old_limit) {
2074 tc->limit->upper = new_limit;
2078 /* optimize in the memlimits case */
2079 if (type == TOTAL_MEM_LIMIT &&
2080 tc->limit != NULL &&
2081 tc->limit != old_limit &&
2082 tc->limit->parent == tc) {
2083 return tc->limit->cur_size;
2086 if (tc->flags & TALLOC_FLAG_LOOP) {
2087 return 0;
2090 tc->flags |= TALLOC_FLAG_LOOP;
2092 if (old_limit || new_limit) {
2093 if (old_limit == tc->limit) {
2094 tc->limit = new_limit;
2098 switch (type) {
2099 case TOTAL_MEM_SIZE:
2100 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2101 total = tc->size;
2103 break;
2104 case TOTAL_MEM_BLOCKS:
2105 total++;
2106 break;
2107 case TOTAL_MEM_LIMIT:
2108 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2110 * Don't count memory allocated from a pool
2111 * when calculating limits. Only count the
2112 * pool itself.
2114 if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2115 if (tc->flags & TALLOC_FLAG_POOL) {
2117 * If this is a pool, the allocated
2118 * size is in the pool header, and
2119 * remember to add in the prefix
2120 * length.
2122 struct talloc_pool_hdr *pool_hdr
2123 = talloc_pool_from_chunk(tc);
2124 total = pool_hdr->poolsize +
2125 TC_HDR_SIZE +
2126 TP_HDR_SIZE;
2127 } else {
2128 total = tc->size + TC_HDR_SIZE;
2132 break;
2134 for (c = tc->child; c; c = c->next) {
2135 total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2136 old_limit, new_limit);
2139 tc->flags &= ~TALLOC_FLAG_LOOP;
2141 return total;
2145 return the total size of a talloc pool (subtree)
2147 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2149 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2153 return the total number of blocks in a talloc pool (subtree)
2155 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2157 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2161 return the number of external references to a pointer
2163 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2165 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2166 struct talloc_reference_handle *h;
2167 size_t ret = 0;
2169 for (h=tc->refs;h;h=h->next) {
2170 ret++;
2172 return ret;
2176 report on memory usage by all children of a pointer, giving a full tree view
2178 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2179 void (*callback)(const void *ptr,
2180 int depth, int max_depth,
2181 int is_ref,
2182 void *private_data),
2183 void *private_data)
2185 struct talloc_chunk *c, *tc;
2187 if (ptr == NULL) {
2188 ptr = null_context;
2190 if (ptr == NULL) return;
2192 tc = talloc_chunk_from_ptr(ptr);
2194 if (tc->flags & TALLOC_FLAG_LOOP) {
2195 return;
2198 callback(ptr, depth, max_depth, 0, private_data);
2200 if (max_depth >= 0 && depth >= max_depth) {
2201 return;
2204 tc->flags |= TALLOC_FLAG_LOOP;
2205 for (c=tc->child;c;c=c->next) {
2206 if (c->name == TALLOC_MAGIC_REFERENCE) {
2207 struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2208 callback(h->ptr, depth + 1, max_depth, 1, private_data);
2209 } else {
2210 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2213 tc->flags &= ~TALLOC_FLAG_LOOP;
2216 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2218 const char *name = __talloc_get_name(ptr);
2219 struct talloc_chunk *tc;
2220 FILE *f = (FILE *)_f;
2222 if (is_ref) {
2223 fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2224 return;
2227 tc = talloc_chunk_from_ptr(ptr);
2228 if (tc->limit && tc->limit->parent == tc) {
2229 fprintf(f, "%*s%-30s is a memlimit context"
2230 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2231 depth*4, "",
2232 name,
2233 (unsigned long)tc->limit->max_size,
2234 (unsigned long)tc->limit->cur_size);
2237 if (depth == 0) {
2238 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2239 (max_depth < 0 ? "full " :""), name,
2240 (unsigned long)talloc_total_size(ptr),
2241 (unsigned long)talloc_total_blocks(ptr));
2242 return;
2245 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2246 depth*4, "",
2247 name,
2248 (unsigned long)talloc_total_size(ptr),
2249 (unsigned long)talloc_total_blocks(ptr),
2250 (int)talloc_reference_count(ptr), ptr);
2252 #if 0
2253 fprintf(f, "content: ");
2254 if (talloc_total_size(ptr)) {
2255 int tot = talloc_total_size(ptr);
2256 int i;
2258 for (i = 0; i < tot; i++) {
2259 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2260 fprintf(f, "%c", ((char *)ptr)[i]);
2261 } else {
2262 fprintf(f, "~%02x", ((char *)ptr)[i]);
2266 fprintf(f, "\n");
2267 #endif
2271 report on memory usage by all children of a pointer, giving a full tree view
2273 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2275 if (f) {
2276 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2277 fflush(f);
2282 report on memory usage by all children of a pointer, giving a full tree view
2284 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2286 talloc_report_depth_file(ptr, 0, -1, f);
2290 report on memory usage by all children of a pointer
2292 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2294 talloc_report_depth_file(ptr, 0, 1, f);
2298 report on any memory hanging off the null context
2300 static void talloc_report_null(void)
2302 if (talloc_total_size(null_context) != 0) {
2303 talloc_report(null_context, stderr);
2308 report on any memory hanging off the null context
2310 static void talloc_report_null_full(void)
2312 if (talloc_total_size(null_context) != 0) {
2313 talloc_report_full(null_context, stderr);
2318 enable tracking of the NULL context
2320 _PUBLIC_ void talloc_enable_null_tracking(void)
2322 if (null_context == NULL) {
2323 null_context = _talloc_named_const(NULL, 0, "null_context");
2324 if (autofree_context != NULL) {
2325 talloc_reparent(NULL, null_context, autofree_context);
2331 enable tracking of the NULL context, not moving the autofree context
2332 into the NULL context. This is needed for the talloc testsuite
2334 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2336 if (null_context == NULL) {
2337 null_context = _talloc_named_const(NULL, 0, "null_context");
2342 disable tracking of the NULL context
2344 _PUBLIC_ void talloc_disable_null_tracking(void)
2346 if (null_context != NULL) {
2347 /* we have to move any children onto the real NULL
2348 context */
2349 struct talloc_chunk *tc, *tc2;
2350 tc = talloc_chunk_from_ptr(null_context);
2351 for (tc2 = tc->child; tc2; tc2=tc2->next) {
2352 if (tc2->parent == tc) tc2->parent = NULL;
2353 if (tc2->prev == tc) tc2->prev = NULL;
2355 for (tc2 = tc->next; tc2; tc2=tc2->next) {
2356 if (tc2->parent == tc) tc2->parent = NULL;
2357 if (tc2->prev == tc) tc2->prev = NULL;
2359 tc->child = NULL;
2360 tc->next = NULL;
2362 talloc_free(null_context);
2363 null_context = NULL;
2367 enable leak reporting on exit
2369 _PUBLIC_ void talloc_enable_leak_report(void)
2371 talloc_enable_null_tracking();
2372 atexit(talloc_report_null);
2376 enable full leak reporting on exit
2378 _PUBLIC_ void talloc_enable_leak_report_full(void)
2380 talloc_enable_null_tracking();
2381 atexit(talloc_report_null_full);
2385 talloc and zero memory.
2387 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2389 void *p = _talloc_named_const(ctx, size, name);
2391 if (p) {
2392 memset(p, '\0', size);
2395 return p;
2399 memdup with a talloc.
2401 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2403 void *newp = _talloc_named_const(t, size, name);
2405 if (likely(newp)) {
2406 memcpy(newp, p, size);
2409 return newp;
2412 static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
2414 char *ret;
2415 struct talloc_chunk *tc;
2417 ret = (char *)__talloc(t, len + 1, &tc);
2418 if (unlikely(!ret)) return NULL;
2420 memcpy(ret, p, len);
2421 ret[len] = 0;
2423 _tc_set_name_const(tc, ret);
2424 return ret;
2428 strdup with a talloc
2430 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2432 if (unlikely(!p)) return NULL;
2433 return __talloc_strlendup(t, p, strlen(p));
2437 strndup with a talloc
2439 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2441 if (unlikely(!p)) return NULL;
2442 return __talloc_strlendup(t, p, strnlen(p, n));
2445 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2446 const char *a, size_t alen)
2448 char *ret;
2450 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2451 if (unlikely(!ret)) return NULL;
2453 /* append the string and the trailing \0 */
2454 memcpy(&ret[slen], a, alen);
2455 ret[slen+alen] = 0;
2457 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2458 return ret;
2462 * Appends at the end of the string.
2464 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2466 if (unlikely(!s)) {
2467 return talloc_strdup(NULL, a);
2470 if (unlikely(!a)) {
2471 return s;
2474 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2478 * Appends at the end of the talloc'ed buffer,
2479 * not the end of the string.
2481 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2483 size_t slen;
2485 if (unlikely(!s)) {
2486 return talloc_strdup(NULL, a);
2489 if (unlikely(!a)) {
2490 return s;
2493 slen = talloc_get_size(s);
2494 if (likely(slen > 0)) {
2495 slen--;
2498 return __talloc_strlendup_append(s, slen, a, strlen(a));
2502 * Appends at the end of the string.
2504 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2506 if (unlikely(!s)) {
2507 return talloc_strndup(NULL, a, n);
2510 if (unlikely(!a)) {
2511 return s;
2514 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2518 * Appends at the end of the talloc'ed buffer,
2519 * not the end of the string.
2521 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2523 size_t slen;
2525 if (unlikely(!s)) {
2526 return talloc_strndup(NULL, a, n);
2529 if (unlikely(!a)) {
2530 return s;
2533 slen = talloc_get_size(s);
2534 if (likely(slen > 0)) {
2535 slen--;
2538 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
2541 #ifndef HAVE_VA_COPY
2542 #ifdef HAVE___VA_COPY
2543 #define va_copy(dest, src) __va_copy(dest, src)
2544 #else
2545 #define va_copy(dest, src) (dest) = (src)
2546 #endif
2547 #endif
2549 static struct talloc_chunk *_vasprintf_tc(const void *t,
2550 const char *fmt,
2551 va_list ap) PRINTF_ATTRIBUTE(2,0);
2553 static struct talloc_chunk *_vasprintf_tc(const void *t,
2554 const char *fmt,
2555 va_list ap)
2557 int len;
2558 char *ret;
2559 va_list ap2;
2560 struct talloc_chunk *tc;
2561 char buf[1024];
2563 /* this call looks strange, but it makes it work on older solaris boxes */
2564 va_copy(ap2, ap);
2565 len = vsnprintf(buf, sizeof(buf), fmt, ap2);
2566 va_end(ap2);
2567 if (unlikely(len < 0)) {
2568 return NULL;
2571 ret = (char *)__talloc(t, len+1, &tc);
2572 if (unlikely(!ret)) return NULL;
2574 if (len < sizeof(buf)) {
2575 memcpy(ret, buf, len+1);
2576 } else {
2577 va_copy(ap2, ap);
2578 vsnprintf(ret, len+1, fmt, ap2);
2579 va_end(ap2);
2582 _tc_set_name_const(tc, ret);
2583 return tc;
2586 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2588 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2589 if (tc == NULL) {
2590 return NULL;
2592 return TC_PTR_FROM_CHUNK(tc);
2597 Perform string formatting, and return a pointer to newly allocated
2598 memory holding the result, inside a memory pool.
2600 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2602 va_list ap;
2603 char *ret;
2605 va_start(ap, fmt);
2606 ret = talloc_vasprintf(t, fmt, ap);
2607 va_end(ap);
2608 return ret;
2611 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2612 const char *fmt, va_list ap)
2613 PRINTF_ATTRIBUTE(3,0);
2615 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2616 const char *fmt, va_list ap)
2618 ssize_t alen;
2619 va_list ap2;
2620 char c;
2622 va_copy(ap2, ap);
2623 alen = vsnprintf(&c, 1, fmt, ap2);
2624 va_end(ap2);
2626 if (alen <= 0) {
2627 /* Either the vsnprintf failed or the format resulted in
2628 * no characters being formatted. In the former case, we
2629 * ought to return NULL, in the latter we ought to return
2630 * the original string. Most current callers of this
2631 * function expect it to never return NULL.
2633 return s;
2636 s = talloc_realloc(NULL, s, char, slen + alen + 1);
2637 if (!s) return NULL;
2639 va_copy(ap2, ap);
2640 vsnprintf(s + slen, alen + 1, fmt, ap2);
2641 va_end(ap2);
2643 _tc_set_name_const(talloc_chunk_from_ptr(s), s);
2644 return s;
2648 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2649 * and return @p s, which may have moved. Good for gradually
2650 * accumulating output into a string buffer. Appends at the end
2651 * of the string.
2653 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2655 if (unlikely(!s)) {
2656 return talloc_vasprintf(NULL, fmt, ap);
2659 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2663 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2664 * and return @p s, which may have moved. Always appends at the
2665 * end of the talloc'ed buffer, not the end of the string.
2667 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2669 size_t slen;
2671 if (unlikely(!s)) {
2672 return talloc_vasprintf(NULL, fmt, ap);
2675 slen = talloc_get_size(s);
2676 if (likely(slen > 0)) {
2677 slen--;
2680 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2684 Realloc @p s to append the formatted result of @p fmt and return @p
2685 s, which may have moved. Good for gradually accumulating output
2686 into a string buffer.
2688 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2690 va_list ap;
2692 va_start(ap, fmt);
2693 s = talloc_vasprintf_append(s, fmt, ap);
2694 va_end(ap);
2695 return s;
2699 Realloc @p s to append the formatted result of @p fmt and return @p
2700 s, which may have moved. Good for gradually accumulating output
2701 into a buffer.
2703 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2705 va_list ap;
2707 va_start(ap, fmt);
2708 s = talloc_vasprintf_append_buffer(s, fmt, ap);
2709 va_end(ap);
2710 return s;
2714 alloc an array, checking for integer overflow in the array size
2716 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2718 if (count >= MAX_TALLOC_SIZE/el_size) {
2719 return NULL;
2721 return _talloc_named_const(ctx, el_size * count, name);
2725 alloc an zero array, checking for integer overflow in the array size
2727 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2729 if (count >= MAX_TALLOC_SIZE/el_size) {
2730 return NULL;
2732 return _talloc_zero(ctx, el_size * count, name);
2736 realloc an array, checking for integer overflow in the array size
2738 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2740 if (count >= MAX_TALLOC_SIZE/el_size) {
2741 return NULL;
2743 return _talloc_realloc(ctx, ptr, el_size * count, name);
2747 a function version of talloc_realloc(), so it can be passed as a function pointer
2748 to libraries that want a realloc function (a realloc function encapsulates
2749 all the basic capabilities of an allocation library, which is why this is useful)
2751 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2753 return _talloc_realloc(context, ptr, size, NULL);
2757 static int talloc_autofree_destructor(void *ptr)
2759 autofree_context = NULL;
2760 return 0;
2763 static void talloc_autofree(void)
2765 talloc_free(autofree_context);
2769 return a context which will be auto-freed on exit
2770 this is useful for reducing the noise in leak reports
2772 _PUBLIC_ void *talloc_autofree_context(void)
2774 if (autofree_context == NULL) {
2775 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2776 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2777 atexit(talloc_autofree);
2779 return autofree_context;
2782 _PUBLIC_ size_t talloc_get_size(const void *context)
2784 struct talloc_chunk *tc;
2786 if (context == NULL) {
2787 return 0;
2790 tc = talloc_chunk_from_ptr(context);
2792 return tc->size;
2796 find a parent of this context that has the given name, if any
2798 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2800 struct talloc_chunk *tc;
2802 if (context == NULL) {
2803 return NULL;
2806 tc = talloc_chunk_from_ptr(context);
2807 while (tc) {
2808 if (tc->name && strcmp(tc->name, name) == 0) {
2809 return TC_PTR_FROM_CHUNK(tc);
2811 while (tc && tc->prev) tc = tc->prev;
2812 if (tc) {
2813 tc = tc->parent;
2816 return NULL;
2820 show the parentage of a context
2822 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2824 struct talloc_chunk *tc;
2826 if (context == NULL) {
2827 fprintf(file, "talloc no parents for NULL\n");
2828 return;
2831 tc = talloc_chunk_from_ptr(context);
2832 fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2833 while (tc) {
2834 fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2835 while (tc && tc->prev) tc = tc->prev;
2836 if (tc) {
2837 tc = tc->parent;
2840 fflush(file);
2844 return 1 if ptr is a parent of context
2846 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2848 struct talloc_chunk *tc;
2850 if (context == NULL) {
2851 return 0;
2854 tc = talloc_chunk_from_ptr(context);
2855 while (tc) {
2856 if (depth <= 0) {
2857 return 0;
2859 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2860 while (tc && tc->prev) tc = tc->prev;
2861 if (tc) {
2862 tc = tc->parent;
2863 depth--;
2866 return 0;
2870 return 1 if ptr is a parent of context
2872 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2874 return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2878 return the total size of memory used by this context and all children
2880 static inline size_t _talloc_total_limit_size(const void *ptr,
2881 struct talloc_memlimit *old_limit,
2882 struct talloc_memlimit *new_limit)
2884 return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2885 old_limit, new_limit);
2888 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2890 struct talloc_memlimit *l;
2892 for (l = limit; l != NULL; l = l->upper) {
2893 if (l->max_size != 0 &&
2894 ((l->max_size <= l->cur_size) ||
2895 (l->max_size - l->cur_size < size))) {
2896 return false;
2900 return true;
2904 Update memory limits when freeing a talloc_chunk.
2906 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2908 size_t limit_shrink_size;
2910 if (!tc->limit) {
2911 return;
2915 * Pool entries don't count. Only the pools
2916 * themselves are counted as part of the memory
2917 * limits. Note that this also takes care of
2918 * nested pools which have both flags
2919 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2921 if (tc->flags & TALLOC_FLAG_POOLMEM) {
2922 return;
2926 * If we are part of a memory limited context hierarchy
2927 * we need to subtract the memory used from the counters
2930 limit_shrink_size = tc->size+TC_HDR_SIZE;
2933 * If we're deallocating a pool, take into
2934 * account the prefix size added for the pool.
2937 if (tc->flags & TALLOC_FLAG_POOL) {
2938 limit_shrink_size += TP_HDR_SIZE;
2941 talloc_memlimit_shrink(tc->limit, limit_shrink_size);
2943 if (tc->limit->parent == tc) {
2944 free(tc->limit);
2947 tc->limit = NULL;
2951 Increase memory limit accounting after a malloc/realloc.
2953 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2954 size_t size)
2956 struct talloc_memlimit *l;
2958 for (l = limit; l != NULL; l = l->upper) {
2959 size_t new_cur_size = l->cur_size + size;
2960 if (new_cur_size < l->cur_size) {
2961 talloc_abort("logic error in talloc_memlimit_grow\n");
2962 return;
2964 l->cur_size = new_cur_size;
2969 Decrease memory limit accounting after a free/realloc.
2971 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2972 size_t size)
2974 struct talloc_memlimit *l;
2976 for (l = limit; l != NULL; l = l->upper) {
2977 if (l->cur_size < size) {
2978 talloc_abort("logic error in talloc_memlimit_shrink\n");
2979 return;
2981 l->cur_size = l->cur_size - size;
2985 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
2987 struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
2988 struct talloc_memlimit *orig_limit;
2989 struct talloc_memlimit *limit = NULL;
2991 if (tc->limit && tc->limit->parent == tc) {
2992 tc->limit->max_size = max_size;
2993 return 0;
2995 orig_limit = tc->limit;
2997 limit = malloc(sizeof(struct talloc_memlimit));
2998 if (limit == NULL) {
2999 return 1;
3001 limit->parent = tc;
3002 limit->max_size = max_size;
3003 limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
3005 if (orig_limit) {
3006 limit->upper = orig_limit;
3007 } else {
3008 limit->upper = NULL;
3011 return 0;