1 /*
2 Samba Unix SMB/CIFS implementation.
4 Samba trivial allocation library - new interface
6 NOTE: Please read talloc_guide.txt for full documentation
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
13 ** under the LGPL
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
30 inspired by http://swapped.cc/halloc/
33 #include "replace.h"
34 #include "talloc.h"
36 #ifdef HAVE_SYS_AUXV_H
37 #include <sys/auxv.h>
38 #endif
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
42 #endif
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
46 #endif
48 /* Special macros that are no-ops except when run under Valgrind on
49 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51 /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
54 #include <valgrind.h>
55 #endif
57 /* use this to force every realloc to change the pointer, to stress test
58 code that might not cope */
59 #define ALWAYS_REALLOC 0
62 #define MAX_TALLOC_SIZE 0x10000000
64 #define TALLOC_FLAG_FREE 0x01
65 #define TALLOC_FLAG_LOOP 0x02
66 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
67 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
70 * Bits above this are random, used to make it harder to fake talloc
71 * headers during an attack. Try not to change this without good reason.
73 #define TALLOC_FLAG_MASK 0x0F
75 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
77 #define TALLOC_MAGIC_BASE 0xe814ec70
78 static unsigned int talloc_magic = (
79 ~TALLOC_FLAG_MASK & (
80 TALLOC_MAGIC_BASE +
81 (TALLOC_BUILD_VERSION_MAJOR << 24) +
82 (TALLOC_BUILD_VERSION_MINOR << 16) +
83 (TALLOC_BUILD_VERSION_RELEASE << 8)));
85 /* by default we abort when given a bad pointer (such as when talloc_free() is called
86    on a pointer that came from malloc()) */
87 #ifndef TALLOC_ABORT
88 #define TALLOC_ABORT(reason) abort()
89 #endif
91 #ifndef discard_const_p
92 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
93 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
94 #else
95 # define discard_const_p(type, ptr) ((type *)(ptr))
96 #endif
97 #endif
99 /* these macros gain us a few percent of speed on gcc */
100 #if (__GNUC__ >= 3)
101 /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
102 as its first argument */
103 #ifndef likely
104 #define likely(x) __builtin_expect(!!(x), 1)
105 #endif
106 #ifndef unlikely
107 #define unlikely(x) __builtin_expect(!!(x), 0)
108 #endif
109 #else
110 #ifndef likely
111 #define likely(x) (x)
112 #endif
113 #ifndef unlikely
114 #define unlikely(x) (x)
115 #endif
116 #endif
118 /* this null_context is only used if talloc_enable_leak_report() or
119 talloc_enable_leak_report_full() is called, otherwise it remains
120 NULL
122 static void *null_context;
123 static void *autofree_context;
125 /* used to enable fill of memory on free, which can be useful for
126 * catching use after free errors when valgrind is too slow
128 static struct {
129 bool initialised;
130 bool enabled;
131 uint8_t fill_value;
132 } talloc_fill;
134 #define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
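/*
 * For illustration (an assumption about typical usage, not stated in the
 * surrounding comments): the fill feature is switched on purely via the
 * environment, e.g. running a program as
 *
 *     TALLOC_FREE_FILL=0xfe ./myprog
 *
 * The value is parsed with strtoul(..., 0), so decimal, octal and hex all
 * work.  The check happens lazily on the first free, see
 * _talloc_free_internal() below.
 */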
137 * do not wipe the header, to allow the
138 * double-free logic to still work
140 #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
141 if (unlikely(talloc_fill.enabled)) { \
142 size_t _flen = (_tc)->size; \
143 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
144 memset(_fptr, talloc_fill.fill_value, _flen); \
146 } while (0)
148 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
149 /* Mark the whole chunk as not accessible */
150 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
151 size_t _flen = TC_HDR_SIZE + (_tc)->size; \
152 char *_fptr = (char *)(_tc); \
153 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
154 } while(0)
155 #else
156 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
157 #endif
159 #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
160 TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
161 TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
162 } while (0)
164 #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
165 if (unlikely(talloc_fill.enabled)) { \
166 size_t _flen = (_tc)->size - (_new_size); \
167 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
168 _fptr += (_new_size); \
169 memset(_fptr, talloc_fill.fill_value, _flen); \
171 } while (0)
173 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
174 /* Mark the unused bytes as not accessible */
175 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
176 size_t _flen = (_tc)->size - (_new_size); \
177 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
178 _fptr += (_new_size); \
179 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
180 } while (0)
181 #else
182 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
183 #endif
185 #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
186 TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
187 TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
188 } while (0)
190 #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
191 if (unlikely(talloc_fill.enabled)) { \
192 size_t _flen = (_tc)->size - (_new_size); \
193 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
194 _fptr += (_new_size); \
195 memset(_fptr, talloc_fill.fill_value, _flen); \
197 } while (0)
199 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
200 /* Mark the unused bytes as undefined */
201 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
202 size_t _flen = (_tc)->size - (_new_size); \
203 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
204 _fptr += (_new_size); \
205 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
206 } while (0)
207 #else
208 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
209 #endif
211 #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
212 TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
213 TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
214 } while (0)
216 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
217 /* Mark the new bytes as undefined */
218 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
219 size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
220 size_t _new_used = TC_HDR_SIZE + (_new_size); \
221 size_t _flen = _new_used - _old_used; \
222 char *_fptr = _old_used + (char *)(_tc); \
223 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
224 } while (0)
225 #else
226 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
227 #endif
229 #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
230 TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
231 } while (0)
233 struct talloc_reference_handle {
234 struct talloc_reference_handle *next, *prev;
235 void *ptr;
236 const char *location;
239 struct talloc_memlimit {
240 struct talloc_chunk *parent;
241 struct talloc_memlimit *upper;
242 size_t max_size;
243 size_t cur_size;
246 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
247 static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
248 size_t size);
249 static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
250 size_t size);
251 static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
253 static inline void _tc_set_name_const(struct talloc_chunk *tc,
254 const char *name);
255 static struct talloc_chunk *_vasprintf_tc(const void *t,
256 const char *fmt,
257 va_list ap);
259 typedef int (*talloc_destructor_t)(void *);
261 struct talloc_pool_hdr;
263 struct talloc_chunk {
265 * flags includes the talloc magic, which is randomised to
266 * make overwrite attacks harder
268 unsigned flags;
271 * If you have a logical tree like:
273 * <parent>
274 * / | \
275 * / | \
276 * / | \
277 * <child 1> <child 2> <child 3>
279 * The actual talloc tree is:
281 * <parent>
283 * <child 1> - <child 2> - <child 3>
285 * The children are linked with next/prev pointers, and
286 * child 1 is linked to the parent with parent/child
287 * pointers.
290 struct talloc_chunk *next, *prev;
291 struct talloc_chunk *parent, *child;
292 struct talloc_reference_handle *refs;
293 talloc_destructor_t destructor;
294 const char *name;
295 size_t size;
298 * limit semantics:
299 * if 'limit' is set it means all *new* children of the context will
300  * be limited to a total aggregate size of max_size for memory
301 * allocations.
302 * cur_size is used to keep track of the current use
304 struct talloc_memlimit *limit;
307 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
308 * is a pointer to the struct talloc_chunk of the pool that it was
309 * allocated from. This way children can quickly find the pool to chew
310 * from.
312 struct talloc_pool_hdr *pool;
315 /* 16 byte alignment seems to keep everyone happy */
316 #define TC_ALIGN16(s) (((s)+15)&~15)
317 #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
318 #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
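/*
 * Worked example of the alignment arithmetic above (illustrative only):
 * TC_ALIGN16() rounds up to the next multiple of 16, so
 * TC_ALIGN16(1) == 16, TC_ALIGN16(16) == 16 and TC_ALIGN16(17) == 32.
 * TC_HDR_SIZE is therefore sizeof(struct talloc_chunk) rounded up to a
 * multiple of 16, and TC_PTR_FROM_CHUNK() yields the first byte after
 * that aligned header, i.e. the pointer that is handed back to callers.
 */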
320 _PUBLIC_ int talloc_version_major(void)
322 return TALLOC_VERSION_MAJOR;
325 _PUBLIC_ int talloc_version_minor(void)
327 return TALLOC_VERSION_MINOR;
330 _PUBLIC_ int talloc_test_get_magic(void)
332 return talloc_magic;
335 static void (*talloc_log_fn)(const char *message);
337 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
339 talloc_log_fn = log_fn;
342 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
343 void talloc_lib_init(void) __attribute__((constructor));
344 void talloc_lib_init(void)
346 uint32_t random_value;
347 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
348 uint8_t *p;
350 * Use the kernel-provided random values used for
351 * ASLR. This won't change per-exec, which is ideal for us
353 p = (uint8_t *) getauxval(AT_RANDOM);
354 if (p) {
356 * We get 16 bytes from getauxval. By calling rand(),
357 * a totally insecure PRNG, but one that will
358 * deterministically have a different value when called
359 * twice, we ensure that if two talloc-like libraries
360 * are somehow loaded in the same address space, that
361 * because we choose different bytes, we will keep the
362 * protection against collision of multiple talloc
363 * libs.
365 * This protection is important because the effects of
366 * passing a talloc pointer from one to the other may
367 * be very hard to determine.
369 int offset = rand() % (16 - sizeof(random_value));
370 memcpy(&random_value, p + offset, sizeof(random_value));
371 } else
372 #endif
375 * Otherwise, hope the location we are loaded in
376 * memory is randomised by someone else
378 random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
380 talloc_magic = random_value & ~TALLOC_FLAG_MASK;
382 #else
383 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
384 #endif
386 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
387 static void talloc_log(const char *fmt, ...)
389 va_list ap;
390 char *message;
392 if (!talloc_log_fn) {
393 return;
396 va_start(ap, fmt);
397 message = talloc_vasprintf(NULL, fmt, ap);
398 va_end(ap);
400 talloc_log_fn(message);
401 talloc_free(message);
404 static void talloc_log_stderr(const char *message)
406 fprintf(stderr, "%s", message);
409 _PUBLIC_ void talloc_set_log_stderr(void)
411 talloc_set_log_fn(talloc_log_stderr);
414 static void (*talloc_abort_fn)(const char *reason);
416 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
418 talloc_abort_fn = abort_fn;
421 static void talloc_abort(const char *reason)
423 talloc_log("%s\n", reason);
425 if (!talloc_abort_fn) {
426 TALLOC_ABORT(reason);
429 talloc_abort_fn(reason);
432 static void talloc_abort_magic(unsigned magic)
434 talloc_abort("Bad talloc magic value - wrong talloc version used/mixed");
437 static void talloc_abort_access_after_free(void)
439 talloc_abort("Bad talloc magic value - access after free");
442 static void talloc_abort_unknown_value(void)
444 talloc_abort("Bad talloc magic value - unknown value");
447 /* panic if we get a bad magic value */
448 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
450 const char *pp = (const char *)ptr;
451 struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
452 if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
453 if ((tc->flags & (~TALLOC_FLAG_MASK)) == talloc_magic) {
454 talloc_abort_magic(tc->flags & (~TALLOC_FLAG_MASK));
455 return NULL;
458 if (tc->flags & TALLOC_FLAG_FREE) {
459 talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
460 talloc_abort_access_after_free();
461 return NULL;
462 } else {
463 talloc_abort_unknown_value();
464 return NULL;
467 return tc;
470 /* hook into the front of the list */
471 #define _TLIST_ADD(list, p) \
472 do { \
473 if (!(list)) { \
474 (list) = (p); \
475 (p)->next = (p)->prev = NULL; \
476 } else { \
477 (list)->prev = (p); \
478 (p)->next = (list); \
479 (p)->prev = NULL; \
480 (list) = (p); \
482 } while (0)
484 /* remove an element from a list - element doesn't have to be in list. */
485 #define _TLIST_REMOVE(list, p) \
486 do { \
487 if ((p) == (list)) { \
488 (list) = (p)->next; \
489 if (list) (list)->prev = NULL; \
490 } else { \
491 if ((p)->prev) (p)->prev->next = (p)->next; \
492 if ((p)->next) (p)->next->prev = (p)->prev; \
494 if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
495 } while (0)
499 return the parent chunk of a pointer
501 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
503 struct talloc_chunk *tc;
505 if (unlikely(ptr == NULL)) {
506 return NULL;
509 tc = talloc_chunk_from_ptr(ptr);
510 while (tc->prev) tc=tc->prev;
512 return tc->parent;
515 _PUBLIC_ void *talloc_parent(const void *ptr)
517 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
518 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
522   find the parent's name
524 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
526 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
527 return tc? tc->name : NULL;
531   A pool carries an in-pool object count in the first 16 bytes. This is
532   done to support talloc_steal() to a parent outside of the
533 pool. The count includes the pool itself, so a talloc_free() on a pool will
534 only destroy the pool if the count has dropped to zero. A talloc_free() of a
535 pool member will reduce the count, and eventually also call free(3) on the
536 pool memory.
538 The object count is not put into "struct talloc_chunk" because it is only
539 relevant for talloc pools and the alignment to 16 bytes would increase the
540 memory footprint of each talloc chunk by those 16 bytes.
543 struct talloc_pool_hdr {
544 void *end;
545 unsigned int object_count;
546 size_t poolsize;
549 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
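/*
 * For illustration, the memory layout these definitions imply (derived
 * from the helper functions that follow, not spelled out upstream):
 *
 *   [talloc_pool_hdr, TP_HDR_SIZE][talloc_chunk, TC_HDR_SIZE][poolsize bytes]
 *
 * talloc_pool_from_chunk() and talloc_chunk_from_pool() simply step across
 * the TP_HDR_SIZE prefix, and "end" advances through the poolsize bytes as
 * chunks are carved out of the pool.
 */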
551 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
553 return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
556 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
558 return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
561 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
563 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
564 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
567 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
569 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
572 /* If tc is inside a pool, this gives the next neighbour. */
573 static inline void *tc_next_chunk(struct talloc_chunk *tc)
575 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
578 static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
580 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
581 return tc_next_chunk(tc);
584 /* Mark the whole remaining pool as not accessible */
585 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
587 size_t flen = tc_pool_space_left(pool_hdr);
589 if (unlikely(talloc_fill.enabled)) {
590 memset(pool_hdr->end, talloc_fill.fill_value, flen);
593 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
594 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
595 #endif
599 Allocate from a pool
602 static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
603 size_t size, size_t prefix_len)
605 struct talloc_pool_hdr *pool_hdr = NULL;
606 size_t space_left;
607 struct talloc_chunk *result;
608 size_t chunk_size;
610 if (parent == NULL) {
611 return NULL;
614 if (parent->flags & TALLOC_FLAG_POOL) {
615 pool_hdr = talloc_pool_from_chunk(parent);
617 else if (parent->flags & TALLOC_FLAG_POOLMEM) {
618 pool_hdr = parent->pool;
621 if (pool_hdr == NULL) {
622 return NULL;
625 space_left = tc_pool_space_left(pool_hdr);
628 * Align size to 16 bytes
630 chunk_size = TC_ALIGN16(size + prefix_len);
632 if (space_left < chunk_size) {
633 return NULL;
636 result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
638 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
639 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
640 #endif
642 pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
644 result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
645 result->pool = pool_hdr;
647 pool_hdr->object_count++;
649 return result;
653 Allocate a bit of memory as a child of an existing pointer
655 static inline void *__talloc_with_prefix(const void *context,
656 size_t size,
657 size_t prefix_len,
658 struct talloc_chunk **tc_ret)
660 struct talloc_chunk *tc = NULL;
661 struct talloc_memlimit *limit = NULL;
662 size_t total_len = TC_HDR_SIZE + size + prefix_len;
663 struct talloc_chunk *parent = NULL;
665 if (unlikely(context == NULL)) {
666 context = null_context;
669 if (unlikely(size >= MAX_TALLOC_SIZE)) {
670 return NULL;
673 if (unlikely(total_len < TC_HDR_SIZE)) {
674 return NULL;
677 if (likely(context != NULL)) {
678 parent = talloc_chunk_from_ptr(context);
680 if (parent->limit != NULL) {
681 limit = parent->limit;
684 tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
687 if (tc == NULL) {
688 char *ptr;
691 * Only do the memlimit check/update on actual allocation.
693 if (!talloc_memlimit_check(limit, total_len)) {
694 errno = ENOMEM;
695 return NULL;
698 ptr = malloc(total_len);
699 if (unlikely(ptr == NULL)) {
700 return NULL;
702 tc = (struct talloc_chunk *)(ptr + prefix_len);
703 tc->flags = talloc_magic;
704 tc->pool = NULL;
706 talloc_memlimit_grow(limit, total_len);
709 tc->limit = limit;
710 tc->size = size;
711 tc->destructor = NULL;
712 tc->child = NULL;
713 tc->name = NULL;
714 tc->refs = NULL;
716 if (likely(context != NULL)) {
717 if (parent->child) {
718 parent->child->parent = NULL;
719 tc->next = parent->child;
720 tc->next->prev = tc;
721 } else {
722 tc->next = NULL;
724 tc->parent = parent;
725 tc->prev = NULL;
726 parent->child = tc;
727 } else {
728 tc->next = tc->prev = tc->parent = NULL;
731 *tc_ret = tc;
732 return TC_PTR_FROM_CHUNK(tc);
735 static inline void *__talloc(const void *context,
736 size_t size,
737 struct talloc_chunk **tc)
739 return __talloc_with_prefix(context, size, 0, tc);
743 * Create a talloc pool
746 static inline void *_talloc_pool(const void *context, size_t size)
748 struct talloc_chunk *tc;
749 struct talloc_pool_hdr *pool_hdr;
750 void *result;
752 result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
754 if (unlikely(result == NULL)) {
755 return NULL;
758 pool_hdr = talloc_pool_from_chunk(tc);
760 tc->flags |= TALLOC_FLAG_POOL;
761 tc->size = 0;
763 pool_hdr->object_count = 1;
764 pool_hdr->end = result;
765 pool_hdr->poolsize = size;
767 tc_invalidate_pool(pool_hdr);
769 return result;
772 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
774 return _talloc_pool(context, size);
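#if 0
/*
 * Illustrative usage sketch, kept under #if 0 so it is not compiled.
 * The sizes and names are arbitrary assumptions, not part of the library:
 * a pool pre-allocates one buffer, children are carved out of it while
 * space remains, and everything goes away with a single free.
 */
static void example_pool_usage(void)
{
	TALLOC_CTX *pool = talloc_pool(NULL, 8192);
	char *a, *b;

	if (pool == NULL) {
		return;
	}

	a = talloc_array(pool, char, 100);	/* served from the pool buffer */
	b = talloc_array(pool, char, 200);	/* likewise, no malloc() needed */
	(void)a;
	(void)b;

	talloc_free(pool);	/* releases the pool and both children */
}
#endif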
778 * Create a talloc pool correctly sized for a basic size plus
779 * a number of subobjects whose total size is given. Essentially
780 * a custom allocator for talloc to reduce fragmentation.
783 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
784 size_t type_size,
785 const char *type_name,
786 unsigned num_subobjects,
787 size_t total_subobjects_size)
789 size_t poolsize, subobjects_slack, tmp;
790 struct talloc_chunk *tc;
791 struct talloc_pool_hdr *pool_hdr;
792 void *ret;
794 poolsize = type_size + total_subobjects_size;
796 if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
797 goto overflow;
800 if (num_subobjects == UINT_MAX) {
801 goto overflow;
803 num_subobjects += 1; /* the object body itself */
806 * Alignment can increase the pool size by at most 15 bytes per object
807 * plus alignment for the object itself
809 subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
810 if (subobjects_slack < num_subobjects) {
811 goto overflow;
814 tmp = poolsize + subobjects_slack;
815 if ((tmp < poolsize) || (tmp < subobjects_slack)) {
816 goto overflow;
818 poolsize = tmp;
820 ret = _talloc_pool(ctx, poolsize);
821 if (ret == NULL) {
822 return NULL;
825 tc = talloc_chunk_from_ptr(ret);
826 tc->size = type_size;
828 pool_hdr = talloc_pool_from_chunk(tc);
830 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
831 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
832 #endif
834 pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
836 _tc_set_name_const(tc, type_name);
837 return ret;
839 overflow:
840 return NULL;
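#if 0
/*
 * Illustrative usage sketch, kept under #if 0 so it is not compiled.
 * "struct foo" and its members are hypothetical; callers normally reach
 * this function through the talloc_pooled_object() macro in talloc.h,
 * sizing the pool for the object plus its expected sub-allocations.
 */
struct foo {
	char *name;
	char *value;
};

static struct foo *example_pooled_object(TALLOC_CTX *ctx)
{
	/* room for 2 sub-allocations totalling ~32 bytes of string data */
	struct foo *f = talloc_pooled_object(ctx, struct foo, 2, 32);

	if (f == NULL) {
		return NULL;
	}
	f->name = talloc_strdup(f, "name");	/* carved from the same pool */
	f->value = talloc_strdup(f, "value");
	return f;
}
#endif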
844 setup a destructor to be called on free of a pointer
845 the destructor should return 0 on success, or -1 on failure.
846   if the destructor fails then the free fails too, and the memory can
847   continue to be used
849 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
851 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
852 tc->destructor = destructor;
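#if 0
/*
 * Illustrative usage sketch, kept under #if 0 so it is not compiled.
 * "struct open_file" is hypothetical and close() would need <unistd.h>;
 * callers normally use the type-safe talloc_set_destructor() macro from
 * talloc.h.  Returning -1 makes the corresponding talloc_free() fail and
 * leaves the memory valid.
 */
struct open_file {
	int fd;
};

static int open_file_destructor(struct open_file *f)
{
	if (close(f->fd) != 0) {
		return -1;	/* refuse the free, caller may retry */
	}
	return 0;
}

static struct open_file *open_file_new(TALLOC_CTX *ctx, int fd)
{
	struct open_file *f = talloc_zero(ctx, struct open_file);

	if (f == NULL) {
		return NULL;
	}
	f->fd = fd;
	talloc_set_destructor(f, open_file_destructor);
	return f;
}
#endif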
856 increase the reference count on a piece of memory.
858 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
860 if (unlikely(!talloc_reference(null_context, ptr))) {
861 return -1;
863 return 0;
867 helper for talloc_reference()
869 this is referenced by a function pointer and should not be inline
871 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
873 struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
874 _TLIST_REMOVE(ptr_tc->refs, handle);
875 return 0;
879 more efficient way to add a name to a pointer - the name must point to a
880 true string constant
882 static inline void _tc_set_name_const(struct talloc_chunk *tc,
883 const char *name)
885 tc->name = name;
889 internal talloc_named_const()
891 static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
893 void *ptr;
894 struct talloc_chunk *tc;
896 ptr = __talloc(context, size, &tc);
897 if (unlikely(ptr == NULL)) {
898 return NULL;
901 _tc_set_name_const(tc, name);
903 return ptr;
907 make a secondary reference to a pointer, hanging off the given context.
908 the pointer remains valid until both the original caller and this given
909 context are freed.
911 the major use for this is when two different structures need to reference the
912 same underlying data, and you want to be able to free the two instances separately,
913 and in either order
915 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
917 struct talloc_chunk *tc;
918 struct talloc_reference_handle *handle;
919 if (unlikely(ptr == NULL)) return NULL;
921 tc = talloc_chunk_from_ptr(ptr);
922 handle = (struct talloc_reference_handle *)_talloc_named_const(context,
923 sizeof(struct talloc_reference_handle),
924 TALLOC_MAGIC_REFERENCE);
925 if (unlikely(handle == NULL)) return NULL;
927 /* note that we hang the destructor off the handle, not the
928 main context as that allows the caller to still setup their
929 own destructor on the context if they want to */
930 talloc_set_destructor(handle, talloc_reference_destructor);
931 handle->ptr = discard_const_p(void, ptr);
932 handle->location = location;
933 _TLIST_ADD(tc->refs, handle);
934 return handle->ptr;
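#if 0
/*
 * Illustrative usage sketch, kept under #if 0 so it is not compiled.
 * ctx1/ctx2/shared are made-up names: the buffer gains a second parent
 * via talloc_reference() and stays alive until both owners are gone,
 * freed in either order.
 */
static void example_reference(void)
{
	TALLOC_CTX *ctx1 = talloc_new(NULL);
	TALLOC_CTX *ctx2 = talloc_new(NULL);
	char *shared = talloc_strdup(ctx1, "shared data");

	talloc_reference(ctx2, shared);	/* second parent */

	talloc_free(ctx1);	/* shared survives, now held via ctx2 */
	talloc_free(ctx2);	/* last owner gone, shared is freed */
}
#endif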
937 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
939 static inline void _tc_free_poolmem(struct talloc_chunk *tc,
940 const char *location)
942 struct talloc_pool_hdr *pool;
943 struct talloc_chunk *pool_tc;
944 void *next_tc;
946 pool = tc->pool;
947 pool_tc = talloc_chunk_from_pool(pool);
948 next_tc = tc_next_chunk(tc);
950 tc->flags |= TALLOC_FLAG_FREE;
952 /* we mark the freed memory with where we called the free
953 * from. This means on a double free error we can report where
954 * the first free came from
956 tc->name = location;
958 TC_INVALIDATE_FULL_CHUNK(tc);
960 if (unlikely(pool->object_count == 0)) {
961 talloc_abort("Pool object count zero!");
962 return;
965 pool->object_count--;
967 if (unlikely(pool->object_count == 1
968 && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
970 * if there is just one object left in the pool
971 * and pool->flags does not have TALLOC_FLAG_FREE,
972 * it means this is the pool itself and
973 * the rest is available for new objects
974 * again.
976 pool->end = tc_pool_first_chunk(pool);
977 tc_invalidate_pool(pool);
978 return;
981 if (unlikely(pool->object_count == 0)) {
983 * we mark the freed memory with where we called the free
984 * from. This means on a double free error we can report where
985 * the first free came from
987 pool_tc->name = location;
989 if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
990 _tc_free_poolmem(pool_tc, location);
991 } else {
993 * The tc_memlimit_update_on_free()
994 * call takes into account the
995 * prefix TP_HDR_SIZE allocated before
996 * the pool talloc_chunk.
998 tc_memlimit_update_on_free(pool_tc);
999 TC_INVALIDATE_FULL_CHUNK(pool_tc);
1000 free(pool);
1002 return;
1005 if (pool->end == next_tc) {
1007            * if pool->end still points to the end of
1008 * 'tc' (which is stored in the 'next_tc' variable),
1009 * we can reclaim the memory of 'tc'.
1011 pool->end = tc;
1012 return;
1016 * Do nothing. The memory is just "wasted", waiting for the pool
1017 * itself to be freed.
1021 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1022 void *ptr,
1023 const char *location);
1025 static inline int _talloc_free_internal(void *ptr, const char *location);
1028 internal free call that takes a struct talloc_chunk *.
1030 static inline int _tc_free_internal(struct talloc_chunk *tc,
1031 const char *location)
1033 void *ptr_to_free;
1034 void *ptr = TC_PTR_FROM_CHUNK(tc);
1036 if (unlikely(tc->refs)) {
1037 int is_child;
1038 /* check if this is a reference from a child or
1039   * grandchild back to its parent or grandparent
1041 * in that case we need to remove the reference and
1042 * call another instance of talloc_free() on the current
1043 * pointer.
1045 is_child = talloc_is_parent(tc->refs, ptr);
1046 _talloc_free_internal(tc->refs, location);
1047 if (is_child) {
1048 return _talloc_free_internal(ptr, location);
1050 return -1;
1053 if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1054 /* we have a free loop - stop looping */
1055 return 0;
1058 if (unlikely(tc->destructor)) {
1059 talloc_destructor_t d = tc->destructor;
1062 * Protect the destructor against some overwrite
1063 * attacks, by explicitly checking it has the right
1064 * magic here.
1066 if (talloc_chunk_from_ptr(ptr) != tc) {
1068 * This can't actually happen, the
1069 * call itself will panic.
1071 TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1074 if (d == (talloc_destructor_t)-1) {
1075 return -1;
1077 tc->destructor = (talloc_destructor_t)-1;
1078 if (d(ptr) == -1) {
1080 * Only replace the destructor pointer if
1081 * calling the destructor didn't modify it.
1083 if (tc->destructor == (talloc_destructor_t)-1) {
1084 tc->destructor = d;
1086 return -1;
1088 tc->destructor = NULL;
1091 if (tc->parent) {
1092 _TLIST_REMOVE(tc->parent->child, tc);
1093 if (tc->parent->child) {
1094 tc->parent->child->parent = tc->parent;
1096 } else {
1097 if (tc->prev) tc->prev->next = tc->next;
1098 if (tc->next) tc->next->prev = tc->prev;
1099 tc->prev = tc->next = NULL;
1102 tc->flags |= TALLOC_FLAG_LOOP;
1104 _tc_free_children_internal(tc, ptr, location);
1106 tc->flags |= TALLOC_FLAG_FREE;
1108 /* we mark the freed memory with where we called the free
1109 * from. This means on a double free error we can report where
1110 * the first free came from
1112 tc->name = location;
1114 if (tc->flags & TALLOC_FLAG_POOL) {
1115 struct talloc_pool_hdr *pool;
1117 pool = talloc_pool_from_chunk(tc);
1119 if (unlikely(pool->object_count == 0)) {
1120 talloc_abort("Pool object count zero!");
1121 return 0;
1124 pool->object_count--;
1126 if (likely(pool->object_count != 0)) {
1127 return 0;
1131 * With object_count==0, a pool becomes a normal piece of
1132 * memory to free. If it's allocated inside a pool, it needs
1133 * to be freed as poolmem, else it needs to be just freed.
1135 ptr_to_free = pool;
1136 } else {
1137 ptr_to_free = tc;
1140 if (tc->flags & TALLOC_FLAG_POOLMEM) {
1141 _tc_free_poolmem(tc, location);
1142 return 0;
1145 tc_memlimit_update_on_free(tc);
1147 TC_INVALIDATE_FULL_CHUNK(tc);
1148 free(ptr_to_free);
1149 return 0;
1153 internal talloc_free call
1155 static inline int _talloc_free_internal(void *ptr, const char *location)
1157 struct talloc_chunk *tc;
1159 if (unlikely(ptr == NULL)) {
1160 return -1;
1163 /* possibly initialise the talloc fill value */
1164 if (unlikely(!talloc_fill.initialised)) {
1165 const char *fill = getenv(TALLOC_FILL_ENV);
1166 if (fill != NULL) {
1167 talloc_fill.enabled = true;
1168 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1170 talloc_fill.initialised = true;
1173 tc = talloc_chunk_from_ptr(ptr);
1174 return _tc_free_internal(tc, location);
1177 static inline size_t _talloc_total_limit_size(const void *ptr,
1178 struct talloc_memlimit *old_limit,
1179 struct talloc_memlimit *new_limit);
1182 move a lump of memory from one talloc context to another return the
1183 ptr on success, or NULL if it could not be transferred.
1184 passing NULL as ptr will always return NULL with no side effects.
1186 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1188 struct talloc_chunk *tc, *new_tc;
1189 size_t ctx_size = 0;
1191 if (unlikely(!ptr)) {
1192 return NULL;
1195 if (unlikely(new_ctx == NULL)) {
1196 new_ctx = null_context;
1199 tc = talloc_chunk_from_ptr(ptr);
1201 if (tc->limit != NULL) {
1203 ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1205 /* Decrement the memory limit from the source .. */
1206 talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1208 if (tc->limit->parent == tc) {
1209 tc->limit->upper = NULL;
1210 } else {
1211 tc->limit = NULL;
1215 if (unlikely(new_ctx == NULL)) {
1216 if (tc->parent) {
1217 _TLIST_REMOVE(tc->parent->child, tc);
1218 if (tc->parent->child) {
1219 tc->parent->child->parent = tc->parent;
1221 } else {
1222 if (tc->prev) tc->prev->next = tc->next;
1223 if (tc->next) tc->next->prev = tc->prev;
1226 tc->parent = tc->next = tc->prev = NULL;
1227 return discard_const_p(void, ptr);
1230 new_tc = talloc_chunk_from_ptr(new_ctx);
1232 if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1233 return discard_const_p(void, ptr);
1236 if (tc->parent) {
1237 _TLIST_REMOVE(tc->parent->child, tc);
1238 if (tc->parent->child) {
1239 tc->parent->child->parent = tc->parent;
1241 } else {
1242 if (tc->prev) tc->prev->next = tc->next;
1243 if (tc->next) tc->next->prev = tc->prev;
1244 tc->prev = tc->next = NULL;
1247 tc->parent = new_tc;
1248 if (new_tc->child) new_tc->child->parent = NULL;
1249 _TLIST_ADD(new_tc->child, tc);
1251 if (tc->limit || new_tc->limit) {
1252 ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1253 new_tc->limit);
1254 /* .. and increment it in the destination. */
1255 if (new_tc->limit) {
1256 talloc_memlimit_grow(new_tc->limit, ctx_size);
1260 return discard_const_p(void, ptr);
1264 move a lump of memory from one talloc context to another return the
1265 ptr on success, or NULL if it could not be transferred.
1266 passing NULL as ptr will always return NULL with no side effects.
1268 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1270 struct talloc_chunk *tc;
1272 if (unlikely(ptr == NULL)) {
1273 return NULL;
1276 tc = talloc_chunk_from_ptr(ptr);
1278 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1279 struct talloc_reference_handle *h;
1281 talloc_log("WARNING: talloc_steal with references at %s\n",
1282 location);
1284 for (h=tc->refs; h; h=h->next) {
1285 talloc_log("\treference at %s\n",
1286 h->location);
1290 #if 0
1291 /* this test is probably too expensive to have on in the
1292    normal build, but it is useful for debugging */
1293 if (talloc_is_parent(new_ctx, ptr)) {
1294 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1296 #endif
1298 return _talloc_steal_internal(new_ctx, ptr);
1302 this is like a talloc_steal(), but you must supply the old
1303 parent. This resolves the ambiguity in a talloc_steal() which is
1304 called on a context that has more than one parent (via references)
1306 The old parent can be either a reference or a parent
1308 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1310 struct talloc_chunk *tc;
1311 struct talloc_reference_handle *h;
1313 if (unlikely(ptr == NULL)) {
1314 return NULL;
1317 if (old_parent == talloc_parent(ptr)) {
1318 return _talloc_steal_internal(new_parent, ptr);
1321 tc = talloc_chunk_from_ptr(ptr);
1322 for (h=tc->refs;h;h=h->next) {
1323 if (talloc_parent(h) == old_parent) {
1324 if (_talloc_steal_internal(new_parent, h) != h) {
1325 return NULL;
1327 return discard_const_p(void, ptr);
1331 /* it wasn't a parent */
1332 return NULL;
1336   remove a secondary reference to a pointer. This undoes what
1337 talloc_reference() has done. The context and pointer arguments
1338 must match those given to a talloc_reference()
1340 static inline int talloc_unreference(const void *context, const void *ptr)
1342 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1343 struct talloc_reference_handle *h;
1345 if (unlikely(context == NULL)) {
1346 context = null_context;
1349 for (h=tc->refs;h;h=h->next) {
1350 struct talloc_chunk *p = talloc_parent_chunk(h);
1351 if (p == NULL) {
1352 if (context == NULL) break;
1353 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1354 break;
1357 if (h == NULL) {
1358 return -1;
1361 return _talloc_free_internal(h, __location__);
1365 remove a specific parent context from a pointer. This is a more
1366 controlled variant of talloc_free()
1368 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1370 struct talloc_chunk *tc_p, *new_p, *tc_c;
1371 void *new_parent;
1373 if (ptr == NULL) {
1374 return -1;
1377 if (context == NULL) {
1378 context = null_context;
1381 if (talloc_unreference(context, ptr) == 0) {
1382 return 0;
1385 if (context != NULL) {
1386 tc_c = talloc_chunk_from_ptr(context);
1387 } else {
1388 tc_c = NULL;
1390 if (tc_c != talloc_parent_chunk(ptr)) {
1391 return -1;
1394 tc_p = talloc_chunk_from_ptr(ptr);
1396 if (tc_p->refs == NULL) {
1397 return _talloc_free_internal(ptr, __location__);
1400 new_p = talloc_parent_chunk(tc_p->refs);
1401 if (new_p) {
1402 new_parent = TC_PTR_FROM_CHUNK(new_p);
1403 } else {
1404 new_parent = NULL;
1407 if (talloc_unreference(new_parent, ptr) != 0) {
1408 return -1;
1411 _talloc_steal_internal(new_parent, ptr);
1413 return 0;
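#if 0
/*
 * Illustrative usage sketch, kept under #if 0 so it is not compiled.
 * Assuming "shared" has two parents as in the talloc_reference() example
 * earlier in this file, talloc_unlink() removes one named parent at a
 * time; dropping the last one actually frees the memory.
 */
static void example_unlink(TALLOC_CTX *ctx1, TALLOC_CTX *ctx2, char *shared)
{
	talloc_unlink(ctx2, shared);	/* drop the secondary reference */
	talloc_unlink(ctx1, shared);	/* drop the real parent: freed now */
}
#endif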
1417 add a name to an existing pointer - va_list version
1419 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1420 const char *fmt,
1421 va_list ap) PRINTF_ATTRIBUTE(2,0);
1423 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1424 const char *fmt,
1425 va_list ap)
1427 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1428 fmt,
1429 ap);
1430 if (likely(name_tc)) {
1431 tc->name = TC_PTR_FROM_CHUNK(name_tc);
1432 _tc_set_name_const(name_tc, ".name");
1433 } else {
1434 tc->name = NULL;
1436 return tc->name;
1440 add a name to an existing pointer
1442 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1444 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1445 const char *name;
1446 va_list ap;
1447 va_start(ap, fmt);
1448 name = tc_set_name_v(tc, fmt, ap);
1449 va_end(ap);
1450 return name;
1455 create a named talloc pointer. Any talloc pointer can be named, and
1456 talloc_named() operates just like talloc() except that it allows you
1457 to name the pointer.
1459 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1461 va_list ap;
1462 void *ptr;
1463 const char *name;
1464 struct talloc_chunk *tc;
1466 ptr = __talloc(context, size, &tc);
1467 if (unlikely(ptr == NULL)) return NULL;
1469 va_start(ap, fmt);
1470 name = tc_set_name_v(tc, fmt, ap);
1471 va_end(ap);
1473 if (unlikely(name == NULL)) {
1474 _talloc_free_internal(ptr, __location__);
1475 return NULL;
1478 return ptr;
1482 return the name of a talloc ptr, or "UNNAMED"
1484 static inline const char *__talloc_get_name(const void *ptr)
1486 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1487 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1488 return ".reference";
1490 if (likely(tc->name)) {
1491 return tc->name;
1493 return "UNNAMED";
1496 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1498 return __talloc_get_name(ptr);
1502 check if a pointer has the given name. If it does, return the pointer,
1503 otherwise return NULL
1505 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1507 const char *pname;
1508 if (unlikely(ptr == NULL)) return NULL;
1509 pname = __talloc_get_name(ptr);
1510 if (likely(pname == name || strcmp(pname, name) == 0)) {
1511 return discard_const_p(void, ptr);
1513 return NULL;
1516 static void talloc_abort_type_mismatch(const char *location,
1517 const char *name,
1518 const char *expected)
1520 const char *reason;
1522 reason = talloc_asprintf(NULL,
1523 "%s: Type mismatch: name[%s] expected[%s]",
1524 location,
1525 name?name:"NULL",
1526 expected);
1527 if (!reason) {
1528 reason = "Type mismatch";
1531 talloc_abort(reason);
1534 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1536 const char *pname;
1538 if (unlikely(ptr == NULL)) {
1539 talloc_abort_type_mismatch(location, NULL, name);
1540 return NULL;
1543 pname = __talloc_get_name(ptr);
1544 if (likely(pname == name || strcmp(pname, name) == 0)) {
1545 return discard_const_p(void, ptr);
1548 talloc_abort_type_mismatch(location, pname, name);
1549 return NULL;
1553 this is for compatibility with older versions of talloc
1555 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1557 va_list ap;
1558 void *ptr;
1559 const char *name;
1560 struct talloc_chunk *tc;
1562 ptr = __talloc(NULL, 0, &tc);
1563 if (unlikely(ptr == NULL)) return NULL;
1565 va_start(ap, fmt);
1566 name = tc_set_name_v(tc, fmt, ap);
1567 va_end(ap);
1569 if (unlikely(name == NULL)) {
1570 _talloc_free_internal(ptr, __location__);
1571 return NULL;
1574 return ptr;
1577 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1578 void *ptr,
1579 const char *location)
1581 while (tc->child) {
1582 /* we need to work out who will own an abandoned child
1583 if it cannot be freed. In priority order, the first
1584 choice is owner of any remaining reference to this
1585 pointer, the second choice is our parent, and the
1586 final choice is the null context. */
1587 void *child = TC_PTR_FROM_CHUNK(tc->child);
1588 const void *new_parent = null_context;
1589 if (unlikely(tc->child->refs)) {
1590 struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1591 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1593 if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1594 if (talloc_parent_chunk(child) != tc) {
1596 * Destructor already reparented this child.
1597 * No further reparenting needed.
1599 continue;
1601 if (new_parent == null_context) {
1602 struct talloc_chunk *p = talloc_parent_chunk(ptr);
1603 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1605 _talloc_steal_internal(new_parent, child);
1611 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1612 should probably not be used in new code. It's in here to keep the talloc
1613 code consistent across Samba 3 and 4.
1615 _PUBLIC_ void talloc_free_children(void *ptr)
1617 struct talloc_chunk *tc_name = NULL;
1618 struct talloc_chunk *tc;
1620 if (unlikely(ptr == NULL)) {
1621 return;
1624 tc = talloc_chunk_from_ptr(ptr);
1626 /* we do not want to free the context name if it is a child .. */
1627 if (likely(tc->child)) {
1628 for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1629 if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1631 if (tc_name) {
1632 _TLIST_REMOVE(tc->child, tc_name);
1633 if (tc->child) {
1634 tc->child->parent = tc;
1639 _tc_free_children_internal(tc, ptr, __location__);
1641 /* .. so we put it back after all other children have been freed */
1642 if (tc_name) {
1643 if (tc->child) {
1644 tc->child->parent = NULL;
1646 tc_name->parent = tc;
1647 _TLIST_ADD(tc->child, tc_name);
1652 Allocate a bit of memory as a child of an existing pointer
1654 _PUBLIC_ void *_talloc(const void *context, size_t size)
1656 struct talloc_chunk *tc;
1657 return __talloc(context, size, &tc);
1661 externally callable talloc_set_name_const()
1663 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1665 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1669 create a named talloc pointer. Any talloc pointer can be named, and
1670 talloc_named() operates just like talloc() except that it allows you
1671 to name the pointer.
1673 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1675 return _talloc_named_const(context, size, name);
1679 free a talloc pointer. This also frees all child pointers of this
1680 pointer recursively
1682 return 0 if the memory is actually freed, otherwise -1. The memory
1683 will not be freed if the ref_count is > 1 or the destructor (if
1684 any) returns non-zero
1686 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1688 struct talloc_chunk *tc;
1690 if (unlikely(ptr == NULL)) {
1691 return -1;
1694 tc = talloc_chunk_from_ptr(ptr);
1696 if (unlikely(tc->refs != NULL)) {
1697 struct talloc_reference_handle *h;
1699 if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1700 /* in this case we do know which parent should
1701 get this pointer, as there is really only
1702 one parent */
1703 return talloc_unlink(null_context, ptr);
1706 talloc_log("ERROR: talloc_free with references at %s\n",
1707 location);
1709 for (h=tc->refs; h; h=h->next) {
1710 talloc_log("\treference at %s\n",
1711 h->location);
1713 return -1;
1716 return _talloc_free_internal(ptr, location);
1722 A talloc version of realloc. The context argument is only used if
1723 ptr is NULL
1725 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1727 struct talloc_chunk *tc;
1728 void *new_ptr;
1729 bool malloced = false;
1730 struct talloc_pool_hdr *pool_hdr = NULL;
1731 size_t old_size = 0;
1732 size_t new_size = 0;
1734 /* size zero is equivalent to free() */
1735 if (unlikely(size == 0)) {
1736 talloc_unlink(context, ptr);
1737 return NULL;
1740 if (unlikely(size >= MAX_TALLOC_SIZE)) {
1741 return NULL;
1744 /* realloc(NULL) is equivalent to malloc() */
1745 if (ptr == NULL) {
1746 return _talloc_named_const(context, size, name);
1749 tc = talloc_chunk_from_ptr(ptr);
1751 /* don't allow realloc on referenced pointers */
1752 if (unlikely(tc->refs)) {
1753 return NULL;
1756 /* don't let anybody try to realloc a talloc_pool */
1757 if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
1758 return NULL;
1761 if (tc->limit && (size > tc->size)) {
1762 if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1763 errno = ENOMEM;
1764 return NULL;
1768 /* handle realloc inside a talloc_pool */
1769 if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1770 pool_hdr = tc->pool;
1773 #if (ALWAYS_REALLOC == 0)
1774 /* don't shrink if we have less than 1k to gain */
1775 if (size < tc->size && tc->limit == NULL) {
1776 if (pool_hdr) {
1777 void *next_tc = tc_next_chunk(tc);
1778 TC_INVALIDATE_SHRINK_CHUNK(tc, size);
1779 tc->size = size;
1780 if (next_tc == pool_hdr->end) {
1781 /* note: tc->size has changed, so this works */
1782 pool_hdr->end = tc_next_chunk(tc);
1784 return ptr;
1785 } else if ((tc->size - size) < 1024) {
1787 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1788 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1789 * after each realloc call, which slows down
1790 * testing a lot :-(.
1792 * That is why we only mark memory as undefined here.
1794 TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1796 /* do not shrink if we have less than 1k to gain */
1797 tc->size = size;
1798 return ptr;
1800 } else if (tc->size == size) {
1802 * do not change the pointer if it is exactly
1803 * the same size.
1805 return ptr;
1807 #endif
1809 /* by resetting magic we catch users of the old memory */
1810 tc->flags |= TALLOC_FLAG_FREE;
1812 #if ALWAYS_REALLOC
1813 if (pool_hdr) {
1814 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1815 pool_hdr->object_count--;
1817 if (new_ptr == NULL) {
1818 new_ptr = malloc(TC_HDR_SIZE+size);
1819 malloced = true;
1820 new_size = size;
1823 if (new_ptr) {
1824 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1825 TC_INVALIDATE_FULL_CHUNK(tc);
1827 } else {
1828 /* We're doing malloc then free here, so record the difference. */
1829 old_size = tc->size;
1830 new_size = size;
1831 new_ptr = malloc(size + TC_HDR_SIZE);
1832 if (new_ptr) {
1833 memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
1834 free(tc);
1837 #else
1838 if (pool_hdr) {
1839 struct talloc_chunk *pool_tc;
1840 void *next_tc = tc_next_chunk(tc);
1841 size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1842 size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1843 size_t space_needed;
1844 size_t space_left;
1845 unsigned int chunk_count = pool_hdr->object_count;
1847 pool_tc = talloc_chunk_from_pool(pool_hdr);
1848 if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1849 chunk_count -= 1;
1852 if (chunk_count == 1) {
1854 * optimize for the case where 'tc' is the only
1855 * chunk in the pool.
1857 char *start = tc_pool_first_chunk(pool_hdr);
1858 space_needed = new_chunk_size;
1859 space_left = (char *)tc_pool_end(pool_hdr) - start;
1861 if (space_left >= space_needed) {
1862 size_t old_used = TC_HDR_SIZE + tc->size;
1863 size_t new_used = TC_HDR_SIZE + size;
1864 new_ptr = start;
1866 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1869 * The area from
1870 * start -> tc may have
1871 * been freed and thus been marked as
1872 * VALGRIND_MEM_NOACCESS. Set it to
1873 * VALGRIND_MEM_UNDEFINED so we can
1874 * copy into it without valgrind errors.
1875 * We can't just mark
1876 * new_ptr -> new_ptr + old_used
1877 * as this may overlap on top of tc,
1878 * (which is why we use memmove, not
1879 * memcpy below) hence the MIN.
1881 size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1882 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1884 #endif
1886 memmove(new_ptr, tc, old_used);
1888 tc = (struct talloc_chunk *)new_ptr;
1889 TC_UNDEFINE_GROW_CHUNK(tc, size);
1892 * first we do not align the pool pointer
1893 * because we want to invalidate the padding
1894 * too.
1896 pool_hdr->end = new_used + (char *)new_ptr;
1897 tc_invalidate_pool(pool_hdr);
1899 /* now the aligned pointer */
1900 pool_hdr->end = new_chunk_size + (char *)new_ptr;
1901 goto got_new_ptr;
1904 next_tc = NULL;
1907 if (new_chunk_size == old_chunk_size) {
1908 TC_UNDEFINE_GROW_CHUNK(tc, size);
1909 tc->flags &= ~TALLOC_FLAG_FREE;
1910 tc->size = size;
1911 return ptr;
1914 if (next_tc == pool_hdr->end) {
1916 * optimize for the case where 'tc' is the last
1917 * chunk in the pool.
1919 space_needed = new_chunk_size - old_chunk_size;
1920 space_left = tc_pool_space_left(pool_hdr);
1922 if (space_left >= space_needed) {
1923 TC_UNDEFINE_GROW_CHUNK(tc, size);
1924 tc->flags &= ~TALLOC_FLAG_FREE;
1925 tc->size = size;
1926 pool_hdr->end = tc_next_chunk(tc);
1927 return ptr;
1931 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1933 if (new_ptr == NULL) {
1934 new_ptr = malloc(TC_HDR_SIZE+size);
1935 malloced = true;
1936 new_size = size;
1939 if (new_ptr) {
1940 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1942 _tc_free_poolmem(tc, __location__ "_talloc_realloc");
1945 else {
1946 /* We're doing realloc here, so record the difference. */
1947 old_size = tc->size;
1948 new_size = size;
1949 new_ptr = realloc(tc, size + TC_HDR_SIZE);
1951 got_new_ptr:
1952 #endif
1953 if (unlikely(!new_ptr)) {
1954 tc->flags &= ~TALLOC_FLAG_FREE;
1955 return NULL;
1958 tc = (struct talloc_chunk *)new_ptr;
1959 tc->flags &= ~TALLOC_FLAG_FREE;
1960 if (malloced) {
1961 tc->flags &= ~TALLOC_FLAG_POOLMEM;
1963 if (tc->parent) {
1964 tc->parent->child = tc;
1966 if (tc->child) {
1967 tc->child->parent = tc;
1970 if (tc->prev) {
1971 tc->prev->next = tc;
1973 if (tc->next) {
1974 tc->next->prev = tc;
1977 if (new_size > old_size) {
1978 talloc_memlimit_grow(tc->limit, new_size - old_size);
1979 } else if (new_size < old_size) {
1980 talloc_memlimit_shrink(tc->limit, old_size - new_size);
1983 tc->size = size;
1984 _tc_set_name_const(tc, name);
1986 return TC_PTR_FROM_CHUNK(tc);
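#if 0
/*
 * Illustrative usage sketch, kept under #if 0 so it is not compiled.
 * The growth policy is up to the caller; the point shown here is that
 * the old array stays valid when talloc_realloc() fails for a non-zero
 * size, so it is safe to fall back to it.
 */
static int *example_grow(TALLOC_CTX *ctx, int *array, size_t new_count)
{
	int *tmp = talloc_realloc(ctx, array, int, new_count);

	if (tmp == NULL) {
		return array;	/* old allocation untouched on failure */
	}
	return tmp;
}
#endif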
1990 a wrapper around talloc_steal() for situations where you are moving a pointer
1991 between two structures, and want the old pointer to be set to NULL
1993 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
1995 const void **pptr = discard_const_p(const void *,_pptr);
1996 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
1997 (*pptr) = NULL;
1998 return ret;
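#if 0
/*
 * Illustrative usage sketch, kept under #if 0 so it is not compiled:
 * talloc_move() (the macro wrapper in talloc.h) hands ownership to the
 * new parent and NULLs the donor pointer, so the old owner cannot keep
 * using it by accident.
 */
static char *example_move(TALLOC_CTX *new_parent, char **donor)
{
	return talloc_move(new_parent, donor);	/* *donor becomes NULL */
}
#endif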
2001 enum talloc_mem_count_type {
2002 TOTAL_MEM_SIZE,
2003 TOTAL_MEM_BLOCKS,
2004 TOTAL_MEM_LIMIT,
2007 static inline size_t _talloc_total_mem_internal(const void *ptr,
2008 enum talloc_mem_count_type type,
2009 struct talloc_memlimit *old_limit,
2010 struct talloc_memlimit *new_limit)
2012 size_t total = 0;
2013 struct talloc_chunk *c, *tc;
2015 if (ptr == NULL) {
2016 ptr = null_context;
2018 if (ptr == NULL) {
2019 return 0;
2022 tc = talloc_chunk_from_ptr(ptr);
2024 if (old_limit || new_limit) {
2025 if (tc->limit && tc->limit->upper == old_limit) {
2026 tc->limit->upper = new_limit;
2030 /* optimize in the memlimits case */
2031 if (type == TOTAL_MEM_LIMIT &&
2032 tc->limit != NULL &&
2033 tc->limit != old_limit &&
2034 tc->limit->parent == tc) {
2035 return tc->limit->cur_size;
2038 if (tc->flags & TALLOC_FLAG_LOOP) {
2039 return 0;
2042 tc->flags |= TALLOC_FLAG_LOOP;
2044 if (old_limit || new_limit) {
2045 if (old_limit == tc->limit) {
2046 tc->limit = new_limit;
2050 switch (type) {
2051 case TOTAL_MEM_SIZE:
2052 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2053 total = tc->size;
2055 break;
2056 case TOTAL_MEM_BLOCKS:
2057 total++;
2058 break;
2059 case TOTAL_MEM_LIMIT:
2060 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2062 * Don't count memory allocated from a pool
2063 * when calculating limits. Only count the
2064 * pool itself.
2066 if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2067 if (tc->flags & TALLOC_FLAG_POOL) {
2069 * If this is a pool, the allocated
2070 * size is in the pool header, and
2071 * remember to add in the prefix
2072 * length.
2074 struct talloc_pool_hdr *pool_hdr
2075 = talloc_pool_from_chunk(tc);
2076 total = pool_hdr->poolsize +
2077 TC_HDR_SIZE +
2078 TP_HDR_SIZE;
2079 } else {
2080 total = tc->size + TC_HDR_SIZE;
2084 break;
2086 for (c = tc->child; c; c = c->next) {
2087 total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2088 old_limit, new_limit);
2091 tc->flags &= ~TALLOC_FLAG_LOOP;
2093 return total;
2097 return the total size of a talloc pool (subtree)
2099 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2101 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2105 return the total number of blocks in a talloc pool (subtree)
2107 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2109 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2113 return the number of external references to a pointer
2115 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2117 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2118 struct talloc_reference_handle *h;
2119 size_t ret = 0;
2121 for (h=tc->refs;h;h=h->next) {
2122 ret++;
2124 return ret;
2128 report on memory usage by all children of a pointer, giving a full tree view
2130 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2131 void (*callback)(const void *ptr,
2132 int depth, int max_depth,
2133 int is_ref,
2134 void *private_data),
2135 void *private_data)
2137 struct talloc_chunk *c, *tc;
2139 if (ptr == NULL) {
2140 ptr = null_context;
2142 if (ptr == NULL) return;
2144 tc = talloc_chunk_from_ptr(ptr);
2146 if (tc->flags & TALLOC_FLAG_LOOP) {
2147 return;
2150 callback(ptr, depth, max_depth, 0, private_data);
2152 if (max_depth >= 0 && depth >= max_depth) {
2153 return;
2156 tc->flags |= TALLOC_FLAG_LOOP;
2157 for (c=tc->child;c;c=c->next) {
2158 if (c->name == TALLOC_MAGIC_REFERENCE) {
2159 struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2160 callback(h->ptr, depth + 1, max_depth, 1, private_data);
2161 } else {
2162 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2165 tc->flags &= ~TALLOC_FLAG_LOOP;
2168 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2170 const char *name = __talloc_get_name(ptr);
2171 struct talloc_chunk *tc;
2172 FILE *f = (FILE *)_f;
2174 if (is_ref) {
2175 fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2176 return;
2179 tc = talloc_chunk_from_ptr(ptr);
2180 if (tc->limit && tc->limit->parent == tc) {
2181 fprintf(f, "%*s%-30s is a memlimit context"
2182 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2183 depth*4, "",
2184 name,
2185 (unsigned long)tc->limit->max_size,
2186 (unsigned long)tc->limit->cur_size);
2189 if (depth == 0) {
2190 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2191 (max_depth < 0 ? "full " :""), name,
2192 (unsigned long)talloc_total_size(ptr),
2193 (unsigned long)talloc_total_blocks(ptr));
2194 return;
2197 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2198 depth*4, "",
2199 name,
2200 (unsigned long)talloc_total_size(ptr),
2201 (unsigned long)talloc_total_blocks(ptr),
2202 (int)talloc_reference_count(ptr), ptr);
2204 #if 0
2205 fprintf(f, "content: ");
2206 if (talloc_total_size(ptr)) {
2207 int tot = talloc_total_size(ptr);
2208 int i;
2210 for (i = 0; i < tot; i++) {
2211 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2212 fprintf(f, "%c", ((char *)ptr)[i]);
2213 } else {
2214 fprintf(f, "~%02x", ((char *)ptr)[i]);
2218 fprintf(f, "\n");
2219 #endif
2223 report on memory usage by all children of a pointer, giving a full tree view
2225 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2227 if (f) {
2228 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2229 fflush(f);
2234 report on memory usage by all children of a pointer, giving a full tree view
2236 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2238 talloc_report_depth_file(ptr, 0, -1, f);
2242 report on memory usage by all children of a pointer
2244 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2246 talloc_report_depth_file(ptr, 0, 1, f);
2250 report on any memory hanging off the null context
2252 static void talloc_report_null(void)
2254 if (talloc_total_size(null_context) != 0) {
2255 talloc_report(null_context, stderr);
2260 report on any memory hanging off the null context
2262 static void talloc_report_null_full(void)
2264 if (talloc_total_size(null_context) != 0) {
2265 talloc_report_full(null_context, stderr);
2270 enable tracking of the NULL context
2272 _PUBLIC_ void talloc_enable_null_tracking(void)
2274 if (null_context == NULL) {
2275 null_context = _talloc_named_const(NULL, 0, "null_context");
2276 if (autofree_context != NULL) {
2277 talloc_reparent(NULL, null_context, autofree_context);
2283 enable tracking of the NULL context, without moving the autofree context
2284 into the NULL context. This is needed for the talloc testsuite
2286 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2288 if (null_context == NULL) {
2289 null_context = _talloc_named_const(NULL, 0, "null_context");
2294 disable tracking of the NULL context
2296 _PUBLIC_ void talloc_disable_null_tracking(void)
2298 if (null_context != NULL) {
2299 /* we have to move any children onto the real NULL
2300 context */
2301 struct talloc_chunk *tc, *tc2;
2302 tc = talloc_chunk_from_ptr(null_context);
2303 for (tc2 = tc->child; tc2; tc2=tc2->next) {
2304 if (tc2->parent == tc) tc2->parent = NULL;
2305 if (tc2->prev == tc) tc2->prev = NULL;
2307 for (tc2 = tc->next; tc2; tc2=tc2->next) {
2308 if (tc2->parent == tc) tc2->parent = NULL;
2309 if (tc2->prev == tc) tc2->prev = NULL;
2311 tc->child = NULL;
2312 tc->next = NULL;
2314 talloc_free(null_context);
2315 null_context = NULL;
2319 enable leak reporting on exit
2321 _PUBLIC_ void talloc_enable_leak_report(void)
2323 talloc_enable_null_tracking();
2324 atexit(talloc_report_null);
2328 enable full leak reporting on exit
2330 _PUBLIC_ void talloc_enable_leak_report_full(void)
2332 talloc_enable_null_tracking();
2333 atexit(talloc_report_null_full);
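/*
 * Illustrative usage, not part of the library: enable one of these early in
 * main() and anything still hanging off the NULL context at process exit is
 * reported to stderr via the atexit() handlers registered above.
 *
 *	int main(void)
 *	{
 *		talloc_enable_leak_report_full();
 *
 *		char *leak = talloc_strdup(NULL, "never freed");
 *		(void)leak;
 *		return 0;               // full leak report printed on exit
 *	}
 */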
2337 talloc and zero memory.
2339 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2341 void *p = _talloc_named_const(ctx, size, name);
2343 if (p) {
2344 memset(p, '\0', size);
2347 return p;
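/*
 * Illustrative usage, not part of the library: callers normally reach this
 * through the talloc_zero() macro, which supplies the type name and returns
 * a typed, zero-filled object.  struct foo is a placeholder type.
 *
 *	struct foo { int x; char *name; };
 *
 *	struct foo *f = talloc_zero(ctx, struct foo);
 *	// on success f->x == 0 and f->name == NULL; on failure f == NULL
 */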
2351 memdup with a talloc.
2353 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2355 void *newp = _talloc_named_const(t, size, name);
2357 if (likely(newp)) {
2358 memcpy(newp, p, size);
2361 return newp;
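/*
 * Illustrative usage, not part of the library: normally used via the
 * talloc_memdup() macro, which names the copy after the calling location.
 *
 *	uint8_t blob[16] = { 0 };
 *	uint8_t *copy = talloc_memdup(ctx, blob, sizeof(blob));
 */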
2364 static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
2366 char *ret;
2367 struct talloc_chunk *tc;
2369 ret = (char *)__talloc(t, len + 1, &tc);
2370 if (unlikely(!ret)) return NULL;
2372 memcpy(ret, p, len);
2373 ret[len] = 0;
2375 _tc_set_name_const(tc, ret);
2376 return ret;
2380 strdup with a talloc
2382 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2384 if (unlikely(!p)) return NULL;
2385 return __talloc_strlendup(t, p, strlen(p));
2389 strndup with a talloc
2391 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2393 if (unlikely(!p)) return NULL;
2394 return __talloc_strlendup(t, p, strnlen(p, n));
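/*
 * Illustrative usage, not part of the library:
 *
 *	char *a = talloc_strdup(ctx, "hello world");      // full copy
 *	char *b = talloc_strndup(ctx, "hello world", 5);  // "hello"
 *	// both are children of ctx and go away with talloc_free(ctx)
 */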
2397 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2398 const char *a, size_t alen)
2400 char *ret;
2402 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2403 if (unlikely(!ret)) return NULL;
2405 /* append the string and the trailing \0 */
2406 memcpy(&ret[slen], a, alen);
2407 ret[slen+alen] = 0;
2409 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2410 return ret;
2414 * Appends at the end of the string.
2416 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2418 if (unlikely(!s)) {
2419 return talloc_strdup(NULL, a);
2422 if (unlikely(!a)) {
2423 return s;
2426 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2430 * Appends at the end of the talloc'ed buffer,
2431 * not the end of the string.
2433 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2435 size_t slen;
2437 if (unlikely(!s)) {
2438 return talloc_strdup(NULL, a);
2441 if (unlikely(!a)) {
2442 return s;
2445 slen = talloc_get_size(s);
2446 if (likely(slen > 0)) {
2447 slen--;
2450 return __talloc_strlendup_append(s, slen, a, strlen(a));
2454 * Appends at the end of the string.
2456 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2458 if (unlikely(!s)) {
2459 return talloc_strndup(NULL, a, n);
2462 if (unlikely(!a)) {
2463 return s;
2466 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2470 * Appends at the end of the talloc'ed buffer,
2471 * not the end of the string.
2473 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2475 size_t slen;
2477 if (unlikely(!s)) {
2478 return talloc_strndup(NULL, a, n);
2481 if (unlikely(!a)) {
2482 return s;
2485 slen = talloc_get_size(s);
2486 if (likely(slen > 0)) {
2487 slen--;
2490 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
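/*
 * Illustrative usage, not part of the library: the _append variants locate
 * the end of the string with strlen(), while the _append_buffer variants use
 * talloc_get_size() - 1, i.e. the end of the allocated buffer, which matters
 * when the buffer holds embedded NUL bytes.
 *
 *	char *s = talloc_strdup(ctx, "abc");
 *	s = talloc_strdup_append(s, "def");            // "abcdef"
 *	s = talloc_strndup_append(s, "ghijkl", 3);     // "abcdefghi"
 */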
2493 #ifndef HAVE_VA_COPY
2494 #ifdef HAVE___VA_COPY
2495 #define va_copy(dest, src) __va_copy(dest, src)
2496 #else
2497 #define va_copy(dest, src) (dest) = (src)
2498 #endif
2499 #endif
2501 static struct talloc_chunk *_vasprintf_tc(const void *t,
2502 const char *fmt,
2503 va_list ap) PRINTF_ATTRIBUTE(2,0);
2505 static struct talloc_chunk *_vasprintf_tc(const void *t,
2506 const char *fmt,
2507 va_list ap)
2509 int len;
2510 char *ret;
2511 va_list ap2;
2512 struct talloc_chunk *tc;
2513 char buf[1024];
2515 /* this call looks strange, but it makes it work on older Solaris boxes */
2516 va_copy(ap2, ap);
2517 len = vsnprintf(buf, sizeof(buf), fmt, ap2);
2518 va_end(ap2);
2519 if (unlikely(len < 0)) {
2520 return NULL;
2523 ret = (char *)__talloc(t, len+1, &tc);
2524 if (unlikely(!ret)) return NULL;
2526 if (len < sizeof(buf)) {
2527 memcpy(ret, buf, len+1);
2528 } else {
2529 va_copy(ap2, ap);
2530 vsnprintf(ret, len+1, fmt, ap2);
2531 va_end(ap2);
2534 _tc_set_name_const(tc, ret);
2535 return tc;
2538 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2540 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2541 if (tc == NULL) {
2542 return NULL;
2544 return TC_PTR_FROM_CHUNK(tc);
2549 Perform string formatting, and return a pointer to newly allocated
2550 memory holding the result, parented to the supplied talloc context.
2552 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2554 va_list ap;
2555 char *ret;
2557 va_start(ap, fmt);
2558 ret = talloc_vasprintf(t, fmt, ap);
2559 va_end(ap);
2560 return ret;
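/*
 * Illustrative usage, not part of the library (host and port are placeholder
 * variables):
 *
 *	char *peer = talloc_asprintf(ctx, "%s:%d", host, port);
 *	// peer is a child of ctx; its talloc name is the formatted string
 */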
2563 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2564 const char *fmt, va_list ap)
2565 PRINTF_ATTRIBUTE(3,0);
2567 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2568 const char *fmt, va_list ap)
2570 ssize_t alen;
2571 va_list ap2;
2572 char c;
2574 va_copy(ap2, ap);
2575 alen = vsnprintf(&c, 1, fmt, ap2);
2576 va_end(ap2);
2578 if (alen <= 0) {
2579 /* Either the vsnprintf failed or the format resulted in
2580 * no characters being formatted. In the former case, we
2581 * ought to return NULL, in the latter we ought to return
2582 * the original string. Most current callers of this
2583 * function expect it to never return NULL.
2585 return s;
2588 s = talloc_realloc(NULL, s, char, slen + alen + 1);
2589 if (!s) return NULL;
2591 va_copy(ap2, ap);
2592 vsnprintf(s + slen, alen + 1, fmt, ap2);
2593 va_end(ap2);
2595 _tc_set_name_const(talloc_chunk_from_ptr(s), s);
2596 return s;
2600 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2601 * and return @p s, which may have moved. Good for gradually
2602 * accumulating output into a string buffer. Appends at the end
2603 * of the string.
2605 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2607 if (unlikely(!s)) {
2608 return talloc_vasprintf(NULL, fmt, ap);
2611 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2615 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2616 * and return @p s, which may have moved. Always appends at the
2617 * end of the talloc'ed buffer, not the end of the string.
2619 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2621 size_t slen;
2623 if (unlikely(!s)) {
2624 return talloc_vasprintf(NULL, fmt, ap);
2627 slen = talloc_get_size(s);
2628 if (likely(slen > 0)) {
2629 slen--;
2632 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2636 Realloc @p s to append the formatted result of @p fmt and return @p
2637 s, which may have moved. Good for gradually accumulating output
2638 into a string buffer.
2640 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2642 va_list ap;
2644 va_start(ap, fmt);
2645 s = talloc_vasprintf_append(s, fmt, ap);
2646 va_end(ap);
2647 return s;
2651 Realloc @p s to append the formatted result of @p fmt and return @p
2652 s, which may have moved. Good for gradually accumulating output
2653 into a buffer.
2655 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2657 va_list ap;
2659 va_start(ap, fmt);
2660 s = talloc_vasprintf_append_buffer(s, fmt, ap);
2661 va_end(ap);
2662 return s;
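/*
 * Illustrative usage, not part of the library: accumulating output into one
 * growing string.  The _append_buffer form avoids re-running strlen() on the
 * whole result for every iteration.  values[] and n are placeholders.
 *
 *	char *out = talloc_strdup(ctx, "");
 *	size_t i;
 *
 *	for (i = 0; out != NULL && i < n; i++) {
 *		out = talloc_asprintf_append_buffer(out, "%d ", values[i]);
 *	}
 */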
2666 alloc an array, checking for integer overflow in the array size
2668 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2670 if (count >= MAX_TALLOC_SIZE/el_size) {
2671 return NULL;
2673 return _talloc_named_const(ctx, el_size * count, name);
2677 alloc a zeroed array, checking for integer overflow in the array size
2679 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2681 if (count >= MAX_TALLOC_SIZE/el_size) {
2682 return NULL;
2684 return _talloc_zero(ctx, el_size * count, name);
2688 realloc an array, checking for integer overflow in the array size
2690 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2692 if (count >= MAX_TALLOC_SIZE/el_size) {
2693 return NULL;
2695 return _talloc_realloc(ctx, ptr, el_size * count, name);
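/*
 * Illustrative usage, not part of the library: these are normally reached
 * through the talloc_array(), talloc_zero_array() and talloc_realloc()
 * macros, which supply the element type and return NULL on overflow or
 * allocation failure.
 *
 *	int *v = talloc_array(ctx, int, 16);
 *	v = talloc_realloc(ctx, v, int, 32);                  // grow; may move
 *	unsigned *z = talloc_zero_array(ctx, unsigned, 8);    // zero-filled
 */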
2699 a function version of talloc_realloc(), so it can be passed as a function pointer
2700 to libraries that want a realloc function (a realloc function encapsulates
2701 all the basic capabilities of an allocation library, which is why this is useful)
2703 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2705 return _talloc_realloc(context, ptr, size, NULL);
2709 static int talloc_autofree_destructor(void *ptr)
2711 autofree_context = NULL;
2712 return 0;
2715 static void talloc_autofree(void)
2717 talloc_free(autofree_context);
2721 return a context which will be auto-freed on exit
2722 this is useful for reducing the noise in leak reports
2724 _PUBLIC_ void *talloc_autofree_context(void)
2726 if (autofree_context == NULL) {
2727 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2728 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2729 atexit(talloc_autofree);
2731 return autofree_context;
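/*
 * Illustrative usage, not part of the library: long-lived allocations can be
 * parented to the autofree context so they are released by atexit() and do
 * not clutter leak reports.
 *
 *	char *cached = talloc_strdup(talloc_autofree_context(), "config value");
 */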
2734 _PUBLIC_ size_t talloc_get_size(const void *context)
2736 struct talloc_chunk *tc;
2738 if (context == NULL) {
2739 return 0;
2742 tc = talloc_chunk_from_ptr(context);
2744 return tc->size;
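/*
 * Illustrative usage, not part of the library: talloc_get_size() returns the
 * requested size of a chunk, which is how the *_append_buffer helpers above
 * find the end of a buffer.
 *
 *	char *buf = talloc_array(ctx, char, 64);
 *	size_t n = talloc_get_size(buf);        // 64
 */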
2748 find a parent of this context that has the given name, if any
2750 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2752 struct talloc_chunk *tc;
2754 if (context == NULL) {
2755 return NULL;
2758 tc = talloc_chunk_from_ptr(context);
2759 while (tc) {
2760 if (tc->name && strcmp(tc->name, name) == 0) {
2761 return TC_PTR_FROM_CHUNK(tc);
2763 while (tc && tc->prev) tc = tc->prev;
2764 if (tc) {
2765 tc = tc->parent;
2768 return NULL;
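/*
 * Illustrative usage, not part of the library: looking up a named ancestor,
 * e.g. one created with talloc_named_const().
 *
 *	TALLOC_CTX *conn = talloc_named_const(NULL, 0, "connection");
 *	char *req = talloc_strdup(conn, "request");
 *
 *	void *found = talloc_find_parent_byname(req, "connection");  // == conn
 */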
2772 show the parentage of a context
2774 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2776 struct talloc_chunk *tc;
2778 if (context == NULL) {
2779 fprintf(file, "talloc no parents for NULL\n");
2780 return;
2783 tc = talloc_chunk_from_ptr(context);
2784 fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2785 while (tc) {
2786 fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2787 while (tc && tc->prev) tc = tc->prev;
2788 if (tc) {
2789 tc = tc->parent;
2792 fflush(file);
2796 return 1 if ptr is a parent of context
2798 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2800 struct talloc_chunk *tc;
2802 if (context == NULL) {
2803 return 0;
2806 tc = talloc_chunk_from_ptr(context);
2807 while (tc) {
2808 if (depth <= 0) {
2809 return 0;
2811 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2812 while (tc && tc->prev) tc = tc->prev;
2813 if (tc) {
2814 tc = tc->parent;
2815 depth--;
2818 return 0;
2822 return 1 if ptr is a parent of context
2824 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2826 return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
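/*
 * Illustrative usage, not part of the library:
 *
 *	TALLOC_CTX *top = talloc_new(NULL);
 *	char *child = talloc_strdup(top, "x");
 *
 *	talloc_is_parent(child, top);   // 1: top is an ancestor of child
 *	talloc_is_parent(top, child);   // 0
 */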
2830 return the total size of memory used by this context and all children
2832 static inline size_t _talloc_total_limit_size(const void *ptr,
2833 struct talloc_memlimit *old_limit,
2834 struct talloc_memlimit *new_limit)
2836 return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2837 old_limit, new_limit);
2840 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2842 struct talloc_memlimit *l;
2844 for (l = limit; l != NULL; l = l->upper) {
2845 if (l->max_size != 0 &&
2846 ((l->max_size <= l->cur_size) ||
2847 (l->max_size - l->cur_size < size))) {
2848 return false;
2852 return true;
2856 Update memory limits when freeing a talloc_chunk.
2858 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2860 size_t limit_shrink_size;
2862 if (!tc->limit) {
2863 return;
2867 * Pool entries don't count. Only the pools
2868 * themselves are counted as part of the memory
2869 * limits. Note that this also takes care of
2870 * nested pools which have both flags
2871 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2873 if (tc->flags & TALLOC_FLAG_POOLMEM) {
2874 return;
2878 * If we are part of a memory limited context hierarchy
2879 * we need to subtract the memory used from the counters
2882 limit_shrink_size = tc->size+TC_HDR_SIZE;
2885 * If we're deallocating a pool, take into
2886 * account the prefix size added for the pool.
2889 if (tc->flags & TALLOC_FLAG_POOL) {
2890 limit_shrink_size += TP_HDR_SIZE;
2893 talloc_memlimit_shrink(tc->limit, limit_shrink_size);
2895 if (tc->limit->parent == tc) {
2896 free(tc->limit);
2899 tc->limit = NULL;
2903 Increase memory limit accounting after a malloc/realloc.
2905 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2906 size_t size)
2908 struct talloc_memlimit *l;
2910 for (l = limit; l != NULL; l = l->upper) {
2911 size_t new_cur_size = l->cur_size + size;
2912 if (new_cur_size < l->cur_size) {
2913 talloc_abort("logic error in talloc_memlimit_grow\n");
2914 return;
2916 l->cur_size = new_cur_size;
2921 Decrease memory limit accounting after a free/realloc.
2923 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2924 size_t size)
2926 struct talloc_memlimit *l;
2928 for (l = limit; l != NULL; l = l->upper) {
2929 if (l->cur_size < size) {
2930 talloc_abort("logic error in talloc_memlimit_shrink\n");
2931 return;
2933 l->cur_size = l->cur_size - size;
2937 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
2939 struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
2940 struct talloc_memlimit *orig_limit;
2941 struct talloc_memlimit *limit = NULL;
2943 if (tc->limit && tc->limit->parent == tc) {
2944 tc->limit->max_size = max_size;
2945 return 0;
2947 orig_limit = tc->limit;
2949 limit = malloc(sizeof(struct talloc_memlimit));
2950 if (limit == NULL) {
2951 return 1;
2953 limit->parent = tc;
2954 limit->max_size = max_size;
2955 limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
2957 if (orig_limit) {
2958 limit->upper = orig_limit;
2959 } else {
2960 limit->upper = NULL;
2963 return 0;
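/*
 * Illustrative usage, not part of the library: capping how much memory a
 * context hierarchy may consume.  Allocations that would push the hierarchy
 * over the limit fail with NULL.  The context name is a placeholder.
 *
 *	TALLOC_CTX *limited = talloc_new(NULL);
 *
 *	if (talloc_set_memlimit(limited, 1024 * 1024) != 0) {
 *		// installing the limit failed (malloc failure)
 *	}
 *	void *big = talloc_size(limited, 2 * 1024 * 1024);   // NULL: over limit
 */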