[Samba.git] / lib / talloc / talloc.c
1 /*
2 Samba Unix SMB/CIFS implementation.
4 Samba trivial allocation library - new interface
6 NOTE: Please read talloc_guide.txt for full documentation
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
13 ** under the LGPL
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
30 inspired by http://swapped.cc/halloc/
33 #include "replace.h"
34 #include "talloc.h"
36 #ifdef HAVE_SYS_AUXV_H
37 #include <sys/auxv.h>
38 #endif
40 #ifdef TALLOC_BUILD_VERSION_MAJOR
41 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
42 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
43 #endif
44 #endif
46 #ifdef TALLOC_BUILD_VERSION_MINOR
47 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
48 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
49 #endif
50 #endif
52 /* Special macros that are no-ops except when run under Valgrind on
53 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
54 #ifdef HAVE_VALGRIND_MEMCHECK_H
55 /* memcheck.h includes valgrind.h */
56 #include <valgrind/memcheck.h>
57 #elif defined(HAVE_VALGRIND_H)
58 #include <valgrind.h>
59 #endif
61 /* use this to force every realloc to change the pointer, to stress test
62 code that might not cope */
63 #define ALWAYS_REALLOC 0
66 #define MAX_TALLOC_SIZE 0x10000000
68 #define TALLOC_FLAG_FREE 0x01
69 #define TALLOC_FLAG_LOOP 0x02
70 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
71 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
74 * Bits above this are random, used to make it harder to fake talloc
75 * headers during an attack. Try not to change this without good reason.
77 #define TALLOC_FLAG_MASK 0x0F
79 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
81 #define TALLOC_MAGIC_BASE 0xe814ec70
82 static unsigned int talloc_magic = (
83 TALLOC_MAGIC_BASE +
84 (TALLOC_VERSION_MAJOR << 12) +
85 (TALLOC_VERSION_MINOR << 4));
87 /* by default we abort when given a bad pointer (such as when talloc_free() is called
88    on a pointer that came from malloc()) */
89 #ifndef TALLOC_ABORT
90 #define TALLOC_ABORT(reason) abort()
91 #endif
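/*
  Usage sketch (illustrative, not part of the original file): a process can
  route talloc aborts through its own handler instead of the default
  TALLOC_ABORT()/abort() path by registering a callback with
  talloc_set_abort_fn(), which is defined further down in this file.
  my_talloc_panic is a stand-in name.

	static void my_talloc_panic(const char *reason)
	{
		fprintf(stderr, "talloc panic: %s\n", reason);
		abort();
	}

	talloc_set_abort_fn(my_talloc_panic);
*/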
93 #ifndef discard_const_p
94 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
95 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
96 #else
97 # define discard_const_p(type, ptr) ((type *)(ptr))
98 #endif
99 #endif
101 /* these macros gain us a few percent of speed on gcc */
102 #if (__GNUC__ >= 3)
103 /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
104 as its first argument */
105 #ifndef likely
106 #define likely(x) __builtin_expect(!!(x), 1)
107 #endif
108 #ifndef unlikely
109 #define unlikely(x) __builtin_expect(!!(x), 0)
110 #endif
111 #else
112 #ifndef likely
113 #define likely(x) (x)
114 #endif
115 #ifndef unlikely
116 #define unlikely(x) (x)
117 #endif
118 #endif
120 /* this null_context is only used if talloc_enable_leak_report() or
121 talloc_enable_leak_report_full() is called, otherwise it remains
122 NULL
124 static void *null_context;
125 static void *autofree_context;
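/*
  Usage sketch (illustrative, not part of the original file): enabling leak
  reporting attaches otherwise parentless allocations to null_context so
  they can be dumped at exit by the atexit() handlers registered in
  talloc_enable_leak_report()/talloc_enable_leak_report_full() below.

	int main(void)
	{
		talloc_enable_leak_report_full();
		char *leaked = talloc_strdup(NULL, "never freed");
		(void)leaked;
		return 0;	// a full report of the leak is printed to stderr at exit
	}
*/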
127 /* used to enable fill of memory on free, which can be useful for
128 * catching use after free errors when valgrind is too slow
130 static struct {
131 bool initialised;
132 bool enabled;
133 uint8_t fill_value;
134 } talloc_fill;
136 #define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
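/*
  Usage sketch (illustrative, not part of the original file): fill-on-free
  is controlled purely by this environment variable, parsed with strtoul()
  in _talloc_free_internal(), so it can be switched on without recompiling:

	$ TALLOC_FREE_FILL=0xfe ./my_program

  Every freed talloc chunk is then memset() to 0xfe, which makes
  use-after-free bugs much easier to spot in a debugger.
*/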
139 * do not wipe the header, to allow the
140 * double-free logic to still work
142 #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
143 if (unlikely(talloc_fill.enabled)) { \
144 size_t _flen = (_tc)->size; \
145 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
146 memset(_fptr, talloc_fill.fill_value, _flen); \
148 } while (0)
150 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
151 /* Mark the whole chunk as not accessible */
152 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
153 size_t _flen = TC_HDR_SIZE + (_tc)->size; \
154 char *_fptr = (char *)(_tc); \
155 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
156 } while(0)
157 #else
158 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
159 #endif
161 #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
162 TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
163 TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
164 } while (0)
166 #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
167 if (unlikely(talloc_fill.enabled)) { \
168 size_t _flen = (_tc)->size - (_new_size); \
169 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
170 _fptr += (_new_size); \
171 memset(_fptr, talloc_fill.fill_value, _flen); \
173 } while (0)
175 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
176 /* Mark the unused bytes not accessible */
177 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
178 size_t _flen = (_tc)->size - (_new_size); \
179 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
180 _fptr += (_new_size); \
181 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
182 } while (0)
183 #else
184 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
185 #endif
187 #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
188 TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
189 TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
190 } while (0)
192 #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
193 if (unlikely(talloc_fill.enabled)) { \
194 size_t _flen = (_tc)->size - (_new_size); \
195 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
196 _fptr += (_new_size); \
197 memset(_fptr, talloc_fill.fill_value, _flen); \
199 } while (0)
201 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
202 /* Mark the unused bytes as undefined */
203 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
204 size_t _flen = (_tc)->size - (_new_size); \
205 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
206 _fptr += (_new_size); \
207 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
208 } while (0)
209 #else
210 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
211 #endif
213 #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
214 TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
215 TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
216 } while (0)
218 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
219 /* Mark the new bytes as undefined */
220 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
221 size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
222 size_t _new_used = TC_HDR_SIZE + (_new_size); \
223 size_t _flen = _new_used - _old_used; \
224 char *_fptr = _old_used + (char *)(_tc); \
225 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
226 } while (0)
227 #else
228 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
229 #endif
231 #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
232 TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
233 } while (0)
235 struct talloc_reference_handle {
236 struct talloc_reference_handle *next, *prev;
237 void *ptr;
238 const char *location;
241 struct talloc_memlimit {
242 struct talloc_chunk *parent;
243 struct talloc_memlimit *upper;
244 size_t max_size;
245 size_t cur_size;
248 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
249 static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
250 size_t size);
251 static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
252 size_t size);
253 static inline void talloc_memlimit_update_on_free(struct talloc_chunk *tc);
255 static inline void _talloc_set_name_const(const void *ptr, const char *name);
257 typedef int (*talloc_destructor_t)(void *);
259 struct talloc_pool_hdr;
261 struct talloc_chunk {
262 unsigned flags;
263 struct talloc_chunk *next, *prev;
264 struct talloc_chunk *parent, *child;
265 struct talloc_reference_handle *refs;
266 talloc_destructor_t destructor;
267 const char *name;
268 size_t size;
271 * limit semantics:
272 * if 'limit' is set it means all *new* children of the context will
273  * be limited to a total aggregate size of max_size for memory
274 * allocations.
275 * cur_size is used to keep track of the current use
277 struct talloc_memlimit *limit;
280 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
281 * is a pointer to the struct talloc_chunk of the pool that it was
282 * allocated from. This way children can quickly find the pool to chew
283 * from.
285 struct talloc_pool_hdr *pool;
288 /* 16 byte alignment seems to keep everyone happy */
289 #define TC_ALIGN16(s) (((s)+15)&~15)
290 #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
291 #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
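/*
  Layout sketch (illustrative, not part of the original file): every pointer
  handed to a talloc caller sits TC_HDR_SIZE bytes after its struct
  talloc_chunk header, so TC_PTR_FROM_CHUNK() and talloc_chunk_from_ptr()
  (defined below) are exact inverses:

	struct talloc_chunk *tc = ...;
	void *ptr = TC_PTR_FROM_CHUNK(tc);
	// talloc_chunk_from_ptr(ptr) == tc, after the magic check

	[ struct talloc_chunk | padding to 16 bytes | user data ... ]
	^ tc                                        ^ ptr
*/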
293 _PUBLIC_ int talloc_version_major(void)
295 return TALLOC_VERSION_MAJOR;
298 _PUBLIC_ int talloc_version_minor(void)
300 return TALLOC_VERSION_MINOR;
303 _PUBLIC_ int talloc_test_get_magic(void)
305 return talloc_magic;
308 static void (*talloc_log_fn)(const char *message);
310 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
312 talloc_log_fn = log_fn;
315 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
316 void talloc_lib_init(void) __attribute__((constructor));
317 void talloc_lib_init(void)
319 uint32_t random_value;
320 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
321 uint8_t *p;
323 * Use the kernel-provided random values used for
324 * ASLR. This won't change per-exec, which is ideal for us
326 p = (uint8_t *) getauxval(AT_RANDOM);
327 if (p) {
329 * We get 16 bytes from getauxval. By calling rand(),
330 * a totally insecure PRNG, but one that will
331 * deterministically have a different value when called
332 * twice, we ensure that if two talloc-like libraries
333 * are somehow loaded in the same address space, that
334 * because we choose different bytes, we will keep the
335 * protection against collision of multiple talloc
336 * libs.
338 * This protection is important because the effects of
339 * passing a talloc pointer from one to the other may
340 * be very hard to determine.
342 int offset = rand() % (16 - sizeof(random_value));
343 memcpy(&random_value, p + offset, sizeof(random_value));
344 } else
345 #endif
348 * Otherwise, hope the location we are loaded in
349 * memory is randomised by someone else
351 random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
353 talloc_magic = random_value & ~TALLOC_FLAG_MASK;
355 #else
356 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
357 #endif
359 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
360 static void talloc_log(const char *fmt, ...)
362 va_list ap;
363 char *message;
365 if (!talloc_log_fn) {
366 return;
369 va_start(ap, fmt);
370 message = talloc_vasprintf(NULL, fmt, ap);
371 va_end(ap);
373 talloc_log_fn(message);
374 talloc_free(message);
377 static void talloc_log_stderr(const char *message)
379 fprintf(stderr, "%s", message);
382 _PUBLIC_ void talloc_set_log_stderr(void)
384 talloc_set_log_fn(talloc_log_stderr);
387 static void (*talloc_abort_fn)(const char *reason);
389 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
391 talloc_abort_fn = abort_fn;
394 static void talloc_abort(const char *reason)
396 talloc_log("%s\n", reason);
398 if (!talloc_abort_fn) {
399 TALLOC_ABORT(reason);
402 talloc_abort_fn(reason);
405 static void talloc_abort_magic(unsigned magic)
407 talloc_abort("Bad talloc magic value - wrong talloc version used/mixed");
410 static void talloc_abort_access_after_free(void)
412 talloc_abort("Bad talloc magic value - access after free");
415 static void talloc_abort_unknown_value(void)
417 talloc_abort("Bad talloc magic value - unknown value");
420 /* panic if we get a bad magic value */
421 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
423 const char *pp = (const char *)ptr;
424 struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
425 if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
426 if ((tc->flags & (~0xF)) == talloc_magic) {
427 talloc_abort_magic(tc->flags & (~TALLOC_FLAG_MASK));
428 return NULL;
431 if (tc->flags & TALLOC_FLAG_FREE) {
432 talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
433 talloc_abort_access_after_free();
434 return NULL;
435 } else {
436 talloc_abort_unknown_value();
437 return NULL;
440 return tc;
443 /* hook into the front of the list */
444 #define _TLIST_ADD(list, p) \
445 do { \
446 if (!(list)) { \
447 (list) = (p); \
448 (p)->next = (p)->prev = NULL; \
449 } else { \
450 (list)->prev = (p); \
451 (p)->next = (list); \
452 (p)->prev = NULL; \
453 (list) = (p); \
455 } while (0)
457 /* remove an element from a list - element doesn't have to be in list. */
458 #define _TLIST_REMOVE(list, p) \
459 do { \
460 if ((p) == (list)) { \
461 (list) = (p)->next; \
462 if (list) (list)->prev = NULL; \
463 } else { \
464 if ((p)->prev) (p)->prev->next = (p)->next; \
465 if ((p)->next) (p)->next->prev = (p)->prev; \
467 if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
468 } while (0)
472 return the parent chunk of a pointer
474 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
476 struct talloc_chunk *tc;
478 if (unlikely(ptr == NULL)) {
479 return NULL;
482 tc = talloc_chunk_from_ptr(ptr);
483 while (tc->prev) tc=tc->prev;
485 return tc->parent;
488 _PUBLIC_ void *talloc_parent(const void *ptr)
490 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
491 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
495   find the parent's name
497 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
499 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
500 return tc? tc->name : NULL;
504   A pool carries an in-pool object count in the first 16
505   bytes. This is done to support talloc_steal() to a parent outside of the
506 pool. The count includes the pool itself, so a talloc_free() on a pool will
507 only destroy the pool if the count has dropped to zero. A talloc_free() of a
508 pool member will reduce the count, and eventually also call free(3) on the
509 pool memory.
511 The object count is not put into "struct talloc_chunk" because it is only
512 relevant for talloc pools and the alignment to 16 bytes would increase the
513 memory footprint of each talloc chunk by those 16 bytes.
516 struct talloc_pool_hdr {
517 void *end;
518 unsigned int object_count;
519 size_t poolsize;
522 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
524 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
526 return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
529 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
531 return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
534 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
536 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
537 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
540 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
542 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
545 /* If tc is inside a pool, this gives the next neighbour. */
546 static inline void *tc_next_chunk(struct talloc_chunk *tc)
548 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
551 static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
553 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
554 return tc_next_chunk(tc);
557 /* Mark the whole remaining pool as not accessible */
558 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
560 size_t flen = tc_pool_space_left(pool_hdr);
562 if (unlikely(talloc_fill.enabled)) {
563 memset(pool_hdr->end, talloc_fill.fill_value, flen);
566 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
567 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
568 #endif
572 Allocate from a pool
575 static inline struct talloc_chunk *talloc_alloc_pool(struct talloc_chunk *parent,
576 size_t size, size_t prefix_len)
578 struct talloc_pool_hdr *pool_hdr = NULL;
579 size_t space_left;
580 struct talloc_chunk *result;
581 size_t chunk_size;
583 if (parent == NULL) {
584 return NULL;
587 if (parent->flags & TALLOC_FLAG_POOL) {
588 pool_hdr = talloc_pool_from_chunk(parent);
590 else if (parent->flags & TALLOC_FLAG_POOLMEM) {
591 pool_hdr = parent->pool;
594 if (pool_hdr == NULL) {
595 return NULL;
598 space_left = tc_pool_space_left(pool_hdr);
601 * Align size to 16 bytes
603 chunk_size = TC_ALIGN16(size + prefix_len);
605 if (space_left < chunk_size) {
606 return NULL;
609 result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
611 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
612 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
613 #endif
615 pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
617 result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
618 result->pool = pool_hdr;
620 pool_hdr->object_count++;
622 return result;
626 Allocate a bit of memory as a child of an existing pointer
628 static inline void *__talloc_with_prefix(const void *context, size_t size,
629 size_t prefix_len)
631 struct talloc_chunk *tc = NULL;
632 struct talloc_memlimit *limit = NULL;
633 size_t total_len = TC_HDR_SIZE + size + prefix_len;
635 if (unlikely(context == NULL)) {
636 context = null_context;
639 if (unlikely(size >= MAX_TALLOC_SIZE)) {
640 return NULL;
643 if (unlikely(total_len < TC_HDR_SIZE)) {
644 return NULL;
647 if (context != NULL) {
648 struct talloc_chunk *ptc = talloc_chunk_from_ptr(context);
650 if (ptc->limit != NULL) {
651 limit = ptc->limit;
654 tc = talloc_alloc_pool(ptc, TC_HDR_SIZE+size, prefix_len);
657 if (tc == NULL) {
658 char *ptr;
661 * Only do the memlimit check/update on actual allocation.
663 if (!talloc_memlimit_check(limit, total_len)) {
664 errno = ENOMEM;
665 return NULL;
668 ptr = malloc(total_len);
669 if (unlikely(ptr == NULL)) {
670 return NULL;
672 tc = (struct talloc_chunk *)(ptr + prefix_len);
673 tc->flags = talloc_magic;
674 tc->pool = NULL;
676 talloc_memlimit_grow(limit, total_len);
679 tc->limit = limit;
680 tc->size = size;
681 tc->destructor = NULL;
682 tc->child = NULL;
683 tc->name = NULL;
684 tc->refs = NULL;
686 if (likely(context)) {
687 struct talloc_chunk *parent = talloc_chunk_from_ptr(context);
689 if (parent->child) {
690 parent->child->parent = NULL;
691 tc->next = parent->child;
692 tc->next->prev = tc;
693 } else {
694 tc->next = NULL;
696 tc->parent = parent;
697 tc->prev = NULL;
698 parent->child = tc;
699 } else {
700 tc->next = tc->prev = tc->parent = NULL;
703 return TC_PTR_FROM_CHUNK(tc);
706 static inline void *__talloc(const void *context, size_t size)
708 return __talloc_with_prefix(context, size, 0);
712 * Create a talloc pool
715 static inline void *_talloc_pool(const void *context, size_t size)
717 struct talloc_chunk *tc;
718 struct talloc_pool_hdr *pool_hdr;
719 void *result;
721 result = __talloc_with_prefix(context, size, TP_HDR_SIZE);
723 if (unlikely(result == NULL)) {
724 return NULL;
727 tc = talloc_chunk_from_ptr(result);
728 pool_hdr = talloc_pool_from_chunk(tc);
730 tc->flags |= TALLOC_FLAG_POOL;
731 tc->size = 0;
733 pool_hdr->object_count = 1;
734 pool_hdr->end = result;
735 pool_hdr->poolsize = size;
737 tc_invalidate_pool(pool_hdr);
739 return result;
742 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
744 return _talloc_pool(context, size);
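/*
  Usage sketch (illustrative, not part of the original file): a pool
  pre-allocates one large buffer; children created under it are carved out
  of that buffer by talloc_alloc_pool() instead of going through malloc(),
  which avoids per-allocation heap traffic. Uses the talloc_pool()/talloc()
  wrappers from talloc.h; struct request is a stand-in type.

	void *pool = talloc_pool(NULL, 8192);
	struct request *req = talloc(pool, struct request);
	char *name = talloc_strdup(req, "carved from the same pool");

	talloc_free(pool);	// releases the pool and everything inside it
*/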
748 * Create a talloc pool correctly sized for a basic size plus
749 * a number of subobjects whose total size is given. Essentially
750 * a custom allocator for talloc to reduce fragmentation.
753 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
754 size_t type_size,
755 const char *type_name,
756 unsigned num_subobjects,
757 size_t total_subobjects_size)
759 size_t poolsize, subobjects_slack, tmp;
760 struct talloc_chunk *tc;
761 struct talloc_pool_hdr *pool_hdr;
762 void *ret;
764 poolsize = type_size + total_subobjects_size;
766 if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
767 goto overflow;
770 if (num_subobjects == UINT_MAX) {
771 goto overflow;
773 num_subobjects += 1; /* the object body itself */
776 * Alignment can increase the pool size by at most 15 bytes per object
777 * plus alignment for the object itself
779 subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
780 if (subobjects_slack < num_subobjects) {
781 goto overflow;
784 tmp = poolsize + subobjects_slack;
785 if ((tmp < poolsize) || (tmp < subobjects_slack)) {
786 goto overflow;
788 poolsize = tmp;
790 ret = _talloc_pool(ctx, poolsize);
791 if (ret == NULL) {
792 return NULL;
795 tc = talloc_chunk_from_ptr(ret);
796 tc->size = type_size;
798 pool_hdr = talloc_pool_from_chunk(tc);
800 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
801 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
802 #endif
804 pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
806 _talloc_set_name_const(ret, type_name);
807 return ret;
809 overflow:
810 return NULL;
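/*
  Usage sketch (illustrative, not part of the original file): the
  talloc_pooled_object() macro in talloc.h wraps this function so a parent
  object plus a known number of sub-allocations can be served from a single
  pool allocation. The counts are only a capacity hint; if the pool runs
  out, further children simply fall back to ordinary allocation. struct foo
  is a stand-in type.

	// one object plus room for about 2 sub-allocations totalling 64 bytes
	struct foo *f = talloc_pooled_object(NULL, struct foo, 2, 64);
	f->name = talloc_strdup(f, "lives in the same pool");
*/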
814 setup a destructor to be called on free of a pointer
815 the destructor should return 0 on success, or -1 on failure.
816 if the destructor fails then the free is failed, and the memory can
817   continue to be used
819 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
821 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
822 tc->destructor = destructor;
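/*
  Usage sketch (illustrative, not part of the original file): the typed
  talloc_set_destructor() macro from talloc.h ends up here. The destructor
  runs from _talloc_free_internal(); returning -1 vetoes the free,
  returning 0 lets it proceed. struct open_file is a stand-in type.

	struct open_file { int fd; };	// stand-in

	static int open_file_destructor(struct open_file *f)
	{
		close(f->fd);
		return 0;
	}

	struct open_file *f = talloc(ctx, struct open_file);
	talloc_set_destructor(f, open_file_destructor);
	// close(f->fd) now happens automatically on talloc_free(f)
*/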
826 increase the reference count on a piece of memory.
828 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
830 if (unlikely(!talloc_reference(null_context, ptr))) {
831 return -1;
833 return 0;
837 helper for talloc_reference()
839 this is referenced by a function pointer and should not be inline
841 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
843 struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
844 _TLIST_REMOVE(ptr_tc->refs, handle);
845 return 0;
849 more efficient way to add a name to a pointer - the name must point to a
850 true string constant
852 static inline void _talloc_set_name_const(const void *ptr, const char *name)
854 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
855 tc->name = name;
859 internal talloc_named_const()
861 static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
863 void *ptr;
865 ptr = __talloc(context, size);
866 if (unlikely(ptr == NULL)) {
867 return NULL;
870 _talloc_set_name_const(ptr, name);
872 return ptr;
876 make a secondary reference to a pointer, hanging off the given context.
877 the pointer remains valid until both the original caller and this given
878 context are freed.
880 the major use for this is when two different structures need to reference the
881 same underlying data, and you want to be able to free the two instances separately,
882 and in either order
884 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
886 struct talloc_chunk *tc;
887 struct talloc_reference_handle *handle;
888 if (unlikely(ptr == NULL)) return NULL;
890 tc = talloc_chunk_from_ptr(ptr);
891 handle = (struct talloc_reference_handle *)_talloc_named_const(context,
892 sizeof(struct talloc_reference_handle),
893 TALLOC_MAGIC_REFERENCE);
894 if (unlikely(handle == NULL)) return NULL;
896 /* note that we hang the destructor off the handle, not the
897 main context as that allows the caller to still setup their
898 own destructor on the context if they want to */
899 talloc_set_destructor(handle, talloc_reference_destructor);
900 handle->ptr = discard_const_p(void, ptr);
901 handle->location = location;
902 _TLIST_ADD(tc->refs, handle);
903 return handle->ptr;
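/*
  Usage sketch (illustrative, not part of the original file): after
  talloc_reference() the data effectively has two parents and, as the
  comment above says, stays valid until both are gone.

	void *a = talloc_new(NULL);
	void *b = talloc_new(NULL);
	char *rec = talloc_strdup(a, "shared record");

	talloc_reference(b, rec);	// rec is now also kept alive by b
	talloc_free(a);			// rec survives via the reference under b
	talloc_free(b);			// last holder gone, rec goes with it
*/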
906 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
908 static inline void _talloc_free_poolmem(struct talloc_chunk *tc,
909 const char *location)
911 struct talloc_pool_hdr *pool;
912 struct talloc_chunk *pool_tc;
913 void *next_tc;
915 pool = tc->pool;
916 pool_tc = talloc_chunk_from_pool(pool);
917 next_tc = tc_next_chunk(tc);
919 tc->flags |= TALLOC_FLAG_FREE;
921 /* we mark the freed memory with where we called the free
922 * from. This means on a double free error we can report where
923 * the first free came from
925 tc->name = location;
927 TC_INVALIDATE_FULL_CHUNK(tc);
929 if (unlikely(pool->object_count == 0)) {
930 talloc_abort("Pool object count zero!");
931 return;
934 pool->object_count--;
936 if (unlikely(pool->object_count == 1
937 && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
939 * if there is just one object left in the pool
940 * and pool->flags does not have TALLOC_FLAG_FREE,
941 * it means this is the pool itself and
942 * the rest is available for new objects
943 * again.
945 pool->end = tc_pool_first_chunk(pool);
946 tc_invalidate_pool(pool);
947 return;
950 if (unlikely(pool->object_count == 0)) {
952 * we mark the freed memory with where we called the free
953 * from. This means on a double free error we can report where
954 * the first free came from
956 pool_tc->name = location;
958 if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
959 _talloc_free_poolmem(pool_tc, location);
960 } else {
962 * The talloc_memlimit_update_on_free()
963 * call takes into account the
964 * prefix TP_HDR_SIZE allocated before
965 * the pool talloc_chunk.
967 talloc_memlimit_update_on_free(pool_tc);
968 TC_INVALIDATE_FULL_CHUNK(pool_tc);
969 free(pool);
971 return;
974 if (pool->end == next_tc) {
976 		 * if pool->end still points to the end of
977 * 'tc' (which is stored in the 'next_tc' variable),
978 * we can reclaim the memory of 'tc'.
980 pool->end = tc;
981 return;
985 * Do nothing. The memory is just "wasted", waiting for the pool
986 * itself to be freed.
990 static inline void _talloc_free_children_internal(struct talloc_chunk *tc,
991 void *ptr,
992 const char *location);
995 internal talloc_free call
997 static inline int _talloc_free_internal(void *ptr, const char *location)
999 struct talloc_chunk *tc;
1000 void *ptr_to_free;
1002 if (unlikely(ptr == NULL)) {
1003 return -1;
1006 /* possibly initialised the talloc fill value */
1007 if (unlikely(!talloc_fill.initialised)) {
1008 const char *fill = getenv(TALLOC_FILL_ENV);
1009 if (fill != NULL) {
1010 talloc_fill.enabled = true;
1011 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1013 talloc_fill.initialised = true;
1016 tc = talloc_chunk_from_ptr(ptr);
1018 if (unlikely(tc->refs)) {
1019 int is_child;
1020 /* check if this is a reference from a child or
1021 	 * grandchild back to its parent or grandparent
1023 * in that case we need to remove the reference and
1024 * call another instance of talloc_free() on the current
1025 * pointer.
1027 is_child = talloc_is_parent(tc->refs, ptr);
1028 _talloc_free_internal(tc->refs, location);
1029 if (is_child) {
1030 return _talloc_free_internal(ptr, location);
1032 return -1;
1035 if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1036 /* we have a free loop - stop looping */
1037 return 0;
1040 if (unlikely(tc->destructor)) {
1041 talloc_destructor_t d = tc->destructor;
1042 if (d == (talloc_destructor_t)-1) {
1043 return -1;
1045 tc->destructor = (talloc_destructor_t)-1;
1046 if (d(ptr) == -1) {
1048 * Only replace the destructor pointer if
1049 * calling the destructor didn't modify it.
1051 if (tc->destructor == (talloc_destructor_t)-1) {
1052 tc->destructor = d;
1054 return -1;
1056 tc->destructor = NULL;
1059 if (tc->parent) {
1060 _TLIST_REMOVE(tc->parent->child, tc);
1061 if (tc->parent->child) {
1062 tc->parent->child->parent = tc->parent;
1064 } else {
1065 if (tc->prev) tc->prev->next = tc->next;
1066 if (tc->next) tc->next->prev = tc->prev;
1067 tc->prev = tc->next = NULL;
1070 tc->flags |= TALLOC_FLAG_LOOP;
1072 _talloc_free_children_internal(tc, ptr, location);
1074 tc->flags |= TALLOC_FLAG_FREE;
1076 /* we mark the freed memory with where we called the free
1077 * from. This means on a double free error we can report where
1078 * the first free came from
1080 tc->name = location;
1082 if (tc->flags & TALLOC_FLAG_POOL) {
1083 struct talloc_pool_hdr *pool;
1085 pool = talloc_pool_from_chunk(tc);
1087 if (unlikely(pool->object_count == 0)) {
1088 talloc_abort("Pool object count zero!");
1089 return 0;
1092 pool->object_count--;
1094 if (likely(pool->object_count != 0)) {
1095 return 0;
1099 * With object_count==0, a pool becomes a normal piece of
1100 * memory to free. If it's allocated inside a pool, it needs
1101 * to be freed as poolmem, else it needs to be just freed.
1103 ptr_to_free = pool;
1104 } else {
1105 ptr_to_free = tc;
1108 if (tc->flags & TALLOC_FLAG_POOLMEM) {
1109 _talloc_free_poolmem(tc, location);
1110 return 0;
1113 talloc_memlimit_update_on_free(tc);
1115 TC_INVALIDATE_FULL_CHUNK(tc);
1116 free(ptr_to_free);
1117 return 0;
1120 static inline size_t _talloc_total_limit_size(const void *ptr,
1121 struct talloc_memlimit *old_limit,
1122 struct talloc_memlimit *new_limit);
1125 move a lump of memory from one talloc context to another return the
1126 ptr on success, or NULL if it could not be transferred.
1127 passing NULL as ptr will always return NULL with no side effects.
1129 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1131 struct talloc_chunk *tc, *new_tc;
1132 size_t ctx_size = 0;
1134 if (unlikely(!ptr)) {
1135 return NULL;
1138 if (unlikely(new_ctx == NULL)) {
1139 new_ctx = null_context;
1142 tc = talloc_chunk_from_ptr(ptr);
1144 if (tc->limit != NULL) {
1146 ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1148 /* Decrement the memory limit from the source .. */
1149 talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1151 if (tc->limit->parent == tc) {
1152 tc->limit->upper = NULL;
1153 } else {
1154 tc->limit = NULL;
1158 if (unlikely(new_ctx == NULL)) {
1159 if (tc->parent) {
1160 _TLIST_REMOVE(tc->parent->child, tc);
1161 if (tc->parent->child) {
1162 tc->parent->child->parent = tc->parent;
1164 } else {
1165 if (tc->prev) tc->prev->next = tc->next;
1166 if (tc->next) tc->next->prev = tc->prev;
1169 tc->parent = tc->next = tc->prev = NULL;
1170 return discard_const_p(void, ptr);
1173 new_tc = talloc_chunk_from_ptr(new_ctx);
1175 if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1176 return discard_const_p(void, ptr);
1179 if (tc->parent) {
1180 _TLIST_REMOVE(tc->parent->child, tc);
1181 if (tc->parent->child) {
1182 tc->parent->child->parent = tc->parent;
1184 } else {
1185 if (tc->prev) tc->prev->next = tc->next;
1186 if (tc->next) tc->next->prev = tc->prev;
1187 tc->prev = tc->next = NULL;
1190 tc->parent = new_tc;
1191 if (new_tc->child) new_tc->child->parent = NULL;
1192 _TLIST_ADD(new_tc->child, tc);
1194 if (tc->limit || new_tc->limit) {
1195 ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1196 new_tc->limit);
1197 /* .. and increment it in the destination. */
1198 if (new_tc->limit) {
1199 talloc_memlimit_grow(new_tc->limit, ctx_size);
1203 return discard_const_p(void, ptr);
1207 move a lump of memory from one talloc context to another return the
1208 ptr on success, or NULL if it could not be transferred.
1209 passing NULL as ptr will always return NULL with no side effects.
1211 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1213 struct talloc_chunk *tc;
1215 if (unlikely(ptr == NULL)) {
1216 return NULL;
1219 tc = talloc_chunk_from_ptr(ptr);
1221 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1222 struct talloc_reference_handle *h;
1224 talloc_log("WARNING: talloc_steal with references at %s\n",
1225 location);
1227 for (h=tc->refs; h; h=h->next) {
1228 talloc_log("\treference at %s\n",
1229 h->location);
1233 #if 0
1234 /* this test is probably too expensive to have on in the
1235    normal build, but it is useful for debugging */
1236 if (talloc_is_parent(new_ctx, ptr)) {
1237 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1239 #endif
1241 return _talloc_steal_internal(new_ctx, ptr);
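/*
  Usage sketch (illustrative, not part of the original file): talloc_steal()
  (the talloc.h macro that lands here) moves a chunk to a new parent without
  copying, which is the usual way to hand a result built on a temporary
  context over to a long-lived one. long_lived_ctx stands for any existing
  talloc context.

	void *tmp = talloc_new(NULL);
	char *result = talloc_strdup(tmp, "built on a scratch context");

	talloc_steal(long_lived_ctx, result);	// reparent, no copy
	talloc_free(tmp);			// result is unaffected
*/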
1245 this is like a talloc_steal(), but you must supply the old
1246 parent. This resolves the ambiguity in a talloc_steal() which is
1247 called on a context that has more than one parent (via references)
1249 The old parent can be either a reference or a parent
1251 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1253 struct talloc_chunk *tc;
1254 struct talloc_reference_handle *h;
1256 if (unlikely(ptr == NULL)) {
1257 return NULL;
1260 if (old_parent == talloc_parent(ptr)) {
1261 return _talloc_steal_internal(new_parent, ptr);
1264 tc = talloc_chunk_from_ptr(ptr);
1265 for (h=tc->refs;h;h=h->next) {
1266 if (talloc_parent(h) == old_parent) {
1267 if (_talloc_steal_internal(new_parent, h) != h) {
1268 return NULL;
1270 return discard_const_p(void, ptr);
1274 /* it wasn't a parent */
1275 return NULL;
1279   remove a secondary reference to a pointer. This undoes what
1280 talloc_reference() has done. The context and pointer arguments
1281 must match those given to a talloc_reference()
1283 static inline int talloc_unreference(const void *context, const void *ptr)
1285 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1286 struct talloc_reference_handle *h;
1288 if (unlikely(context == NULL)) {
1289 context = null_context;
1292 for (h=tc->refs;h;h=h->next) {
1293 struct talloc_chunk *p = talloc_parent_chunk(h);
1294 if (p == NULL) {
1295 if (context == NULL) break;
1296 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1297 break;
1300 if (h == NULL) {
1301 return -1;
1304 return _talloc_free_internal(h, __location__);
1308 remove a specific parent context from a pointer. This is a more
1309 controlled variant of talloc_free()
1311 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1313 struct talloc_chunk *tc_p, *new_p, *tc_c;
1314 void *new_parent;
1316 if (ptr == NULL) {
1317 return -1;
1320 if (context == NULL) {
1321 context = null_context;
1324 if (talloc_unreference(context, ptr) == 0) {
1325 return 0;
1328 if (context != NULL) {
1329 tc_c = talloc_chunk_from_ptr(context);
1330 } else {
1331 tc_c = NULL;
1333 if (tc_c != talloc_parent_chunk(ptr)) {
1334 return -1;
1337 tc_p = talloc_chunk_from_ptr(ptr);
1339 if (tc_p->refs == NULL) {
1340 return _talloc_free_internal(ptr, __location__);
1343 new_p = talloc_parent_chunk(tc_p->refs);
1344 if (new_p) {
1345 new_parent = TC_PTR_FROM_CHUNK(new_p);
1346 } else {
1347 new_parent = NULL;
1350 if (talloc_unreference(new_parent, ptr) != 0) {
1351 return -1;
1354 _talloc_steal_internal(new_parent, ptr);
1356 return 0;
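/*
  Usage sketch (illustrative, not part of the original file): unlike
  talloc_free(), which detaches a chunk from whatever parent it currently
  has, talloc_unlink() names the parent explicitly, so it can drop either
  the real parent or one particular reference. cache and rec stand for a
  referencing context and a pointer created elsewhere.

	talloc_unlink(cache, rec);	// drop only cache's claim on rec
	talloc_unlink(NULL, ptr);	// with NULL, targets the null_context parent
*/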
1360 add a name to an existing pointer - va_list version
1362 static inline const char *talloc_set_name_v(const void *ptr, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0);
1364 static inline const char *talloc_set_name_v(const void *ptr, const char *fmt, va_list ap)
1366 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1367 tc->name = talloc_vasprintf(ptr, fmt, ap);
1368 if (likely(tc->name)) {
1369 _talloc_set_name_const(tc->name, ".name");
1371 return tc->name;
1375 add a name to an existing pointer
1377 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1379 const char *name;
1380 va_list ap;
1381 va_start(ap, fmt);
1382 name = talloc_set_name_v(ptr, fmt, ap);
1383 va_end(ap);
1384 return name;
1389 create a named talloc pointer. Any talloc pointer can be named, and
1390 talloc_named() operates just like talloc() except that it allows you
1391 to name the pointer.
1393 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1395 va_list ap;
1396 void *ptr;
1397 const char *name;
1399 ptr = __talloc(context, size);
1400 if (unlikely(ptr == NULL)) return NULL;
1402 va_start(ap, fmt);
1403 name = talloc_set_name_v(ptr, fmt, ap);
1404 va_end(ap);
1406 if (unlikely(name == NULL)) {
1407 _talloc_free_internal(ptr, __location__);
1408 return NULL;
1411 return ptr;
1415 return the name of a talloc ptr, or "UNNAMED"
1417 static inline const char *__talloc_get_name(const void *ptr)
1419 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1420 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1421 return ".reference";
1423 if (likely(tc->name)) {
1424 return tc->name;
1426 return "UNNAMED";
1429 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1431 return __talloc_get_name(ptr);
1435 check if a pointer has the given name. If it does, return the pointer,
1436 otherwise return NULL
1438 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1440 const char *pname;
1441 if (unlikely(ptr == NULL)) return NULL;
1442 pname = __talloc_get_name(ptr);
1443 if (likely(pname == name || strcmp(pname, name) == 0)) {
1444 return discard_const_p(void, ptr);
1446 return NULL;
1449 static void talloc_abort_type_mismatch(const char *location,
1450 const char *name,
1451 const char *expected)
1453 const char *reason;
1455 reason = talloc_asprintf(NULL,
1456 "%s: Type mismatch: name[%s] expected[%s]",
1457 location,
1458 name?name:"NULL",
1459 expected);
1460 if (!reason) {
1461 reason = "Type mismatch";
1464 talloc_abort(reason);
1467 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1469 const char *pname;
1471 if (unlikely(ptr == NULL)) {
1472 talloc_abort_type_mismatch(location, NULL, name);
1473 return NULL;
1476 pname = __talloc_get_name(ptr);
1477 if (likely(pname == name || strcmp(pname, name) == 0)) {
1478 return discard_const_p(void, ptr);
1481 talloc_abort_type_mismatch(location, pname, name);
1482 return NULL;
1486 this is for compatibility with older versions of talloc
1488 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1490 va_list ap;
1491 void *ptr;
1492 const char *name;
1494 ptr = __talloc(NULL, 0);
1495 if (unlikely(ptr == NULL)) return NULL;
1497 va_start(ap, fmt);
1498 name = talloc_set_name_v(ptr, fmt, ap);
1499 va_end(ap);
1501 if (unlikely(name == NULL)) {
1502 _talloc_free_internal(ptr, __location__);
1503 return NULL;
1506 return ptr;
1509 static inline void _talloc_free_children_internal(struct talloc_chunk *tc,
1510 void *ptr,
1511 const char *location)
1513 while (tc->child) {
1514 /* we need to work out who will own an abandoned child
1515 if it cannot be freed. In priority order, the first
1516 choice is owner of any remaining reference to this
1517 pointer, the second choice is our parent, and the
1518 final choice is the null context. */
1519 void *child = TC_PTR_FROM_CHUNK(tc->child);
1520 const void *new_parent = null_context;
1521 if (unlikely(tc->child->refs)) {
1522 struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1523 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1525 if (unlikely(_talloc_free_internal(child, location) == -1)) {
1526 if (talloc_parent_chunk(child) != tc) {
1528 * Destructor already reparented this child.
1529 * No further reparenting needed.
1531 return;
1533 if (new_parent == null_context) {
1534 struct talloc_chunk *p = talloc_parent_chunk(ptr);
1535 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1537 _talloc_steal_internal(new_parent, child);
1543 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1544 should probably not be used in new code. It's in here to keep the talloc
1545 code consistent across Samba 3 and 4.
1547 _PUBLIC_ void talloc_free_children(void *ptr)
1549 struct talloc_chunk *tc_name = NULL;
1550 struct talloc_chunk *tc;
1552 if (unlikely(ptr == NULL)) {
1553 return;
1556 tc = talloc_chunk_from_ptr(ptr);
1558 /* we do not want to free the context name if it is a child .. */
1559 if (likely(tc->child)) {
1560 for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1561 if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1563 if (tc_name) {
1564 _TLIST_REMOVE(tc->child, tc_name);
1565 if (tc->child) {
1566 tc->child->parent = tc;
1571 _talloc_free_children_internal(tc, ptr, __location__);
1573 /* .. so we put it back after all other children have been freed */
1574 if (tc_name) {
1575 if (tc->child) {
1576 tc->child->parent = NULL;
1578 tc_name->parent = tc;
1579 _TLIST_ADD(tc->child, tc_name);
1584 Allocate a bit of memory as a child of an existing pointer
1586 _PUBLIC_ void *_talloc(const void *context, size_t size)
1588 return __talloc(context, size);
1592 externally callable talloc_set_name_const()
1594 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1596 _talloc_set_name_const(ptr, name);
1600 create a named talloc pointer. Any talloc pointer can be named, and
1601 talloc_named() operates just like talloc() except that it allows you
1602 to name the pointer.
1604 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1606 return _talloc_named_const(context, size, name);
1610 free a talloc pointer. This also frees all child pointers of this
1611 pointer recursively
1613 return 0 if the memory is actually freed, otherwise -1. The memory
1614 will not be freed if the ref_count is > 1 or the destructor (if
1615 any) returns non-zero
1617 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1619 struct talloc_chunk *tc;
1621 if (unlikely(ptr == NULL)) {
1622 return -1;
1625 tc = talloc_chunk_from_ptr(ptr);
1627 if (unlikely(tc->refs != NULL)) {
1628 struct talloc_reference_handle *h;
1630 if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1631 /* in this case we do know which parent should
1632 get this pointer, as there is really only
1633 one parent */
1634 return talloc_unlink(null_context, ptr);
1637 talloc_log("ERROR: talloc_free with references at %s\n",
1638 location);
1640 for (h=tc->refs; h; h=h->next) {
1641 talloc_log("\treference at %s\n",
1642 h->location);
1644 return -1;
1647 return _talloc_free_internal(ptr, location);
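/*
  Usage sketch (illustrative, not part of the original file): the
  talloc_free() macro from talloc.h lands here. As the code above shows, a
  free is refused (returning -1 and logging) when several references are
  outstanding or a destructor vetoes it, so callers that care should check
  the result.

	if (talloc_free(ptr) == -1) {
		// still referenced elsewhere, or the destructor said no;
		// consider talloc_unlink() with an explicit parent instead
	}
*/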
1653 A talloc version of realloc. The context argument is only used if
1654 ptr is NULL
1656 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1658 struct talloc_chunk *tc;
1659 void *new_ptr;
1660 bool malloced = false;
1661 struct talloc_pool_hdr *pool_hdr = NULL;
1662 size_t old_size = 0;
1663 size_t new_size = 0;
1665 /* size zero is equivalent to free() */
1666 if (unlikely(size == 0)) {
1667 talloc_unlink(context, ptr);
1668 return NULL;
1671 if (unlikely(size >= MAX_TALLOC_SIZE)) {
1672 return NULL;
1675 /* realloc(NULL) is equivalent to malloc() */
1676 if (ptr == NULL) {
1677 return _talloc_named_const(context, size, name);
1680 tc = talloc_chunk_from_ptr(ptr);
1682 /* don't allow realloc on referenced pointers */
1683 if (unlikely(tc->refs)) {
1684 return NULL;
1687 /* don't let anybody try to realloc a talloc_pool */
1688 if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
1689 return NULL;
1692 if (tc->limit && (size > tc->size)) {
1693 if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1694 errno = ENOMEM;
1695 return NULL;
1699 /* handle realloc inside a talloc_pool */
1700 if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1701 pool_hdr = tc->pool;
1704 #if (ALWAYS_REALLOC == 0)
1705 /* don't shrink if we have less than 1k to gain */
1706 if (size < tc->size && tc->limit == NULL) {
1707 if (pool_hdr) {
1708 void *next_tc = tc_next_chunk(tc);
1709 TC_INVALIDATE_SHRINK_CHUNK(tc, size);
1710 tc->size = size;
1711 if (next_tc == pool_hdr->end) {
1712 /* note: tc->size has changed, so this works */
1713 pool_hdr->end = tc_next_chunk(tc);
1715 return ptr;
1716 } else if ((tc->size - size) < 1024) {
1718 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1719 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1720 * after each realloc call, which slows down
1721 * testing a lot :-(.
1723 * That is why we only mark memory as undefined here.
1725 TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1727 /* do not shrink if we have less than 1k to gain */
1728 tc->size = size;
1729 return ptr;
1731 } else if (tc->size == size) {
1733 * do not change the pointer if it is exactly
1734 * the same size.
1736 return ptr;
1738 #endif
1740 /* by resetting magic we catch users of the old memory */
1741 tc->flags |= TALLOC_FLAG_FREE;
1743 #if ALWAYS_REALLOC
1744 if (pool_hdr) {
1745 new_ptr = talloc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1746 pool_hdr->object_count--;
1748 if (new_ptr == NULL) {
1749 new_ptr = malloc(TC_HDR_SIZE+size);
1750 malloced = true;
1751 new_size = size;
1754 if (new_ptr) {
1755 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1756 TC_INVALIDATE_FULL_CHUNK(tc);
1758 } else {
1759 /* We're doing malloc then free here, so record the difference. */
1760 old_size = tc->size;
1761 new_size = size;
1762 new_ptr = malloc(size + TC_HDR_SIZE);
1763 if (new_ptr) {
1764 memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
1765 free(tc);
1768 #else
1769 if (pool_hdr) {
1770 struct talloc_chunk *pool_tc;
1771 void *next_tc = tc_next_chunk(tc);
1772 size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1773 size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1774 size_t space_needed;
1775 size_t space_left;
1776 unsigned int chunk_count = pool_hdr->object_count;
1778 pool_tc = talloc_chunk_from_pool(pool_hdr);
1779 if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1780 chunk_count -= 1;
1783 if (chunk_count == 1) {
1785 * optimize for the case where 'tc' is the only
1786 * chunk in the pool.
1788 char *start = tc_pool_first_chunk(pool_hdr);
1789 space_needed = new_chunk_size;
1790 space_left = (char *)tc_pool_end(pool_hdr) - start;
1792 if (space_left >= space_needed) {
1793 size_t old_used = TC_HDR_SIZE + tc->size;
1794 size_t new_used = TC_HDR_SIZE + size;
1795 new_ptr = start;
1797 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1800 * The area from
1801 * start -> tc may have
1802 * been freed and thus been marked as
1803 * VALGRIND_MEM_NOACCESS. Set it to
1804 * VALGRIND_MEM_UNDEFINED so we can
1805 * copy into it without valgrind errors.
1806 * We can't just mark
1807 * new_ptr -> new_ptr + old_used
1808 * as this may overlap on top of tc,
1809 * (which is why we use memmove, not
1810 * memcpy below) hence the MIN.
1812 size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1813 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1815 #endif
1817 memmove(new_ptr, tc, old_used);
1819 tc = (struct talloc_chunk *)new_ptr;
1820 TC_UNDEFINE_GROW_CHUNK(tc, size);
1823 * first we do not align the pool pointer
1824 * because we want to invalidate the padding
1825 * too.
1827 pool_hdr->end = new_used + (char *)new_ptr;
1828 tc_invalidate_pool(pool_hdr);
1830 /* now the aligned pointer */
1831 pool_hdr->end = new_chunk_size + (char *)new_ptr;
1832 goto got_new_ptr;
1835 next_tc = NULL;
1838 if (new_chunk_size == old_chunk_size) {
1839 TC_UNDEFINE_GROW_CHUNK(tc, size);
1840 tc->flags &= ~TALLOC_FLAG_FREE;
1841 tc->size = size;
1842 return ptr;
1845 if (next_tc == pool_hdr->end) {
1847 * optimize for the case where 'tc' is the last
1848 * chunk in the pool.
1850 space_needed = new_chunk_size - old_chunk_size;
1851 space_left = tc_pool_space_left(pool_hdr);
1853 if (space_left >= space_needed) {
1854 TC_UNDEFINE_GROW_CHUNK(tc, size);
1855 tc->flags &= ~TALLOC_FLAG_FREE;
1856 tc->size = size;
1857 pool_hdr->end = tc_next_chunk(tc);
1858 return ptr;
1862 new_ptr = talloc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1864 if (new_ptr == NULL) {
1865 new_ptr = malloc(TC_HDR_SIZE+size);
1866 malloced = true;
1867 new_size = size;
1870 if (new_ptr) {
1871 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1873 _talloc_free_poolmem(tc, __location__ "_talloc_realloc");
1876 else {
1877 /* We're doing realloc here, so record the difference. */
1878 old_size = tc->size;
1879 new_size = size;
1880 new_ptr = realloc(tc, size + TC_HDR_SIZE);
1882 got_new_ptr:
1883 #endif
1884 if (unlikely(!new_ptr)) {
1885 tc->flags &= ~TALLOC_FLAG_FREE;
1886 return NULL;
1889 tc = (struct talloc_chunk *)new_ptr;
1890 tc->flags &= ~TALLOC_FLAG_FREE;
1891 if (malloced) {
1892 tc->flags &= ~TALLOC_FLAG_POOLMEM;
1894 if (tc->parent) {
1895 tc->parent->child = tc;
1897 if (tc->child) {
1898 tc->child->parent = tc;
1901 if (tc->prev) {
1902 tc->prev->next = tc;
1904 if (tc->next) {
1905 tc->next->prev = tc;
1908 if (new_size > old_size) {
1909 talloc_memlimit_grow(tc->limit, new_size - old_size);
1910 } else if (new_size < old_size) {
1911 talloc_memlimit_shrink(tc->limit, old_size - new_size);
1914 tc->size = size;
1915 _talloc_set_name_const(TC_PTR_FROM_CHUNK(tc), name);
1917 return TC_PTR_FROM_CHUNK(tc);
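/*
  Usage sketch (illustrative, not part of the original file): the typed
  talloc_realloc() macro from talloc.h resolves to this function and is the
  usual way to grow or shrink a talloc'ed array; size 0 frees, and a NULL
  pointer allocates.

	int *v = talloc_array(ctx, int, 16);
	v = talloc_realloc(ctx, v, int, 1024);	// grow, keeps existing contents
	v = talloc_realloc(ctx, v, int, 0);	// equivalent to a free, returns NULL
*/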
1921 a wrapper around talloc_steal() for situations where you are moving a pointer
1922 between two structures, and want the old pointer to be set to NULL
1924 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
1926 const void **pptr = discard_const_p(const void *,_pptr);
1927 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
1928 (*pptr) = NULL;
1929 return ret;
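/*
  Usage sketch (illustrative, not part of the original file): talloc_move()
  (the talloc.h macro wrapping this) behaves like talloc_steal() but also
  NULLs the source pointer, which makes the ownership transfer explicit in
  the caller's code. state and tmp_name are stand-in names.

	state->name = talloc_move(state, &tmp_name);
	// tmp_name is now NULL; state->name owns the string
*/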
1932 enum talloc_mem_count_type {
1933 TOTAL_MEM_SIZE,
1934 TOTAL_MEM_BLOCKS,
1935 TOTAL_MEM_LIMIT,
1938 static inline size_t _talloc_total_mem_internal(const void *ptr,
1939 enum talloc_mem_count_type type,
1940 struct talloc_memlimit *old_limit,
1941 struct talloc_memlimit *new_limit)
1943 size_t total = 0;
1944 struct talloc_chunk *c, *tc;
1946 if (ptr == NULL) {
1947 ptr = null_context;
1949 if (ptr == NULL) {
1950 return 0;
1953 tc = talloc_chunk_from_ptr(ptr);
1955 if (old_limit || new_limit) {
1956 if (tc->limit && tc->limit->upper == old_limit) {
1957 tc->limit->upper = new_limit;
1961 /* optimize in the memlimits case */
1962 if (type == TOTAL_MEM_LIMIT &&
1963 tc->limit != NULL &&
1964 tc->limit != old_limit &&
1965 tc->limit->parent == tc) {
1966 return tc->limit->cur_size;
1969 if (tc->flags & TALLOC_FLAG_LOOP) {
1970 return 0;
1973 tc->flags |= TALLOC_FLAG_LOOP;
1975 if (old_limit || new_limit) {
1976 if (old_limit == tc->limit) {
1977 tc->limit = new_limit;
1981 switch (type) {
1982 case TOTAL_MEM_SIZE:
1983 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
1984 total = tc->size;
1986 break;
1987 case TOTAL_MEM_BLOCKS:
1988 total++;
1989 break;
1990 case TOTAL_MEM_LIMIT:
1991 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
1993 * Don't count memory allocated from a pool
1994 * when calculating limits. Only count the
1995 * pool itself.
1997 if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
1998 if (tc->flags & TALLOC_FLAG_POOL) {
2000 * If this is a pool, the allocated
2001 * size is in the pool header, and
2002 * remember to add in the prefix
2003 * length.
2005 struct talloc_pool_hdr *pool_hdr
2006 = talloc_pool_from_chunk(tc);
2007 total = pool_hdr->poolsize +
2008 TC_HDR_SIZE +
2009 TP_HDR_SIZE;
2010 } else {
2011 total = tc->size + TC_HDR_SIZE;
2015 break;
2017 for (c = tc->child; c; c = c->next) {
2018 total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2019 old_limit, new_limit);
2022 tc->flags &= ~TALLOC_FLAG_LOOP;
2024 return total;
2028 return the total size of a talloc pool (subtree)
2030 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2032 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2036 return the total number of blocks in a talloc pool (subtree)
2038 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2040 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2044 return the number of external references to a pointer
2046 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2048 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2049 struct talloc_reference_handle *h;
2050 size_t ret = 0;
2052 for (h=tc->refs;h;h=h->next) {
2053 ret++;
2055 return ret;
2059 report on memory usage by all children of a pointer, giving a full tree view
2061 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2062 void (*callback)(const void *ptr,
2063 int depth, int max_depth,
2064 int is_ref,
2065 void *private_data),
2066 void *private_data)
2068 struct talloc_chunk *c, *tc;
2070 if (ptr == NULL) {
2071 ptr = null_context;
2073 if (ptr == NULL) return;
2075 tc = talloc_chunk_from_ptr(ptr);
2077 if (tc->flags & TALLOC_FLAG_LOOP) {
2078 return;
2081 callback(ptr, depth, max_depth, 0, private_data);
2083 if (max_depth >= 0 && depth >= max_depth) {
2084 return;
2087 tc->flags |= TALLOC_FLAG_LOOP;
2088 for (c=tc->child;c;c=c->next) {
2089 if (c->name == TALLOC_MAGIC_REFERENCE) {
2090 struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2091 callback(h->ptr, depth + 1, max_depth, 1, private_data);
2092 } else {
2093 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2096 tc->flags &= ~TALLOC_FLAG_LOOP;
2099 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2101 const char *name = __talloc_get_name(ptr);
2102 struct talloc_chunk *tc;
2103 FILE *f = (FILE *)_f;
2105 if (is_ref) {
2106 fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2107 return;
2110 tc = talloc_chunk_from_ptr(ptr);
2111 if (tc->limit && tc->limit->parent == tc) {
2112 fprintf(f, "%*s%-30s is a memlimit context"
2113 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2114 depth*4, "",
2115 name,
2116 (unsigned long)tc->limit->max_size,
2117 (unsigned long)tc->limit->cur_size);
2120 if (depth == 0) {
2121 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2122 (max_depth < 0 ? "full " :""), name,
2123 (unsigned long)talloc_total_size(ptr),
2124 (unsigned long)talloc_total_blocks(ptr));
2125 return;
2128 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2129 depth*4, "",
2130 name,
2131 (unsigned long)talloc_total_size(ptr),
2132 (unsigned long)talloc_total_blocks(ptr),
2133 (int)talloc_reference_count(ptr), ptr);
2135 #if 0
2136 fprintf(f, "content: ");
2137 if (talloc_total_size(ptr)) {
2138 int tot = talloc_total_size(ptr);
2139 int i;
2141 for (i = 0; i < tot; i++) {
2142 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2143 fprintf(f, "%c", ((char *)ptr)[i]);
2144 } else {
2145 fprintf(f, "~%02x", ((char *)ptr)[i]);
2149 fprintf(f, "\n");
2150 #endif
2154 report on memory usage by all children of a pointer, giving a full tree view
2156 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2158 if (f) {
2159 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2160 fflush(f);
2165 report on memory usage by all children of a pointer, giving a full tree view
2167 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2169 talloc_report_depth_file(ptr, 0, -1, f);
2173 report on memory usage by all children of a pointer
2175 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2177 talloc_report_depth_file(ptr, 0, 1, f);
2181 report on any memory hanging off the null context
2183 static void talloc_report_null(void)
2185 if (talloc_total_size(null_context) != 0) {
2186 talloc_report(null_context, stderr);
2191 report on any memory hanging off the null context
2193 static void talloc_report_null_full(void)
2195 if (talloc_total_size(null_context) != 0) {
2196 talloc_report_full(null_context, stderr);
2201 enable tracking of the NULL context
2203 _PUBLIC_ void talloc_enable_null_tracking(void)
2205 if (null_context == NULL) {
2206 null_context = _talloc_named_const(NULL, 0, "null_context");
2207 if (autofree_context != NULL) {
2208 talloc_reparent(NULL, null_context, autofree_context);
2214 enable tracking of the NULL context, not moving the autofree context
2215 into the NULL context. This is needed for the talloc testsuite
2217 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2219 if (null_context == NULL) {
2220 null_context = _talloc_named_const(NULL, 0, "null_context");
/*
  disable tracking of the NULL context
*/
_PUBLIC_ void talloc_disable_null_tracking(void)
{
	if (null_context != NULL) {
		/* we have to move any children onto the real NULL
		   context */
		struct talloc_chunk *tc, *tc2;
		tc = talloc_chunk_from_ptr(null_context);
		for (tc2 = tc->child; tc2; tc2=tc2->next) {
			if (tc2->parent == tc) tc2->parent = NULL;
			if (tc2->prev == tc) tc2->prev = NULL;
		}
		for (tc2 = tc->next; tc2; tc2=tc2->next) {
			if (tc2->parent == tc) tc2->parent = NULL;
			if (tc2->prev == tc) tc2->prev = NULL;
		}
		tc->child = NULL;
		tc->next = NULL;
	}
	talloc_free(null_context);
	null_context = NULL;
}

/*
  enable leak reporting on exit
*/
_PUBLIC_ void talloc_enable_leak_report(void)
{
	talloc_enable_null_tracking();
	atexit(talloc_report_null);
}

/*
  enable full leak reporting on exit
*/
_PUBLIC_ void talloc_enable_leak_report_full(void)
{
	talloc_enable_null_tracking();
	atexit(talloc_report_null_full);
}

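/*
  Usage sketch (illustrative only; main() below is a hypothetical
  application entry point): when leak reporting is enabled before the
  first allocation, anything still parented to NULL at exit is listed
  on stderr by the atexit() handler.
*/
#if 0
int main(int argc, char *argv[])
{
	char *leaked;

	talloc_enable_leak_report_full();

	leaked = talloc_strdup(NULL, "never freed");
	(void)leaked;
	(void)argc;
	(void)argv;

	return 0;	/* the leak report is printed at exit */
}
#endif
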
/*
  talloc and zero memory.
*/
_PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
{
	void *p = _talloc_named_const(ctx, size, name);

	if (p) {
		memset(p, '\0', size);
	}

	return p;
}

/*
  memdup with a talloc.
*/
_PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
{
	void *newp = _talloc_named_const(t, size, name);

	if (likely(newp)) {
		memcpy(newp, p, size);
	}

	return newp;
}

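/*
  Usage sketch (illustrative only; "struct foo" and the helper are
  hypothetical): callers normally use the talloc_zero() and
  talloc_memdup() macros from talloc.h, which supply the name argument
  for these functions.
*/
#if 0
struct foo {
	int id;
	char name[32];
};

static struct foo *clone_foo(TALLOC_CTX *mem_ctx, const struct foo *src)
{
	struct foo *blank = talloc_zero(mem_ctx, struct foo);

	if (blank == NULL) {
		return NULL;
	}
	talloc_free(blank);

	return talloc_memdup(mem_ctx, src, sizeof(*src));
}
#endif
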
static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
{
	char *ret;

	ret = (char *)__talloc(t, len + 1);
	if (unlikely(!ret)) return NULL;

	memcpy(ret, p, len);
	ret[len] = 0;

	_talloc_set_name_const(ret, ret);
	return ret;
}

/*
  strdup with a talloc
*/
_PUBLIC_ char *talloc_strdup(const void *t, const char *p)
{
	if (unlikely(!p)) return NULL;
	return __talloc_strlendup(t, p, strlen(p));
}

/*
  strndup with a talloc
*/
_PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
{
	if (unlikely(!p)) return NULL;
	return __talloc_strlendup(t, p, strnlen(p, n));
}

static inline char *__talloc_strlendup_append(char *s, size_t slen,
					      const char *a, size_t alen)
{
	char *ret;

	ret = talloc_realloc(NULL, s, char, slen + alen + 1);
	if (unlikely(!ret)) return NULL;

	/* append the string and the trailing \0 */
	memcpy(&ret[slen], a, alen);
	ret[slen+alen] = 0;

	_talloc_set_name_const(ret, ret);
	return ret;
}

/**
 * Appends at the end of the string.
 */
_PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
{
	if (unlikely(!s)) {
		return talloc_strdup(NULL, a);
	}

	if (unlikely(!a)) {
		return s;
	}

	return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
}

/**
 * Appends at the end of the talloc'ed buffer,
 * not the end of the string.
 */
_PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
{
	size_t slen;

	if (unlikely(!s)) {
		return talloc_strdup(NULL, a);
	}

	if (unlikely(!a)) {
		return s;
	}

	slen = talloc_get_size(s);
	if (likely(slen > 0)) {
		slen--;
	}

	return __talloc_strlendup_append(s, slen, a, strlen(a));
}

/**
 * Appends at the end of the string.
 */
_PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
{
	if (unlikely(!s)) {
		return talloc_strndup(NULL, a, n);
	}

	if (unlikely(!a)) {
		return s;
	}

	return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
}

/**
 * Appends at the end of the talloc'ed buffer,
 * not the end of the string.
 */
_PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
{
	size_t slen;

	if (unlikely(!s)) {
		return talloc_strndup(NULL, a, n);
	}

	if (unlikely(!a)) {
		return s;
	}

	slen = talloc_get_size(s);
	if (likely(slen > 0)) {
		slen--;
	}

	return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
}

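/*
  Usage sketch (illustrative only; the helper and variables are
  hypothetical): the *_append() variants append after the first NUL
  terminator, while the *_append_buffer() variants append at the end of
  the talloc'ed buffer, which matters for buffers holding embedded NULs.
*/
#if 0
static char *greeting(TALLOC_CTX *mem_ctx)
{
	char *s = talloc_strdup(mem_ctx, "hello");

	s = talloc_strdup_append(s, " world");
	if (s == NULL) {
		return NULL;
	}

	return talloc_strdup_append_buffer(s, "!");
}
#endif
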
#ifndef HAVE_VA_COPY
#ifdef HAVE___VA_COPY
#define va_copy(dest, src) __va_copy(dest, src)
#else
#define va_copy(dest, src) (dest) = (src)
#endif
#endif

_PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
{
	int len;
	char *ret;
	va_list ap2;
	char buf[1024];

	/* this call looks strange, but it makes it work on older solaris boxes */
	va_copy(ap2, ap);
	len = vsnprintf(buf, sizeof(buf), fmt, ap2);
	va_end(ap2);
	if (unlikely(len < 0)) {
		return NULL;
	}

	ret = (char *)__talloc(t, len+1);
	if (unlikely(!ret)) return NULL;

	if (len < sizeof(buf)) {
		memcpy(ret, buf, len+1);
	} else {
		va_copy(ap2, ap);
		vsnprintf(ret, len+1, fmt, ap2);
		va_end(ap2);
	}

	_talloc_set_name_const(ret, ret);
	return ret;
}

/*
  Perform string formatting, and return a pointer to newly allocated
  memory holding the result, inside a memory pool.
*/
_PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
{
	va_list ap;
	char *ret;

	va_start(ap, fmt);
	ret = talloc_vasprintf(t, fmt, ap);
	va_end(ap);
	return ret;
}

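/*
  Usage sketch (illustrative only; the helper and its arguments are
  hypothetical): the formatted result is an ordinary talloc pointer,
  parented to the given context and freed along with it.
*/
#if 0
static char *describe_user(TALLOC_CTX *mem_ctx, const char *user, unsigned uid)
{
	return talloc_asprintf(mem_ctx, "user=%s uid=%u", user, uid);
}
#endif
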
static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
						 PRINTF_ATTRIBUTE(3,0);

static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
{
	ssize_t alen;
	va_list ap2;
	char c;

	va_copy(ap2, ap);
	alen = vsnprintf(&c, 1, fmt, ap2);
	va_end(ap2);

	if (alen <= 0) {
		/* Either the vsnprintf failed or the format resulted in
		 * no characters being formatted. In the former case, we
		 * ought to return NULL, in the latter we ought to return
		 * the original string. Most current callers of this
		 * function expect it to never return NULL.
		 */
		return s;
	}

	s = talloc_realloc(NULL, s, char, slen + alen + 1);
	if (!s) return NULL;

	va_copy(ap2, ap);
	vsnprintf(s + slen, alen + 1, fmt, ap2);
	va_end(ap2);

	_talloc_set_name_const(s, s);
	return s;
}

/**
 * Realloc @p s to append the formatted result of @p fmt and @p ap,
 * and return @p s, which may have moved. Good for gradually
 * accumulating output into a string buffer. Appends at the end
 * of the string.
 */
_PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
{
	if (unlikely(!s)) {
		return talloc_vasprintf(NULL, fmt, ap);
	}

	return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
}

/**
 * Realloc @p s to append the formatted result of @p fmt and @p ap,
 * and return @p s, which may have moved. Always appends at the
 * end of the talloc'ed buffer, not the end of the string.
 */
_PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
{
	size_t slen;

	if (unlikely(!s)) {
		return talloc_vasprintf(NULL, fmt, ap);
	}

	slen = talloc_get_size(s);
	if (likely(slen > 0)) {
		slen--;
	}

	return __talloc_vaslenprintf_append(s, slen, fmt, ap);
}

/*
  Realloc @p s to append the formatted result of @p fmt and return @p
  s, which may have moved. Good for gradually accumulating output
  into a string buffer.
*/
_PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	s = talloc_vasprintf_append(s, fmt, ap);
	va_end(ap);
	return s;
}

/*
  Realloc @p s to append the formatted result of @p fmt and return @p
  s, which may have moved. Good for gradually accumulating output
  into a buffer.
*/
_PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	s = talloc_vasprintf_append_buffer(s, fmt, ap);
	va_end(ap);
	return s;
}

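/*
  Usage sketch (illustrative only; the helper and variables are
  hypothetical): gradually accumulating output. The buffer may move on
  every append, so only the returned pointer may be used afterwards and
  a NULL return must be checked.
*/
#if 0
static char *join_args(TALLOC_CTX *mem_ctx, int argc, char *argv[])
{
	char *s = talloc_strdup(mem_ctx, "");
	int i;

	for (i = 0; i < argc && s != NULL; i++) {
		s = talloc_asprintf_append(s, "%s%s", i ? " " : "", argv[i]);
	}
	return s;
}
#endif
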
/*
  alloc an array, checking for integer overflow in the array size
*/
_PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
{
	if (count >= MAX_TALLOC_SIZE/el_size) {
		return NULL;
	}
	return _talloc_named_const(ctx, el_size * count, name);
}

/*
  alloc a zero array, checking for integer overflow in the array size
*/
_PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
{
	if (count >= MAX_TALLOC_SIZE/el_size) {
		return NULL;
	}
	return _talloc_zero(ctx, el_size * count, name);
}

/*
  realloc an array, checking for integer overflow in the array size
*/
_PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
{
	if (count >= MAX_TALLOC_SIZE/el_size) {
		return NULL;
	}
	return _talloc_realloc(ctx, ptr, el_size * count, name);
}

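/*
  Usage sketch (illustrative only; the helper is hypothetical): callers
  normally use the talloc_array(), talloc_zero_array() and
  talloc_realloc() macros from talloc.h, which pass the element size and
  a type-derived name down to these functions.
*/
#if 0
static int *grow_ints(TALLOC_CTX *mem_ctx, int *arr, unsigned new_count)
{
	if (arr == NULL) {
		return talloc_zero_array(mem_ctx, int, new_count);
	}
	return talloc_realloc(mem_ctx, arr, int, new_count);
}
#endif
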
/*
  a function version of talloc_realloc(), so it can be passed as a function pointer
  to libraries that want a realloc function (a realloc function encapsulates
  all the basic capabilities of an allocation library, which is why this is useful)
*/
_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
{
	return _talloc_realloc(context, ptr, size, NULL);
}

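/*
  Usage sketch (illustrative only; "ext_parser_init" and its callback
  type are hypothetical): talloc_realloc_fn() has the usual
  "context, pointer, size" shape, so it can serve as the single
  allocation hook of a library that expects one.
*/
#if 0
typedef void *(*ext_alloc_fn)(const void *ctx, void *ptr, size_t size);

extern void ext_parser_init(ext_alloc_fn fn, const void *ctx);

static void setup_parser(TALLOC_CTX *mem_ctx)
{
	ext_parser_init(talloc_realloc_fn, mem_ctx);
}
#endif
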
static int talloc_autofree_destructor(void *ptr)
{
	autofree_context = NULL;
	return 0;
}

static void talloc_autofree(void)
{
	talloc_free(autofree_context);
}

/*
  return a context which will be auto-freed on exit
  this is useful for reducing the noise in leak reports
*/
_PUBLIC_ void *talloc_autofree_context(void)
{
	if (autofree_context == NULL) {
		autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
		talloc_set_destructor(autofree_context, talloc_autofree_destructor);
		atexit(talloc_autofree);
	}
	return autofree_context;
}

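/*
  Usage sketch (illustrative only; "global_cache" is hypothetical):
  long-lived state parented to the autofree context is released by the
  atexit() handler, so it does not show up in the leak report printed
  at exit.
*/
#if 0
static char *global_cache;

static void init_cache(void)
{
	global_cache = talloc_strdup(talloc_autofree_context(), "cached data");
}
#endif
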
_PUBLIC_ size_t talloc_get_size(const void *context)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		context = null_context;
	}
	if (context == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(context);

	return tc->size;
}

/*
  find a parent of this context that has the given name, if any
*/
_PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return NULL;
	}

	tc = talloc_chunk_from_ptr(context);
	while (tc) {
		if (tc->name && strcmp(tc->name, name) == 0) {
			return TC_PTR_FROM_CHUNK(tc);
		}
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
		}
	}
	return NULL;
}

/*
  show the parentage of a context
*/
_PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		fprintf(file, "talloc no parents for NULL\n");
		return;
	}

	tc = talloc_chunk_from_ptr(context);
	fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
	while (tc) {
		fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
		}
	}
	fflush(file);
}

/*
  return 1 if ptr is a parent of context
*/
static int _talloc_is_parent(const void *context, const void *ptr, int depth)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(context);
	while (tc) {
		if (depth <= 0) {
			return 0;
		}
		if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
			depth--;
		}
	}
	return 0;
}

/*
  return 1 if ptr is a parent of context
*/
_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
{
	return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
}

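/*
  Usage sketch (illustrative only; the helper is hypothetical):
  talloc_is_parent(context, ptr) asks whether ptr is an ancestor of
  context (or context itself), i.e. whether freeing ptr would also free
  context; talloc_find_parent_byname() walks the same parent chain
  looking for a context name.
*/
#if 0
static void inspect(const void *obj, const void *ctx)
{
	void *conn = talloc_find_parent_byname(obj, "connection");

	if (conn != NULL) {
		/* obj hangs somewhere below a context named "connection" */
	}

	if (talloc_is_parent(obj, ctx)) {
		/* freeing ctx would also free obj */
	}
}
#endif
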
/*
  return the total size of memory used by this context and all children
*/
static inline size_t _talloc_total_limit_size(const void *ptr,
					      struct talloc_memlimit *old_limit,
					      struct talloc_memlimit *new_limit)
{
	return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
					  old_limit, new_limit);
}

static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
{
	struct talloc_memlimit *l;

	for (l = limit; l != NULL; l = l->upper) {
		if (l->max_size != 0 &&
		    ((l->max_size <= l->cur_size) ||
		     (l->max_size - l->cur_size < size))) {
			return false;
		}
	}

	return true;
}

/*
  Update memory limits when freeing a talloc_chunk.
*/
static void talloc_memlimit_update_on_free(struct talloc_chunk *tc)
{
	size_t limit_shrink_size;

	if (!tc->limit) {
		return;
	}

	/*
	 * Pool entries don't count. Only the pools
	 * themselves are counted as part of the memory
	 * limits. Note that this also takes care of
	 * nested pools which have both flags
	 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
	 */
	if (tc->flags & TALLOC_FLAG_POOLMEM) {
		return;
	}

	/*
	 * If we are part of a memory limited context hierarchy
	 * we need to subtract the memory used from the counters
	 */

	limit_shrink_size = tc->size+TC_HDR_SIZE;

	/*
	 * If we're deallocating a pool, take into
	 * account the prefix size added for the pool.
	 */

	if (tc->flags & TALLOC_FLAG_POOL) {
		limit_shrink_size += TP_HDR_SIZE;
	}

	talloc_memlimit_shrink(tc->limit, limit_shrink_size);

	if (tc->limit->parent == tc) {
		free(tc->limit);
	}

	tc->limit = NULL;
}

/*
  Increase memory limit accounting after a malloc/realloc.
*/
static void talloc_memlimit_grow(struct talloc_memlimit *limit,
				 size_t size)
{
	struct talloc_memlimit *l;

	for (l = limit; l != NULL; l = l->upper) {
		size_t new_cur_size = l->cur_size + size;
		if (new_cur_size < l->cur_size) {
			talloc_abort("logic error in talloc_memlimit_grow\n");
			return;
		}
		l->cur_size = new_cur_size;
	}
}

/*
  Decrease memory limit accounting after a free/realloc.
*/
static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
				   size_t size)
{
	struct talloc_memlimit *l;

	for (l = limit; l != NULL; l = l->upper) {
		if (l->cur_size < size) {
			talloc_abort("logic error in talloc_memlimit_shrink\n");
			return;
		}
		l->cur_size = l->cur_size - size;
	}
}

_PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
{
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
	struct talloc_memlimit *orig_limit;
	struct talloc_memlimit *limit = NULL;

	if (tc->limit && tc->limit->parent == tc) {
		tc->limit->max_size = max_size;
		return 0;
	}
	orig_limit = tc->limit;

	limit = malloc(sizeof(struct talloc_memlimit));
	if (limit == NULL) {
		return 1;
	}
	limit->parent = tc;
	limit->max_size = max_size;
	limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);

	if (orig_limit) {
		limit->upper = orig_limit;
	} else {
		limit->upper = NULL;
	}

	return 0;
}

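/*
  Usage sketch (illustrative only; the helper is hypothetical): once a
  limit is set on a context, allocations below it that would push the
  accounted total past max_size fail and return NULL.
*/
#if 0
static void limited_example(void)
{
	TALLOC_CTX *pool = talloc_new(NULL);
	char *big;

	if (pool == NULL || talloc_set_memlimit(pool, 1024) != 0) {
		talloc_free(pool);
		return;
	}

	big = talloc_array(pool, char, 4096);	/* larger than the limit */
	if (big == NULL) {
		/* the allocation was refused by the memory limit */
	}

	talloc_free(pool);
}
#endif
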