lib: talloc: Remove the ALWAYS_REALLOC code paths.
[Samba.git] / lib / talloc / talloc.c
blob 29da190880ab05eec91b8461d2501502985243cf
1 /*
2 Samba Unix SMB/CIFS implementation.
4 Samba trivial allocation library - new interface
6 NOTE: Please read talloc_guide.txt for full documentation
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
13 ** under the LGPL
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
30 inspired by http://swapped.cc/halloc/
33 #include "replace.h"
34 #include "talloc.h"
36 #ifdef HAVE_SYS_AUXV_H
37 #include <sys/auxv.h>
38 #endif
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
42 #endif
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
46 #endif
48 /* Special macros that are no-ops except when run under Valgrind on
49 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51 /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
54 #include <valgrind.h>
55 #endif
/* Largest single allocation talloc will attempt (256MB). */
#define MAX_TALLOC_SIZE 0x10000000

/* Low four bits of a chunk's flags word are state bits. */
#define TALLOC_FLAG_FREE 0x01
#define TALLOC_FLAG_LOOP 0x02
#define TALLOC_FLAG_POOL 0x04		/* This is a talloc pool */
#define TALLOC_FLAG_POOLMEM 0x08	/* This is allocated in a pool */

/*
 * Bits above this are random, used to make it harder to fake talloc
 * headers during an attack. Try not to change this without good reason.
 */
#define TALLOC_FLAG_MASK 0x0F

/* Sentinel "name" marking a chunk as a reference handle (see talloc_reference()). */
#define TALLOC_MAGIC_REFERENCE ((const char *)1)

#define TALLOC_MAGIC_BASE 0xe814ec70
/*
 * Version-derived, non-random magic. Used to stamp freed chunks and as the
 * fallback magic on platforms without constructor support; talloc_lib_init()
 * replaces talloc_magic with a randomised value at load time where possible.
 */
#define TALLOC_MAGIC_NON_RANDOM ( \
	~TALLOC_FLAG_MASK & ( \
		TALLOC_MAGIC_BASE + \
		(TALLOC_BUILD_VERSION_MAJOR << 24) + \
		(TALLOC_BUILD_VERSION_MINOR << 16) + \
		(TALLOC_BUILD_VERSION_RELEASE << 8)))
static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
/* by default we abort when given a bad pointer (such as when talloc_free() is called
   on a pointer that came from malloc() */
#ifndef TALLOC_ABORT
#define TALLOC_ABORT(reason) abort()
#endif

/* Cast away const without a compiler warning, via intptr_t when available. */
#ifndef discard_const_p
#if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
# define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
#else
# define discard_const_p(type, ptr) ((type *)(ptr))
#endif
#endif

/* these macros gain us a few percent of speed on gcc */
#if (__GNUC__ >= 3)
/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
   as its first argument */
#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#else
#ifndef likely
#define likely(x) (x)
#endif
#ifndef unlikely
#define unlikely(x) (x)
#endif
#endif
/* this null_context is only used if talloc_enable_leak_report() or
   talloc_enable_leak_report_full() is called, otherwise it remains
   NULL
*/
static void *null_context;
/* Which flavour of leak report to print at exit (set by the enable calls). */
static bool talloc_report_null;
static bool talloc_report_null_full;
/* Context freed automatically at process exit (see talloc_lib_atexit()). */
static void *autofree_context;

static void talloc_setup_atexit(void);

/* used to enable fill of memory on free, which can be useful for
 * catching use after free errors when valgrind is too slow
 */
static struct {
	bool initialised;	/* getenv() lookup done (lazily, on first free) */
	bool enabled;		/* TALLOC_FREE_FILL was set in the environment */
	uint8_t fill_value;	/* byte written over freed memory */
} talloc_fill;

#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
/*
 * do not wipe the header, to allow the
 * double-free logic to still work
 */
#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size; \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the whole chunk as not accessable */
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
	size_t _flen = TC_HDR_SIZE + (_tc)->size; \
	char *_fptr = (char *)(_tc); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while(0)
#else
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
#endif

/* Invalidate an entire freed chunk: optional fill, then valgrind poisoning. */
#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
	TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
	TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
} while (0)

/* Fill only the tail bytes beyond _new_size after a shrink. */
#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the unused bytes not accessable */
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while (0)
#else
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

/* Like the SHRINK variants, but the tail becomes "undefined", not "noaccess". */
#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the unused bytes as undefined */
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the new bytes as undefined */
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
	size_t _new_used = TC_HDR_SIZE + (_new_size); \
	size_t _flen = _new_used - _old_used; \
	char *_fptr = _old_used + (char *)(_tc); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)
/* One secondary reference created by talloc_reference(); lives on tc->refs. */
struct talloc_reference_handle {
	struct talloc_reference_handle *next, *prev;
	void *ptr;		/* the referenced pointer */
	const char *location;	/* source location that created the reference */
};

struct talloc_memlimit {
	struct talloc_chunk *parent;	/* context this limit is attached to */
	struct talloc_memlimit *upper;	/* next enclosing limit, if any */
	size_t max_size;
	size_t cur_size;
};

static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
				size_t size);
static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
				size_t size);
static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);

static inline void _tc_set_name_const(struct talloc_chunk *tc,
				const char *name);
static struct talloc_chunk *_vasprintf_tc(const void *t,
				const char *fmt,
				va_list ap);

typedef int (*talloc_destructor_t)(void *);

struct talloc_pool_hdr;

/* Header placed immediately before every talloc allocation. */
struct talloc_chunk {
	/*
	 * flags includes the talloc magic, which is randomised to
	 * make overwrite attacks harder
	 */
	unsigned flags;

	/*
	 * If you have a logical tree like:
	 *
	 *           <parent>
	 *           /   |   \
	 *          /    |    \
	 *         /     |     \
	 * <child 1> <child 2> <child 3>
	 *
	 * The actual talloc tree is:
	 *
	 *  <parent>
	 *     |
	 *  <child 1> - <child 2> - <child 3>
	 *
	 * The children are linked with next/prev pointers, and
	 * child 1 is linked to the parent with parent/child
	 * pointers.
	 */

	struct talloc_chunk *next, *prev;
	struct talloc_chunk *parent, *child;
	struct talloc_reference_handle *refs;
	talloc_destructor_t destructor;
	const char *name;
	size_t size;

	/*
	 * limit semantics:
	 * if 'limit' is set it means all *new* children of the context will
	 * be limited to a total aggregate size of max_size for memory
	 * allocations.
	 * cur_size is used to keep track of the current use
	 */
	struct talloc_memlimit *limit;

	/*
	 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
	 * is a pointer to the struct talloc_pool_hdr of the pool that it was
	 * allocated from. This way children can quickly find the pool to chew
	 * from.
	 */
	struct talloc_pool_hdr *pool;
};

/* Converts between raw bytes and chunk headers without pointer-cast warnings. */
union talloc_chunk_cast_u {
	uint8_t *ptr;
	struct talloc_chunk *chunk;
};

/* 16 byte alignment seems to keep everyone happy */
#define TC_ALIGN16(s) (((s)+15)&~15)
#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
/* Report the major version this library was compiled against. */
_PUBLIC_ int talloc_version_major(void)
{
	return TALLOC_VERSION_MAJOR;
}

/* Report the minor version this library was compiled against. */
_PUBLIC_ int talloc_version_minor(void)
{
	return TALLOC_VERSION_MINOR;
}

/* Test hook: expose the (possibly randomised) talloc magic value. */
_PUBLIC_ int talloc_test_get_magic(void)
{
	return talloc_magic;
}
/* Stamp a chunk as freed and record where the free happened. */
static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
					  const char *location)
{
	/*
	 * Mark this memory as free, and also over-stamp the talloc
	 * magic with the old-style magic.
	 *
	 * Why? This tries to avoid a memory read use-after-free from
	 * disclosing our talloc magic, which would then allow an
	 * attacker to prepare a valid header and so run a destructor.
	 */
	tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
		| (tc->flags & TALLOC_FLAG_MASK);

	/* we mark the freed memory with where we called the free
	 * from. This means on a double free error we can report where
	 * the first free came from
	 */
	if (location) {
		tc->name = location;
	}
}
/* Re-stamp a chunk as live (inverse of _talloc_chunk_set_free()). */
static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
{
	/*
	 * Mark this memory as not free.
	 *
	 * Why? This is memory either in a pool (and so available for
	 * talloc's re-use or after the realloc(). We need to mark
	 * the memory as free() before any realloc() call as we can't
	 * write to the memory after that.
	 *
	 * We put back the normal magic instead of the 'not random'
	 * magic.
	 */
	tc->flags = talloc_magic |
		((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
}
/* User-installed log callback; NULL disables talloc_log() output. */
static void (*talloc_log_fn)(const char *message);

_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
{
	talloc_log_fn = log_fn;
}
/*
 * Library constructor: randomise talloc_magic at load time so talloc
 * headers are harder to forge. Falls back to the fixed magic when the
 * platform offers no constructor mechanism.
 */
#ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
#define CONSTRUCTOR __attribute__((constructor))
#elif defined(HAVE_PRAGMA_INIT)
#define CONSTRUCTOR
#pragma init (talloc_lib_init)
#endif
#if defined(HAVE_CONSTRUCTOR_ATTRIBUTE) || defined(HAVE_PRAGMA_INIT)
void talloc_lib_init(void) CONSTRUCTOR;
void talloc_lib_init(void)
{
	uint32_t random_value;
#if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
	uint8_t *p;
	/*
	 * Use the kernel-provided random values used for
	 * ASLR. This won't change per-exec, which is ideal for us
	 */
	p = (uint8_t *) getauxval(AT_RANDOM);
	if (p) {
		/*
		 * We get 16 bytes from getauxval. By calling rand(),
		 * a totally insecure PRNG, but one that will
		 * deterministically have a different value when called
		 * twice, we ensure that if two talloc-like libraries
		 * are somehow loaded in the same address space, that
		 * because we choose different bytes, we will keep the
		 * protection against collision of multiple talloc
		 * libs.
		 *
		 * This protection is important because the effects of
		 * passing a talloc pointer from one to the other may
		 * be very hard to determine.
		 */
		int offset = rand() % (16 - sizeof(random_value));
		memcpy(&random_value, p + offset, sizeof(random_value));
	} else
#endif
	{
		/*
		 * Otherwise, hope the location we are loaded in
		 * memory is randomised by someone else
		 */
		random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
	}
	/* keep the low state bits free for the flag values */
	talloc_magic = random_value & ~TALLOC_FLAG_MASK;
}
#else
#warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
#endif
439 static void talloc_lib_atexit(void)
441 TALLOC_FREE(autofree_context);
443 if (talloc_total_size(null_context) == 0) {
444 return;
447 if (talloc_report_null_full) {
448 talloc_report_full(null_context, stderr);
449 } else if (talloc_report_null) {
450 talloc_report(null_context, stderr);
454 static void talloc_setup_atexit(void)
456 static bool done;
458 if (done) {
459 return;
462 atexit(talloc_lib_atexit);
463 done = true;
466 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
467 static void talloc_log(const char *fmt, ...)
469 va_list ap;
470 char *message;
472 if (!talloc_log_fn) {
473 return;
476 va_start(ap, fmt);
477 message = talloc_vasprintf(NULL, fmt, ap);
478 va_end(ap);
480 talloc_log_fn(message);
481 talloc_free(message);
484 static void talloc_log_stderr(const char *message)
486 fprintf(stderr, "%s", message);
489 _PUBLIC_ void talloc_set_log_stderr(void)
491 talloc_set_log_fn(talloc_log_stderr);
494 static void (*talloc_abort_fn)(const char *reason);
496 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
498 talloc_abort_fn = abort_fn;
501 static void talloc_abort(const char *reason)
503 talloc_log("%s\n", reason);
505 if (!talloc_abort_fn) {
506 TALLOC_ABORT(reason);
509 talloc_abort_fn(reason);
/* Abort paths used by talloc_chunk_from_ptr() when magic validation fails. */
static void talloc_abort_access_after_free(void)
{
	talloc_abort("Bad talloc magic value - access after free");
}

static void talloc_abort_unknown_value(void)
{
	talloc_abort("Bad talloc magic value - unknown value");
}
/* panic if we get a bad magic value */
static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
{
	const char *pp = (const char *)ptr;
	/* the header sits TC_HDR_SIZE bytes before the user pointer */
	struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
	if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
		/*
		 * A freed chunk carries TALLOC_MAGIC_NON_RANDOM plus the
		 * FREE bit (see _talloc_chunk_set_free()); detect that so
		 * we can report a use-after-free distinctly, including the
		 * location of the first free stored in tc->name.
		 */
		if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
		    == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
			talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
			talloc_abort_access_after_free();
			return NULL;
		}

		talloc_abort_unknown_value();
		return NULL;
	}
	return tc;
}
/* hook into the front of the list */
#define _TLIST_ADD(list, p) \
do { \
	if (!(list)) { \
		(list) = (p); \
		(p)->next = (p)->prev = NULL; \
	} else { \
		(list)->prev = (p); \
		(p)->next = (list); \
		(p)->prev = NULL; \
		(list) = (p); \
	} \
} while (0)

/* remove an element from a list - element doesn't have to be in list. */
#define _TLIST_REMOVE(list, p) \
do { \
	if ((p) == (list)) { \
		(list) = (p)->next; \
		if (list) (list)->prev = NULL; \
	} else { \
		if ((p)->prev) (p)->prev->next = (p)->next; \
		if ((p)->next) (p)->next->prev = (p)->prev; \
	} \
	/* detach p's own links unless it is still the list head */ \
	if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
} while (0)
570 return the parent chunk of a pointer
572 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
574 struct talloc_chunk *tc;
576 if (unlikely(ptr == NULL)) {
577 return NULL;
580 tc = talloc_chunk_from_ptr(ptr);
581 while (tc->prev) tc=tc->prev;
583 return tc->parent;
586 _PUBLIC_ void *talloc_parent(const void *ptr)
588 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
589 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
593 find parents name
595 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
597 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
598 return tc? tc->name : NULL;
/*
  A pool carries an in-pool object count in the first 16 bytes.
  This is done to support talloc_steal() to a parent outside of the
  pool. The count includes the pool itself, so a talloc_free() on a pool will
  only destroy the pool if the count has dropped to zero. A talloc_free() of a
  pool member will reduce the count, and eventually also call free(3) on the
  pool memory.

  The object count is not put into "struct talloc_chunk" because it is only
  relevant for talloc pools and the alignment to 16 bytes would increase the
  memory footprint of each talloc chunk by those 16 bytes.
*/

struct talloc_pool_hdr {
	void *end;			/* first unallocated byte in the pool */
	unsigned int object_count;	/* live objects incl. the pool itself */
	size_t poolsize;		/* usable bytes after the pool's chunk header */
};

/* Converts between raw bytes and pool headers without pointer-cast warnings. */
union talloc_pool_hdr_cast_u {
	uint8_t *ptr;
	struct talloc_pool_hdr *hdr;
};

#define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
/*
 * The pool header is placed TP_HDR_SIZE bytes before the pool's own
 * talloc_chunk; the two helpers below convert between the two views.
 * Unions are used instead of pointer casts to sidestep cast warnings.
 */
static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
{
	union talloc_chunk_cast_u tcc = { .chunk = c };
	union talloc_pool_hdr_cast_u tphc = { tcc.ptr - TP_HDR_SIZE };
	return tphc.hdr;
}

static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
{
	union talloc_pool_hdr_cast_u tphc = { .hdr = h };
	union talloc_chunk_cast_u tcc = { .ptr = tphc.ptr + TP_HDR_SIZE };
	return tcc.chunk;
}
641 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
643 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
644 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
647 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
649 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
652 /* If tc is inside a pool, this gives the next neighbour. */
653 static inline void *tc_next_chunk(struct talloc_chunk *tc)
655 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
658 static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
660 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
661 return tc_next_chunk(tc);
/* Mark the whole remaining pool as not accessable */
static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
{
	size_t flen = tc_pool_space_left(pool_hdr);

	/* optional debug fill of the unused tail (see TALLOC_FREE_FILL) */
	if (unlikely(talloc_fill.enabled)) {
		memset(pool_hdr->end, talloc_fill.fill_value, flen);
	}

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
	VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
#endif
}
/*
   Allocate from a pool
*/
static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
						 size_t size, size_t prefix_len)
{
	struct talloc_pool_hdr *pool_hdr = NULL;
	union talloc_chunk_cast_u tcc;
	size_t space_left;
	struct talloc_chunk *result;
	size_t chunk_size;

	if (parent == NULL) {
		return NULL;
	}

	/* find the pool: either the parent is the pool, or it is pool member */
	if (parent->flags & TALLOC_FLAG_POOL) {
		pool_hdr = talloc_pool_from_chunk(parent);
	}
	else if (parent->flags & TALLOC_FLAG_POOLMEM) {
		pool_hdr = parent->pool;
	}

	if (pool_hdr == NULL) {
		/* not pool-backed; caller falls back to malloc() */
		return NULL;
	}

	space_left = tc_pool_space_left(pool_hdr);

	/*
	 * Align size to 16 bytes
	 */
	chunk_size = TC_ALIGN16(size + prefix_len);

	if (space_left < chunk_size) {
		return NULL;
	}

	/* the chunk header starts after any prefix (e.g. a nested pool header) */
	tcc = (union talloc_chunk_cast_u) {
		.ptr = ((uint8_t *)pool_hdr->end) + prefix_len
	};
	result = tcc.chunk;

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
#endif

	pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);

	result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
	result->pool = pool_hdr;

	pool_hdr->object_count++;

	return result;
}
/*
   Allocate a bit of memory as a child of an existing pointer
*/
static inline void *__talloc_with_prefix(const void *context,
					 size_t size,
					 size_t prefix_len,
					 struct talloc_chunk **tc_ret)
{
	struct talloc_chunk *tc = NULL;
	struct talloc_memlimit *limit = NULL;
	size_t total_len = TC_HDR_SIZE + size + prefix_len;
	struct talloc_chunk *parent = NULL;

	if (unlikely(context == NULL)) {
		context = null_context;
	}

	if (unlikely(size >= MAX_TALLOC_SIZE)) {
		return NULL;
	}

	/* reject arithmetic overflow in total_len */
	if (unlikely(total_len < TC_HDR_SIZE)) {
		return NULL;
	}

	if (likely(context != NULL)) {
		parent = talloc_chunk_from_ptr(context);

		if (parent->limit != NULL) {
			limit = parent->limit;
		}

		/* try to carve the chunk out of the parent's pool first */
		tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
	}

	if (tc == NULL) {
		uint8_t *ptr = NULL;
		union talloc_chunk_cast_u tcc;

		/*
		 * Only do the memlimit check/update on actual allocation.
		 */
		if (!talloc_memlimit_check(limit, total_len)) {
			errno = ENOMEM;
			return NULL;
		}

		ptr = malloc(total_len);
		if (unlikely(ptr == NULL)) {
			return NULL;
		}
		/* the chunk header lives after the optional prefix */
		tcc = (union talloc_chunk_cast_u) { .ptr = ptr + prefix_len };
		tc = tcc.chunk;
		tc->flags = talloc_magic;
		tc->pool = NULL;

		talloc_memlimit_grow(limit, total_len);
	}

	tc->limit = limit;
	tc->size = size;
	tc->destructor = NULL;
	tc->child = NULL;
	tc->name = NULL;
	tc->refs = NULL;

	if (likely(context != NULL)) {
		/* prepend to the parent's child list */
		if (parent->child) {
			parent->child->parent = NULL;
			tc->next = parent->child;
			tc->next->prev = tc;
		} else {
			tc->next = NULL;
		}
		tc->parent = parent;
		tc->prev = NULL;
		parent->child = tc;
	} else {
		tc->next = tc->prev = tc->parent = NULL;
	}

	*tc_ret = tc;
	return TC_PTR_FROM_CHUNK(tc);
}

/* Common case: allocate with no prefix before the chunk header. */
static inline void *__talloc(const void *context,
			     size_t size,
			     struct talloc_chunk **tc)
{
	return __talloc_with_prefix(context, size, 0, tc);
}
/*
 * Create a talloc pool
 */
static inline void *_talloc_pool(const void *context, size_t size)
{
	struct talloc_chunk *tc;
	struct talloc_pool_hdr *pool_hdr;
	void *result;

	/* reserve TP_HDR_SIZE bytes in front of the chunk for the pool header */
	result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);

	if (unlikely(result == NULL)) {
		return NULL;
	}

	pool_hdr = talloc_pool_from_chunk(tc);

	tc->flags |= TALLOC_FLAG_POOL;
	/* the pool chunk itself reports size 0; poolsize tracks the capacity */
	tc->size = 0;

	pool_hdr->object_count = 1;	/* counts the pool itself */
	pool_hdr->end = result;
	pool_hdr->poolsize = size;

	tc_invalidate_pool(pool_hdr);

	return result;
}

_PUBLIC_ void *talloc_pool(const void *context, size_t size)
{
	return _talloc_pool(context, size);
}
/*
 * Create a talloc pool correctly sized for a basic size plus
 * a number of subobjects whose total size is given. Essentially
 * a custom allocator for talloc to reduce fragmentation.
 */
_PUBLIC_ void *_talloc_pooled_object(const void *ctx,
				     size_t type_size,
				     const char *type_name,
				     unsigned num_subobjects,
				     size_t total_subobjects_size)
{
	size_t poolsize, subobjects_slack, tmp;
	struct talloc_chunk *tc;
	struct talloc_pool_hdr *pool_hdr;
	void *ret;

	poolsize = type_size + total_subobjects_size;

	/* each addition below is checked for size_t wrap-around */
	if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
		goto overflow;
	}

	if (num_subobjects == UINT_MAX) {
		goto overflow;
	}
	num_subobjects += 1; /* the object body itself */

	/*
	 * Alignment can increase the pool size by at most 15 bytes per object
	 * plus alignment for the object itself
	 */
	subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
	if (subobjects_slack < num_subobjects) {
		goto overflow;
	}

	tmp = poolsize + subobjects_slack;
	if ((tmp < poolsize) || (tmp < subobjects_slack)) {
		goto overflow;
	}
	poolsize = tmp;

	ret = _talloc_pool(ctx, poolsize);
	if (ret == NULL) {
		return NULL;
	}

	/* the pool chunk doubles as the object itself: give it a real size */
	tc = talloc_chunk_from_ptr(ret);
	tc->size = type_size;

	pool_hdr = talloc_pool_from_chunk(tc);

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
#endif

	pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));

	_tc_set_name_const(tc, type_name);
	return ret;

overflow:
	return NULL;
}
/*
  setup a destructor to be called on free of a pointer
  the destructor should return 0 on success, or -1 on failure.
  if the destructor fails then the free is failed, and the memory can
  be continued to be used
*/
_PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
{
	/* talloc_chunk_from_ptr() aborts on an invalid pointer */
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
	tc->destructor = destructor;
}
942 increase the reference count on a piece of memory.
944 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
946 if (unlikely(!talloc_reference(null_context, ptr))) {
947 return -1;
949 return 0;
/*
  helper for talloc_reference()

  this is referenced by a function pointer and should not be inline
*/
static int talloc_reference_destructor(struct talloc_reference_handle *handle)
{
	/* unlink the handle from the referenced chunk's refs list */
	struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
	_TLIST_REMOVE(ptr_tc->refs, handle);
	return 0;
}

/*
  more efficient way to add a name to a pointer - the name must point to a
  true string constant
*/
static inline void _tc_set_name_const(struct talloc_chunk *tc,
				      const char *name)
{
	tc->name = name;
}
/*
  internal talloc_named_const()
*/
static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
{
	struct talloc_chunk *chunk = NULL;
	void *result = __talloc(context, size, &chunk);

	if (unlikely(result == NULL)) {
		return NULL;
	}
	_tc_set_name_const(chunk, name);

	return result;
}
/*
  make a secondary reference to a pointer, hanging off the given context.
  the pointer remains valid until both the original caller and this given
  context are freed.

  the major use for this is when two different structures need to reference the
  same underlying data, and you want to be able to free the two instances separately,
  and in either order
*/
_PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
{
	struct talloc_chunk *tc;
	struct talloc_reference_handle *handle;
	if (unlikely(ptr == NULL)) return NULL;

	tc = talloc_chunk_from_ptr(ptr);
	/* the handle is itself a talloc chunk, named with the sentinel magic */
	handle = (struct talloc_reference_handle *)_talloc_named_const(context,
						   sizeof(struct talloc_reference_handle),
						   TALLOC_MAGIC_REFERENCE);
	if (unlikely(handle == NULL)) return NULL;

	/* note that we hang the destructor off the handle, not the
	   main context as that allows the caller to still setup their
	   own destructor on the context if they want to */
	talloc_set_destructor(handle, talloc_reference_destructor);
	handle->ptr = discard_const_p(void, ptr);
	handle->location = location;
	_TLIST_ADD(tc->refs, handle);
	return handle->ptr;
}
static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);

/*
 * Free a chunk that was allocated inside a talloc pool: decrement the pool's
 * object count, reclaim trailing space where possible, and release the pool
 * itself once the last object is gone.
 */
static inline void _tc_free_poolmem(struct talloc_chunk *tc,
				    const char *location)
{
	struct talloc_pool_hdr *pool;
	struct talloc_chunk *pool_tc;
	void *next_tc;

	pool = tc->pool;
	pool_tc = talloc_chunk_from_pool(pool);
	next_tc = tc_next_chunk(tc);

	_talloc_chunk_set_free(tc, location);

	TC_INVALIDATE_FULL_CHUNK(tc);

	if (unlikely(pool->object_count == 0)) {
		talloc_abort("Pool object count zero!");
		return;
	}

	pool->object_count--;

	if (unlikely(pool->object_count == 1
		     && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
		/*
		 * if there is just one object left in the pool
		 * and pool->flags does not have TALLOC_FLAG_FREE,
		 * it means this is the pool itself and
		 * the rest is available for new objects
		 * again.
		 */
		pool->end = tc_pool_first_chunk(pool);
		tc_invalidate_pool(pool);
		return;
	}

	if (unlikely(pool->object_count == 0)) {
		/*
		 * we mark the freed memory with where we called the free
		 * from. This means on a double free error we can report where
		 * the first free came from
		 */
		pool_tc->name = location;

		if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
			/* nested pool: recurse into the enclosing pool */
			_tc_free_poolmem(pool_tc, location);
		} else {
			/*
			 * The tc_memlimit_update_on_free()
			 * call takes into account the
			 * prefix TP_HDR_SIZE allocated before
			 * the pool talloc_chunk.
			 */
			tc_memlimit_update_on_free(pool_tc);
			TC_INVALIDATE_FULL_CHUNK(pool_tc);
			free(pool);
		}
		return;
	}

	if (pool->end == next_tc) {
		/*
		 * if pool->end still points to the end of
		 * 'tc' (which is stored in the 'next_tc' variable),
		 * we can reclaim the memory of 'tc'.
		 */
		pool->end = tc;
		return;
	}

	/*
	 * Do nothing. The memory is just "wasted", waiting for the pool
	 * itself to be freed.
	 */
}
static inline void _tc_free_children_internal(struct talloc_chunk *tc,
					      void *ptr,
					      const char *location);

static inline int _talloc_free_internal(void *ptr, const char *location);

/*
   internal free call that takes a struct talloc_chunk *.
   Returns 0 on success, -1 if the free was refused (live references,
   or a destructor that failed).
*/
static inline int _tc_free_internal(struct talloc_chunk *tc,
				    const char *location)
{
	void *ptr_to_free;
	void *ptr = TC_PTR_FROM_CHUNK(tc);

	if (unlikely(tc->refs)) {
		int is_child;
		/* check if this is a reference from a child or
		 * grandchild back to it's parent or grandparent
		 *
		 * in that case we need to remove the reference and
		 * call another instance of talloc_free() on the current
		 * pointer.
		 */
		is_child = talloc_is_parent(tc->refs, ptr);
		_talloc_free_internal(tc->refs, location);
		if (is_child) {
			return _talloc_free_internal(ptr, location);
		}
		return -1;
	}

	if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
		/* we have a free loop - stop looping */
		return 0;
	}

	if (unlikely(tc->destructor)) {
		talloc_destructor_t d = tc->destructor;

		/*
		 * Protect the destructor against some overwrite
		 * attacks, by explicitly checking it has the right
		 * magic here.
		 */
		if (talloc_chunk_from_ptr(ptr) != tc) {
			/*
			 * This can't actually happen, the
			 * call itself will panic.
			 */
			TALLOC_ABORT("talloc_chunk_from_ptr failed!");
		}

		/* (talloc_destructor_t)-1 means "destructor is running" */
		if (d == (talloc_destructor_t)-1) {
			return -1;
		}
		tc->destructor = (talloc_destructor_t)-1;
		if (d(ptr) == -1) {
			/*
			 * Only replace the destructor pointer if
			 * calling the destructor didn't modify it.
			 */
			if (tc->destructor == (talloc_destructor_t)-1) {
				tc->destructor = d;
			}
			return -1;
		}
		tc->destructor = NULL;
	}

	/* unlink from parent/sibling lists */
	if (tc->parent) {
		_TLIST_REMOVE(tc->parent->child, tc);
		if (tc->parent->child) {
			tc->parent->child->parent = tc->parent;
		}
	} else {
		if (tc->prev) tc->prev->next = tc->next;
		if (tc->next) tc->next->prev = tc->prev;
		tc->prev = tc->next = NULL;
	}

	/* guard against recursion back into this chunk while freeing children */
	tc->flags |= TALLOC_FLAG_LOOP;

	_tc_free_children_internal(tc, ptr, location);

	_talloc_chunk_set_free(tc, location);

	if (tc->flags & TALLOC_FLAG_POOL) {
		struct talloc_pool_hdr *pool;

		pool = talloc_pool_from_chunk(tc);

		if (unlikely(pool->object_count == 0)) {
			talloc_abort("Pool object count zero!");
			return 0;
		}

		pool->object_count--;

		if (likely(pool->object_count != 0)) {
			/* members still alive: defer freeing the pool memory */
			return 0;
		}

		/*
		 * With object_count==0, a pool becomes a normal piece of
		 * memory to free. If it's allocated inside a pool, it needs
		 * to be freed as poolmem, else it needs to be just freed.
		 */
		ptr_to_free = pool;
	} else {
		ptr_to_free = tc;
	}

	if (tc->flags & TALLOC_FLAG_POOLMEM) {
		_tc_free_poolmem(tc, location);
		return 0;
	}

	tc_memlimit_update_on_free(tc);

	TC_INVALIDATE_FULL_CHUNK(tc);
	free(ptr_to_free);
	return 0;
}
1227 internal talloc_free call
1229 static inline int _talloc_free_internal(void *ptr, const char *location)
1231 struct talloc_chunk *tc;
1233 if (unlikely(ptr == NULL)) {
1234 return -1;
1237 /* possibly initialised the talloc fill value */
1238 if (unlikely(!talloc_fill.initialised)) {
1239 const char *fill = getenv(TALLOC_FILL_ENV);
1240 if (fill != NULL) {
1241 talloc_fill.enabled = true;
1242 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1244 talloc_fill.initialised = true;
1247 tc = talloc_chunk_from_ptr(ptr);
1248 return _tc_free_internal(tc, location);
/*
 * Forward declaration: computes the memlimit-relevant size of a subtree,
 * optionally rewriting old_limit -> new_limit while walking. The definition
 * appears later in this file (it is static inline, so it must).
 */
1251 static inline size_t _talloc_total_limit_size(const void *ptr,
1252 struct talloc_memlimit *old_limit,
1253 struct talloc_memlimit *new_limit);
/*
 * NOTE(review): this block's sibling-list surgery is order-sensitive; code
 * left byte-identical, comments only.
 */
1256 move a lump of memory from one talloc context to another return the
1257 ptr on success, or NULL if it could not be transferred.
1258 passing NULL as ptr will always return NULL with no side effects.
1260 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1262 struct talloc_chunk *tc, *new_tc;
1263 size_t ctx_size = 0;
1265 if (unlikely(!ptr)) {
1266 return NULL;
1269 if (unlikely(new_ctx == NULL)) {
1270 new_ctx = null_context;
1273 tc = talloc_chunk_from_ptr(ptr);
/* If the source subtree participates in a memlimit, account for the move. */
1275 if (tc->limit != NULL) {
1277 ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1279 /* Decrement the memory limit from the source .. */
1280 talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1282 if (tc->limit->parent == tc) {
1283 tc->limit->upper = NULL;
1284 } else {
1285 tc->limit = NULL;
/* No destination context (and no null_context): fully detach the chunk. */
1289 if (unlikely(new_ctx == NULL)) {
1290 if (tc->parent) {
1291 _TLIST_REMOVE(tc->parent->child, tc);
1292 if (tc->parent->child) {
1293 tc->parent->child->parent = tc->parent;
1295 } else {
1296 if (tc->prev) tc->prev->next = tc->next;
1297 if (tc->next) tc->next->prev = tc->prev;
1300 tc->parent = tc->next = tc->prev = NULL;
1301 return discard_const_p(void, ptr);
1304 new_tc = talloc_chunk_from_ptr(new_ctx);
/* Stealing onto self or onto the current parent is a no-op. */
1306 if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1307 return discard_const_p(void, ptr);
/* Unlink from the old parent's child list (head vs. middle cases differ). */
1310 if (tc->parent) {
1311 _TLIST_REMOVE(tc->parent->child, tc);
1312 if (tc->parent->child) {
1313 tc->parent->child->parent = tc->parent;
1315 } else {
1316 if (tc->prev) tc->prev->next = tc->next;
1317 if (tc->next) tc->next->prev = tc->prev;
1318 tc->prev = tc->next = NULL;
/* Link as the new head child of new_tc; only the head holds a parent pointer. */
1321 tc->parent = new_tc;
1322 if (new_tc->child) new_tc->child->parent = NULL;
1323 _TLIST_ADD(new_tc->child, tc);
1325 if (tc->limit || new_tc->limit) {
1326 ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1327 new_tc->limit);
1328 /* .. and increment it in the destination. */
1329 if (new_tc->limit) {
1330 talloc_memlimit_grow(new_tc->limit, ctx_size);
1334 return discard_const_p(void, ptr);
1338 move a lump of memory from one talloc context to another return the
1339 ptr on success, or NULL if it could not be transferred.
1340 passing NULL as ptr will always return NULL with no side effects.
1342 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1344 struct talloc_chunk *tc;
1346 if (unlikely(ptr == NULL)) {
1347 return NULL;
1350 tc = talloc_chunk_from_ptr(ptr);
1352 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1353 struct talloc_reference_handle *h;
1355 talloc_log("WARNING: talloc_steal with references at %s\n",
1356 location);
1358 for (h=tc->refs; h; h=h->next) {
1359 talloc_log("\treference at %s\n",
1360 h->location);
1364 #if 0
1365 /* this test is probably too expensive to have on in the
1366 normal build, but it useful for debugging */
1367 if (talloc_is_parent(new_ctx, ptr)) {
1368 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1370 #endif
1372 return _talloc_steal_internal(new_ctx, ptr);
1376 this is like a talloc_steal(), but you must supply the old
1377 parent. This resolves the ambiguity in a talloc_steal() which is
1378 called on a context that has more than one parent (via references)
1380 The old parent can be either a reference or a parent
1382 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1384 struct talloc_chunk *tc;
1385 struct talloc_reference_handle *h;
1387 if (unlikely(ptr == NULL)) {
1388 return NULL;
1391 if (old_parent == talloc_parent(ptr)) {
1392 return _talloc_steal_internal(new_parent, ptr);
1395 tc = talloc_chunk_from_ptr(ptr);
1396 for (h=tc->refs;h;h=h->next) {
1397 if (talloc_parent(h) == old_parent) {
1398 if (_talloc_steal_internal(new_parent, h) != h) {
1399 return NULL;
1401 return discard_const_p(void, ptr);
1405 /* it wasn't a parent */
1406 return NULL;
1410 remove a secondary reference to a pointer. This undo's what
1411 talloc_reference() has done. The context and pointer arguments
1412 must match those given to a talloc_reference()
1414 static inline int talloc_unreference(const void *context, const void *ptr)
1416 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1417 struct talloc_reference_handle *h;
1419 if (unlikely(context == NULL)) {
1420 context = null_context;
1423 for (h=tc->refs;h;h=h->next) {
1424 struct talloc_chunk *p = talloc_parent_chunk(h);
1425 if (p == NULL) {
1426 if (context == NULL) break;
1427 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1428 break;
1431 if (h == NULL) {
1432 return -1;
1435 return _talloc_free_internal(h, __location__);
1439 remove a specific parent context from a pointer. This is a more
1440 controlled variant of talloc_free()
1443 /* coverity[ -tainted_data_sink : arg-1 ] */
1444 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1446 struct talloc_chunk *tc_p, *new_p, *tc_c;
1447 void *new_parent;
1449 if (ptr == NULL) {
1450 return -1;
1453 if (context == NULL) {
1454 context = null_context;
1457 if (talloc_unreference(context, ptr) == 0) {
1458 return 0;
1461 if (context != NULL) {
1462 tc_c = talloc_chunk_from_ptr(context);
1463 } else {
1464 tc_c = NULL;
1466 if (tc_c != talloc_parent_chunk(ptr)) {
1467 return -1;
1470 tc_p = talloc_chunk_from_ptr(ptr);
1472 if (tc_p->refs == NULL) {
1473 return _talloc_free_internal(ptr, __location__);
1476 new_p = talloc_parent_chunk(tc_p->refs);
1477 if (new_p) {
1478 new_parent = TC_PTR_FROM_CHUNK(new_p);
1479 } else {
1480 new_parent = NULL;
1483 if (talloc_unreference(new_parent, ptr) != 0) {
1484 return -1;
1487 _talloc_steal_internal(new_parent, ptr);
1489 return 0;
1493 add a name to an existing pointer - va_list version
1495 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1496 const char *fmt,
1497 va_list ap) PRINTF_ATTRIBUTE(2,0);
1499 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1500 const char *fmt,
1501 va_list ap)
1503 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1504 fmt,
1505 ap);
1506 if (likely(name_tc)) {
1507 tc->name = TC_PTR_FROM_CHUNK(name_tc);
1508 _tc_set_name_const(name_tc, ".name");
1509 } else {
1510 tc->name = NULL;
1512 return tc->name;
1516 add a name to an existing pointer
1518 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1520 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1521 const char *name;
1522 va_list ap;
1523 va_start(ap, fmt);
1524 name = tc_set_name_v(tc, fmt, ap);
1525 va_end(ap);
1526 return name;
1531 create a named talloc pointer. Any talloc pointer can be named, and
1532 talloc_named() operates just like talloc() except that it allows you
1533 to name the pointer.
1535 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1537 va_list ap;
1538 void *ptr;
1539 const char *name;
1540 struct talloc_chunk *tc;
1542 ptr = __talloc(context, size, &tc);
1543 if (unlikely(ptr == NULL)) return NULL;
1545 va_start(ap, fmt);
1546 name = tc_set_name_v(tc, fmt, ap);
1547 va_end(ap);
1549 if (unlikely(name == NULL)) {
1550 _talloc_free_internal(ptr, __location__);
1551 return NULL;
1554 return ptr;
1558 return the name of a talloc ptr, or "UNNAMED"
1560 static inline const char *__talloc_get_name(const void *ptr)
1562 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1563 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1564 return ".reference";
1566 if (likely(tc->name)) {
1567 return tc->name;
1569 return "UNNAMED";
1572 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1574 return __talloc_get_name(ptr);
1578 check if a pointer has the given name. If it does, return the pointer,
1579 otherwise return NULL
1581 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1583 const char *pname;
1584 if (unlikely(ptr == NULL)) return NULL;
1585 pname = __talloc_get_name(ptr);
1586 if (likely(pname == name || strcmp(pname, name) == 0)) {
1587 return discard_const_p(void, ptr);
1589 return NULL;
1592 static void talloc_abort_type_mismatch(const char *location,
1593 const char *name,
1594 const char *expected)
1596 const char *reason;
1598 reason = talloc_asprintf(NULL,
1599 "%s: Type mismatch: name[%s] expected[%s]",
1600 location,
1601 name?name:"NULL",
1602 expected);
1603 if (!reason) {
1604 reason = "Type mismatch";
1607 talloc_abort(reason);
1610 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1612 const char *pname;
1614 if (unlikely(ptr == NULL)) {
1615 talloc_abort_type_mismatch(location, NULL, name);
1616 return NULL;
1619 pname = __talloc_get_name(ptr);
1620 if (likely(pname == name || strcmp(pname, name) == 0)) {
1621 return discard_const_p(void, ptr);
1624 talloc_abort_type_mismatch(location, pname, name);
1625 return NULL;
1629 this is for compatibility with older versions of talloc
1631 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1633 va_list ap;
1634 void *ptr;
1635 const char *name;
1636 struct talloc_chunk *tc;
1638 ptr = __talloc(NULL, 0, &tc);
1639 if (unlikely(ptr == NULL)) return NULL;
1641 va_start(ap, fmt);
1642 name = tc_set_name_v(tc, fmt, ap);
1643 va_end(ap);
1645 if (unlikely(name == NULL)) {
1646 _talloc_free_internal(ptr, __location__);
1647 return NULL;
1650 return ptr;
/*
 * Free all children of tc. Children whose destructor refuses the free are
 * reparented; the reparenting order below is deliberate — code unchanged,
 * comments only.
 */
1653 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1654 void *ptr,
1655 const char *location)
1657 while (tc->child) {
1658 /* we need to work out who will own an abandoned child
1659 if it cannot be freed. In priority order, the first
1660 choice is owner of any remaining reference to this
1661 pointer, the second choice is our parent, and the
1662 final choice is the null context. */
1663 void *child = TC_PTR_FROM_CHUNK(tc->child);
1664 const void *new_parent = null_context;
1665 if (unlikely(tc->child->refs)) {
1666 struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1667 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
/* _tc_free_internal() returns -1 when a destructor blocked the free */
1669 if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1670 if (talloc_parent_chunk(child) != tc) {
1672 * Destructor already reparented this child.
1673 * No further reparenting needed.
1675 continue;
1677 if (new_parent == null_context) {
1678 struct talloc_chunk *p = talloc_parent_chunk(ptr);
1679 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1681 _talloc_steal_internal(new_parent, child);
/*
 * Frees every child of ptr but not ptr itself. The chunk holding ptr's own
 * name is temporarily unlinked so it survives; code unchanged, comments only.
 */
1687 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1688 should probably not be used in new code. It's in here to keep the talloc
1689 code consistent across Samba 3 and 4.
1691 _PUBLIC_ void talloc_free_children(void *ptr)
1693 struct talloc_chunk *tc_name = NULL;
1694 struct talloc_chunk *tc;
1696 if (unlikely(ptr == NULL)) {
1697 return;
1700 tc = talloc_chunk_from_ptr(ptr);
1702 /* we do not want to free the context name if it is a child .. */
1703 if (likely(tc->child)) {
1704 for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1705 if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1707 if (tc_name) {
1708 _TLIST_REMOVE(tc->child, tc_name);
1709 if (tc->child) {
1710 tc->child->parent = tc;
1715 _tc_free_children_internal(tc, ptr, __location__);
1717 /* .. so we put it back after all other children have been freed */
1718 if (tc_name) {
1719 if (tc->child) {
1720 tc->child->parent = NULL;
1722 tc_name->parent = tc;
1723 _TLIST_ADD(tc->child, tc_name);
1728 Allocate a bit of memory as a child of an existing pointer
1730 _PUBLIC_ void *_talloc(const void *context, size_t size)
1732 struct talloc_chunk *tc;
1733 return __talloc(context, size, &tc);
1737 externally callable talloc_set_name_const()
1739 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1741 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1745 create a named talloc pointer. Any talloc pointer can be named, and
1746 talloc_named() operates just like talloc() except that it allows you
1747 to name the pointer.
1749 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1751 return _talloc_named_const(context, size, name);
1755 free a talloc pointer. This also frees all child pointers of this
1756 pointer recursively
1758 return 0 if the memory is actually freed, otherwise -1. The memory
1759 will not be freed if the ref_count is > 1 or the destructor (if
1760 any) returns non-zero
1762 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1764 struct talloc_chunk *tc;
1766 if (unlikely(ptr == NULL)) {
1767 return -1;
1770 tc = talloc_chunk_from_ptr(ptr);
1772 if (unlikely(tc->refs != NULL)) {
1773 struct talloc_reference_handle *h;
1775 if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1776 /* in this case we do know which parent should
1777 get this pointer, as there is really only
1778 one parent */
1779 return talloc_unlink(null_context, ptr);
1782 talloc_log("ERROR: talloc_free with references at %s\n",
1783 location);
1785 for (h=tc->refs; h; h=h->next) {
1786 talloc_log("\treference at %s\n",
1787 h->location);
1789 return -1;
1792 return _talloc_free_internal(ptr, location);
/*
 * NOTE(review): the pool-aware realloc below depends on exact ordering of
 * the free/not-free magic stamping around realloc(); code left untouched,
 * comments only.
 */
1798 A talloc version of realloc. The context argument is only used if
1799 ptr is NULL
1801 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1803 struct talloc_chunk *tc;
1804 void *new_ptr;
1805 bool malloced = false;
1806 struct talloc_pool_hdr *pool_hdr = NULL;
1807 size_t old_size = 0;
1808 size_t new_size = 0;
1810 /* size zero is equivalent to free() */
1811 if (unlikely(size == 0)) {
1812 talloc_unlink(context, ptr);
1813 return NULL;
1816 if (unlikely(size >= MAX_TALLOC_SIZE)) {
1817 return NULL;
1820 /* realloc(NULL) is equivalent to malloc() */
1821 if (ptr == NULL) {
1822 return _talloc_named_const(context, size, name);
1825 tc = talloc_chunk_from_ptr(ptr);
1827 /* don't allow realloc on referenced pointers */
1828 if (unlikely(tc->refs)) {
1829 return NULL;
1832 /* don't let anybody try to realloc a talloc_pool */
1833 if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
1834 return NULL;
1837 /* handle realloc inside a talloc_pool */
1838 if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1839 pool_hdr = tc->pool;
1842 /* don't shrink if we have less than 1k to gain */
1843 if (size < tc->size && tc->limit == NULL) {
1844 if (pool_hdr) {
1845 void *next_tc = tc_next_chunk(tc);
1846 TC_INVALIDATE_SHRINK_CHUNK(tc, size);
1847 tc->size = size;
1848 if (next_tc == pool_hdr->end) {
1849 /* note: tc->size has changed, so this works */
1850 pool_hdr->end = tc_next_chunk(tc);
1852 return ptr;
1853 } else if ((tc->size - size) < 1024) {
1855 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1856 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1857 * after each realloc call, which slows down
1858 * testing a lot :-(.
1860 * That is why we only mark memory as undefined here.
1862 TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1864 /* do not shrink if we have less than 1k to gain */
1865 tc->size = size;
1866 return ptr;
1868 } else if (tc->size == size) {
1870 * do not change the pointer if it is exactly
1871 * the same size.
1873 return ptr;
1877 * by resetting magic we catch users of the old memory
1879 * We mark this memory as free, and also over-stamp the talloc
1880 * magic with the old-style magic.
1882 * Why? This tries to avoid a memory read use-after-free from
1883 * disclosing our talloc magic, which would then allow an
1884 * attacker to prepare a valid header and so run a destructor.
1886 * What else? We have to re-stamp back a valid normal magic
1887 * on this memory once realloc() is done, as it will have done
1888 * a memcpy() into the new valid memory. We can't do this in
1889 * reverse as that would be a real use-after-free.
1891 _talloc_chunk_set_free(tc, NULL);
1893 if (pool_hdr) {
1894 struct talloc_chunk *pool_tc;
1895 void *next_tc = tc_next_chunk(tc);
1896 size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1897 size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1898 size_t space_needed;
1899 size_t space_left;
1900 unsigned int chunk_count = pool_hdr->object_count;
1902 pool_tc = talloc_chunk_from_pool(pool_hdr);
1903 if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1904 chunk_count -= 1;
1907 if (chunk_count == 1) {
1909 * optimize for the case where 'tc' is the only
1910 * chunk in the pool.
1912 char *start = tc_pool_first_chunk(pool_hdr);
1913 space_needed = new_chunk_size;
1914 space_left = (char *)tc_pool_end(pool_hdr) - start;
1916 if (space_left >= space_needed) {
1917 size_t old_used = TC_HDR_SIZE + tc->size;
1918 size_t new_used = TC_HDR_SIZE + size;
1919 new_ptr = start;
1921 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1924 * The area from
1925 * start -> tc may have
1926 * been freed and thus been marked as
1927 * VALGRIND_MEM_NOACCESS. Set it to
1928 * VALGRIND_MEM_UNDEFINED so we can
1929 * copy into it without valgrind errors.
1930 * We can't just mark
1931 * new_ptr -> new_ptr + old_used
1932 * as this may overlap on top of tc,
1933 * (which is why we use memmove, not
1934 * memcpy below) hence the MIN.
1936 size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1937 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1939 #endif
1941 memmove(new_ptr, tc, old_used);
1943 tc = (struct talloc_chunk *)new_ptr;
1944 TC_UNDEFINE_GROW_CHUNK(tc, size);
1947 * first we do not align the pool pointer
1948 * because we want to invalidate the padding
1949 * too.
1951 pool_hdr->end = new_used + (char *)new_ptr;
1952 tc_invalidate_pool(pool_hdr);
1954 /* now the aligned pointer */
1955 pool_hdr->end = new_chunk_size + (char *)new_ptr;
1956 goto got_new_ptr;
1959 next_tc = NULL;
1962 if (new_chunk_size == old_chunk_size) {
1963 TC_UNDEFINE_GROW_CHUNK(tc, size);
1964 _talloc_chunk_set_not_free(tc);
1965 tc->size = size;
1966 return ptr;
1969 if (next_tc == pool_hdr->end) {
1971 * optimize for the case where 'tc' is the last
1972 * chunk in the pool.
1974 space_needed = new_chunk_size - old_chunk_size;
1975 space_left = tc_pool_space_left(pool_hdr);
1977 if (space_left >= space_needed) {
1978 TC_UNDEFINE_GROW_CHUNK(tc, size);
1979 _talloc_chunk_set_not_free(tc);
1980 tc->size = size;
1981 pool_hdr->end = tc_next_chunk(tc);
1982 return ptr;
1986 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1988 if (new_ptr == NULL) {
1990 * Couldn't allocate from pool (pool size
1991 * counts as already allocated for memlimit
1992 * purposes). We must check memory limit
1993 * before any real malloc.
1995 if (tc->limit) {
1997 * Note we're doing an extra malloc,
1998 * on top of the pool size, so account
1999 * for size only, not the difference
2000 * between old and new size.
2002 if (!talloc_memlimit_check(tc->limit, size)) {
2003 _talloc_chunk_set_not_free(tc);
2004 errno = ENOMEM;
2005 return NULL;
2008 new_ptr = malloc(TC_HDR_SIZE+size);
2009 malloced = true;
2010 new_size = size;
2013 if (new_ptr) {
2014 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
2016 _tc_free_poolmem(tc, __location__ "_talloc_realloc");
/* non-pool chunk: a plain realloc() of header + data */
2019 else {
2020 /* We're doing realloc here, so record the difference. */
2021 old_size = tc->size;
2022 new_size = size;
2024 * We must check memory limit
2025 * before any real realloc.
2027 if (tc->limit && (size > old_size)) {
2028 if (!talloc_memlimit_check(tc->limit,
2029 (size - old_size))) {
2030 _talloc_chunk_set_not_free(tc);
2031 errno = ENOMEM;
2032 return NULL;
2035 new_ptr = realloc(tc, size + TC_HDR_SIZE);
2037 got_new_ptr:
2039 if (unlikely(!new_ptr)) {
2041 * Ok, this is a strange spot. We have to put back
2042 * the old talloc_magic and any flags, except the
2043 * TALLOC_FLAG_FREE as this was not free'ed by the
2044 * realloc() call after all
2046 _talloc_chunk_set_not_free(tc);
2047 return NULL;
2051 * tc is now the new value from realloc(), the old memory we
2052 * can't access any more and was preemptively marked as
2053 * TALLOC_FLAG_FREE before the call. Now we mark it as not
2054 * free again
2056 tc = (struct talloc_chunk *)new_ptr;
2057 _talloc_chunk_set_not_free(tc);
2058 if (malloced) {
2059 tc->flags &= ~TALLOC_FLAG_POOLMEM;
/* the chunk may have moved: fix up every neighbour's back-pointers */
2061 if (tc->parent) {
2062 tc->parent->child = tc;
2064 if (tc->child) {
2065 tc->child->parent = tc;
2068 if (tc->prev) {
2069 tc->prev->next = tc;
2071 if (tc->next) {
2072 tc->next->prev = tc;
2075 if (new_size > old_size) {
2076 talloc_memlimit_grow(tc->limit, new_size - old_size);
2077 } else if (new_size < old_size) {
2078 talloc_memlimit_shrink(tc->limit, old_size - new_size);
2081 tc->size = size;
2082 _tc_set_name_const(tc, name);
2084 return TC_PTR_FROM_CHUNK(tc);
2088 a wrapper around talloc_steal() for situations where you are moving a pointer
2089 between two structures, and want the old pointer to be set to NULL
2091 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2093 const void **pptr = discard_const_p(const void *,_pptr);
2094 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
2095 (*pptr) = NULL;
2096 return ret;
/*
 * Selector for _talloc_total_mem_internal(): total bytes, total number of
 * blocks, or memlimit-style accounting.
 */
2099 enum talloc_mem_count_type {
2100 TOTAL_MEM_SIZE,
2101 TOTAL_MEM_BLOCKS,
2102 TOTAL_MEM_LIMIT,
/*
 * Recursively accumulate a size/count over the subtree at ptr. Also doubles
 * as the memlimit-rewrite walker (old_limit -> new_limit) used by steal.
 * Uses TALLOC_FLAG_LOOP for cycle protection; code unchanged, comments only.
 */
2105 static inline size_t _talloc_total_mem_internal(const void *ptr,
2106 enum talloc_mem_count_type type,
2107 struct talloc_memlimit *old_limit,
2108 struct talloc_memlimit *new_limit)
2110 size_t total = 0;
2111 struct talloc_chunk *c, *tc;
2113 if (ptr == NULL) {
2114 ptr = null_context;
2116 if (ptr == NULL) {
2117 return 0;
2120 tc = talloc_chunk_from_ptr(ptr);
/* rewrite an upper-limit link while walking, if requested */
2122 if (old_limit || new_limit) {
2123 if (tc->limit && tc->limit->upper == old_limit) {
2124 tc->limit->upper = new_limit;
2128 /* optimize in the memlimits case */
2129 if (type == TOTAL_MEM_LIMIT &&
2130 tc->limit != NULL &&
2131 tc->limit != old_limit &&
2132 tc->limit->parent == tc) {
2133 return tc->limit->cur_size;
2136 if (tc->flags & TALLOC_FLAG_LOOP) {
2137 return 0;
2140 tc->flags |= TALLOC_FLAG_LOOP;
2142 if (old_limit || new_limit) {
2143 if (old_limit == tc->limit) {
2144 tc->limit = new_limit;
2148 switch (type) {
2149 case TOTAL_MEM_SIZE:
2150 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2151 total = tc->size;
2153 break;
2154 case TOTAL_MEM_BLOCKS:
2155 total++;
2156 break;
2157 case TOTAL_MEM_LIMIT:
2158 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2160 * Don't count memory allocated from a pool
2161 * when calculating limits. Only count the
2162 * pool itself.
2164 if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2165 if (tc->flags & TALLOC_FLAG_POOL) {
2167 * If this is a pool, the allocated
2168 * size is in the pool header, and
2169 * remember to add in the prefix
2170 * length.
2172 struct talloc_pool_hdr *pool_hdr
2173 = talloc_pool_from_chunk(tc);
2174 total = pool_hdr->poolsize +
2175 TC_HDR_SIZE +
2176 TP_HDR_SIZE;
2177 } else {
2178 total = tc->size + TC_HDR_SIZE;
2182 break;
2184 for (c = tc->child; c; c = c->next) {
2185 total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2186 old_limit, new_limit);
2189 tc->flags &= ~TALLOC_FLAG_LOOP;
2191 return total;
2195 return the total size of a talloc pool (subtree)
2197 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2199 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2203 return the total number of blocks in a talloc pool (subtree)
2205 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2207 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2211 return the number of external references to a pointer
2213 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2215 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2216 struct talloc_reference_handle *h;
2217 size_t ret = 0;
2219 for (h=tc->refs;h;h=h->next) {
2220 ret++;
2222 return ret;
2226 report on memory usage by all children of a pointer, giving a full tree view
2228 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2229 void (*callback)(const void *ptr,
2230 int depth, int max_depth,
2231 int is_ref,
2232 void *private_data),
2233 void *private_data)
2235 struct talloc_chunk *c, *tc;
2237 if (ptr == NULL) {
2238 ptr = null_context;
2240 if (ptr == NULL) return;
2242 tc = talloc_chunk_from_ptr(ptr);
2244 if (tc->flags & TALLOC_FLAG_LOOP) {
2245 return;
2248 callback(ptr, depth, max_depth, 0, private_data);
2250 if (max_depth >= 0 && depth >= max_depth) {
2251 return;
2254 tc->flags |= TALLOC_FLAG_LOOP;
2255 for (c=tc->child;c;c=c->next) {
2256 if (c->name == TALLOC_MAGIC_REFERENCE) {
2257 struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2258 callback(h->ptr, depth + 1, max_depth, 1, private_data);
2259 } else {
2260 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2263 tc->flags &= ~TALLOC_FLAG_LOOP;
2266 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2268 const char *name = __talloc_get_name(ptr);
2269 struct talloc_chunk *tc;
2270 FILE *f = (FILE *)_f;
2272 if (is_ref) {
2273 fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2274 return;
2277 tc = talloc_chunk_from_ptr(ptr);
2278 if (tc->limit && tc->limit->parent == tc) {
2279 fprintf(f, "%*s%-30s is a memlimit context"
2280 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2281 depth*4, "",
2282 name,
2283 (unsigned long)tc->limit->max_size,
2284 (unsigned long)tc->limit->cur_size);
2287 if (depth == 0) {
2288 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2289 (max_depth < 0 ? "full " :""), name,
2290 (unsigned long)talloc_total_size(ptr),
2291 (unsigned long)talloc_total_blocks(ptr));
2292 return;
2295 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2296 depth*4, "",
2297 name,
2298 (unsigned long)talloc_total_size(ptr),
2299 (unsigned long)talloc_total_blocks(ptr),
2300 (int)talloc_reference_count(ptr), ptr);
2302 #if 0
2303 fprintf(f, "content: ");
2304 if (talloc_total_size(ptr)) {
2305 int tot = talloc_total_size(ptr);
2306 int i;
2308 for (i = 0; i < tot; i++) {
2309 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2310 fprintf(f, "%c", ((char *)ptr)[i]);
2311 } else {
2312 fprintf(f, "~%02x", ((char *)ptr)[i]);
2316 fprintf(f, "\n");
2317 #endif
2321 report on memory usage by all children of a pointer, giving a full tree view
2323 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2325 if (f) {
2326 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2327 fflush(f);
2332 report on memory usage by all children of a pointer, giving a full tree view
2334 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2336 talloc_report_depth_file(ptr, 0, -1, f);
2340 report on memory usage by all children of a pointer
2342 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2344 talloc_report_depth_file(ptr, 0, 1, f);
2348 enable tracking of the NULL context
2350 _PUBLIC_ void talloc_enable_null_tracking(void)
2352 if (null_context == NULL) {
2353 null_context = _talloc_named_const(NULL, 0, "null_context");
2354 if (autofree_context != NULL) {
2355 talloc_reparent(NULL, null_context, autofree_context);
2361 enable tracking of the NULL context, not moving the autofree context
2362 into the NULL context. This is needed for the talloc testsuite
2364 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2366 if (null_context == NULL) {
2367 null_context = _talloc_named_const(NULL, 0, "null_context");
/*
 * Detach every child of null_context by hand before freeing it, so the
 * children survive as genuinely parentless allocations. Code unchanged,
 * comments only.
 */
2372 disable tracking of the NULL context
2374 _PUBLIC_ void talloc_disable_null_tracking(void)
2376 if (null_context != NULL) {
2377 /* we have to move any children onto the real NULL
2378 context */
2379 struct talloc_chunk *tc, *tc2;
2380 tc = talloc_chunk_from_ptr(null_context);
2381 for (tc2 = tc->child; tc2; tc2=tc2->next) {
2382 if (tc2->parent == tc) tc2->parent = NULL;
2383 if (tc2->prev == tc) tc2->prev = NULL;
2385 for (tc2 = tc->next; tc2; tc2=tc2->next) {
2386 if (tc2->parent == tc) tc2->parent = NULL;
2387 if (tc2->prev == tc) tc2->prev = NULL;
/* forget the children so talloc_free() below frees only the context itself */
2389 tc->child = NULL;
2390 tc->next = NULL;
2392 talloc_free(null_context);
2393 null_context = NULL;
2397 enable leak reporting on exit
2399 _PUBLIC_ void talloc_enable_leak_report(void)
2401 talloc_enable_null_tracking();
2402 talloc_report_null = true;
2403 talloc_setup_atexit();
2407 enable full leak reporting on exit
2409 _PUBLIC_ void talloc_enable_leak_report_full(void)
2411 talloc_enable_null_tracking();
2412 talloc_report_null_full = true;
2413 talloc_setup_atexit();
2417 talloc and zero memory.
2419 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2421 void *p = _talloc_named_const(ctx, size, name);
2423 if (p) {
2424 memset(p, '\0', size);
2427 return p;
2431 memdup with a talloc.
2433 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2435 void *newp = NULL;
2437 if (likely(size > 0) && unlikely(p == NULL)) {
2438 return NULL;
2441 newp = _talloc_named_const(t, size, name);
2442 if (likely(newp != NULL) && likely(size > 0)) {
2443 memcpy(newp, p, size);
2446 return newp;
/* duplicate exactly len bytes of p into a NUL-terminated talloc string */
static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
{
	char *ret;
	struct talloc_chunk *tc;

	ret = (char *)__talloc(t, len + 1, &tc);
	if (unlikely(ret == NULL)) {
		return NULL;
	}

	memcpy(ret, p, len);
	ret[len] = 0;

	/* the string itself doubles as the chunk name */
	_tc_set_name_const(tc, ret);
	return ret;
}
2465 strdup with a talloc
2467 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2469 if (unlikely(!p)) return NULL;
2470 return __talloc_strlendup(t, p, strlen(p));
2474 strndup with a talloc
2476 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2478 if (unlikely(!p)) return NULL;
2479 return __talloc_strlendup(t, p, strnlen(p, n));
2482 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2483 const char *a, size_t alen)
2485 char *ret;
2487 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2488 if (unlikely(!ret)) return NULL;
2490 /* append the string and the trailing \0 */
2491 memcpy(&ret[slen], a, alen);
2492 ret[slen+alen] = 0;
2494 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2495 return ret;
2499 * Appends at the end of the string.
2501 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2503 if (unlikely(!s)) {
2504 return talloc_strdup(NULL, a);
2507 if (unlikely(!a)) {
2508 return s;
2511 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2515 * Appends at the end of the talloc'ed buffer,
2516 * not the end of the string.
2518 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2520 size_t slen;
2522 if (unlikely(!s)) {
2523 return talloc_strdup(NULL, a);
2526 if (unlikely(!a)) {
2527 return s;
2530 slen = talloc_get_size(s);
2531 if (likely(slen > 0)) {
2532 slen--;
2535 return __talloc_strlendup_append(s, slen, a, strlen(a));
2539 * Appends at the end of the string.
2541 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2543 if (unlikely(!s)) {
2544 return talloc_strndup(NULL, a, n);
2547 if (unlikely(!a)) {
2548 return s;
2551 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2555 * Appends at the end of the talloc'ed buffer,
2556 * not the end of the string.
2558 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2560 size_t slen;
2562 if (unlikely(!s)) {
2563 return talloc_strndup(NULL, a, n);
2566 if (unlikely(!a)) {
2567 return s;
2570 slen = talloc_get_size(s);
2571 if (likely(slen > 0)) {
2572 slen--;
2575 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
/* Fallback definition of va_copy() for pre-C99 toolchains: prefer the
 * GCC __va_copy builtin if available, otherwise assume va_list values
 * can be copied by plain assignment (true on most traditional ABIs). */
#ifndef HAVE_VA_COPY
#ifdef HAVE___VA_COPY
#define va_copy(dest, src) __va_copy(dest, src)
#else
#define va_copy(dest, src) (dest) = (src)
#endif
#endif
/* prototype first, so the printf format-checking attribute applies */
static struct talloc_chunk *_vasprintf_tc(const void *t,
					const char *fmt,
					va_list ap) PRINTF_ATTRIBUTE(2,0);

/*
 * Format fmt/ap into a freshly allocated chunk and return the chunk
 * itself (callers use TC_PTR_FROM_CHUNK() to get the string).
 * Returns NULL on format error, length overflow or allocation failure.
 */
static struct talloc_chunk *_vasprintf_tc(const void *t,
					const char *fmt,
					va_list ap)
{
	int vlen;
	size_t len;
	char *ret;
	va_list ap2;
	struct talloc_chunk *tc;
	char buf[1024];

	/* this call looks strange, but it makes it work on older solaris boxes */
	va_copy(ap2, ap);
	vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
	va_end(ap2);
	if (unlikely(vlen < 0)) {
		/* vsnprintf() reported a format/encoding error */
		return NULL;
	}
	len = vlen;
	if (unlikely(len + 1 < len)) {
		/* len+1 would wrap around SIZE_MAX: refuse */
		return NULL;
	}

	ret = (char *)__talloc(t, len+1, &tc);
	if (unlikely(!ret)) return NULL;

	if (len < sizeof(buf)) {
		/* the probe above already produced the whole string */
		memcpy(ret, buf, len+1);
	} else {
		/* output was truncated: format again into the real buffer */
		va_copy(ap2, ap);
		vsnprintf(ret, len+1, fmt, ap2);
		va_end(ap2);
	}

	_tc_set_name_const(tc, ret);
	return tc;
}
2628 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2630 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2631 if (tc == NULL) {
2632 return NULL;
2634 return TC_PTR_FROM_CHUNK(tc);
2639 Perform string formatting, and return a pointer to newly allocated
2640 memory holding the result, inside a memory pool.
2642 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2644 va_list ap;
2645 char *ret;
2647 va_start(ap, fmt);
2648 ret = talloc_vasprintf(t, fmt, ap);
2649 va_end(ap);
2650 return ret;
/* prototype first, so the printf format-checking attribute applies */
static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
						 PRINTF_ATTRIBUTE(3,0);

/*
 * Grow the talloc'ed string s (current length slen) and append the
 * formatted result of fmt/ap after it.
 */
static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
{
	ssize_t alen;
	va_list ap2;
	char c;

	/* probe the formatted length without producing output;
	 * a 1-byte target keeps older vsnprintf implementations happy */
	va_copy(ap2, ap);
	alen = vsnprintf(&c, 1, fmt, ap2);
	va_end(ap2);

	if (alen <= 0) {
		/* Either the vsnprintf failed or the format resulted in
		 * no characters being formatted. In the former case, we
		 * ought to return NULL, in the latter we ought to return
		 * the original string. Most current callers of this
		 * function expect it to never return NULL.
		 */
		return s;
	}

	s = talloc_realloc(NULL, s, char, slen + alen + 1);
	if (!s) return NULL;

	/* second pass formats directly into the enlarged buffer */
	va_copy(ap2, ap);
	vsnprintf(s + slen, alen + 1, fmt, ap2);
	va_end(ap2);

	_tc_set_name_const(talloc_chunk_from_ptr(s), s);
	return s;
}
2690 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2691 * and return @p s, which may have moved. Good for gradually
2692 * accumulating output into a string buffer. Appends at the end
2693 * of the string.
2695 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2697 if (unlikely(!s)) {
2698 return talloc_vasprintf(NULL, fmt, ap);
2701 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2705 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2706 * and return @p s, which may have moved. Always appends at the
2707 * end of the talloc'ed buffer, not the end of the string.
2709 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2711 size_t slen;
2713 if (unlikely(!s)) {
2714 return talloc_vasprintf(NULL, fmt, ap);
2717 slen = talloc_get_size(s);
2718 if (likely(slen > 0)) {
2719 slen--;
2722 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2726 Realloc @p s to append the formatted result of @p fmt and return @p
2727 s, which may have moved. Good for gradually accumulating output
2728 into a string buffer.
2730 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2732 va_list ap;
2734 va_start(ap, fmt);
2735 s = talloc_vasprintf_append(s, fmt, ap);
2736 va_end(ap);
2737 return s;
2741 Realloc @p s to append the formatted result of @p fmt and return @p
2742 s, which may have moved. Good for gradually accumulating output
2743 into a buffer.
2745 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2747 va_list ap;
2749 va_start(ap, fmt);
2750 s = talloc_vasprintf_append_buffer(s, fmt, ap);
2751 va_end(ap);
2752 return s;
2756 alloc an array, checking for integer overflow in the array size
2758 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2760 if (count >= MAX_TALLOC_SIZE/el_size) {
2761 return NULL;
2763 return _talloc_named_const(ctx, el_size * count, name);
2767 alloc an zero array, checking for integer overflow in the array size
2769 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2771 if (count >= MAX_TALLOC_SIZE/el_size) {
2772 return NULL;
2774 return _talloc_zero(ctx, el_size * count, name);
2778 realloc an array, checking for integer overflow in the array size
2780 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2782 if (count >= MAX_TALLOC_SIZE/el_size) {
2783 return NULL;
2785 return _talloc_realloc(ctx, ptr, el_size * count, name);
/*
  a function version of talloc_realloc(), so it can be passed as a function pointer
  to libraries that want a realloc function (a realloc function encapsulates
  all the basic capabilities of an allocation library, which is why this is useful)
*/
_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
{
	return _talloc_realloc(context, ptr, size, NULL);
}
/*
  destructor for the global autofree context: clear the cached pointer
  so a later talloc_autofree_context() call recreates it
*/
static int talloc_autofree_destructor(void *ptr)
{
	autofree_context = NULL;
	return 0;
}
2806 return a context which will be auto-freed on exit
2807 this is useful for reducing the noise in leak reports
2809 _PUBLIC_ void *talloc_autofree_context(void)
2811 if (autofree_context == NULL) {
2812 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2813 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2814 talloc_setup_atexit();
2816 return autofree_context;
2819 _PUBLIC_ size_t talloc_get_size(const void *context)
2821 struct talloc_chunk *tc;
2823 if (context == NULL) {
2824 return 0;
2827 tc = talloc_chunk_from_ptr(context);
2829 return tc->size;
2833 find a parent of this context that has the given name, if any
2835 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2837 struct talloc_chunk *tc;
2839 if (context == NULL) {
2840 return NULL;
2843 tc = talloc_chunk_from_ptr(context);
2844 while (tc) {
2845 if (tc->name && strcmp(tc->name, name) == 0) {
2846 return TC_PTR_FROM_CHUNK(tc);
2848 while (tc && tc->prev) tc = tc->prev;
2849 if (tc) {
2850 tc = tc->parent;
2853 return NULL;
2857 show the parentage of a context
2859 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2861 struct talloc_chunk *tc;
2863 if (context == NULL) {
2864 fprintf(file, "talloc no parents for NULL\n");
2865 return;
2868 tc = talloc_chunk_from_ptr(context);
2869 fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2870 while (tc) {
2871 fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2872 while (tc && tc->prev) tc = tc->prev;
2873 if (tc) {
2874 tc = tc->parent;
2877 fflush(file);
2881 return 1 if ptr is a parent of context
2883 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2885 struct talloc_chunk *tc;
2887 if (context == NULL) {
2888 return 0;
2891 tc = talloc_chunk_from_ptr(context);
2892 while (tc) {
2893 if (depth <= 0) {
2894 return 0;
2896 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2897 while (tc && tc->prev) tc = tc->prev;
2898 if (tc) {
2899 tc = tc->parent;
2900 depth--;
2903 return 0;
/*
  return 1 if ptr is a parent of context
*/
_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
{
	return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
}
/*
  return the total size of memory used by this context and all children,
  for memory-limit accounting. NOTE(review): old_limit/new_limit are
  forwarded to _talloc_total_mem_internal(), which appears to also
  re-attach limits while walking — confirm there before relying on it.
*/
static inline size_t _talloc_total_limit_size(const void *ptr,
					struct talloc_memlimit *old_limit,
					struct talloc_memlimit *new_limit)
{
	return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
					  old_limit, new_limit);
}
2925 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2927 struct talloc_memlimit *l;
2929 for (l = limit; l != NULL; l = l->upper) {
2930 if (l->max_size != 0 &&
2931 ((l->max_size <= l->cur_size) ||
2932 (l->max_size - l->cur_size < size))) {
2933 return false;
2937 return true;
/*
  Update memory limits when freeing a talloc_chunk.
*/
static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
{
	size_t limit_shrink_size;

	if (!tc->limit) {
		/* chunk is not under any memory limit */
		return;
	}

	/*
	 * Pool entries don't count. Only the pools
	 * themselves are counted as part of the memory
	 * limits. Note that this also takes care of
	 * nested pools which have both flags
	 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
	 */
	if (tc->flags & TALLOC_FLAG_POOLMEM) {
		return;
	}

	/*
	 * If we are part of a memory limited context hierarchy
	 * we need to subtract the memory used from the counters
	 */

	limit_shrink_size = tc->size+TC_HDR_SIZE;

	/*
	 * If we're deallocating a pool, take into
	 * account the prefix size added for the pool.
	 */

	if (tc->flags & TALLOC_FLAG_POOL) {
		limit_shrink_size += TP_HDR_SIZE;
	}

	talloc_memlimit_shrink(tc->limit, limit_shrink_size);

	/* if this chunk owns the limit structure, release it too */
	if (tc->limit->parent == tc) {
		free(tc->limit);
	}

	tc->limit = NULL;
}
2988 Increase memory limit accounting after a malloc/realloc.
2990 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2991 size_t size)
2993 struct talloc_memlimit *l;
2995 for (l = limit; l != NULL; l = l->upper) {
2996 size_t new_cur_size = l->cur_size + size;
2997 if (new_cur_size < l->cur_size) {
2998 talloc_abort("logic error in talloc_memlimit_grow\n");
2999 return;
3001 l->cur_size = new_cur_size;
3006 Decrease memory limit accounting after a free/realloc.
3008 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
3009 size_t size)
3011 struct talloc_memlimit *l;
3013 for (l = limit; l != NULL; l = l->upper) {
3014 if (l->cur_size < size) {
3015 talloc_abort("logic error in talloc_memlimit_shrink\n");
3016 return;
3018 l->cur_size = l->cur_size - size;
/*
  Set (or update) a memory limit of max_size bytes on ctx and its
  subtree. Returns 0 on success, 1 on allocation failure.
*/
_PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
{
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
	struct talloc_memlimit *orig_limit;
	struct talloc_memlimit *limit = NULL;

	/* ctx already owns its own limit: just adjust the cap */
	if (tc->limit && tc->limit->parent == tc) {
		tc->limit->max_size = max_size;
		return 0;
	}
	orig_limit = tc->limit;

	limit = malloc(sizeof(struct talloc_memlimit));
	if (limit == NULL) {
		return 1;
	}
	limit->parent = tc;
	limit->max_size = max_size;
	/* NOTE(review): this call seems to both sum the subtree's size and
	 * re-attach the chunks from old_limit to the new limit — confirm in
	 * _talloc_total_mem_internal() before changing this sequence */
	limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);

	/* chain under an inherited limit, if any */
	if (orig_limit) {
		limit->upper = orig_limit;
	} else {
		limit->upper = NULL;
	}

	return 0;
}