2 Samba Unix SMB/CIFS implementation.
4 Samba trivial allocation library - new interface
6 NOTE: Please read talloc_guide.txt for full documentation
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
30 inspired by http://swapped.cc/halloc/
36 #ifdef HAVE_SYS_AUXV_H
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
48 /* Special macros that are no-ops except when run under Valgrind on
49 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51 /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
57 /* use this to force every realloc to change the pointer, to stress test
58 code that might not cope */
59 #define ALWAYS_REALLOC 0
62 #define MAX_TALLOC_SIZE 0x10000000
64 #define TALLOC_FLAG_FREE 0x01
65 #define TALLOC_FLAG_LOOP 0x02
66 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
67 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
70 * Bits above this are random, used to make it harder to fake talloc
71 * headers during an attack. Try not to change this without good reason.
73 #define TALLOC_FLAG_MASK 0x0F
75 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
77 #define TALLOC_MAGIC_BASE 0xe814ec70
78 #define TALLOC_MAGIC_NON_RANDOM ( \
79 ~TALLOC_FLAG_MASK & ( \
81 (TALLOC_BUILD_VERSION_MAJOR << 24) + \
82 (TALLOC_BUILD_VERSION_MINOR << 16) + \
83 (TALLOC_BUILD_VERSION_RELEASE << 8)))
static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
/* by default we abort when given a bad pointer (such as when talloc_free() is called
   on a pointer that came from malloc()) */
89 #define TALLOC_ABORT(reason) abort()
92 #ifndef discard_const_p
93 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
94 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
96 # define discard_const_p(type, ptr) ((type *)(ptr))
100 /* these macros gain us a few percent of speed on gcc */
102 /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
103 as its first argument */
105 #define likely(x) __builtin_expect(!!(x), 1)
108 #define unlikely(x) __builtin_expect(!!(x), 0)
112 #define likely(x) (x)
115 #define unlikely(x) (x)
119 /* this null_context is only used if talloc_enable_leak_report() or
120 talloc_enable_leak_report_full() is called, otherwise it remains
static void *null_context;
static bool talloc_report_null;
static bool talloc_report_null_full;
static void *autofree_context;
128 static void talloc_setup_atexit(void);
130 /* used to enable fill of memory on free, which can be useful for
131 * catching use after free errors when valgrind is too slow
139 #define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
142 * do not wipe the header, to allow the
143 * double-free logic to still work
145 #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
146 if (unlikely(talloc_fill.enabled)) { \
147 size_t _flen = (_tc)->size; \
148 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
149 memset(_fptr, talloc_fill.fill_value, _flen); \
153 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the whole chunk as not accessible */
155 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
156 size_t _flen = TC_HDR_SIZE + (_tc)->size; \
157 char *_fptr = (char *)(_tc); \
158 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
161 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
164 #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
165 TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
166 TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
169 #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
170 if (unlikely(talloc_fill.enabled)) { \
171 size_t _flen = (_tc)->size - (_new_size); \
172 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
173 _fptr += (_new_size); \
174 memset(_fptr, talloc_fill.fill_value, _flen); \
178 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the unused bytes not accessible */
180 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
181 size_t _flen = (_tc)->size - (_new_size); \
182 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
183 _fptr += (_new_size); \
184 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
187 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
190 #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
191 TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
192 TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
195 #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
196 if (unlikely(talloc_fill.enabled)) { \
197 size_t _flen = (_tc)->size - (_new_size); \
198 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
199 _fptr += (_new_size); \
200 memset(_fptr, talloc_fill.fill_value, _flen); \
204 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
205 /* Mark the unused bytes as undefined */
206 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
207 size_t _flen = (_tc)->size - (_new_size); \
208 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
209 _fptr += (_new_size); \
210 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
213 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
216 #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
217 TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
218 TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
221 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
222 /* Mark the new bytes as undefined */
223 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
224 size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
225 size_t _new_used = TC_HDR_SIZE + (_new_size); \
226 size_t _flen = _new_used - _old_used; \
227 char *_fptr = _old_used + (char *)(_tc); \
228 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
231 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
234 #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
235 TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
struct talloc_reference_handle {
	struct talloc_reference_handle *next, *prev;
	const char *location;
struct talloc_memlimit {
	struct talloc_chunk *parent;
	struct talloc_memlimit *upper;
251 static inline bool talloc_memlimit_check(struct talloc_memlimit
*limit
, size_t size
);
252 static inline void talloc_memlimit_grow(struct talloc_memlimit
*limit
,
254 static inline void talloc_memlimit_shrink(struct talloc_memlimit
*limit
,
256 static inline void tc_memlimit_update_on_free(struct talloc_chunk
*tc
);
258 static inline void _tc_set_name_const(struct talloc_chunk
*tc
,
260 static struct talloc_chunk
*_vasprintf_tc(const void *t
,
typedef int (*talloc_destructor_t)(void *);

struct talloc_pool_hdr;
268 struct talloc_chunk
{
270 * flags includes the talloc magic, which is randomised to
271 * make overwrite attacks harder
276 * If you have a logical tree like:
282 * <child 1> <child 2> <child 3>
284 * The actual talloc tree is:
288 * <child 1> - <child 2> - <child 3>
290 * The children are linked with next/prev pointers, and
291 * child 1 is linked to the parent with parent/child
295 struct talloc_chunk
*next
, *prev
;
296 struct talloc_chunk
*parent
, *child
;
297 struct talloc_reference_handle
*refs
;
298 talloc_destructor_t destructor
;
304 * if 'limit' is set it means all *new* children of the context will
* be limited to a total aggregate size of max_size for memory
307 * cur_size is used to keep track of the current use
309 struct talloc_memlimit
*limit
;
312 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
313 * is a pointer to the struct talloc_chunk of the pool that it was
314 * allocated from. This way children can quickly find the pool to chew
317 struct talloc_pool_hdr
*pool
;
320 union talloc_chunk_cast_u
{
322 struct talloc_chunk
*chunk
;
325 /* 16 byte alignment seems to keep everyone happy */
326 #define TC_ALIGN16(s) (((s)+15)&~15)
327 #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
328 #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
_PUBLIC_ int talloc_version_major(void)
{
	return TALLOC_VERSION_MAJOR;
}

_PUBLIC_ int talloc_version_minor(void)
{
	return TALLOC_VERSION_MINOR;
}
340 _PUBLIC_
int talloc_test_get_magic(void)
345 static inline void _talloc_chunk_set_free(struct talloc_chunk
*tc
,
346 const char *location
)
349 * Mark this memory as free, and also over-stamp the talloc
350 * magic with the old-style magic.
352 * Why? This tries to avoid a memory read use-after-free from
353 * disclosing our talloc magic, which would then allow an
354 * attacker to prepare a valid header and so run a destructor.
357 tc
->flags
= TALLOC_MAGIC_NON_RANDOM
| TALLOC_FLAG_FREE
358 | (tc
->flags
& TALLOC_FLAG_MASK
);
360 /* we mark the freed memory with where we called the free
361 * from. This means on a double free error we can report where
362 * the first free came from
369 static inline void _talloc_chunk_set_not_free(struct talloc_chunk
*tc
)
372 * Mark this memory as not free.
* Why? This is memory either in a pool (and so available for
* talloc's re-use) or memory after the realloc(). We need to mark
376 * the memory as free() before any realloc() call as we can't
377 * write to the memory after that.
379 * We put back the normal magic instead of the 'not random'
383 tc
->flags
= talloc_magic
|
384 ((tc
->flags
& TALLOC_FLAG_MASK
) & ~TALLOC_FLAG_FREE
);
static void (*talloc_log_fn)(const char *message);

_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
{
	talloc_log_fn = log_fn;
}
394 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
395 void talloc_lib_init(void) __attribute__((constructor
));
396 void talloc_lib_init(void)
398 uint32_t random_value
;
399 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
402 * Use the kernel-provided random values used for
403 * ASLR. This won't change per-exec, which is ideal for us
405 p
= (uint8_t *) getauxval(AT_RANDOM
);
408 * We get 16 bytes from getauxval. By calling rand(),
409 * a totally insecure PRNG, but one that will
410 * deterministically have a different value when called
411 * twice, we ensure that if two talloc-like libraries
412 * are somehow loaded in the same address space, that
413 * because we choose different bytes, we will keep the
414 * protection against collision of multiple talloc
417 * This protection is important because the effects of
418 * passing a talloc pointer from one to the other may
419 * be very hard to determine.
421 int offset
= rand() % (16 - sizeof(random_value
));
422 memcpy(&random_value
, p
+ offset
, sizeof(random_value
));
427 * Otherwise, hope the location we are loaded in
428 * memory is randomised by someone else
430 random_value
= ((uintptr_t)talloc_lib_init
& 0xFFFFFFFF);
432 talloc_magic
= random_value
& ~TALLOC_FLAG_MASK
;
435 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
438 static void talloc_lib_atexit(void)
440 TALLOC_FREE(autofree_context
);
442 if (talloc_total_size(null_context
) == 0) {
446 if (talloc_report_null_full
) {
447 talloc_report_full(null_context
, stderr
);
448 } else if (talloc_report_null
) {
449 talloc_report(null_context
, stderr
);
453 static void talloc_setup_atexit(void)
461 atexit(talloc_lib_atexit
);
465 static void talloc_log(const char *fmt
, ...) PRINTF_ATTRIBUTE(1,2);
466 static void talloc_log(const char *fmt
, ...)
471 if (!talloc_log_fn
) {
476 message
= talloc_vasprintf(NULL
, fmt
, ap
);
479 talloc_log_fn(message
);
480 talloc_free(message
);
483 static void talloc_log_stderr(const char *message
)
485 fprintf(stderr
, "%s", message
);
488 _PUBLIC_
void talloc_set_log_stderr(void)
490 talloc_set_log_fn(talloc_log_stderr
);
493 static void (*talloc_abort_fn
)(const char *reason
);
495 _PUBLIC_
void talloc_set_abort_fn(void (*abort_fn
)(const char *reason
))
497 talloc_abort_fn
= abort_fn
;
500 static void talloc_abort(const char *reason
)
502 talloc_log("%s\n", reason
);
504 if (!talloc_abort_fn
) {
505 TALLOC_ABORT(reason
);
508 talloc_abort_fn(reason
);
511 static void talloc_abort_access_after_free(void)
513 talloc_abort("Bad talloc magic value - access after free");
516 static void talloc_abort_unknown_value(void)
518 talloc_abort("Bad talloc magic value - unknown value");
521 /* panic if we get a bad magic value */
522 static inline struct talloc_chunk
*talloc_chunk_from_ptr(const void *ptr
)
524 const char *pp
= (const char *)ptr
;
525 struct talloc_chunk
*tc
= discard_const_p(struct talloc_chunk
, pp
- TC_HDR_SIZE
);
526 if (unlikely((tc
->flags
& (TALLOC_FLAG_FREE
| ~TALLOC_FLAG_MASK
)) != talloc_magic
)) {
527 if ((tc
->flags
& (TALLOC_FLAG_FREE
| ~TALLOC_FLAG_MASK
))
528 == (TALLOC_MAGIC_NON_RANDOM
| TALLOC_FLAG_FREE
)) {
529 talloc_log("talloc: access after free error - first free may be at %s\n", tc
->name
);
530 talloc_abort_access_after_free();
534 talloc_abort_unknown_value();
540 /* hook into the front of the list */
541 #define _TLIST_ADD(list, p) \
545 (p)->next = (p)->prev = NULL; \
547 (list)->prev = (p); \
548 (p)->next = (list); \
554 /* remove an element from a list - element doesn't have to be in list. */
555 #define _TLIST_REMOVE(list, p) \
557 if ((p) == (list)) { \
558 (list) = (p)->next; \
559 if (list) (list)->prev = NULL; \
561 if ((p)->prev) (p)->prev->next = (p)->next; \
562 if ((p)->next) (p)->next->prev = (p)->prev; \
564 if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
569 return the parent chunk of a pointer
571 static inline struct talloc_chunk
*talloc_parent_chunk(const void *ptr
)
573 struct talloc_chunk
*tc
;
575 if (unlikely(ptr
== NULL
)) {
579 tc
= talloc_chunk_from_ptr(ptr
);
580 while (tc
->prev
) tc
=tc
->prev
;
585 _PUBLIC_
void *talloc_parent(const void *ptr
)
587 struct talloc_chunk
*tc
= talloc_parent_chunk(ptr
);
588 return tc
? TC_PTR_FROM_CHUNK(tc
) : NULL
;
594 _PUBLIC_
const char *talloc_parent_name(const void *ptr
)
596 struct talloc_chunk
*tc
= talloc_parent_chunk(ptr
);
597 return tc
? tc
->name
: NULL
;
A pool carries an in-pool object count in the first 16
bytes. This is done to support talloc_steal() to a parent outside of the
603 pool. The count includes the pool itself, so a talloc_free() on a pool will
604 only destroy the pool if the count has dropped to zero. A talloc_free() of a
605 pool member will reduce the count, and eventually also call free(3) on the
608 The object count is not put into "struct talloc_chunk" because it is only
609 relevant for talloc pools and the alignment to 16 bytes would increase the
610 memory footprint of each talloc chunk by those 16 bytes.
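/*
  Usage sketch (illustrative only, not part of this file): a pool is created
  with talloc_pool() and then used as an ordinary talloc parent; allocations
  hanging off it are carved out of the pre-allocated buffer until it is full,
  after which they transparently fall back to malloc().

      void *pool = talloc_pool(NULL, 16 * 1024);
      struct foo *f = talloc(pool, struct foo);    (struct foo is hypothetical)
      char *name = talloc_strdup(pool, "example");
      talloc_free(pool);                           (releases pool, f and name together)
*/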
613 struct talloc_pool_hdr
{
615 unsigned int object_count
;
619 union talloc_pool_hdr_cast_u
{
621 struct talloc_pool_hdr
*hdr
;
624 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
626 static inline struct talloc_pool_hdr
*talloc_pool_from_chunk(struct talloc_chunk
*c
)
628 union talloc_chunk_cast_u tcc
= { .chunk
= c
};
629 union talloc_pool_hdr_cast_u tphc
= { tcc
.ptr
- TP_HDR_SIZE
};
633 static inline struct talloc_chunk
*talloc_chunk_from_pool(struct talloc_pool_hdr
*h
)
635 union talloc_pool_hdr_cast_u tphc
= { .hdr
= h
};
636 union talloc_chunk_cast_u tcc
= { .ptr
= tphc
.ptr
+ TP_HDR_SIZE
};
640 static inline void *tc_pool_end(struct talloc_pool_hdr
*pool_hdr
)
642 struct talloc_chunk
*tc
= talloc_chunk_from_pool(pool_hdr
);
643 return (char *)tc
+ TC_HDR_SIZE
+ pool_hdr
->poolsize
;
646 static inline size_t tc_pool_space_left(struct talloc_pool_hdr
*pool_hdr
)
648 return (char *)tc_pool_end(pool_hdr
) - (char *)pool_hdr
->end
;
651 /* If tc is inside a pool, this gives the next neighbour. */
652 static inline void *tc_next_chunk(struct talloc_chunk
*tc
)
654 return (char *)tc
+ TC_ALIGN16(TC_HDR_SIZE
+ tc
->size
);
657 static inline void *tc_pool_first_chunk(struct talloc_pool_hdr
*pool_hdr
)
659 struct talloc_chunk
*tc
= talloc_chunk_from_pool(pool_hdr
);
660 return tc_next_chunk(tc
);
/* Mark the whole remaining pool as not accessible */
664 static inline void tc_invalidate_pool(struct talloc_pool_hdr
*pool_hdr
)
666 size_t flen
= tc_pool_space_left(pool_hdr
);
668 if (unlikely(talloc_fill
.enabled
)) {
669 memset(pool_hdr
->end
, talloc_fill
.fill_value
, flen
);
672 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
673 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr
->end
, flen
);
681 static inline struct talloc_chunk
*tc_alloc_pool(struct talloc_chunk
*parent
,
682 size_t size
, size_t prefix_len
)
684 struct talloc_pool_hdr
*pool_hdr
= NULL
;
685 union talloc_chunk_cast_u tcc
;
687 struct talloc_chunk
*result
;
690 if (parent
== NULL
) {
694 if (parent
->flags
& TALLOC_FLAG_POOL
) {
695 pool_hdr
= talloc_pool_from_chunk(parent
);
697 else if (parent
->flags
& TALLOC_FLAG_POOLMEM
) {
698 pool_hdr
= parent
->pool
;
701 if (pool_hdr
== NULL
) {
705 space_left
= tc_pool_space_left(pool_hdr
);
708 * Align size to 16 bytes
710 chunk_size
= TC_ALIGN16(size
+ prefix_len
);
712 if (space_left
< chunk_size
) {
716 tcc
= (union talloc_chunk_cast_u
) {
717 .ptr
= ((uint8_t *)pool_hdr
->end
) + prefix_len
721 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
722 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr
->end
, chunk_size
);
725 pool_hdr
->end
= (void *)((char *)pool_hdr
->end
+ chunk_size
);
727 result
->flags
= talloc_magic
| TALLOC_FLAG_POOLMEM
;
728 result
->pool
= pool_hdr
;
730 pool_hdr
->object_count
++;
736 Allocate a bit of memory as a child of an existing pointer
738 static inline void *__talloc_with_prefix(const void *context
,
741 struct talloc_chunk
**tc_ret
)
743 struct talloc_chunk
*tc
= NULL
;
744 struct talloc_memlimit
*limit
= NULL
;
745 size_t total_len
= TC_HDR_SIZE
+ size
+ prefix_len
;
746 struct talloc_chunk
*parent
= NULL
;
748 if (unlikely(context
== NULL
)) {
749 context
= null_context
;
752 if (unlikely(size
>= MAX_TALLOC_SIZE
)) {
756 if (unlikely(total_len
< TC_HDR_SIZE
)) {
760 if (likely(context
!= NULL
)) {
761 parent
= talloc_chunk_from_ptr(context
);
763 if (parent
->limit
!= NULL
) {
764 limit
= parent
->limit
;
767 tc
= tc_alloc_pool(parent
, TC_HDR_SIZE
+size
, prefix_len
);
772 union talloc_chunk_cast_u tcc
;
775 * Only do the memlimit check/update on actual allocation.
777 if (!talloc_memlimit_check(limit
, total_len
)) {
782 ptr
= malloc(total_len
);
783 if (unlikely(ptr
== NULL
)) {
786 tcc
= (union talloc_chunk_cast_u
) { .ptr
= ptr
+ prefix_len
};
788 tc
->flags
= talloc_magic
;
791 talloc_memlimit_grow(limit
, total_len
);
796 tc
->destructor
= NULL
;
801 if (likely(context
!= NULL
)) {
803 parent
->child
->parent
= NULL
;
804 tc
->next
= parent
->child
;
813 tc
->next
= tc
->prev
= tc
->parent
= NULL
;
817 return TC_PTR_FROM_CHUNK(tc
);
820 static inline void *__talloc(const void *context
,
822 struct talloc_chunk
**tc
)
824 return __talloc_with_prefix(context
, size
, 0, tc
);
828 * Create a talloc pool
831 static inline void *_talloc_pool(const void *context
, size_t size
)
833 struct talloc_chunk
*tc
;
834 struct talloc_pool_hdr
*pool_hdr
;
837 result
= __talloc_with_prefix(context
, size
, TP_HDR_SIZE
, &tc
);
839 if (unlikely(result
== NULL
)) {
843 pool_hdr
= talloc_pool_from_chunk(tc
);
845 tc
->flags
|= TALLOC_FLAG_POOL
;
848 pool_hdr
->object_count
= 1;
849 pool_hdr
->end
= result
;
850 pool_hdr
->poolsize
= size
;
852 tc_invalidate_pool(pool_hdr
);
857 _PUBLIC_
void *talloc_pool(const void *context
, size_t size
)
859 return _talloc_pool(context
, size
);
863 * Create a talloc pool correctly sized for a basic size plus
864 * a number of subobjects whose total size is given. Essentially
865 * a custom allocator for talloc to reduce fragmentation.
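/*
  Usage sketch (illustrative only, not part of this file), using the public
  talloc_pooled_object() macro from talloc.h; "struct request" and its two
  string members are hypothetical:

      struct request *r = talloc_pooled_object(ctx, struct request, 2,
                                               strlen(name) + strlen(addr) + 2);
      r->name = talloc_strdup(r, name);    (served from the same pool, no extra malloc)
      r->addr = talloc_strdup(r, addr);
*/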
868 _PUBLIC_
void *_talloc_pooled_object(const void *ctx
,
870 const char *type_name
,
871 unsigned num_subobjects
,
872 size_t total_subobjects_size
)
874 size_t poolsize
, subobjects_slack
, tmp
;
875 struct talloc_chunk
*tc
;
876 struct talloc_pool_hdr
*pool_hdr
;
879 poolsize
= type_size
+ total_subobjects_size
;
881 if ((poolsize
< type_size
) || (poolsize
< total_subobjects_size
)) {
885 if (num_subobjects
== UINT_MAX
) {
888 num_subobjects
+= 1; /* the object body itself */
891 * Alignment can increase the pool size by at most 15 bytes per object
892 * plus alignment for the object itself
894 subobjects_slack
= (TC_HDR_SIZE
+ TP_HDR_SIZE
+ 15) * num_subobjects
;
895 if (subobjects_slack
< num_subobjects
) {
899 tmp
= poolsize
+ subobjects_slack
;
900 if ((tmp
< poolsize
) || (tmp
< subobjects_slack
)) {
905 ret
= _talloc_pool(ctx
, poolsize
);
910 tc
= talloc_chunk_from_ptr(ret
);
911 tc
->size
= type_size
;
913 pool_hdr
= talloc_pool_from_chunk(tc
);
915 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
916 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr
->end
, type_size
);
919 pool_hdr
->end
= ((char *)pool_hdr
->end
+ TC_ALIGN16(type_size
));
921 _tc_set_name_const(tc
, type_name
);
set up a destructor to be called on free of a pointer.
The destructor should return 0 on success, or -1 on failure.
If the destructor fails then the free fails, and the memory can
continue to be used.
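/*
  Usage sketch (illustrative only, not part of this file); the struct and
  the destructor below are hypothetical:

      static int fd_handle_destructor(struct fd_handle *h)
      {
              close(h->fd);
              return 0;      (returning -1 would make talloc_free() fail)
      }

      struct fd_handle *h = talloc(ctx, struct fd_handle);
      h->fd = open(path, O_RDONLY);
      talloc_set_destructor(h, fd_handle_destructor);
*/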
934 _PUBLIC_
void _talloc_set_destructor(const void *ptr
, int (*destructor
)(void *))
936 struct talloc_chunk
*tc
= talloc_chunk_from_ptr(ptr
);
937 tc
->destructor
= destructor
;
941 increase the reference count on a piece of memory.
943 _PUBLIC_
int talloc_increase_ref_count(const void *ptr
)
945 if (unlikely(!talloc_reference(null_context
, ptr
))) {
952 helper for talloc_reference()
954 this is referenced by a function pointer and should not be inline
956 static int talloc_reference_destructor(struct talloc_reference_handle
*handle
)
958 struct talloc_chunk
*ptr_tc
= talloc_chunk_from_ptr(handle
->ptr
);
959 _TLIST_REMOVE(ptr_tc
->refs
, handle
);
more efficient way to add a name to a pointer - the name must point to a constant string
967 static inline void _tc_set_name_const(struct talloc_chunk
*tc
,
974 internal talloc_named_const()
976 static inline void *_talloc_named_const(const void *context
, size_t size
, const char *name
)
979 struct talloc_chunk
*tc
;
981 ptr
= __talloc(context
, size
, &tc
);
982 if (unlikely(ptr
== NULL
)) {
986 _tc_set_name_const(tc
, name
);
992 make a secondary reference to a pointer, hanging off the given context.
993 the pointer remains valid until both the original caller and this given
996 the major use for this is when two different structures need to reference the
same underlying data, and you want to be able to free the two instances separately, and in either order
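/*
  Usage sketch (illustrative only, not part of this file): two contexts
  sharing one buffer; the buffer stays alive until both owners have
  dropped it:

      char *buf = talloc_strdup(ctx1, "shared data");
      talloc_reference(ctx2, buf);     (ctx2 now also holds buf)
      talloc_unlink(ctx1, buf);        (buf is still alive via ctx2)
      talloc_unlink(ctx2, buf);        (last owner gone, buf is freed)
*/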
1000 _PUBLIC_
void *_talloc_reference_loc(const void *context
, const void *ptr
, const char *location
)
1002 struct talloc_chunk
*tc
;
1003 struct talloc_reference_handle
*handle
;
1004 if (unlikely(ptr
== NULL
)) return NULL
;
1006 tc
= talloc_chunk_from_ptr(ptr
);
1007 handle
= (struct talloc_reference_handle
*)_talloc_named_const(context
,
1008 sizeof(struct talloc_reference_handle
),
1009 TALLOC_MAGIC_REFERENCE
);
1010 if (unlikely(handle
== NULL
)) return NULL
;
1012 /* note that we hang the destructor off the handle, not the
1013 main context as that allows the caller to still setup their
1014 own destructor on the context if they want to */
1015 talloc_set_destructor(handle
, talloc_reference_destructor
);
1016 handle
->ptr
= discard_const_p(void, ptr
);
1017 handle
->location
= location
;
1018 _TLIST_ADD(tc
->refs
, handle
);
1022 static void *_talloc_steal_internal(const void *new_ctx
, const void *ptr
);
1024 static inline void _tc_free_poolmem(struct talloc_chunk
*tc
,
1025 const char *location
)
1027 struct talloc_pool_hdr
*pool
;
1028 struct talloc_chunk
*pool_tc
;
1032 pool_tc
= talloc_chunk_from_pool(pool
);
1033 next_tc
= tc_next_chunk(tc
);
1035 _talloc_chunk_set_free(tc
, location
);
1037 TC_INVALIDATE_FULL_CHUNK(tc
);
1039 if (unlikely(pool
->object_count
== 0)) {
1040 talloc_abort("Pool object count zero!");
1044 pool
->object_count
--;
1046 if (unlikely(pool
->object_count
== 1
1047 && !(pool_tc
->flags
& TALLOC_FLAG_FREE
))) {
1049 * if there is just one object left in the pool
* and pool_tc->flags does not have TALLOC_FLAG_FREE,
1051 * it means this is the pool itself and
1052 * the rest is available for new objects
1055 pool
->end
= tc_pool_first_chunk(pool
);
1056 tc_invalidate_pool(pool
);
1060 if (unlikely(pool
->object_count
== 0)) {
1062 * we mark the freed memory with where we called the free
1063 * from. This means on a double free error we can report where
1064 * the first free came from
1066 pool_tc
->name
= location
;
1068 if (pool_tc
->flags
& TALLOC_FLAG_POOLMEM
) {
1069 _tc_free_poolmem(pool_tc
, location
);
1072 * The tc_memlimit_update_on_free()
1073 * call takes into account the
1074 * prefix TP_HDR_SIZE allocated before
1075 * the pool talloc_chunk.
1077 tc_memlimit_update_on_free(pool_tc
);
1078 TC_INVALIDATE_FULL_CHUNK(pool_tc
);
1084 if (pool
->end
== next_tc
) {
* if pool->end still points to the end of
1087 * 'tc' (which is stored in the 'next_tc' variable),
1088 * we can reclaim the memory of 'tc'.
1095 * Do nothing. The memory is just "wasted", waiting for the pool
1096 * itself to be freed.
1100 static inline void _tc_free_children_internal(struct talloc_chunk
*tc
,
1102 const char *location
);
1104 static inline int _talloc_free_internal(void *ptr
, const char *location
);
1107 internal free call that takes a struct talloc_chunk *.
1109 static inline int _tc_free_internal(struct talloc_chunk
*tc
,
1110 const char *location
)
1113 void *ptr
= TC_PTR_FROM_CHUNK(tc
);
1115 if (unlikely(tc
->refs
)) {
1117 /* check if this is a reference from a child or
* grandchild back to its parent or grandparent
1120 * in that case we need to remove the reference and
1121 * call another instance of talloc_free() on the current
1124 is_child
= talloc_is_parent(tc
->refs
, ptr
);
1125 _talloc_free_internal(tc
->refs
, location
);
1127 return _talloc_free_internal(ptr
, location
);
1132 if (unlikely(tc
->flags
& TALLOC_FLAG_LOOP
)) {
1133 /* we have a free loop - stop looping */
1137 if (unlikely(tc
->destructor
)) {
1138 talloc_destructor_t d
= tc
->destructor
;
1141 * Protect the destructor against some overwrite
1142 * attacks, by explicitly checking it has the right
1145 if (talloc_chunk_from_ptr(ptr
) != tc
) {
1147 * This can't actually happen, the
1148 * call itself will panic.
1150 TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1153 if (d
== (talloc_destructor_t
)-1) {
1156 tc
->destructor
= (talloc_destructor_t
)-1;
1159 * Only replace the destructor pointer if
1160 * calling the destructor didn't modify it.
1162 if (tc
->destructor
== (talloc_destructor_t
)-1) {
1167 tc
->destructor
= NULL
;
1171 _TLIST_REMOVE(tc
->parent
->child
, tc
);
1172 if (tc
->parent
->child
) {
1173 tc
->parent
->child
->parent
= tc
->parent
;
1176 if (tc
->prev
) tc
->prev
->next
= tc
->next
;
1177 if (tc
->next
) tc
->next
->prev
= tc
->prev
;
1178 tc
->prev
= tc
->next
= NULL
;
1181 tc
->flags
|= TALLOC_FLAG_LOOP
;
1183 _tc_free_children_internal(tc
, ptr
, location
);
1185 _talloc_chunk_set_free(tc
, location
);
1187 if (tc
->flags
& TALLOC_FLAG_POOL
) {
1188 struct talloc_pool_hdr
*pool
;
1190 pool
= talloc_pool_from_chunk(tc
);
1192 if (unlikely(pool
->object_count
== 0)) {
1193 talloc_abort("Pool object count zero!");
1197 pool
->object_count
--;
1199 if (likely(pool
->object_count
!= 0)) {
1204 * With object_count==0, a pool becomes a normal piece of
1205 * memory to free. If it's allocated inside a pool, it needs
1206 * to be freed as poolmem, else it needs to be just freed.
1213 if (tc
->flags
& TALLOC_FLAG_POOLMEM
) {
1214 _tc_free_poolmem(tc
, location
);
1218 tc_memlimit_update_on_free(tc
);
1220 TC_INVALIDATE_FULL_CHUNK(tc
);
1226 internal talloc_free call
1228 static inline int _talloc_free_internal(void *ptr
, const char *location
)
1230 struct talloc_chunk
*tc
;
1232 if (unlikely(ptr
== NULL
)) {
/* possibly initialise the talloc fill value */
1237 if (unlikely(!talloc_fill
.initialised
)) {
1238 const char *fill
= getenv(TALLOC_FILL_ENV
);
1240 talloc_fill
.enabled
= true;
1241 talloc_fill
.fill_value
= strtoul(fill
, NULL
, 0);
1243 talloc_fill
.initialised
= true;
1246 tc
= talloc_chunk_from_ptr(ptr
);
1247 return _tc_free_internal(tc
, location
);
1250 static inline size_t _talloc_total_limit_size(const void *ptr
,
1251 struct talloc_memlimit
*old_limit
,
1252 struct talloc_memlimit
*new_limit
);
1255 move a lump of memory from one talloc context to another return the
1256 ptr on success, or NULL if it could not be transferred.
1257 passing NULL as ptr will always return NULL with no side effects.
1259 static void *_talloc_steal_internal(const void *new_ctx
, const void *ptr
)
1261 struct talloc_chunk
*tc
, *new_tc
;
1262 size_t ctx_size
= 0;
1264 if (unlikely(!ptr
)) {
1268 if (unlikely(new_ctx
== NULL
)) {
1269 new_ctx
= null_context
;
1272 tc
= talloc_chunk_from_ptr(ptr
);
1274 if (tc
->limit
!= NULL
) {
1276 ctx_size
= _talloc_total_limit_size(ptr
, NULL
, NULL
);
1278 /* Decrement the memory limit from the source .. */
1279 talloc_memlimit_shrink(tc
->limit
->upper
, ctx_size
);
1281 if (tc
->limit
->parent
== tc
) {
1282 tc
->limit
->upper
= NULL
;
1288 if (unlikely(new_ctx
== NULL
)) {
1290 _TLIST_REMOVE(tc
->parent
->child
, tc
);
1291 if (tc
->parent
->child
) {
1292 tc
->parent
->child
->parent
= tc
->parent
;
1295 if (tc
->prev
) tc
->prev
->next
= tc
->next
;
1296 if (tc
->next
) tc
->next
->prev
= tc
->prev
;
1299 tc
->parent
= tc
->next
= tc
->prev
= NULL
;
1300 return discard_const_p(void, ptr
);
1303 new_tc
= talloc_chunk_from_ptr(new_ctx
);
1305 if (unlikely(tc
== new_tc
|| tc
->parent
== new_tc
)) {
1306 return discard_const_p(void, ptr
);
1310 _TLIST_REMOVE(tc
->parent
->child
, tc
);
1311 if (tc
->parent
->child
) {
1312 tc
->parent
->child
->parent
= tc
->parent
;
1315 if (tc
->prev
) tc
->prev
->next
= tc
->next
;
1316 if (tc
->next
) tc
->next
->prev
= tc
->prev
;
1317 tc
->prev
= tc
->next
= NULL
;
1320 tc
->parent
= new_tc
;
1321 if (new_tc
->child
) new_tc
->child
->parent
= NULL
;
1322 _TLIST_ADD(new_tc
->child
, tc
);
1324 if (tc
->limit
|| new_tc
->limit
) {
1325 ctx_size
= _talloc_total_limit_size(ptr
, tc
->limit
,
1327 /* .. and increment it in the destination. */
1328 if (new_tc
->limit
) {
1329 talloc_memlimit_grow(new_tc
->limit
, ctx_size
);
1333 return discard_const_p(void, ptr
);
1337 move a lump of memory from one talloc context to another return the
1338 ptr on success, or NULL if it could not be transferred.
1339 passing NULL as ptr will always return NULL with no side effects.
1341 _PUBLIC_
void *_talloc_steal_loc(const void *new_ctx
, const void *ptr
, const char *location
)
1343 struct talloc_chunk
*tc
;
1345 if (unlikely(ptr
== NULL
)) {
1349 tc
= talloc_chunk_from_ptr(ptr
);
1351 if (unlikely(tc
->refs
!= NULL
) && talloc_parent(ptr
) != new_ctx
) {
1352 struct talloc_reference_handle
*h
;
1354 talloc_log("WARNING: talloc_steal with references at %s\n",
1357 for (h
=tc
->refs
; h
; h
=h
->next
) {
1358 talloc_log("\treference at %s\n",
1364 /* this test is probably too expensive to have on in the
normal build, but it is useful for debugging */
1366 if (talloc_is_parent(new_ctx
, ptr
)) {
1367 talloc_log("WARNING: stealing into talloc child at %s\n", location
);
1371 return _talloc_steal_internal(new_ctx
, ptr
);
1375 this is like a talloc_steal(), but you must supply the old
1376 parent. This resolves the ambiguity in a talloc_steal() which is
1377 called on a context that has more than one parent (via references)
1379 The old parent can be either a reference or a parent
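/*
  Usage sketch (illustrative only, not part of this file): when a chunk has
  several parents via references, talloc_reparent() says explicitly which
  link should move; the context names are hypothetical:

      char *buf = talloc_strdup(old_parent, "payload");
      talloc_reference(other_ctx, buf);
      talloc_reparent(old_parent, new_parent, buf);   (only the old_parent link moves)
*/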
1381 _PUBLIC_
void *talloc_reparent(const void *old_parent
, const void *new_parent
, const void *ptr
)
1383 struct talloc_chunk
*tc
;
1384 struct talloc_reference_handle
*h
;
1386 if (unlikely(ptr
== NULL
)) {
1390 if (old_parent
== talloc_parent(ptr
)) {
1391 return _talloc_steal_internal(new_parent
, ptr
);
1394 tc
= talloc_chunk_from_ptr(ptr
);
1395 for (h
=tc
->refs
;h
;h
=h
->next
) {
1396 if (talloc_parent(h
) == old_parent
) {
1397 if (_talloc_steal_internal(new_parent
, h
) != h
) {
1400 return discard_const_p(void, ptr
);
1404 /* it wasn't a parent */
remove a secondary reference to a pointer. This undoes what
1410 talloc_reference() has done. The context and pointer arguments
1411 must match those given to a talloc_reference()
1413 static inline int talloc_unreference(const void *context
, const void *ptr
)
1415 struct talloc_chunk
*tc
= talloc_chunk_from_ptr(ptr
);
1416 struct talloc_reference_handle
*h
;
1418 if (unlikely(context
== NULL
)) {
1419 context
= null_context
;
1422 for (h
=tc
->refs
;h
;h
=h
->next
) {
1423 struct talloc_chunk
*p
= talloc_parent_chunk(h
);
1425 if (context
== NULL
) break;
1426 } else if (TC_PTR_FROM_CHUNK(p
) == context
) {
1434 return _talloc_free_internal(h
, __location__
);
1438 remove a specific parent context from a pointer. This is a more
1439 controlled variant of talloc_free()
1442 /* coverity[ -tainted_data_sink : arg-1 ] */
1443 _PUBLIC_
int talloc_unlink(const void *context
, void *ptr
)
1445 struct talloc_chunk
*tc_p
, *new_p
, *tc_c
;
1452 if (context
== NULL
) {
1453 context
= null_context
;
1456 if (talloc_unreference(context
, ptr
) == 0) {
1460 if (context
!= NULL
) {
1461 tc_c
= talloc_chunk_from_ptr(context
);
1465 if (tc_c
!= talloc_parent_chunk(ptr
)) {
1469 tc_p
= talloc_chunk_from_ptr(ptr
);
1471 if (tc_p
->refs
== NULL
) {
1472 return _talloc_free_internal(ptr
, __location__
);
1475 new_p
= talloc_parent_chunk(tc_p
->refs
);
1477 new_parent
= TC_PTR_FROM_CHUNK(new_p
);
1482 if (talloc_unreference(new_parent
, ptr
) != 0) {
1486 _talloc_steal_internal(new_parent
, ptr
);
1492 add a name to an existing pointer - va_list version
1494 static inline const char *tc_set_name_v(struct talloc_chunk
*tc
,
1496 va_list ap
) PRINTF_ATTRIBUTE(2,0);
1498 static inline const char *tc_set_name_v(struct talloc_chunk
*tc
,
1502 struct talloc_chunk
*name_tc
= _vasprintf_tc(TC_PTR_FROM_CHUNK(tc
),
1505 if (likely(name_tc
)) {
1506 tc
->name
= TC_PTR_FROM_CHUNK(name_tc
);
1507 _tc_set_name_const(name_tc
, ".name");
1515 add a name to an existing pointer
1517 _PUBLIC_
const char *talloc_set_name(const void *ptr
, const char *fmt
, ...)
1519 struct talloc_chunk
*tc
= talloc_chunk_from_ptr(ptr
);
1523 name
= tc_set_name_v(tc
, fmt
, ap
);
1530 create a named talloc pointer. Any talloc pointer can be named, and
1531 talloc_named() operates just like talloc() except that it allows you
1532 to name the pointer.
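/*
  Usage sketch (illustrative only, not part of this file): named contexts
  make talloc_report_full() output much easier to read; conn_id is a
  hypothetical variable:

      void *ctx = talloc_named(NULL, 0, "connection %d", conn_id);
      char *reply = talloc_strdup(ctx, "data");
      talloc_set_name(reply, "reply buffer for connection %d", conn_id);
*/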
1534 _PUBLIC_
void *talloc_named(const void *context
, size_t size
, const char *fmt
, ...)
1539 struct talloc_chunk
*tc
;
1541 ptr
= __talloc(context
, size
, &tc
);
1542 if (unlikely(ptr
== NULL
)) return NULL
;
1545 name
= tc_set_name_v(tc
, fmt
, ap
);
1548 if (unlikely(name
== NULL
)) {
1549 _talloc_free_internal(ptr
, __location__
);
1557 return the name of a talloc ptr, or "UNNAMED"
1559 static inline const char *__talloc_get_name(const void *ptr
)
1561 struct talloc_chunk
*tc
= talloc_chunk_from_ptr(ptr
);
1562 if (unlikely(tc
->name
== TALLOC_MAGIC_REFERENCE
)) {
1563 return ".reference";
1565 if (likely(tc
->name
)) {
1571 _PUBLIC_
const char *talloc_get_name(const void *ptr
)
1573 return __talloc_get_name(ptr
);
1577 check if a pointer has the given name. If it does, return the pointer,
1578 otherwise return NULL
1580 _PUBLIC_
void *talloc_check_name(const void *ptr
, const char *name
)
1583 if (unlikely(ptr
== NULL
)) return NULL
;
1584 pname
= __talloc_get_name(ptr
);
1585 if (likely(pname
== name
|| strcmp(pname
, name
) == 0)) {
1586 return discard_const_p(void, ptr
);
1591 static void talloc_abort_type_mismatch(const char *location
,
1593 const char *expected
)
1597 reason
= talloc_asprintf(NULL
,
1598 "%s: Type mismatch: name[%s] expected[%s]",
1603 reason
= "Type mismatch";
1606 talloc_abort(reason
);
1609 _PUBLIC_
void *_talloc_get_type_abort(const void *ptr
, const char *name
, const char *location
)
1613 if (unlikely(ptr
== NULL
)) {
1614 talloc_abort_type_mismatch(location
, NULL
, name
);
1618 pname
= __talloc_get_name(ptr
);
1619 if (likely(pname
== name
|| strcmp(pname
, name
) == 0)) {
1620 return discard_const_p(void, ptr
);
1623 talloc_abort_type_mismatch(location
, pname
, name
);
1628 this is for compatibility with older versions of talloc
1630 _PUBLIC_
void *talloc_init(const char *fmt
, ...)
1635 struct talloc_chunk
*tc
;
1637 ptr
= __talloc(NULL
, 0, &tc
);
1638 if (unlikely(ptr
== NULL
)) return NULL
;
1641 name
= tc_set_name_v(tc
, fmt
, ap
);
1644 if (unlikely(name
== NULL
)) {
1645 _talloc_free_internal(ptr
, __location__
);
1652 static inline void _tc_free_children_internal(struct talloc_chunk
*tc
,
1654 const char *location
)
1657 /* we need to work out who will own an abandoned child
1658 if it cannot be freed. In priority order, the first
choice is the owner of any remaining reference to this
1660 pointer, the second choice is our parent, and the
1661 final choice is the null context. */
1662 void *child
= TC_PTR_FROM_CHUNK(tc
->child
);
1663 const void *new_parent
= null_context
;
1664 if (unlikely(tc
->child
->refs
)) {
1665 struct talloc_chunk
*p
= talloc_parent_chunk(tc
->child
->refs
);
1666 if (p
) new_parent
= TC_PTR_FROM_CHUNK(p
);
1668 if (unlikely(_tc_free_internal(tc
->child
, location
) == -1)) {
1669 if (talloc_parent_chunk(child
) != tc
) {
1671 * Destructor already reparented this child.
1672 * No further reparenting needed.
1676 if (new_parent
== null_context
) {
1677 struct talloc_chunk
*p
= talloc_parent_chunk(ptr
);
1678 if (p
) new_parent
= TC_PTR_FROM_CHUNK(p
);
1680 _talloc_steal_internal(new_parent
, child
);
1686 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1687 should probably not be used in new code. It's in here to keep the talloc
1688 code consistent across Samba 3 and 4.
1690 _PUBLIC_
void talloc_free_children(void *ptr
)
1692 struct talloc_chunk
*tc_name
= NULL
;
1693 struct talloc_chunk
*tc
;
1695 if (unlikely(ptr
== NULL
)) {
1699 tc
= talloc_chunk_from_ptr(ptr
);
1701 /* we do not want to free the context name if it is a child .. */
1702 if (likely(tc
->child
)) {
1703 for (tc_name
= tc
->child
; tc_name
; tc_name
= tc_name
->next
) {
1704 if (tc
->name
== TC_PTR_FROM_CHUNK(tc_name
)) break;
1707 _TLIST_REMOVE(tc
->child
, tc_name
);
1709 tc
->child
->parent
= tc
;
1714 _tc_free_children_internal(tc
, ptr
, __location__
);
1716 /* .. so we put it back after all other children have been freed */
1719 tc
->child
->parent
= NULL
;
1721 tc_name
->parent
= tc
;
1722 _TLIST_ADD(tc
->child
, tc_name
);
1727 Allocate a bit of memory as a child of an existing pointer
1729 _PUBLIC_
void *_talloc(const void *context
, size_t size
)
1731 struct talloc_chunk
*tc
;
1732 return __talloc(context
, size
, &tc
);
1736 externally callable talloc_set_name_const()
1738 _PUBLIC_
void talloc_set_name_const(const void *ptr
, const char *name
)
1740 _tc_set_name_const(talloc_chunk_from_ptr(ptr
), name
);
1744 create a named talloc pointer. Any talloc pointer can be named, and
1745 talloc_named() operates just like talloc() except that it allows you
1746 to name the pointer.
1748 _PUBLIC_
void *talloc_named_const(const void *context
, size_t size
, const char *name
)
1750 return _talloc_named_const(context
, size
, name
);
1754 free a talloc pointer. This also frees all child pointers of this
1757 return 0 if the memory is actually freed, otherwise -1. The memory
1758 will not be freed if the ref_count is > 1 or the destructor (if
1759 any) returns non-zero
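/*
  Usage sketch (illustrative only, not part of this file): the return value
  matters, because a destructor can veto the free; req is a hypothetical
  pointer:

      if (talloc_free(req) == -1) {
              talloc_report_full(req, stderr);   (req was not freed and is still valid)
      }
*/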
1761 _PUBLIC_
int _talloc_free(void *ptr
, const char *location
)
1763 struct talloc_chunk
*tc
;
1765 if (unlikely(ptr
== NULL
)) {
1769 tc
= talloc_chunk_from_ptr(ptr
);
1771 if (unlikely(tc
->refs
!= NULL
)) {
1772 struct talloc_reference_handle
*h
;
1774 if (talloc_parent(ptr
) == null_context
&& tc
->refs
->next
== NULL
) {
1775 /* in this case we do know which parent should
1776 get this pointer, as there is really only
1778 return talloc_unlink(null_context
, ptr
);
1781 talloc_log("ERROR: talloc_free with references at %s\n",
1784 for (h
=tc
->refs
; h
; h
=h
->next
) {
1785 talloc_log("\treference at %s\n",
1791 return _talloc_free_internal(ptr
, location
);
A talloc version of realloc. The context argument is only used if ptr is NULL.
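/*
  Usage sketch (illustrative only, not part of this file): growing and
  shrinking a talloc'ed array with the type-safe talloc_realloc() macro:

      int *v = talloc_array(ctx, int, 8);
      v = talloc_realloc(ctx, v, int, 64);   (may move, so always reassign)
      v = talloc_realloc(ctx, v, int, 0);    (count 0 frees the array and returns NULL)
*/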
1800 _PUBLIC_
void *_talloc_realloc(const void *context
, void *ptr
, size_t size
, const char *name
)
1802 struct talloc_chunk
*tc
;
1804 bool malloced
= false;
1805 struct talloc_pool_hdr
*pool_hdr
= NULL
;
1806 size_t old_size
= 0;
1807 size_t new_size
= 0;
1809 /* size zero is equivalent to free() */
1810 if (unlikely(size
== 0)) {
1811 talloc_unlink(context
, ptr
);
1815 if (unlikely(size
>= MAX_TALLOC_SIZE
)) {
1819 /* realloc(NULL) is equivalent to malloc() */
1821 return _talloc_named_const(context
, size
, name
);
1824 tc
= talloc_chunk_from_ptr(ptr
);
1826 /* don't allow realloc on referenced pointers */
1827 if (unlikely(tc
->refs
)) {
1831 /* don't let anybody try to realloc a talloc_pool */
1832 if (unlikely(tc
->flags
& TALLOC_FLAG_POOL
)) {
1836 if (tc
->limit
&& (size
> tc
->size
)) {
1837 if (!talloc_memlimit_check(tc
->limit
, (size
- tc
->size
))) {
1843 /* handle realloc inside a talloc_pool */
1844 if (unlikely(tc
->flags
& TALLOC_FLAG_POOLMEM
)) {
1845 pool_hdr
= tc
->pool
;
1848 #if (ALWAYS_REALLOC == 0)
1849 /* don't shrink if we have less than 1k to gain */
1850 if (size
< tc
->size
&& tc
->limit
== NULL
) {
1852 void *next_tc
= tc_next_chunk(tc
);
1853 TC_INVALIDATE_SHRINK_CHUNK(tc
, size
);
1855 if (next_tc
== pool_hdr
->end
) {
1856 /* note: tc->size has changed, so this works */
1857 pool_hdr
->end
= tc_next_chunk(tc
);
1860 } else if ((tc
->size
- size
) < 1024) {
1862 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1863 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1864 * after each realloc call, which slows down
1865 * testing a lot :-(.
1867 * That is why we only mark memory as undefined here.
1869 TC_UNDEFINE_SHRINK_CHUNK(tc
, size
);
1871 /* do not shrink if we have less than 1k to gain */
1875 } else if (tc
->size
== size
) {
1877 * do not change the pointer if it is exactly
1885 * by resetting magic we catch users of the old memory
1887 * We mark this memory as free, and also over-stamp the talloc
1888 * magic with the old-style magic.
1890 * Why? This tries to avoid a memory read use-after-free from
1891 * disclosing our talloc magic, which would then allow an
1892 * attacker to prepare a valid header and so run a destructor.
1894 * What else? We have to re-stamp back a valid normal magic
1895 * on this memory once realloc() is done, as it will have done
1896 * a memcpy() into the new valid memory. We can't do this in
1897 * reverse as that would be a real use-after-free.
1899 _talloc_chunk_set_free(tc
, NULL
);
1903 new_ptr
= tc_alloc_pool(tc
, size
+ TC_HDR_SIZE
, 0);
1904 pool_hdr
->object_count
--;
1906 if (new_ptr
== NULL
) {
1907 new_ptr
= malloc(TC_HDR_SIZE
+size
);
1913 memcpy(new_ptr
, tc
, MIN(tc
->size
,size
) + TC_HDR_SIZE
);
1914 TC_INVALIDATE_FULL_CHUNK(tc
);
1917 /* We're doing malloc then free here, so record the difference. */
1918 old_size
= tc
->size
;
1920 new_ptr
= malloc(size
+ TC_HDR_SIZE
);
1922 memcpy(new_ptr
, tc
, MIN(tc
->size
, size
) + TC_HDR_SIZE
);
1928 struct talloc_chunk
*pool_tc
;
1929 void *next_tc
= tc_next_chunk(tc
);
1930 size_t old_chunk_size
= TC_ALIGN16(TC_HDR_SIZE
+ tc
->size
);
1931 size_t new_chunk_size
= TC_ALIGN16(TC_HDR_SIZE
+ size
);
1932 size_t space_needed
;
1934 unsigned int chunk_count
= pool_hdr
->object_count
;
1936 pool_tc
= talloc_chunk_from_pool(pool_hdr
);
1937 if (!(pool_tc
->flags
& TALLOC_FLAG_FREE
)) {
1941 if (chunk_count
== 1) {
1943 * optimize for the case where 'tc' is the only
1944 * chunk in the pool.
1946 char *start
= tc_pool_first_chunk(pool_hdr
);
1947 space_needed
= new_chunk_size
;
1948 space_left
= (char *)tc_pool_end(pool_hdr
) - start
;
1950 if (space_left
>= space_needed
) {
1951 size_t old_used
= TC_HDR_SIZE
+ tc
->size
;
1952 size_t new_used
= TC_HDR_SIZE
+ size
;
1955 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1959 * start -> tc may have
1960 * been freed and thus been marked as
1961 * VALGRIND_MEM_NOACCESS. Set it to
1962 * VALGRIND_MEM_UNDEFINED so we can
1963 * copy into it without valgrind errors.
1964 * We can't just mark
1965 * new_ptr -> new_ptr + old_used
1966 * as this may overlap on top of tc,
1967 * (which is why we use memmove, not
1968 * memcpy below) hence the MIN.
1970 size_t undef_len
= MIN((((char *)tc
) - ((char *)new_ptr
)),old_used
);
1971 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr
, undef_len
);
1975 memmove(new_ptr
, tc
, old_used
);
1977 tc
= (struct talloc_chunk
*)new_ptr
;
1978 TC_UNDEFINE_GROW_CHUNK(tc
, size
);
1981 * first we do not align the pool pointer
1982 * because we want to invalidate the padding
1985 pool_hdr
->end
= new_used
+ (char *)new_ptr
;
1986 tc_invalidate_pool(pool_hdr
);
1988 /* now the aligned pointer */
1989 pool_hdr
->end
= new_chunk_size
+ (char *)new_ptr
;
1996 if (new_chunk_size
== old_chunk_size
) {
1997 TC_UNDEFINE_GROW_CHUNK(tc
, size
);
1998 _talloc_chunk_set_not_free(tc
);
2003 if (next_tc
== pool_hdr
->end
) {
2005 * optimize for the case where 'tc' is the last
2006 * chunk in the pool.
2008 space_needed
= new_chunk_size
- old_chunk_size
;
2009 space_left
= tc_pool_space_left(pool_hdr
);
2011 if (space_left
>= space_needed
) {
2012 TC_UNDEFINE_GROW_CHUNK(tc
, size
);
2013 _talloc_chunk_set_not_free(tc
);
2015 pool_hdr
->end
= tc_next_chunk(tc
);
2020 new_ptr
= tc_alloc_pool(tc
, size
+ TC_HDR_SIZE
, 0);
2022 if (new_ptr
== NULL
) {
2023 new_ptr
= malloc(TC_HDR_SIZE
+size
);
2029 memcpy(new_ptr
, tc
, MIN(tc
->size
,size
) + TC_HDR_SIZE
);
2031 _tc_free_poolmem(tc
, __location__
"_talloc_realloc");
2035 /* We're doing realloc here, so record the difference. */
2036 old_size
= tc
->size
;
2038 new_ptr
= realloc(tc
, size
+ TC_HDR_SIZE
);
2042 if (unlikely(!new_ptr
)) {
2044 * Ok, this is a strange spot. We have to put back
2045 * the old talloc_magic and any flags, except the
* TALLOC_FLAG_FREE as this was not freed by the
2047 * realloc() call after all
2049 _talloc_chunk_set_not_free(tc
);
2054 * tc is now the new value from realloc(), the old memory we
2055 * can't access any more and was preemptively marked as
2056 * TALLOC_FLAG_FREE before the call. Now we mark it as not
2059 tc
= (struct talloc_chunk
*)new_ptr
;
2060 _talloc_chunk_set_not_free(tc
);
2062 tc
->flags
&= ~TALLOC_FLAG_POOLMEM
;
2065 tc
->parent
->child
= tc
;
2068 tc
->child
->parent
= tc
;
2072 tc
->prev
->next
= tc
;
2075 tc
->next
->prev
= tc
;
2078 if (new_size
> old_size
) {
2079 talloc_memlimit_grow(tc
->limit
, new_size
- old_size
);
2080 } else if (new_size
< old_size
) {
2081 talloc_memlimit_shrink(tc
->limit
, old_size
- new_size
);
2085 _tc_set_name_const(tc
, name
);
2087 return TC_PTR_FROM_CHUNK(tc
);
2091 a wrapper around talloc_steal() for situations where you are moving a pointer
2092 between two structures, and want the old pointer to be set to NULL
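/*
  Usage sketch (illustrative only, not part of this file): handing a pointer
  over between two owning structures; dst and src are hypothetical:

      dst->blob = talloc_move(dst, &src->blob);   (src->blob is set to NULL)
*/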
2094 _PUBLIC_
void *_talloc_move(const void *new_ctx
, const void *_pptr
)
2096 const void **pptr
= discard_const_p(const void *,_pptr
);
2097 void *ret
= talloc_steal(new_ctx
, discard_const_p(void, *pptr
));
2102 enum talloc_mem_count_type
{
2108 static inline size_t _talloc_total_mem_internal(const void *ptr
,
2109 enum talloc_mem_count_type type
,
2110 struct talloc_memlimit
*old_limit
,
2111 struct talloc_memlimit
*new_limit
)
2114 struct talloc_chunk
*c
, *tc
;
2123 tc
= talloc_chunk_from_ptr(ptr
);
2125 if (old_limit
|| new_limit
) {
2126 if (tc
->limit
&& tc
->limit
->upper
== old_limit
) {
2127 tc
->limit
->upper
= new_limit
;
2131 /* optimize in the memlimits case */
2132 if (type
== TOTAL_MEM_LIMIT
&&
2133 tc
->limit
!= NULL
&&
2134 tc
->limit
!= old_limit
&&
2135 tc
->limit
->parent
== tc
) {
2136 return tc
->limit
->cur_size
;
2139 if (tc
->flags
& TALLOC_FLAG_LOOP
) {
2143 tc
->flags
|= TALLOC_FLAG_LOOP
;
2145 if (old_limit
|| new_limit
) {
2146 if (old_limit
== tc
->limit
) {
2147 tc
->limit
= new_limit
;
2152 case TOTAL_MEM_SIZE
:
2153 if (likely(tc
->name
!= TALLOC_MAGIC_REFERENCE
)) {
2157 case TOTAL_MEM_BLOCKS
:
2160 case TOTAL_MEM_LIMIT
:
2161 if (likely(tc
->name
!= TALLOC_MAGIC_REFERENCE
)) {
2163 * Don't count memory allocated from a pool
2164 * when calculating limits. Only count the
2167 if (!(tc
->flags
& TALLOC_FLAG_POOLMEM
)) {
2168 if (tc
->flags
& TALLOC_FLAG_POOL
) {
2170 * If this is a pool, the allocated
2171 * size is in the pool header, and
2172 * remember to add in the prefix
2175 struct talloc_pool_hdr
*pool_hdr
2176 = talloc_pool_from_chunk(tc
);
2177 total
= pool_hdr
->poolsize
+
2181 total
= tc
->size
+ TC_HDR_SIZE
;
2187 for (c
= tc
->child
; c
; c
= c
->next
) {
2188 total
+= _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c
), type
,
2189 old_limit
, new_limit
);
2192 tc
->flags
&= ~TALLOC_FLAG_LOOP
;
2198 return the total size of a talloc pool (subtree)
2200 _PUBLIC_
size_t talloc_total_size(const void *ptr
)
2202 return _talloc_total_mem_internal(ptr
, TOTAL_MEM_SIZE
, NULL
, NULL
);
2206 return the total number of blocks in a talloc pool (subtree)
2208 _PUBLIC_
size_t talloc_total_blocks(const void *ptr
)
2210 return _talloc_total_mem_internal(ptr
, TOTAL_MEM_BLOCKS
, NULL
, NULL
);
2214 return the number of external references to a pointer
2216 _PUBLIC_
size_t talloc_reference_count(const void *ptr
)
2218 struct talloc_chunk
*tc
= talloc_chunk_from_ptr(ptr
);
2219 struct talloc_reference_handle
*h
;
2222 for (h
=tc
->refs
;h
;h
=h
->next
) {
2229 report on memory usage by all children of a pointer, giving a full tree view
2231 _PUBLIC_
void talloc_report_depth_cb(const void *ptr
, int depth
, int max_depth
,
2232 void (*callback
)(const void *ptr
,
2233 int depth
, int max_depth
,
2235 void *private_data
),
2238 struct talloc_chunk
*c
, *tc
;
2243 if (ptr
== NULL
) return;
2245 tc
= talloc_chunk_from_ptr(ptr
);
2247 if (tc
->flags
& TALLOC_FLAG_LOOP
) {
2251 callback(ptr
, depth
, max_depth
, 0, private_data
);
2253 if (max_depth
>= 0 && depth
>= max_depth
) {
2257 tc
->flags
|= TALLOC_FLAG_LOOP
;
2258 for (c
=tc
->child
;c
;c
=c
->next
) {
2259 if (c
->name
== TALLOC_MAGIC_REFERENCE
) {
2260 struct talloc_reference_handle
*h
= (struct talloc_reference_handle
*)TC_PTR_FROM_CHUNK(c
);
2261 callback(h
->ptr
, depth
+ 1, max_depth
, 1, private_data
);
2263 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c
), depth
+ 1, max_depth
, callback
, private_data
);
2266 tc
->flags
&= ~TALLOC_FLAG_LOOP
;
2269 static void talloc_report_depth_FILE_helper(const void *ptr
, int depth
, int max_depth
, int is_ref
, void *_f
)
2271 const char *name
= __talloc_get_name(ptr
);
2272 struct talloc_chunk
*tc
;
2273 FILE *f
= (FILE *)_f
;
2276 fprintf(f
, "%*sreference to: %s\n", depth
*4, "", name
);
2280 tc
= talloc_chunk_from_ptr(ptr
);
2281 if (tc
->limit
&& tc
->limit
->parent
== tc
) {
2282 fprintf(f
, "%*s%-30s is a memlimit context"
2283 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2286 (unsigned long)tc
->limit
->max_size
,
2287 (unsigned long)tc
->limit
->cur_size
);
2291 fprintf(f
,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2292 (max_depth
< 0 ? "full " :""), name
,
2293 (unsigned long)talloc_total_size(ptr
),
2294 (unsigned long)talloc_total_blocks(ptr
));
2298 fprintf(f
, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2301 (unsigned long)talloc_total_size(ptr
),
2302 (unsigned long)talloc_total_blocks(ptr
),
2303 (int)talloc_reference_count(ptr
), ptr
);
2306 fprintf(f
, "content: ");
2307 if (talloc_total_size(ptr
)) {
2308 int tot
= talloc_total_size(ptr
);
2311 for (i
= 0; i
< tot
; i
++) {
2312 if ((((char *)ptr
)[i
] > 31) && (((char *)ptr
)[i
] < 126)) {
2313 fprintf(f
, "%c", ((char *)ptr
)[i
]);
2315 fprintf(f
, "~%02x", ((char *)ptr
)[i
]);
2324 report on memory usage by all children of a pointer, giving a full tree view
2326 _PUBLIC_
void talloc_report_depth_file(const void *ptr
, int depth
, int max_depth
, FILE *f
)
2329 talloc_report_depth_cb(ptr
, depth
, max_depth
, talloc_report_depth_FILE_helper
, f
);
2335 report on memory usage by all children of a pointer, giving a full tree view
2337 _PUBLIC_
void talloc_report_full(const void *ptr
, FILE *f
)
2339 talloc_report_depth_file(ptr
, 0, -1, f
);
2343 report on memory usage by all children of a pointer
2345 _PUBLIC_
void talloc_report(const void *ptr
, FILE *f
)
2347 talloc_report_depth_file(ptr
, 0, 1, f
);
2351 enable tracking of the NULL context
2353 _PUBLIC_
void talloc_enable_null_tracking(void)
2355 if (null_context
== NULL
) {
2356 null_context
= _talloc_named_const(NULL
, 0, "null_context");
2357 if (autofree_context
!= NULL
) {
2358 talloc_reparent(NULL
, null_context
, autofree_context
);
2364 enable tracking of the NULL context, not moving the autofree context
2365 into the NULL context. This is needed for the talloc testsuite
2367 _PUBLIC_
void talloc_enable_null_tracking_no_autofree(void)
2369 if (null_context
== NULL
) {
2370 null_context
= _talloc_named_const(NULL
, 0, "null_context");
2375 disable tracking of the NULL context
2377 _PUBLIC_
void talloc_disable_null_tracking(void)
2379 if (null_context
!= NULL
) {
2380 /* we have to move any children onto the real NULL
2382 struct talloc_chunk
*tc
, *tc2
;
2383 tc
= talloc_chunk_from_ptr(null_context
);
2384 for (tc2
= tc
->child
; tc2
; tc2
=tc2
->next
) {
2385 if (tc2
->parent
== tc
) tc2
->parent
= NULL
;
2386 if (tc2
->prev
== tc
) tc2
->prev
= NULL
;
2388 for (tc2
= tc
->next
; tc2
; tc2
=tc2
->next
) {
2389 if (tc2
->parent
== tc
) tc2
->parent
= NULL
;
2390 if (tc2
->prev
== tc
) tc2
->prev
= NULL
;
2395 talloc_free(null_context
);
2396 null_context
= NULL
;
2400 enable leak reporting on exit
2402 _PUBLIC_
void talloc_enable_leak_report(void)
2404 talloc_enable_null_tracking();
2405 talloc_report_null
= true;
2406 talloc_setup_atexit();
2410 enable full leak reporting on exit
2412 _PUBLIC_
void talloc_enable_leak_report_full(void)
2414 talloc_enable_null_tracking();
2415 talloc_report_null_full
= true;
2416 talloc_setup_atexit();
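/*
  Usage sketch (illustrative only, not part of this file): enable leak
  reporting early in main(); anything still hanging off the NULL context
  at exit is then dumped to stderr:

      int main(void)
      {
              talloc_enable_leak_report_full();
              ...
              return 0;
      }
*/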
2420 talloc and zero memory.
2422 _PUBLIC_
void *_talloc_zero(const void *ctx
, size_t size
, const char *name
)
2424 void *p
= _talloc_named_const(ctx
, size
, name
);
2427 memset(p
, '\0', size
);
2434 memdup with a talloc.
2436 _PUBLIC_
void *_talloc_memdup(const void *t
, const void *p
, size_t size
, const char *name
)
2440 if (likely(size
> 0) && unlikely(p
== NULL
)) {
2444 newp
= _talloc_named_const(t
, size
, name
);
2445 if (likely(newp
!= NULL
) && likely(size
> 0)) {
2446 memcpy(newp
, p
, size
);
2452 static inline char *__talloc_strlendup(const void *t
, const char *p
, size_t len
)
2455 struct talloc_chunk
*tc
;
2457 ret
= (char *)__talloc(t
, len
+ 1, &tc
);
2458 if (unlikely(!ret
)) return NULL
;
2460 memcpy(ret
, p
, len
);
2463 _tc_set_name_const(tc
, ret
);
2468 strdup with a talloc
2470 _PUBLIC_
char *talloc_strdup(const void *t
, const char *p
)
2472 if (unlikely(!p
)) return NULL
;
2473 return __talloc_strlendup(t
, p
, strlen(p
));
2477 strndup with a talloc
2479 _PUBLIC_
char *talloc_strndup(const void *t
, const char *p
, size_t n
)
2481 if (unlikely(!p
)) return NULL
;
2482 return __talloc_strlendup(t
, p
, strnlen(p
, n
));
2485 static inline char *__talloc_strlendup_append(char *s
, size_t slen
,
2486 const char *a
, size_t alen
)
2490 ret
= talloc_realloc(NULL
, s
, char, slen
+ alen
+ 1);
2491 if (unlikely(!ret
)) return NULL
;
2493 /* append the string and the trailing \0 */
2494 memcpy(&ret
[slen
], a
, alen
);
2497 _tc_set_name_const(talloc_chunk_from_ptr(ret
), ret
);
2502 * Appends at the end of the string.
2504 _PUBLIC_
char *talloc_strdup_append(char *s
, const char *a
)
2507 return talloc_strdup(NULL
, a
);
2514 return __talloc_strlendup_append(s
, strlen(s
), a
, strlen(a
));
/*
 * Appends at the end of the talloc'ed buffer,
 * not the end of the string.
 */
_PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
{
	size_t slen;

	if (unlikely(!s)) {
		return talloc_strdup(NULL, a);
	}

	if (unlikely(!a)) {
		return s;
	}

	slen = talloc_get_size(s);
	if (likely(slen > 0)) {
		slen--;
	}

	return __talloc_strlendup_append(s, slen, a, strlen(a));
}
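/*
  Illustrative sketch of the _append helpers above (not part of the
  library; "ctx" is a hypothetical talloc context).  Both variants may
  reallocate and move the string, so the result must be assigned back:

      char *s = talloc_strdup(ctx, "abc");

      s = talloc_strdup_append(s, "def");

  talloc_strdup_append() appends after the first NUL found by strlen(),
  while talloc_strdup_append_buffer() appends at the end of the whole
  talloc'ed buffer, which only differs once the buffer holds embedded
  NUL bytes.
*/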
/*
 * Appends at the end of the string.
 */
_PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
{
	if (unlikely(!s)) {
		return talloc_strndup(NULL, a, n);
	}

	if (unlikely(!a)) {
		return s;
	}

	return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
}
/*
 * Appends at the end of the talloc'ed buffer,
 * not the end of the string.
 */
_PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
{
	size_t slen;

	if (unlikely(!s)) {
		return talloc_strndup(NULL, a, n);
	}

	if (unlikely(!a)) {
		return s;
	}

	slen = talloc_get_size(s);
	if (likely(slen > 0)) {
		slen--;
	}

	return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
}
#ifndef HAVE_VA_COPY
#ifdef HAVE___VA_COPY
#define va_copy(dest, src) __va_copy(dest, src)
#else
#define va_copy(dest, src) (dest) = (src)
#endif
#endif
static struct talloc_chunk *_vasprintf_tc(const void *t,
					  const char *fmt,
					  va_list ap) PRINTF_ATTRIBUTE(2,0);

static struct talloc_chunk *_vasprintf_tc(const void *t,
					  const char *fmt,
					  va_list ap)
{
	int vlen;
	size_t len;
	char *ret;
	va_list ap2;
	struct talloc_chunk *tc = NULL;
	char buf[1024];

	/* this call looks strange, but it makes it work on older solaris boxes */
	va_copy(ap2, ap);
	vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
	va_end(ap2);
	if (unlikely(vlen < 0)) {
		return NULL;
	}
	len = vlen;
	if (unlikely(len + 1 < len)) {
		return NULL;
	}

	ret = (char *)__talloc(t, len+1, &tc);
	if (unlikely(!ret)) return NULL;

	if (len < sizeof(buf)) {
		memcpy(ret, buf, len+1);
	} else {
		va_copy(ap2, ap);
		vsnprintf(ret, len+1, fmt, ap2);
		va_end(ap2);
	}

	_tc_set_name_const(tc, ret);

	return tc;
}
_PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
{
	struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);

	if (tc == NULL) {
		return NULL;
	}

	return TC_PTR_FROM_CHUNK(tc);
}
/*
  Perform string formatting, and return a pointer to newly allocated
  memory holding the result, inside a memory pool.
*/
_PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
{
	va_list ap;
	char *ret;

	va_start(ap, fmt);
	ret = talloc_vasprintf(t, fmt, ap);
	va_end(ap);
	return ret;
}
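/*
  Illustrative use of talloc_asprintf() (sketch only; "ctx", "host" and
  "port" are hypothetical):

      char *addr = talloc_asprintf(ctx, "%s:%d", host, port);

      if (addr == NULL) {
          return -1;
      }

  The result is a child of ctx, and its own contents become its talloc
  name, so it shows up readably in talloc reports.
*/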
static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
						 PRINTF_ATTRIBUTE(3,0);

static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
{
	ssize_t alen;
	va_list ap2;
	char c;

	va_copy(ap2, ap);
	alen = vsnprintf(&c, 1, fmt, ap2);
	va_end(ap2);

	if (alen <= 0) {
		/* Either the vsnprintf failed or the format resulted in
		 * no characters being formatted. In the former case, we
		 * ought to return NULL, in the latter we ought to return
		 * the original string. Most current callers of this
		 * function expect it to never return NULL.
		 */
		return s;
	}

	s = talloc_realloc(NULL, s, char, slen + alen + 1);
	if (!s) return NULL;

	va_copy(ap2, ap);
	vsnprintf(s + slen, alen + 1, fmt, ap2);
	va_end(ap2);

	_tc_set_name_const(talloc_chunk_from_ptr(s), s);

	return s;
}
/**
 * Realloc @p s to append the formatted result of @p fmt and @p ap,
 * and return @p s, which may have moved. Good for gradually
 * accumulating output into a string buffer. Appends at the end
 * of the string.
 **/
_PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
{
	if (unlikely(!s)) {
		return talloc_vasprintf(NULL, fmt, ap);
	}

	return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
}
/**
 * Realloc @p s to append the formatted result of @p fmt and @p ap,
 * and return @p s, which may have moved. Always appends at the
 * end of the talloc'ed buffer, not the end of the string.
 **/
_PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
{
	size_t slen;

	if (unlikely(!s)) {
		return talloc_vasprintf(NULL, fmt, ap);
	}

	slen = talloc_get_size(s);
	if (likely(slen > 0)) {
		slen--;
	}

	return __talloc_vaslenprintf_append(s, slen, fmt, ap);
}
/*
  Realloc @p s to append the formatted result of @p fmt and return @p
  s, which may have moved. Good for gradually accumulating output
  into a string buffer.
*/
_PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	s = talloc_vasprintf_append(s, fmt, ap);
	va_end(ap);
	return s;
}
/*
  Realloc @p s to append the formatted result of @p fmt and return @p
  s, which may have moved. Good for gradually accumulating output
  into a buffer.
*/
_PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	s = talloc_vasprintf_append_buffer(s, fmt, ap);
	va_end(ap);
	return s;
}
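/*
  Illustrative sketch of gradually accumulating output with the append
  variants above (not part of the library; "ctx", "names" and "n" are
  hypothetical):

      char *out = talloc_strdup(ctx, "");
      size_t i;

      for (i = 0; i < n; i++) {
          out = talloc_asprintf_append_buffer(out, "%s\n", names[i]);
          if (out == NULL) {
              return -1;
          }
      }

  The _buffer variant avoids rescanning the growing string with strlen()
  on every iteration.
*/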
/*
  alloc an array, checking for integer overflow in the array size
*/
_PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
{
	if (count >= MAX_TALLOC_SIZE/el_size) {
		return NULL;
	}
	return _talloc_named_const(ctx, el_size * count, name);
}
/*
  alloc a zeroed array, checking for integer overflow in the array size
*/
_PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
{
	if (count >= MAX_TALLOC_SIZE/el_size) {
		return NULL;
	}
	return _talloc_zero(ctx, el_size * count, name);
}
/*
  realloc an array, checking for integer overflow in the array size
*/
_PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
{
	if (count >= MAX_TALLOC_SIZE/el_size) {
		return NULL;
	}
	return _talloc_realloc(ctx, ptr, el_size * count, name);
}
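/*
  Illustrative use of the public array wrappers (talloc_array(),
  talloc_zero_array() and talloc_realloc()) that call the functions
  above; "ctx" and the element counts are hypothetical:

      unsigned *ids = talloc_array(ctx, unsigned, 16);
      int *zeroed = talloc_zero_array(ctx, int, 8);

      ids = talloc_realloc(ctx, ids, unsigned, 32);
      if (ids == NULL || zeroed == NULL) {
          return -1;
      }

  A request whose total size would overflow MAX_TALLOC_SIZE returns NULL
  instead of wrapping around.
*/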
/*
  a function version of talloc_realloc(), so it can be passed as a function pointer
  to libraries that want a realloc function (a realloc function encapsulates
  all the basic capabilities of an allocation library, which is why this is useful)
*/
_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
{
	return _talloc_realloc(context, ptr, size, NULL);
}
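/*
  Illustrative sketch of passing talloc_realloc_fn() to code that wants a
  realloc-style callback.  The callback typedef and registration function
  below are hypothetical, purely to show the shape of such an interface:

      typedef void *(*alloc_fn_t)(const void *ctx, void *ptr, size_t size);

      void somelib_set_allocator(alloc_fn_t fn, const void *ctx);

      somelib_set_allocator(talloc_realloc_fn, mem_ctx);

  Allocation is realloc(NULL, size), resize is realloc(ptr, size) and
  free is realloc(ptr, 0), which is why a single realloc-style entry
  point covers the whole allocator.
*/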
static int talloc_autofree_destructor(void *ptr)
{
	autofree_context = NULL;
	return 0;
}
/*
  return a context which will be auto-freed on exit
  this is useful for reducing the noise in leak reports
*/
_PUBLIC_ void *talloc_autofree_context(void)
{
	if (autofree_context == NULL) {
		autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
		talloc_set_destructor(autofree_context, talloc_autofree_destructor);
		talloc_setup_atexit();
	}
	return autofree_context;
}
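/*
  Illustrative use of talloc_autofree_context() (sketch only):

      char *tmp = talloc_strdup(talloc_autofree_context(), "scratch");

  Memory parented to the autofree context is released by the atexit
  handler, so it does not show up as noise in leak reports.
*/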
_PUBLIC_ size_t talloc_get_size(const void *context)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(context);

	return tc->size;
}
/*
  find a parent of this context that has the given name, if any
*/
_PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return NULL;
	}

	tc = talloc_chunk_from_ptr(context);
	while (tc) {
		if (tc->name && strcmp(tc->name, name) == 0) {
			return TC_PTR_FROM_CHUNK(tc);
		}
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
		}
	}
	return NULL;
}
/*
  show the parentage of a context
*/
_PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		fprintf(file, "talloc no parents for NULL\n");
		return;
	}

	tc = talloc_chunk_from_ptr(context);
	fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
	while (tc) {
		fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
		}
	}
	fflush(file);
}
/*
  return 1 if ptr is a parent of context
*/
static int _talloc_is_parent(const void *context, const void *ptr, int depth)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(context);
	while (tc) {
		if (depth <= 0) {
			return 0;
		}
		if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
			depth--;
		}
	}
	return 0;
}
/*
  return 1 if ptr is a parent of context
*/
_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
{
	return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
}
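/*
  Illustrative use of talloc_is_parent() (sketch only; "pool" and "child"
  are hypothetical):

      void *pool = talloc_new(NULL);
      void *child = talloc_size(pool, 32);

  talloc_is_parent(child, pool) returns 1, because pool is an ancestor
  of child, while talloc_is_parent(pool, child) returns 0.
*/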
/*
  return the total size of memory used by this context and all children
*/
static inline size_t _talloc_total_limit_size(const void *ptr,
					      struct talloc_memlimit *old_limit,
					      struct talloc_memlimit *new_limit)
{
	return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
					  old_limit, new_limit);
}
static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
{
	struct talloc_memlimit *l;

	for (l = limit; l != NULL; l = l->upper) {
		if (l->max_size != 0 &&
		    ((l->max_size <= l->cur_size) ||
		     (l->max_size - l->cur_size < size))) {
			return false;
		}
	}

	return true;
}
/*
  Update memory limits when freeing a talloc_chunk.
*/
static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
{
	size_t limit_shrink_size;

	if (!tc->limit) {
		return;
	}

	/*
	 * Pool entries don't count. Only the pools
	 * themselves are counted as part of the memory
	 * limits. Note that this also takes care of
	 * nested pools which have both flags
	 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
	 */
	if (tc->flags & TALLOC_FLAG_POOLMEM) {
		return;
	}

	/*
	 * If we are part of a memory limited context hierarchy
	 * we need to subtract the memory used from the counters
	 */

	limit_shrink_size = tc->size+TC_HDR_SIZE;

	/*
	 * If we're deallocating a pool, take into
	 * account the prefix size added for the pool.
	 */

	if (tc->flags & TALLOC_FLAG_POOL) {
		limit_shrink_size += TP_HDR_SIZE;
	}

	talloc_memlimit_shrink(tc->limit, limit_shrink_size);

	if (tc->limit->parent == tc) {
		free(tc->limit);
	}

	tc->limit = NULL;
}
/*
  Increase memory limit accounting after a malloc/realloc.
*/
static void talloc_memlimit_grow(struct talloc_memlimit *limit,
				 size_t size)
{
	struct talloc_memlimit *l;

	for (l = limit; l != NULL; l = l->upper) {
		size_t new_cur_size = l->cur_size + size;
		if (new_cur_size < l->cur_size) {
			talloc_abort("logic error in talloc_memlimit_grow\n");
			return;
		}
		l->cur_size = new_cur_size;
	}
}
/*
  Decrease memory limit accounting after a free/realloc.
*/
static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
				   size_t size)
{
	struct talloc_memlimit *l;

	for (l = limit; l != NULL; l = l->upper) {
		if (l->cur_size < size) {
			talloc_abort("logic error in talloc_memlimit_shrink\n");
			return;
		}
		l->cur_size = l->cur_size - size;
	}
}
_PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
{
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
	struct talloc_memlimit *orig_limit;
	struct talloc_memlimit *limit = NULL;

	if (tc->limit && tc->limit->parent == tc) {
		tc->limit->max_size = max_size;
		return 0;
	}
	orig_limit = tc->limit;

	limit = malloc(sizeof(struct talloc_memlimit));
	if (limit == NULL) {
		return 1;
	}
	limit->parent = tc;
	limit->max_size = max_size;
	limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);

	if (orig_limit) {
		limit->upper = orig_limit;
	} else {
		limit->upper = NULL;