/*
   Samba Unix SMB/CIFS implementation.

   Samba trivial allocation library - new interface

   NOTE: Please read talloc_guide.txt for full documentation

   Copyright (C) Andrew Tridgell 2004
   Copyright (C) Stefan Metzmacher 2006

   ** NOTE! The following LGPL license applies to the talloc
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

/*
  inspired by http://swapped.cc/halloc/
*/
#include "replace.h"
#include "talloc.h"

#ifdef HAVE_SYS_AUXV_H
#include <sys/auxv.h>
#endif

#if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
#error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
#endif

#if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
#error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
#endif
/* Special macros that are no-ops except when run under Valgrind on
 * x86.  They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
#ifdef HAVE_VALGRIND_MEMCHECK_H
	/* memcheck.h includes valgrind.h */
#include <valgrind/memcheck.h>
#elif defined(HAVE_VALGRIND_H)
#include <valgrind.h>
#endif

/* use this to force every realloc to change the pointer, to stress test
   code that might not cope */
#define ALWAYS_REALLOC 0
#define MAX_TALLOC_SIZE 0x10000000

#define TALLOC_FLAG_FREE 0x01
#define TALLOC_FLAG_LOOP 0x02
#define TALLOC_FLAG_POOL 0x04		/* This is a talloc pool */
#define TALLOC_FLAG_POOLMEM 0x08	/* This is allocated in a pool */

/*
 * Bits above this are random, used to make it harder to fake talloc
 * headers during an attack.  Try not to change this without good reason.
 */
#define TALLOC_FLAG_MASK 0x0F

#define TALLOC_MAGIC_REFERENCE ((const char *)1)

#define TALLOC_MAGIC_BASE 0xe814ec70
#define TALLOC_MAGIC_NON_RANDOM ( \
	~TALLOC_FLAG_MASK & ( \
		TALLOC_MAGIC_BASE + \
		(TALLOC_BUILD_VERSION_MAJOR << 24) + \
		(TALLOC_BUILD_VERSION_MINOR << 16) + \
		(TALLOC_BUILD_VERSION_RELEASE << 8)))
static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
/* by default we abort when given a bad pointer (such as when talloc_free()
   is called on a pointer that came from malloc()) */
#ifndef TALLOC_ABORT
#define TALLOC_ABORT(reason) abort()
#endif

#ifndef discard_const_p
#if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
# define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
#else
# define discard_const_p(type, ptr) ((type *)(ptr))
#endif
#endif
/* these macros gain us a few percent of speed on gcc */
#if (__GNUC__ >= 3)
/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
   as its first argument */
#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#else
#ifndef likely
#define likely(x) (x)
#endif
#ifndef unlikely
#define unlikely(x) (x)
#endif
#endif
/* this null_context is only used if talloc_enable_leak_report() or
   talloc_enable_leak_report_full() is called, otherwise it remains
   NULL
*/
static void *null_context;
static bool talloc_report_null;
static bool talloc_report_null_full;
static void *autofree_context;

static void talloc_setup_atexit(void);

/* used to enable fill of memory on free, which can be useful for
 * catching use after free errors when valgrind is too slow
 */
static struct {
	bool initialised;
	bool enabled;
	uint8_t fill_value;
} talloc_fill;

#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
/*
 * do not wipe the header, to allow the
 * double-free logic to still work
 */
#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size; \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the whole chunk as not accessible */
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
	size_t _flen = TC_HDR_SIZE + (_tc)->size; \
	char *_fptr = (char *)(_tc); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while (0)
#else
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
#endif

#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
	TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
	TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
} while (0)

#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the unused bytes not accessible */
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while (0)
#else
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the unused bytes as undefined */
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the new bytes as undefined */
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
	size_t _new_used = TC_HDR_SIZE + (_new_size); \
	size_t _flen = _new_used - _old_used; \
	char *_fptr = _old_used + (char *)(_tc); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)
struct talloc_reference_handle {
	struct talloc_reference_handle *next, *prev;
	void *ptr;
	const char *location;
};

struct talloc_memlimit {
	struct talloc_chunk *parent;
	struct talloc_memlimit *upper;
	size_t max_size;
	size_t cur_size;
};

static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
				size_t size);
static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
				size_t size);
static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);

static inline void _tc_set_name_const(struct talloc_chunk *tc,
				const char *name);
static struct talloc_chunk *_vasprintf_tc(const void *t,
				const char *fmt,
				va_list ap) PRINTF_ATTRIBUTE(2,0);

typedef int (*talloc_destructor_t)(void *);

struct talloc_pool_hdr;

struct talloc_chunk {
	/*
	 * flags includes the talloc magic, which is randomised to
	 * make overwrite attacks harder
	 */
	unsigned flags;

	/*
	 * If you have a logical tree like:
	 *
	 *           <parent>
	 *           /   |   \
	 *          /    |    \
	 *         /     |     \
	 * <child 1> <child 2> <child 3>
	 *
	 * The actual talloc tree is:
	 *
	 *  <parent>
	 *     |
	 *  <child 1> - <child 2> - <child 3>
	 *
	 * The children are linked with next/prev pointers, and
	 * child 1 is linked to the parent with parent/child
	 * pointers.
	 */

	struct talloc_chunk *next, *prev;
	struct talloc_chunk *parent, *child;
	struct talloc_reference_handle *refs;
	talloc_destructor_t destructor;
	const char *name;
	size_t size;

	/*
	 * limit semantics:
	 * if 'limit' is set it means all *new* children of the context will
	 * be limited to a total aggregate size of max_size for memory
	 * allocations.
	 * cur_size is used to keep track of the current use
	 */
	struct talloc_memlimit *limit;

	/*
	 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
	 * is a pointer to the struct talloc_chunk of the pool that it was
	 * allocated from. This way children can quickly find the pool to chew
	 * from.
	 */
	struct talloc_pool_hdr *pool;
};

/* 16 byte alignment seems to keep everyone happy */
#define TC_ALIGN16(s) (((s)+15)&~15)
#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
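/*
 * A quick worked example of the alignment arithmetic above (illustrative
 * only, not part of the library): TC_ALIGN16() rounds its argument up to
 * the next multiple of 16, so
 *
 *	TC_ALIGN16(1)  == 16
 *	TC_ALIGN16(16) == 16
 *	TC_ALIGN16(17) == 32
 *
 * TC_HDR_SIZE is therefore the chunk header rounded up to 16 bytes, and
 * TC_PTR_FROM_CHUNK() simply skips that header to reach the caller-visible
 * memory.
 */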
_PUBLIC_ int talloc_version_major(void)
{
	return TALLOC_VERSION_MAJOR;
}

_PUBLIC_ int talloc_version_minor(void)
{
	return TALLOC_VERSION_MINOR;
}

_PUBLIC_ int talloc_test_get_magic(void)
{
	return talloc_magic;
}
static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
					  const char *location)
{
	/*
	 * Mark this memory as free, and also over-stamp the talloc
	 * magic with the old-style magic.
	 *
	 * Why? This tries to avoid a memory read use-after-free from
	 * disclosing our talloc magic, which would then allow an
	 * attacker to prepare a valid header and so run a destructor.
	 */
	tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
		| (tc->flags & TALLOC_FLAG_MASK);

	/* we mark the freed memory with where we called the free
	 * from. This means on a double free error we can report where
	 * the first free came from
	 */
	if (location) {
		tc->name = location;
	}
}

static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
{
	/*
	 * Mark this memory as not free.
	 *
	 * Why? This is memory either in a pool (and so available for
	 * talloc's re-use) or after the realloc(). We need to mark
	 * the memory as free() before any realloc() call as we can't
	 * write to the memory after that.
	 *
	 * We put back the normal magic instead of the 'not random'
	 * magic.
	 */

	tc->flags = talloc_magic |
		((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
}
static void (*talloc_log_fn)(const char *message);

_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
{
	talloc_log_fn = log_fn;
}
#ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
void talloc_lib_init(void) __attribute__((constructor));
void talloc_lib_init(void)
{
	uint32_t random_value;
#if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
	uint8_t *p;
	/*
	 * Use the kernel-provided random values used for
	 * ASLR. This won't change per-exec, which is ideal for us
	 */
	p = (uint8_t *) getauxval(AT_RANDOM);
	if (p) {
		/*
		 * We get 16 bytes from getauxval. By calling rand(),
		 * a totally insecure PRNG, but one that will
		 * deterministically have a different value when called
		 * twice, we ensure that if two talloc-like libraries
		 * are somehow loaded in the same address space, that
		 * because we choose different bytes, we will keep the
		 * protection against collision of multiple talloc
		 * libraries.
		 *
		 * This protection is important because the effects of
		 * passing a talloc pointer from one to the other may
		 * be very hard to determine.
		 */
		int offset = rand() % (16 - sizeof(random_value));
		memcpy(&random_value, p + offset, sizeof(random_value));
	} else
#endif
	{
		/*
		 * Otherwise, hope the location we are loaded in
		 * memory is randomised by someone else
		 */
		random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
	}
	talloc_magic = random_value & ~TALLOC_FLAG_MASK;
}
#else
#warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
#endif
static void talloc_lib_atexit(void)
{
	TALLOC_FREE(autofree_context);

	if (talloc_total_size(null_context) == 0) {
		return;
	}

	if (talloc_report_null_full) {
		talloc_report_full(null_context, stderr);
	} else if (talloc_report_null) {
		talloc_report(null_context, stderr);
	}
}

static void talloc_setup_atexit(void)
{
	static bool done;

	if (done) {
		return;
	}

	atexit(talloc_lib_atexit);
	done = true;
}
static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
static void talloc_log(const char *fmt, ...)
{
	va_list ap;
	char *message;

	if (!talloc_log_fn) {
		return;
	}

	va_start(ap, fmt);
	message = talloc_vasprintf(NULL, fmt, ap);
	va_end(ap);

	talloc_log_fn(message);
	talloc_free(message);
}

static void talloc_log_stderr(const char *message)
{
	fprintf(stderr, "%s", message);
}

_PUBLIC_ void talloc_set_log_stderr(void)
{
	talloc_set_log_fn(talloc_log_stderr);
}
static void (*talloc_abort_fn)(const char *reason);

_PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
{
	talloc_abort_fn = abort_fn;
}

static void talloc_abort(const char *reason)
{
	talloc_log("%s\n", reason);

	if (!talloc_abort_fn) {
		TALLOC_ABORT(reason);
	}

	talloc_abort_fn(reason);
}

static void talloc_abort_access_after_free(void)
{
	talloc_abort("Bad talloc magic value - access after free");
}

static void talloc_abort_unknown_value(void)
{
	talloc_abort("Bad talloc magic value - unknown value");
}
/* panic if we get a bad magic value */
static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
{
	const char *pp = (const char *)ptr;
	struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
	if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
		if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
		    == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
			talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
			talloc_abort_access_after_free();
			return NULL;
		}

		talloc_abort_unknown_value();
		return NULL;
	}
	return tc;
}
/* hook into the front of the list */
#define _TLIST_ADD(list, p) \
do { \
	if (!(list)) { \
		(list) = (p); \
		(p)->next = (p)->prev = NULL; \
	} else { \
		(list)->prev = (p); \
		(p)->next = (list); \
		(p)->prev = NULL; \
		(list) = (p); \
	} \
} while (0)

/* remove an element from a list - element doesn't have to be in list. */
#define _TLIST_REMOVE(list, p) \
do { \
	if ((p) == (list)) { \
		(list) = (p)->next; \
		if (list) (list)->prev = NULL; \
	} else { \
		if ((p)->prev) (p)->prev->next = (p)->next; \
		if ((p)->next) (p)->next->prev = (p)->prev; \
	} \
	if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
} while (0)
/*
  return the parent chunk of a pointer
*/
static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
{
	struct talloc_chunk *tc;

	if (unlikely(ptr == NULL)) {
		return NULL;
	}

	tc = talloc_chunk_from_ptr(ptr);
	while (tc->prev) tc=tc->prev;

	return tc->parent;
}

_PUBLIC_ void *talloc_parent(const void *ptr)
{
	struct talloc_chunk *tc = talloc_parent_chunk(ptr);
	return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
}

_PUBLIC_ const char *talloc_parent_name(const void *ptr)
{
	struct talloc_chunk *tc = talloc_parent_chunk(ptr);
	return tc? tc->name : NULL;
}
/*
  A pool carries an in-pool object count in the first 16 bytes. This is done
  to support talloc_steal() to a parent outside of the pool. The count
  includes the pool itself, so a talloc_free() on a pool will only destroy
  the pool if the count has dropped to zero. A talloc_free() of a pool member
  will reduce the count, and eventually also call free(3) on the pool memory.

  The object count is not put into "struct talloc_chunk" because it is only
  relevant for talloc pools and the alignment to 16 bytes would increase the
  memory footprint of each talloc chunk by those 16 bytes.
*/

struct talloc_pool_hdr {
	void *end;
	unsigned int object_count;
	size_t poolsize;
};

#define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
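/*
 * Illustrative sketch of the pool behaviour described above, using only the
 * public talloc.h API (the sizes and variable names are made up):
 *
 *	void *pool = talloc_pool(NULL, 1024);   // object_count == 1
 *	void *a = talloc_size(pool, 100);       // carved from the pool, count == 2
 *	void *b = talloc_size(pool, 100);       // count == 3
 *	talloc_free(a);                         // count drops, pool memory stays mapped
 *	talloc_free(pool);                      // frees b, count reaches 0, free(3) runs
 */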
static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
{
	return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
}

static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
{
	return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
}

static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
{
	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
	return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
}

static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
{
	return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
}

/* If tc is inside a pool, this gives the next neighbour. */
static inline void *tc_next_chunk(struct talloc_chunk *tc)
{
	return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
}

static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
{
	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
	return tc_next_chunk(tc);
}
/* Mark the whole remaining pool as not accessible */
static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
{
	size_t flen = tc_pool_space_left(pool_hdr);

	if (unlikely(talloc_fill.enabled)) {
		memset(pool_hdr->end, talloc_fill.fill_value, flen);
	}

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
	VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
#endif
}
static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
						 size_t size, size_t prefix_len)
{
	struct talloc_pool_hdr *pool_hdr = NULL;
	size_t space_left;
	struct talloc_chunk *result;
	size_t chunk_size;

	if (parent == NULL) {
		return NULL;
	}

	if (parent->flags & TALLOC_FLAG_POOL) {
		pool_hdr = talloc_pool_from_chunk(parent);
	} else if (parent->flags & TALLOC_FLAG_POOLMEM) {
		pool_hdr = parent->pool;
	}

	if (pool_hdr == NULL) {
		return NULL;
	}

	space_left = tc_pool_space_left(pool_hdr);

	/*
	 * Align size to 16 bytes
	 */
	chunk_size = TC_ALIGN16(size + prefix_len);

	if (space_left < chunk_size) {
		return NULL;
	}

	result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
#endif

	pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);

	result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
	result->pool = pool_hdr;

	pool_hdr->object_count++;

	return result;
}
718 Allocate a bit of memory as a child of an existing pointer
720 static inline void *__talloc_with_prefix(const void *context
,
723 struct talloc_chunk
**tc_ret
)
725 struct talloc_chunk
*tc
= NULL
;
726 struct talloc_memlimit
*limit
= NULL
;
727 size_t total_len
= TC_HDR_SIZE
+ size
+ prefix_len
;
728 struct talloc_chunk
*parent
= NULL
;
730 if (unlikely(context
== NULL
)) {
731 context
= null_context
;
734 if (unlikely(size
>= MAX_TALLOC_SIZE
)) {
738 if (unlikely(total_len
< TC_HDR_SIZE
)) {
742 if (likely(context
!= NULL
)) {
743 parent
= talloc_chunk_from_ptr(context
);
745 if (parent
->limit
!= NULL
) {
746 limit
= parent
->limit
;
749 tc
= tc_alloc_pool(parent
, TC_HDR_SIZE
+size
, prefix_len
);
756 * Only do the memlimit check/update on actual allocation.
758 if (!talloc_memlimit_check(limit
, total_len
)) {
763 ptr
= malloc(total_len
);
764 if (unlikely(ptr
== NULL
)) {
767 tc
= (struct talloc_chunk
*)(ptr
+ prefix_len
);
768 tc
->flags
= talloc_magic
;
771 talloc_memlimit_grow(limit
, total_len
);
776 tc
->destructor
= NULL
;
781 if (likely(context
!= NULL
)) {
783 parent
->child
->parent
= NULL
;
784 tc
->next
= parent
->child
;
793 tc
->next
= tc
->prev
= tc
->parent
= NULL
;
797 return TC_PTR_FROM_CHUNK(tc
);
800 static inline void *__talloc(const void *context
,
802 struct talloc_chunk
**tc
)
804 return __talloc_with_prefix(context
, size
, 0, tc
);
808 * Create a talloc pool
811 static inline void *_talloc_pool(const void *context
, size_t size
)
813 struct talloc_chunk
*tc
;
814 struct talloc_pool_hdr
*pool_hdr
;
817 result
= __talloc_with_prefix(context
, size
, TP_HDR_SIZE
, &tc
);
819 if (unlikely(result
== NULL
)) {
823 pool_hdr
= talloc_pool_from_chunk(tc
);
825 tc
->flags
|= TALLOC_FLAG_POOL
;
828 pool_hdr
->object_count
= 1;
829 pool_hdr
->end
= result
;
830 pool_hdr
->poolsize
= size
;
832 tc_invalidate_pool(pool_hdr
);
837 _PUBLIC_
void *talloc_pool(const void *context
, size_t size
)
839 return _talloc_pool(context
, size
);
843 * Create a talloc pool correctly sized for a basic size plus
844 * a number of subobjects whose total size is given. Essentially
845 * a custom allocator for talloc to reduce fragmentation.
848 _PUBLIC_
void *_talloc_pooled_object(const void *ctx
,
850 const char *type_name
,
851 unsigned num_subobjects
,
852 size_t total_subobjects_size
)
854 size_t poolsize
, subobjects_slack
, tmp
;
855 struct talloc_chunk
*tc
;
856 struct talloc_pool_hdr
*pool_hdr
;
859 poolsize
= type_size
+ total_subobjects_size
;
861 if ((poolsize
< type_size
) || (poolsize
< total_subobjects_size
)) {
865 if (num_subobjects
== UINT_MAX
) {
868 num_subobjects
+= 1; /* the object body itself */
871 * Alignment can increase the pool size by at most 15 bytes per object
872 * plus alignment for the object itself
874 subobjects_slack
= (TC_HDR_SIZE
+ TP_HDR_SIZE
+ 15) * num_subobjects
;
875 if (subobjects_slack
< num_subobjects
) {
879 tmp
= poolsize
+ subobjects_slack
;
880 if ((tmp
< poolsize
) || (tmp
< subobjects_slack
)) {
885 ret
= _talloc_pool(ctx
, poolsize
);
890 tc
= talloc_chunk_from_ptr(ret
);
891 tc
->size
= type_size
;
893 pool_hdr
= talloc_pool_from_chunk(tc
);
895 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
896 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr
->end
, type_size
);
899 pool_hdr
->end
= ((char *)pool_hdr
->end
+ TC_ALIGN16(type_size
));
901 _tc_set_name_const(tc
, type_name
);
/*
  setup a destructor to be called on free of a pointer
  the destructor should return 0 on success, or -1 on failure.
  if the destructor fails then the free is failed, and the memory can
  be continued to be used
*/
_PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
{
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
	tc->destructor = destructor;
}
/*
  increase the reference count on a piece of memory.
*/
_PUBLIC_ int talloc_increase_ref_count(const void *ptr)
{
	if (unlikely(!talloc_reference(null_context, ptr))) {
		return -1;
	}
	return 0;
}
/*
  helper for talloc_reference()

  this is referenced by a function pointer and should not be inline
*/
static int talloc_reference_destructor(struct talloc_reference_handle *handle)
{
	struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
	_TLIST_REMOVE(ptr_tc->refs, handle);
	return 0;
}
/*
   more efficient way to add a name to a pointer - the name must point to a
   constant string
*/
static inline void _tc_set_name_const(struct talloc_chunk *tc,
					const char *name)
{
	tc->name = name;
}

/*
  internal talloc_named_const()
*/
static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
{
	void *ptr;
	struct talloc_chunk *tc;

	ptr = __talloc(context, size, &tc);
	if (unlikely(ptr == NULL)) {
		return NULL;
	}

	_tc_set_name_const(tc, name);

	return ptr;
}
/*
  make a secondary reference to a pointer, hanging off the given context.
  the pointer remains valid until both the original caller and this given
  context are freed.

  the major use for this is when two different structures need to reference the
  same underlying data, and you want to be able to free the two instances separately,
  and in either order
*/
_PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
{
	struct talloc_chunk *tc;
	struct talloc_reference_handle *handle;
	if (unlikely(ptr == NULL)) return NULL;

	tc = talloc_chunk_from_ptr(ptr);
	handle = (struct talloc_reference_handle *)_talloc_named_const(context,
						   sizeof(struct talloc_reference_handle),
						   TALLOC_MAGIC_REFERENCE);
	if (unlikely(handle == NULL)) return NULL;

	/* note that we hang the destructor off the handle, not the
	   main context as that allows the caller to still setup their
	   own destructor on the context if they want to */
	talloc_set_destructor(handle, talloc_reference_destructor);
	handle->ptr = discard_const_p(void, ptr);
	handle->location = location;
	_TLIST_ADD(tc->refs, handle);
	return handle->ptr;
}
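/*
 * Reference usage sketch (illustrative; ctx1, ctx2 and "struct state" are
 * hypothetical):
 *
 *	struct state *obj = talloc_zero(ctx1, struct state);
 *	talloc_reference(ctx2, obj);   // obj now also reachable from ctx2
 *	talloc_free(ctx1);             // obj survives, kept alive by the reference
 *	talloc_free(ctx2);             // last holder gone, obj is really freed
 */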
1002 static void *_talloc_steal_internal(const void *new_ctx
, const void *ptr
);
1004 static inline void _tc_free_poolmem(struct talloc_chunk
*tc
,
1005 const char *location
)
1007 struct talloc_pool_hdr
*pool
;
1008 struct talloc_chunk
*pool_tc
;
1012 pool_tc
= talloc_chunk_from_pool(pool
);
1013 next_tc
= tc_next_chunk(tc
);
1015 _talloc_chunk_set_free(tc
, location
);
1017 TC_INVALIDATE_FULL_CHUNK(tc
);
1019 if (unlikely(pool
->object_count
== 0)) {
1020 talloc_abort("Pool object count zero!");
1024 pool
->object_count
--;
1026 if (unlikely(pool
->object_count
== 1
1027 && !(pool_tc
->flags
& TALLOC_FLAG_FREE
))) {
1029 * if there is just one object left in the pool
1030 * and pool->flags does not have TALLOC_FLAG_FREE,
1031 * it means this is the pool itself and
1032 * the rest is available for new objects
1035 pool
->end
= tc_pool_first_chunk(pool
);
1036 tc_invalidate_pool(pool
);
1040 if (unlikely(pool
->object_count
== 0)) {
1042 * we mark the freed memory with where we called the free
1043 * from. This means on a double free error we can report where
1044 * the first free came from
1046 pool_tc
->name
= location
;
1048 if (pool_tc
->flags
& TALLOC_FLAG_POOLMEM
) {
1049 _tc_free_poolmem(pool_tc
, location
);
1052 * The tc_memlimit_update_on_free()
1053 * call takes into account the
1054 * prefix TP_HDR_SIZE allocated before
1055 * the pool talloc_chunk.
1057 tc_memlimit_update_on_free(pool_tc
);
1058 TC_INVALIDATE_FULL_CHUNK(pool_tc
);
1064 if (pool
->end
== next_tc
) {
1066 * if pool->pool still points to end of
1067 * 'tc' (which is stored in the 'next_tc' variable),
1068 * we can reclaim the memory of 'tc'.
1075 * Do nothing. The memory is just "wasted", waiting for the pool
1076 * itself to be freed.
1080 static inline void _tc_free_children_internal(struct talloc_chunk
*tc
,
1082 const char *location
);
1084 static inline int _talloc_free_internal(void *ptr
, const char *location
);
1087 internal free call that takes a struct talloc_chunk *.
1089 static inline int _tc_free_internal(struct talloc_chunk
*tc
,
1090 const char *location
)
1093 void *ptr
= TC_PTR_FROM_CHUNK(tc
);
1095 if (unlikely(tc
->refs
)) {
1097 /* check if this is a reference from a child or
1098 * grandchild back to it's parent or grandparent
1100 * in that case we need to remove the reference and
1101 * call another instance of talloc_free() on the current
1104 is_child
= talloc_is_parent(tc
->refs
, ptr
);
1105 _talloc_free_internal(tc
->refs
, location
);
1107 return _talloc_free_internal(ptr
, location
);
1112 if (unlikely(tc
->flags
& TALLOC_FLAG_LOOP
)) {
1113 /* we have a free loop - stop looping */
1117 if (unlikely(tc
->destructor
)) {
1118 talloc_destructor_t d
= tc
->destructor
;
1121 * Protect the destructor against some overwrite
1122 * attacks, by explicitly checking it has the right
1125 if (talloc_chunk_from_ptr(ptr
) != tc
) {
1127 * This can't actually happen, the
1128 * call itself will panic.
1130 TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1133 if (d
== (talloc_destructor_t
)-1) {
1136 tc
->destructor
= (talloc_destructor_t
)-1;
1139 * Only replace the destructor pointer if
1140 * calling the destructor didn't modify it.
1142 if (tc
->destructor
== (talloc_destructor_t
)-1) {
1147 tc
->destructor
= NULL
;
1151 _TLIST_REMOVE(tc
->parent
->child
, tc
);
1152 if (tc
->parent
->child
) {
1153 tc
->parent
->child
->parent
= tc
->parent
;
1156 if (tc
->prev
) tc
->prev
->next
= tc
->next
;
1157 if (tc
->next
) tc
->next
->prev
= tc
->prev
;
1158 tc
->prev
= tc
->next
= NULL
;
1161 tc
->flags
|= TALLOC_FLAG_LOOP
;
1163 _tc_free_children_internal(tc
, ptr
, location
);
1165 _talloc_chunk_set_free(tc
, location
);
1167 if (tc
->flags
& TALLOC_FLAG_POOL
) {
1168 struct talloc_pool_hdr
*pool
;
1170 pool
= talloc_pool_from_chunk(tc
);
1172 if (unlikely(pool
->object_count
== 0)) {
1173 talloc_abort("Pool object count zero!");
1177 pool
->object_count
--;
1179 if (likely(pool
->object_count
!= 0)) {
1184 * With object_count==0, a pool becomes a normal piece of
1185 * memory to free. If it's allocated inside a pool, it needs
1186 * to be freed as poolmem, else it needs to be just freed.
1193 if (tc
->flags
& TALLOC_FLAG_POOLMEM
) {
1194 _tc_free_poolmem(tc
, location
);
1198 tc_memlimit_update_on_free(tc
);
1200 TC_INVALIDATE_FULL_CHUNK(tc
);
/*
   internal talloc_free call
*/
static inline int _talloc_free_internal(void *ptr, const char *location)
{
	struct talloc_chunk *tc;

	if (unlikely(ptr == NULL)) {
		return -1;
	}

	/* possibly initialise the talloc fill value */
	if (unlikely(!talloc_fill.initialised)) {
		const char *fill = getenv(TALLOC_FILL_ENV);
		if (fill != NULL) {
			talloc_fill.enabled = true;
			talloc_fill.fill_value = strtoul(fill, NULL, 0);
		}
		talloc_fill.initialised = true;
	}

	tc = talloc_chunk_from_ptr(ptr);
	return _tc_free_internal(tc, location);
}
1230 static inline size_t _talloc_total_limit_size(const void *ptr
,
1231 struct talloc_memlimit
*old_limit
,
1232 struct talloc_memlimit
*new_limit
);
1235 move a lump of memory from one talloc context to another return the
1236 ptr on success, or NULL if it could not be transferred.
1237 passing NULL as ptr will always return NULL with no side effects.
1239 static void *_talloc_steal_internal(const void *new_ctx
, const void *ptr
)
1241 struct talloc_chunk
*tc
, *new_tc
;
1242 size_t ctx_size
= 0;
1244 if (unlikely(!ptr
)) {
1248 if (unlikely(new_ctx
== NULL
)) {
1249 new_ctx
= null_context
;
1252 tc
= talloc_chunk_from_ptr(ptr
);
1254 if (tc
->limit
!= NULL
) {
1256 ctx_size
= _talloc_total_limit_size(ptr
, NULL
, NULL
);
1258 /* Decrement the memory limit from the source .. */
1259 talloc_memlimit_shrink(tc
->limit
->upper
, ctx_size
);
1261 if (tc
->limit
->parent
== tc
) {
1262 tc
->limit
->upper
= NULL
;
1268 if (unlikely(new_ctx
== NULL
)) {
1270 _TLIST_REMOVE(tc
->parent
->child
, tc
);
1271 if (tc
->parent
->child
) {
1272 tc
->parent
->child
->parent
= tc
->parent
;
1275 if (tc
->prev
) tc
->prev
->next
= tc
->next
;
1276 if (tc
->next
) tc
->next
->prev
= tc
->prev
;
1279 tc
->parent
= tc
->next
= tc
->prev
= NULL
;
1280 return discard_const_p(void, ptr
);
1283 new_tc
= talloc_chunk_from_ptr(new_ctx
);
1285 if (unlikely(tc
== new_tc
|| tc
->parent
== new_tc
)) {
1286 return discard_const_p(void, ptr
);
1290 _TLIST_REMOVE(tc
->parent
->child
, tc
);
1291 if (tc
->parent
->child
) {
1292 tc
->parent
->child
->parent
= tc
->parent
;
1295 if (tc
->prev
) tc
->prev
->next
= tc
->next
;
1296 if (tc
->next
) tc
->next
->prev
= tc
->prev
;
1297 tc
->prev
= tc
->next
= NULL
;
1300 tc
->parent
= new_tc
;
1301 if (new_tc
->child
) new_tc
->child
->parent
= NULL
;
1302 _TLIST_ADD(new_tc
->child
, tc
);
1304 if (tc
->limit
|| new_tc
->limit
) {
1305 ctx_size
= _talloc_total_limit_size(ptr
, tc
->limit
,
1307 /* .. and increment it in the destination. */
1308 if (new_tc
->limit
) {
1309 talloc_memlimit_grow(new_tc
->limit
, ctx_size
);
1313 return discard_const_p(void, ptr
);
1317 move a lump of memory from one talloc context to another return the
1318 ptr on success, or NULL if it could not be transferred.
1319 passing NULL as ptr will always return NULL with no side effects.
1321 _PUBLIC_
void *_talloc_steal_loc(const void *new_ctx
, const void *ptr
, const char *location
)
1323 struct talloc_chunk
*tc
;
1325 if (unlikely(ptr
== NULL
)) {
1329 tc
= talloc_chunk_from_ptr(ptr
);
1331 if (unlikely(tc
->refs
!= NULL
) && talloc_parent(ptr
) != new_ctx
) {
1332 struct talloc_reference_handle
*h
;
1334 talloc_log("WARNING: talloc_steal with references at %s\n",
1337 for (h
=tc
->refs
; h
; h
=h
->next
) {
1338 talloc_log("\treference at %s\n",
1344 /* this test is probably too expensive to have on in the
1345 normal build, but it useful for debugging */
1346 if (talloc_is_parent(new_ctx
, ptr
)) {
1347 talloc_log("WARNING: stealing into talloc child at %s\n", location
);
1351 return _talloc_steal_internal(new_ctx
, ptr
);
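/*
 * talloc_steal() usage sketch (illustrative; tmp_ctx, conn and
 * parse_request() are hypothetical): move an object from a short-lived
 * context to a longer-lived parent, e.g. handing a parsed request over to
 * the connection that owns it:
 *
 *	req = parse_request(tmp_ctx, ...);
 *	talloc_steal(conn, req);       // req is now freed together with conn
 *	talloc_free(tmp_ctx);          // no longer frees req
 */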
1355 this is like a talloc_steal(), but you must supply the old
1356 parent. This resolves the ambiguity in a talloc_steal() which is
1357 called on a context that has more than one parent (via references)
1359 The old parent can be either a reference or a parent
1361 _PUBLIC_
void *talloc_reparent(const void *old_parent
, const void *new_parent
, const void *ptr
)
1363 struct talloc_chunk
*tc
;
1364 struct talloc_reference_handle
*h
;
1366 if (unlikely(ptr
== NULL
)) {
1370 if (old_parent
== talloc_parent(ptr
)) {
1371 return _talloc_steal_internal(new_parent
, ptr
);
1374 tc
= talloc_chunk_from_ptr(ptr
);
1375 for (h
=tc
->refs
;h
;h
=h
->next
) {
1376 if (talloc_parent(h
) == old_parent
) {
1377 if (_talloc_steal_internal(new_parent
, h
) != h
) {
1380 return discard_const_p(void, ptr
);
1384 /* it wasn't a parent */
1389 remove a secondary reference to a pointer. This undo's what
1390 talloc_reference() has done. The context and pointer arguments
1391 must match those given to a talloc_reference()
1393 static inline int talloc_unreference(const void *context
, const void *ptr
)
1395 struct talloc_chunk
*tc
= talloc_chunk_from_ptr(ptr
);
1396 struct talloc_reference_handle
*h
;
1398 if (unlikely(context
== NULL
)) {
1399 context
= null_context
;
1402 for (h
=tc
->refs
;h
;h
=h
->next
) {
1403 struct talloc_chunk
*p
= talloc_parent_chunk(h
);
1405 if (context
== NULL
) break;
1406 } else if (TC_PTR_FROM_CHUNK(p
) == context
) {
1414 return _talloc_free_internal(h
, __location__
);
1418 remove a specific parent context from a pointer. This is a more
1419 controlled variant of talloc_free()
1421 _PUBLIC_
int talloc_unlink(const void *context
, void *ptr
)
1423 struct talloc_chunk
*tc_p
, *new_p
, *tc_c
;
1430 if (context
== NULL
) {
1431 context
= null_context
;
1434 if (talloc_unreference(context
, ptr
) == 0) {
1438 if (context
!= NULL
) {
1439 tc_c
= talloc_chunk_from_ptr(context
);
1443 if (tc_c
!= talloc_parent_chunk(ptr
)) {
1447 tc_p
= talloc_chunk_from_ptr(ptr
);
1449 if (tc_p
->refs
== NULL
) {
1450 return _talloc_free_internal(ptr
, __location__
);
1453 new_p
= talloc_parent_chunk(tc_p
->refs
);
1455 new_parent
= TC_PTR_FROM_CHUNK(new_p
);
1460 if (talloc_unreference(new_parent
, ptr
) != 0) {
1464 _talloc_steal_internal(new_parent
, ptr
);
1470 add a name to an existing pointer - va_list version
1472 static inline const char *tc_set_name_v(struct talloc_chunk
*tc
,
1474 va_list ap
) PRINTF_ATTRIBUTE(2,0);
1476 static inline const char *tc_set_name_v(struct talloc_chunk
*tc
,
1480 struct talloc_chunk
*name_tc
= _vasprintf_tc(TC_PTR_FROM_CHUNK(tc
),
1483 if (likely(name_tc
)) {
1484 tc
->name
= TC_PTR_FROM_CHUNK(name_tc
);
1485 _tc_set_name_const(name_tc
, ".name");
1493 add a name to an existing pointer
1495 _PUBLIC_
const char *talloc_set_name(const void *ptr
, const char *fmt
, ...)
1497 struct talloc_chunk
*tc
= talloc_chunk_from_ptr(ptr
);
1501 name
= tc_set_name_v(tc
, fmt
, ap
);
1508 create a named talloc pointer. Any talloc pointer can be named, and
1509 talloc_named() operates just like talloc() except that it allows you
1510 to name the pointer.
1512 _PUBLIC_
void *talloc_named(const void *context
, size_t size
, const char *fmt
, ...)
1517 struct talloc_chunk
*tc
;
1519 ptr
= __talloc(context
, size
, &tc
);
1520 if (unlikely(ptr
== NULL
)) return NULL
;
1523 name
= tc_set_name_v(tc
, fmt
, ap
);
1526 if (unlikely(name
== NULL
)) {
1527 _talloc_free_internal(ptr
, __location__
);
1535 return the name of a talloc ptr, or "UNNAMED"
1537 static inline const char *__talloc_get_name(const void *ptr
)
1539 struct talloc_chunk
*tc
= talloc_chunk_from_ptr(ptr
);
1540 if (unlikely(tc
->name
== TALLOC_MAGIC_REFERENCE
)) {
1541 return ".reference";
1543 if (likely(tc
->name
)) {
1549 _PUBLIC_
const char *talloc_get_name(const void *ptr
)
1551 return __talloc_get_name(ptr
);
1555 check if a pointer has the given name. If it does, return the pointer,
1556 otherwise return NULL
1558 _PUBLIC_
void *talloc_check_name(const void *ptr
, const char *name
)
1561 if (unlikely(ptr
== NULL
)) return NULL
;
1562 pname
= __talloc_get_name(ptr
);
1563 if (likely(pname
== name
|| strcmp(pname
, name
) == 0)) {
1564 return discard_const_p(void, ptr
);
1569 static void talloc_abort_type_mismatch(const char *location
,
1571 const char *expected
)
1575 reason
= talloc_asprintf(NULL
,
1576 "%s: Type mismatch: name[%s] expected[%s]",
1581 reason
= "Type mismatch";
1584 talloc_abort(reason
);
1587 _PUBLIC_
void *_talloc_get_type_abort(const void *ptr
, const char *name
, const char *location
)
1591 if (unlikely(ptr
== NULL
)) {
1592 talloc_abort_type_mismatch(location
, NULL
, name
);
1596 pname
= __talloc_get_name(ptr
);
1597 if (likely(pname
== name
|| strcmp(pname
, name
) == 0)) {
1598 return discard_const_p(void, ptr
);
1601 talloc_abort_type_mismatch(location
, pname
, name
);
1606 this is for compatibility with older versions of talloc
1608 _PUBLIC_
void *talloc_init(const char *fmt
, ...)
1613 struct talloc_chunk
*tc
;
1615 ptr
= __talloc(NULL
, 0, &tc
);
1616 if (unlikely(ptr
== NULL
)) return NULL
;
1619 name
= tc_set_name_v(tc
, fmt
, ap
);
1622 if (unlikely(name
== NULL
)) {
1623 _talloc_free_internal(ptr
, __location__
);
1630 static inline void _tc_free_children_internal(struct talloc_chunk
*tc
,
1632 const char *location
)
1635 /* we need to work out who will own an abandoned child
1636 if it cannot be freed. In priority order, the first
1637 choice is owner of any remaining reference to this
1638 pointer, the second choice is our parent, and the
1639 final choice is the null context. */
1640 void *child
= TC_PTR_FROM_CHUNK(tc
->child
);
1641 const void *new_parent
= null_context
;
1642 if (unlikely(tc
->child
->refs
)) {
1643 struct talloc_chunk
*p
= talloc_parent_chunk(tc
->child
->refs
);
1644 if (p
) new_parent
= TC_PTR_FROM_CHUNK(p
);
1646 if (unlikely(_tc_free_internal(tc
->child
, location
) == -1)) {
1647 if (talloc_parent_chunk(child
) != tc
) {
1649 * Destructor already reparented this child.
1650 * No further reparenting needed.
1654 if (new_parent
== null_context
) {
1655 struct talloc_chunk
*p
= talloc_parent_chunk(ptr
);
1656 if (p
) new_parent
= TC_PTR_FROM_CHUNK(p
);
1658 _talloc_steal_internal(new_parent
, child
);
1664 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1665 should probably not be used in new code. It's in here to keep the talloc
1666 code consistent across Samba 3 and 4.
1668 _PUBLIC_
void talloc_free_children(void *ptr
)
1670 struct talloc_chunk
*tc_name
= NULL
;
1671 struct talloc_chunk
*tc
;
1673 if (unlikely(ptr
== NULL
)) {
1677 tc
= talloc_chunk_from_ptr(ptr
);
1679 /* we do not want to free the context name if it is a child .. */
1680 if (likely(tc
->child
)) {
1681 for (tc_name
= tc
->child
; tc_name
; tc_name
= tc_name
->next
) {
1682 if (tc
->name
== TC_PTR_FROM_CHUNK(tc_name
)) break;
1685 _TLIST_REMOVE(tc
->child
, tc_name
);
1687 tc
->child
->parent
= tc
;
1692 _tc_free_children_internal(tc
, ptr
, __location__
);
1694 /* .. so we put it back after all other children have been freed */
1697 tc
->child
->parent
= NULL
;
1699 tc_name
->parent
= tc
;
1700 _TLIST_ADD(tc
->child
, tc_name
);
1705 Allocate a bit of memory as a child of an existing pointer
1707 _PUBLIC_
void *_talloc(const void *context
, size_t size
)
1709 struct talloc_chunk
*tc
;
1710 return __talloc(context
, size
, &tc
);
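/*
 * Hierarchical allocation sketch (illustrative; "struct conn" is
 * hypothetical). The public talloc(), talloc_size() and talloc_strdup()
 * calls all end up in the internal allocators in this file:
 *
 *	struct conn *c = talloc(parent_ctx, struct conn);
 *	c->name = talloc_strdup(c, "client");   // child of c
 *	talloc_free(c);                         // frees c and c->name together
 */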
1714 externally callable talloc_set_name_const()
1716 _PUBLIC_
void talloc_set_name_const(const void *ptr
, const char *name
)
1718 _tc_set_name_const(talloc_chunk_from_ptr(ptr
), name
);
1722 create a named talloc pointer. Any talloc pointer can be named, and
1723 talloc_named() operates just like talloc() except that it allows you
1724 to name the pointer.
1726 _PUBLIC_
void *talloc_named_const(const void *context
, size_t size
, const char *name
)
1728 return _talloc_named_const(context
, size
, name
);
1732 free a talloc pointer. This also frees all child pointers of this
1735 return 0 if the memory is actually freed, otherwise -1. The memory
1736 will not be freed if the ref_count is > 1 or the destructor (if
1737 any) returns non-zero
1739 _PUBLIC_
int _talloc_free(void *ptr
, const char *location
)
1741 struct talloc_chunk
*tc
;
1743 if (unlikely(ptr
== NULL
)) {
1747 tc
= talloc_chunk_from_ptr(ptr
);
1749 if (unlikely(tc
->refs
!= NULL
)) {
1750 struct talloc_reference_handle
*h
;
1752 if (talloc_parent(ptr
) == null_context
&& tc
->refs
->next
== NULL
) {
1753 /* in this case we do know which parent should
1754 get this pointer, as there is really only
1756 return talloc_unlink(null_context
, ptr
);
1759 talloc_log("ERROR: talloc_free with references at %s\n",
1762 for (h
=tc
->refs
; h
; h
=h
->next
) {
1763 talloc_log("\treference at %s\n",
1769 return _talloc_free_internal(ptr
, location
);
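/*
 * Return-value sketch for talloc_free() (illustrative): callers either check
 * for a refusing destructor or use the TALLOC_FREE() macro, which also NULLs
 * the pointer:
 *
 *	if (talloc_free(ptr) == -1) {
 *		// destructor failed, ptr is still valid
 *	}
 *
 *	TALLOC_FREE(ptr);	// frees (if non-NULL) and sets ptr = NULL
 */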
1775 A talloc version of realloc. The context argument is only used if
1778 _PUBLIC_
void *_talloc_realloc(const void *context
, void *ptr
, size_t size
, const char *name
)
1780 struct talloc_chunk
*tc
;
1782 bool malloced
= false;
1783 struct talloc_pool_hdr
*pool_hdr
= NULL
;
1784 size_t old_size
= 0;
1785 size_t new_size
= 0;
1787 /* size zero is equivalent to free() */
1788 if (unlikely(size
== 0)) {
1789 talloc_unlink(context
, ptr
);
1793 if (unlikely(size
>= MAX_TALLOC_SIZE
)) {
1797 /* realloc(NULL) is equivalent to malloc() */
1799 return _talloc_named_const(context
, size
, name
);
1802 tc
= talloc_chunk_from_ptr(ptr
);
1804 /* don't allow realloc on referenced pointers */
1805 if (unlikely(tc
->refs
)) {
1809 /* don't let anybody try to realloc a talloc_pool */
1810 if (unlikely(tc
->flags
& TALLOC_FLAG_POOL
)) {
1814 if (tc
->limit
&& (size
> tc
->size
)) {
1815 if (!talloc_memlimit_check(tc
->limit
, (size
- tc
->size
))) {
1821 /* handle realloc inside a talloc_pool */
1822 if (unlikely(tc
->flags
& TALLOC_FLAG_POOLMEM
)) {
1823 pool_hdr
= tc
->pool
;
1826 #if (ALWAYS_REALLOC == 0)
1827 /* don't shrink if we have less than 1k to gain */
1828 if (size
< tc
->size
&& tc
->limit
== NULL
) {
1830 void *next_tc
= tc_next_chunk(tc
);
1831 TC_INVALIDATE_SHRINK_CHUNK(tc
, size
);
1833 if (next_tc
== pool_hdr
->end
) {
1834 /* note: tc->size has changed, so this works */
1835 pool_hdr
->end
= tc_next_chunk(tc
);
1838 } else if ((tc
->size
- size
) < 1024) {
1840 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1841 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1842 * after each realloc call, which slows down
1843 * testing a lot :-(.
1845 * That is why we only mark memory as undefined here.
1847 TC_UNDEFINE_SHRINK_CHUNK(tc
, size
);
1849 /* do not shrink if we have less than 1k to gain */
1853 } else if (tc
->size
== size
) {
1855 * do not change the pointer if it is exactly
1863 * by resetting magic we catch users of the old memory
1865 * We mark this memory as free, and also over-stamp the talloc
1866 * magic with the old-style magic.
1868 * Why? This tries to avoid a memory read use-after-free from
1869 * disclosing our talloc magic, which would then allow an
1870 * attacker to prepare a valid header and so run a destructor.
1872 * What else? We have to re-stamp back a valid normal magic
1873 * on this memory once realloc() is done, as it will have done
1874 * a memcpy() into the new valid memory. We can't do this in
1875 * reverse as that would be a real use-after-free.
1877 _talloc_chunk_set_free(tc
, NULL
);
1881 new_ptr
= tc_alloc_pool(tc
, size
+ TC_HDR_SIZE
, 0);
1882 pool_hdr
->object_count
--;
1884 if (new_ptr
== NULL
) {
1885 new_ptr
= malloc(TC_HDR_SIZE
+size
);
1891 memcpy(new_ptr
, tc
, MIN(tc
->size
,size
) + TC_HDR_SIZE
);
1892 TC_INVALIDATE_FULL_CHUNK(tc
);
1895 /* We're doing malloc then free here, so record the difference. */
1896 old_size
= tc
->size
;
1898 new_ptr
= malloc(size
+ TC_HDR_SIZE
);
1900 memcpy(new_ptr
, tc
, MIN(tc
->size
, size
) + TC_HDR_SIZE
);
1906 struct talloc_chunk
*pool_tc
;
1907 void *next_tc
= tc_next_chunk(tc
);
1908 size_t old_chunk_size
= TC_ALIGN16(TC_HDR_SIZE
+ tc
->size
);
1909 size_t new_chunk_size
= TC_ALIGN16(TC_HDR_SIZE
+ size
);
1910 size_t space_needed
;
1912 unsigned int chunk_count
= pool_hdr
->object_count
;
1914 pool_tc
= talloc_chunk_from_pool(pool_hdr
);
1915 if (!(pool_tc
->flags
& TALLOC_FLAG_FREE
)) {
1919 if (chunk_count
== 1) {
1921 * optimize for the case where 'tc' is the only
1922 * chunk in the pool.
1924 char *start
= tc_pool_first_chunk(pool_hdr
);
1925 space_needed
= new_chunk_size
;
1926 space_left
= (char *)tc_pool_end(pool_hdr
) - start
;
1928 if (space_left
>= space_needed
) {
1929 size_t old_used
= TC_HDR_SIZE
+ tc
->size
;
1930 size_t new_used
= TC_HDR_SIZE
+ size
;
1933 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1937 * start -> tc may have
1938 * been freed and thus been marked as
1939 * VALGRIND_MEM_NOACCESS. Set it to
1940 * VALGRIND_MEM_UNDEFINED so we can
1941 * copy into it without valgrind errors.
1942 * We can't just mark
1943 * new_ptr -> new_ptr + old_used
1944 * as this may overlap on top of tc,
1945 * (which is why we use memmove, not
1946 * memcpy below) hence the MIN.
1948 size_t undef_len
= MIN((((char *)tc
) - ((char *)new_ptr
)),old_used
);
1949 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr
, undef_len
);
1953 memmove(new_ptr
, tc
, old_used
);
1955 tc
= (struct talloc_chunk
*)new_ptr
;
1956 TC_UNDEFINE_GROW_CHUNK(tc
, size
);
1959 * first we do not align the pool pointer
1960 * because we want to invalidate the padding
1963 pool_hdr
->end
= new_used
+ (char *)new_ptr
;
1964 tc_invalidate_pool(pool_hdr
);
1966 /* now the aligned pointer */
1967 pool_hdr
->end
= new_chunk_size
+ (char *)new_ptr
;
1974 if (new_chunk_size
== old_chunk_size
) {
1975 TC_UNDEFINE_GROW_CHUNK(tc
, size
);
1976 _talloc_chunk_set_not_free(tc
);
1981 if (next_tc
== pool_hdr
->end
) {
1983 * optimize for the case where 'tc' is the last
1984 * chunk in the pool.
1986 space_needed
= new_chunk_size
- old_chunk_size
;
1987 space_left
= tc_pool_space_left(pool_hdr
);
1989 if (space_left
>= space_needed
) {
1990 TC_UNDEFINE_GROW_CHUNK(tc
, size
);
1991 _talloc_chunk_set_not_free(tc
);
1993 pool_hdr
->end
= tc_next_chunk(tc
);
1998 new_ptr
= tc_alloc_pool(tc
, size
+ TC_HDR_SIZE
, 0);
2000 if (new_ptr
== NULL
) {
2001 new_ptr
= malloc(TC_HDR_SIZE
+size
);
2007 memcpy(new_ptr
, tc
, MIN(tc
->size
,size
) + TC_HDR_SIZE
);
2009 _tc_free_poolmem(tc
, __location__
"_talloc_realloc");
2013 /* We're doing realloc here, so record the difference. */
2014 old_size
= tc
->size
;
2016 new_ptr
= realloc(tc
, size
+ TC_HDR_SIZE
);
2020 if (unlikely(!new_ptr
)) {
2022 * Ok, this is a strange spot. We have to put back
2023 * the old talloc_magic and any flags, except the
2024 * TALLOC_FLAG_FREE as this was not free'ed by the
2025 * realloc() call after all
2027 _talloc_chunk_set_not_free(tc
);
2032 * tc is now the new value from realloc(), the old memory we
2033 * can't access any more and was preemptively marked as
2034 * TALLOC_FLAG_FREE before the call. Now we mark it as not
2037 tc
= (struct talloc_chunk
*)new_ptr
;
2038 _talloc_chunk_set_not_free(tc
);
2040 tc
->flags
&= ~TALLOC_FLAG_POOLMEM
;
2043 tc
->parent
->child
= tc
;
2046 tc
->child
->parent
= tc
;
2050 tc
->prev
->next
= tc
;
2053 tc
->next
->prev
= tc
;
2056 if (new_size
> old_size
) {
2057 talloc_memlimit_grow(tc
->limit
, new_size
- old_size
);
2058 } else if (new_size
< old_size
) {
2059 talloc_memlimit_shrink(tc
->limit
, old_size
- new_size
);
2063 _tc_set_name_const(tc
, name
);
2065 return TC_PTR_FROM_CHUNK(tc
);
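/*
 * Reallocation sketch (illustrative; "arr" and "n" are hypothetical). The
 * public talloc_realloc() macro supplies the element type and count:
 *
 *	int *arr = talloc_array(ctx, int, 10);
 *	arr = talloc_realloc(ctx, arr, int, n);   // may move, like realloc(3)
 *	arr = talloc_realloc(ctx, arr, int, 0);   // count 0 frees it, returns NULL
 */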
2069 a wrapper around talloc_steal() for situations where you are moving a pointer
2070 between two structures, and want the old pointer to be set to NULL
2072 _PUBLIC_
void *_talloc_move(const void *new_ctx
, const void *_pptr
)
2074 const void **pptr
= discard_const_p(const void *,_pptr
);
2075 void *ret
= talloc_steal(new_ctx
, discard_const_p(void, *pptr
));
2080 enum talloc_mem_count_type
{
2086 static inline size_t _talloc_total_mem_internal(const void *ptr
,
2087 enum talloc_mem_count_type type
,
2088 struct talloc_memlimit
*old_limit
,
2089 struct talloc_memlimit
*new_limit
)
2092 struct talloc_chunk
*c
, *tc
;
2101 tc
= talloc_chunk_from_ptr(ptr
);
2103 if (old_limit
|| new_limit
) {
2104 if (tc
->limit
&& tc
->limit
->upper
== old_limit
) {
2105 tc
->limit
->upper
= new_limit
;
2109 /* optimize in the memlimits case */
2110 if (type
== TOTAL_MEM_LIMIT
&&
2111 tc
->limit
!= NULL
&&
2112 tc
->limit
!= old_limit
&&
2113 tc
->limit
->parent
== tc
) {
2114 return tc
->limit
->cur_size
;
2117 if (tc
->flags
& TALLOC_FLAG_LOOP
) {
2121 tc
->flags
|= TALLOC_FLAG_LOOP
;
2123 if (old_limit
|| new_limit
) {
2124 if (old_limit
== tc
->limit
) {
2125 tc
->limit
= new_limit
;
2130 case TOTAL_MEM_SIZE
:
2131 if (likely(tc
->name
!= TALLOC_MAGIC_REFERENCE
)) {
2135 case TOTAL_MEM_BLOCKS
:
2138 case TOTAL_MEM_LIMIT
:
2139 if (likely(tc
->name
!= TALLOC_MAGIC_REFERENCE
)) {
2141 * Don't count memory allocated from a pool
2142 * when calculating limits. Only count the
2145 if (!(tc
->flags
& TALLOC_FLAG_POOLMEM
)) {
2146 if (tc
->flags
& TALLOC_FLAG_POOL
) {
2148 * If this is a pool, the allocated
2149 * size is in the pool header, and
2150 * remember to add in the prefix
2153 struct talloc_pool_hdr
*pool_hdr
2154 = talloc_pool_from_chunk(tc
);
2155 total
= pool_hdr
->poolsize
+
2159 total
= tc
->size
+ TC_HDR_SIZE
;
2165 for (c
= tc
->child
; c
; c
= c
->next
) {
2166 total
+= _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c
), type
,
2167 old_limit
, new_limit
);
2170 tc
->flags
&= ~TALLOC_FLAG_LOOP
;
2176 return the total size of a talloc pool (subtree)
2178 _PUBLIC_
size_t talloc_total_size(const void *ptr
)
2180 return _talloc_total_mem_internal(ptr
, TOTAL_MEM_SIZE
, NULL
, NULL
);
2184 return the total number of blocks in a talloc pool (subtree)
2186 _PUBLIC_
size_t talloc_total_blocks(const void *ptr
)
2188 return _talloc_total_mem_internal(ptr
, TOTAL_MEM_BLOCKS
, NULL
, NULL
);
2192 return the number of external references to a pointer
2194 _PUBLIC_
size_t talloc_reference_count(const void *ptr
)
2196 struct talloc_chunk
*tc
= talloc_chunk_from_ptr(ptr
);
2197 struct talloc_reference_handle
*h
;
2200 for (h
=tc
->refs
;h
;h
=h
->next
) {
2207 report on memory usage by all children of a pointer, giving a full tree view
2209 _PUBLIC_
void talloc_report_depth_cb(const void *ptr
, int depth
, int max_depth
,
2210 void (*callback
)(const void *ptr
,
2211 int depth
, int max_depth
,
2213 void *private_data
),
2216 struct talloc_chunk
*c
, *tc
;
2221 if (ptr
== NULL
) return;
2223 tc
= talloc_chunk_from_ptr(ptr
);
2225 if (tc
->flags
& TALLOC_FLAG_LOOP
) {
2229 callback(ptr
, depth
, max_depth
, 0, private_data
);
2231 if (max_depth
>= 0 && depth
>= max_depth
) {
2235 tc
->flags
|= TALLOC_FLAG_LOOP
;
2236 for (c
=tc
->child
;c
;c
=c
->next
) {
2237 if (c
->name
== TALLOC_MAGIC_REFERENCE
) {
2238 struct talloc_reference_handle
*h
= (struct talloc_reference_handle
*)TC_PTR_FROM_CHUNK(c
);
2239 callback(h
->ptr
, depth
+ 1, max_depth
, 1, private_data
);
2241 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c
), depth
+ 1, max_depth
, callback
, private_data
);
2244 tc
->flags
&= ~TALLOC_FLAG_LOOP
;
2247 static void talloc_report_depth_FILE_helper(const void *ptr
, int depth
, int max_depth
, int is_ref
, void *_f
)
2249 const char *name
= __talloc_get_name(ptr
);
2250 struct talloc_chunk
*tc
;
2251 FILE *f
= (FILE *)_f
;
2254 fprintf(f
, "%*sreference to: %s\n", depth
*4, "", name
);
2258 tc
= talloc_chunk_from_ptr(ptr
);
2259 if (tc
->limit
&& tc
->limit
->parent
== tc
) {
2260 fprintf(f
, "%*s%-30s is a memlimit context"
2261 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2264 (unsigned long)tc
->limit
->max_size
,
2265 (unsigned long)tc
->limit
->cur_size
);
2269 fprintf(f
,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2270 (max_depth
< 0 ? "full " :""), name
,
2271 (unsigned long)talloc_total_size(ptr
),
2272 (unsigned long)talloc_total_blocks(ptr
));
2276 fprintf(f
, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2279 (unsigned long)talloc_total_size(ptr
),
2280 (unsigned long)talloc_total_blocks(ptr
),
2281 (int)talloc_reference_count(ptr
), ptr
);
2284 fprintf(f
, "content: ");
2285 if (talloc_total_size(ptr
)) {
2286 int tot
= talloc_total_size(ptr
);
2289 for (i
= 0; i
< tot
; i
++) {
2290 if ((((char *)ptr
)[i
] > 31) && (((char *)ptr
)[i
] < 126)) {
2291 fprintf(f
, "%c", ((char *)ptr
)[i
]);
2293 fprintf(f
, "~%02x", ((char *)ptr
)[i
]);
2302 report on memory usage by all children of a pointer, giving a full tree view
2304 _PUBLIC_
void talloc_report_depth_file(const void *ptr
, int depth
, int max_depth
, FILE *f
)
2307 talloc_report_depth_cb(ptr
, depth
, max_depth
, talloc_report_depth_FILE_helper
, f
);
2313 report on memory usage by all children of a pointer, giving a full tree view
2315 _PUBLIC_
void talloc_report_full(const void *ptr
, FILE *f
)
2317 talloc_report_depth_file(ptr
, 0, -1, f
);
2321 report on memory usage by all children of a pointer
2323 _PUBLIC_
void talloc_report(const void *ptr
, FILE *f
)
2325 talloc_report_depth_file(ptr
, 0, 1, f
);
2329 enable tracking of the NULL context
2331 _PUBLIC_
void talloc_enable_null_tracking(void)
2333 if (null_context
== NULL
) {
2334 null_context
= _talloc_named_const(NULL
, 0, "null_context");
2335 if (autofree_context
!= NULL
) {
2336 talloc_reparent(NULL
, null_context
, autofree_context
);
2342 enable tracking of the NULL context, not moving the autofree context
2343 into the NULL context. This is needed for the talloc testsuite
2345 _PUBLIC_
void talloc_enable_null_tracking_no_autofree(void)
2347 if (null_context
== NULL
) {
2348 null_context
= _talloc_named_const(NULL
, 0, "null_context");
2353 disable tracking of the NULL context
2355 _PUBLIC_
void talloc_disable_null_tracking(void)
2357 if (null_context
!= NULL
) {
2358 /* we have to move any children onto the real NULL
2360 struct talloc_chunk
*tc
, *tc2
;
2361 tc
= talloc_chunk_from_ptr(null_context
);
2362 for (tc2
= tc
->child
; tc2
; tc2
=tc2
->next
) {
2363 if (tc2
->parent
== tc
) tc2
->parent
= NULL
;
2364 if (tc2
->prev
== tc
) tc2
->prev
= NULL
;
2366 for (tc2
= tc
->next
; tc2
; tc2
=tc2
->next
) {
2367 if (tc2
->parent
== tc
) tc2
->parent
= NULL
;
2368 if (tc2
->prev
== tc
) tc2
->prev
= NULL
;
2373 talloc_free(null_context
);
2374 null_context
= NULL
;
2378 enable leak reporting on exit
2380 _PUBLIC_
void talloc_enable_leak_report(void)
2382 talloc_enable_null_tracking();
2383 talloc_report_null
= true;
2384 talloc_setup_atexit();
2388 enable full leak reporting on exit
2390 _PUBLIC_
void talloc_enable_leak_report_full(void)
2392 talloc_enable_null_tracking();
2393 talloc_report_null_full
= true;
2394 talloc_setup_atexit();
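/*
 * Leak-report sketch (illustrative): enabling either variant early in a
 * program prints everything still parented to the NULL context at exit:
 *
 *	int main(void)
 *	{
 *		talloc_enable_leak_report_full();
 *		// ... rest of the program ...
 *		return 0;
 *	}
 */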
2398 talloc and zero memory.
2400 _PUBLIC_
void *_talloc_zero(const void *ctx
, size_t size
, const char *name
)
2402 void *p
= _talloc_named_const(ctx
, size
, name
);
2405 memset(p
, '\0', size
);
2412 memdup with a talloc.
2414 _PUBLIC_
void *_talloc_memdup(const void *t
, const void *p
, size_t size
, const char *name
)
2418 if (likely(size
> 0) && unlikely(p
== NULL
)) {
2422 newp
= _talloc_named_const(t
, size
, name
);
2423 if (likely(newp
!= NULL
) && likely(size
> 0)) {
2424 memcpy(newp
, p
, size
);
2430 static inline char *__talloc_strlendup(const void *t
, const char *p
, size_t len
)
2433 struct talloc_chunk
*tc
;
2435 ret
= (char *)__talloc(t
, len
+ 1, &tc
);
2436 if (unlikely(!ret
)) return NULL
;
2438 memcpy(ret
, p
, len
);
2441 _tc_set_name_const(tc
, ret
);
2446 strdup with a talloc
2448 _PUBLIC_
char *talloc_strdup(const void *t
, const char *p
)
2450 if (unlikely(!p
)) return NULL
;
2451 return __talloc_strlendup(t
, p
, strlen(p
));
2455 strndup with a talloc
2457 _PUBLIC_
char *talloc_strndup(const void *t
, const char *p
, size_t n
)
2459 if (unlikely(!p
)) return NULL
;
2460 return __talloc_strlendup(t
, p
, strnlen(p
, n
));
2463 static inline char *__talloc_strlendup_append(char *s
, size_t slen
,
2464 const char *a
, size_t alen
)
2468 ret
= talloc_realloc(NULL
, s
, char, slen
+ alen
+ 1);
2469 if (unlikely(!ret
)) return NULL
;
2471 /* append the string and the trailing \0 */
2472 memcpy(&ret
[slen
], a
, alen
);
2475 _tc_set_name_const(talloc_chunk_from_ptr(ret
), ret
);
2480 * Appends at the end of the string.
2482 _PUBLIC_
char *talloc_strdup_append(char *s
, const char *a
)
2485 return talloc_strdup(NULL
, a
);
2492 return __talloc_strlendup_append(s
, strlen(s
), a
, strlen(a
));
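/*
 * Append sketch (illustrative): talloc_strdup_append() appends at the
 * terminating NUL of the string, while talloc_strdup_append_buffer() below
 * appends at the end of the talloc'd buffer, which only differs once the
 * buffer is larger than strlen()+1:
 *
 *	char *s = talloc_strdup(ctx, "foo");
 *	s = talloc_strdup_append(s, "bar");     // s is now "foobar"
 */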
2496 * Appends at the end of the talloc'ed buffer,
2497 * not the end of the string.
2499 _PUBLIC_
char *talloc_strdup_append_buffer(char *s
, const char *a
)
2504 return talloc_strdup(NULL
, a
);
2511 slen
= talloc_get_size(s
);
2512 if (likely(slen
> 0)) {
2516 return __talloc_strlendup_append(s
, slen
, a
, strlen(a
));
/**
 * Appends at the end of the string.
 **/
_PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
{
	if (unlikely(!s)) {
		return talloc_strndup(NULL, a, n);
	}

	if (unlikely(!a)) {
		return s;
	}

	return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
}
/**
 * Appends at the end of the talloc'ed buffer,
 * not the end of the string.
 **/
_PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
{
	size_t slen;

	if (unlikely(!s)) {
		return talloc_strndup(NULL, a, n);
	}

	if (unlikely(!a)) {
		return s;
	}

	slen = talloc_get_size(s);
	if (likely(slen > 0)) {
		slen--;
	}

	return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
}
#ifndef HAVE_VA_COPY
#ifdef HAVE___VA_COPY
#define va_copy(dest, src) __va_copy(dest, src)
#else
#define va_copy(dest, src) (dest) = (src)
#endif
#endif
static struct talloc_chunk *_vasprintf_tc(const void *t,
					  const char *fmt,
					  va_list ap) PRINTF_ATTRIBUTE(2,0);

static struct talloc_chunk *_vasprintf_tc(const void *t,
					  const char *fmt,
					  va_list ap)
{
	int vlen;
	size_t len;
	char *ret;
	va_list ap2;
	struct talloc_chunk *tc;
	char buf[1024];

	/* this call looks strange, but it makes it work on older solaris boxes */
	va_copy(ap2, ap);
	vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
	va_end(ap2);
	if (unlikely(vlen < 0)) {
		return NULL;
	}
	len = vlen;
	if (unlikely(len + 1 < len)) {
		return NULL;
	}

	ret = (char *)__talloc(t, len+1, &tc);
	if (unlikely(!ret)) return NULL;

	if (len < sizeof(buf)) {
		memcpy(ret, buf, len+1);
	} else {
		va_copy(ap2, ap);
		vsnprintf(ret, len+1, fmt, ap2);
		va_end(ap2);
	}

	_tc_set_name_const(tc, ret);

	return tc;
}
_PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
{
	struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);

	if (tc == NULL) {
		return NULL;
	}

	return TC_PTR_FROM_CHUNK(tc);
}
/*
  Perform string formatting, and return a pointer to newly allocated
  memory holding the result, inside a memory pool.
 */
_PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
{
	va_list ap;
	char *ret;

	va_start(ap, fmt);
	ret = talloc_vasprintf(t, fmt, ap);
	va_end(ap);
	return ret;
}
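
/*
 * Illustrative usage (sketch): talloc_asprintf() is the talloc'ed
 * counterpart of asprintf(); the result is owned by the given context.
 * mem_ctx, name and uid are hypothetical caller variables.
 *
 *	char *msg = talloc_asprintf(mem_ctx, "user=%s uid=%d", name, uid);
 *	if (msg == NULL) {
 *		return ENOMEM;
 *	}
 */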
static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
						 PRINTF_ATTRIBUTE(3,0);

static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
{
	ssize_t alen;
	va_list ap2;
	char c;

	va_copy(ap2, ap);
	alen = vsnprintf(&c, 1, fmt, ap2);
	va_end(ap2);

	if (alen <= 0) {
		/* Either the vsnprintf failed or the format resulted in
		 * no characters being formatted. In the former case, we
		 * ought to return NULL, in the latter we ought to return
		 * the original string. Most current callers of this
		 * function expect it to never return NULL.
		 */
		return s;
	}

	s = talloc_realloc(NULL, s, char, slen + alen + 1);
	if (!s) return NULL;

	va_copy(ap2, ap);
	vsnprintf(s + slen, alen + 1, fmt, ap2);
	va_end(ap2);

	_tc_set_name_const(talloc_chunk_from_ptr(s), s);
	return s;
}
/**
 * Realloc @p s to append the formatted result of @p fmt and @p ap,
 * and return @p s, which may have moved. Good for gradually
 * accumulating output into a string buffer. Appends at the end
 * of the string.
 **/
_PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
{
	if (unlikely(!s)) {
		return talloc_vasprintf(NULL, fmt, ap);
	}

	return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
}
/**
 * Realloc @p s to append the formatted result of @p fmt and @p ap,
 * and return @p s, which may have moved. Always appends at the
 * end of the talloc'ed buffer, not the end of the string.
 **/
_PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
{
	size_t slen;

	if (unlikely(!s)) {
		return talloc_vasprintf(NULL, fmt, ap);
	}

	slen = talloc_get_size(s);
	if (likely(slen > 0)) {
		slen--;
	}

	return __talloc_vaslenprintf_append(s, slen, fmt, ap);
}
/*
  Realloc @p s to append the formatted result of @p fmt and return @p
  s, which may have moved.  Good for gradually accumulating output
  into a string buffer.
 */
_PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	s = talloc_vasprintf_append(s, fmt, ap);
	va_end(ap);
	return s;
}
/*
  Realloc @p s to append the formatted result of @p fmt and return @p
  s, which may have moved.  Good for gradually accumulating output
  into a buffer.
 */
_PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	s = talloc_vasprintf_append_buffer(s, fmt, ap);
	va_end(ap);
	return s;
}
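
/*
 * Illustrative usage (sketch): a typical pattern is to start with
 * talloc_asprintf() and grow the string in a loop; each append may move
 * the buffer, so only the returned pointer may be used afterwards.
 * mem_ctx, count and items are hypothetical.
 *
 *	char *out = talloc_asprintf(mem_ctx, "count=%u\n", count);
 *	for (i = 0; out != NULL && i < count; i++) {
 *		out = talloc_asprintf_append(out, "item[%u]=%s\n", i, items[i]);
 *	}
 */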
/*
  alloc an array, checking for integer overflow in the array size
*/
_PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
{
	if (count >= MAX_TALLOC_SIZE/el_size) {
		return NULL;
	}
	return _talloc_named_const(ctx, el_size * count, name);
}
/*
  alloc a zeroed array, checking for integer overflow in the array size
*/
_PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
{
	if (count >= MAX_TALLOC_SIZE/el_size) {
		return NULL;
	}
	return _talloc_zero(ctx, el_size * count, name);
}
/*
  realloc an array, checking for integer overflow in the array size
*/
_PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
{
	if (count >= MAX_TALLOC_SIZE/el_size) {
		return NULL;
	}
	return _talloc_realloc(ctx, ptr, el_size * count, name);
}
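
/*
 * Illustrative usage (sketch): the type-safe talloc_array(),
 * talloc_zero_array() and talloc_realloc() macros from talloc.h are the
 * usual entry points to the three helpers above.  mem_ctx and n are
 * hypothetical.
 *
 *	uint32_t *ids = talloc_array(mem_ctx, uint32_t, n);
 *	ids = talloc_realloc(mem_ctx, ids, uint32_t, n * 2);
 */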
/*
  a function version of talloc_realloc(), so it can be passed as a function pointer
  to libraries that want a realloc function (a realloc function encapsulates
  all the basic capabilities of an allocation library, which is why this is useful)
*/
_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
{
	return _talloc_realloc(context, ptr, size, NULL);
}
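
/*
 * Illustrative usage (sketch): libraries that accept a realloc-style
 * callback can be pointed at a talloc context this way.  The
 * some_lib_set_allocator() call is purely hypothetical.
 *
 *	some_lib_set_allocator(talloc_realloc_fn, mem_ctx);
 */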
static int talloc_autofree_destructor(void *ptr)
{
	autofree_context = NULL;
	return 0;
}
/*
  return a context which will be auto-freed on exit
  this is useful for reducing the noise in leak reports
*/
_PUBLIC_ void *talloc_autofree_context(void)
{
	if (autofree_context == NULL) {
		autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
		talloc_set_destructor(autofree_context, talloc_autofree_destructor);
		talloc_setup_atexit();
	}
	return autofree_context;
}
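
/*
 * Illustrative usage (sketch): long-lived allocations that should not show
 * up as leaks can be parented on the autofree context.  "struct config"
 * and global_config are hypothetical.
 *
 *	global_config = talloc_zero(talloc_autofree_context(), struct config);
 */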
_PUBLIC_ size_t talloc_get_size(const void *context)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(context);

	return tc->size;
}
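
/*
 * Illustrative usage (sketch): talloc_get_size() reports the requested
 * size of an allocation, which is what the *_append_buffer() helpers
 * above rely on.  mem_ctx is hypothetical.
 *
 *	char *buf = talloc_array(mem_ctx, char, 16);
 *	size_t n = talloc_get_size(buf);    n is 16
 */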
/*
  find a parent of this context that has the given name, if any
*/
_PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return NULL;
	}

	tc = talloc_chunk_from_ptr(context);
	while (tc) {
		if (tc->name && strcmp(tc->name, name) == 0) {
			return TC_PTR_FROM_CHUNK(tc);
		}
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
		}
	}
	return NULL;
}
/*
  show the parentage of a context
*/
_PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		fprintf(file, "talloc no parents for NULL\n");
		return;
	}

	tc = talloc_chunk_from_ptr(context);
	fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
	while (tc) {
		fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
		}
	}
	fflush(file);
}
/*
  return 1 if ptr is a parent of context
*/
static int _talloc_is_parent(const void *context, const void *ptr, int depth)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(context);
	while (tc) {
		if (depth <= 0) {
			return 0;
		}
		if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
			depth--;
		}
	}
	return 0;
}
/*
  return 1 if ptr is a parent of context
*/
_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
{
	return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
}
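
/*
 * Illustrative usage (sketch): handy for asserting ownership
 * relationships; it walks up the parent chain of the first argument
 * looking for the second.  mem_ctx is hypothetical.
 *
 *	char *child = talloc_strdup(mem_ctx, "x");
 *	assert(talloc_is_parent(child, mem_ctx));
 */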
/*
  return the total size of memory used by this context and all children
*/
static inline size_t _talloc_total_limit_size(const void *ptr,
					      struct talloc_memlimit *old_limit,
					      struct talloc_memlimit *new_limit)
{
	return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
					  old_limit, new_limit);
}
static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
{
	struct talloc_memlimit *l;

	for (l = limit; l != NULL; l = l->upper) {
		if (l->max_size != 0 &&
		    ((l->max_size <= l->cur_size) ||
		     (l->max_size - l->cur_size < size))) {
			return false;
		}
	}

	return true;
}
/*
  Update memory limits when freeing a talloc_chunk.
*/
static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
{
	size_t limit_shrink_size;

	if (!tc->limit) {
		return;
	}

	/*
	 * Pool entries don't count. Only the pools
	 * themselves are counted as part of the memory
	 * limits. Note that this also takes care of
	 * nested pools which have both flags
	 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
	 */
	if (tc->flags & TALLOC_FLAG_POOLMEM) {
		return;
	}

	/*
	 * If we are part of a memory limited context hierarchy
	 * we need to subtract the memory used from the counters
	 */

	limit_shrink_size = tc->size+TC_HDR_SIZE;

	/*
	 * If we're deallocating a pool, take into
	 * account the prefix size added for the pool.
	 */

	if (tc->flags & TALLOC_FLAG_POOL) {
		limit_shrink_size += TP_HDR_SIZE;
	}

	talloc_memlimit_shrink(tc->limit, limit_shrink_size);

	if (tc->limit->parent == tc) {
		free(tc->limit);
	}

	tc->limit = NULL;
}
/*
  Increase memory limit accounting after a malloc/realloc.
*/
static void talloc_memlimit_grow(struct talloc_memlimit *limit,
				 size_t size)
{
	struct talloc_memlimit *l;

	for (l = limit; l != NULL; l = l->upper) {
		size_t new_cur_size = l->cur_size + size;
		if (new_cur_size < l->cur_size) {
			talloc_abort("logic error in talloc_memlimit_grow\n");
			return;
		}
		l->cur_size = new_cur_size;
	}
}
/*
  Decrease memory limit accounting after a free/realloc.
*/
static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
				   size_t size)
{
	struct talloc_memlimit *l;

	for (l = limit; l != NULL; l = l->upper) {
		if (l->cur_size < size) {
			talloc_abort("logic error in talloc_memlimit_shrink\n");
			return;
		}
		l->cur_size = l->cur_size - size;
	}
}
_PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
{
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
	struct talloc_memlimit *orig_limit;
	struct talloc_memlimit *limit = NULL;

	if (tc->limit && tc->limit->parent == tc) {
		tc->limit->max_size = max_size;
		return 0;
	}
	orig_limit = tc->limit;

	limit = malloc(sizeof(struct talloc_memlimit));
	if (limit == NULL) {
		return 1;
	}
	limit->parent = tc;
	limit->max_size = max_size;
	limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);

	if (orig_limit) {
		limit->upper = orig_limit;
	} else {
		limit->upper = NULL;
	}