/*
 * Copyright 1996 Alexandre Julliard
 * Copyright 1998 Ulrich Weigand
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#define RUNNING_ON_VALGRIND 0  /* FIXME */

#define WIN32_NO_STATUS
#define NONAMELESSUNION

#include "ntdll_misc.h"
#include "wine/list.h"
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(heap);
/* HeapCompatibilityInformation values */

#define HEAP_STD 0
#define HEAP_LAL 1
#define HEAP_LFH 2

/* undocumented RtlWalkHeap structure */

struct rtl_heap_entry
{
    LPVOID lpData;
    SIZE_T cbData;  /* differs from PROCESS_HEAP_ENTRY */
    BYTE cbOverhead;
    BYTE iRegionIndex;
    WORD wFlags;    /* value differs from PROCESS_HEAP_ENTRY */
    union
    {
        struct
        {
            HANDLE hMem;
            DWORD dwReserved[3];
        } Block;
        struct
        {
            DWORD dwCommittedSize;
            DWORD dwUnCommittedSize;
            LPVOID lpFirstBlock;
            LPVOID lpLastBlock;
        } Region;
    };
};
/* rtl_heap_entry flags, names made up */

#define RTL_HEAP_ENTRY_BUSY         0x0001
#define RTL_HEAP_ENTRY_REGION       0x0002
#define RTL_HEAP_ENTRY_BLOCK        0x0010
#define RTL_HEAP_ENTRY_UNCOMMITTED  0x1000
#define RTL_HEAP_ENTRY_COMMITTED    0x4000
#define RTL_HEAP_ENTRY_LFH          0x8000
/* header for heap blocks */

#define REGION_ALIGN  0x10000
#define BLOCK_ALIGN   (2 * sizeof(void *))

struct DECLSPEC_ALIGN(8) block
{
    /* block size in multiples of BLOCK_ALIGN */
    WORD block_size;
    /* unused size (used block) / high size bits (free block) */
    WORD tail_size;
    /* offset to region base / first group block (LFH block) */
    WORD base_offset;
    BYTE block_type;
    BYTE block_flags;
};

C_ASSERT( sizeof(struct block) == 8 );
/* block specific flags */

#define BLOCK_FLAG_FREE        0x01
#define BLOCK_FLAG_PREV_FREE   0x02
#define BLOCK_FLAG_FREE_LINK   0x03
#define BLOCK_FLAG_LARGE       0x04
#define BLOCK_FLAG_LFH         0x08  /* block is handled by the LFH frontend */
#define BLOCK_FLAG_USER_INFO   0x10  /* user flags up to 0xf0 */
#define BLOCK_FLAG_USER_MASK   0xf0

#define BLOCK_USER_FLAGS( heap_flags ) (((heap_flags) >> 4) & BLOCK_FLAG_USER_MASK)
#define HEAP_USER_FLAGS( block_flags ) (((block_flags) & BLOCK_FLAG_USER_MASK) << 4)
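
/* illustrative checks (added, not in the original): the two macros translate between the
 * heap user flag range (0x100..0xf00, see HEAP_ADD_USER_INFO below) and the per-block
 * user flag range (0x10..0xf0), e.g. heap flag 0x100 becomes BLOCK_FLAG_USER_INFO */
C_ASSERT( BLOCK_USER_FLAGS( 0x100 ) == BLOCK_FLAG_USER_INFO );
C_ASSERT( HEAP_USER_FLAGS( BLOCK_FLAG_USER_MASK ) == 0xf00 );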
/* entry to link free blocks in free lists */

struct DECLSPEC_ALIGN(BLOCK_ALIGN) entry
{
    struct block block;
    struct list entry;
};

C_ASSERT( sizeof(struct entry) == 2 * BLOCK_ALIGN );
typedef struct
{
    SIZE_T __pad[sizeof(SIZE_T) / sizeof(DWORD)];
    struct list entry;  /* entry in heap large blocks list */
    SIZE_T data_size;   /* size of user data */
    SIZE_T block_size;  /* total size of virtual memory block */
    void *user_value;
    struct block block;
} ARENA_LARGE;

/* block must be last and aligned */
C_ASSERT( sizeof(ARENA_LARGE) == offsetof(ARENA_LARGE, block) + sizeof(struct block) );
C_ASSERT( sizeof(ARENA_LARGE) == 4 * BLOCK_ALIGN );
#define BLOCK_TYPE_USED   'u'
#define BLOCK_TYPE_DEAD   'D'
#define BLOCK_TYPE_FREE   'F'
#define BLOCK_TYPE_LARGE  'L'

#define BLOCK_FILL_USED  0x55
#define BLOCK_FILL_TAIL  0xab
#define BLOCK_FILL_FREE  0xfeeefeee

#define ROUND_ADDR(addr, mask) ((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))
#define ROUND_SIZE(size, mask) ((((SIZE_T)(size) + (mask)) & ~(SIZE_T)(mask)))
#define FIELD_MAX(type, field) (((SIZE_T)1 << (sizeof(((type *)0)->field) * 8)) - 1)
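
/* illustrative checks (added): ROUND_SIZE rounds up to the next multiple of mask + 1, and
 * FIELD_MAX is the largest value a struct field can hold (0xffff for the WORD-sized
 * block_size above) */
C_ASSERT( ROUND_SIZE( 1, BLOCK_ALIGN - 1 ) == BLOCK_ALIGN );
C_ASSERT( FIELD_MAX( struct block, block_size ) == 0xffff );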
#define HEAP_MIN_BLOCK_SIZE  ROUND_SIZE(sizeof(struct entry) + BLOCK_ALIGN, BLOCK_ALIGN - 1)

C_ASSERT( sizeof(struct block) <= HEAP_MIN_BLOCK_SIZE );
C_ASSERT( sizeof(struct entry) <= HEAP_MIN_BLOCK_SIZE );
/* used block size is coded into block_size */
#define HEAP_MAX_USED_BLOCK_SIZE    (FIELD_MAX( struct block, block_size ) * BLOCK_ALIGN)
/* free block size is coded into block_size + tail_size */
#define HEAP_MAX_FREE_BLOCK_SIZE    (HEAP_MAX_USED_BLOCK_SIZE + (FIELD_MAX( struct block, tail_size ) << 16) * BLOCK_ALIGN)
/* subheap distance is coded into base_offset */
#define HEAP_MAX_BLOCK_REGION_SIZE  (FIELD_MAX( struct block, base_offset ) * REGION_ALIGN)

C_ASSERT( HEAP_MAX_USED_BLOCK_SIZE == 512 * 1024 * (sizeof(void *) / 4) - BLOCK_ALIGN );
C_ASSERT( HEAP_MAX_FREE_BLOCK_SIZE >= 128 * 1024 * 1024 * (sizeof(void *) / 4) - BLOCK_ALIGN );
C_ASSERT( HEAP_MAX_BLOCK_REGION_SIZE >= 128 * 1024 * 1024 * (sizeof(void *) / 4) - BLOCK_ALIGN );
C_ASSERT( HEAP_MAX_FREE_BLOCK_SIZE >= HEAP_MAX_BLOCK_REGION_SIZE );
/* minimum size to start allocating large blocks */
#define HEAP_MIN_LARGE_BLOCK_SIZE  (HEAP_MAX_USED_BLOCK_SIZE - 0x1000)

/* There will be a free list bucket for every arena size up to and including this value */
#define HEAP_MAX_SMALL_FREE_LIST  0x100
C_ASSERT( HEAP_MAX_SMALL_FREE_LIST % BLOCK_ALIGN == 0 );
#define HEAP_NB_SMALL_FREE_LISTS  (((HEAP_MAX_SMALL_FREE_LIST - HEAP_MIN_BLOCK_SIZE) / BLOCK_ALIGN) + 1)
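
/* worked example (added): on 64-bit, BLOCK_ALIGN is 0x10 and HEAP_MIN_BLOCK_SIZE is
 * ROUND_SIZE( 0x20 + 0x10, 0xf ) = 0x30, so there are ((0x100 - 0x30) / 0x10) + 1 = 14
 * small free lists, one per block size 0x30, 0x40, ..., 0x100 */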
/* Max size of the blocks on the free lists above HEAP_MAX_SMALL_FREE_LIST */
static const SIZE_T free_list_sizes[] =
{
    0x200, 0x400, 0x1000, ~(SIZE_T)0
};

#define HEAP_NB_FREE_LISTS  (ARRAY_SIZE(free_list_sizes) + HEAP_NB_SMALL_FREE_LISTS)
typedef struct DECLSPEC_ALIGN(BLOCK_ALIGN) tagSUBHEAP
{
    SIZE_T __pad[sizeof(SIZE_T) / sizeof(DWORD)];
    SIZE_T block_size;
    SIZE_T data_size;
    struct list entry;
    void *user_value;
    struct block block;
} SUBHEAP;

/* block must be last and aligned */
C_ASSERT( sizeof(SUBHEAP) == offsetof(SUBHEAP, block) + sizeof(struct block) );
C_ASSERT( sizeof(SUBHEAP) == 4 * BLOCK_ALIGN );
/* LFH block size bins */

#define BIN_SIZE_MIN_0      0
#define BIN_SIZE_MIN_1 0x100
#define BIN_SIZE_MIN_2 0x200
#define BIN_SIZE_MIN_3 0x400
#define BIN_SIZE_MIN_4 0x800
#define BIN_SIZE_MIN_5 0x1000
#define BIN_SIZE_MIN_6 0x2000
#define BIN_SIZE_MIN_7 0x4000
#define BIN_SIZE_MAX   0x8000

#define BIN_SIZE_STEP_0 (16)
#define BIN_SIZE_STEP_1 (BIN_SIZE_MIN_1 >> 4)
#define BIN_SIZE_STEP_2 (BIN_SIZE_MIN_2 >> 4)
#define BIN_SIZE_STEP_3 (BIN_SIZE_MIN_3 >> 4)
#define BIN_SIZE_STEP_4 (BIN_SIZE_MIN_4 >> 4)
#define BIN_SIZE_STEP_5 (BIN_SIZE_MIN_5 >> 4)
#define BIN_SIZE_STEP_6 (BIN_SIZE_MIN_6 >> 4)
#define BIN_SIZE_STEP_7 (BIN_SIZE_MIN_7 >> 4)
#define BLOCK_BIN_SIZE_N( n, bin )  (BIN_SIZE_MIN_##n + (bin + 1) * BIN_SIZE_STEP_##n)
#define BLOCK_SIZE_BIN_N( n, size ) (((size) - 1 - BIN_SIZE_MIN_##n) / BIN_SIZE_STEP_##n)

#define BLOCK_BIN_SIZE( bin ) ((bin) >= 0x80 ? ~(SIZE_T)0 : \
                               (bin) >= 0x70 ? BLOCK_BIN_SIZE_N( 7, bin - 0x70 ) : \
                               (bin) >= 0x60 ? BLOCK_BIN_SIZE_N( 6, bin - 0x60 ) : \
                               (bin) >= 0x50 ? BLOCK_BIN_SIZE_N( 5, bin - 0x50 ) : \
                               (bin) >= 0x40 ? BLOCK_BIN_SIZE_N( 4, bin - 0x40 ) : \
                               (bin) >= 0x30 ? BLOCK_BIN_SIZE_N( 3, bin - 0x30 ) : \
                               (bin) >= 0x20 ? BLOCK_BIN_SIZE_N( 2, bin - 0x20 ) : \
                               (bin) >= 0x10 ? BLOCK_BIN_SIZE_N( 1, bin - 0x10 ) : \
                               BLOCK_BIN_SIZE_N( 0, bin ))

#define BLOCK_SIZE_BIN( size ) ((size) > BIN_SIZE_MAX   ? 0x80 : \
                                (size) > BIN_SIZE_MIN_7 ? 0x70 + BLOCK_SIZE_BIN_N( 7, size ) : \
                                (size) > BIN_SIZE_MIN_6 ? 0x60 + BLOCK_SIZE_BIN_N( 6, size ) : \
                                (size) > BIN_SIZE_MIN_5 ? 0x50 + BLOCK_SIZE_BIN_N( 5, size ) : \
                                (size) > BIN_SIZE_MIN_4 ? 0x40 + BLOCK_SIZE_BIN_N( 4, size ) : \
                                (size) > BIN_SIZE_MIN_3 ? 0x30 + BLOCK_SIZE_BIN_N( 3, size ) : \
                                (size) > BIN_SIZE_MIN_2 ? 0x20 + BLOCK_SIZE_BIN_N( 2, size ) : \
                                (size) > BIN_SIZE_MIN_1 ? 0x10 + BLOCK_SIZE_BIN_N( 1, size ) : \
                                (size) <= BIN_SIZE_MIN_0 ? 0 : BLOCK_SIZE_BIN_N( 0, size ))

#define BLOCK_SIZE_BIN_COUNT (BLOCK_SIZE_BIN( BIN_SIZE_MAX + 1 ) + 1)
/* macros sanity checks */
C_ASSERT( BLOCK_SIZE_BIN( 0 ) == 0 );
C_ASSERT( BLOCK_SIZE_BIN( 0x10 ) == 0 );
C_ASSERT( BLOCK_BIN_SIZE( 0 ) == BIN_SIZE_MIN_0 + 1 * BIN_SIZE_STEP_0 );
C_ASSERT( BLOCK_SIZE_BIN( 0x11 ) == 1 );
C_ASSERT( BLOCK_BIN_SIZE( 1 ) == BIN_SIZE_MIN_0 + 2 * BIN_SIZE_STEP_0 );
C_ASSERT( BLOCK_SIZE_BIN( BIN_SIZE_MAX ) == 0x7f );
C_ASSERT( BLOCK_BIN_SIZE( 0x7f ) == BIN_SIZE_MAX );
C_ASSERT( BLOCK_SIZE_BIN( BIN_SIZE_MAX + 1 ) == 0x80 );
C_ASSERT( BLOCK_BIN_SIZE( 0x80 ) == ~(SIZE_T)0 );
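
/* a mid-range example (added): a 0x180-byte block lands in bin 0x17, i.e. sub-bin 7 of
 * size class 1 (0x10-byte steps above 0x100), and the bin maps back to the same bound */
C_ASSERT( BLOCK_SIZE_BIN( 0x180 ) == 0x17 );
C_ASSERT( BLOCK_BIN_SIZE( 0x17 ) == 0x180 );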
/* difference between block classes and all possible validation overhead must fit into block tail_size */
C_ASSERT( BIN_SIZE_STEP_7 + 3 * BLOCK_ALIGN <= FIELD_MAX( struct block, tail_size ) );

static BYTE affinity_mapping[] = {20,6,31,15,14,29,27,4,18,24,26,13,0,9,2,30,17,7,23,25,10,19,12,3,22,21,5,16,1,28,11,8};
static LONG next_thread_affinity;
/* a bin, tracking heap blocks of a certain size */
struct bin
{
    /* counters for LFH activation */
    LONG count_alloc;
    LONG count_freed;
    LONG enabled;

    /* list of groups with free blocks */
    SLIST_HEADER groups;

    /* array of affinity reserved groups, interleaved with other bins to keep
     * all pointers of the same affinity and different bin grouped together,
     * and pointers of the same bin and different affinity away from each other,
     * hopefully in separate cache lines.
     */
    struct group **affinity_group_base;
};
static inline struct group **bin_get_affinity_group( struct bin *bin, BYTE affinity )
{
    return bin->affinity_group_base + affinity * BLOCK_SIZE_BIN_COUNT;
}
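
/* note (added): the pointer for (bin, affinity) thus lives at index
 * affinity * BLOCK_SIZE_BIN_COUNT + bin of the shared table allocated right after the
 * bins array (see RtlCreateHeap below), so for a given affinity the entries of all
 * bins sit next to each other while entries of the same bin stay a full table row apart */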
struct heap
{   /* win32/win64 */
    DWORD_PTR        unknown1[2];   /* 0000/0000 */
    DWORD            ffeeffee;      /* 0008/0010 */
    DWORD            auto_flags;    /* 000c/0014 */
    DWORD_PTR        unknown2[7];   /* 0010/0018 */
    DWORD            unknown3[2];   /* 002c/0050 */
    DWORD_PTR        unknown4[3];   /* 0034/0058 */
    DWORD            flags;         /* 0040/0070 */
    DWORD            force_flags;   /* 0044/0074 */
    /* end of the Windows 10 compatible struct layout */

    LONG             compat_info;   /* HeapCompatibilityInformation / heap frontend type */
    struct list      entry;         /* Entry in process heap list */
    struct list      subheap_list;  /* Sub-heap list */
    struct list      large_list;    /* Large blocks list */
    SIZE_T           grow_size;     /* Size of next subheap for growing heap */
    SIZE_T           min_size;      /* Minimum committed size */
    DWORD            magic;         /* Magic number */
    DWORD            pending_pos;   /* Position in pending free requests ring */
    struct block   **pending_free;  /* Ring buffer for pending free requests */
    RTL_CRITICAL_SECTION cs;
    struct entry     free_lists[HEAP_NB_FREE_LISTS];
    struct bin      *bins;
    SUBHEAP          subheap;
};

/* subheap must be last and aligned */
C_ASSERT( sizeof(struct heap) == offsetof(struct heap, subheap) + sizeof(SUBHEAP) );
C_ASSERT( sizeof(struct heap) % BLOCK_ALIGN == 0 );
C_ASSERT( offsetof(struct heap, subheap) <= REGION_ALIGN - 1 );
#define HEAP_MAGIC ((DWORD)('H' | ('E'<<8) | ('A'<<16) | ('P'<<24)))

#define HEAP_DEF_SIZE     (0x40000 * BLOCK_ALIGN)
#define MAX_FREE_PENDING  1024  /* max number of free requests to delay */

/* some undocumented flags (names are made up) */
#define HEAP_PRIVATE          0x00001000
#define HEAP_ADD_USER_INFO    0x00000100
#define HEAP_USER_FLAGS_MASK  0x00000f00
#define HEAP_PAGE_ALLOCS      0x01000000
#define HEAP_VALIDATE         0x10000000
#define HEAP_VALIDATE_ALL     0x20000000
#define HEAP_VALIDATE_PARAMS  0x40000000
#define HEAP_CHECKING_ENABLED 0x80000000

static struct heap *process_heap;  /* main process heap */
/* check if memory range a contains memory range b */
static inline BOOL contains( const void *a, SIZE_T a_size, const void *b, SIZE_T b_size )
{
    const void *a_end = (char *)a + a_size, *b_end = (char *)b + b_size;
    return a <= b && b <= b_end && b_end <= a_end;
}
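
/* note (added): the "b <= b_end" term also rejects ranges whose end wrapped around the
 * address space, so a corrupted huge b_size cannot make the check succeed by overflow */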
static inline UINT block_get_flags( const struct block *block )
{
    return block->block_flags;
}

static inline UINT block_get_type( const struct block *block )
{
    return block->block_type;
}
static inline void block_set_base( struct block *block, const void *base )
{
    const char *offset = ROUND_ADDR( block, REGION_ALIGN - 1 );
    block->base_offset = (offset - (char *)base) / REGION_ALIGN;
}
static inline void block_set_type( struct block *block, UINT type )
{
    block->block_type = type;
}
static inline SUBHEAP *block_get_subheap( const struct heap *heap, const struct block *block )
{
    char *offset = ROUND_ADDR( block, REGION_ALIGN - 1 );
    void *base = offset - block->base_offset * REGION_ALIGN;
    if (base != (void *)heap) return base;
    else return (SUBHEAP *)&heap->subheap;
}
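
/* note (added): base_offset counts REGION_ALIGN (64KiB) units from the block's aligned
 * address down to the region base; when that base turns out to be the heap itself, the
 * block belongs to the heap's embedded main subheap */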
static inline UINT block_get_overhead( const struct block *block )
{
    if (block_get_flags( block ) & BLOCK_FLAG_FREE) return sizeof(*block) + sizeof(struct list);
    return sizeof(*block) + block->tail_size;
}
/* return the size of a block, including its header */
static inline UINT block_get_size( const struct block *block )
{
    UINT block_size = block->block_size;
    if (block_get_flags( block ) & BLOCK_FLAG_FREE) block_size += (UINT)block->tail_size << 16;
    return block_size * BLOCK_ALIGN;
}

static inline void block_set_size( struct block *block, UINT block_size )
{
    block_size /= BLOCK_ALIGN;
    if (block_get_flags( block ) & BLOCK_FLAG_FREE) block->tail_size = block_size >> 16;
    block->block_size = block_size;
}
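
/* worked example (added): on 64-bit a 0x150000-byte free block is stored as
 * block_size = 0x5000 and tail_size = 0x1, since 0x150000 / BLOCK_ALIGN = 0x15000 =
 * (0x1 << 16) + 0x5000; used blocks only have the 16 bits of block_size available */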
static inline void block_set_flags( struct block *block, BYTE clear, BYTE set )
{
    UINT block_size = block_get_size( block );
    block->block_flags &= ~clear;
    block->block_flags |= set;
    block_set_size( block, block_size );
}
static inline void *subheap_base( const SUBHEAP *subheap )
{
    return ROUND_ADDR( subheap, REGION_ALIGN - 1 );
}

static inline SIZE_T subheap_overhead( const SUBHEAP *subheap )
{
    return (char *)&subheap->block - (char *)subheap_base( subheap );
}

static inline SIZE_T subheap_size( const SUBHEAP *subheap )
{
    return subheap->block_size + subheap_overhead( subheap );
}

static inline const void *subheap_commit_end( const SUBHEAP *subheap )
{
    return (char *)(subheap + 1) + subheap->data_size;
}
static void subheap_set_bounds( SUBHEAP *subheap, char *commit_end, char *end )
{
    subheap->block_size = end - (char *)&subheap->block;
    subheap->data_size = commit_end - (char *)(subheap + 1);
}

static inline void *first_block( const SUBHEAP *subheap )
{
    return (void *)&subheap->block;
}

static inline const void *last_block( const SUBHEAP *subheap )
{
    return (char *)subheap_commit_end( subheap ) - sizeof(struct block);
}
static inline struct block *next_block( const SUBHEAP *subheap, const struct block *block )
{
    const char *data = (char *)(block + 1), *next, *last = last_block( subheap );
    next = (char *)block + block_get_size( block );
    if (!contains( data, last - (char *)data, next, sizeof(*block) )) return NULL;
    return (struct block *)next;
}
static inline BOOL check_subheap( const SUBHEAP *subheap, const struct heap *heap )
{
    if (subheap->user_value != heap) return FALSE;

    return contains( &subheap->block, subheap->block_size, subheap + 1, subheap->data_size );
}

static BOOL heap_validate( const struct heap *heap );
/* mark a block of memory as inaccessible for debugging purposes */
static inline void valgrind_make_noaccess( void const *ptr, SIZE_T size )
{
#if defined(VALGRIND_MAKE_MEM_NOACCESS)
    VALGRIND_DISCARD( VALGRIND_MAKE_MEM_NOACCESS( ptr, size ) );
#elif defined(VALGRIND_MAKE_NOACCESS)
    VALGRIND_DISCARD( VALGRIND_MAKE_NOACCESS( ptr, size ) );
#endif
}
/* mark a block of memory as initialized for debugging purposes */
static inline void valgrind_make_readable( void const *ptr, SIZE_T size )
{
#if defined(VALGRIND_MAKE_MEM_DEFINED)
    VALGRIND_DISCARD( VALGRIND_MAKE_MEM_DEFINED( ptr, size ) );
#elif defined(VALGRIND_MAKE_READABLE)
    VALGRIND_DISCARD( VALGRIND_MAKE_READABLE( ptr, size ) );
#endif
}

/* mark a block of memory as uninitialized for debugging purposes */
static inline void valgrind_make_writable( void const *ptr, SIZE_T size )
{
#if defined(VALGRIND_MAKE_MEM_UNDEFINED)
    VALGRIND_DISCARD( VALGRIND_MAKE_MEM_UNDEFINED( ptr, size ) );
#elif defined(VALGRIND_MAKE_WRITABLE)
    VALGRIND_DISCARD( VALGRIND_MAKE_WRITABLE( ptr, size ) );
#endif
}
/* mark a block of memory as free for debugging purposes */
static inline void mark_block_free( void *ptr, SIZE_T size, DWORD flags )
{
    if (flags & HEAP_FREE_CHECKING_ENABLED)
    {
        SIZE_T i;
        for (i = 0; i < size / sizeof(DWORD); i++) ((DWORD *)ptr)[i] = BLOCK_FILL_FREE;
    }
    valgrind_make_noaccess( ptr, size );
}
/* mark a block of memory as a tail block */
static inline void mark_block_tail( struct block *block, DWORD flags )
{
    char *tail = (char *)block + block_get_size( block ) - block->tail_size;
    if (flags & HEAP_TAIL_CHECKING_ENABLED)
    {
        valgrind_make_writable( tail, BLOCK_ALIGN );
        memset( tail, BLOCK_FILL_TAIL, BLOCK_ALIGN );
    }
    valgrind_make_noaccess( tail, BLOCK_ALIGN );
    if (flags & HEAP_ADD_USER_INFO)
    {
        if (flags & HEAP_TAIL_CHECKING_ENABLED || RUNNING_ON_VALGRIND) tail += BLOCK_ALIGN;
        valgrind_make_writable( tail + sizeof(void *), sizeof(void *) );
        memset( tail + sizeof(void *), 0, sizeof(void *) );
    }
}
/* initialize contents of a newly created block of memory */
static inline void initialize_block( struct block *block, SIZE_T old_size, SIZE_T size, DWORD flags )
{
    char *data = (char *)(block + 1);

    if (size <= old_size) return;

    if (flags & HEAP_ZERO_MEMORY)
    {
        valgrind_make_writable( data + old_size, size - old_size );
        memset( data + old_size, 0, size - old_size );
    }
    else if (flags & HEAP_FREE_CHECKING_ENABLED)
    {
        valgrind_make_writable( data + old_size, size - old_size );
        memset( data + old_size, BLOCK_FILL_USED, size - old_size );
    }
}
/* notify that a new block of memory has been allocated for debugging purposes */
static inline void valgrind_notify_alloc( void const *ptr, SIZE_T size, BOOL init )
{
#ifdef VALGRIND_MALLOCLIKE_BLOCK
    VALGRIND_MALLOCLIKE_BLOCK( ptr, size, 0, init );
#endif
}

/* notify that a block of memory has been freed for debugging purposes */
static inline void valgrind_notify_free( void const *ptr )
{
#ifdef VALGRIND_FREELIKE_BLOCK
    VALGRIND_FREELIKE_BLOCK( ptr, 0 );
#endif
}

static inline void valgrind_notify_resize( void const *ptr, SIZE_T size_old, SIZE_T size_new )
{
#ifdef VALGRIND_RESIZEINPLACE_BLOCK
    /* zero is not a valid size */
    VALGRIND_RESIZEINPLACE_BLOCK( ptr, size_old ? size_old : 1, size_new ? size_new : 1, 0 );
#endif
}
static void valgrind_notify_free_all( SUBHEAP *subheap, const struct heap *heap )
{
#ifdef VALGRIND_FREELIKE_BLOCK
    struct block *block;

    if (!RUNNING_ON_VALGRIND) return;
    if (!check_subheap( subheap, heap )) return;

    for (block = first_block( subheap ); block; block = next_block( subheap, block ))
    {
        if (block_get_flags( block ) & BLOCK_FLAG_FREE) continue;
        if (block_get_type( block ) == BLOCK_TYPE_USED) valgrind_notify_free( block + 1 );
    }
#endif
}
/* locate a free list entry of the appropriate size */
/* size is the size of the whole block including the arena header */
static inline struct entry *find_free_list( struct heap *heap, SIZE_T block_size, BOOL last )
{
    struct entry *list, *end = heap->free_lists + ARRAY_SIZE(heap->free_lists);
    unsigned int i;

    if (block_size <= HEAP_MAX_SMALL_FREE_LIST)
        i = (block_size - HEAP_MIN_BLOCK_SIZE) / BLOCK_ALIGN;
    else for (i = HEAP_NB_SMALL_FREE_LISTS; i < HEAP_NB_FREE_LISTS - 1; i++)
        if (block_size <= free_list_sizes[i - HEAP_NB_SMALL_FREE_LISTS]) break;

    list = heap->free_lists + i;
    if (last && ++list == end) list = heap->free_lists;
    return list;
}
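
/* worked example (added): on 64-bit a 0x40-byte block selects small list index
 * (0x40 - 0x30) / 0x10 = 1, while a 0x300-byte block scans the large buckets and picks
 * the 0x400 one; the "last" variant returns the following list head so callers can link
 * a block at the very end of its size class */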
/* get the memory protection type to use for a given heap */
static inline ULONG get_protection_type( DWORD flags )
{
    return (flags & HEAP_CREATE_ENABLE_EXECUTE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
}

static RTL_CRITICAL_SECTION_DEBUG process_heap_cs_debug =
{
    0, 0, NULL,  /* will be set later */
    { &process_heap_cs_debug.ProcessLocksList, &process_heap_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": main process heap section") }
};
static inline ULONG heap_get_flags( const struct heap *heap, ULONG flags )
{
    if (flags & (HEAP_TAIL_CHECKING_ENABLED | HEAP_FREE_CHECKING_ENABLED)) flags |= HEAP_CHECKING_ENABLED;
    flags &= HEAP_GENERATE_EXCEPTIONS | HEAP_NO_SERIALIZE | HEAP_ZERO_MEMORY | HEAP_REALLOC_IN_PLACE_ONLY | HEAP_CHECKING_ENABLED | HEAP_USER_FLAGS_MASK;
    return heap->flags | flags;
}
static inline void heap_lock( struct heap *heap, ULONG flags )
{
    if (flags & HEAP_NO_SERIALIZE) return;
    RtlEnterCriticalSection( &heap->cs );
}

static inline void heap_unlock( struct heap *heap, ULONG flags )
{
    if (flags & HEAP_NO_SERIALIZE) return;
    RtlLeaveCriticalSection( &heap->cs );
}

static void heap_set_status( const struct heap *heap, ULONG flags, NTSTATUS status )
{
    if (status == STATUS_NO_MEMORY && (flags & HEAP_GENERATE_EXCEPTIONS)) RtlRaiseStatus( status );
    if (status) RtlSetLastWin32ErrorAndNtStatusFromNtStatus( status );
}
static size_t get_free_list_block_size( unsigned int index )
{
    if (index < HEAP_NB_SMALL_FREE_LISTS) return HEAP_MIN_BLOCK_SIZE + index * BLOCK_ALIGN;
    return free_list_sizes[index - HEAP_NB_SMALL_FREE_LISTS];
}
static void heap_dump( const struct heap *heap )
{
    const struct block *block;
    const ARENA_LARGE *large;
    const SUBHEAP *subheap;
    unsigned int i;

    TRACE( "heap: %p\n", heap );
    TRACE( "  next %p\n", LIST_ENTRY( heap->entry.next, struct heap, entry ) );

    TRACE( "  bins:\n" );
    for (i = 0; heap->bins && i < BLOCK_SIZE_BIN_COUNT; i++)
    {
        const struct bin *bin = heap->bins + i;
        ULONG alloc = ReadNoFence( &bin->count_alloc ), freed = ReadNoFence( &bin->count_freed );
        if (!alloc && !freed) continue;
        TRACE( "    %3u: size %#4Ix, alloc %ld, freed %ld, enabled %lu\n", i, BLOCK_BIN_SIZE( i ),
               alloc, freed, ReadNoFence( &bin->enabled ) );
    }

    TRACE( "  free_lists: %p\n", heap->free_lists );
    for (i = 0; i < HEAP_NB_FREE_LISTS; i++)
        TRACE( "    %p: size %#8Ix, prev %p, next %p\n", heap->free_lists + i, get_free_list_block_size( i ),
               LIST_ENTRY( heap->free_lists[i].entry.prev, struct entry, entry ),
               LIST_ENTRY( heap->free_lists[i].entry.next, struct entry, entry ) );

    TRACE( "  subheaps: %p\n", &heap->subheap_list );
    LIST_FOR_EACH_ENTRY( subheap, &heap->subheap_list, SUBHEAP, entry )
    {
        SIZE_T free_size = 0, used_size = 0, overhead = 0;
        const char *base = subheap_base( subheap );

        TRACE( "    %p: base %p first %p last %p end %p\n", subheap, base, first_block( subheap ),
               last_block( subheap ), base + subheap_size( subheap ) );

        if (!check_subheap( subheap, heap )) return;

        overhead += subheap_overhead( subheap );
        for (block = first_block( subheap ); block; block = next_block( subheap, block ))
        {
            if (block_get_flags( block ) & BLOCK_FLAG_FREE)
            {
                TRACE( "      %p: (free) type %#10x, size %#8x, flags %#4x, prev %p, next %p\n", block,
                       block_get_type( block ), block_get_size( block ), block_get_flags( block ),
                       LIST_ENTRY( ((struct entry *)block)->entry.prev, struct entry, entry ),
                       LIST_ENTRY( ((struct entry *)block)->entry.next, struct entry, entry ) );

                overhead += block_get_overhead( block );
                free_size += block_get_size( block ) - block_get_overhead( block );
            }
            else
            {
                TRACE( "      %p: (used) type %#10x, size %#8x, flags %#4x, unused %#4x", block,
                       block_get_type( block ), block_get_size( block ), block_get_flags( block ),
                       block->tail_size );
                if (!(block_get_flags( block ) & BLOCK_FLAG_PREV_FREE)) TRACE( "\n" );
                else TRACE( ", back %p\n", *((struct block **)block - 1) );

                overhead += block_get_overhead( block );
                used_size += block_get_size( block ) - block_get_overhead( block );
            }
        }

        TRACE( "      total %#Ix, used %#Ix, free %#Ix, overhead %#Ix (%Iu%%)\n", used_size + free_size + overhead,
               used_size, free_size, overhead, (overhead * 100) / subheap_size( subheap ) );
    }

    TRACE( "  large blocks: %p\n", &heap->large_list );
    LIST_FOR_EACH_ENTRY( large, &heap->large_list, ARENA_LARGE, entry )
    {
        TRACE( "    %p: (large) type %#10x, size %#8x, flags %#4x, total_size %#10Ix, alloc_size %#10Ix, prev %p, next %p\n",
               large, block_get_type( &large->block ), block_get_size( &large->block ), block_get_flags( &large->block ), large->block_size,
               large->data_size, LIST_ENTRY( large->entry.prev, ARENA_LARGE, entry ), LIST_ENTRY( large->entry.next, ARENA_LARGE, entry ) );
    }

    if (heap->pending_free)
    {
        TRACE( "  pending blocks: %p\n", heap->pending_free );
        for (i = 0; i < MAX_FREE_PENDING; ++i)
        {
            if (!(block = heap->pending_free[i])) break;

            TRACE( "   %c%p: (pend) type %#10x, size %#8x, flags %#4x, unused %#4x", i == heap->pending_pos ? '*' : ' ',
                   block, block_get_type( block ), block_get_size( block ), block_get_flags( block ), block->tail_size );
            if (!(block_get_flags( block ) & BLOCK_FLAG_PREV_FREE)) TRACE( "\n" );
            else TRACE( ", back %p\n", *((struct block **)block - 1) );
        }
    }
}
static const char *debugstr_heap_entry( struct rtl_heap_entry *entry )
{
    const char *str = wine_dbg_sprintf( "data %p, size %#Ix, overhead %#x, region %#x, flags %#x", entry->lpData,
                                        entry->cbData, entry->cbOverhead, entry->iRegionIndex, entry->wFlags );
    if (!(entry->wFlags & RTL_HEAP_ENTRY_REGION)) return str;
    return wine_dbg_sprintf( "%s, commit %#lx, uncommit %#lx, first %p, last %p", str, entry->Region.dwCommittedSize,
                             entry->Region.dwUnCommittedSize, entry->Region.lpFirstBlock, entry->Region.lpLastBlock );
}
static struct heap *unsafe_heap_from_handle( HANDLE handle, ULONG flags, ULONG *heap_flags )
{
    struct heap *heap = handle;
    BOOL valid = TRUE;

    if (!heap || (heap->magic != HEAP_MAGIC))
    {
        ERR( "Invalid handle %p!\n", handle );
        return NULL;
    }
    if (heap->flags & HEAP_VALIDATE_ALL)
    {
        heap_lock( heap, 0 );
        valid = heap_validate( heap );
        heap_unlock( heap, 0 );

        if (!valid && TRACE_ON(heap))
        {
            heap_dump( heap );
            assert( FALSE );
        }
    }

    *heap_flags = heap_get_flags( heap, flags );
    return valid ? heap : NULL;
}
static SUBHEAP *find_subheap( const struct heap *heap, const struct block *block, BOOL heap_walk )
{
    SUBHEAP *subheap;

    LIST_FOR_EACH_ENTRY( subheap, &heap->subheap_list, SUBHEAP, entry )
    {
        SIZE_T blocks_size = (char *)last_block( subheap ) - (char *)first_block( subheap );
        if (!check_subheap( subheap, heap )) return NULL;
        if (contains( first_block( subheap ), blocks_size, block, sizeof(*block) )) return subheap;
        /* outside of blocks region, possible corruption or heap_walk */
        if (contains( subheap_base( subheap ), subheap_size( subheap ), block, 1 )) return heap_walk ? subheap : NULL;
    }

    return NULL;
}
static inline BOOL subheap_commit( const struct heap *heap, SUBHEAP *subheap, const struct block *block, SIZE_T block_size )
{
    const char *end = (char *)subheap_base( subheap ) + subheap_size( subheap ), *commit_end;
    SIZE_T size;
    void *addr;

    commit_end = (char *)block + block_size + sizeof(struct entry);
    commit_end = ROUND_ADDR( (char *)commit_end + REGION_ALIGN - 1, REGION_ALIGN - 1 );

    if (commit_end > end) commit_end = end;
    if (commit_end <= (char *)subheap_commit_end( subheap )) return TRUE;

    addr = (void *)subheap_commit_end( subheap );
    size = commit_end - (char *)addr;

    if (NtAllocateVirtualMemory( NtCurrentProcess(), &addr, 0, &size, MEM_COMMIT,
                                 get_protection_type( heap->flags ) ))
    {
        WARN( "Could not commit %#Ix bytes at %p for heap %p\n", size, addr, heap );
        return FALSE;
    }

    subheap->data_size = (char *)commit_end - (char *)(subheap + 1);
    return TRUE;
}
subheap_decommit( const struct heap
*heap
, SUBHEAP
*subheap
, const void *commit_end
)
803 char *base
= subheap_base( subheap
);
807 commit_end
= ROUND_ADDR( (char *)commit_end
+ REGION_ALIGN
- 1, REGION_ALIGN
- 1 );
808 if (subheap
== &heap
->subheap
) commit_end
= max( (char *)commit_end
, (char *)base
+ heap
->min_size
);
809 if (commit_end
>= subheap_commit_end( subheap
)) return TRUE
;
811 size
= (char *)subheap_commit_end( subheap
) - (char *)commit_end
;
812 addr
= (void *)commit_end
;
814 if (NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_DECOMMIT
))
816 WARN( "Could not decommit %#Ix bytes at %p for heap %p\n", size
, addr
, heap
);
820 subheap
->data_size
= (char *)commit_end
- (char *)(subheap
+ 1);
static void block_init_free( struct block *block, ULONG flags, SUBHEAP *subheap, SIZE_T block_size )
{
    const char *end = (char *)block + block_size, *commit_end = subheap_commit_end( subheap );
    struct entry *entry = (struct entry *)block;

    valgrind_make_writable( block, sizeof(*entry) );
    block_set_type( block, BLOCK_TYPE_FREE );
    block_set_base( block, subheap_base( subheap ) );
    block_set_flags( block, ~0, BLOCK_FLAG_FREE );
    block_set_size( block, block_size );

    /* If debugging, erase the freed block content */

    if (end > commit_end) end = commit_end;
    if (end > (char *)(entry + 1)) mark_block_free( entry + 1, end - (char *)(entry + 1), flags );
}
static void insert_free_block( struct heap *heap, ULONG flags, SUBHEAP *subheap, struct block *block )
{
    struct entry *entry = (struct entry *)block, *list;
    struct block *next;

    if ((next = next_block( subheap, block )))
    {
        /* set the next block PREV_FREE flag and back pointer */
        block_set_flags( next, 0, BLOCK_FLAG_PREV_FREE );
        valgrind_make_writable( (struct block **)next - 1, sizeof(struct block *) );
        *((struct block **)next - 1) = block;
    }

    list = find_free_list( heap, block_get_size( block ), !next );
    if (!next) list_add_before( &list->entry, &entry->entry );
    else list_add_after( &list->entry, &entry->entry );
}
static struct block *heap_delay_free( struct heap *heap, ULONG flags, struct block *block )
{
    struct block *tmp;

    if (!heap->pending_free) return block;

    block_set_type( block, BLOCK_TYPE_DEAD );
    mark_block_free( block + 1, block_get_size( block ) - sizeof(*block), flags );

    heap_lock( heap, flags );

    tmp = heap->pending_free[heap->pending_pos];
    heap->pending_free[heap->pending_pos] = block;
    heap->pending_pos = (heap->pending_pos + 1) % MAX_FREE_PENDING;

    heap_unlock( heap, flags );

    return tmp;
}
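
/* note (added): the oldest entry evicted from the ring is what gets returned, and the
 * caller is expected to actually release it, so every free is delayed by up to
 * MAX_FREE_PENDING requests to help catch use-after-free bugs */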
static NTSTATUS heap_free_block( struct heap *heap, ULONG flags, struct block *block )
{
    SUBHEAP *subheap = block_get_subheap( heap, block );
    SIZE_T block_size = block_get_size( block );
    struct entry *entry;
    struct block *next;

    if ((next = next_block( subheap, block )) && (block_get_flags( next ) & BLOCK_FLAG_FREE))
    {
        /* merge with next block if it is free */
        entry = (struct entry *)next;
        block_size += block_get_size( &entry->block );
        list_remove( &entry->entry );
        next = next_block( subheap, next );
    }

    if (block_get_flags( block ) & BLOCK_FLAG_PREV_FREE)
    {
        /* merge with previous block if it is free */
        entry = *((struct entry **)block - 1);
        block_size += block_get_size( &entry->block );
        list_remove( &entry->entry );
        block = &entry->block;
    }

    if (block == first_block( subheap ) && !next && subheap != &heap->subheap)
    {
        /* free the subheap if it's empty and not the main one */
        void *addr = subheap_base( subheap );
        SIZE_T size = 0;

        list_remove( &subheap->entry );
        NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );
        return STATUS_SUCCESS;
    }

    block_init_free( block, flags, subheap, block_size );
    insert_free_block( heap, flags, subheap, block );

    /* keep room for a full committed block as hysteresis */
    if (!next) subheap_decommit( heap, subheap, (char *)((struct entry *)block + 1) + REGION_ALIGN );

    return STATUS_SUCCESS;
}
static struct block *split_block( struct heap *heap, ULONG flags, struct block *block,
                                  SIZE_T old_block_size, SIZE_T block_size )
{
    SUBHEAP *subheap = block_get_subheap( heap, block );

    if (old_block_size >= block_size + HEAP_MIN_BLOCK_SIZE)
    {
        block_set_size( block, block_size );
        return next_block( subheap, block );
    }

    block_set_size( block, old_block_size );
    return NULL;
}
static void *allocate_region( struct heap *heap, ULONG flags, SIZE_T *region_size, SIZE_T *commit_size )
{
    void *addr = NULL;
    NTSTATUS status;

    if (heap && !(flags & HEAP_GROWABLE))
    {
        WARN( "Heap %p isn't growable, cannot allocate %#Ix bytes\n", heap, *region_size );
        return NULL;
    }

    /* allocate the memory block */
    if ((status = NtAllocateVirtualMemory( NtCurrentProcess(), &addr, 0, region_size, MEM_RESERVE,
                                           get_protection_type( flags ) )))
    {
        WARN( "Could not allocate %#Ix bytes, status %#lx\n", *region_size, status );
        return NULL;
    }
    if ((status = NtAllocateVirtualMemory( NtCurrentProcess(), &addr, 0, commit_size, MEM_COMMIT,
                                           get_protection_type( flags ) )))
    {
        WARN( "Could not commit %#Ix bytes, status %#lx\n", *commit_size, status );
        return NULL;
    }

    return addr;
}
static NTSTATUS heap_allocate_large( struct heap *heap, ULONG flags, SIZE_T block_size,
                                     SIZE_T size, void **ret )
{
    ARENA_LARGE *arena;
    SIZE_T total_size = ROUND_SIZE( sizeof(*arena) + size, REGION_ALIGN - 1 );
    struct block *block;

    if (total_size < size) return STATUS_NO_MEMORY;  /* overflow */
    if (!(arena = allocate_region( heap, flags, &total_size, &total_size ))) return STATUS_NO_MEMORY;

    block = &arena->block;
    arena->data_size = size;
    arena->block_size = (char *)arena + total_size - (char *)block;

    block_set_type( block, BLOCK_TYPE_LARGE );
    block_set_base( block, arena );
    block_set_flags( block, ~0, BLOCK_FLAG_LARGE | BLOCK_USER_FLAGS( flags ) );
    block_set_size( block, 0 );

    heap_lock( heap, flags );
    list_add_tail( &heap->large_list, &arena->entry );
    heap_unlock( heap, flags );

    valgrind_make_noaccess( (char *)block + sizeof(*block) + arena->data_size,
                            arena->block_size - sizeof(*block) - arena->data_size );

    *ret = block + 1;
    return STATUS_SUCCESS;
}
static NTSTATUS heap_free_large( struct heap *heap, ULONG flags, struct block *block )
{
    ARENA_LARGE *arena = CONTAINING_RECORD( block, ARENA_LARGE, block );
    LPVOID address = arena;
    SIZE_T size = 0;

    heap_lock( heap, flags );
    list_remove( &arena->entry );
    heap_unlock( heap, flags );

    return NtFreeVirtualMemory( NtCurrentProcess(), &address, &size, MEM_RELEASE );
}
/***********************************************************************
 *           find_large_block
 */
static BOOL find_large_block( const struct heap *heap, const struct block *block )
{
    ARENA_LARGE *arena;

    LIST_FOR_EACH_ENTRY( arena, &heap->large_list, ARENA_LARGE, entry )
        if (block == &arena->block) return TRUE;

    return FALSE;
}
static BOOL validate_large_block( const struct heap *heap, const struct block *block )
{
    const ARENA_LARGE *arena = CONTAINING_RECORD( block, ARENA_LARGE, block );
    const char *err = NULL;

    if (ROUND_ADDR( block, REGION_ALIGN - 1 ) != arena)
        err = "invalid block BLOCK_ALIGN";
    else if (block_get_size( block ))
        err = "invalid block size";
    else if (!(block_get_flags( block ) & BLOCK_FLAG_LARGE))
        err = "invalid block flags";
    else if (block_get_type( block ) != BLOCK_TYPE_LARGE)
        err = "invalid block type";
    else if (!contains( block, arena->block_size, block + 1, arena->data_size ))
        err = "invalid block size";

    if (err)
    {
        ERR( "heap %p, block %p: %s\n", heap, block, err );
        if (TRACE_ON(heap)) heap_dump( heap );
    }

    return !err;
}
static SUBHEAP *create_subheap( struct heap *heap, DWORD flags, SIZE_T total_size, SIZE_T commit_size )
{
    SIZE_T block_size;
    SUBHEAP *subheap;

    commit_size = ROUND_SIZE( max( commit_size, REGION_ALIGN ), REGION_ALIGN - 1 );
    total_size = min( max( commit_size, total_size ), 0xffff0000 );  /* don't allow a heap larger than 4GB */

    if (!(subheap = allocate_region( heap, flags, &total_size, &commit_size ))) return NULL;

    subheap->user_value = heap;
    subheap_set_bounds( subheap, (char *)subheap + commit_size, (char *)subheap + total_size );
    block_size = (SIZE_T)ROUND_ADDR( subheap_size( subheap ) - subheap_overhead( subheap ), BLOCK_ALIGN - 1 );
    block_init_free( first_block( subheap ), flags, subheap, block_size );

    list_add_head( &heap->subheap_list, &subheap->entry );

    return subheap;
}
*find_free_block( struct heap
*heap
, ULONG flags
, SIZE_T block_size
)
1079 struct list
*ptr
= &find_free_list( heap
, block_size
, FALSE
)->entry
;
1080 struct entry
*entry
;
1081 struct block
*block
;
1085 /* Find a suitable free list, and in it find a block large enough */
1087 while ((ptr
= list_next( &heap
->free_lists
[0].entry
, ptr
)))
1089 entry
= LIST_ENTRY( ptr
, struct entry
, entry
);
1090 block
= &entry
->block
;
1091 if (block_get_flags( block
) == BLOCK_FLAG_FREE_LINK
) continue;
1092 if (block_get_size( block
) >= block_size
)
1094 if (!subheap_commit( heap
, block_get_subheap( heap
, block
), block
, block_size
)) return NULL
;
1095 list_remove( &entry
->entry
);
1100 /* make sure we can fit the block and a free entry at the end */
1101 total_size
= sizeof(SUBHEAP
) + block_size
+ sizeof(struct entry
);
1102 if (total_size
< block_size
) return NULL
; /* overflow */
1104 if ((subheap
= create_subheap( heap
, flags
, max( heap
->grow_size
, total_size
), total_size
)))
1106 if (heap
->grow_size
<= HEAP_MAX_FREE_BLOCK_SIZE
/ 2) heap
->grow_size
*= 2;
1108 else while (!subheap
) /* shrink the grow size again if we are running out of space */
1110 if (heap
->grow_size
<= total_size
|| heap
->grow_size
<= 4 * 1024 * 1024) return NULL
;
1111 heap
->grow_size
/= 2;
1112 subheap
= create_subheap( heap
, flags
, max( heap
->grow_size
, total_size
), total_size
);
1115 TRACE( "created new sub-heap %p of %#Ix bytes for heap %p\n", subheap
, subheap_size( subheap
), heap
);
1117 return first_block( subheap
);
static BOOL is_valid_free_block( const struct heap *heap, const struct block *block )
{
    const SUBHEAP *subheap;
    unsigned int i;

    if ((subheap = find_subheap( heap, block, FALSE ))) return TRUE;
    for (i = 0; i < HEAP_NB_FREE_LISTS; i++) if (block == &heap->free_lists[i].block) return TRUE;
    return FALSE;
}
static BOOL validate_free_block( const struct heap *heap, const SUBHEAP *subheap, const struct block *block )
{
    const char *err = NULL, *base = subheap_base( subheap ), *commit_end = subheap_commit_end( subheap );
    const struct entry *entry = (struct entry *)block;
    const struct block *prev, *next;
    DWORD flags = heap->flags;

    if ((ULONG_PTR)(block + 1) % BLOCK_ALIGN)
        err = "invalid block BLOCK_ALIGN";
    else if (block_get_type( block ) != BLOCK_TYPE_FREE)
        err = "invalid block header";
    else if (!(block_get_flags( block ) & BLOCK_FLAG_FREE) || (block_get_flags( block ) & BLOCK_FLAG_PREV_FREE))
        err = "invalid block flags";
    else if (!contains( base, subheap_size( subheap ), block, block_get_size( block ) ))
        err = "invalid block size";
    else if (!is_valid_free_block( heap, (next = &LIST_ENTRY( entry->entry.next, struct entry, entry )->block) ))
        err = "invalid next free block pointer";
    else if (!(block_get_flags( next ) & BLOCK_FLAG_FREE) || block_get_type( next ) != BLOCK_TYPE_FREE)
        err = "invalid next free block header";
    else if (!is_valid_free_block( heap, (prev = &LIST_ENTRY( entry->entry.prev, struct entry, entry )->block) ))
        err = "invalid previous free block pointer";
    else if (!(block_get_flags( prev ) & BLOCK_FLAG_FREE) || block_get_type( prev ) != BLOCK_TYPE_FREE)
        err = "invalid previous free block header";
    else if ((next = next_block( subheap, block )))
    {
        if (!(block_get_flags( next ) & BLOCK_FLAG_PREV_FREE))
            err = "invalid next block flags";
        if (*((struct block **)next - 1) != block)
            err = "invalid next block back pointer";
    }

    if (!err && (flags & HEAP_FREE_CHECKING_ENABLED))
    {
        const char *ptr = (char *)(entry + 1), *end = (char *)block + block_get_size( block );
        if (next) end -= sizeof(struct block *);
        if (end > commit_end) end = commit_end;
        while (!err && ptr < end)
        {
            if (*(DWORD *)ptr != BLOCK_FILL_FREE) err = "free block overwritten";
            ptr += sizeof(DWORD);
        }
    }

    if (err)
    {
        ERR( "heap %p, block %p: %s\n", heap, block, err );
        if (TRACE_ON(heap)) heap_dump( heap );
    }

    return !err;
}
static BOOL validate_used_block( const struct heap *heap, const SUBHEAP *subheap, const struct block *block,
                                 unsigned int expected_block_type )
{
    const char *err = NULL, *base = subheap_base( subheap ), *commit_end = subheap_commit_end( subheap );
    DWORD flags = heap->flags;
    const struct block *next;
    unsigned int i;

    if ((ULONG_PTR)(block + 1) % BLOCK_ALIGN)
        err = "invalid block BLOCK_ALIGN";
    else if (block_get_type( block ) != BLOCK_TYPE_USED && block_get_type( block ) != BLOCK_TYPE_DEAD)
        err = "invalid block header";
    else if (expected_block_type && block_get_type( block ) != expected_block_type)
        err = "invalid block type";
    else if (block_get_flags( block ) & BLOCK_FLAG_FREE)
        err = "invalid block flags";
    else if (!contains( base, commit_end - base, block, block_get_size( block ) ))
        err = "invalid block size";
    else if (block->tail_size > block_get_size( block ) - sizeof(*block))
        err = "invalid block unused size";
    else if ((next = next_block( subheap, block )) && (block_get_flags( next ) & BLOCK_FLAG_PREV_FREE) &&
             /* LFH blocks do not use BLOCK_FLAG_PREV_FREE or back pointer */
             !(block_get_flags( block ) & BLOCK_FLAG_LFH))
        err = "invalid next block flags";
    else if (block_get_flags( block ) & BLOCK_FLAG_PREV_FREE)
    {
        const struct block *prev = *((struct block **)block - 1);
        if (!is_valid_free_block( heap, prev ))
            err = "invalid previous block pointer";
        else if (!(block_get_flags( prev ) & BLOCK_FLAG_FREE) || block_get_type( prev ) != BLOCK_TYPE_FREE)
            err = "invalid previous block flags";
        if ((char *)prev + block_get_size( prev ) != (char *)block)
            err = "invalid previous block size";
    }

    if (!err && block_get_type( block ) == BLOCK_TYPE_DEAD)
    {
        const char *ptr = (char *)(block + 1), *end = (char *)block + block_get_size( block );
        while (!err && ptr < end)
        {
            if (*(DWORD *)ptr != BLOCK_FILL_FREE) err = "free block overwritten";
            ptr += sizeof(DWORD);
        }
    }
    else if (!err && (flags & HEAP_TAIL_CHECKING_ENABLED))
    {
        const unsigned char *tail = (unsigned char *)block + block_get_size( block ) - block->tail_size;
        for (i = 0; !err && i < BLOCK_ALIGN; i++) if (tail[i] != BLOCK_FILL_TAIL) err = "invalid block tail";
    }

    if (err)
    {
        ERR( "heap %p, block %p: %s\n", heap, block, err );
        if (TRACE_ON(heap)) heap_dump( heap );
    }

    return !err;
}
static BOOL heap_validate_ptr( const struct heap *heap, const void *ptr )
{
    const struct block *block = (struct block *)ptr - 1;
    const SUBHEAP *subheap;

    if (!(subheap = find_subheap( heap, block, FALSE )))
    {
        if (!find_large_block( heap, block ))
        {
            WARN( "heap %p, ptr %p: block region not found\n", heap, ptr );
            return FALSE;
        }

        return validate_large_block( heap, block );
    }

    return validate_used_block( heap, subheap, block, BLOCK_TYPE_USED );
}
static BOOL heap_validate( const struct heap *heap )
{
    const ARENA_LARGE *large_arena;
    const struct block *block;
    const SUBHEAP *subheap;
    unsigned int i;

    LIST_FOR_EACH_ENTRY( subheap, &heap->subheap_list, SUBHEAP, entry )
    {
        if (!check_subheap( subheap, heap ))
        {
            ERR( "heap %p, subheap %p corrupted sizes or user_value\n", heap, subheap );
            if (TRACE_ON(heap)) heap_dump( heap );
            return FALSE;
        }

        for (block = first_block( subheap ); block; block = next_block( subheap, block ))
        {
            if (block_get_flags( block ) & BLOCK_FLAG_FREE)
            {
                if (!validate_free_block( heap, subheap, block )) return FALSE;
            }
            else
            {
                if (!validate_used_block( heap, subheap, block, 0 )) return FALSE;
            }
        }
    }

    if (heap->pending_free)
    {
        for (i = 0; i < MAX_FREE_PENDING; i++)
        {
            if (!(block = heap->pending_free[i])) break;

            subheap = find_subheap( heap, block, FALSE );
            if (!subheap)
            {
                ERR( "heap %p: cannot find valid subheap for delayed freed block %p\n", heap, block );
                if (TRACE_ON(heap)) heap_dump( heap );
                return FALSE;
            }

            if (!validate_used_block( heap, subheap, block, BLOCK_TYPE_DEAD )) return FALSE;
        }

        for (; i < MAX_FREE_PENDING; i++)
        {
            if ((block = heap->pending_free[i]))
            {
                ERR( "heap %p: unexpected delayed freed block %p at slot %u\n", heap, block, i );
                if (TRACE_ON(heap)) heap_dump( heap );
                return FALSE;
            }
        }
    }

    LIST_FOR_EACH_ENTRY( large_arena, &heap->large_list, ARENA_LARGE, entry )
        if (!validate_large_block( heap, &large_arena->block )) return FALSE;

    return TRUE;
}
static inline struct block *unsafe_block_from_ptr( struct heap *heap, ULONG flags, const void *ptr )
{
    struct block *block = (struct block *)ptr - 1;
    const char *err = NULL;
    SUBHEAP *subheap;

    if (flags & HEAP_VALIDATE)
    {
        heap_lock( heap, flags );
        if (!heap_validate_ptr( heap, ptr )) block = NULL;
        heap_unlock( heap, flags );
        return block;
    }

    if ((ULONG_PTR)ptr % BLOCK_ALIGN)
        err = "invalid ptr alignment";
    else if (block_get_type( block ) == BLOCK_TYPE_DEAD)
        err = "delayed freed block";
    else if (block_get_type( block ) == BLOCK_TYPE_FREE)
        err = "already freed block";
    else if (block_get_flags( block ) & BLOCK_FLAG_LFH)
    {
        /* LFH block base_offset points to the group, not the subheap */
        if (block_get_type( block ) != BLOCK_TYPE_USED)
            err = "invalid block type";
    }
    else if ((subheap = block_get_subheap( heap, block )) >= (SUBHEAP *)block)
        err = "invalid base offset";
    else if (block_get_type( block ) == BLOCK_TYPE_USED)
    {
        const char *base = subheap_base( subheap ), *commit_end = subheap_commit_end( subheap );
        if (subheap->user_value != heap) err = "mismatching heap";
        else if (!contains( base, commit_end - base, block, block_get_size( block ) )) err = "invalid block size";
    }
    else if (block_get_type( block ) == BLOCK_TYPE_LARGE)
    {
        ARENA_LARGE *large = subheap_base( subheap );
        if (block != &large->block) err = "invalid large block";
    }
    else
    {
        err = "invalid block type";
    }

    if (err) WARN( "heap %p, block %p: %s\n", heap, block, err );
    return err ? NULL : block;
}
static DWORD heap_flags_from_global_flag( DWORD flag )
{
    DWORD ret = 0;

    if (flag & FLG_HEAP_ENABLE_TAIL_CHECK)
        ret |= HEAP_TAIL_CHECKING_ENABLED;
    if (flag & FLG_HEAP_ENABLE_FREE_CHECK)
        ret |= HEAP_FREE_CHECKING_ENABLED;
    if (flag & FLG_HEAP_VALIDATE_PARAMETERS)
        ret |= HEAP_VALIDATE_PARAMS | HEAP_TAIL_CHECKING_ENABLED | HEAP_FREE_CHECKING_ENABLED;
    if (flag & FLG_HEAP_VALIDATE_ALL)
        ret |= HEAP_VALIDATE_ALL | HEAP_TAIL_CHECKING_ENABLED | HEAP_FREE_CHECKING_ENABLED;
    if (flag & FLG_HEAP_DISABLE_COALESCING)
        ret |= HEAP_DISABLE_COALESCE_ON_FREE;
    if (flag & FLG_HEAP_PAGE_ALLOCS)
        ret |= HEAP_PAGE_ALLOCS;

    return ret;
}
/***********************************************************************
 *           heap_set_debug_flags
 */
static void heap_set_debug_flags( HANDLE handle )
{
    ULONG global_flags = RtlGetNtGlobalFlags();
    DWORD dummy, flags, force_flags;
    struct heap *heap;

    if (TRACE_ON(heap)) global_flags |= FLG_HEAP_VALIDATE_ALL;
    if (WARN_ON(heap)) global_flags |= FLG_HEAP_VALIDATE_PARAMETERS;

    heap = unsafe_heap_from_handle( handle, 0, &dummy );
    flags = heap_flags_from_global_flag( global_flags );
    force_flags = (heap->flags | flags) & ~(HEAP_SHARED|HEAP_DISABLE_COALESCE_ON_FREE);

    if (global_flags & FLG_HEAP_ENABLE_TAGGING) flags |= HEAP_SHARED;
    if (!(global_flags & FLG_HEAP_PAGE_ALLOCS)) force_flags &= ~(HEAP_GROWABLE|HEAP_PRIVATE);

    if (RUNNING_ON_VALGRIND) flags = 0;  /* no sense in validating since Valgrind catches accesses */

    heap->flags |= flags;
    heap->force_flags |= force_flags;

    if (flags & (HEAP_FREE_CHECKING_ENABLED | HEAP_TAIL_CHECKING_ENABLED))  /* fix existing blocks */
    {
        struct block *block;
        SUBHEAP *subheap;

        LIST_FOR_EACH_ENTRY( subheap, &heap->subheap_list, SUBHEAP, entry )
        {
            const char *commit_end = subheap_commit_end( subheap );

            if (!check_subheap( subheap, heap )) break;

            for (block = first_block( subheap ); block; block = next_block( subheap, block ))
            {
                if (block_get_flags( block ) & BLOCK_FLAG_FREE)
                {
                    char *data = (char *)block + block_get_overhead( block ), *end = (char *)block + block_get_size( block );
                    if (next_block( subheap, block )) end -= sizeof(struct block *);
                    if (end > commit_end) mark_block_free( data, commit_end - data, flags );
                    else mark_block_free( data, end - data, flags );
                }
                else
                {
                    if (block_get_type( block ) == BLOCK_TYPE_DEAD) mark_block_free( block + 1, block_get_size( block ) - sizeof(*block), flags );
                    else mark_block_tail( block, flags );
                }
            }
        }
    }

    if ((heap->flags & HEAP_GROWABLE) && !heap->pending_free &&
        ((flags & HEAP_FREE_CHECKING_ENABLED) || RUNNING_ON_VALGRIND))
    {
        heap->pending_free = RtlAllocateHeap( handle, HEAP_ZERO_MEMORY,
                                              MAX_FREE_PENDING * sizeof(*heap->pending_free) );
        heap->pending_pos = 0;
    }
}
1459 * RtlCreateHeap (NTDLL.@)
1461 * Create a new Heap.
1464 * flags [I] HEAP_ flags from "winnt.h"
1465 * addr [I] Desired base address
1466 * totalSize [I] Total size of the heap, or 0 for a growable heap
1467 * commitSize [I] Amount of heap space to commit
1468 * unknown [I] Not yet understood
1469 * definition [I] Heap definition
1472 * Success: A HANDLE to the newly created heap.
1473 * Failure: a NULL HANDLE.
1475 HANDLE WINAPI
RtlCreateHeap( ULONG flags
, void *addr
, SIZE_T total_size
, SIZE_T commit_size
,
1476 void *unknown
, RTL_HEAP_DEFINITION
*definition
)
1478 struct entry
*entry
;
1484 TRACE( "flags %#lx, addr %p, total_size %#Ix, commit_size %#Ix, unknown %p, definition %p\n",
1485 flags
, addr
, total_size
, commit_size
, unknown
, definition
);
1487 flags
&= ~(HEAP_TAIL_CHECKING_ENABLED
|HEAP_FREE_CHECKING_ENABLED
);
1488 if (process_heap
) flags
|= HEAP_PRIVATE
;
1489 if (!process_heap
|| !total_size
|| (flags
& HEAP_SHARED
)) flags
|= HEAP_GROWABLE
;
1490 if (!total_size
) total_size
= HEAP_DEF_SIZE
;
1494 if (!commit_size
) commit_size
= REGION_ALIGN
;
1495 total_size
= min( max( total_size
, commit_size
), 0xffff0000 ); /* don't allow a heap larger than 4GB */
1496 commit_size
= min( total_size
, ROUND_SIZE( commit_size
, REGION_ALIGN
- 1 ) );
1497 if (!(heap
= allocate_region( NULL
, flags
, &total_size
, &commit_size
))) return 0;
1500 heap
->ffeeffee
= 0xffeeffee;
1501 heap
->auto_flags
= (flags
& HEAP_GROWABLE
);
1502 heap
->flags
= (flags
& ~HEAP_SHARED
);
1503 heap
->compat_info
= HEAP_STD
;
1504 heap
->magic
= HEAP_MAGIC
;
1505 heap
->grow_size
= max( HEAP_DEF_SIZE
, total_size
);
1506 heap
->min_size
= commit_size
;
1507 list_init( &heap
->subheap_list
);
1508 list_init( &heap
->large_list
);
1510 list_init( &heap
->free_lists
[0].entry
);
1511 for (i
= 0, entry
= heap
->free_lists
; i
< HEAP_NB_FREE_LISTS
; i
++, entry
++)
1513 block_set_flags( &entry
->block
, ~0, BLOCK_FLAG_FREE_LINK
);
1514 block_set_size( &entry
->block
, 0 );
1515 block_set_type( &entry
->block
, BLOCK_TYPE_FREE
);
1516 block_set_base( &entry
->block
, heap
);
1517 if (i
) list_add_after( &entry
[-1].entry
, &entry
->entry
);
1520 if (!process_heap
) /* do it by hand to avoid memory allocations */
1522 heap
->cs
.DebugInfo
= &process_heap_cs_debug
;
1523 heap
->cs
.LockCount
= -1;
1524 heap
->cs
.RecursionCount
= 0;
1525 heap
->cs
.OwningThread
= 0;
1526 heap
->cs
.LockSemaphore
= 0;
1527 heap
->cs
.SpinCount
= 0;
1528 process_heap_cs_debug
.CriticalSection
= &heap
->cs
;
1532 RtlInitializeCriticalSection( &heap
->cs
);
1533 heap
->cs
.DebugInfo
->Spare
[0] = (DWORD_PTR
)(__FILE__
": heap.cs");
1536 subheap
= &heap
->subheap
;
1537 subheap
->user_value
= heap
;
1538 subheap_set_bounds( subheap
, (char *)heap
+ commit_size
, (char *)heap
+ total_size
);
1539 block_size
= (SIZE_T
)ROUND_ADDR( subheap_size( subheap
) - subheap_overhead( subheap
), BLOCK_ALIGN
- 1 );
1540 block_init_free( first_block( subheap
), flags
, subheap
, block_size
);
1542 insert_free_block( heap
, flags
, subheap
, first_block( subheap
) );
1543 list_add_head( &heap
->subheap_list
, &subheap
->entry
);
1545 heap_set_debug_flags( heap
);
1547 if (heap
->flags
& HEAP_GROWABLE
)
1549 SIZE_T size
= (sizeof(struct bin
) + sizeof(struct group
*) * ARRAY_SIZE(affinity_mapping
)) * BLOCK_SIZE_BIN_COUNT
;
1550 NtAllocateVirtualMemory( NtCurrentProcess(), (void *)&heap
->bins
,
1551 0, &size
, MEM_COMMIT
, PAGE_READWRITE
);
1553 for (i
= 0; heap
->bins
&& i
< BLOCK_SIZE_BIN_COUNT
; ++i
)
1555 RtlInitializeSListHead( &heap
->bins
[i
].groups
);
1556 /* offset affinity_group_base to interleave the bin affinity group pointers */
1557 heap
->bins
[i
].affinity_group_base
= (struct group
**)(heap
->bins
+ BLOCK_SIZE_BIN_COUNT
) + i
;
1561 /* link it into the per-process heap list */
1564 RtlEnterCriticalSection( &process_heap
->cs
);
1565 list_add_head( &process_heap
->entry
, &heap
->entry
);
1566 RtlLeaveCriticalSection( &process_heap
->cs
);
1570 process_heap
= heap
; /* assume the first heap we create is the process main heap */
1571 list_init( &process_heap
->entry
);
/***********************************************************************
 *           RtlDestroyHeap   (NTDLL.@)
 *
 * Destroy a Heap created with RtlCreateHeap().
 *
 * PARAMS
 *  heap [I] Heap to destroy.
 *
 * RETURNS
 *  Success: A NULL HANDLE, if heap is NULL or it was destroyed
 *  Failure: The Heap handle, if heap is the process heap.
 */
HANDLE WINAPI RtlDestroyHeap( HANDLE handle )
{
    SUBHEAP *subheap, *next;
    ARENA_LARGE *arena, *arena_next;
    struct block **pending, **tmp;
    struct heap *heap;
    ULONG heap_flags;
    SIZE_T size;
    void *addr;

    TRACE( "handle %p\n", handle );

    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags )) && handle &&
        (((struct heap *)handle)->flags & HEAP_VALIDATE_PARAMS) &&
        NtCurrentTeb()->Peb->BeingDebugged)
    {
        DbgPrint( "Attempt to destroy an invalid heap\n" );
        DbgBreakPoint();
    }
    if (!heap) return handle;

    if ((pending = heap->pending_free))
    {
        heap->pending_free = NULL;
        for (tmp = pending; *tmp && tmp != pending + MAX_FREE_PENDING; ++tmp)
            heap_free_block( heap, heap->flags, *tmp );
        RtlFreeHeap( handle, 0, pending );
    }

    if (heap == process_heap) return handle;  /* cannot delete the main process heap */

    /* remove it from the per-process list */
    RtlEnterCriticalSection( &process_heap->cs );
    list_remove( &heap->entry );
    RtlLeaveCriticalSection( &process_heap->cs );

    heap->cs.DebugInfo->Spare[0] = 0;
    RtlDeleteCriticalSection( &heap->cs );

    LIST_FOR_EACH_ENTRY_SAFE( arena, arena_next, &heap->large_list, ARENA_LARGE, entry )
    {
        list_remove( &arena->entry );
        size = 0;
        addr = arena;
        NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );
    }
    LIST_FOR_EACH_ENTRY_SAFE( subheap, next, &heap->subheap_list, SUBHEAP, entry )
    {
        if (subheap == &heap->subheap) continue;  /* do this one last */
        valgrind_notify_free_all( subheap, heap );
        list_remove( &subheap->entry );
        size = 0;
        addr = ROUND_ADDR( subheap, REGION_ALIGN - 1 );
        NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );
    }
    valgrind_notify_free_all( &heap->subheap, heap );
    if ((addr = heap->bins))
    {
        size = 0;
        NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );
    }
    size = 0;
    addr = heap;
    NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );
    return 0;
}
static SIZE_T heap_get_block_size( const struct heap *heap, ULONG flags, SIZE_T size )
{
    static const ULONG padd_flags = HEAP_VALIDATE | HEAP_VALIDATE_ALL | HEAP_VALIDATE_PARAMS | HEAP_ADD_USER_INFO;
    static const ULONG check_flags = HEAP_TAIL_CHECKING_ENABLED | HEAP_FREE_CHECKING_ENABLED | HEAP_CHECKING_ENABLED;
    SIZE_T overhead, block_size;

    if ((flags & check_flags)) overhead = BLOCK_ALIGN;
    else overhead = sizeof(struct block);

    if ((flags & HEAP_TAIL_CHECKING_ENABLED) || RUNNING_ON_VALGRIND) overhead += BLOCK_ALIGN;
    if (flags & padd_flags) overhead += BLOCK_ALIGN;

    if (size < BLOCK_ALIGN) size = BLOCK_ALIGN;
    block_size = ROUND_SIZE( size + overhead, BLOCK_ALIGN - 1 );

    if (block_size < size) return ~0U;  /* overflow */
    if (block_size < HEAP_MIN_BLOCK_SIZE) block_size = HEAP_MIN_BLOCK_SIZE;
    return block_size;
}
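
/* worked example (added): on 64-bit with no debug flags (and RUNNING_ON_VALGRIND fixed
 * to 0 above), a 24-byte request gets sizeof(struct block) = 8 bytes of overhead:
 * ROUND_SIZE( 24 + 8, 0xf ) = 0x20, then raised to HEAP_MIN_BLOCK_SIZE (0x30) */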
static NTSTATUS heap_allocate_block( struct heap *heap, ULONG flags, SIZE_T block_size, SIZE_T size, void **ret )
{
    struct block *block, *next;
    SIZE_T old_block_size;
    SUBHEAP *subheap;

    /* Locate a suitable free block */

    if (!(block = find_free_block( heap, flags, block_size ))) return STATUS_NO_MEMORY;
    /* read the free block size, changing block type or flags may alter it */
    old_block_size = block_get_size( block );
    subheap = block_get_subheap( heap, block );

    if ((next = split_block( heap, flags, block, old_block_size, block_size )))
    {
        block_init_free( next, flags, subheap, old_block_size - block_size );
        insert_free_block( heap, flags, subheap, next );
    }

    block_set_type( block, BLOCK_TYPE_USED );
    block_set_flags( block, ~0, BLOCK_USER_FLAGS( flags ) );
    block->tail_size = block_get_size( block ) - sizeof(*block) - size;
    initialize_block( block, 0, size, flags );
    mark_block_tail( block, flags );

    if ((next = next_block( subheap, block ))) block_set_flags( next, BLOCK_FLAG_PREV_FREE, 0 );

    *ret = block + 1;
    return STATUS_SUCCESS;
}
/* Low Fragmentation Heap frontend */

/* header for every LFH block group */
struct DECLSPEC_ALIGN(BLOCK_ALIGN) group
{
    SLIST_ENTRY entry;
    /* one bit for each free block and the highest bit for GROUP_FLAG_FREE */
    LONG free_bits;
    /* affinity of the thread which last allocated from this group */
    LONG affinity;
    /* first block of a group, required for alignment */
    struct block first_block;
};

#define GROUP_BLOCK_COUNT  (sizeof(((struct group *)0)->free_bits) * 8 - 1)
#define GROUP_FLAG_FREE    (1u << GROUP_BLOCK_COUNT)
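/* With a 32-bit LONG this makes GROUP_BLOCK_COUNT 31: bits 0..30 track each
 * block's free state and bit 31 is GROUP_FLAG_FREE, marking a fully used group
 * that has been unlinked from its bin. */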
static inline UINT block_get_group_index( const struct block *block )
{
    return block->base_offset;
}

static inline struct group *block_get_group( const struct block *block )
{
    SIZE_T block_size = block_get_size( block );
    void *first_block = (char *)block - block_get_group_index( block ) * block_size;
    return CONTAINING_RECORD( first_block, struct group, first_block );
}
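/* Because every block in a group has the same size, storing the block's index
 * in base_offset is enough to walk back to first_block and recover the group
 * header without any per-block pointer. */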
static inline void block_set_group( struct block *block, SIZE_T block_size, const struct group *group )
{
    SIZE_T offset = (char *)block - (char *)&group->first_block;
    block->base_offset = offset / block_size;
}

static inline struct block *group_get_block( struct group *group, SIZE_T block_size, UINT index )
{
    char *first_block = (char *)&group->first_block;
    return (struct block *)(first_block + index * block_size);
}
/* lookup a free block using the group free_bits, the current thread must own the group */
static inline struct block *group_find_free_block( struct group *group, SIZE_T block_size )
{
    ULONG i, free_bits = ReadNoFence( &group->free_bits );
    /* free_bits will never be 0 as the group is unlinked when it's fully used */
    BitScanForward( &i, free_bits );
    InterlockedAnd( &group->free_bits, ~(1 << i) );
    return group_get_block( group, block_size, i );
}
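/* Only the owning thread ever clears bits in free_bits; concurrent threads may
 * set bits while freeing blocks, but that cannot invalidate the index found
 * above, so the relaxed read followed by InterlockedAnd is sufficient here. */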
/* allocate a new group block using non-LFH allocation, returns a group owned by current thread */
static struct group *group_allocate( struct heap *heap, ULONG flags, SIZE_T block_size )
{
    SIZE_T i, group_size, group_block_size;
    struct group *group;
    NTSTATUS status;

    group_size = offsetof( struct group, first_block ) + GROUP_BLOCK_COUNT * block_size;
    group_block_size = heap_get_block_size( heap, flags, group_size );

    heap_lock( heap, flags );

    if (group_block_size >= HEAP_MIN_LARGE_BLOCK_SIZE)
        status = heap_allocate_large( heap, flags & ~HEAP_ZERO_MEMORY, group_block_size, group_size, (void **)&group );
    else
        status = heap_allocate_block( heap, flags & ~HEAP_ZERO_MEMORY, group_block_size, group_size, (void **)&group );

    heap_unlock( heap, flags );

    if (status) return NULL;

    block_set_flags( (struct block *)group - 1, 0, BLOCK_FLAG_LFH );
    group->free_bits = ~GROUP_FLAG_FREE;

    for (i = 0; i < GROUP_BLOCK_COUNT; ++i)
    {
        struct block *block = group_get_block( group, block_size, i );
        valgrind_make_writable( block, sizeof(*block) );
        block_set_type( block, BLOCK_TYPE_FREE );
        block_set_flags( block, ~0, BLOCK_FLAG_FREE | BLOCK_FLAG_LFH );
        block_set_group( block, block_size, group );
        block_set_size( block, block_size );
        mark_block_free( block + 1, (char *)block + block_size - (char *)(block + 1), flags );
    }

    return group;
}
/* release a fully freed group to the non-LFH subheap, group must be owned by current thread */
static NTSTATUS group_release( struct heap *heap, ULONG flags, struct bin *bin, struct group *group )
{
    struct block *block = (struct block *)group - 1;
    NTSTATUS status;

    heap_lock( heap, flags );

    block_set_flags( block, BLOCK_FLAG_LFH, 0 );

    if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
        status = heap_free_large( heap, flags, block );
    else
        status = heap_free_block( heap, flags, block );

    heap_unlock( heap, flags );

    return status;
}
static inline ULONG heap_current_thread_affinity(void)
{
    ULONG affinity;

    if (!(affinity = NtCurrentTeb()->HeapVirtualAffinity))
    {
        affinity = InterlockedIncrement( &next_thread_affinity );
        affinity = affinity_mapping[affinity % ARRAY_SIZE(affinity_mapping)];
        NtCurrentTeb()->HeapVirtualAffinity = affinity;
    }

    return affinity;
}
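/* Threads are lazily assigned one of ARRAY_SIZE(affinity_mapping) affinity
 * slots, and each bin caches one group per slot, so concurrently allocating
 * threads usually work on different groups and avoid contending on the same
 * free_bits word. */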
/* acquire a group from the bin, thread takes ownership of a shared group or allocates a new one */
static struct group *heap_acquire_bin_group( struct heap *heap, ULONG flags, SIZE_T block_size, struct bin *bin )
{
    ULONG affinity = NtCurrentTeb()->HeapVirtualAffinity;
    struct group *group;
    SLIST_ENTRY *entry;

    if ((group = InterlockedExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), NULL )))
        return group;

    if ((entry = RtlInterlockedPopEntrySList( &bin->groups )))
        return CONTAINING_RECORD( entry, struct group, entry );

    return group_allocate( heap, flags, block_size );
}
/* release a thread owned and fully freed group to the bin shared group, or free its memory */
static NTSTATUS heap_release_bin_group( struct heap *heap, ULONG flags, struct bin *bin, struct group *group )
{
    ULONG affinity = group->affinity;

    /* using InterlockedExchangePointer here would possibly return a group that has used blocks,
     * we prefer keeping our fully freed group instead for reduced memory consumption.
     */
    if (!InterlockedCompareExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), group, NULL ))
        return STATUS_SUCCESS;

    /* try re-using the block group instead of releasing it */
    if (RtlQueryDepthSList( &bin->groups ) <= ARRAY_SIZE(affinity_mapping))
    {
        RtlInterlockedPushEntrySList( &bin->groups, &group->entry );
        return STATUS_SUCCESS;
    }

    return group_release( heap, flags, bin, group );
}
static struct block *find_free_bin_block( struct heap *heap, ULONG flags, SIZE_T block_size, struct bin *bin )
{
    ULONG affinity = heap_current_thread_affinity();
    struct block *block;
    struct group *group;

    /* acquire a group, the thread will own it and no other thread can clear free bits.
     * some other thread might still set the free bits if they are freeing blocks.
     */
    if (!(group = heap_acquire_bin_group( heap, flags, block_size, bin ))) return NULL;
    group->affinity = affinity;

    block = group_find_free_block( group, block_size );

    /* serialize with heap_free_block_lfh: atomically set GROUP_FLAG_FREE when the free bits are all 0. */
    if (ReadNoFence( &group->free_bits ) || InterlockedCompareExchange( &group->free_bits, GROUP_FLAG_FREE, 0 ))
    {
        /* if GROUP_FLAG_FREE isn't set, thread is responsible for putting it back into group list. */
        if ((group = InterlockedExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), group )))
            RtlInterlockedPushEntrySList( &bin->groups, &group->entry );
    }

    return block;
}
static NTSTATUS heap_allocate_block_lfh( struct heap *heap, ULONG flags, SIZE_T block_size,
                                         SIZE_T size, void **ret )
{
    struct bin *bin, *last = heap->bins + BLOCK_SIZE_BIN_COUNT - 1;
    struct block *block;

    bin = heap->bins + BLOCK_SIZE_BIN( block_size );
    if (bin == last) return STATUS_UNSUCCESSFUL;

    /* paired with WriteRelease in bin_try_enable. */
    if (!ReadAcquire( &bin->enabled )) return STATUS_UNSUCCESSFUL;

    block_size = BLOCK_BIN_SIZE( BLOCK_SIZE_BIN( block_size ) );

    if ((block = find_free_bin_block( heap, flags, block_size, bin )))
    {
        block_set_type( block, BLOCK_TYPE_USED );
        block_set_flags( block, ~BLOCK_FLAG_LFH, BLOCK_USER_FLAGS( flags ) );
        block->tail_size = block_size - sizeof(*block) - size;
        initialize_block( block, 0, size, flags );
        mark_block_tail( block, flags );
        *ret = block + 1;
    }

    return block ? STATUS_SUCCESS : STATUS_NO_MEMORY;
}
static NTSTATUS heap_free_block_lfh( struct heap *heap, ULONG flags, struct block *block )
{
    struct bin *bin, *last = heap->bins + BLOCK_SIZE_BIN_COUNT - 1;
    SIZE_T i, block_size = block_get_size( block );
    struct group *group = block_get_group( block );
    NTSTATUS status = STATUS_SUCCESS;

    if (!(block_get_flags( block ) & BLOCK_FLAG_LFH)) return STATUS_UNSUCCESSFUL;

    bin = heap->bins + BLOCK_SIZE_BIN( block_size );
    if (bin == last) return STATUS_UNSUCCESSFUL;

    i = block_get_group_index( block );
    valgrind_make_writable( block, sizeof(*block) );
    block_set_type( block, BLOCK_TYPE_FREE );
    block_set_flags( block, ~BLOCK_FLAG_LFH, BLOCK_FLAG_FREE );
    mark_block_free( block + 1, (char *)block + block_size - (char *)(block + 1), flags );

    /* if this was the last used block in a group and GROUP_FLAG_FREE was set */
    if (InterlockedOr( &group->free_bits, 1 << i ) == ~(1 << i))
    {
        /* thread now owns the group, and can release it to its bin */
        group->free_bits = ~GROUP_FLAG_FREE;
        status = heap_release_bin_group( heap, flags, bin, group );
    }

    return status;
}
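/* Note: InterlockedOr returns the previous value, which equals ~(1 << i) only
 * when every other free bit and GROUP_FLAG_FREE were already set, i.e. this was
 * the last used block of a group that find_free_bin_block had marked as fully
 * used; the freeing thread thereby takes ownership of the group. */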
static void bin_try_enable( struct heap *heap, struct bin *bin )
{
    ULONG alloc = ReadNoFence( &bin->count_alloc ), freed = ReadNoFence( &bin->count_freed );
    SIZE_T block_size = BLOCK_BIN_SIZE( bin - heap->bins );
    BOOL enable = FALSE;

    if (bin == heap->bins && alloc > 0x10) enable = TRUE;
    else if (bin - heap->bins < 0x30 && alloc > 0x800) enable = TRUE;
    else if (bin - heap->bins < 0x30 && alloc - freed > 0x10) enable = TRUE;
    else if (alloc - freed > 0x400000 / block_size) enable = TRUE;
    if (!enable) return;

    if (ReadNoFence( &heap->compat_info ) != HEAP_LFH)
    {
        ULONG info = HEAP_LFH;
        RtlSetHeapInformation( heap, HeapCompatibilityInformation, &info, sizeof(info) );
    }

    /* paired with ReadAcquire in heap_allocate_block_lfh.
     *
     * The acq/rel barrier on the enabled flag is protecting compat_info
     * (i.e. compat_info := LFH happens-before enabled := TRUE), so that
     * a caller that observes LFH block allocation (alloc request
     * succeeds without heap lock) will never observe HEAP_STD when it
     * queries the heap compatibility information.
     */
    WriteRelease( &bin->enabled, TRUE );
}
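/* The thresholds above are heuristics: a size class is switched to the LFH
 * frontend once its allocation volume, or the number of live blocks relative
 * to the block size, suggests the class is hot enough to amortize the cost of
 * setting up block groups. */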
static void heap_thread_detach_bin_groups( struct heap *heap )
{
    ULONG i, affinity = NtCurrentTeb()->HeapVirtualAffinity;

    if (!heap->bins) return;

    for (i = 0; i < BLOCK_SIZE_BIN_COUNT; ++i)
    {
        struct bin *bin = heap->bins + i;
        struct group *group;
        if (!(group = InterlockedExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), NULL ))) continue;
        RtlInterlockedPushEntrySList( &bin->groups, &group->entry );
    }
}
void heap_thread_detach(void)
{
    struct heap *heap;

    RtlEnterCriticalSection( &process_heap->cs );

    LIST_FOR_EACH_ENTRY( heap, &process_heap->entry, struct heap, entry )
        heap_thread_detach_bin_groups( heap );

    heap_thread_detach_bin_groups( process_heap );

    RtlLeaveCriticalSection( &process_heap->cs );
}
/***********************************************************************
 *           RtlAllocateHeap   (NTDLL.@)
 */
void *WINAPI DECLSPEC_HOTPATCH RtlAllocateHeap( HANDLE handle, ULONG flags, SIZE_T size )
{
    struct heap *heap;
    SIZE_T block_size;
    void *ptr = NULL;
    ULONG heap_flags;
    NTSTATUS status;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_HANDLE;
    else if ((block_size = heap_get_block_size( heap, heap_flags, size )) == ~0U)
        status = STATUS_NO_MEMORY;
    else if (block_size >= HEAP_MIN_LARGE_BLOCK_SIZE)
        status = heap_allocate_large( heap, heap_flags, block_size, size, &ptr );
    else if (heap->bins && !heap_allocate_block_lfh( heap, heap_flags, block_size, size, &ptr ))
        status = STATUS_SUCCESS;
    else
    {
        heap_lock( heap, heap_flags );
        status = heap_allocate_block( heap, heap_flags, block_size, size, &ptr );
        heap_unlock( heap, heap_flags );

        if (!status && heap->bins)
        {
            SIZE_T bin = BLOCK_SIZE_BIN( block_get_size( (struct block *)ptr - 1 ) );
            InterlockedIncrement( &heap->bins[bin].count_alloc );
            if (!ReadNoFence( &heap->bins[bin].enabled )) bin_try_enable( heap, &heap->bins[bin] );
        }
    }

    if (!status) valgrind_notify_alloc( ptr, size, flags & HEAP_ZERO_MEMORY );

    TRACE( "handle %p, flags %#lx, size %#Ix, return %p, status %#lx.\n", handle, flags, size, ptr, status );
    heap_set_status( heap, flags, status );
    return ptr;
}
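/* Illustrative usage (not part of this file), with a handle from RtlCreateHeap:
 *
 *   void *p = RtlAllocateHeap( heap, HEAP_ZERO_MEMORY, 64 );
 *   void *q = p ? RtlReAllocateHeap( heap, 0, p, 128 ) : NULL;
 *   RtlFreeHeap( heap, 0, q ? q : p );
 *
 * Small requests like these take the bins/LFH fast path once it is enabled;
 * larger ones fall back to the locked block or large allocation paths. */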
/***********************************************************************
 *           RtlFreeHeap   (NTDLL.@)
 */
BOOLEAN WINAPI DECLSPEC_HOTPATCH RtlFreeHeap( HANDLE handle, ULONG flags, void *ptr )
{
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    NTSTATUS status;

    if (!ptr) return TRUE;

    valgrind_notify_free( ptr );

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_PARAMETER;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        status = STATUS_INVALID_PARAMETER;
    else if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
        status = heap_free_large( heap, heap_flags, block );
    else if (!(block = heap_delay_free( heap, heap_flags, block )))
        status = STATUS_SUCCESS;
    else if (!heap_free_block_lfh( heap, heap_flags, block ))
        status = STATUS_SUCCESS;
    else
    {
        SIZE_T block_size = block_get_size( block ), bin = BLOCK_SIZE_BIN( block_size );

        heap_lock( heap, heap_flags );
        status = heap_free_block( heap, heap_flags, block );
        heap_unlock( heap, heap_flags );

        if (!status && heap->bins) InterlockedIncrement( &heap->bins[bin].count_freed );
    }

    TRACE( "handle %p, flags %#lx, ptr %p, return %u, status %#lx.\n", handle, flags, ptr, !status, status );
    heap_set_status( heap, flags, status );
    return !status;
}
static NTSTATUS heap_resize_large( struct heap *heap, ULONG flags, struct block *block, SIZE_T block_size,
                                   SIZE_T size, SIZE_T *old_size, void **ret )
{
    ARENA_LARGE *large = CONTAINING_RECORD( block, ARENA_LARGE, block );
    SIZE_T old_block_size = large->block_size;
    *old_size = large->data_size;

    if (old_block_size < block_size) return STATUS_NO_MEMORY;

    /* FIXME: we could remap zero-pages instead */
    valgrind_notify_resize( block + 1, *old_size, size );
    initialize_block( block, *old_size, size, flags );

    large->data_size = size;
    valgrind_make_noaccess( (char *)block + sizeof(*block) + large->data_size,
                            old_block_size - sizeof(*block) - large->data_size );

    *ret = block + 1;
    return STATUS_SUCCESS;
}
static NTSTATUS heap_resize_block( struct heap *heap, ULONG flags, struct block *block, SIZE_T block_size,
                                   SIZE_T size, SIZE_T old_block_size, SIZE_T *old_size, void **ret )
{
    SUBHEAP *subheap = block_get_subheap( heap, block );
    struct block *next;

    if (block_size > old_block_size)
    {
        /* need to grow block, make sure it's followed by large enough free block */
        if (!(next = next_block( subheap, block ))) return STATUS_NO_MEMORY;
        if (!(block_get_flags( next ) & BLOCK_FLAG_FREE)) return STATUS_NO_MEMORY;
        if (block_size > old_block_size + block_get_size( next )) return STATUS_NO_MEMORY;
        if (!subheap_commit( heap, subheap, block, block_size )) return STATUS_NO_MEMORY;
    }

    if ((next = next_block( subheap, block )) && (block_get_flags( next ) & BLOCK_FLAG_FREE))
    {
        /* merge with next block if it is free */
        struct entry *entry = (struct entry *)next;
        list_remove( &entry->entry );
        old_block_size += block_get_size( next );
    }

    if ((next = split_block( heap, flags, block, old_block_size, block_size )))
    {
        block_init_free( next, flags, subheap, old_block_size - block_size );
        insert_free_block( heap, flags, subheap, next );
    }

    valgrind_notify_resize( block + 1, *old_size, size );
    block_set_flags( block, BLOCK_FLAG_USER_MASK & ~BLOCK_FLAG_USER_INFO, BLOCK_USER_FLAGS( flags ) );
    block->tail_size = block_get_size( block ) - sizeof(*block) - size;
    initialize_block( block, *old_size, size, flags );
    mark_block_tail( block, flags );

    if ((next = next_block( subheap, block ))) block_set_flags( next, BLOCK_FLAG_PREV_FREE, 0 );

    *ret = block + 1;
    return STATUS_SUCCESS;
}
static NTSTATUS heap_resize_block_lfh( struct block *block, ULONG flags, SIZE_T block_size, SIZE_T size, SIZE_T *old_size, void **ret )
{
    /* as native LFH does it with different block size: refuse to resize even though we could */
    if (ROUND_SIZE( *old_size, BLOCK_ALIGN - 1 ) != ROUND_SIZE( size, BLOCK_ALIGN - 1 )) return STATUS_NO_MEMORY;
    if (size >= *old_size) return STATUS_NO_MEMORY;

    block_set_flags( block, BLOCK_FLAG_USER_MASK & ~BLOCK_FLAG_USER_INFO, BLOCK_USER_FLAGS( flags ) );
    block->tail_size = block_size - sizeof(*block) - size;
    initialize_block( block, *old_size, size, flags );
    mark_block_tail( block, flags );

    *ret = block + 1;
    return STATUS_SUCCESS;
}
static NTSTATUS heap_resize_in_place( struct heap *heap, ULONG flags, struct block *block, SIZE_T block_size,
                                      SIZE_T size, SIZE_T *old_size, void **ret )
{
    SIZE_T old_bin, old_block_size;
    NTSTATUS status;

    if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
        return heap_resize_large( heap, flags, block, block_size, size, old_size, ret );

    old_block_size = block_get_size( block );
    *old_size = old_block_size - block_get_overhead( block );
    old_bin = BLOCK_SIZE_BIN( old_block_size );

    if (block_size >= HEAP_MIN_LARGE_BLOCK_SIZE) return STATUS_NO_MEMORY;  /* growing small block to large block */

    if (block_get_flags( block ) & BLOCK_FLAG_LFH)
        return heap_resize_block_lfh( block, flags, block_size, size, old_size, ret );

    heap_lock( heap, flags );
    status = heap_resize_block( heap, flags, block, block_size, size, old_block_size, old_size, ret );
    heap_unlock( heap, flags );

    if (!status && heap->bins)
    {
        SIZE_T new_bin = BLOCK_SIZE_BIN( block_size );
        InterlockedIncrement( &heap->bins[old_bin].count_freed );
        InterlockedIncrement( &heap->bins[new_bin].count_alloc );
        if (!ReadNoFence( &heap->bins[new_bin].enabled )) bin_try_enable( heap, &heap->bins[new_bin] );
    }

    return status;
}
/***********************************************************************
 *           RtlReAllocateHeap   (NTDLL.@)
 */
void *WINAPI RtlReAllocateHeap( HANDLE handle, ULONG flags, void *ptr, SIZE_T size )
{
    SIZE_T block_size, old_size;
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    void *ret = NULL;
    NTSTATUS status;

    if (!ptr) return NULL;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_HANDLE;
    else if ((block_size = heap_get_block_size( heap, heap_flags, size )) == ~0U)
        status = STATUS_NO_MEMORY;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        status = STATUS_INVALID_PARAMETER;
    else if ((status = heap_resize_in_place( heap, heap_flags, block, block_size, size,
                                             &old_size, &ret )))
    {
        if (flags & HEAP_REALLOC_IN_PLACE_ONLY)
            status = STATUS_NO_MEMORY;
        else if (!(ret = RtlAllocateHeap( heap, flags, size )))
            status = STATUS_NO_MEMORY;
        else
        {
            memcpy( ret, ptr, min( size, old_size ) );
            RtlFreeHeap( heap, flags, ptr );
            status = STATUS_SUCCESS;
        }
    }

    TRACE( "handle %p, flags %#lx, ptr %p, size %#Ix, return %p, status %#lx.\n", handle, flags, ptr, size, ret, status );
    heap_set_status( heap, flags, status );
    return ret;
}
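/* Resizing is attempted in place first; only when that fails (and
 * HEAP_REALLOC_IN_PLACE_ONLY is not set) is a new block allocated and the old
 * contents copied over, so the pointer returned by RtlReAllocateHeap may or
 * may not equal the original allocation. */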
/***********************************************************************
 *           RtlCompactHeap   (NTDLL.@)
 *
 * Compact the free space in a Heap.
 *
 * PARAMS
 *  heap  [I] Heap that block was allocated from
 *  flags [I] HEAP_ flags from "winnt.h"
 *
 * RETURNS
 *  The number of bytes compacted.
 *
 * NOTES
 *  This function is a harmless stub.
 */
ULONG WINAPI RtlCompactHeap( HANDLE handle, ULONG flags )
{
    static BOOL reported;
    if (!reported++) FIXME( "handle %p, flags %#lx stub!\n", handle, flags );
    return 0;
}
/***********************************************************************
 *           RtlLockHeap   (NTDLL.@)
 *
 * Lock a Heap.
 *
 * PARAMS
 *  heap [I] Heap to lock
 *
 * RETURNS
 *  Success: TRUE. The Heap is locked.
 *  Failure: FALSE, if heap is invalid.
 */
BOOLEAN WINAPI RtlLockHeap( HANDLE handle )
{
    struct heap *heap;
    ULONG heap_flags;

    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags ))) return FALSE;
    heap_lock( heap, heap_flags );
    return TRUE;
}
/***********************************************************************
 *           RtlUnlockHeap   (NTDLL.@)
 *
 * Unlock a Heap.
 *
 * PARAMS
 *  heap [I] Heap to unlock
 *
 * RETURNS
 *  Success: TRUE. The Heap is unlocked.
 *  Failure: FALSE, if heap is invalid.
 */
BOOLEAN WINAPI RtlUnlockHeap( HANDLE handle )
{
    struct heap *heap;
    ULONG heap_flags;

    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags ))) return FALSE;
    heap_unlock( heap, heap_flags );
    return TRUE;
}
static NTSTATUS heap_size( const struct heap *heap, struct block *block, SIZE_T *size )
{
    if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
    {
        const ARENA_LARGE *large_arena = CONTAINING_RECORD( block, ARENA_LARGE, block );
        *size = large_arena->data_size;
    }
    else *size = block_get_size( block ) - block_get_overhead( block );

    return STATUS_SUCCESS;
}
/***********************************************************************
 *           RtlSizeHeap   (NTDLL.@)
 */
SIZE_T WINAPI RtlSizeHeap( HANDLE handle, ULONG flags, const void *ptr )
{
    SIZE_T size = ~(SIZE_T)0;
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    NTSTATUS status;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_PARAMETER;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        status = STATUS_INVALID_PARAMETER;
    else
    {
        heap_lock( heap, heap_flags );
        status = heap_size( heap, block, &size );
        heap_unlock( heap, heap_flags );
    }

    TRACE( "handle %p, flags %#lx, ptr %p, return %#Ix, status %#lx.\n", handle, flags, ptr, size, status );
    heap_set_status( heap, flags, status );
    return size;
}
/***********************************************************************
 *           RtlValidateHeap   (NTDLL.@)
 */
BOOLEAN WINAPI RtlValidateHeap( HANDLE handle, ULONG flags, const void *ptr )
{
    struct heap *heap;
    ULONG heap_flags;
    BOOLEAN ret;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        ret = FALSE;
    else
    {
        heap_lock( heap, heap_flags );
        if (ptr) ret = heap_validate_ptr( heap, ptr );
        else ret = heap_validate( heap );
        heap_unlock( heap, heap_flags );
    }

    TRACE( "handle %p, flags %#lx, ptr %p, return %u.\n", handle, flags, ptr, !!ret );
    return !!ret;
}
static NTSTATUS heap_walk_blocks( const struct heap *heap, const SUBHEAP *subheap,
                                  const struct block *block, struct rtl_heap_entry *entry )
{
    const char *base = subheap_base( subheap ), *commit_end = subheap_commit_end( subheap ), *end = base + subheap_size( subheap );
    const struct block *blocks = first_block( subheap );

    if (entry->lpData == commit_end) return STATUS_NO_MORE_ENTRIES;
    if (entry->lpData == base) block = blocks;
    else if (!(block = next_block( subheap, block )))
    {
        entry->lpData = (void *)commit_end;
        entry->cbData = end - commit_end;
        entry->cbOverhead = 0;
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_UNCOMMITTED;
        return STATUS_SUCCESS;
    }

    if (block_get_flags( block ) & BLOCK_FLAG_FREE)
    {
        entry->lpData = (char *)block + block_get_overhead( block );
        entry->cbData = block_get_size( block ) - block_get_overhead( block );
        /* FIXME: last free block should not include uncommitted range, which also has its own overhead */
        if (!contains( blocks, commit_end - (char *)blocks, block, block_get_size( block ) ))
            entry->cbData = commit_end - (char *)entry->lpData - 4 * BLOCK_ALIGN;
        entry->cbOverhead = 2 * BLOCK_ALIGN;
        entry->iRegionIndex = 0;
        entry->wFlags = 0;
    }
    else
    {
        entry->lpData = (void *)(block + 1);
        entry->cbData = block_get_size( block ) - block_get_overhead( block );
        entry->cbOverhead = block_get_overhead( block );
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_COMMITTED|RTL_HEAP_ENTRY_BLOCK|RTL_HEAP_ENTRY_BUSY;
    }

    return STATUS_SUCCESS;
}
static NTSTATUS heap_walk( const struct heap *heap, struct rtl_heap_entry *entry )
{
    const char *data = entry->lpData;
    const ARENA_LARGE *large = NULL;
    const struct block *block;
    const struct list *next;
    const SUBHEAP *subheap;
    NTSTATUS status;
    char *base;

    if (!data || entry->wFlags & RTL_HEAP_ENTRY_REGION) block = (struct block *)data;
    else if (entry->wFlags & RTL_HEAP_ENTRY_BUSY) block = (struct block *)data - 1;
    else block = (struct block *)(data - sizeof(struct list)) - 1;

    if (find_large_block( heap, block ))
    {
        large = CONTAINING_RECORD( block, ARENA_LARGE, block );
        next = &large->entry;
    }
    else if ((subheap = find_subheap( heap, block, TRUE )))
    {
        if (!(status = heap_walk_blocks( heap, subheap, block, entry ))) return STATUS_SUCCESS;
        else if (status != STATUS_NO_MORE_ENTRIES) return status;
        next = &subheap->entry;
    }
    else
    {
        if (entry->lpData) return STATUS_INVALID_PARAMETER;
        next = &heap->subheap_list;
    }

    if (!large && (next = list_next( &heap->subheap_list, next )))
    {
        subheap = LIST_ENTRY( next, SUBHEAP, entry );
        base = subheap_base( subheap );
        entry->lpData = base;
        entry->cbData = subheap_overhead( subheap );
        entry->cbOverhead = 0;
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_REGION;
        entry->Region.dwCommittedSize = (char *)subheap_commit_end( subheap ) - base;
        entry->Region.dwUnCommittedSize = subheap_size( subheap ) - entry->Region.dwCommittedSize;
        entry->Region.lpFirstBlock = base + entry->cbData;
        entry->Region.lpLastBlock = base + subheap_size( subheap );
        return STATUS_SUCCESS;
    }

    if (!next) next = &heap->large_list;
    if ((next = list_next( &heap->large_list, next )))
    {
        large = LIST_ENTRY( next, ARENA_LARGE, entry );
        entry->lpData = (void *)(large + 1);
        entry->cbData = large->data_size;
        entry->cbOverhead = 0;
        entry->iRegionIndex = 64;
        entry->wFlags = RTL_HEAP_ENTRY_COMMITTED|RTL_HEAP_ENTRY_BLOCK|RTL_HEAP_ENTRY_BUSY;
        return STATUS_SUCCESS;
    }

    return STATUS_NO_MORE_ENTRIES;
}
/***********************************************************************
 *           RtlWalkHeap    (NTDLL.@)
 */
NTSTATUS WINAPI RtlWalkHeap( HANDLE handle, void *entry_ptr )
{
    struct rtl_heap_entry *entry = entry_ptr;
    struct heap *heap;
    ULONG heap_flags;
    NTSTATUS status;

    if (!entry) return STATUS_INVALID_PARAMETER;

    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags )))
        status = STATUS_INVALID_HANDLE;
    else
    {
        heap_lock( heap, heap_flags );
        status = heap_walk( heap, entry );
        heap_unlock( heap, heap_flags );
    }

    TRACE( "handle %p, entry %p %s, return %#lx\n", handle, entry,
           status ? "<empty>" : debugstr_heap_entry(entry), status );
    return status;
}
/***********************************************************************
 *           RtlGetProcessHeaps    (NTDLL.@)
 *
 * Get the Heaps belonging to the current process.
 *
 * PARAMS
 *  count [I] size of heaps
 *  heaps [O] Destination array for heap HANDLE's
 *
 * RETURNS
 *  Success: The number of Heaps allocated by the process.
 *  Failure: 0.
 */
ULONG WINAPI RtlGetProcessHeaps( ULONG count, HANDLE *heaps )
{
    ULONG total = 1;  /* main heap */
    struct list *ptr;

    RtlEnterCriticalSection( &process_heap->cs );
    LIST_FOR_EACH( ptr, &process_heap->entry ) total++;
    if (total <= count)
    {
        *heaps++ = process_heap;
        LIST_FOR_EACH( ptr, &process_heap->entry )
            *heaps++ = LIST_ENTRY( ptr, struct heap, entry );
    }
    RtlLeaveCriticalSection( &process_heap->cs );
    return total;
}
/***********************************************************************
 *           RtlQueryHeapInformation    (NTDLL.@)
 */
NTSTATUS WINAPI RtlQueryHeapInformation( HANDLE handle, HEAP_INFORMATION_CLASS info_class,
                                         void *info, SIZE_T size_in, SIZE_T *size_out )
{
    struct heap *heap;
    ULONG flags;

    TRACE( "handle %p, info_class %u, info %p, size_in %Iu, size_out %p.\n", handle, info_class, info, size_in, size_out );

    switch (info_class)
    {
    case HeapCompatibilityInformation:
        if (!(heap = unsafe_heap_from_handle( handle, 0, &flags ))) return STATUS_ACCESS_VIOLATION;
        if (size_out) *size_out = sizeof(ULONG);
        if (size_in < sizeof(ULONG)) return STATUS_BUFFER_TOO_SMALL;
        *(ULONG *)info = ReadNoFence( &heap->compat_info );
        return STATUS_SUCCESS;

    default:
        FIXME( "HEAP_INFORMATION_CLASS %u not implemented!\n", info_class );
        return STATUS_INVALID_INFO_CLASS;
    }
}
/***********************************************************************
 *           RtlSetHeapInformation    (NTDLL.@)
 */
NTSTATUS WINAPI RtlSetHeapInformation( HANDLE handle, HEAP_INFORMATION_CLASS info_class, void *info, SIZE_T size )
{
    struct heap *heap;
    ULONG flags;

    TRACE( "handle %p, info_class %u, info %p, size %Iu.\n", handle, info_class, info, size );

    switch (info_class)
    {
    case HeapCompatibilityInformation:
    {
        ULONG compat_info;

        if (size < sizeof(ULONG)) return STATUS_BUFFER_TOO_SMALL;
        if (!(heap = unsafe_heap_from_handle( handle, 0, &flags ))) return STATUS_INVALID_HANDLE;
        if (heap->flags & HEAP_NO_SERIALIZE) return STATUS_INVALID_PARAMETER;

        compat_info = *(ULONG *)info;
        if (compat_info != HEAP_STD && compat_info != HEAP_LFH)
        {
            FIXME( "HeapCompatibilityInformation %lu not implemented!\n", compat_info );
            return STATUS_UNSUCCESSFUL;
        }
        if (InterlockedCompareExchange( &heap->compat_info, compat_info, HEAP_STD ) != HEAP_STD)
            return STATUS_UNSUCCESSFUL;
        return STATUS_SUCCESS;
    }

    default:
        FIXME( "HEAP_INFORMATION_CLASS %u not implemented!\n", info_class );
        return STATUS_SUCCESS;
    }
}
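/* Illustrative usage (not part of this file): opting a heap into the LFH
 * frontend, as bin_try_enable does internally:
 *
 *   ULONG info = HEAP_LFH;
 *   RtlSetHeapInformation( heap, HeapCompatibilityInformation, &info, sizeof(info) );
 *
 * This only succeeds while compat_info is still HEAP_STD and the heap was not
 * created with HEAP_NO_SERIALIZE. */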
/***********************************************************************
 *           RtlGetUserInfoHeap    (NTDLL.@)
 */
BOOLEAN WINAPI RtlGetUserInfoHeap( HANDLE handle, ULONG flags, void *ptr, void **user_value, ULONG *user_flags )
{
    NTSTATUS status = STATUS_SUCCESS;
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    char *tmp;

    TRACE( "handle %p, flags %#lx, ptr %p, user_value %p, user_flags %p semi-stub!\n",
           handle, flags, ptr, user_value, user_flags );

    *user_flags = 0;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_SUCCESS;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
    {
        status = STATUS_INVALID_PARAMETER;
        *user_value = 0;
    }
    else if (!(*user_flags = HEAP_USER_FLAGS(block_get_flags( block ))))
        WARN( "Block %p wasn't allocated with user info\n", ptr );
    else if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
    {
        const ARENA_LARGE *large = CONTAINING_RECORD( block, ARENA_LARGE, block );
        *user_flags = *user_flags & ~HEAP_ADD_USER_INFO;
        *user_value = large->user_value;
    }
    else
    {
        heap_lock( heap, heap_flags );

        tmp = (char *)block + block_get_size( block ) - block->tail_size + sizeof(void *);
        if ((heap_flags & HEAP_TAIL_CHECKING_ENABLED) || RUNNING_ON_VALGRIND) tmp += BLOCK_ALIGN;
        *user_flags = *user_flags & ~HEAP_ADD_USER_INFO;
        *user_value = *(void **)tmp;

        heap_unlock( heap, heap_flags );
    }

    heap_set_status( heap, flags, status );
    return !status;
}
/***********************************************************************
 *           RtlSetUserValueHeap    (NTDLL.@)
 */
BOOLEAN WINAPI RtlSetUserValueHeap( HANDLE handle, ULONG flags, void *ptr, void *user_value )
{
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    BOOLEAN ret;
    char *tmp;

    TRACE( "handle %p, flags %#lx, ptr %p, user_value %p.\n", handle, flags, ptr, user_value );

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        ret = TRUE;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        ret = FALSE;
    else if (!(block_get_flags( block ) & BLOCK_FLAG_USER_INFO))
        ret = FALSE;
    else if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
    {
        ARENA_LARGE *large = CONTAINING_RECORD( block, ARENA_LARGE, block );
        large->user_value = user_value;
        ret = TRUE;
    }
    else
    {
        heap_lock( heap, heap_flags );

        tmp = (char *)block + block_get_size( block ) - block->tail_size + sizeof(void *);
        if ((heap_flags & HEAP_TAIL_CHECKING_ENABLED) || RUNNING_ON_VALGRIND) tmp += BLOCK_ALIGN;
        *(void **)tmp = user_value;
        ret = TRUE;

        heap_unlock( heap, heap_flags );
    }

    return ret;
}
/***********************************************************************
 *           RtlSetUserFlagsHeap    (NTDLL.@)
 */
BOOLEAN WINAPI RtlSetUserFlagsHeap( HANDLE handle, ULONG flags, void *ptr, ULONG clear, ULONG set )
{
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    BOOLEAN ret;

    TRACE( "handle %p, flags %#lx, ptr %p, clear %#lx, set %#lx.\n", handle, flags, ptr, clear, set );

    if ((clear | set) & ~(0xe00))
    {
        SetLastError( ERROR_INVALID_PARAMETER );
        return FALSE;
    }

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        ret = TRUE;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        ret = FALSE;
    else if (!(block_get_flags( block ) & BLOCK_FLAG_USER_INFO))
        ret = FALSE;
    else
    {
        block_set_flags( block, BLOCK_USER_FLAGS( clear ), BLOCK_USER_FLAGS( set ) );
        ret = TRUE;
    }

    return ret;
}