 * Copyright 1996 Alexandre Julliard
 * Copyright 1998 Ulrich Weigand
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
#define RUNNING_ON_VALGRIND 0  /* FIXME */

#define WIN32_NO_STATUS

#include "ntdll_misc.h"
#include "wine/list.h"
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(heap);
/* HeapCompatibilityInformation values */

/* undocumented RtlWalkHeap structure */
    SIZE_T cbData;              /* differs from PROCESS_HEAP_ENTRY */
    WORD wFlags;                /* value differs from PROCESS_HEAP_ENTRY */
    DWORD dwCommittedSize;
    DWORD dwUnCommittedSize;
/* rtl_heap_entry flags, names made up */

#define RTL_HEAP_ENTRY_BUSY         0x0001
#define RTL_HEAP_ENTRY_REGION       0x0002
#define RTL_HEAP_ENTRY_BLOCK        0x0010
#define RTL_HEAP_ENTRY_UNCOMMITTED  0x1000
#define RTL_HEAP_ENTRY_COMMITTED    0x4000
#define RTL_HEAP_ENTRY_LFH          0x8000

/* header for heap blocks */

#define REGION_ALIGN  0x10000
#define BLOCK_ALIGN   (2 * sizeof(void *))

struct DECLSPEC_ALIGN(8) block
    /* block size in multiple of BLOCK_ALIGN */
    /* unused size (used block) / high size bits (free block) */
    /* offset to region base / first group block (LFH block) */

C_ASSERT( sizeof(struct block) == 8 );
/* block specific flags */

#define BLOCK_FLAG_FREE        0x01
#define BLOCK_FLAG_PREV_FREE   0x02
#define BLOCK_FLAG_FREE_LINK   0x03
#define BLOCK_FLAG_LARGE       0x04
#define BLOCK_FLAG_LFH         0x08 /* block is handled by the LFH frontend */
#define BLOCK_FLAG_USER_INFO   0x10 /* user flags up to 0xf0 */
#define BLOCK_FLAG_USER_MASK   0xf0

#define BLOCK_USER_FLAGS( heap_flags ) (((heap_flags) >> 4) & BLOCK_FLAG_USER_MASK)
#define HEAP_USER_FLAGS( block_flags ) (((block_flags) & BLOCK_FLAG_USER_MASK) << 4)
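/* Illustrative sanity checks (added, not in the original source), in the same
 * C_ASSERT style used throughout this file: the heap-level user flags (bits
 * 0x0f00 of the RtlAllocateHeap flags) round-trip to and from the block-level
 * user flag bits. */
C_ASSERT( BLOCK_USER_FLAGS( 0x00000f00 ) == BLOCK_FLAG_USER_MASK );
C_ASSERT( HEAP_USER_FLAGS( BLOCK_FLAG_USER_MASK ) == 0x00000f00 );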
/* entry to link free blocks in free lists */

struct DECLSPEC_ALIGN(BLOCK_ALIGN) entry

C_ASSERT( sizeof(struct entry) == 2 * BLOCK_ALIGN );

    SIZE_T __pad[sizeof(SIZE_T) / sizeof(DWORD)];
    struct list entry;      /* entry in heap large blocks list */
    SIZE_T data_size;       /* size of user data */
    SIZE_T block_size;      /* total size of virtual memory block */

/* block must be last and aligned */
C_ASSERT( sizeof(ARENA_LARGE) == offsetof(ARENA_LARGE, block) + sizeof(struct block) );
C_ASSERT( sizeof(ARENA_LARGE) == 4 * BLOCK_ALIGN );

#define BLOCK_TYPE_USED  'u'
#define BLOCK_TYPE_DEAD  'D'
#define BLOCK_TYPE_FREE  'F'
#define BLOCK_TYPE_LARGE 'L'

#define BLOCK_FILL_USED  0x55
#define BLOCK_FILL_TAIL  0xab
#define BLOCK_FILL_FREE  0xfeeefeee
#define ROUND_ADDR(addr, mask)  ((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))
#define ROUND_SIZE(size, mask)  ((((SIZE_T)(size) + (mask)) & ~(SIZE_T)(mask)))
#define FIELD_BITS(type, field) (sizeof(((type *)0)->field) * 8)
#define FIELD_MAX(type, field)  (((SIZE_T)1 << FIELD_BITS(type, field)) - 1)
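/* Illustrative checks (added, not in the original source): ROUND_SIZE rounds a
 * byte count up to the next multiple of the alignment, so rounding one byte
 * with a BLOCK_ALIGN - 1 mask yields exactly one alignment unit. */
C_ASSERT( ROUND_SIZE( 1, BLOCK_ALIGN - 1 ) == BLOCK_ALIGN );
C_ASSERT( ROUND_SIZE( BLOCK_ALIGN, BLOCK_ALIGN - 1 ) == BLOCK_ALIGN );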
#define HEAP_MIN_BLOCK_SIZE  ROUND_SIZE(sizeof(struct entry) + BLOCK_ALIGN, BLOCK_ALIGN - 1)

C_ASSERT( sizeof(struct block) <= HEAP_MIN_BLOCK_SIZE );
C_ASSERT( sizeof(struct entry) <= HEAP_MIN_BLOCK_SIZE );

/* used block size is coded into block_size */
#define HEAP_MAX_USED_BLOCK_SIZE    (FIELD_MAX( struct block, block_size ) * BLOCK_ALIGN)
/* free block size is coded into block_size + tail_size */
#define HEAP_MAX_FREE_BLOCK_SIZE    (HEAP_MAX_USED_BLOCK_SIZE + (FIELD_MAX( struct block, tail_size ) << 16) * BLOCK_ALIGN)
/* subheap distance is coded into base_offset */
#define HEAP_MAX_BLOCK_REGION_SIZE  (FIELD_MAX( struct block, base_offset ) * REGION_ALIGN)

C_ASSERT( HEAP_MAX_USED_BLOCK_SIZE == 512 * 1024 * (sizeof(void *) / 4) - BLOCK_ALIGN );
C_ASSERT( HEAP_MAX_FREE_BLOCK_SIZE >= 128 * 1024 * 1024 * (sizeof(void *) / 4) - BLOCK_ALIGN );
C_ASSERT( HEAP_MAX_BLOCK_REGION_SIZE >= 128 * 1024 * 1024 * (sizeof(void *) / 4) - BLOCK_ALIGN );
C_ASSERT( HEAP_MAX_FREE_BLOCK_SIZE >= HEAP_MAX_BLOCK_REGION_SIZE );

/* minimum size to start allocating large blocks */
#define HEAP_MIN_LARGE_BLOCK_SIZE  (HEAP_MAX_USED_BLOCK_SIZE - 0x1000)

#define FREE_LIST_LINEAR_BITS 2
#define FREE_LIST_LINEAR_MASK ((1 << FREE_LIST_LINEAR_BITS) - 1)
#define FREE_LIST_COUNT ((FIELD_BITS( struct block, block_size ) - FREE_LIST_LINEAR_BITS + 1) * (1 << FREE_LIST_LINEAR_BITS) + 1)
/* for reference, update this when changing parameters */
C_ASSERT( FREE_LIST_COUNT == 0x3d );
typedef struct DECLSPEC_ALIGN(BLOCK_ALIGN) tagSUBHEAP
    SIZE_T __pad[sizeof(SIZE_T) / sizeof(DWORD)];

/* block must be last and aligned */
C_ASSERT( sizeof(SUBHEAP) == offsetof(SUBHEAP, block) + sizeof(struct block) );
C_ASSERT( sizeof(SUBHEAP) == 4 * BLOCK_ALIGN );
/* LFH block size bins */

#define BIN_SIZE_MIN_0     0
#define BIN_SIZE_MIN_1     0x100
#define BIN_SIZE_MIN_2     0x200
#define BIN_SIZE_MIN_3     0x400
#define BIN_SIZE_MIN_4     0x800
#define BIN_SIZE_MIN_5     0x1000
#define BIN_SIZE_MIN_6     0x2000
#define BIN_SIZE_MIN_7     0x4000
#define BIN_SIZE_MAX       0x8000

#define BIN_SIZE_STEP_0    (16)
#define BIN_SIZE_STEP_1    (BIN_SIZE_MIN_1 >> 4)
#define BIN_SIZE_STEP_2    (BIN_SIZE_MIN_2 >> 4)
#define BIN_SIZE_STEP_3    (BIN_SIZE_MIN_3 >> 4)
#define BIN_SIZE_STEP_4    (BIN_SIZE_MIN_4 >> 4)
#define BIN_SIZE_STEP_5    (BIN_SIZE_MIN_5 >> 4)
#define BIN_SIZE_STEP_6    (BIN_SIZE_MIN_6 >> 4)
#define BIN_SIZE_STEP_7    (BIN_SIZE_MIN_7 >> 4)

#define BLOCK_BIN_SIZE_N( n, bin )   (BIN_SIZE_MIN_##n + (bin + 1) * BIN_SIZE_STEP_##n)
#define BLOCK_SIZE_BIN_N( n, size )  (((size) - 1 - BIN_SIZE_MIN_##n) / BIN_SIZE_STEP_##n)

#define BLOCK_BIN_SIZE( bin )   ((bin) >= 0x80 ? ~(SIZE_T)0 : \
                                 (bin) >= 0x70 ? BLOCK_BIN_SIZE_N( 7, bin - 0x70 ) : \
                                 (bin) >= 0x60 ? BLOCK_BIN_SIZE_N( 6, bin - 0x60 ) : \
                                 (bin) >= 0x50 ? BLOCK_BIN_SIZE_N( 5, bin - 0x50 ) : \
                                 (bin) >= 0x40 ? BLOCK_BIN_SIZE_N( 4, bin - 0x40 ) : \
                                 (bin) >= 0x30 ? BLOCK_BIN_SIZE_N( 3, bin - 0x30 ) : \
                                 (bin) >= 0x20 ? BLOCK_BIN_SIZE_N( 2, bin - 0x20 ) : \
                                 (bin) >= 0x10 ? BLOCK_BIN_SIZE_N( 1, bin - 0x10 ) : \
                                 BLOCK_BIN_SIZE_N( 0, bin ))

#define BLOCK_SIZE_BIN( size )  ((size) > BIN_SIZE_MAX   ? 0x80 : \
                                 (size) > BIN_SIZE_MIN_7 ? 0x70 + BLOCK_SIZE_BIN_N( 7, size ) : \
                                 (size) > BIN_SIZE_MIN_6 ? 0x60 + BLOCK_SIZE_BIN_N( 6, size ) : \
                                 (size) > BIN_SIZE_MIN_5 ? 0x50 + BLOCK_SIZE_BIN_N( 5, size ) : \
                                 (size) > BIN_SIZE_MIN_4 ? 0x40 + BLOCK_SIZE_BIN_N( 4, size ) : \
                                 (size) > BIN_SIZE_MIN_3 ? 0x30 + BLOCK_SIZE_BIN_N( 3, size ) : \
                                 (size) > BIN_SIZE_MIN_2 ? 0x20 + BLOCK_SIZE_BIN_N( 2, size ) : \
                                 (size) > BIN_SIZE_MIN_1 ? 0x10 + BLOCK_SIZE_BIN_N( 1, size ) : \
                                 (size) <= BIN_SIZE_MIN_0 ? 0 : BLOCK_SIZE_BIN_N( 0, size ))

#define BLOCK_SIZE_BIN_COUNT (BLOCK_SIZE_BIN( BIN_SIZE_MAX + 1 ) + 1)

/* macros sanity checks */
C_ASSERT( BLOCK_SIZE_BIN( 0 ) == 0 );
C_ASSERT( BLOCK_SIZE_BIN( 0x10 ) == 0 );
C_ASSERT( BLOCK_BIN_SIZE( 0 ) == BIN_SIZE_MIN_0 + 1 * BIN_SIZE_STEP_0 );
C_ASSERT( BLOCK_SIZE_BIN( 0x11 ) == 1 );
C_ASSERT( BLOCK_BIN_SIZE( 1 ) == BIN_SIZE_MIN_0 + 2 * BIN_SIZE_STEP_0 );
C_ASSERT( BLOCK_SIZE_BIN( BIN_SIZE_MAX ) == 0x7f );
C_ASSERT( BLOCK_BIN_SIZE( 0x7f ) == BIN_SIZE_MAX );
C_ASSERT( BLOCK_SIZE_BIN( BIN_SIZE_MAX + 1 ) == 0x80 );
C_ASSERT( BLOCK_BIN_SIZE( 0x80 ) == ~(SIZE_T)0 );

/* difference between block classes and all possible validation overhead must fit into block tail_size */
C_ASSERT( BIN_SIZE_STEP_7 + 3 * BLOCK_ALIGN <= FIELD_MAX( struct block, tail_size ) );

static BYTE affinity_mapping[] = {20,6,31,15,14,29,27,4,18,24,26,13,0,9,2,30,17,7,23,25,10,19,12,3,22,21,5,16,1,28,11,8};
static LONG next_thread_affinity;
/* a bin, tracking heap blocks of a certain size */
    /* counters for LFH activation */
    /* list of groups with free blocks */
    /* array of affinity reserved groups, interleaved with other bins to keep
     * all pointers of the same affinity and different bin grouped together,
     * and pointers of the same bin and different affinity away from each other,
     * hopefully in separate cache lines.
     */
    struct group **affinity_group_base;

static inline struct group **bin_get_affinity_group( struct bin *bin, BYTE affinity )
    return bin->affinity_group_base + affinity * BLOCK_SIZE_BIN_COUNT;
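/* For illustration (added comment, not in the original source): with the layout set up
 * in RtlCreateHeap below, the group pointer table starts right after the bins array and
 * bin_get_affinity_group( &heap->bins[i], a ) resolves to table[a * BLOCK_SIZE_BIN_COUNT + i],
 * so pointers for one affinity sit next to each other while the pointers of one bin for
 * different affinities are BLOCK_SIZE_BIN_COUNT entries apart. */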
    DWORD_PTR        unknown1[2];   /* 0000/0000 */
    DWORD            ffeeffee;      /* 0008/0010 */
    DWORD            auto_flags;    /* 000c/0014 */
    DWORD_PTR        unknown2[7];   /* 0010/0018 */
    DWORD            unknown3[2];   /* 002c/0050 */
    DWORD_PTR        unknown4[3];   /* 0034/0058 */
    DWORD            flags;         /* 0040/0070 */
    DWORD            force_flags;   /* 0044/0074 */
    /* end of the Windows 10 compatible struct layout */

    LONG             compat_info;   /* HeapCompatibilityInformation / heap frontend type */
    struct list      entry;         /* Entry in process heap list */
    struct list      subheap_list;  /* Sub-heap list */
    struct list      large_list;    /* Large blocks list */
    SIZE_T           grow_size;     /* Size of next subheap for growing heap */
    SIZE_T           min_size;      /* Minimum committed size */
    DWORD            magic;         /* Magic number */
    DWORD            pending_pos;   /* Position in pending free requests ring */
    struct block   **pending_free;  /* Ring buffer for pending free requests */
    RTL_CRITICAL_SECTION cs;
    struct entry     free_lists[FREE_LIST_COUNT];

/* subheap must be last and aligned */
C_ASSERT( sizeof(struct heap) == offsetof(struct heap, subheap) + sizeof(SUBHEAP) );
C_ASSERT( sizeof(struct heap) % BLOCK_ALIGN == 0 );
C_ASSERT( offsetof(struct heap, subheap) <= REGION_ALIGN - 1 );

#define HEAP_MAGIC ((DWORD)('H' | ('E'<<8) | ('A'<<16) | ('P'<<24)))

#define HEAP_INITIAL_SIZE      0x10000
#define HEAP_INITIAL_GROW_SIZE 0x100000
#define HEAP_MAX_GROW_SIZE     0xfd0000

C_ASSERT( HEAP_MIN_LARGE_BLOCK_SIZE <= HEAP_INITIAL_GROW_SIZE );

#define MAX_FREE_PENDING 1024    /* max number of free requests to delay */

/* some undocumented flags (names are made up) */
#define HEAP_PRIVATE          0x00001000
#define HEAP_ADD_USER_INFO    0x00000100
#define HEAP_USER_FLAGS_MASK  0x00000f00
#define HEAP_PAGE_ALLOCS      0x01000000
#define HEAP_VALIDATE         0x10000000
#define HEAP_VALIDATE_ALL     0x20000000
#define HEAP_VALIDATE_PARAMS  0x40000000
#define HEAP_CHECKING_ENABLED 0x80000000

static struct heap *process_heap;  /* main process heap */

static NTSTATUS heap_free_block_lfh( struct heap *heap, ULONG flags, struct block *block );
/* check if memory range a contains memory range b */
static inline BOOL contains( const void *a, SIZE_T a_size, const void *b, SIZE_T b_size )
    const void *a_end = (char *)a + a_size, *b_end = (char *)b + b_size;
    return a <= b && b <= b_end && b_end <= a_end;

static inline UINT block_get_flags( const struct block *block )
    return block->block_flags;

static inline UINT block_get_type( const struct block *block )
    return block->block_type;

static inline void block_set_base( struct block *block, const void *base )
    const char *offset = ROUND_ADDR( block, REGION_ALIGN - 1 );
    block->base_offset = (offset - (char *)base) / REGION_ALIGN;

static inline void block_set_type( struct block *block, UINT type )
    block->block_type = type;

static inline SUBHEAP *block_get_subheap( const struct heap *heap, const struct block *block )
    char *offset = ROUND_ADDR( block, REGION_ALIGN - 1 );
    void *base = offset - (SIZE_T)block->base_offset * REGION_ALIGN;
    if (base != (void *)heap) return base;
    else return (SUBHEAP *)&heap->subheap;

static inline UINT block_get_overhead( const struct block *block )
    if (block_get_flags( block ) & BLOCK_FLAG_FREE) return sizeof(*block) + sizeof(struct list);
    return sizeof(*block) + block->tail_size;

/* return the size of a block, including its header */
static inline UINT block_get_size( const struct block *block )
    UINT block_size = block->block_size;
    if (block_get_flags( block ) & BLOCK_FLAG_FREE) block_size += (UINT)block->tail_size << 16;
    return block_size * BLOCK_ALIGN;

static inline void block_set_size( struct block *block, UINT block_size )
    block_size /= BLOCK_ALIGN;
    if (block_get_flags( block ) & BLOCK_FLAG_FREE) block->tail_size = block_size >> 16;
    block->block_size = block_size;

static inline void block_set_flags( struct block *block, BYTE clear, BYTE set )
    UINT block_size = block_get_size( block );
    block->block_flags &= ~clear;
    block->block_flags |= set;
    block_set_size( block, block_size );
static inline void *subheap_base( const SUBHEAP *subheap )
    return ROUND_ADDR( subheap, REGION_ALIGN - 1 );

static inline SIZE_T subheap_overhead( const SUBHEAP *subheap )
    return (char *)&subheap->block - (char *)subheap_base( subheap );

static inline SIZE_T subheap_size( const SUBHEAP *subheap )
    return subheap->block_size + subheap_overhead( subheap );

static inline const void *subheap_commit_end( const SUBHEAP *subheap )
    return (char *)(subheap + 1) + subheap->data_size;

static void subheap_set_bounds( SUBHEAP *subheap, char *commit_end, char *end )
    subheap->block_size = end - (char *)&subheap->block;
    subheap->data_size = commit_end - (char *)(subheap + 1);

static inline void *first_block( const SUBHEAP *subheap )
    return (void *)&subheap->block;

static inline const void *last_block( const SUBHEAP *subheap )
    return (char *)subheap_commit_end( subheap ) - sizeof(struct block);

static inline struct block *next_block( const SUBHEAP *subheap, const struct block *block )
    const char *data = (char *)(block + 1), *next, *last = last_block( subheap );
    next = (char *)block + block_get_size( block );
    if (!contains( data, last - (char *)data, next, sizeof(*block) )) return NULL;
    return (struct block *)next;

static inline BOOL check_subheap( const SUBHEAP *subheap, const struct heap *heap )
    if (subheap->user_value != heap) return FALSE;
    return contains( &subheap->block, subheap->block_size, subheap + 1, subheap->data_size );

static BOOL heap_validate( const struct heap *heap );
/* mark a block of memory as inaccessible for debugging purposes */
static inline void valgrind_make_noaccess( void const *ptr, SIZE_T size )
#if defined(VALGRIND_MAKE_MEM_NOACCESS)
    VALGRIND_DISCARD( VALGRIND_MAKE_MEM_NOACCESS( ptr, size ) );
#elif defined(VALGRIND_MAKE_NOACCESS)
    VALGRIND_DISCARD( VALGRIND_MAKE_NOACCESS( ptr, size ) );
/* mark a block of memory as initialized for debugging purposes */
static inline void valgrind_make_readable( void const *ptr, SIZE_T size )
#if defined(VALGRIND_MAKE_MEM_DEFINED)
    VALGRIND_DISCARD( VALGRIND_MAKE_MEM_DEFINED( ptr, size ) );
#elif defined(VALGRIND_MAKE_READABLE)
    VALGRIND_DISCARD( VALGRIND_MAKE_READABLE( ptr, size ) );

/* mark a block of memory as uninitialized for debugging purposes */
static inline void valgrind_make_writable( void const *ptr, SIZE_T size )
#if defined(VALGRIND_MAKE_MEM_UNDEFINED)
    VALGRIND_DISCARD( VALGRIND_MAKE_MEM_UNDEFINED( ptr, size ) );
#elif defined(VALGRIND_MAKE_WRITABLE)
    VALGRIND_DISCARD( VALGRIND_MAKE_WRITABLE( ptr, size ) );

/* mark a block of memory as free for debugging purposes */
static inline void mark_block_free( void *ptr, SIZE_T size, DWORD flags )
    if (flags & HEAP_FREE_CHECKING_ENABLED)
        for (i = 0; i < size / sizeof(DWORD); i++) ((DWORD *)ptr)[i] = BLOCK_FILL_FREE;
    valgrind_make_noaccess( ptr, size );

/* mark a block of memory as a tail block */
static inline void mark_block_tail( struct block *block, DWORD flags )
    char *tail = (char *)block + block_get_size( block ) - block->tail_size;
    if (flags & HEAP_TAIL_CHECKING_ENABLED)
        valgrind_make_writable( tail, BLOCK_ALIGN );
        memset( tail, BLOCK_FILL_TAIL, BLOCK_ALIGN );
    valgrind_make_noaccess( tail, BLOCK_ALIGN );
    if (flags & HEAP_ADD_USER_INFO)
        if (flags & HEAP_TAIL_CHECKING_ENABLED || RUNNING_ON_VALGRIND) tail += BLOCK_ALIGN;
        valgrind_make_writable( tail, BLOCK_ALIGN );
        memset( tail, 0, BLOCK_ALIGN );
/* initialize contents of a newly created block of memory */
static inline void initialize_block( struct block *block, SIZE_T old_size, SIZE_T size, DWORD flags )
    char *data = (char *)(block + 1);

    if (size <= old_size) return;

    if (flags & HEAP_ZERO_MEMORY)
        valgrind_make_writable( data + old_size, size - old_size );
        memset( data + old_size, 0, size - old_size );
    else if (flags & HEAP_FREE_CHECKING_ENABLED)
        valgrind_make_writable( data + old_size, size - old_size );
        memset( data + old_size, BLOCK_FILL_USED, size - old_size );

/* notify that a new block of memory has been allocated for debugging purposes */
static inline void valgrind_notify_alloc( void const *ptr, SIZE_T size, BOOL init )
#ifdef VALGRIND_MALLOCLIKE_BLOCK
    VALGRIND_MALLOCLIKE_BLOCK( ptr, size, 0, init );

/* notify that a block of memory has been freed for debugging purposes */
static inline void valgrind_notify_free( void const *ptr )
#ifdef VALGRIND_FREELIKE_BLOCK
    VALGRIND_FREELIKE_BLOCK( ptr, 0 );

static inline void valgrind_notify_resize( void const *ptr, SIZE_T size_old, SIZE_T size_new )
#ifdef VALGRIND_RESIZEINPLACE_BLOCK
    /* zero is not a valid size */
    VALGRIND_RESIZEINPLACE_BLOCK( ptr, size_old ? size_old : 1, size_new ? size_new : 1, 0 );

static void valgrind_notify_free_all( SUBHEAP *subheap, const struct heap *heap )
#ifdef VALGRIND_FREELIKE_BLOCK
    if (!RUNNING_ON_VALGRIND) return;
    if (!check_subheap( subheap, heap )) return;

    for (block = first_block( subheap ); block; block = next_block( subheap, block ))
        if (block_get_flags( block ) & BLOCK_FLAG_FREE) continue;
        if (block_get_type( block ) == BLOCK_TYPE_USED) valgrind_notify_free( block + 1 );

/* get the memory protection type to use for a given heap */
static inline ULONG get_protection_type( DWORD flags )
    return (flags & HEAP_CREATE_ENABLE_EXECUTE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
static RTL_CRITICAL_SECTION_DEBUG process_heap_cs_debug =
    0, 0, NULL,  /* will be set later */
    { &process_heap_cs_debug.ProcessLocksList, &process_heap_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": main process heap section") }

static inline ULONG heap_get_flags( const struct heap *heap, ULONG flags )
    if (flags & (HEAP_TAIL_CHECKING_ENABLED | HEAP_FREE_CHECKING_ENABLED)) flags |= HEAP_CHECKING_ENABLED;
    flags &= HEAP_GENERATE_EXCEPTIONS | HEAP_NO_SERIALIZE | HEAP_ZERO_MEMORY | HEAP_REALLOC_IN_PLACE_ONLY | HEAP_CHECKING_ENABLED | HEAP_USER_FLAGS_MASK;
    return heap->flags | flags;

static inline void heap_lock( struct heap *heap, ULONG flags )
    if (flags & HEAP_NO_SERIALIZE) return;
    RtlEnterCriticalSection( &heap->cs );

static inline void heap_unlock( struct heap *heap, ULONG flags )
    if (flags & HEAP_NO_SERIALIZE) return;
    RtlLeaveCriticalSection( &heap->cs );

static void heap_set_status( const struct heap *heap, ULONG flags, NTSTATUS status )
    if (status == STATUS_NO_MEMORY && (flags & HEAP_GENERATE_EXCEPTIONS)) RtlRaiseStatus( status );
    if (status) RtlSetLastWin32ErrorAndNtStatusFromNtStatus( status );
static SIZE_T get_free_list_block_size( unsigned int index )
    DWORD log = index >> FREE_LIST_LINEAR_BITS;
    DWORD linear = index & FREE_LIST_LINEAR_MASK;

    if (log == 0) return index * BLOCK_ALIGN;

    return (((1 << FREE_LIST_LINEAR_BITS) + linear) << (log - 1)) * BLOCK_ALIGN;

 * Given a size, return its index in the block size list for freelists.
 *
 * With FREE_LIST_LINEAR_BITS=2, the list looks like this
 * (with respect to size / BLOCK_ALIGN):
 *
 *   1,   2,   3,   4,   5,   6,   7,   8,
 *  10,  12,  14,  16,  20,  24,  28,  32,
 *  40,  48,  56,  64,  80,  96, 112, 128,
 * 160, 192, 224, 256, 320, 384, 448, 512,
static unsigned int get_free_list_index( SIZE_T block_size )
    DWORD bit, log, linear;

    if (block_size > get_free_list_block_size( FREE_LIST_COUNT - 1 ))
        return FREE_LIST_COUNT - 1;

    block_size /= BLOCK_ALIGN;
    /* find the highest bit */
    if (!BitScanReverse( &bit, block_size ) || bit < FREE_LIST_LINEAR_BITS)
        /* for small values, the index is the same as block_size. */

    /* the highest bit is always set, ignore it and encode the next FREE_LIST_LINEAR_BITS bits
     * as a linear scale, combined with the shift as a log scale, in the free list index. */
    log = bit - FREE_LIST_LINEAR_BITS + 1;
    linear = (block_size >> (bit - FREE_LIST_LINEAR_BITS)) & FREE_LIST_LINEAR_MASK;

    return (log << FREE_LIST_LINEAR_BITS) + linear;
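/* Worked example (added comment, not in the original source): a free block of
 * 9 * BLOCK_ALIGN bytes falls between the 8 and 10 entries of the list above, so
 * get_free_list_index() returns the index of the 8 * BLOCK_ALIGN list; find_free_block()
 * then scans forward from that list until it finds a block that is actually large enough. */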
/* locate a free list entry of the appropriate size */
static inline struct entry *find_free_list( struct heap *heap, SIZE_T block_size, BOOL last )
    unsigned int index = get_free_list_index( block_size );
    if (last && ++index == FREE_LIST_COUNT) index = 0;
    return &heap->free_lists[index];
static void heap_dump( const struct heap *heap )
    const struct block *block;
    const ARENA_LARGE *large;
    const SUBHEAP *subheap;

    TRACE( "heap: %p\n", heap );
    TRACE( "  next %p\n", LIST_ENTRY( heap->entry.next, struct heap, entry ) );

    for (i = 0; heap->bins && i < BLOCK_SIZE_BIN_COUNT; i++)
        const struct bin *bin = heap->bins + i;
        ULONG alloc = ReadNoFence( &bin->count_alloc ), freed = ReadNoFence( &bin->count_freed );
        if (!alloc && !freed) continue;
        TRACE( "    %3u: size %#4Ix, alloc %ld, freed %ld, enabled %lu\n", i, BLOCK_BIN_SIZE( i ),
               alloc, freed, ReadNoFence( &bin->enabled ) );

    TRACE( "  free_lists: %p\n", heap->free_lists );
    for (i = 0; i < FREE_LIST_COUNT; i++)
        TRACE( "    %p: size %#8Ix, prev %p, next %p\n", heap->free_lists + i, get_free_list_block_size( i ),
               LIST_ENTRY( heap->free_lists[i].entry.prev, struct entry, entry ),
               LIST_ENTRY( heap->free_lists[i].entry.next, struct entry, entry ) );

    TRACE( "  subheaps: %p\n", &heap->subheap_list );
    LIST_FOR_EACH_ENTRY( subheap, &heap->subheap_list, SUBHEAP, entry )
        SIZE_T free_size = 0, used_size = 0, overhead = 0;
        const char *base = subheap_base( subheap );

        TRACE( "    %p: base %p first %p last %p end %p\n", subheap, base, first_block( subheap ),
               last_block( subheap ), base + subheap_size( subheap ) );

        if (!check_subheap( subheap, heap )) return;

        overhead += subheap_overhead( subheap );
        for (block = first_block( subheap ); block; block = next_block( subheap, block ))
            if (block_get_flags( block ) & BLOCK_FLAG_FREE)
                TRACE( "      %p: (free) type %#10x, size %#8x, flags %#4x, prev %p, next %p\n", block,
                       block_get_type( block ), block_get_size( block ), block_get_flags( block ),
                       LIST_ENTRY( ((struct entry *)block)->entry.prev, struct entry, entry ),
                       LIST_ENTRY( ((struct entry *)block)->entry.next, struct entry, entry ) );

                overhead += block_get_overhead( block );
                free_size += block_get_size( block ) - block_get_overhead( block );
                TRACE( "      %p: (used) type %#10x, size %#8x, flags %#4x, unused %#4x", block,
                       block_get_type( block ), block_get_size( block ), block_get_flags( block ),
                       block->tail_size );
                if (!(block_get_flags( block ) & BLOCK_FLAG_PREV_FREE)) TRACE( "\n" );
                else TRACE( ", back %p\n", *((struct block **)block - 1) );

                overhead += block_get_overhead( block );
                used_size += block_get_size( block ) - block_get_overhead( block );
        TRACE( "      total %#Ix, used %#Ix, free %#Ix, overhead %#Ix (%Iu%%)\n", used_size + free_size + overhead,
               used_size, free_size, overhead, (overhead * 100) / subheap_size( subheap ) );

    TRACE( "  large blocks: %p\n", &heap->large_list );
    LIST_FOR_EACH_ENTRY( large, &heap->large_list, ARENA_LARGE, entry )
        TRACE( "    %p: (large) type %#10x, size %#8x, flags %#4x, total_size %#10Ix, alloc_size %#10Ix, prev %p, next %p\n",
               large, block_get_type( &large->block ), block_get_size( &large->block ), block_get_flags( &large->block ), large->block_size,
               large->data_size, LIST_ENTRY( large->entry.prev, ARENA_LARGE, entry ), LIST_ENTRY( large->entry.next, ARENA_LARGE, entry ) );

    if (heap->pending_free)
        TRACE( "  pending blocks: %p\n", heap->pending_free );
        for (i = 0; i < MAX_FREE_PENDING; ++i)
            if (!(block = heap->pending_free[i])) break;

            TRACE( "    %c%p: (pend) type %#10x, size %#8x, flags %#4x, unused %#4x", i == heap->pending_pos ? '*' : ' ',
                   block, block_get_type( block ), block_get_size( block ), block_get_flags( block ), block->tail_size );
            if (!(block_get_flags( block ) & BLOCK_FLAG_PREV_FREE)) TRACE( "\n" );
            else TRACE( ", back %p\n", *((struct block **)block - 1) );
static const char *debugstr_heap_entry( struct rtl_heap_entry *entry )
    const char *str = wine_dbg_sprintf( "data %p, size %#Ix, overhead %#x, region %#x, flags %#x", entry->lpData,
                                        entry->cbData, entry->cbOverhead, entry->iRegionIndex, entry->wFlags );
    if (!(entry->wFlags & RTL_HEAP_ENTRY_REGION)) return str;
    return wine_dbg_sprintf( "%s, commit %#lx, uncommit %#lx, first %p, last %p", str, entry->Region.dwCommittedSize,
                             entry->Region.dwUnCommittedSize, entry->Region.lpFirstBlock, entry->Region.lpLastBlock );

static struct heap *unsafe_heap_from_handle( HANDLE handle, ULONG flags, ULONG *heap_flags )
    struct heap *heap = handle;

    if (!heap || (heap->magic != HEAP_MAGIC))
        ERR( "Invalid handle %p!\n", handle );

    if (heap->flags & HEAP_VALIDATE_ALL)
        heap_lock( heap, 0 );
        valid = heap_validate( heap );
        heap_unlock( heap, 0 );

        if (!valid && TRACE_ON(heap))

    *heap_flags = heap_get_flags( heap, flags );
    return valid ? heap : NULL;
*find_subheap( const struct heap
*heap
, const struct block
*block
, BOOL heap_walk
)
796 LIST_FOR_EACH_ENTRY( subheap
, &heap
->subheap_list
, SUBHEAP
, entry
)
798 SIZE_T blocks_size
= (char *)last_block( subheap
) - (char *)first_block( subheap
);
799 if (!check_subheap( subheap
, heap
)) return NULL
;
800 if (contains( first_block( subheap
), blocks_size
, block
, sizeof(*block
) )) return subheap
;
801 /* outside of blocks region, possible corruption or heap_walk */
802 if (contains( subheap_base( subheap
), subheap_size( subheap
), block
, 1 )) return heap_walk
? subheap
: NULL
;
809 static inline BOOL
subheap_commit( const struct heap
*heap
, SUBHEAP
*subheap
, const struct block
*block
, SIZE_T block_size
)
811 const char *end
= (char *)subheap_base( subheap
) + subheap_size( subheap
), *commit_end
;
815 commit_end
= (char *)block
+ block_size
+ sizeof(struct entry
);
816 commit_end
= ROUND_ADDR( (char *)commit_end
+ REGION_ALIGN
- 1, REGION_ALIGN
- 1 );
818 if (commit_end
> end
) commit_end
= end
;
819 if (commit_end
<= (char *)subheap_commit_end( subheap
)) return TRUE
;
821 addr
= (void *)subheap_commit_end( subheap
);
822 size
= commit_end
- (char *)addr
;
824 if (NtAllocateVirtualMemory( NtCurrentProcess(), &addr
, 0, &size
, MEM_COMMIT
,
825 get_protection_type( heap
->flags
) ))
827 WARN( "Could not commit %#Ix bytes at %p for heap %p\n", size
, addr
, heap
);
831 subheap
->data_size
= (char *)commit_end
- (char *)(subheap
+ 1);
835 static inline BOOL
subheap_decommit( const struct heap
*heap
, SUBHEAP
*subheap
, const void *commit_end
)
837 char *base
= subheap_base( subheap
);
841 commit_end
= ROUND_ADDR( (char *)commit_end
+ REGION_ALIGN
- 1, REGION_ALIGN
- 1 );
842 if (subheap
== &heap
->subheap
) commit_end
= max( (char *)commit_end
, (char *)base
+ heap
->min_size
);
843 if (commit_end
>= subheap_commit_end( subheap
)) return TRUE
;
845 size
= (char *)subheap_commit_end( subheap
) - (char *)commit_end
;
846 addr
= (void *)commit_end
;
848 if (NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_DECOMMIT
))
850 WARN( "Could not decommit %#Ix bytes at %p for heap %p\n", size
, addr
, heap
);
854 subheap
->data_size
= (char *)commit_end
- (char *)(subheap
+ 1);
858 static void block_init_free( struct block
*block
, ULONG flags
, SUBHEAP
*subheap
, SIZE_T block_size
)
860 const char *end
= (char *)block
+ block_size
, *commit_end
= subheap_commit_end( subheap
);
861 struct entry
*entry
= (struct entry
*)block
;
863 valgrind_make_writable( block
, sizeof(*entry
) );
864 block_set_type( block
, BLOCK_TYPE_FREE
);
865 block_set_base( block
, subheap_base( subheap
) );
866 block_set_flags( block
, ~0, BLOCK_FLAG_FREE
);
867 block_set_size( block
, block_size
);
869 /* If debugging, erase the freed block content */
871 if (end
> commit_end
) end
= commit_end
;
872 if (end
> (char *)(entry
+ 1)) mark_block_free( entry
+ 1, end
- (char *)(entry
+ 1), flags
);
static void insert_free_block( struct heap *heap, ULONG flags, SUBHEAP *subheap, struct block *block )
    struct entry *entry = (struct entry *)block, *list;

    if ((next = next_block( subheap, block )))
        /* set the next block PREV_FREE flag and back pointer */
        block_set_flags( next, 0, BLOCK_FLAG_PREV_FREE );
        valgrind_make_writable( (struct block **)next - 1, sizeof(struct block *) );
        *((struct block **)next - 1) = block;

    list = find_free_list( heap, block_get_size( block ), !next );
    if (!next) list_add_before( &list->entry, &entry->entry );
    else list_add_after( &list->entry, &entry->entry );

static struct block *heap_delay_free( struct heap *heap, ULONG flags, struct block *block )
    if (!heap->pending_free) return block;

    block_set_type( block, BLOCK_TYPE_DEAD );
    mark_block_free( block + 1, block_get_size( block ) - sizeof(*block), flags );

    heap_lock( heap, flags );

    tmp = heap->pending_free[heap->pending_pos];
    heap->pending_free[heap->pending_pos] = block;
    heap->pending_pos = (heap->pending_pos + 1) % MAX_FREE_PENDING;

    heap_unlock( heap, flags );
heap_free_block( struct heap
*heap
, ULONG flags
, struct block
*block
)
917 SUBHEAP
*subheap
= block_get_subheap( heap
, block
);
918 SIZE_T block_size
= block_get_size( block
);
922 if ((next
= next_block( subheap
, block
)) && (block_get_flags( next
) & BLOCK_FLAG_FREE
))
924 /* merge with next block if it is free */
925 entry
= (struct entry
*)next
;
926 block_size
+= block_get_size( &entry
->block
);
927 list_remove( &entry
->entry
);
928 next
= next_block( subheap
, next
);
931 if (block_get_flags( block
) & BLOCK_FLAG_PREV_FREE
)
933 /* merge with previous block if it is free */
934 entry
= *((struct entry
**)block
- 1);
935 block_size
+= block_get_size( &entry
->block
);
936 list_remove( &entry
->entry
);
937 block
= &entry
->block
;
940 if (block
== first_block( subheap
) && !next
&& subheap
!= &heap
->subheap
)
942 /* free the subheap if it's empty and not the main one */
943 void *addr
= subheap_base( subheap
);
946 list_remove( &subheap
->entry
);
947 NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_RELEASE
);
948 return STATUS_SUCCESS
;
951 block_init_free( block
, flags
, subheap
, block_size
);
952 insert_free_block( heap
, flags
, subheap
, block
);
954 /* keep room for a full committed block as hysteresis */
955 if (!next
) subheap_decommit( heap
, subheap
, (char *)((struct entry
*)block
+ 1) + REGION_ALIGN
);
957 return STATUS_SUCCESS
;
961 static struct block
*split_block( struct heap
*heap
, ULONG flags
, struct block
*block
,
962 SIZE_T old_block_size
, SIZE_T block_size
)
964 SUBHEAP
*subheap
= block_get_subheap( heap
, block
);
966 if (old_block_size
>= block_size
+ HEAP_MIN_BLOCK_SIZE
)
968 block_set_size( block
, block_size
);
969 return next_block( subheap
, block
);
972 block_set_size( block
, old_block_size
);
977 static void *allocate_region( struct heap
*heap
, ULONG flags
, SIZE_T
*region_size
, SIZE_T
*commit_size
)
982 if (heap
&& !(flags
& HEAP_GROWABLE
))
984 WARN( "Heap %p isn't growable, cannot allocate %#Ix bytes\n", heap
, *region_size
);
988 /* allocate the memory block */
989 if ((status
= NtAllocateVirtualMemory( NtCurrentProcess(), &addr
, 0, region_size
, MEM_RESERVE
,
990 get_protection_type( flags
) )))
992 WARN( "Could not allocate %#Ix bytes, status %#lx\n", *region_size
, status
);
995 if ((status
= NtAllocateVirtualMemory( NtCurrentProcess(), &addr
, 0, commit_size
, MEM_COMMIT
,
996 get_protection_type( flags
) )))
998 WARN( "Could not commit %#Ix bytes, status %#lx\n", *commit_size
, status
);
1006 static NTSTATUS
heap_allocate_large( struct heap
*heap
, ULONG flags
, SIZE_T block_size
,
1007 SIZE_T size
, void **ret
)
1010 SIZE_T total_size
= ROUND_SIZE( sizeof(*arena
) + size
, REGION_ALIGN
- 1 );
1011 struct block
*block
;
1013 if (total_size
< size
) return STATUS_NO_MEMORY
; /* overflow */
1014 if (!(arena
= allocate_region( heap
, flags
, &total_size
, &total_size
))) return STATUS_NO_MEMORY
;
1016 block
= &arena
->block
;
1017 arena
->data_size
= size
;
1018 arena
->block_size
= (char *)arena
+ total_size
- (char *)block
;
1020 block_set_type( block
, BLOCK_TYPE_LARGE
);
1021 block_set_base( block
, arena
);
1022 block_set_flags( block
, ~0, BLOCK_FLAG_LARGE
| BLOCK_USER_FLAGS( flags
) );
1023 block_set_size( block
, 0 );
1025 heap_lock( heap
, flags
);
1026 list_add_tail( &heap
->large_list
, &arena
->entry
);
1027 heap_unlock( heap
, flags
);
1029 valgrind_make_noaccess( (char *)block
+ sizeof(*block
) + arena
->data_size
,
1030 arena
->block_size
- sizeof(*block
) - arena
->data_size
);
1033 return STATUS_SUCCESS
;
1037 static NTSTATUS
heap_free_large( struct heap
*heap
, ULONG flags
, struct block
*block
)
1039 ARENA_LARGE
*arena
= CONTAINING_RECORD( block
, ARENA_LARGE
, block
);
1040 LPVOID address
= arena
;
1043 heap_lock( heap
, flags
);
1044 list_remove( &arena
->entry
);
1045 heap_unlock( heap
, flags
);
1047 return NtFreeVirtualMemory( NtCurrentProcess(), &address
, &size
, MEM_RELEASE
);
1051 /***********************************************************************
1054 static BOOL
find_large_block( const struct heap
*heap
, const struct block
*block
)
1058 LIST_FOR_EACH_ENTRY( arena
, &heap
->large_list
, ARENA_LARGE
, entry
)
1059 if (block
== &arena
->block
) return TRUE
;
1064 static BOOL
validate_large_block( const struct heap
*heap
, const struct block
*block
)
1066 const ARENA_LARGE
*arena
= CONTAINING_RECORD( block
, ARENA_LARGE
, block
);
1067 const char *err
= NULL
;
1069 if (ROUND_ADDR( block
, REGION_ALIGN
- 1 ) != arena
)
1070 err
= "invalid block BLOCK_ALIGN";
1071 else if (block_get_size( block
))
1072 err
= "invalid block size";
1073 else if (!(block_get_flags( block
) & BLOCK_FLAG_LARGE
))
1074 err
= "invalid block flags";
1075 else if (block_get_type( block
) != BLOCK_TYPE_LARGE
)
1076 err
= "invalid block type";
1077 else if (!contains( block
, arena
->block_size
, block
+ 1, arena
->data_size
))
1078 err
= "invalid block size";
1082 ERR( "heap %p, block %p: %s\n", heap
, block
, err
);
1083 if (TRACE_ON(heap
)) heap_dump( heap
);
1090 static SUBHEAP
*create_subheap( struct heap
*heap
, DWORD flags
, SIZE_T total_size
, SIZE_T commit_size
)
1095 commit_size
= ROUND_SIZE( max( commit_size
, REGION_ALIGN
), REGION_ALIGN
- 1 );
1096 total_size
= min( max( commit_size
, total_size
), 0xffff0000 ); /* don't allow a heap larger than 4GB */
1098 if (!(subheap
= allocate_region( heap
, flags
, &total_size
, &commit_size
))) return NULL
;
1100 subheap
->user_value
= heap
;
1101 subheap_set_bounds( subheap
, (char *)subheap
+ commit_size
, (char *)subheap
+ total_size
);
1102 block_size
= (SIZE_T
)ROUND_ADDR( subheap_size( subheap
) - subheap_overhead( subheap
), BLOCK_ALIGN
- 1 );
1103 block_init_free( first_block( subheap
), flags
, subheap
, block_size
);
1105 list_add_head( &heap
->subheap_list
, &subheap
->entry
);
1111 static struct block
*find_free_block( struct heap
*heap
, ULONG flags
, SIZE_T block_size
)
1113 struct list
*ptr
= &find_free_list( heap
, block_size
, FALSE
)->entry
;
1114 struct entry
*entry
;
1115 struct block
*block
;
1119 /* Find a suitable free list, and in it find a block large enough */
1121 while ((ptr
= list_next( &heap
->free_lists
[0].entry
, ptr
)))
1123 entry
= LIST_ENTRY( ptr
, struct entry
, entry
);
1124 block
= &entry
->block
;
1125 if (block_get_flags( block
) == BLOCK_FLAG_FREE_LINK
) continue;
1126 if (block_get_size( block
) >= block_size
)
1128 if (!subheap_commit( heap
, block_get_subheap( heap
, block
), block
, block_size
)) return NULL
;
1129 list_remove( &entry
->entry
);
1134 /* make sure we can fit the block and a free entry at the end */
1135 total_size
= sizeof(SUBHEAP
) + block_size
+ sizeof(struct entry
);
1136 if (total_size
< block_size
) return NULL
; /* overflow */
1138 if ((subheap
= create_subheap( heap
, flags
, max( heap
->grow_size
, total_size
), total_size
)))
1140 heap
->grow_size
= min( heap
->grow_size
* 2, HEAP_MAX_GROW_SIZE
);
1142 else while (!subheap
) /* shrink the grow size again if we are running out of space */
1144 if (heap
->grow_size
<= total_size
|| heap
->grow_size
<= 4 * 1024 * 1024) return NULL
;
1145 heap
->grow_size
/= 2;
1146 subheap
= create_subheap( heap
, flags
, max( heap
->grow_size
, total_size
), total_size
);
1149 TRACE( "created new sub-heap %p of %#Ix bytes for heap %p\n", subheap
, subheap_size( subheap
), heap
);
1151 return first_block( subheap
);
1155 static BOOL
is_valid_free_block( const struct heap
*heap
, const struct block
*block
)
1157 const SUBHEAP
*subheap
;
1160 if ((subheap
= find_subheap( heap
, block
, FALSE
))) return TRUE
;
1161 for (i
= 0; i
< FREE_LIST_COUNT
; i
++) if (block
== &heap
->free_lists
[i
].block
) return TRUE
;
1165 static BOOL
validate_free_block( const struct heap
*heap
, const SUBHEAP
*subheap
, const struct block
*block
)
1167 const char *err
= NULL
, *base
= subheap_base( subheap
), *commit_end
= subheap_commit_end( subheap
);
1168 const struct entry
*entry
= (struct entry
*)block
;
1169 const struct block
*prev
, *next
;
1170 DWORD flags
= heap
->flags
;
1172 if ((ULONG_PTR
)(block
+ 1) % BLOCK_ALIGN
)
1173 err
= "invalid block BLOCK_ALIGN";
1174 else if (block_get_type( block
) != BLOCK_TYPE_FREE
)
1175 err
= "invalid block header";
1176 else if (!(block_get_flags( block
) & BLOCK_FLAG_FREE
) || (block_get_flags( block
) & BLOCK_FLAG_PREV_FREE
))
1177 err
= "invalid block flags";
1178 else if (!contains( base
, subheap_size( subheap
), block
, block_get_size( block
) ))
1179 err
= "invalid block size";
1180 else if (!is_valid_free_block( heap
, (next
= &LIST_ENTRY( entry
->entry
.next
, struct entry
, entry
)->block
) ))
1181 err
= "invalid next free block pointer";
1182 else if (!(block_get_flags( next
) & BLOCK_FLAG_FREE
) || block_get_type( next
) != BLOCK_TYPE_FREE
)
1183 err
= "invalid next free block header";
1184 else if (!is_valid_free_block( heap
, (prev
= &LIST_ENTRY( entry
->entry
.prev
, struct entry
, entry
)->block
) ))
1185 err
= "invalid previous free block pointer";
1186 else if (!(block_get_flags( prev
) & BLOCK_FLAG_FREE
) || block_get_type( prev
) != BLOCK_TYPE_FREE
)
1187 err
= "invalid previous free block header";
1188 else if ((next
= next_block( subheap
, block
)))
1190 if (!(block_get_flags( next
) & BLOCK_FLAG_PREV_FREE
))
1191 err
= "invalid next block flags";
1192 if (*((struct block
**)next
- 1) != block
)
1193 err
= "invalid next block back pointer";
1196 if (!err
&& (flags
& HEAP_FREE_CHECKING_ENABLED
))
1198 const char *ptr
= (char *)(entry
+ 1), *end
= (char *)block
+ block_get_size( block
);
1199 if (next
) end
-= sizeof(struct block
*);
1200 if (end
> commit_end
) end
= commit_end
;
1201 while (!err
&& ptr
< end
)
1203 if (*(DWORD
*)ptr
!= BLOCK_FILL_FREE
) err
= "free block overwritten";
1204 ptr
+= sizeof(DWORD
);
1210 ERR( "heap %p, block %p: %s\n", heap
, block
, err
);
1211 if (TRACE_ON(heap
)) heap_dump( heap
);
static BOOL validate_used_block( const struct heap *heap, const SUBHEAP *subheap, const struct block *block,
                                 unsigned int expected_block_type )
    const char *err = NULL, *base = subheap_base( subheap ), *commit_end = subheap_commit_end( subheap );
    DWORD flags = heap->flags;
    const struct block *next;

    if ((ULONG_PTR)(block + 1) % BLOCK_ALIGN)
        err = "invalid block BLOCK_ALIGN";
    else if (block_get_type( block ) != BLOCK_TYPE_USED && block_get_type( block ) != BLOCK_TYPE_DEAD)
        err = "invalid block header";
    else if (expected_block_type && block_get_type( block ) != expected_block_type)
        err = "invalid block type";
    else if (block_get_flags( block ) & BLOCK_FLAG_FREE)
        err = "invalid block flags";
    else if (!contains( base, commit_end - base, block, block_get_size( block ) ))
        err = "invalid block size";
    else if (block->tail_size > block_get_size( block ) - sizeof(*block))
        err = "invalid block unused size";
    else if ((next = next_block( subheap, block )) && (block_get_flags( next ) & BLOCK_FLAG_PREV_FREE) &&
             /* LFH blocks do not use BLOCK_FLAG_PREV_FREE or back pointer */
             !(block_get_flags( block ) & BLOCK_FLAG_LFH))
        err = "invalid next block flags";
    else if (block_get_flags( block ) & BLOCK_FLAG_PREV_FREE)
        const struct block *prev = *((struct block **)block - 1);
        if (!is_valid_free_block( heap, prev ))
            err = "invalid previous block pointer";
        else if (!(block_get_flags( prev ) & BLOCK_FLAG_FREE) || block_get_type( prev ) != BLOCK_TYPE_FREE)
            err = "invalid previous block flags";
        if ((char *)prev + block_get_size( prev ) != (char *)block)
            err = "invalid previous block size";

    if (!err && block_get_type( block ) == BLOCK_TYPE_DEAD)
        const char *ptr = (char *)(block + 1), *end = (char *)block + block_get_size( block );
        while (!err && ptr < end)
            if (*(DWORD *)ptr != BLOCK_FILL_FREE) err = "free block overwritten";
            ptr += sizeof(DWORD);
    else if (!err && (flags & HEAP_TAIL_CHECKING_ENABLED))
        const unsigned char *tail = (unsigned char *)block + block_get_size( block ) - block->tail_size;
        for (i = 0; !err && i < BLOCK_ALIGN; i++) if (tail[i] != BLOCK_FILL_TAIL) err = "invalid block tail";

        ERR( "heap %p, block %p: %s\n", heap, block, err );
        if (TRACE_ON(heap)) heap_dump( heap );
heap_validate_ptr( const struct heap
*heap
, const void *ptr
)
1280 const struct block
*block
= (struct block
*)ptr
- 1;
1281 const SUBHEAP
*subheap
;
1283 if (!(subheap
= find_subheap( heap
, block
, FALSE
)))
1285 if (!find_large_block( heap
, block
))
1287 WARN("heap %p, ptr %p: block region not found\n", heap
, ptr
);
1291 return validate_large_block( heap
, block
);
1294 return validate_used_block( heap
, subheap
, block
, BLOCK_TYPE_USED
);
1297 static BOOL
heap_validate( const struct heap
*heap
)
1299 const ARENA_LARGE
*large_arena
;
1300 const struct block
*block
;
1301 const SUBHEAP
*subheap
;
1303 LIST_FOR_EACH_ENTRY( subheap
, &heap
->subheap_list
, SUBHEAP
, entry
)
1305 if (!check_subheap( subheap
, heap
))
1307 ERR( "heap %p, subheap %p corrupted sizes or user_value\n", heap
, subheap
);
1308 if (TRACE_ON(heap
)) heap_dump( heap
);
1312 for (block
= first_block( subheap
); block
; block
= next_block( subheap
, block
))
1314 if (block_get_flags( block
) & BLOCK_FLAG_FREE
)
1316 if (!validate_free_block( heap
, subheap
, block
)) return FALSE
;
1320 if (!validate_used_block( heap
, subheap
, block
, 0 )) return FALSE
;
1325 if (heap
->pending_free
)
1329 for (i
= 0; i
< MAX_FREE_PENDING
; i
++)
1331 if (!(block
= heap
->pending_free
[i
])) break;
1333 subheap
= find_subheap( heap
, block
, FALSE
);
1336 ERR( "heap %p: cannot find valid subheap for delayed freed block %p\n", heap
, block
);
1337 if (TRACE_ON(heap
)) heap_dump( heap
);
1341 if (!validate_used_block( heap
, subheap
, block
, BLOCK_TYPE_DEAD
)) return FALSE
;
1344 for (; i
< MAX_FREE_PENDING
; i
++)
1346 if ((block
= heap
->pending_free
[i
]))
1348 ERR( "heap %p: unexpected delayed freed block %p at slot %u\n", heap
, block
, i
);
1349 if (TRACE_ON(heap
)) heap_dump( heap
);
1355 LIST_FOR_EACH_ENTRY( large_arena
, &heap
->large_list
, ARENA_LARGE
, entry
)
1356 if (!validate_large_block( heap
, &large_arena
->block
)) return FALSE
;
static inline struct block *unsafe_block_from_ptr( struct heap *heap, ULONG flags, const void *ptr )
    struct block *block = (struct block *)ptr - 1;
    const char *err = NULL;

    if (flags & HEAP_VALIDATE)
        heap_lock( heap, flags );
        if (!heap_validate_ptr( heap, ptr )) block = NULL;
        heap_unlock( heap, flags );

    if ((ULONG_PTR)ptr % BLOCK_ALIGN)
        err = "invalid ptr alignment";
    else if (block_get_type( block ) == BLOCK_TYPE_DEAD)
        err = "delayed freed block";
    else if (block_get_type( block ) == BLOCK_TYPE_FREE)
        err = "already freed block";
    else if (block_get_flags( block ) & BLOCK_FLAG_LFH)
        /* LFH block base_offset points to the group, not the subheap */
        if (block_get_type( block ) != BLOCK_TYPE_USED)
            err = "invalid block type";
    else if ((subheap = block_get_subheap( heap, block )) >= (SUBHEAP *)block)
        err = "invalid base offset";
    else if (block_get_type( block ) == BLOCK_TYPE_USED)
        const char *base = subheap_base( subheap ), *commit_end = subheap_commit_end( subheap );
        if (subheap->user_value != heap) err = "mismatching heap";
        else if (!contains( base, commit_end - base, block, block_get_size( block ) )) err = "invalid block size";
    else if (block_get_type( block ) == BLOCK_TYPE_LARGE)
        ARENA_LARGE *large = subheap_base( subheap );
        if (block != &large->block) err = "invalid large block";

        err = "invalid block type";

    if (err) WARN( "heap %p, block %p: %s\n", heap, block, err );
    return err ? NULL : block;
heap_flags_from_global_flag( DWORD flag
)
1413 if (flag
& FLG_HEAP_ENABLE_TAIL_CHECK
)
1414 ret
|= HEAP_TAIL_CHECKING_ENABLED
;
1415 if (flag
& FLG_HEAP_ENABLE_FREE_CHECK
)
1416 ret
|= HEAP_FREE_CHECKING_ENABLED
;
1417 if (flag
& FLG_HEAP_VALIDATE_PARAMETERS
)
1418 ret
|= HEAP_VALIDATE_PARAMS
| HEAP_TAIL_CHECKING_ENABLED
| HEAP_FREE_CHECKING_ENABLED
;
1419 if (flag
& FLG_HEAP_VALIDATE_ALL
)
1420 ret
|= HEAP_VALIDATE_ALL
| HEAP_TAIL_CHECKING_ENABLED
| HEAP_FREE_CHECKING_ENABLED
;
1421 if (flag
& FLG_HEAP_DISABLE_COALESCING
)
1422 ret
|= HEAP_DISABLE_COALESCE_ON_FREE
;
1423 if (flag
& FLG_HEAP_PAGE_ALLOCS
)
1424 ret
|= HEAP_PAGE_ALLOCS
;
1429 /***********************************************************************
1430 * heap_set_debug_flags
1432 static void heap_set_debug_flags( HANDLE handle
)
1434 ULONG global_flags
= RtlGetNtGlobalFlags();
1435 DWORD dummy
, flags
, force_flags
;
1438 if (TRACE_ON(heap
)) global_flags
|= FLG_HEAP_VALIDATE_ALL
;
1439 if (WARN_ON(heap
)) global_flags
|= FLG_HEAP_VALIDATE_PARAMETERS
;
1441 heap
= unsafe_heap_from_handle( handle
, 0, &dummy
);
1442 flags
= heap_flags_from_global_flag( global_flags
);
1443 force_flags
= (heap
->flags
| flags
) & ~(HEAP_SHARED
|HEAP_DISABLE_COALESCE_ON_FREE
);
1445 if (global_flags
& FLG_HEAP_ENABLE_TAGGING
) flags
|= HEAP_SHARED
;
1446 if (!(global_flags
& FLG_HEAP_PAGE_ALLOCS
)) force_flags
&= ~(HEAP_GROWABLE
|HEAP_PRIVATE
);
1448 if (RUNNING_ON_VALGRIND
) flags
= 0; /* no sense in validating since Valgrind catches accesses */
1450 heap
->flags
|= flags
;
1451 heap
->force_flags
|= force_flags
;
1453 if (flags
& (HEAP_FREE_CHECKING_ENABLED
| HEAP_TAIL_CHECKING_ENABLED
)) /* fix existing blocks */
1455 struct block
*block
;
1458 LIST_FOR_EACH_ENTRY( subheap
, &heap
->subheap_list
, SUBHEAP
, entry
)
1460 const char *commit_end
= subheap_commit_end( subheap
);
1462 if (!check_subheap( subheap
, heap
)) break;
1464 for (block
= first_block( subheap
); block
; block
= next_block( subheap
, block
))
1466 if (block_get_flags( block
) & BLOCK_FLAG_FREE
)
1468 char *data
= (char *)block
+ block_get_overhead( block
), *end
= (char *)block
+ block_get_size( block
);
1469 if (next_block( subheap
, block
)) end
-= sizeof(struct block
*);
1470 if (end
> commit_end
) mark_block_free( data
, commit_end
- data
, flags
);
1471 else mark_block_free( data
, end
- data
, flags
);
1475 if (block_get_type( block
) == BLOCK_TYPE_DEAD
) mark_block_free( block
+ 1, block_get_size( block
) - sizeof(*block
), flags
);
1476 else mark_block_tail( block
, flags
);
1482 if ((heap
->flags
& HEAP_GROWABLE
) && !heap
->pending_free
&&
1483 ((flags
& HEAP_FREE_CHECKING_ENABLED
) || RUNNING_ON_VALGRIND
))
1485 heap
->pending_free
= RtlAllocateHeap( handle
, HEAP_ZERO_MEMORY
,
1486 MAX_FREE_PENDING
* sizeof(*heap
->pending_free
) );
1487 heap
->pending_pos
= 0;
1492 /***********************************************************************
1493 * RtlCreateHeap (NTDLL.@)
1495 * Create a new Heap.
1498 * flags [I] HEAP_ flags from "winnt.h"
1499 * addr [I] Desired base address
1500 * totalSize [I] Total size of the heap, or 0 for a growable heap
1501 * commitSize [I] Amount of heap space to commit
1502 * unknown [I] Not yet understood
1503 * definition [I] Heap definition
1506 * Success: A HANDLE to the newly created heap.
1507 * Failure: a NULL HANDLE.
1509 HANDLE WINAPI
RtlCreateHeap( ULONG flags
, void *addr
, SIZE_T total_size
, SIZE_T commit_size
,
1510 void *unknown
, RTL_HEAP_DEFINITION
*definition
)
1512 struct entry
*entry
;
1518 TRACE( "flags %#lx, addr %p, total_size %#Ix, commit_size %#Ix, unknown %p, definition %p\n",
1519 flags
, addr
, total_size
, commit_size
, unknown
, definition
);
1521 flags
&= ~(HEAP_TAIL_CHECKING_ENABLED
|HEAP_FREE_CHECKING_ENABLED
);
1522 if (process_heap
) flags
|= HEAP_PRIVATE
;
1523 if (!process_heap
|| !total_size
|| (flags
& HEAP_SHARED
)) flags
|= HEAP_GROWABLE
;
1524 if (!total_size
) total_size
= commit_size
+ HEAP_INITIAL_SIZE
;
1528 if (!commit_size
) commit_size
= REGION_ALIGN
;
1529 total_size
= min( max( total_size
, commit_size
), 0xffff0000 ); /* don't allow a heap larger than 4GB */
1530 commit_size
= min( total_size
, ROUND_SIZE( commit_size
, REGION_ALIGN
- 1 ) );
1531 if (!(heap
= allocate_region( NULL
, flags
, &total_size
, &commit_size
))) return 0;
1534 heap
->ffeeffee
= 0xffeeffee;
1535 heap
->auto_flags
= (flags
& HEAP_GROWABLE
);
1536 heap
->flags
= (flags
& ~HEAP_SHARED
);
1537 heap
->compat_info
= HEAP_STD
;
1538 heap
->magic
= HEAP_MAGIC
;
1539 heap
->grow_size
= HEAP_INITIAL_GROW_SIZE
;
1540 heap
->min_size
= commit_size
;
1541 list_init( &heap
->subheap_list
);
1542 list_init( &heap
->large_list
);
1544 list_init( &heap
->free_lists
[0].entry
);
1545 for (i
= 0, entry
= heap
->free_lists
; i
< FREE_LIST_COUNT
; i
++, entry
++)
1547 block_set_flags( &entry
->block
, ~0, BLOCK_FLAG_FREE_LINK
);
1548 block_set_size( &entry
->block
, 0 );
1549 block_set_type( &entry
->block
, BLOCK_TYPE_FREE
);
1550 block_set_base( &entry
->block
, heap
);
1551 if (i
) list_add_after( &entry
[-1].entry
, &entry
->entry
);
1554 if (!process_heap
) /* do it by hand to avoid memory allocations */
1556 heap
->cs
.DebugInfo
= &process_heap_cs_debug
;
1557 heap
->cs
.LockCount
= -1;
1558 heap
->cs
.RecursionCount
= 0;
1559 heap
->cs
.OwningThread
= 0;
1560 heap
->cs
.LockSemaphore
= 0;
1561 heap
->cs
.SpinCount
= 0;
1562 process_heap_cs_debug
.CriticalSection
= &heap
->cs
;
1566 RtlInitializeCriticalSection( &heap
->cs
);
1567 heap
->cs
.DebugInfo
->Spare
[0] = (DWORD_PTR
)(__FILE__
": heap.cs");
1570 subheap
= &heap
->subheap
;
1571 subheap
->user_value
= heap
;
1572 subheap_set_bounds( subheap
, (char *)heap
+ commit_size
, (char *)heap
+ total_size
);
1573 block_size
= (SIZE_T
)ROUND_ADDR( subheap_size( subheap
) - subheap_overhead( subheap
), BLOCK_ALIGN
- 1 );
1574 block_init_free( first_block( subheap
), flags
, subheap
, block_size
);
1576 insert_free_block( heap
, flags
, subheap
, first_block( subheap
) );
1577 list_add_head( &heap
->subheap_list
, &subheap
->entry
);
1579 heap_set_debug_flags( heap
);
1581 if (heap
->flags
& HEAP_GROWABLE
)
1583 SIZE_T size
= (sizeof(struct bin
) + sizeof(struct group
*) * ARRAY_SIZE(affinity_mapping
)) * BLOCK_SIZE_BIN_COUNT
;
1584 NtAllocateVirtualMemory( NtCurrentProcess(), (void *)&heap
->bins
,
1585 0, &size
, MEM_COMMIT
, PAGE_READWRITE
);
1587 for (i
= 0; heap
->bins
&& i
< BLOCK_SIZE_BIN_COUNT
; ++i
)
1589 RtlInitializeSListHead( &heap
->bins
[i
].groups
);
1590 /* offset affinity_group_base to interleave the bin affinity group pointers */
1591 heap
->bins
[i
].affinity_group_base
= (struct group
**)(heap
->bins
+ BLOCK_SIZE_BIN_COUNT
) + i
;
1595 /* link it into the per-process heap list */
1598 RtlEnterCriticalSection( &process_heap
->cs
);
1599 list_add_head( &process_heap
->entry
, &heap
->entry
);
1600 RtlLeaveCriticalSection( &process_heap
->cs
);
1604 process_heap
= heap
; /* assume the first heap we create is the process main heap */
1605 list_init( &process_heap
->entry
);
/***********************************************************************
 *           RtlDestroyHeap   (NTDLL.@)
 *
 * Destroy a Heap created with RtlCreateHeap().
 *
 *  heap [I] Heap to destroy.
 *
 *  Success: A NULL HANDLE, if heap is NULL or it was destroyed
 *  Failure: The Heap handle, if heap is the process heap.
HANDLE WINAPI RtlDestroyHeap( HANDLE handle )
    SUBHEAP *subheap, *next;
    ARENA_LARGE *arena, *arena_next;
    struct block **pending, **tmp;

    TRACE( "handle %p\n", handle );

    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags )) && handle &&
        (((struct heap *)handle)->flags & HEAP_VALIDATE_PARAMS) &&
        NtCurrentTeb()->Peb->BeingDebugged)
        DbgPrint( "Attempt to destroy an invalid heap\n" );

    if (!heap) return handle;

    if ((pending = heap->pending_free))
        heap->pending_free = NULL;
        for (tmp = pending; *tmp && tmp != pending + MAX_FREE_PENDING; ++tmp)
            if (!heap_free_block_lfh( heap, heap->flags, *tmp )) continue;
            heap_free_block( heap, heap->flags, *tmp );
        RtlFreeHeap( handle, 0, pending );

    if (heap == process_heap) return handle;  /* cannot delete the main process heap */

    /* remove it from the per-process list */
    RtlEnterCriticalSection( &process_heap->cs );
    list_remove( &heap->entry );
    RtlLeaveCriticalSection( &process_heap->cs );

    heap->cs.DebugInfo->Spare[0] = 0;
    RtlDeleteCriticalSection( &heap->cs );
    LIST_FOR_EACH_ENTRY_SAFE( arena, arena_next, &heap->large_list, ARENA_LARGE, entry )
        list_remove( &arena->entry );
        NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );

    LIST_FOR_EACH_ENTRY_SAFE( subheap, next, &heap->subheap_list, SUBHEAP, entry )
        if (subheap == &heap->subheap) continue;  /* do this one last */
        valgrind_notify_free_all( subheap, heap );
        list_remove( &subheap->entry );

        addr = ROUND_ADDR( subheap, REGION_ALIGN - 1 );
        NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );

    valgrind_notify_free_all( &heap->subheap, heap );
    if ((addr = heap->bins))
        NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );

    NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );
static SIZE_T heap_get_block_size( const struct heap *heap, ULONG flags, SIZE_T size )
{
    static const ULONG padd_flags = HEAP_VALIDATE | HEAP_VALIDATE_ALL | HEAP_VALIDATE_PARAMS | HEAP_ADD_USER_INFO;
    static const ULONG check_flags = HEAP_TAIL_CHECKING_ENABLED | HEAP_FREE_CHECKING_ENABLED | HEAP_CHECKING_ENABLED;
    SIZE_T overhead, block_size;

    if ((flags & check_flags)) overhead = BLOCK_ALIGN;
    else overhead = sizeof(struct block);

    if ((flags & HEAP_TAIL_CHECKING_ENABLED) || RUNNING_ON_VALGRIND) overhead += BLOCK_ALIGN;
    if (flags & padd_flags) overhead += BLOCK_ALIGN;

    if (size < BLOCK_ALIGN) size = BLOCK_ALIGN;
    block_size = ROUND_SIZE( size + overhead, BLOCK_ALIGN - 1 );

    if (block_size < size) return ~0U;  /* overflow */
    if (block_size < HEAP_MIN_BLOCK_SIZE) block_size = HEAP_MIN_BLOCK_SIZE;
    return block_size;
}

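/* Worked example (a sketch, assuming a 64-bit build where BLOCK_ALIGN == 16,
 * sizeof(struct block) == 8, none of the debug/validation flags set, and
 * HEAP_MIN_BLOCK_SIZE not exceeding the result): a request of 24 bytes gets
 * overhead 8, and 24 + 8 = 32 is already a multiple of BLOCK_ALIGN, so the
 * block size is 32.  With HEAP_TAIL_CHECKING_ENABLED the overhead becomes
 * BLOCK_ALIGN + BLOCK_ALIGN = 32, and the same request needs 24 + 32 = 56
 * bytes, which rounds up to 64. */
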
static NTSTATUS heap_allocate_block( struct heap *heap, ULONG flags, SIZE_T block_size, SIZE_T size, void **ret )
{
    struct block *block, *next;
    SIZE_T old_block_size;
    SUBHEAP *subheap;

    /* Locate a suitable free block */

    if (!(block = find_free_block( heap, flags, block_size ))) return STATUS_NO_MEMORY;
    /* read the free block size, changing block type or flags may alter it */
    old_block_size = block_get_size( block );
    subheap = block_get_subheap( heap, block );

    if ((next = split_block( heap, flags, block, old_block_size, block_size )))
    {
        block_init_free( next, flags, subheap, old_block_size - block_size );
        insert_free_block( heap, flags, subheap, next );
    }

    block_set_type( block, BLOCK_TYPE_USED );
    block_set_flags( block, ~0, BLOCK_USER_FLAGS( flags ) );
    block->tail_size = block_get_size( block ) - sizeof(*block) - size;
    initialize_block( block, 0, size, flags );
    mark_block_tail( block, flags );

    if ((next = next_block( subheap, block ))) block_set_flags( next, BLOCK_FLAG_PREV_FREE, 0 );

    *ret = block + 1;
    return STATUS_SUCCESS;
}

/* Low Fragmentation Heap frontend */

/* header for every LFH block group */
struct DECLSPEC_ALIGN(BLOCK_ALIGN) group
{
    SLIST_ENTRY entry;
    /* one bit for each free block and the highest bit for GROUP_FLAG_FREE */
    LONG free_bits;
    /* affinity of the thread which last allocated from this group */
    LONG affinity;
    /* first block of a group, required for alignment */
    struct block first_block;
};

#define GROUP_BLOCK_COUNT     (sizeof(((struct group *)0)->free_bits) * 8 - 1)
#define GROUP_FLAG_FREE       (1u << GROUP_BLOCK_COUNT)

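/* Illustrative layout note (assuming a 32-bit LONG for free_bits): the group
 * then holds GROUP_BLOCK_COUNT == 31 blocks, bit i of free_bits marks block i
 * as free, and bit 31 is GROUP_FLAG_FREE (0x80000000).  A freshly allocated
 * group starts with free_bits == ~GROUP_FLAG_FREE == 0x7fffffff: all 31 blocks
 * free, ownership flag clear. */
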
static inline UINT block_get_group_index( const struct block *block )
{
    return block->base_offset;
}

static inline struct group *block_get_group( const struct block *block )
{
    SIZE_T block_size = block_get_size( block );
    void *first_block = (char *)block - block_get_group_index( block ) * block_size;
    return CONTAINING_RECORD( first_block, struct group, first_block );
}

static inline void block_set_group( struct block *block, SIZE_T block_size, const struct group *group )
{
    SIZE_T offset = (char *)block - (char *)&group->first_block;
    block->base_offset = offset / block_size;
}

static inline struct block *group_get_block( struct group *group, SIZE_T block_size, UINT index )
{
    char *first_block = (char *)&group->first_block;
    return (struct block *)(first_block + index * block_size);
}

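/* Round-trip example (a sketch): blocks of a group are spaced block_size bytes
 * apart, so block i sits at (char *)&group->first_block + i * block_size.
 * block_set_group() stores base_offset == i and block_get_group() recovers the
 * group by subtracting i * block_size and applying CONTAINING_RECORD on
 * first_block.  E.g. with block_size == 0x40, the block whose base_offset is 3
 * lies 0xc0 bytes after first_block. */
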
/* lookup a free block using the group free_bits, the current thread must own the group */
static inline struct block *group_find_free_block( struct group *group, SIZE_T block_size )
{
    ULONG i, free_bits = ReadNoFence( &group->free_bits );
    /* free_bits will never be 0 as the group is unlinked when it's fully used */
    BitScanForward( &i, free_bits );
    InterlockedAnd( &group->free_bits, ~(1 << i) );
    return group_get_block( group, block_size, i );
}

/* allocate a new group block using non-LFH allocation, returns a group owned by current thread */
static struct group *group_allocate( struct heap *heap, ULONG flags, SIZE_T block_size )
{
    SIZE_T i, group_size, group_block_size;
    struct group *group;
    NTSTATUS status;

    group_size = offsetof( struct group, first_block ) + GROUP_BLOCK_COUNT * block_size;
    group_block_size = heap_get_block_size( heap, flags, group_size );

    heap_lock( heap, flags );

    if (group_block_size >= HEAP_MIN_LARGE_BLOCK_SIZE)
        status = heap_allocate_large( heap, flags & ~HEAP_ZERO_MEMORY, group_block_size, group_size, (void **)&group );
    else
        status = heap_allocate_block( heap, flags & ~HEAP_ZERO_MEMORY, group_block_size, group_size, (void **)&group );

    heap_unlock( heap, flags );

    if (status) return NULL;

    block_set_flags( (struct block *)group - 1, 0, BLOCK_FLAG_LFH );
    group->free_bits = ~GROUP_FLAG_FREE;

    for (i = 0; i < GROUP_BLOCK_COUNT; ++i)
    {
        struct block *block = group_get_block( group, block_size, i );
        valgrind_make_writable( block, sizeof(*block) );
        block_set_type( block, BLOCK_TYPE_FREE );
        block_set_flags( block, ~0, BLOCK_FLAG_FREE | BLOCK_FLAG_LFH );
        block_set_group( block, block_size, group );
        block_set_size( block, block_size );
        mark_block_free( block + 1, (char *)block + block_size - (char *)(block + 1), flags );
    }

    return group;
}

/* release a fully freed group to the non-LFH subheap, group must be owned by current thread */
static NTSTATUS group_release( struct heap *heap, ULONG flags, struct bin *bin, struct group *group )
{
    struct block *block = (struct block *)group - 1;
    NTSTATUS status;

    heap_lock( heap, flags );

    block_set_flags( block, BLOCK_FLAG_LFH, 0 );

    if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
        status = heap_free_large( heap, flags, block );
    else
        status = heap_free_block( heap, flags, block );

    heap_unlock( heap, flags );

    return status;
}

static inline ULONG heap_current_thread_affinity(void)
{
    ULONG affinity;

    if (!(affinity = NtCurrentTeb()->HeapVirtualAffinity))
    {
        affinity = InterlockedIncrement( &next_thread_affinity );
        affinity = affinity_mapping[affinity % ARRAY_SIZE(affinity_mapping)];
        NtCurrentTeb()->HeapVirtualAffinity = affinity;
    }

    return affinity;
}

/* acquire a group from the bin, thread takes ownership of a shared group or allocates a new one */
static struct group *heap_acquire_bin_group( struct heap *heap, ULONG flags, SIZE_T block_size, struct bin *bin )
{
    ULONG affinity = NtCurrentTeb()->HeapVirtualAffinity;
    struct group *group;
    SLIST_ENTRY *entry;

    if ((group = InterlockedExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), NULL )))
        return group;

    if ((entry = RtlInterlockedPopEntrySList( &bin->groups )))
        return CONTAINING_RECORD( entry, struct group, entry );

    return group_allocate( heap, flags, block_size );
}

/* release a thread owned and fully freed group to the bin shared group, or free its memory */
static NTSTATUS heap_release_bin_group( struct heap *heap, ULONG flags, struct bin *bin, struct group *group )
{
    ULONG affinity = group->affinity;

    /* using InterlockedExchangePointer here would possibly return a group that has used blocks,
     * we prefer keeping our fully freed group instead for reduced memory consumption.
     */
    if (!InterlockedCompareExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), group, NULL ))
        return STATUS_SUCCESS;

    /* try re-using the block group instead of releasing it */
    if (RtlQueryDepthSList( &bin->groups ) <= ARRAY_SIZE(affinity_mapping))
    {
        RtlInterlockedPushEntrySList( &bin->groups, &group->entry );
        return STATUS_SUCCESS;
    }

    return group_release( heap, flags, bin, group );
}

static struct block *find_free_bin_block( struct heap *heap, ULONG flags, SIZE_T block_size, struct bin *bin )
{
    ULONG affinity = heap_current_thread_affinity();
    struct block *block;
    struct group *group;

    /* acquire a group, the thread will own it and no other thread can clear free bits.
     * some other thread might still set the free bits if they are freeing blocks.
     */
    if (!(group = heap_acquire_bin_group( heap, flags, block_size, bin ))) return NULL;
    group->affinity = affinity;

    block = group_find_free_block( group, block_size );

    /* serialize with heap_free_block_lfh: atomically set GROUP_FLAG_FREE when the free bits are all 0. */
    if (ReadNoFence( &group->free_bits ) || InterlockedCompareExchange( &group->free_bits, GROUP_FLAG_FREE, 0 ))
    {
        /* if GROUP_FLAG_FREE isn't set, thread is responsible for putting it back into group list. */
        if ((group = InterlockedExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), group )))
            RtlInterlockedPushEntrySList( &bin->groups, &group->entry );
    }

    return block;
}

static NTSTATUS heap_allocate_block_lfh( struct heap *heap, ULONG flags, SIZE_T block_size,
                                         SIZE_T size, void **ret )
{
    struct bin *bin, *last = heap->bins + BLOCK_SIZE_BIN_COUNT - 1;
    struct block *block;

    bin = heap->bins + BLOCK_SIZE_BIN( block_size );
    if (bin == last) return STATUS_UNSUCCESSFUL;

    /* paired with WriteRelease in bin_try_enable. */
    if (!ReadAcquire( &bin->enabled )) return STATUS_UNSUCCESSFUL;

    block_size = BLOCK_BIN_SIZE( BLOCK_SIZE_BIN( block_size ) );

    if ((block = find_free_bin_block( heap, flags, block_size, bin )))
    {
        block_set_type( block, BLOCK_TYPE_USED );
        block_set_flags( block, ~BLOCK_FLAG_LFH, BLOCK_USER_FLAGS( flags ) );
        block->tail_size = block_size - sizeof(*block) - size;
        initialize_block( block, 0, size, flags );
        mark_block_tail( block, flags );
        *ret = block + 1;
    }

    return block ? STATUS_SUCCESS : STATUS_NO_MEMORY;
}

static NTSTATUS heap_free_block_lfh( struct heap *heap, ULONG flags, struct block *block )
{
    struct bin *bin, *last = heap->bins + BLOCK_SIZE_BIN_COUNT - 1;
    SIZE_T i, block_size = block_get_size( block );
    struct group *group = block_get_group( block );
    NTSTATUS status = STATUS_SUCCESS;

    if (!(block_get_flags( block ) & BLOCK_FLAG_LFH)) return STATUS_UNSUCCESSFUL;

    bin = heap->bins + BLOCK_SIZE_BIN( block_size );
    if (bin == last) return STATUS_UNSUCCESSFUL;

    i = block_get_group_index( block );
    valgrind_make_writable( block, sizeof(*block) );
    block_set_type( block, BLOCK_TYPE_FREE );
    block_set_flags( block, ~BLOCK_FLAG_LFH, BLOCK_FLAG_FREE );
    mark_block_free( block + 1, (char *)block + block_size - (char *)(block + 1), flags );

    /* if this was the last used block in a group and GROUP_FLAG_FREE was set */
    if (InterlockedOr( &group->free_bits, 1 << i ) == ~(1 << i))
    {
        /* thread now owns the group, and can release it to its bin */
        group->free_bits = ~GROUP_FLAG_FREE;
        status = heap_release_bin_group( heap, flags, bin, group );
    }

    return status;
}

static void bin_try_enable( struct heap *heap, struct bin *bin )
{
    ULONG alloc = ReadNoFence( &bin->count_alloc ), freed = ReadNoFence( &bin->count_freed );
    SIZE_T block_size = BLOCK_BIN_SIZE( bin - heap->bins );
    BOOL enable = FALSE;

    if (bin == heap->bins && alloc > 0x10) enable = TRUE;
    else if (bin - heap->bins < 0x30 && alloc > 0x800) enable = TRUE;
    else if (bin - heap->bins < 0x30 && alloc - freed > 0x10) enable = TRUE;
    else if (alloc - freed > 0x400000 / block_size) enable = TRUE;
    if (!enable) return;

    if (ReadNoFence( &heap->compat_info ) != HEAP_LFH)
    {
        ULONG info = HEAP_LFH;
        RtlSetHeapInformation( heap, HeapCompatibilityInformation, &info, sizeof(info) );
    }

    /* paired with ReadAcquire in heap_allocate_block_lfh.
     *
     * The acq/rel barrier on the enabled flag is protecting compat_info
     * (i.e. compat_info := LFH happens-before enabled := TRUE), so that
     * a caller that observes LFH block allocation (alloc request
     * succeeds without heap lock) will never observe HEAP_STD when it
     * queries the heap compatibility information afterwards.
     */
    WriteRelease( &bin->enabled, TRUE );
}

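/* Ordering sketch of the pairing described above (illustrative): the enabling
 * thread performs
 *     heap->compat_info = HEAP_LFH;        (via RtlSetHeapInformation)
 *     WriteRelease( &bin->enabled, TRUE );
 * while an allocating thread performs
 *     if (ReadAcquire( &bin->enabled )) ... lock-free LFH allocation ...
 * so any thread that takes the LFH path afterwards is guaranteed to read
 * HEAP_LFH from compat_info; without the release/acquire pair it could still
 * observe HEAP_STD. */
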
static void heap_thread_detach_bin_groups( struct heap *heap )
{
    ULONG i, affinity = NtCurrentTeb()->HeapVirtualAffinity;

    if (!heap->bins) return;

    for (i = 0; i < BLOCK_SIZE_BIN_COUNT; ++i)
    {
        struct bin *bin = heap->bins + i;
        struct group *group;
        if (!(group = InterlockedExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), NULL ))) continue;
        RtlInterlockedPushEntrySList( &bin->groups, &group->entry );
    }
}

void heap_thread_detach(void)
{
    struct heap *heap;

    RtlEnterCriticalSection( &process_heap->cs );

    LIST_FOR_EACH_ENTRY( heap, &process_heap->entry, struct heap, entry )
        heap_thread_detach_bin_groups( heap );

    heap_thread_detach_bin_groups( process_heap );

    RtlLeaveCriticalSection( &process_heap->cs );
}

/***********************************************************************
 *           RtlAllocateHeap   (NTDLL.@)
 */
void *WINAPI DECLSPEC_HOTPATCH RtlAllocateHeap( HANDLE handle, ULONG flags, SIZE_T size )
{
    struct heap *heap;
    SIZE_T block_size;
    void *ptr = NULL;
    ULONG heap_flags;
    NTSTATUS status;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_HANDLE;
    else if ((block_size = heap_get_block_size( heap, heap_flags, size )) == ~0U)
        status = STATUS_NO_MEMORY;
    else if (block_size >= HEAP_MIN_LARGE_BLOCK_SIZE)
        status = heap_allocate_large( heap, heap_flags, block_size, size, &ptr );
    else if (heap->bins && !heap_allocate_block_lfh( heap, heap_flags, block_size, size, &ptr ))
        status = STATUS_SUCCESS;
    else
    {
        heap_lock( heap, heap_flags );
        status = heap_allocate_block( heap, heap_flags, block_size, size, &ptr );
        heap_unlock( heap, heap_flags );

        if (!status && heap->bins)
        {
            SIZE_T bin = BLOCK_SIZE_BIN( block_get_size( (struct block *)ptr - 1 ) );
            InterlockedIncrement( &heap->bins[bin].count_alloc );
            if (!ReadNoFence( &heap->bins[bin].enabled )) bin_try_enable( heap, &heap->bins[bin] );
        }
    }

    if (!status) valgrind_notify_alloc( ptr, size, flags & HEAP_ZERO_MEMORY );

    TRACE( "handle %p, flags %#lx, size %#Ix, return %p, status %#lx.\n", handle, flags, size, ptr, status );
    heap_set_status( heap, flags, status );
    return ptr;
}

/***********************************************************************
 *           RtlFreeHeap   (NTDLL.@)
 */
BOOLEAN WINAPI DECLSPEC_HOTPATCH RtlFreeHeap( HANDLE handle, ULONG flags, void *ptr )
{
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    NTSTATUS status;

    if (!ptr) return TRUE;

    valgrind_notify_free( ptr );

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_PARAMETER;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        status = STATUS_INVALID_PARAMETER;
    else if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
        status = heap_free_large( heap, heap_flags, block );
    else if (!(block = heap_delay_free( heap, heap_flags, block )))
        status = STATUS_SUCCESS;
    else if (!heap_free_block_lfh( heap, heap_flags, block ))
        status = STATUS_SUCCESS;
    else
    {
        SIZE_T block_size = block_get_size( block ), bin = BLOCK_SIZE_BIN( block_size );

        heap_lock( heap, heap_flags );
        status = heap_free_block( heap, heap_flags, block );
        heap_unlock( heap, heap_flags );

        if (!status && heap->bins) InterlockedIncrement( &heap->bins[bin].count_freed );
    }

    TRACE( "handle %p, flags %#lx, ptr %p, return %u, status %#lx.\n", handle, flags, ptr, !status, status );
    heap_set_status( heap, flags, status );
    return !status;
}

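/* Example usage (a sketch, not code from this file): RtlFreeHeap() pairs with
 * RtlAllocateHeap() on the same heap handle and tolerates NULL:
 *
 *     void *ptr = RtlAllocateHeap( heap, 0, 0x40 );
 *     RtlFreeHeap( heap, 0, ptr );     - succeeds and returns TRUE
 *     RtlFreeHeap( heap, 0, NULL );    - a no-op that also returns TRUE
 *
 * Freeing a pointer that does not belong to the heap fails with
 * STATUS_INVALID_PARAMETER and returns FALSE. */
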
static NTSTATUS heap_resize_large( struct heap *heap, ULONG flags, struct block *block, SIZE_T block_size,
                                   SIZE_T size, SIZE_T *old_size, void **ret )
{
    ARENA_LARGE *large = CONTAINING_RECORD( block, ARENA_LARGE, block );
    SIZE_T old_block_size = large->block_size;
    *old_size = large->data_size;

    if (old_block_size < block_size) return STATUS_NO_MEMORY;

    /* FIXME: we could remap zero-pages instead */
    valgrind_notify_resize( block + 1, *old_size, size );
    initialize_block( block, *old_size, size, flags );

    large->data_size = size;
    valgrind_make_noaccess( (char *)block + sizeof(*block) + large->data_size,
                            old_block_size - sizeof(*block) - large->data_size );

    *ret = block + 1;
    return STATUS_SUCCESS;
}

static NTSTATUS heap_resize_block( struct heap *heap, ULONG flags, struct block *block, SIZE_T block_size,
                                   SIZE_T size, SIZE_T old_block_size, SIZE_T *old_size, void **ret )
{
    SUBHEAP *subheap = block_get_subheap( heap, block );
    struct block *next;

    if (block_size > old_block_size)
    {
        /* need to grow block, make sure it's followed by large enough free block */
        if (!(next = next_block( subheap, block ))) return STATUS_NO_MEMORY;
        if (!(block_get_flags( next ) & BLOCK_FLAG_FREE)) return STATUS_NO_MEMORY;
        if (block_size > old_block_size + block_get_size( next )) return STATUS_NO_MEMORY;
        if (!subheap_commit( heap, subheap, block, block_size )) return STATUS_NO_MEMORY;
    }

    if ((next = next_block( subheap, block )) && (block_get_flags( next ) & BLOCK_FLAG_FREE))
    {
        /* merge with next block if it is free */
        struct entry *entry = (struct entry *)next;
        list_remove( &entry->entry );
        old_block_size += block_get_size( next );
    }

    if ((next = split_block( heap, flags, block, old_block_size, block_size )))
    {
        block_init_free( next, flags, subheap, old_block_size - block_size );
        insert_free_block( heap, flags, subheap, next );
    }

    valgrind_notify_resize( block + 1, *old_size, size );
    block_set_flags( block, BLOCK_FLAG_USER_MASK & ~BLOCK_FLAG_USER_INFO, BLOCK_USER_FLAGS( flags ) );
    block->tail_size = block_get_size( block ) - sizeof(*block) - size;
    initialize_block( block, *old_size, size, flags );
    mark_block_tail( block, flags );

    if ((next = next_block( subheap, block ))) block_set_flags( next, BLOCK_FLAG_PREV_FREE, 0 );

    *ret = block + 1;
    return STATUS_SUCCESS;
}

static NTSTATUS heap_resize_block_lfh( struct block *block, ULONG flags, SIZE_T block_size, SIZE_T size, SIZE_T *old_size, void **ret )
{
    /* as native LFH does it with different block size: refuse to resize even though we could */
    if (ROUND_SIZE( *old_size, BLOCK_ALIGN - 1 ) != ROUND_SIZE( size, BLOCK_ALIGN - 1 )) return STATUS_NO_MEMORY;
    if (size >= *old_size) return STATUS_NO_MEMORY;

    block_set_flags( block, BLOCK_FLAG_USER_MASK & ~BLOCK_FLAG_USER_INFO, BLOCK_USER_FLAGS( flags ) );
    block->tail_size = block_size - sizeof(*block) - size;
    initialize_block( block, *old_size, size, flags );
    mark_block_tail( block, flags );

    *ret = block + 1;
    return STATUS_SUCCESS;
}

static NTSTATUS heap_resize_in_place( struct heap *heap, ULONG flags, struct block *block, SIZE_T block_size,
                                      SIZE_T size, SIZE_T *old_size, void **ret )
{
    SIZE_T old_bin, old_block_size;
    NTSTATUS status;

    if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
        return heap_resize_large( heap, flags, block, block_size, size, old_size, ret );

    old_block_size = block_get_size( block );
    *old_size = old_block_size - block_get_overhead( block );
    old_bin = BLOCK_SIZE_BIN( old_block_size );

    if (block_size >= HEAP_MIN_LARGE_BLOCK_SIZE) return STATUS_NO_MEMORY;  /* growing small block to large block */

    if (block_get_flags( block ) & BLOCK_FLAG_LFH)
        return heap_resize_block_lfh( block, flags, block_size, size, old_size, ret );

    heap_lock( heap, flags );
    status = heap_resize_block( heap, flags, block, block_size, size, old_block_size, old_size, ret );
    heap_unlock( heap, flags );

    if (!status && heap->bins)
    {
        SIZE_T new_bin = BLOCK_SIZE_BIN( block_size );
        InterlockedIncrement( &heap->bins[old_bin].count_freed );
        InterlockedIncrement( &heap->bins[new_bin].count_alloc );
        if (!ReadNoFence( &heap->bins[new_bin].enabled )) bin_try_enable( heap, &heap->bins[new_bin] );
    }

    return status;
}

/***********************************************************************
 *           RtlReAllocateHeap   (NTDLL.@)
 */
void *WINAPI RtlReAllocateHeap( HANDLE handle, ULONG flags, void *ptr, SIZE_T size )
{
    SIZE_T block_size, old_size;
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    void *ret = NULL;
    NTSTATUS status;

    if (!ptr) return NULL;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_HANDLE;
    else if ((block_size = heap_get_block_size( heap, heap_flags, size )) == ~0U)
        status = STATUS_NO_MEMORY;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        status = STATUS_INVALID_PARAMETER;
    else if ((status = heap_resize_in_place( heap, heap_flags, block, block_size, size,
                                             &old_size, &ret )))
    {
        if (flags & HEAP_REALLOC_IN_PLACE_ONLY)
            status = STATUS_NO_MEMORY;
        else if (!(ret = RtlAllocateHeap( heap, flags, size )))
            status = STATUS_NO_MEMORY;
        else
        {
            memcpy( ret, ptr, min( size, old_size ) );
            RtlFreeHeap( heap, flags, ptr );
            status = STATUS_SUCCESS;
        }
    }

    TRACE( "handle %p, flags %#lx, ptr %p, size %#Ix, return %p, status %#lx.\n", handle, flags, ptr, size, ret, status );
    heap_set_status( heap, flags, status );
    return ret;
}

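/* Example usage (a sketch): growing a block, with the fallback of allocate +
 * copy + free when the block cannot be resized in place:
 *
 *     void *ptr = RtlAllocateHeap( heap, 0, 0x100 );
 *     ptr = RtlReAllocateHeap( heap, HEAP_ZERO_MEMORY, ptr, 0x200 );
 *
 * With HEAP_REALLOC_IN_PLACE_ONLY the fallback is skipped and NULL is returned
 * when in-place resizing fails; the original block then stays untouched. */
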
/***********************************************************************
 *           RtlCompactHeap   (NTDLL.@)
 *
 * Compact the free space in a Heap.
 *
 * PARAMS
 *  heap  [I] Heap that block was allocated from
 *  flags [I] HEAP_ flags from "winnt.h"
 *
 * RETURNS
 *  The number of bytes compacted.
 *
 * NOTES
 *  This function is a harmless stub.
 */
ULONG WINAPI RtlCompactHeap( HANDLE handle, ULONG flags )
{
    static BOOL reported;
    if (!reported++) FIXME( "handle %p, flags %#lx stub!\n", handle, flags );
    return 0;
}

/***********************************************************************
 *           RtlLockHeap   (NTDLL.@)
 *
 * Lock a Heap.
 *
 * PARAMS
 *  heap [I] Heap to lock
 *
 * RETURNS
 *  Success: TRUE. The Heap is locked.
 *  Failure: FALSE, if heap is invalid.
 */
BOOLEAN WINAPI RtlLockHeap( HANDLE handle )
{
    struct heap *heap;
    ULONG heap_flags;

    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags ))) return FALSE;
    heap_lock( heap, heap_flags );
    return TRUE;
}

/***********************************************************************
 *           RtlUnlockHeap   (NTDLL.@)
 *
 * Unlock a Heap.
 *
 * PARAMS
 *  heap [I] Heap to unlock
 *
 * RETURNS
 *  Success: TRUE. The Heap is unlocked.
 *  Failure: FALSE, if heap is invalid.
 */
BOOLEAN WINAPI RtlUnlockHeap( HANDLE handle )
{
    struct heap *heap;
    ULONG heap_flags;

    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags ))) return FALSE;
    heap_unlock( heap, heap_flags );
    return TRUE;
}

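/* Example usage (a sketch): RtlLockHeap()/RtlUnlockHeap() bracket a sequence of
 * operations that must not interleave with other threads, e.g. a heap walk:
 *
 *     if (RtlLockHeap( heap ))
 *     {
 *         ... RtlWalkHeap( heap, &entry ) loop, RtlSizeHeap() calls, ...
 *         RtlUnlockHeap( heap );
 *     }
 *
 * For heaps using HEAP_NO_SERIALIZE, heap_lock()/heap_unlock() skip the
 * critical section, so these calls do not serialize anything there. */
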
static NTSTATUS heap_size( const struct heap *heap, struct block *block, SIZE_T *size )
{
    if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
    {
        const ARENA_LARGE *large_arena = CONTAINING_RECORD( block, ARENA_LARGE, block );
        *size = large_arena->data_size;
    }
    else *size = block_get_size( block ) - block_get_overhead( block );

    return STATUS_SUCCESS;
}

/***********************************************************************
 *           RtlSizeHeap   (NTDLL.@)
 */
SIZE_T WINAPI RtlSizeHeap( HANDLE handle, ULONG flags, const void *ptr )
{
    SIZE_T size = ~(SIZE_T)0;
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    NTSTATUS status;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_PARAMETER;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        status = STATUS_INVALID_PARAMETER;
    else
    {
        heap_lock( heap, heap_flags );
        status = heap_size( heap, block, &size );
        heap_unlock( heap, heap_flags );
    }

    TRACE( "handle %p, flags %#lx, ptr %p, return %#Ix, status %#lx.\n", handle, flags, ptr, size, status );
    heap_set_status( heap, flags, status );
    return size;
}

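/* Example usage (a sketch): querying the usable size of a block:
 *
 *     SIZE_T size = RtlSizeHeap( heap, 0, ptr );
 *     if (size == ~(SIZE_T)0) ... invalid heap or pointer ...
 *
 * On failure the return value is ~(SIZE_T)0, i.e. (SIZE_T)-1, so callers must
 * treat that value as an error rather than as a huge size. */
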
/***********************************************************************
 *           RtlValidateHeap   (NTDLL.@)
 */
BOOLEAN WINAPI RtlValidateHeap( HANDLE handle, ULONG flags, const void *ptr )
{
    struct heap *heap;
    ULONG heap_flags;
    BOOLEAN ret;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        ret = FALSE;
    else
    {
        heap_lock( heap, heap_flags );
        if (ptr) ret = heap_validate_ptr( heap, ptr );
        else ret = heap_validate( heap );
        heap_unlock( heap, heap_flags );
    }

    TRACE( "handle %p, flags %#lx, ptr %p, return %u.\n", handle, flags, ptr, !!ret );
    return ret;
}

static NTSTATUS heap_walk_blocks( const struct heap *heap, const SUBHEAP *subheap,
                                  const struct block *block, struct rtl_heap_entry *entry )
{
    const char *base = subheap_base( subheap ), *commit_end = subheap_commit_end( subheap ), *end = base + subheap_size( subheap );
    const struct block *blocks = first_block( subheap );

    if (entry->lpData == commit_end) return STATUS_NO_MORE_ENTRIES;
    if (entry->lpData == base) block = blocks;
    else if (!(block = next_block( subheap, block )))
    {
        entry->lpData = (void *)commit_end;
        entry->cbData = end - commit_end;
        entry->cbOverhead = 0;
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_UNCOMMITTED;
        return STATUS_SUCCESS;
    }

    if (block_get_flags( block ) & BLOCK_FLAG_FREE)
    {
        entry->lpData = (char *)block + block_get_overhead( block );
        entry->cbData = block_get_size( block ) - block_get_overhead( block );
        /* FIXME: last free block should not include uncommitted range, which also has its own overhead */
        if (!contains( blocks, commit_end - 4 * BLOCK_ALIGN - (char *)blocks, block, block_get_size( block ) ))
            entry->cbData = commit_end - 4 * BLOCK_ALIGN - (char *)entry->lpData;
        entry->cbOverhead = 2 * BLOCK_ALIGN;
        entry->iRegionIndex = 0;
        entry->wFlags = 0;
    }
    else
    {
        entry->lpData = (void *)(block + 1);
        entry->cbData = block_get_size( block ) - block_get_overhead( block );
        entry->cbOverhead = block_get_overhead( block );
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_COMMITTED|RTL_HEAP_ENTRY_BLOCK|RTL_HEAP_ENTRY_BUSY;
    }

    return STATUS_SUCCESS;
}

static NTSTATUS heap_walk( const struct heap *heap, struct rtl_heap_entry *entry )
{
    const char *data = entry->lpData;
    const ARENA_LARGE *large = NULL;
    const struct block *block;
    const struct list *next;
    const SUBHEAP *subheap;
    NTSTATUS status;
    char *base;

    if (!data || entry->wFlags & RTL_HEAP_ENTRY_REGION) block = (struct block *)data;
    else if (entry->wFlags & RTL_HEAP_ENTRY_BUSY) block = (struct block *)data - 1;
    else block = (struct block *)(data - sizeof(struct list)) - 1;

    if (find_large_block( heap, block ))
    {
        large = CONTAINING_RECORD( block, ARENA_LARGE, block );
        next = &large->entry;
    }
    else if ((subheap = find_subheap( heap, block, TRUE )))
    {
        if (!(status = heap_walk_blocks( heap, subheap, block, entry ))) return STATUS_SUCCESS;
        else if (status != STATUS_NO_MORE_ENTRIES) return status;
        next = &subheap->entry;
    }
    else
    {
        if (entry->lpData) return STATUS_INVALID_PARAMETER;
        next = &heap->subheap_list;
    }

    if (!large && (next = list_next( &heap->subheap_list, next )))
    {
        subheap = LIST_ENTRY( next, SUBHEAP, entry );
        base = subheap_base( subheap );
        entry->lpData = base;
        entry->cbData = subheap_overhead( subheap );
        entry->cbOverhead = 0;
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_REGION;
        entry->Region.dwCommittedSize = (char *)subheap_commit_end( subheap ) - base;
        entry->Region.dwUnCommittedSize = subheap_size( subheap ) - entry->Region.dwCommittedSize;
        entry->Region.lpFirstBlock = base + entry->cbData;
        entry->Region.lpLastBlock = base + subheap_size( subheap );
        return STATUS_SUCCESS;
    }

    if (!next) next = &heap->large_list;
    if ((next = list_next( &heap->large_list, next )))
    {
        large = LIST_ENTRY( next, ARENA_LARGE, entry );
        entry->lpData = (void *)(large + 1);
        entry->cbData = large->data_size;
        entry->cbOverhead = 0;
        entry->iRegionIndex = 64;
        entry->wFlags = RTL_HEAP_ENTRY_COMMITTED|RTL_HEAP_ENTRY_BLOCK|RTL_HEAP_ENTRY_BUSY;
        return STATUS_SUCCESS;
    }

    return STATUS_NO_MORE_ENTRIES;
}

/***********************************************************************
 *           RtlWalkHeap    (NTDLL.@)
 */
NTSTATUS WINAPI RtlWalkHeap( HANDLE handle, void *entry_ptr )
{
    struct rtl_heap_entry *entry = entry_ptr;
    struct heap *heap;
    ULONG heap_flags;
    NTSTATUS status;

    if (!entry) return STATUS_INVALID_PARAMETER;

    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags )))
        status = STATUS_INVALID_HANDLE;
    else
    {
        heap_lock( heap, heap_flags );
        status = heap_walk( heap, entry );
        heap_unlock( heap, heap_flags );
    }

    TRACE( "handle %p, entry %p %s, return %#lx\n", handle, entry,
           status ? "<empty>" : debugstr_heap_entry(entry), status );
    return status;
}

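/* Example usage (a sketch): enumerating all entries of a heap.  The walk is
 * started from a zeroed entry and ends with STATUS_NO_MORE_ENTRIES; locking
 * the heap keeps the enumeration consistent:
 *
 *     struct rtl_heap_entry entry = {0};
 *     RtlLockHeap( heap );
 *     while (!RtlWalkHeap( heap, &entry ))
 *         ... inspect entry.lpData, entry.cbData, entry.wFlags ...
 *     RtlUnlockHeap( heap );
 */
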
/***********************************************************************
 *           RtlGetProcessHeaps    (NTDLL.@)
 *
 * Get the Heaps belonging to the current process.
 *
 * PARAMS
 *  count [I] size of heaps
 *  heaps [O] Destination array for heap HANDLE's
 *
 * RETURNS
 *  Success: The number of Heaps allocated by the process.
 *  Failure: 0.
 */
ULONG WINAPI RtlGetProcessHeaps( ULONG count, HANDLE *heaps )
{
    ULONG total = 1;  /* main heap */
    struct list *ptr;

    RtlEnterCriticalSection( &process_heap->cs );
    LIST_FOR_EACH( ptr, &process_heap->entry ) total++;
    if (total <= count)
    {
        *heaps++ = process_heap;
        LIST_FOR_EACH( ptr, &process_heap->entry )
            *heaps++ = LIST_ENTRY( ptr, struct heap, entry );
    }
    RtlLeaveCriticalSection( &process_heap->cs );
    return total;
}

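/* Example usage (a sketch): the return value is the total number of heaps,
 * whether or not they fit; the array is only filled when count is sufficient:
 *
 *     HANDLE heaps[64];
 *     ULONG total = RtlGetProcessHeaps( ARRAY_SIZE(heaps), heaps );
 *     if (total <= ARRAY_SIZE(heaps)) ... heaps[0..total-1] are valid ...
 *
 * heaps[0] is always the process main heap. */
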
/***********************************************************************
 *           RtlQueryHeapInformation    (NTDLL.@)
 */
NTSTATUS WINAPI RtlQueryHeapInformation( HANDLE handle, HEAP_INFORMATION_CLASS info_class,
                                         void *info, SIZE_T size_in, SIZE_T *size_out )
{
    struct heap *heap;
    ULONG flags;

    TRACE( "handle %p, info_class %u, info %p, size_in %Iu, size_out %p.\n", handle, info_class, info, size_in, size_out );

    switch (info_class)
    {
    case HeapCompatibilityInformation:
        if (!(heap = unsafe_heap_from_handle( handle, 0, &flags ))) return STATUS_ACCESS_VIOLATION;
        if (size_out) *size_out = sizeof(ULONG);
        if (size_in < sizeof(ULONG)) return STATUS_BUFFER_TOO_SMALL;
        *(ULONG *)info = ReadNoFence( &heap->compat_info );
        return STATUS_SUCCESS;

    default:
        FIXME( "HEAP_INFORMATION_CLASS %u not implemented!\n", info_class );
        return STATUS_INVALID_INFO_CLASS;
    }
}

/***********************************************************************
 *           RtlSetHeapInformation    (NTDLL.@)
 */
NTSTATUS WINAPI RtlSetHeapInformation( HANDLE handle, HEAP_INFORMATION_CLASS info_class, void *info, SIZE_T size )
{
    struct heap *heap;
    ULONG flags;

    TRACE( "handle %p, info_class %u, info %p, size %Iu.\n", handle, info_class, info, size );

    switch (info_class)
    {
    case HeapCompatibilityInformation:
    {
        ULONG compat_info;

        if (size < sizeof(ULONG)) return STATUS_BUFFER_TOO_SMALL;
        if (!(heap = unsafe_heap_from_handle( handle, 0, &flags ))) return STATUS_INVALID_HANDLE;
        if (heap->flags & HEAP_NO_SERIALIZE) return STATUS_INVALID_PARAMETER;

        compat_info = *(ULONG *)info;
        if (compat_info != HEAP_STD && compat_info != HEAP_LFH)
        {
            FIXME( "HeapCompatibilityInformation %lu not implemented!\n", compat_info );
            return STATUS_UNSUCCESSFUL;
        }
        if (InterlockedCompareExchange( &heap->compat_info, compat_info, HEAP_STD ) != HEAP_STD)
            return STATUS_UNSUCCESSFUL;
        return STATUS_SUCCESS;
    }

    default:
        FIXME( "HEAP_INFORMATION_CLASS %u not implemented!\n", info_class );
        return STATUS_SUCCESS;
    }
}

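/* Example usage (a sketch): opting a heap into the LFH frontend and reading
 * the setting back.  The switch only succeeds once, from HEAP_STD to HEAP_LFH,
 * and is refused for HEAP_NO_SERIALIZE heaps:
 *
 *     ULONG mode = HEAP_LFH;
 *     RtlSetHeapInformation( heap, HeapCompatibilityInformation, &mode, sizeof(mode) );
 *     RtlQueryHeapInformation( heap, HeapCompatibilityInformation, &mode, sizeof(mode), NULL );
 *     ... mode is HEAP_LFH on success ...
 */
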
/***********************************************************************
 *           RtlGetUserInfoHeap    (NTDLL.@)
 */
BOOLEAN WINAPI RtlGetUserInfoHeap( HANDLE handle, ULONG flags, void *ptr, void **user_value, ULONG *user_flags )
{
    NTSTATUS status = STATUS_SUCCESS;
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    char *tmp;

    TRACE( "handle %p, flags %#lx, ptr %p, user_value %p, user_flags %p semi-stub!\n",
           handle, flags, ptr, user_value, user_flags );

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_SUCCESS;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
    {
        status = STATUS_INVALID_PARAMETER;
        *user_value = 0;
    }
    else if (!(*user_flags = HEAP_USER_FLAGS(block_get_flags( block ))))
        WARN( "Block %p wasn't allocated with user info\n", ptr );
    else if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
    {
        const ARENA_LARGE *large = CONTAINING_RECORD( block, ARENA_LARGE, block );
        *user_flags = *user_flags & ~HEAP_ADD_USER_INFO;
        *user_value = large->user_value;
    }
    else
    {
        heap_lock( heap, heap_flags );

        tmp = (char *)block + block_get_size( block ) - block->tail_size + sizeof(void *);
        if ((heap_flags & HEAP_TAIL_CHECKING_ENABLED) || RUNNING_ON_VALGRIND) tmp += BLOCK_ALIGN;
        *user_flags = *user_flags & ~HEAP_ADD_USER_INFO;
        *user_value = *(void **)tmp;

        heap_unlock( heap, heap_flags );
    }

    heap_set_status( heap, flags, status );
    return !status;
}

/***********************************************************************
 *           RtlSetUserValueHeap    (NTDLL.@)
 */
BOOLEAN WINAPI RtlSetUserValueHeap( HANDLE handle, ULONG flags, void *ptr, void *user_value )
{
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    BOOLEAN ret;
    char *tmp;

    TRACE( "handle %p, flags %#lx, ptr %p, user_value %p.\n", handle, flags, ptr, user_value );

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        ret = TRUE;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        ret = FALSE;
    else if (!(block_get_flags( block ) & BLOCK_FLAG_USER_INFO))
        ret = FALSE;
    else if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
    {
        ARENA_LARGE *large = CONTAINING_RECORD( block, ARENA_LARGE, block );
        large->user_value = user_value;
        ret = TRUE;
    }
    else
    {
        heap_lock( heap, heap_flags );

        tmp = (char *)block + block_get_size( block ) - block->tail_size + sizeof(void *);
        if ((heap_flags & HEAP_TAIL_CHECKING_ENABLED) || RUNNING_ON_VALGRIND) tmp += BLOCK_ALIGN;
        *(void **)tmp = user_value;
        ret = TRUE;

        heap_unlock( heap, heap_flags );
    }

    return ret;
}

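/* Example usage (a sketch; "context" stands for any caller-side pointer): a
 * user value can only be attached to blocks allocated with HEAP_ADD_USER_INFO,
 * which is what sets BLOCK_FLAG_USER_INFO on the block:
 *
 *     void *ptr = RtlAllocateHeap( heap, HEAP_ADD_USER_INFO, 0x80 );
 *     RtlSetUserValueHeap( heap, 0, ptr, context );
 *     ...
 *     void *value; ULONG user_flags;
 *     RtlGetUserInfoHeap( heap, 0, ptr, &value, &user_flags );
 *     ... value == context ...
 */
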
/***********************************************************************
 *           RtlSetUserFlagsHeap    (NTDLL.@)
 */
BOOLEAN WINAPI RtlSetUserFlagsHeap( HANDLE handle, ULONG flags, void *ptr, ULONG clear, ULONG set )
{
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    BOOLEAN ret;

    TRACE( "handle %p, flags %#lx, ptr %p, clear %#lx, set %#lx.\n", handle, flags, ptr, clear, set );

    if ((clear | set) & ~(0xe00))
    {
        SetLastError( ERROR_INVALID_PARAMETER );
        return FALSE;
    }

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        ret = TRUE;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        ret = FALSE;
    else if (!(block_get_flags( block ) & BLOCK_FLAG_USER_INFO))
        ret = FALSE;
    else
    {
        block_set_flags( block, BLOCK_USER_FLAGS( clear ), BLOCK_USER_FLAGS( set ) );