4 * Copyright 1996 Alexandre Julliard
5 * Copyright 1998 Ulrich Weigand
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
28 #define RUNNING_ON_VALGRIND 0 /* FIXME */
31 #define WIN32_NO_STATUS
35 #include "ntdll_misc.h"
36 #include "wine/list.h"
37 #include "wine/debug.h"
39 WINE_DEFAULT_DEBUG_CHANNEL(heap
);
41 /* HeapCompatibilityInformation values */
48 /* undocumented RtlWalkHeap structure */
53 SIZE_T cbData
; /* differs from PROCESS_HEAP_ENTRY */
56 WORD wFlags
; /* value differs from PROCESS_HEAP_ENTRY */
63 DWORD dwCommittedSize
;
64 DWORD dwUnCommittedSize
;
71 /* rtl_heap_entry flags, names made up */
73 #define RTL_HEAP_ENTRY_BUSY 0x0001
74 #define RTL_HEAP_ENTRY_REGION 0x0002
75 #define RTL_HEAP_ENTRY_BLOCK 0x0010
76 #define RTL_HEAP_ENTRY_UNCOMMITTED 0x1000
77 #define RTL_HEAP_ENTRY_COMMITTED 0x4000
78 #define RTL_HEAP_ENTRY_LFH 0x8000
81 /* header for heap blocks */
83 #define REGION_ALIGN 0x10000
84 #define BLOCK_ALIGN (2 * sizeof(void *))
86 struct DECLSPEC_ALIGN(8) block
88 /* block size in multiple of BLOCK_ALIGN */
90 /* unused size (used block) / high size bits (free block) */
92 /* offset to region base / first group block (LFH block) */
98 C_ASSERT( sizeof(struct block
) == 8 );
100 /* block specific flags */
102 #define BLOCK_FLAG_FREE 0x01
103 #define BLOCK_FLAG_PREV_FREE 0x02
104 #define BLOCK_FLAG_FREE_LINK 0x03
105 #define BLOCK_FLAG_LARGE 0x04
106 #define BLOCK_FLAG_LFH 0x80 /* block is handled by the LFH frontend */
107 #define BLOCK_FLAG_USER_INFO 0x08 /* user flags bits 3-6 */
108 #define BLOCK_FLAG_USER_MASK 0x78
110 #define BLOCK_USER_FLAGS( heap_flags ) (((heap_flags) >> 5) & BLOCK_FLAG_USER_MASK)
111 #define HEAP_USER_FLAGS( block_flags ) (((block_flags) & BLOCK_FLAG_USER_MASK) << 5)
113 /* entry to link free blocks in free lists */
115 struct DECLSPEC_ALIGN(BLOCK_ALIGN
) entry
121 C_ASSERT( sizeof(struct entry
) == 2 * BLOCK_ALIGN
);
125 SIZE_T __pad
[sizeof(SIZE_T
) / sizeof(DWORD
)];
126 struct list entry
; /* entry in heap large blocks list */
127 SIZE_T data_size
; /* size of user data */
128 SIZE_T block_size
; /* total size of virtual memory block */
133 /* block must be last and aligned */
134 C_ASSERT( sizeof(ARENA_LARGE
) == offsetof(ARENA_LARGE
, block
) + sizeof(struct block
) );
135 C_ASSERT( sizeof(ARENA_LARGE
) == 4 * BLOCK_ALIGN
);
137 #define BLOCK_TYPE_USED 'u'
138 #define BLOCK_TYPE_DEAD 'D'
139 #define BLOCK_TYPE_FREE 'F'
140 #define BLOCK_TYPE_LARGE 'L'
142 #define BLOCK_FILL_USED 0xbaadf00d
143 #define BLOCK_FILL_TAIL 0xab
144 #define BLOCK_FILL_FREE 0xfeeefeee
146 #define ROUND_ADDR(addr, mask) ((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))
147 #define ROUND_SIZE(size, mask) ((((SIZE_T)(size) + (mask)) & ~(SIZE_T)(mask)))
148 #define FIELD_BITS(type, field) (sizeof(((type *)0)->field) * 8)
149 #define FIELD_MAX(type, field) (((SIZE_T)1 << FIELD_BITS(type, field)) - 1)
151 #define HEAP_MIN_BLOCK_SIZE ROUND_SIZE(sizeof(struct entry) + BLOCK_ALIGN, BLOCK_ALIGN - 1)
153 C_ASSERT( sizeof(struct block
) <= HEAP_MIN_BLOCK_SIZE
);
154 C_ASSERT( sizeof(struct entry
) <= HEAP_MIN_BLOCK_SIZE
);
156 /* used block size is coded into block_size */
157 #define HEAP_MAX_USED_BLOCK_SIZE (FIELD_MAX( struct block, block_size ) * BLOCK_ALIGN)
158 /* free block size is coded into block_size + tail_size */
159 #define HEAP_MAX_FREE_BLOCK_SIZE (HEAP_MAX_USED_BLOCK_SIZE + (FIELD_MAX( struct block, tail_size ) << 16) * BLOCK_ALIGN)
160 /* subheap distance is coded into base_offset */
161 #define HEAP_MAX_BLOCK_REGION_SIZE (FIELD_MAX( struct block, base_offset ) * REGION_ALIGN)
163 C_ASSERT( HEAP_MAX_USED_BLOCK_SIZE
== 512 * 1024 * (sizeof(void *) / 4) - BLOCK_ALIGN
);
164 C_ASSERT( HEAP_MAX_FREE_BLOCK_SIZE
>= 128 * 1024 * 1024 * (sizeof(void *) / 4) - BLOCK_ALIGN
);
165 C_ASSERT( HEAP_MAX_BLOCK_REGION_SIZE
>= 128 * 1024 * 1024 * (sizeof(void *) / 4) - BLOCK_ALIGN
);
166 C_ASSERT( HEAP_MAX_FREE_BLOCK_SIZE
>= HEAP_MAX_BLOCK_REGION_SIZE
);
168 /* minimum size to start allocating large blocks */
169 #define HEAP_MIN_LARGE_BLOCK_SIZE (HEAP_MAX_USED_BLOCK_SIZE - 0x1000)
171 #define FREE_LIST_LINEAR_BITS 2
172 #define FREE_LIST_LINEAR_MASK ((1 << FREE_LIST_LINEAR_BITS) - 1)
173 #define FREE_LIST_COUNT ((FIELD_BITS( struct block, block_size ) - FREE_LIST_LINEAR_BITS + 1) * (1 << FREE_LIST_LINEAR_BITS) + 1)
174 /* for reference, update this when changing parameters */
175 C_ASSERT( FREE_LIST_COUNT
== 0x3d );
177 typedef struct DECLSPEC_ALIGN(BLOCK_ALIGN
) tagSUBHEAP
179 SIZE_T __pad
[sizeof(SIZE_T
) / sizeof(DWORD
)];
187 /* block must be last and aligned */
188 C_ASSERT( sizeof(SUBHEAP
) == offsetof(SUBHEAP
, block
) + sizeof(struct block
) );
189 C_ASSERT( sizeof(SUBHEAP
) == 4 * BLOCK_ALIGN
);
192 /* LFH block size bins */
194 #define BIN_SIZE_MIN_0 0
195 #define BIN_SIZE_MIN_1 0x100
196 #define BIN_SIZE_MIN_2 0x200
197 #define BIN_SIZE_MIN_3 0x400
198 #define BIN_SIZE_MIN_4 0x800
199 #define BIN_SIZE_MIN_5 0x1000
200 #define BIN_SIZE_MIN_6 0x2000
201 #define BIN_SIZE_MIN_7 0x4000
202 #define BIN_SIZE_MAX 0x8000
204 #define BIN_SIZE_STEP_0 (16)
205 #define BIN_SIZE_STEP_1 (BIN_SIZE_MIN_1 >> 4)
206 #define BIN_SIZE_STEP_2 (BIN_SIZE_MIN_2 >> 4)
207 #define BIN_SIZE_STEP_3 (BIN_SIZE_MIN_3 >> 4)
208 #define BIN_SIZE_STEP_4 (BIN_SIZE_MIN_4 >> 4)
209 #define BIN_SIZE_STEP_5 (BIN_SIZE_MIN_5 >> 4)
210 #define BIN_SIZE_STEP_6 (BIN_SIZE_MIN_6 >> 4)
211 #define BIN_SIZE_STEP_7 (BIN_SIZE_MIN_7 >> 4)
213 #define BLOCK_BIN_SIZE_N( n, bin ) (BIN_SIZE_MIN_##n + (bin + 1) * BIN_SIZE_STEP_##n)
214 #define BLOCK_SIZE_BIN_N( n, size ) (((size) - 1 - BIN_SIZE_MIN_##n) / BIN_SIZE_STEP_##n)
216 #define BLOCK_BIN_SIZE( bin ) ((bin) >= 0x80 ? ~(SIZE_T)0 : \
217 (bin) >= 0x70 ? BLOCK_BIN_SIZE_N( 7, bin - 0x70 ) : \
218 (bin) >= 0x60 ? BLOCK_BIN_SIZE_N( 6, bin - 0x60 ) : \
219 (bin) >= 0x50 ? BLOCK_BIN_SIZE_N( 5, bin - 0x50 ) : \
220 (bin) >= 0x40 ? BLOCK_BIN_SIZE_N( 4, bin - 0x40 ) : \
221 (bin) >= 0x30 ? BLOCK_BIN_SIZE_N( 3, bin - 0x30 ) : \
222 (bin) >= 0x20 ? BLOCK_BIN_SIZE_N( 2, bin - 0x20 ) : \
223 (bin) >= 0x10 ? BLOCK_BIN_SIZE_N( 1, bin - 0x10 ) : \
224 BLOCK_BIN_SIZE_N( 0, bin ))
226 #define BLOCK_SIZE_BIN( size ) ((size) > BIN_SIZE_MAX ? 0x80 : \
227 (size) > BIN_SIZE_MIN_7 ? 0x70 + BLOCK_SIZE_BIN_N( 7, size ) : \
228 (size) > BIN_SIZE_MIN_6 ? 0x60 + BLOCK_SIZE_BIN_N( 6, size ) : \
229 (size) > BIN_SIZE_MIN_5 ? 0x50 + BLOCK_SIZE_BIN_N( 5, size ) : \
230 (size) > BIN_SIZE_MIN_4 ? 0x40 + BLOCK_SIZE_BIN_N( 4, size ) : \
231 (size) > BIN_SIZE_MIN_3 ? 0x30 + BLOCK_SIZE_BIN_N( 3, size ) : \
232 (size) > BIN_SIZE_MIN_2 ? 0x20 + BLOCK_SIZE_BIN_N( 2, size ) : \
233 (size) > BIN_SIZE_MIN_1 ? 0x10 + BLOCK_SIZE_BIN_N( 1, size ) : \
234 (size) <= BIN_SIZE_MIN_0 ? 0 : BLOCK_SIZE_BIN_N( 0, size ))
236 #define BLOCK_SIZE_BIN_COUNT (BLOCK_SIZE_BIN( BIN_SIZE_MAX + 1 ) + 1)
238 /* macros sanity checks */
239 C_ASSERT( BLOCK_SIZE_BIN( 0 ) == 0 );
240 C_ASSERT( BLOCK_SIZE_BIN( 0x10 ) == 0 );
241 C_ASSERT( BLOCK_BIN_SIZE( 0 ) == BIN_SIZE_MIN_0
+ 1 * BIN_SIZE_STEP_0
);
242 C_ASSERT( BLOCK_SIZE_BIN( 0x11 ) == 1 );
243 C_ASSERT( BLOCK_BIN_SIZE( 1 ) == BIN_SIZE_MIN_0
+ 2 * BIN_SIZE_STEP_0
);
244 C_ASSERT( BLOCK_SIZE_BIN( BIN_SIZE_MAX
) == 0x7f );
245 C_ASSERT( BLOCK_BIN_SIZE( 0x7f ) == BIN_SIZE_MAX
);
246 C_ASSERT( BLOCK_SIZE_BIN( BIN_SIZE_MAX
+ 1 ) == 0x80 );
247 C_ASSERT( BLOCK_BIN_SIZE( 0x80 ) == ~(SIZE_T
)0 );
249 /* difference between block classes and all possible validation overhead must fit into block tail_size */
250 C_ASSERT( BIN_SIZE_STEP_7
+ 3 * BLOCK_ALIGN
<= FIELD_MAX( struct block
, tail_size
) );
252 static BYTE affinity_mapping
[] = {20,6,31,15,14,29,27,4,18,24,26,13,0,9,2,30,17,7,23,25,10,19,12,3,22,21,5,16,1,28,11,8};
253 static LONG next_thread_affinity
;
255 /* a bin, tracking heap blocks of a certain size */
258 /* counters for LFH activation */
263 /* list of groups with free blocks */
266 /* array of affinity reserved groups, interleaved with other bins to keep
267 * all pointers of the same affinity and different bin grouped together,
268 * and pointers of the same bin and different affinity away from each other,
269 * hopefully in separate cache lines.
271 struct group
**affinity_group_base
;
274 static inline struct group
**bin_get_affinity_group( struct bin
*bin
, BYTE affinity
)
276 return bin
->affinity_group_base
+ affinity
* BLOCK_SIZE_BIN_COUNT
;
281 DWORD_PTR unknown1
[2]; /* 0000/0000 */
282 DWORD ffeeffee
; /* 0008/0010 */
283 DWORD auto_flags
; /* 000c/0014 */
284 DWORD_PTR unknown2
[7]; /* 0010/0018 */
285 DWORD unknown3
[2]; /* 002c/0050 */
286 DWORD_PTR unknown4
[3]; /* 0034/0058 */
287 DWORD flags
; /* 0040/0070 */
288 DWORD force_flags
; /* 0044/0074 */
289 /* end of the Windows 10 compatible struct layout */
291 LONG compat_info
; /* HeapCompatibilityInformation / heap frontend type */
292 struct list entry
; /* Entry in process heap list */
293 struct list subheap_list
; /* Sub-heap list */
294 struct list large_list
; /* Large blocks list */
295 SIZE_T grow_size
; /* Size of next subheap for growing heap */
296 SIZE_T min_size
; /* Minimum committed size */
297 DWORD magic
; /* Magic number */
298 DWORD pending_pos
; /* Position in pending free requests ring */
299 struct block
**pending_free
; /* Ring buffer for pending free requests */
300 RTL_CRITICAL_SECTION cs
;
301 struct entry free_lists
[FREE_LIST_COUNT
];
306 /* subheap must be last and aligned */
307 C_ASSERT( sizeof(struct heap
) == offsetof(struct heap
, subheap
) + sizeof(SUBHEAP
) );
308 C_ASSERT( sizeof(struct heap
) % BLOCK_ALIGN
== 0 );
309 C_ASSERT( offsetof(struct heap
, subheap
) <= REGION_ALIGN
- 1 );
311 #define HEAP_MAGIC ((DWORD)('H' | ('E'<<8) | ('A'<<16) | ('P'<<24)))
313 #define HEAP_INITIAL_SIZE 0x10000
314 #define HEAP_INITIAL_GROW_SIZE 0x100000
315 #define HEAP_MAX_GROW_SIZE 0xfd0000
317 C_ASSERT( HEAP_MIN_LARGE_BLOCK_SIZE
<= HEAP_INITIAL_GROW_SIZE
);
319 #define MAX_FREE_PENDING 1024 /* max number of free requests to delay */
321 /* some undocumented flags (names are made up) */
322 #define HEAP_PRIVATE 0x00001000
323 #define HEAP_ADD_USER_INFO 0x00000100
324 #define HEAP_USER_FLAGS_MASK 0x00000f00
325 #define HEAP_PAGE_ALLOCS 0x01000000
326 #define HEAP_VALIDATE 0x10000000
327 #define HEAP_VALIDATE_ALL 0x20000000
328 #define HEAP_VALIDATE_PARAMS 0x40000000
329 #define HEAP_CHECKING_ENABLED 0x80000000
331 static struct heap
*process_heap
; /* main process heap */
333 static NTSTATUS
heap_free_block_lfh( struct heap
*heap
, ULONG flags
, struct block
*block
);
335 /* check if memory range a contains memory range b */
336 static inline BOOL
contains( const void *a
, SIZE_T a_size
, const void *b
, SIZE_T b_size
)
338 const void *a_end
= (char *)a
+ a_size
, *b_end
= (char *)b
+ b_size
;
339 return a
<= b
&& b
<= b_end
&& b_end
<= a_end
;
342 static inline UINT
block_get_flags( const struct block
*block
)
344 return block
->block_flags
;
347 static inline UINT
block_get_type( const struct block
*block
)
349 return block
->block_type
;
352 static inline void block_set_base( struct block
*block
, const void *base
)
354 const char *offset
= ROUND_ADDR( block
, REGION_ALIGN
- 1 );
355 block
->base_offset
= (offset
- (char *)base
) / REGION_ALIGN
;
358 static inline void block_set_type( struct block
*block
, UINT type
)
360 block
->block_type
= type
;
363 static inline SUBHEAP
*block_get_subheap( const struct heap
*heap
, const struct block
*block
)
365 char *offset
= ROUND_ADDR( block
, REGION_ALIGN
- 1 );
366 void *base
= offset
- (SIZE_T
)block
->base_offset
* REGION_ALIGN
;
367 if (base
!= (void *)heap
) return base
;
368 else return (SUBHEAP
*)&heap
->subheap
;
371 static inline UINT
block_get_overhead( const struct block
*block
)
373 if (block_get_flags( block
) & BLOCK_FLAG_FREE
) return sizeof(*block
) + sizeof(struct list
);
374 return sizeof(*block
) + block
->tail_size
;
377 /* return the size of a block, including its header */
378 static inline UINT
block_get_size( const struct block
*block
)
380 UINT block_size
= block
->block_size
;
381 if (block_get_flags( block
) & BLOCK_FLAG_FREE
) block_size
+= (UINT
)block
->tail_size
<< 16;
382 return block_size
* BLOCK_ALIGN
;
385 static inline void block_set_size( struct block
*block
, UINT block_size
)
387 block_size
/= BLOCK_ALIGN
;
388 if (block_get_flags( block
) & BLOCK_FLAG_FREE
) block
->tail_size
= block_size
>> 16;
389 block
->block_size
= block_size
;
392 static inline void block_set_flags( struct block
*block
, BYTE clear
, BYTE set
)
394 UINT block_size
= block_get_size( block
);
395 block
->block_flags
&= ~clear
;
396 block
->block_flags
|= set
;
397 block_set_size( block
, block_size
);
400 static inline void *subheap_base( const SUBHEAP
*subheap
)
402 return ROUND_ADDR( subheap
, REGION_ALIGN
- 1 );
405 static inline SIZE_T
subheap_overhead( const SUBHEAP
*subheap
)
407 return (char *)&subheap
->block
- (char *)subheap_base( subheap
);
410 static inline SIZE_T
subheap_size( const SUBHEAP
*subheap
)
412 return subheap
->block_size
+ subheap_overhead( subheap
);
415 static inline const void *subheap_commit_end( const SUBHEAP
*subheap
)
417 return (char *)(subheap
+ 1) + subheap
->data_size
;
420 static void subheap_set_bounds( SUBHEAP
*subheap
, char *commit_end
, char *end
)
422 subheap
->block_size
= end
- (char *)&subheap
->block
;
423 subheap
->data_size
= commit_end
- (char *)(subheap
+ 1);
426 static inline void *first_block( const SUBHEAP
*subheap
)
428 return (void *)&subheap
->block
;
431 static inline const void *last_block( const SUBHEAP
*subheap
)
433 return (char *)subheap_commit_end( subheap
) - sizeof(struct block
);
436 static inline struct block
*next_block( const SUBHEAP
*subheap
, const struct block
*block
)
438 const char *data
= (char *)(block
+ 1), *next
, *last
= last_block( subheap
);
439 next
= (char *)block
+ block_get_size( block
);
440 if (!contains( data
, last
- (char *)data
, next
, sizeof(*block
) )) return NULL
;
441 return (struct block
*)next
;
444 static inline BOOL
check_subheap( const SUBHEAP
*subheap
, const struct heap
*heap
)
446 if (subheap
->user_value
!= heap
) return FALSE
;
448 return contains( &subheap
->block
, subheap
->block_size
, subheap
+ 1, subheap
->data_size
);
451 static BOOL
heap_validate( const struct heap
*heap
);
453 /* mark a block of memory as innacessible for debugging purposes */
454 static inline void valgrind_make_noaccess( void const *ptr
, SIZE_T size
)
456 #if defined(VALGRIND_MAKE_MEM_NOACCESS)
457 VALGRIND_DISCARD( VALGRIND_MAKE_MEM_NOACCESS( ptr
, size
) );
458 #elif defined(VALGRIND_MAKE_NOACCESS)
459 VALGRIND_DISCARD( VALGRIND_MAKE_NOACCESS( ptr
, size
) );
463 /* mark a block of memory as initialized for debugging purposes */
464 static inline void valgrind_make_readable( void const *ptr
, SIZE_T size
)
466 #if defined(VALGRIND_MAKE_MEM_DEFINED)
467 VALGRIND_DISCARD( VALGRIND_MAKE_MEM_DEFINED( ptr
, size
) );
468 #elif defined(VALGRIND_MAKE_READABLE)
469 VALGRIND_DISCARD( VALGRIND_MAKE_READABLE( ptr
, size
) );
473 /* mark a block of memory as uninitialized for debugging purposes */
474 static inline void valgrind_make_writable( void const *ptr
, SIZE_T size
)
476 #if defined(VALGRIND_MAKE_MEM_UNDEFINED)
477 VALGRIND_DISCARD( VALGRIND_MAKE_MEM_UNDEFINED( ptr
, size
) );
478 #elif defined(VALGRIND_MAKE_WRITABLE)
479 VALGRIND_DISCARD( VALGRIND_MAKE_WRITABLE( ptr
, size
) );
483 /* mark a block of memory as free for debugging purposes */
484 static inline void mark_block_free( void *ptr
, SIZE_T size
, DWORD flags
)
486 if (flags
& HEAP_FREE_CHECKING_ENABLED
)
489 for (i
= 0; i
< size
/ sizeof(DWORD
); i
++) ((DWORD
*)ptr
)[i
] = BLOCK_FILL_FREE
;
491 valgrind_make_noaccess( ptr
, size
);
494 /* mark a block of memory as a tail block */
495 static inline void mark_block_tail( struct block
*block
, DWORD flags
)
497 char *tail
= (char *)block
+ block_get_size( block
) - block
->tail_size
;
498 if (flags
& HEAP_TAIL_CHECKING_ENABLED
)
500 valgrind_make_writable( tail
, BLOCK_ALIGN
);
501 memset( tail
, BLOCK_FILL_TAIL
, BLOCK_ALIGN
);
503 valgrind_make_noaccess( tail
, BLOCK_ALIGN
);
504 if (flags
& HEAP_ADD_USER_INFO
)
506 if (flags
& HEAP_TAIL_CHECKING_ENABLED
|| RUNNING_ON_VALGRIND
) tail
+= BLOCK_ALIGN
;
507 valgrind_make_writable( tail
, BLOCK_ALIGN
);
508 memset( tail
, 0, BLOCK_ALIGN
);
512 /* initialize contents of a newly created block of memory */
513 static inline void initialize_block( struct block
*block
, SIZE_T old_size
, SIZE_T size
, DWORD flags
)
515 char *data
= (char *)(block
+ 1);
518 if (size
<= old_size
) return;
520 if (flags
& HEAP_ZERO_MEMORY
)
522 valgrind_make_writable( data
+ old_size
, size
- old_size
);
523 memset( data
+ old_size
, 0, size
- old_size
);
525 else if (flags
& HEAP_FREE_CHECKING_ENABLED
)
527 valgrind_make_writable( data
+ old_size
, size
- old_size
);
528 i
= ROUND_SIZE( old_size
, sizeof(DWORD
) - 1 ) / sizeof(DWORD
);
529 for (; i
< size
/ sizeof(DWORD
); ++i
) ((DWORD
*)data
)[i
] = BLOCK_FILL_USED
;
533 /* notify that a new block of memory has been allocated for debugging purposes */
534 static inline void valgrind_notify_alloc( void const *ptr
, SIZE_T size
, BOOL init
)
536 #ifdef VALGRIND_MALLOCLIKE_BLOCK
537 VALGRIND_MALLOCLIKE_BLOCK( ptr
, size
, 0, init
);
/* notify that a block of memory has been freed for debugging purposes */
static inline void valgrind_notify_free( void const *ptr )
{
#ifdef VALGRIND_FREELIKE_BLOCK
    VALGRIND_FREELIKE_BLOCK( ptr, 0 );
#endif
}
549 static inline void valgrind_notify_resize( void const *ptr
, SIZE_T size_old
, SIZE_T size_new
)
551 #ifdef VALGRIND_RESIZEINPLACE_BLOCK
552 /* zero is not a valid size */
553 VALGRIND_RESIZEINPLACE_BLOCK( ptr
, size_old
? size_old
: 1, size_new
? size_new
: 1, 0 );
557 static void valgrind_notify_free_all( SUBHEAP
*subheap
, const struct heap
*heap
)
559 #ifdef VALGRIND_FREELIKE_BLOCK
562 if (!RUNNING_ON_VALGRIND
) return;
563 if (!check_subheap( subheap
, heap
)) return;
565 for (block
= first_block( subheap
); block
; block
= next_block( subheap
, block
))
567 if (block_get_flags( block
) & BLOCK_FLAG_FREE
) continue;
568 if (block_get_type( block
) == BLOCK_TYPE_USED
) valgrind_notify_free( block
+ 1 );
573 /* get the memory protection type to use for a given heap */
574 static inline ULONG
get_protection_type( DWORD flags
)
576 return (flags
& HEAP_CREATE_ENABLE_EXECUTE
) ? PAGE_EXECUTE_READWRITE
: PAGE_READWRITE
;
579 static RTL_CRITICAL_SECTION_DEBUG process_heap_cs_debug
=
581 0, 0, NULL
, /* will be set later */
582 { &process_heap_cs_debug
.ProcessLocksList
, &process_heap_cs_debug
.ProcessLocksList
},
583 0, 0, { (DWORD_PTR
)(__FILE__
": main process heap section") }
586 static inline ULONG
heap_get_flags( const struct heap
*heap
, ULONG flags
)
588 if (flags
& (HEAP_TAIL_CHECKING_ENABLED
| HEAP_FREE_CHECKING_ENABLED
)) flags
|= HEAP_CHECKING_ENABLED
;
589 flags
&= HEAP_GENERATE_EXCEPTIONS
| HEAP_NO_SERIALIZE
| HEAP_ZERO_MEMORY
| HEAP_REALLOC_IN_PLACE_ONLY
| HEAP_CHECKING_ENABLED
| HEAP_USER_FLAGS_MASK
;
590 return heap
->flags
| flags
;
593 static inline void heap_lock( struct heap
*heap
, ULONG flags
)
595 if (flags
& HEAP_NO_SERIALIZE
) return;
596 RtlEnterCriticalSection( &heap
->cs
);
599 static inline void heap_unlock( struct heap
*heap
, ULONG flags
)
601 if (flags
& HEAP_NO_SERIALIZE
) return;
602 RtlLeaveCriticalSection( &heap
->cs
);
605 static void heap_set_status( const struct heap
*heap
, ULONG flags
, NTSTATUS status
)
607 if (status
== STATUS_NO_MEMORY
&& (flags
& HEAP_GENERATE_EXCEPTIONS
)) RtlRaiseStatus( status
);
608 if (status
) RtlSetLastWin32ErrorAndNtStatusFromNtStatus( status
);
611 static SIZE_T
get_free_list_block_size( unsigned int index
)
613 DWORD log
= index
>> FREE_LIST_LINEAR_BITS
;
614 DWORD linear
= index
& FREE_LIST_LINEAR_MASK
;
616 if (log
== 0) return index
* BLOCK_ALIGN
;
618 return (((1 << FREE_LIST_LINEAR_BITS
) + linear
) << (log
- 1)) * BLOCK_ALIGN
;
622 * Given a size, return its index in the block size list for freelists.
624 * With FREE_LIST_LINEAR_BITS=2, the list looks like this
625 * (with respect to size / BLOCK_ALIGN):
627 * 1, 2, 3, 4, 5, 6, 7, 8,
628 * 10, 12, 14, 16, 20, 24, 28, 32,
629 * 40, 48, 56, 64, 80, 96, 112, 128,
630 * 160, 192, 224, 256, 320, 384, 448, 512,
633 static unsigned int get_free_list_index( SIZE_T block_size
)
635 DWORD bit
, log
, linear
;
637 if (block_size
> get_free_list_block_size( FREE_LIST_COUNT
- 1 ))
638 return FREE_LIST_COUNT
- 1;
640 block_size
/= BLOCK_ALIGN
;
641 /* find the highest bit */
642 if (!BitScanReverse( &bit
, block_size
) || bit
< FREE_LIST_LINEAR_BITS
)
644 /* for small values, the index is same as block_size. */
650 /* the highest bit is always set, ignore it and encode the next FREE_LIST_LINEAR_BITS bits
651 * as a linear scale, combined with the shift as a log scale, in the free list index. */
652 log
= bit
- FREE_LIST_LINEAR_BITS
+ 1;
653 linear
= (block_size
>> (bit
- FREE_LIST_LINEAR_BITS
)) & FREE_LIST_LINEAR_MASK
;
656 return (log
<< FREE_LIST_LINEAR_BITS
) + linear
;
659 /* locate a free list entry of the appropriate size */
660 static inline struct entry
*find_free_list( struct heap
*heap
, SIZE_T block_size
, BOOL last
)
662 unsigned int index
= get_free_list_index( block_size
);
663 if (last
&& ++index
== FREE_LIST_COUNT
) index
= 0;
664 return &heap
->free_lists
[index
];
667 static void heap_dump( const struct heap
*heap
)
669 const struct block
*block
;
670 const ARENA_LARGE
*large
;
671 const SUBHEAP
*subheap
;
674 TRACE( "heap: %p\n", heap
);
675 TRACE( " next %p\n", LIST_ENTRY( heap
->entry
.next
, struct heap
, entry
) );
678 for (i
= 0; heap
->bins
&& i
< BLOCK_SIZE_BIN_COUNT
; i
++)
680 const struct bin
*bin
= heap
->bins
+ i
;
681 ULONG alloc
= ReadNoFence( &bin
->count_alloc
), freed
= ReadNoFence( &bin
->count_freed
);
682 if (!alloc
&& !freed
) continue;
683 TRACE( " %3u: size %#4Ix, alloc %ld, freed %ld, enabled %lu\n", i
, BLOCK_BIN_SIZE( i
),
684 alloc
, freed
, ReadNoFence( &bin
->enabled
) );
687 TRACE( " free_lists: %p\n", heap
->free_lists
);
688 for (i
= 0; i
< FREE_LIST_COUNT
; i
++)
689 TRACE( " %p: size %#8Ix, prev %p, next %p\n", heap
->free_lists
+ i
, get_free_list_block_size( i
),
690 LIST_ENTRY( heap
->free_lists
[i
].entry
.prev
, struct entry
, entry
),
691 LIST_ENTRY( heap
->free_lists
[i
].entry
.next
, struct entry
, entry
) );
693 TRACE( " subheaps: %p\n", &heap
->subheap_list
);
694 LIST_FOR_EACH_ENTRY( subheap
, &heap
->subheap_list
, SUBHEAP
, entry
)
696 SIZE_T free_size
= 0, used_size
= 0, overhead
= 0;
697 const char *base
= subheap_base( subheap
);
699 TRACE( " %p: base %p first %p last %p end %p\n", subheap
, base
, first_block( subheap
),
700 last_block( subheap
), base
+ subheap_size( subheap
) );
702 if (!check_subheap( subheap
, heap
)) return;
704 overhead
+= subheap_overhead( subheap
);
705 for (block
= first_block( subheap
); block
; block
= next_block( subheap
, block
))
707 if (block_get_flags( block
) & BLOCK_FLAG_FREE
)
709 TRACE( " %p: (free) type %#10x, size %#8x, flags %#4x, prev %p, next %p\n", block
,
710 block_get_type( block
), block_get_size( block
), block_get_flags( block
),
711 LIST_ENTRY( ((struct entry
*)block
)->entry
.prev
, struct entry
, entry
),
712 LIST_ENTRY( ((struct entry
*)block
)->entry
.next
, struct entry
, entry
) );
714 overhead
+= block_get_overhead( block
);
715 free_size
+= block_get_size( block
) - block_get_overhead( block
);
719 TRACE( " %p: (used) type %#10x, size %#8x, flags %#4x, unused %#4x", block
,
720 block_get_type( block
), block_get_size( block
), block_get_flags( block
),
722 if (!(block_get_flags( block
) & BLOCK_FLAG_PREV_FREE
)) TRACE( "\n" );
723 else TRACE( ", back %p\n", *((struct block
**)block
- 1) );
725 overhead
+= block_get_overhead( block
);
726 used_size
+= block_get_size( block
) - block_get_overhead( block
);
730 TRACE( " total %#Ix, used %#Ix, free %#Ix, overhead %#Ix (%Iu%%)\n", used_size
+ free_size
+ overhead
,
731 used_size
, free_size
, overhead
, (overhead
* 100) / subheap_size( subheap
) );
734 TRACE( " large blocks: %p\n", &heap
->large_list
);
735 LIST_FOR_EACH_ENTRY( large
, &heap
->large_list
, ARENA_LARGE
, entry
)
737 TRACE( " %p: (large) type %#10x, size %#8x, flags %#4x, total_size %#10Ix, alloc_size %#10Ix, prev %p, next %p\n",
738 large
, block_get_type( &large
->block
), block_get_size( &large
->block
), block_get_flags( &large
->block
), large
->block_size
,
739 large
->data_size
, LIST_ENTRY( large
->entry
.prev
, ARENA_LARGE
, entry
), LIST_ENTRY( large
->entry
.next
, ARENA_LARGE
, entry
) );
742 if (heap
->pending_free
)
744 TRACE( " pending blocks: %p\n", heap
->pending_free
);
745 for (i
= 0; i
< MAX_FREE_PENDING
; ++i
)
747 if (!(block
= heap
->pending_free
[i
])) break;
749 TRACE( " %c%p: (pend) type %#10x, size %#8x, flags %#4x, unused %#4x", i
== heap
->pending_pos
? '*' : ' ',
750 block
, block_get_type( block
), block_get_size( block
), block_get_flags( block
), block
->tail_size
);
751 if (!(block_get_flags( block
) & BLOCK_FLAG_PREV_FREE
)) TRACE( "\n" );
752 else TRACE( ", back %p\n", *((struct block
**)block
- 1) );
757 static const char *debugstr_heap_entry( struct rtl_heap_entry
*entry
)
759 const char *str
= wine_dbg_sprintf( "data %p, size %#Ix, overhead %#x, region %#x, flags %#x", entry
->lpData
,
760 entry
->cbData
, entry
->cbOverhead
, entry
->iRegionIndex
, entry
->wFlags
);
761 if (!(entry
->wFlags
& RTL_HEAP_ENTRY_REGION
)) return str
;
762 return wine_dbg_sprintf( "%s, commit %#lx, uncommit %#lx, first %p, last %p", str
, entry
->Region
.dwCommittedSize
,
763 entry
->Region
.dwUnCommittedSize
, entry
->Region
.lpFirstBlock
, entry
->Region
.lpLastBlock
);
766 static struct heap
*unsafe_heap_from_handle( HANDLE handle
, ULONG flags
, ULONG
*heap_flags
)
768 struct heap
*heap
= handle
;
771 if (!heap
|| (heap
->magic
!= HEAP_MAGIC
))
773 ERR( "Invalid handle %p!\n", handle
);
776 if (heap
->flags
& HEAP_VALIDATE_ALL
)
778 heap_lock( heap
, 0 );
779 valid
= heap_validate( heap
);
780 heap_unlock( heap
, 0 );
782 if (!valid
&& TRACE_ON(heap
))
789 *heap_flags
= heap_get_flags( heap
, flags
);
790 return valid
? heap
: NULL
;
794 static SUBHEAP
*find_subheap( const struct heap
*heap
, const struct block
*block
, BOOL heap_walk
)
798 LIST_FOR_EACH_ENTRY( subheap
, &heap
->subheap_list
, SUBHEAP
, entry
)
800 SIZE_T blocks_size
= (char *)last_block( subheap
) - (char *)first_block( subheap
);
801 if (!check_subheap( subheap
, heap
)) return NULL
;
802 if (contains( first_block( subheap
), blocks_size
, block
, sizeof(*block
) )) return subheap
;
803 /* outside of blocks region, possible corruption or heap_walk */
804 if (contains( subheap_base( subheap
), subheap_size( subheap
), block
, 1 )) return heap_walk
? subheap
: NULL
;
811 static inline BOOL
subheap_commit( const struct heap
*heap
, SUBHEAP
*subheap
, const struct block
*block
, SIZE_T block_size
)
813 const char *end
= (char *)subheap_base( subheap
) + subheap_size( subheap
), *commit_end
;
817 commit_end
= (char *)block
+ block_size
+ sizeof(struct entry
);
818 commit_end
= ROUND_ADDR( (char *)commit_end
+ REGION_ALIGN
- 1, REGION_ALIGN
- 1 );
820 if (commit_end
> end
) commit_end
= end
;
821 if (commit_end
<= (char *)subheap_commit_end( subheap
)) return TRUE
;
823 addr
= (void *)subheap_commit_end( subheap
);
824 size
= commit_end
- (char *)addr
;
826 if (NtAllocateVirtualMemory( NtCurrentProcess(), &addr
, 0, &size
, MEM_COMMIT
,
827 get_protection_type( heap
->flags
) ))
829 WARN( "Could not commit %#Ix bytes at %p for heap %p\n", size
, addr
, heap
);
833 subheap
->data_size
= (char *)commit_end
- (char *)(subheap
+ 1);
837 static inline BOOL
subheap_decommit( const struct heap
*heap
, SUBHEAP
*subheap
, const void *commit_end
)
839 char *base
= subheap_base( subheap
);
843 commit_end
= ROUND_ADDR( (char *)commit_end
+ REGION_ALIGN
- 1, REGION_ALIGN
- 1 );
844 if (subheap
== &heap
->subheap
) commit_end
= max( (char *)commit_end
, (char *)base
+ heap
->min_size
);
845 if (commit_end
>= subheap_commit_end( subheap
)) return TRUE
;
847 size
= (char *)subheap_commit_end( subheap
) - (char *)commit_end
;
848 addr
= (void *)commit_end
;
850 if (NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_DECOMMIT
))
852 WARN( "Could not decommit %#Ix bytes at %p for heap %p\n", size
, addr
, heap
);
856 subheap
->data_size
= (char *)commit_end
- (char *)(subheap
+ 1);
860 static void block_init_free( struct block
*block
, ULONG flags
, SUBHEAP
*subheap
, SIZE_T block_size
)
862 const char *end
= (char *)block
+ block_size
, *commit_end
= subheap_commit_end( subheap
);
863 struct entry
*entry
= (struct entry
*)block
;
865 valgrind_make_writable( block
, sizeof(*entry
) );
866 block_set_type( block
, BLOCK_TYPE_FREE
);
867 block_set_base( block
, subheap_base( subheap
) );
868 block_set_flags( block
, ~0, BLOCK_FLAG_FREE
);
869 block_set_size( block
, block_size
);
871 /* If debugging, erase the freed block content */
873 if (end
> commit_end
) end
= commit_end
;
874 if (end
> (char *)(entry
+ 1)) mark_block_free( entry
+ 1, end
- (char *)(entry
+ 1), flags
);
877 static void insert_free_block( struct heap
*heap
, ULONG flags
, SUBHEAP
*subheap
, struct block
*block
)
879 struct entry
*entry
= (struct entry
*)block
, *list
;
882 if ((next
= next_block( subheap
, block
)))
884 /* set the next block PREV_FREE flag and back pointer */
885 block_set_flags( next
, 0, BLOCK_FLAG_PREV_FREE
);
886 valgrind_make_writable( (struct block
**)next
- 1, sizeof(struct block
*) );
887 *((struct block
**)next
- 1) = block
;
890 list
= find_free_list( heap
, block_get_size( block
), !next
);
891 if (!next
) list_add_before( &list
->entry
, &entry
->entry
);
892 else list_add_after( &list
->entry
, &entry
->entry
);
896 static struct block
*heap_delay_free( struct heap
*heap
, ULONG flags
, struct block
*block
)
900 if (!heap
->pending_free
) return block
;
902 block_set_type( block
, BLOCK_TYPE_DEAD
);
903 mark_block_free( block
+ 1, block_get_size( block
) - sizeof(*block
), flags
);
905 heap_lock( heap
, flags
);
907 tmp
= heap
->pending_free
[heap
->pending_pos
];
908 heap
->pending_free
[heap
->pending_pos
] = block
;
909 heap
->pending_pos
= (heap
->pending_pos
+ 1) % MAX_FREE_PENDING
;
911 heap_unlock( heap
, flags
);
917 static NTSTATUS
heap_free_block( struct heap
*heap
, ULONG flags
, struct block
*block
)
919 SUBHEAP
*subheap
= block_get_subheap( heap
, block
);
920 SIZE_T block_size
= block_get_size( block
);
924 if ((next
= next_block( subheap
, block
)) && (block_get_flags( next
) & BLOCK_FLAG_FREE
))
926 /* merge with next block if it is free */
927 entry
= (struct entry
*)next
;
928 block_size
+= block_get_size( &entry
->block
);
929 list_remove( &entry
->entry
);
930 next
= next_block( subheap
, next
);
933 if (block_get_flags( block
) & BLOCK_FLAG_PREV_FREE
)
935 /* merge with previous block if it is free */
936 entry
= *((struct entry
**)block
- 1);
937 block_size
+= block_get_size( &entry
->block
);
938 list_remove( &entry
->entry
);
939 block
= &entry
->block
;
942 if (block
== first_block( subheap
) && !next
&& subheap
!= &heap
->subheap
)
944 /* free the subheap if it's empty and not the main one */
945 void *addr
= subheap_base( subheap
);
948 list_remove( &subheap
->entry
);
949 NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_RELEASE
);
950 return STATUS_SUCCESS
;
953 block_init_free( block
, flags
, subheap
, block_size
);
954 insert_free_block( heap
, flags
, subheap
, block
);
956 /* keep room for a full committed block as hysteresis */
957 if (!next
) subheap_decommit( heap
, subheap
, (char *)((struct entry
*)block
+ 1) + REGION_ALIGN
);
959 return STATUS_SUCCESS
;
963 static struct block
*split_block( struct heap
*heap
, ULONG flags
, struct block
*block
,
964 SIZE_T old_block_size
, SIZE_T block_size
)
966 SUBHEAP
*subheap
= block_get_subheap( heap
, block
);
968 if (old_block_size
>= block_size
+ HEAP_MIN_BLOCK_SIZE
)
970 block_set_size( block
, block_size
);
971 return next_block( subheap
, block
);
974 block_set_size( block
, old_block_size
);
979 static void *allocate_region( struct heap
*heap
, ULONG flags
, SIZE_T
*region_size
, SIZE_T
*commit_size
)
984 if (heap
&& !(flags
& HEAP_GROWABLE
))
986 WARN( "Heap %p isn't growable, cannot allocate %#Ix bytes\n", heap
, *region_size
);
990 /* allocate the memory block */
991 if ((status
= NtAllocateVirtualMemory( NtCurrentProcess(), &addr
, 0, region_size
, MEM_RESERVE
,
992 get_protection_type( flags
) )))
994 WARN( "Could not allocate %#Ix bytes, status %#lx\n", *region_size
, status
);
997 if ((status
= NtAllocateVirtualMemory( NtCurrentProcess(), &addr
, 0, commit_size
, MEM_COMMIT
,
998 get_protection_type( flags
) )))
1000 WARN( "Could not commit %#Ix bytes, status %#lx\n", *commit_size
, status
);
1008 static NTSTATUS
heap_allocate_large( struct heap
*heap
, ULONG flags
, SIZE_T block_size
,
1009 SIZE_T size
, void **ret
)
1012 SIZE_T total_size
= ROUND_SIZE( sizeof(*arena
) + size
, REGION_ALIGN
- 1 );
1013 struct block
*block
;
1015 if (total_size
< size
) return STATUS_NO_MEMORY
; /* overflow */
1016 if (!(arena
= allocate_region( heap
, flags
, &total_size
, &total_size
))) return STATUS_NO_MEMORY
;
1018 block
= &arena
->block
;
1019 arena
->data_size
= size
;
1020 arena
->block_size
= (char *)arena
+ total_size
- (char *)block
;
1022 block_set_type( block
, BLOCK_TYPE_LARGE
);
1023 block_set_base( block
, arena
);
1024 block_set_flags( block
, ~0, BLOCK_FLAG_LARGE
| BLOCK_USER_FLAGS( flags
) );
1025 block_set_size( block
, 0 );
1027 heap_lock( heap
, flags
);
1028 list_add_tail( &heap
->large_list
, &arena
->entry
);
1029 heap_unlock( heap
, flags
);
1031 valgrind_make_noaccess( (char *)block
+ sizeof(*block
) + arena
->data_size
,
1032 arena
->block_size
- sizeof(*block
) - arena
->data_size
);
1035 return STATUS_SUCCESS
;
1039 static NTSTATUS
heap_free_large( struct heap
*heap
, ULONG flags
, struct block
*block
)
1041 ARENA_LARGE
*arena
= CONTAINING_RECORD( block
, ARENA_LARGE
, block
);
1042 LPVOID address
= arena
;
1045 heap_lock( heap
, flags
);
1046 list_remove( &arena
->entry
);
1047 heap_unlock( heap
, flags
);
1049 return NtFreeVirtualMemory( NtCurrentProcess(), &address
, &size
, MEM_RELEASE
);
1053 static ARENA_LARGE
*find_arena_large( const struct heap
*heap
, const struct block
*block
, BOOL heap_walk
)
1057 LIST_FOR_EACH_ENTRY( arena
, &heap
->large_list
, ARENA_LARGE
, entry
)
1059 if (contains( &arena
->block
, arena
->block_size
, block
, 1 ))
1060 return !heap_walk
|| block
== &arena
->block
? arena
: NULL
;
1066 static BOOL
validate_large_block( const struct heap
*heap
, const struct block
*block
)
1068 const ARENA_LARGE
*arena
= CONTAINING_RECORD( block
, ARENA_LARGE
, block
);
1069 const char *err
= NULL
;
1071 if (ROUND_ADDR( block
, REGION_ALIGN
- 1 ) != arena
)
1072 err
= "invalid block BLOCK_ALIGN";
1073 else if (block_get_size( block
))
1074 err
= "invalid block size";
1075 else if (!(block_get_flags( block
) & BLOCK_FLAG_LARGE
))
1076 err
= "invalid block flags";
1077 else if (block_get_type( block
) != BLOCK_TYPE_LARGE
)
1078 err
= "invalid block type";
1079 else if (!contains( block
, arena
->block_size
, block
+ 1, arena
->data_size
))
1080 err
= "invalid block size";
1084 ERR( "heap %p, block %p: %s\n", heap
, block
, err
);
1085 if (TRACE_ON(heap
)) heap_dump( heap
);
1092 static SUBHEAP
*create_subheap( struct heap
*heap
, DWORD flags
, SIZE_T total_size
, SIZE_T commit_size
)
1097 commit_size
= ROUND_SIZE( max( commit_size
, REGION_ALIGN
), REGION_ALIGN
- 1 );
1098 total_size
= min( max( commit_size
, total_size
), 0xffff0000 ); /* don't allow a heap larger than 4GB */
1100 if (!(subheap
= allocate_region( heap
, flags
, &total_size
, &commit_size
))) return NULL
;
1102 subheap
->user_value
= heap
;
1103 subheap_set_bounds( subheap
, (char *)subheap
+ commit_size
, (char *)subheap
+ total_size
);
1104 block_size
= (SIZE_T
)ROUND_ADDR( subheap_size( subheap
) - subheap_overhead( subheap
), BLOCK_ALIGN
- 1 );
1105 block_init_free( first_block( subheap
), flags
, subheap
, block_size
);
1107 list_add_head( &heap
->subheap_list
, &subheap
->entry
);
1113 static struct block
*find_free_block( struct heap
*heap
, ULONG flags
, SIZE_T block_size
)
1115 struct list
*ptr
= &find_free_list( heap
, block_size
, FALSE
)->entry
;
1116 struct entry
*entry
;
1117 struct block
*block
;
1121 /* Find a suitable free list, and in it find a block large enough */
1123 while ((ptr
= list_next( &heap
->free_lists
[0].entry
, ptr
)))
1125 entry
= LIST_ENTRY( ptr
, struct entry
, entry
);
1126 block
= &entry
->block
;
1127 if (block_get_flags( block
) == BLOCK_FLAG_FREE_LINK
) continue;
1128 if (block_get_size( block
) >= block_size
)
1130 if (!subheap_commit( heap
, block_get_subheap( heap
, block
), block
, block_size
)) return NULL
;
1131 list_remove( &entry
->entry
);
1136 /* make sure we can fit the block and a free entry at the end */
1137 total_size
= sizeof(SUBHEAP
) + block_size
+ sizeof(struct entry
);
1138 if (total_size
< block_size
) return NULL
; /* overflow */
1140 if ((subheap
= create_subheap( heap
, flags
, max( heap
->grow_size
, total_size
), total_size
)))
1142 heap
->grow_size
= min( heap
->grow_size
* 2, HEAP_MAX_GROW_SIZE
);
1144 else while (!subheap
) /* shrink the grow size again if we are running out of space */
1146 if (heap
->grow_size
<= total_size
|| heap
->grow_size
<= 4 * 1024 * 1024) return NULL
;
1147 heap
->grow_size
/= 2;
1148 subheap
= create_subheap( heap
, flags
, max( heap
->grow_size
, total_size
), total_size
);
1151 TRACE( "created new sub-heap %p of %#Ix bytes for heap %p\n", subheap
, subheap_size( subheap
), heap
);
1153 return first_block( subheap
);
1157 static BOOL
is_valid_free_block( const struct heap
*heap
, const struct block
*block
)
1159 const SUBHEAP
*subheap
;
1162 if ((subheap
= find_subheap( heap
, block
, FALSE
))) return TRUE
;
1163 for (i
= 0; i
< FREE_LIST_COUNT
; i
++) if (block
== &heap
->free_lists
[i
].block
) return TRUE
;
1167 static BOOL
validate_free_block( const struct heap
*heap
, const SUBHEAP
*subheap
, const struct block
*block
)
1169 const char *err
= NULL
, *base
= subheap_base( subheap
), *commit_end
= subheap_commit_end( subheap
);
1170 const struct entry
*entry
= (struct entry
*)block
;
1171 const struct block
*prev
, *next
;
1172 DWORD flags
= heap
->flags
;
1174 if ((ULONG_PTR
)(block
+ 1) % BLOCK_ALIGN
)
1175 err
= "invalid block BLOCK_ALIGN";
1176 else if (block_get_type( block
) != BLOCK_TYPE_FREE
)
1177 err
= "invalid block header";
1178 else if (!(block_get_flags( block
) & BLOCK_FLAG_FREE
) || (block_get_flags( block
) & BLOCK_FLAG_PREV_FREE
))
1179 err
= "invalid block flags";
1180 else if (!contains( base
, subheap_size( subheap
), block
, block_get_size( block
) ))
1181 err
= "invalid block size";
1182 else if (!is_valid_free_block( heap
, (next
= &LIST_ENTRY( entry
->entry
.next
, struct entry
, entry
)->block
) ))
1183 err
= "invalid next free block pointer";
1184 else if (!(block_get_flags( next
) & BLOCK_FLAG_FREE
) || block_get_type( next
) != BLOCK_TYPE_FREE
)
1185 err
= "invalid next free block header";
1186 else if (!is_valid_free_block( heap
, (prev
= &LIST_ENTRY( entry
->entry
.prev
, struct entry
, entry
)->block
) ))
1187 err
= "invalid previous free block pointer";
1188 else if (!(block_get_flags( prev
) & BLOCK_FLAG_FREE
) || block_get_type( prev
) != BLOCK_TYPE_FREE
)
1189 err
= "invalid previous free block header";
1190 else if ((next
= next_block( subheap
, block
)))
1192 if (!(block_get_flags( next
) & BLOCK_FLAG_PREV_FREE
))
1193 err
= "invalid next block flags";
1194 if (*((struct block
**)next
- 1) != block
)
1195 err
= "invalid next block back pointer";
1198 if (!err
&& (flags
& HEAP_FREE_CHECKING_ENABLED
))
1200 const char *ptr
= (char *)(entry
+ 1), *end
= (char *)block
+ block_get_size( block
);
1201 if (next
) end
-= sizeof(struct block
*);
1202 if (end
> commit_end
) end
= commit_end
;
1203 while (!err
&& ptr
< end
)
1205 if (*(DWORD
*)ptr
!= BLOCK_FILL_FREE
) err
= "free block overwritten";
1206 ptr
+= sizeof(DWORD
);
1212 ERR( "heap %p, block %p: %s\n", heap
, block
, err
);
1213 if (TRACE_ON(heap
)) heap_dump( heap
);
1220 static BOOL
validate_used_block( const struct heap
*heap
, const SUBHEAP
*subheap
, const struct block
*block
,
1221 unsigned int expected_block_type
)
1223 const char *err
= NULL
, *base
= NULL
, *commit_end
;
1224 DWORD flags
= heap
->flags
;
1225 const struct block
*next
;
1226 ARENA_LARGE
*arena_large
;
1231 base
= subheap_base( subheap
);
1232 commit_end
= subheap_commit_end( subheap
);
1234 else if ((arena_large
= find_arena_large( heap
, block
, FALSE
)))
1236 if (!validate_large_block( heap
, &arena_large
->block
)) return FALSE
;
1237 if (block
== &arena_large
->block
) return TRUE
;
1239 if (contains( &arena_large
->block
+ 1, arena_large
->data_size
, block
, sizeof(*block
) )
1240 && block_get_flags( block
) & BLOCK_FLAG_LFH
)
1242 base
= (char *)arena_large
;
1243 commit_end
= (char *)(arena_large
+ 1) + arena_large
->data_size
;
1248 WARN( "heap %p, ptr %p: block region not found.\n", heap
, block
+ 1 );
1252 if ((ULONG_PTR
)(block
+ 1) % BLOCK_ALIGN
)
1253 err
= "invalid block BLOCK_ALIGN";
1254 else if (block_get_type( block
) != BLOCK_TYPE_USED
&& block_get_type( block
) != BLOCK_TYPE_DEAD
)
1255 err
= "invalid block header";
1256 else if (expected_block_type
&& block_get_type( block
) != expected_block_type
)
1257 err
= "invalid block type";
1258 else if (block_get_flags( block
) & BLOCK_FLAG_FREE
)
1259 err
= "invalid block flags";
1260 else if (!contains( base
, commit_end
- base
, block
, block_get_size( block
) ))
1261 err
= "invalid block size";
1262 else if (block
->tail_size
> block_get_size( block
) - sizeof(*block
))
1263 err
= "invalid block unused size";
1264 else if (!(block_get_flags( block
) & BLOCK_FLAG_LFH
) /* LFH blocks do not use BLOCK_FLAG_PREV_FREE or back pointer */
1265 && (next
= next_block( subheap
, block
)) && (block_get_flags( next
) & BLOCK_FLAG_PREV_FREE
))
1266 err
= "invalid next block flags";
1267 else if (block_get_flags( block
) & BLOCK_FLAG_PREV_FREE
)
1269 const struct block
*prev
= *((struct block
**)block
- 1);
1270 if (!is_valid_free_block( heap
, prev
))
1271 err
= "invalid previous block pointer";
1272 else if (!(block_get_flags( prev
) & BLOCK_FLAG_FREE
) || block_get_type( prev
) != BLOCK_TYPE_FREE
)
1273 err
= "invalid previous block flags";
1274 if ((char *)prev
+ block_get_size( prev
) != (char *)block
)
1275 err
= "invalid previous block size";
1278 if (!err
&& block_get_type( block
) == BLOCK_TYPE_DEAD
)
1280 const char *ptr
= (char *)(block
+ 1), *end
= (char *)block
+ block_get_size( block
);
1281 while (!err
&& ptr
< end
)
1283 if (*(DWORD
*)ptr
!= BLOCK_FILL_FREE
) err
= "free block overwritten";
1284 ptr
+= sizeof(DWORD
);
1287 else if (!err
&& (flags
& HEAP_TAIL_CHECKING_ENABLED
))
1289 const unsigned char *tail
= (unsigned char *)block
+ block_get_size( block
) - block
->tail_size
;
1290 for (i
= 0; !err
&& i
< BLOCK_ALIGN
; i
++) if (tail
[i
] != BLOCK_FILL_TAIL
) err
= "invalid block tail";
1295 ERR( "heap %p, block %p: %s\n", heap
, block
, err
);
1296 if (TRACE_ON(heap
)) heap_dump( heap
);
1303 static BOOL
heap_validate_ptr( const struct heap
*heap
, const void *ptr
)
1305 const struct block
*block
= (struct block
*)ptr
- 1;
1307 return validate_used_block( heap
, find_subheap( heap
, block
, FALSE
), block
, BLOCK_TYPE_USED
);
1310 static BOOL
heap_validate( const struct heap
*heap
)
1312 const ARENA_LARGE
*large_arena
;
1313 const struct block
*block
;
1314 const SUBHEAP
*subheap
;
1316 LIST_FOR_EACH_ENTRY( subheap
, &heap
->subheap_list
, SUBHEAP
, entry
)
1318 if (!check_subheap( subheap
, heap
))
1320 ERR( "heap %p, subheap %p corrupted sizes or user_value\n", heap
, subheap
);
1321 if (TRACE_ON(heap
)) heap_dump( heap
);
1325 for (block
= first_block( subheap
); block
; block
= next_block( subheap
, block
))
1327 if (block_get_flags( block
) & BLOCK_FLAG_FREE
)
1329 if (!validate_free_block( heap
, subheap
, block
)) return FALSE
;
1333 if (!validate_used_block( heap
, subheap
, block
, 0 )) return FALSE
;
1338 if (heap
->pending_free
)
1342 for (i
= 0; i
< MAX_FREE_PENDING
; i
++)
1344 if (!(block
= heap
->pending_free
[i
])) break;
1346 if (!validate_used_block( heap
, find_subheap( heap
, block
, FALSE
), block
, BLOCK_TYPE_DEAD
))
1348 ERR( "heap %p: failed to to validate delayed free block %p\n", heap
, block
);
1353 for (; i
< MAX_FREE_PENDING
; i
++)
1355 if ((block
= heap
->pending_free
[i
]))
1357 ERR( "heap %p: unexpected delayed freed block %p at slot %u\n", heap
, block
, i
);
1358 if (TRACE_ON(heap
)) heap_dump( heap
);
1364 LIST_FOR_EACH_ENTRY( large_arena
, &heap
->large_list
, ARENA_LARGE
, entry
)
1365 if (!validate_large_block( heap
, &large_arena
->block
)) return FALSE
;
1370 static inline struct block
*unsafe_block_from_ptr( struct heap
*heap
, ULONG flags
, const void *ptr
)
1372 struct block
*block
= (struct block
*)ptr
- 1;
1373 const char *err
= NULL
;
1376 if (flags
& HEAP_VALIDATE
)
1378 heap_lock( heap
, flags
);
1379 if (!heap_validate_ptr( heap
, ptr
)) block
= NULL
;
1380 heap_unlock( heap
, flags
);
1384 if ((ULONG_PTR
)ptr
% BLOCK_ALIGN
)
1385 err
= "invalid ptr alignment";
1386 else if (block_get_type( block
) == BLOCK_TYPE_DEAD
)
1387 err
= "delayed freed block";
1388 else if (block_get_type( block
) == BLOCK_TYPE_FREE
)
1389 err
= "already freed block";
1390 else if (block_get_flags( block
) & BLOCK_FLAG_LFH
)
1392 /* LFH block base_offset points to the group, not the subheap */
1393 if (block_get_type( block
) != BLOCK_TYPE_USED
)
1394 err
= "invalid block type";
1396 else if ((subheap
= block_get_subheap( heap
, block
)) >= (SUBHEAP
*)block
)
1397 err
= "invalid base offset";
1398 else if (block_get_type( block
) == BLOCK_TYPE_USED
)
1400 const char *base
= subheap_base( subheap
), *commit_end
= subheap_commit_end( subheap
);
1401 if (subheap
->user_value
!= heap
) err
= "mismatching heap";
1402 else if (!contains( base
, commit_end
- base
, block
, block_get_size( block
) )) err
= "invalid block size";
1404 else if (block_get_type( block
) == BLOCK_TYPE_LARGE
)
1406 ARENA_LARGE
*large
= subheap_base( subheap
);
1407 if (block
!= &large
->block
) err
= "invalid large block";
1411 err
= "invalid block type";
1414 if (err
) WARN( "heap %p, block %p: %s\n", heap
, block
, err
);
1415 return err
? NULL
: block
;
1418 static DWORD
heap_flags_from_global_flag( DWORD flag
)
1422 if (flag
& FLG_HEAP_ENABLE_TAIL_CHECK
)
1423 ret
|= HEAP_TAIL_CHECKING_ENABLED
;
1424 if (flag
& FLG_HEAP_ENABLE_FREE_CHECK
)
1425 ret
|= HEAP_FREE_CHECKING_ENABLED
;
1426 if (flag
& FLG_HEAP_VALIDATE_PARAMETERS
)
1427 ret
|= HEAP_VALIDATE_PARAMS
| HEAP_TAIL_CHECKING_ENABLED
| HEAP_FREE_CHECKING_ENABLED
;
1428 if (flag
& FLG_HEAP_VALIDATE_ALL
)
1429 ret
|= HEAP_VALIDATE_ALL
| HEAP_TAIL_CHECKING_ENABLED
| HEAP_FREE_CHECKING_ENABLED
;
1430 if (flag
& FLG_HEAP_DISABLE_COALESCING
)
1431 ret
|= HEAP_DISABLE_COALESCE_ON_FREE
;
1432 if (flag
& FLG_HEAP_PAGE_ALLOCS
)
1433 ret
|= HEAP_PAGE_ALLOCS
;
1438 /***********************************************************************
1439 * heap_set_debug_flags
1441 static void heap_set_debug_flags( HANDLE handle
)
1443 ULONG global_flags
= RtlGetNtGlobalFlags();
1444 DWORD dummy
, flags
, force_flags
;
1447 if (TRACE_ON(heap
)) global_flags
|= FLG_HEAP_VALIDATE_ALL
;
1448 if (WARN_ON(heap
)) global_flags
|= FLG_HEAP_VALIDATE_PARAMETERS
;
1450 heap
= unsafe_heap_from_handle( handle
, 0, &dummy
);
1451 flags
= heap_flags_from_global_flag( global_flags
);
1452 force_flags
= (heap
->flags
| flags
) & ~(HEAP_SHARED
|HEAP_DISABLE_COALESCE_ON_FREE
);
1454 if (global_flags
& FLG_HEAP_ENABLE_TAGGING
) flags
|= HEAP_SHARED
;
1455 if (!(global_flags
& FLG_HEAP_PAGE_ALLOCS
)) force_flags
&= ~(HEAP_GROWABLE
|HEAP_PRIVATE
);
1457 if (RUNNING_ON_VALGRIND
) flags
= 0; /* no sense in validating since Valgrind catches accesses */
1459 heap
->flags
|= flags
;
1460 heap
->force_flags
|= force_flags
;
1462 if (flags
& (HEAP_FREE_CHECKING_ENABLED
| HEAP_TAIL_CHECKING_ENABLED
)) /* fix existing blocks */
1464 struct block
*block
;
1467 LIST_FOR_EACH_ENTRY( subheap
, &heap
->subheap_list
, SUBHEAP
, entry
)
1469 const char *commit_end
= subheap_commit_end( subheap
);
1471 if (!check_subheap( subheap
, heap
)) break;
1473 for (block
= first_block( subheap
); block
; block
= next_block( subheap
, block
))
1475 if (block_get_flags( block
) & BLOCK_FLAG_FREE
)
1477 char *data
= (char *)block
+ block_get_overhead( block
), *end
= (char *)block
+ block_get_size( block
);
1478 if (next_block( subheap
, block
)) end
-= sizeof(struct block
*);
1479 if (end
> commit_end
) mark_block_free( data
, commit_end
- data
, flags
);
1480 else mark_block_free( data
, end
- data
, flags
);
1484 if (block_get_type( block
) == BLOCK_TYPE_DEAD
) mark_block_free( block
+ 1, block_get_size( block
) - sizeof(*block
), flags
);
1485 else mark_block_tail( block
, flags
);
1491 if ((heap
->flags
& HEAP_GROWABLE
) && !heap
->pending_free
&&
1492 ((flags
& HEAP_FREE_CHECKING_ENABLED
) || RUNNING_ON_VALGRIND
))
1494 heap
->pending_free
= RtlAllocateHeap( handle
, HEAP_ZERO_MEMORY
,
1495 MAX_FREE_PENDING
* sizeof(*heap
->pending_free
) );
1496 heap
->pending_pos
= 0;
1501 /***********************************************************************
1502 * RtlCreateHeap (NTDLL.@)
1504 * Create a new Heap.
1507 * flags [I] HEAP_ flags from "winnt.h"
1508 * addr [I] Desired base address
1509 * totalSize [I] Total size of the heap, or 0 for a growable heap
1510 * commitSize [I] Amount of heap space to commit
1511 * unknown [I] Not yet understood
1512 * definition [I] Heap definition
1515 * Success: A HANDLE to the newly created heap.
1516 * Failure: a NULL HANDLE.
1518 HANDLE WINAPI
RtlCreateHeap( ULONG flags
, void *addr
, SIZE_T total_size
, SIZE_T commit_size
,
1519 void *unknown
, RTL_HEAP_DEFINITION
*definition
)
1521 struct entry
*entry
;
1527 TRACE( "flags %#lx, addr %p, total_size %#Ix, commit_size %#Ix, unknown %p, definition %p\n",
1528 flags
, addr
, total_size
, commit_size
, unknown
, definition
);
1530 flags
&= ~(HEAP_TAIL_CHECKING_ENABLED
|HEAP_FREE_CHECKING_ENABLED
);
1531 if (process_heap
) flags
|= HEAP_PRIVATE
;
1532 if (!process_heap
|| !total_size
|| (flags
& HEAP_SHARED
)) flags
|= HEAP_GROWABLE
;
1533 if (!total_size
) total_size
= commit_size
+ HEAP_INITIAL_SIZE
;
1537 if (!commit_size
) commit_size
= REGION_ALIGN
;
1538 total_size
= min( max( total_size
, commit_size
), 0xffff0000 ); /* don't allow a heap larger than 4GB */
1539 commit_size
= min( total_size
, ROUND_SIZE( commit_size
, REGION_ALIGN
- 1 ) );
1540 if (!(heap
= allocate_region( NULL
, flags
, &total_size
, &commit_size
))) return 0;
1543 heap
->ffeeffee
= 0xffeeffee;
1544 heap
->auto_flags
= (flags
& HEAP_GROWABLE
);
1545 heap
->flags
= (flags
& ~HEAP_SHARED
);
1546 heap
->compat_info
= HEAP_STD
;
1547 heap
->magic
= HEAP_MAGIC
;
1548 heap
->grow_size
= HEAP_INITIAL_GROW_SIZE
;
1549 heap
->min_size
= commit_size
;
1550 list_init( &heap
->subheap_list
);
1551 list_init( &heap
->large_list
);
1553 list_init( &heap
->free_lists
[0].entry
);
1554 for (i
= 0, entry
= heap
->free_lists
; i
< FREE_LIST_COUNT
; i
++, entry
++)
1556 block_set_flags( &entry
->block
, ~0, BLOCK_FLAG_FREE_LINK
);
1557 block_set_size( &entry
->block
, 0 );
1558 block_set_type( &entry
->block
, BLOCK_TYPE_FREE
);
1559 block_set_base( &entry
->block
, heap
);
1560 if (i
) list_add_after( &entry
[-1].entry
, &entry
->entry
);
1563 if (!process_heap
) /* do it by hand to avoid memory allocations */
1565 heap
->cs
.DebugInfo
= &process_heap_cs_debug
;
1566 heap
->cs
.LockCount
= -1;
1567 heap
->cs
.RecursionCount
= 0;
1568 heap
->cs
.OwningThread
= 0;
1569 heap
->cs
.LockSemaphore
= 0;
1570 heap
->cs
.SpinCount
= 0;
1571 process_heap_cs_debug
.CriticalSection
= &heap
->cs
;
1575 RtlInitializeCriticalSection( &heap
->cs
);
1576 heap
->cs
.DebugInfo
->Spare
[0] = (DWORD_PTR
)(__FILE__
": heap.cs");
1579 subheap
= &heap
->subheap
;
1580 subheap
->user_value
= heap
;
1581 subheap_set_bounds( subheap
, (char *)heap
+ commit_size
, (char *)heap
+ total_size
);
1582 block_size
= (SIZE_T
)ROUND_ADDR( subheap_size( subheap
) - subheap_overhead( subheap
), BLOCK_ALIGN
- 1 );
1583 block_init_free( first_block( subheap
), flags
, subheap
, block_size
);
1585 insert_free_block( heap
, flags
, subheap
, first_block( subheap
) );
1586 list_add_head( &heap
->subheap_list
, &subheap
->entry
);
1588 heap_set_debug_flags( heap
);
1590 if (heap
->flags
& HEAP_GROWABLE
)
1592 SIZE_T size
= (sizeof(struct bin
) + sizeof(struct group
*) * ARRAY_SIZE(affinity_mapping
)) * BLOCK_SIZE_BIN_COUNT
;
1593 NtAllocateVirtualMemory( NtCurrentProcess(), (void *)&heap
->bins
,
1594 0, &size
, MEM_COMMIT
, PAGE_READWRITE
);
1596 for (i
= 0; heap
->bins
&& i
< BLOCK_SIZE_BIN_COUNT
; ++i
)
1598 RtlInitializeSListHead( &heap
->bins
[i
].groups
);
1599 /* offset affinity_group_base to interleave the bin affinity group pointers */
1600 heap
->bins
[i
].affinity_group_base
= (struct group
**)(heap
->bins
+ BLOCK_SIZE_BIN_COUNT
) + i
;
1604 /* link it into the per-process heap list */
1607 RtlEnterCriticalSection( &process_heap
->cs
);
1608 list_add_head( &process_heap
->entry
, &heap
->entry
);
1609 RtlLeaveCriticalSection( &process_heap
->cs
);
1613 process_heap
= heap
; /* assume the first heap we create is the process main heap */
1614 list_init( &process_heap
->entry
);
1621 /***********************************************************************
1622 * RtlDestroyHeap (NTDLL.@)
1624 * Destroy a Heap created with RtlCreateHeap().
1627 * heap [I] Heap to destroy.
1630 * Success: A NULL HANDLE, if heap is NULL or it was destroyed
1631 * Failure: The Heap handle, if heap is the process heap.
1633 HANDLE WINAPI
RtlDestroyHeap( HANDLE handle
)
1635 SUBHEAP
*subheap
, *next
;
1636 ARENA_LARGE
*arena
, *arena_next
;
1637 struct block
**pending
, **tmp
;
1643 TRACE( "handle %p\n", handle
);
1645 if (!(heap
= unsafe_heap_from_handle( handle
, 0, &heap_flags
)) && handle
&&
1646 (((struct heap
*)handle
)->flags
& HEAP_VALIDATE_PARAMS
) &&
1647 NtCurrentTeb()->Peb
->BeingDebugged
)
1649 DbgPrint( "Attempt to destroy an invalid heap\n" );
1652 if (!heap
) return handle
;
1654 if ((pending
= heap
->pending_free
))
1656 heap
->pending_free
= NULL
;
1657 for (tmp
= pending
; *tmp
&& tmp
!= pending
+ MAX_FREE_PENDING
; ++tmp
)
1659 if (!heap_free_block_lfh( heap
, heap
->flags
, *tmp
)) continue;
1660 heap_free_block( heap
, heap
->flags
, *tmp
);
1662 RtlFreeHeap( handle
, 0, pending
);
1665 if (heap
== process_heap
) return handle
; /* cannot delete the main process heap */
1667 /* remove it from the per-process list */
1668 RtlEnterCriticalSection( &process_heap
->cs
);
1669 list_remove( &heap
->entry
);
1670 RtlLeaveCriticalSection( &process_heap
->cs
);
1672 heap
->cs
.DebugInfo
->Spare
[0] = 0;
1673 RtlDeleteCriticalSection( &heap
->cs
);
1675 LIST_FOR_EACH_ENTRY_SAFE( arena
, arena_next
, &heap
->large_list
, ARENA_LARGE
, entry
)
1677 list_remove( &arena
->entry
);
1680 NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_RELEASE
);
1682 LIST_FOR_EACH_ENTRY_SAFE( subheap
, next
, &heap
->subheap_list
, SUBHEAP
, entry
)
1684 if (subheap
== &heap
->subheap
) continue; /* do this one last */
1685 valgrind_notify_free_all( subheap
, heap
);
1686 list_remove( &subheap
->entry
);
1688 addr
= ROUND_ADDR( subheap
, REGION_ALIGN
- 1 );
1689 NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_RELEASE
);
1691 valgrind_notify_free_all( &heap
->subheap
, heap
);
1692 if ((addr
= heap
->bins
))
1695 NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_RELEASE
);
1699 NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_RELEASE
);
1703 static SIZE_T
heap_get_block_size( const struct heap
*heap
, ULONG flags
, SIZE_T size
)
1705 static const ULONG padd_flags
= HEAP_VALIDATE
| HEAP_VALIDATE_ALL
| HEAP_VALIDATE_PARAMS
| HEAP_ADD_USER_INFO
;
1706 static const ULONG check_flags
= HEAP_TAIL_CHECKING_ENABLED
| HEAP_FREE_CHECKING_ENABLED
| HEAP_CHECKING_ENABLED
;
1707 SIZE_T overhead
, block_size
;
1709 if ((flags
& check_flags
)) overhead
= BLOCK_ALIGN
;
1710 else overhead
= sizeof(struct block
);
1712 if ((flags
& HEAP_TAIL_CHECKING_ENABLED
) || RUNNING_ON_VALGRIND
) overhead
+= BLOCK_ALIGN
;
1713 if (flags
& padd_flags
) overhead
+= BLOCK_ALIGN
;
1715 if (size
< BLOCK_ALIGN
) size
= BLOCK_ALIGN
;
1716 block_size
= ROUND_SIZE( size
+ overhead
, BLOCK_ALIGN
- 1 );
1718 if (block_size
< size
) return ~0U; /* overflow */
1719 if (block_size
< HEAP_MIN_BLOCK_SIZE
) block_size
= HEAP_MIN_BLOCK_SIZE
;
1723 static NTSTATUS
heap_allocate_block( struct heap
*heap
, ULONG flags
, SIZE_T block_size
, SIZE_T size
, void **ret
)
1725 struct block
*block
, *next
;
1726 SIZE_T old_block_size
;
1729 /* Locate a suitable free block */
1731 if (!(block
= find_free_block( heap
, flags
, block_size
))) return STATUS_NO_MEMORY
;
1732 /* read the free block size, changing block type or flags may alter it */
1733 old_block_size
= block_get_size( block
);
1734 subheap
= block_get_subheap( heap
, block
);
1736 if ((next
= split_block( heap
, flags
, block
, old_block_size
, block_size
)))
1738 block_init_free( next
, flags
, subheap
, old_block_size
- block_size
);
1739 insert_free_block( heap
, flags
, subheap
, next
);
1742 block_set_type( block
, BLOCK_TYPE_USED
);
1743 block_set_flags( block
, ~0, BLOCK_USER_FLAGS( flags
) );
1744 block
->tail_size
= block_get_size( block
) - sizeof(*block
) - size
;
1745 initialize_block( block
, 0, size
, flags
);
1746 mark_block_tail( block
, flags
);
1748 if ((next
= next_block( subheap
, block
))) block_set_flags( next
, BLOCK_FLAG_PREV_FREE
, 0 );
1751 return STATUS_SUCCESS
;
1754 /* Low Fragmentation Heap frontend */
1756 /* header for every LFH block group */
1757 struct DECLSPEC_ALIGN(BLOCK_ALIGN
) group
1760 /* one bit for each free block and the highest bit for GROUP_FLAG_FREE */
1762 /* affinity of the thread which last allocated from this group */
1764 /* first block of a group, required for alignment */
1765 struct block first_block
;
1768 #define GROUP_BLOCK_COUNT (sizeof(((struct group *)0)->free_bits) * 8 - 1)
1769 #define GROUP_FLAG_FREE (1u << GROUP_BLOCK_COUNT)
1771 static inline UINT
block_get_group_index( const struct block
*block
)
1773 return block
->base_offset
;
1776 static inline struct group
*block_get_group( const struct block
*block
)
1778 SIZE_T block_size
= block_get_size( block
);
1779 void *first_block
= (char *)block
- block_get_group_index( block
) * block_size
;
1780 return CONTAINING_RECORD( first_block
, struct group
, first_block
);
1783 static inline void block_set_group( struct block
*block
, SIZE_T block_size
, const struct group
*group
)
1785 SIZE_T offset
= (char *)block
- (char *)&group
->first_block
;
1786 block
->base_offset
= offset
/ block_size
;
1789 static inline struct block
*group_get_block( struct group
*group
, SIZE_T block_size
, UINT index
)
1791 char *first_block
= (char *)&group
->first_block
;
1792 return (struct block
*)(first_block
+ index
* block_size
);
1795 /* lookup a free block using the group free_bits, the current thread must own the group */
1796 static inline struct block
*group_find_free_block( struct group
*group
, SIZE_T block_size
)
1798 ULONG i
, free_bits
= ReadNoFence( &group
->free_bits
);
1799 /* free_bits will never be 0 as the group is unlinked when it's fully used */
1800 BitScanForward( &i
, free_bits
);
1801 InterlockedAnd( &group
->free_bits
, ~(1 << i
) );
1802 return group_get_block( group
, block_size
, i
);
1805 /* allocate a new group block using non-LFH allocation, returns a group owned by current thread */
1806 static struct group
*group_allocate( struct heap
*heap
, ULONG flags
, SIZE_T block_size
)
1808 SIZE_T i
, group_size
, group_block_size
;
1809 struct group
*group
;
1812 group_size
= offsetof( struct group
, first_block
) + GROUP_BLOCK_COUNT
* block_size
;
1813 group_block_size
= heap_get_block_size( heap
, flags
, group_size
);
1815 heap_lock( heap
, flags
);
1817 if (group_block_size
>= HEAP_MIN_LARGE_BLOCK_SIZE
)
1818 status
= heap_allocate_large( heap
, flags
& ~HEAP_ZERO_MEMORY
, group_block_size
, group_size
, (void **)&group
);
1820 status
= heap_allocate_block( heap
, flags
& ~HEAP_ZERO_MEMORY
, group_block_size
, group_size
, (void **)&group
);
1822 heap_unlock( heap
, flags
);
1824 if (status
) return NULL
;
1826 block_set_flags( (struct block
*)group
- 1, 0, BLOCK_FLAG_LFH
);
1827 group
->free_bits
= ~GROUP_FLAG_FREE
;
1829 for (i
= 0; i
< GROUP_BLOCK_COUNT
; ++i
)
1831 struct block
*block
= group_get_block( group
, block_size
, i
);
1832 valgrind_make_writable( block
, sizeof(*block
) );
1833 block_set_type( block
, BLOCK_TYPE_FREE
);
1834 block_set_flags( block
, ~0, BLOCK_FLAG_FREE
| BLOCK_FLAG_LFH
);
1835 block_set_group( block
, block_size
, group
);
1836 block_set_size( block
, block_size
);
1837 mark_block_free( block
+ 1, (char *)block
+ block_size
- (char *)(block
+ 1), flags
);
1843 /* release a fully freed group to the non-LFH subheap, group must be owned by current thread */
1844 static NTSTATUS
group_release( struct heap
*heap
, ULONG flags
, struct bin
*bin
, struct group
*group
)
1846 struct block
*block
= (struct block
*)group
- 1;
1849 heap_lock( heap
, flags
);
1851 block_set_flags( block
, BLOCK_FLAG_LFH
, 0 );
1853 if (block_get_flags( block
) & BLOCK_FLAG_LARGE
)
1854 status
= heap_free_large( heap
, flags
, block
);
1856 status
= heap_free_block( heap
, flags
, block
);
1858 heap_unlock( heap
, flags
);
1863 static inline ULONG
heap_current_thread_affinity(void)
1867 if (!(affinity
= NtCurrentTeb()->HeapVirtualAffinity
))
1869 affinity
= InterlockedIncrement( &next_thread_affinity
);
1870 affinity
= affinity_mapping
[affinity
% ARRAY_SIZE(affinity_mapping
)];
1871 NtCurrentTeb()->HeapVirtualAffinity
= affinity
;
1877 /* acquire a group from the bin, thread takes ownership of a shared group or allocates a new one */
1878 static struct group
*heap_acquire_bin_group( struct heap
*heap
, ULONG flags
, SIZE_T block_size
, struct bin
*bin
)
1880 ULONG affinity
= NtCurrentTeb()->HeapVirtualAffinity
;
1881 struct group
*group
;
1884 if ((group
= InterlockedExchangePointer( (void *)bin_get_affinity_group( bin
, affinity
), NULL
)))
1887 if ((entry
= RtlInterlockedPopEntrySList( &bin
->groups
)))
1888 return CONTAINING_RECORD( entry
, struct group
, entry
);
1890 return group_allocate( heap
, flags
, block_size
);
/* release a thread owned and fully freed group to the bin shared group, or free its memory */
static NTSTATUS heap_release_bin_group( struct heap *heap, ULONG flags, struct bin *bin, struct group *group )
{
    ULONG affinity = group->affinity;

    /* using InterlockedExchangePointer here would possibly return a group that has used blocks,
     * we prefer keeping our fully freed group instead for reduced memory consumption.
     */
    if (!InterlockedCompareExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), group, NULL ))
        return STATUS_SUCCESS;

    /* try re-using the block group instead of releasing it */
    if (RtlQueryDepthSList( &bin->groups ) <= ARRAY_SIZE(affinity_mapping))
    {
        RtlInterlockedPushEntrySList( &bin->groups, &group->entry );
        return STATUS_SUCCESS;
    }

    /* shared list already deep enough: give the memory back */
    return group_release( heap, flags, bin, group );
}
/* Get a free LFH block of block_size from the bin, taking a group for the
 * current thread and returning the group to the bin afterwards unless it
 * became full.  Returns NULL when no group (and thus no block) is available. */
static struct block *find_free_bin_block( struct heap *heap, ULONG flags, SIZE_T block_size, struct bin *bin )
{
    ULONG affinity = heap_current_thread_affinity();
    struct block *block;
    struct group *group;

    /* acquire a group, the thread will own it and no other thread can clear free bits.
     * some other thread might still set the free bits if they are freeing blocks.
     */
    if (!(group = heap_acquire_bin_group( heap, flags, block_size, bin ))) return NULL;
    group->affinity = affinity;

    block = group_find_free_block( group, block_size );

    /* serialize with heap_free_block_lfh: atomically set GROUP_FLAG_FREE when the free bits are all 0. */
    if (ReadNoFence( &group->free_bits ) || InterlockedCompareExchange( &group->free_bits, GROUP_FLAG_FREE, 0 ))
    {
        /* if GROUP_FLAG_FREE isn't set, thread is responsible for putting it back into group list. */
        if ((group = InterlockedExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), group )))
            RtlInterlockedPushEntrySList( &bin->groups, &group->entry );
    }

    return block;
}
/* Try to satisfy an allocation from the low-fragmentation heap front end.
 * Returns STATUS_UNSUCCESSFUL when the size class is out of LFH range or the
 * bin has not been enabled yet, so the caller falls back to the main heap. */
static NTSTATUS heap_allocate_block_lfh( struct heap *heap, ULONG flags, SIZE_T block_size,
                                         SIZE_T size, void **ret )
{
    struct bin *bin, *last = heap->bins + BLOCK_SIZE_BIN_COUNT - 1;
    struct block *block;

    bin = heap->bins + BLOCK_SIZE_BIN( block_size );
    if (bin == last) return STATUS_UNSUCCESSFUL;  /* size class too large for LFH */

    /* paired with WriteRelease in bin_try_enable. */
    if (!ReadAcquire( &bin->enabled )) return STATUS_UNSUCCESSFUL;

    /* round the requested size up to the bin's fixed block size */
    block_size = BLOCK_BIN_SIZE( BLOCK_SIZE_BIN( block_size ) );

    if ((block = find_free_bin_block( heap, flags, block_size, bin )))
    {
        block_set_type( block, BLOCK_TYPE_USED );
        block_set_flags( block, (BYTE)~BLOCK_FLAG_LFH, BLOCK_USER_FLAGS( flags ) );
        block->tail_size = block_size - sizeof(*block) - size;
        initialize_block( block, 0, size, flags );
        mark_block_tail( block, flags );
        *ret = block + 1;
    }

    return block ? STATUS_SUCCESS : STATUS_NO_MEMORY;
}
/* Free a block that belongs to an LFH group.  Returns STATUS_UNSUCCESSFUL if
 * the block is not an LFH block (caller then frees it through the main heap). */
static NTSTATUS heap_free_block_lfh( struct heap *heap, ULONG flags, struct block *block )
{
    struct bin *bin, *last = heap->bins + BLOCK_SIZE_BIN_COUNT - 1;
    SIZE_T i, block_size = block_get_size( block );
    struct group *group = block_get_group( block );
    NTSTATUS status = STATUS_SUCCESS;

    if (!(block_get_flags( block ) & BLOCK_FLAG_LFH)) return STATUS_UNSUCCESSFUL;

    bin = heap->bins + BLOCK_SIZE_BIN( block_size );
    if (bin == last) return STATUS_UNSUCCESSFUL;

    i = block_get_group_index( block );
    valgrind_make_writable( block, sizeof(*block) );
    block_set_type( block, BLOCK_TYPE_FREE );
    block_set_flags( block, (BYTE)~BLOCK_FLAG_LFH, BLOCK_FLAG_FREE );
    mark_block_free( block + 1, (char *)block + block_size - (char *)(block + 1), flags );

    /* if this was the last used block in a group and GROUP_FLAG_FREE was set */
    if (InterlockedOr( &group->free_bits, 1 << i ) == ~(1 << i))
    {
        /* thread now owns the group, and can release it to its bin */
        group->free_bits = ~GROUP_FLAG_FREE;
        status = heap_release_bin_group( heap, flags, bin, group );
    }

    return status;
}
/* Enable the LFH front end for a bin once its allocation counters cross the
 * activation heuristics, switching the heap's compat_info to HEAP_LFH first. */
static void bin_try_enable( struct heap *heap, struct bin *bin )
{
    ULONG alloc = ReadNoFence( &bin->count_alloc ), freed = ReadNoFence( &bin->count_freed );
    SIZE_T block_size = BLOCK_BIN_SIZE( bin - heap->bins );
    BOOL enable = FALSE;

    /* activation thresholds; smaller size classes enable more eagerly */
    if (bin == heap->bins && alloc > 0x10) enable = TRUE;
    else if (bin - heap->bins < 0x30 && alloc > 0x800) enable = TRUE;
    else if (bin - heap->bins < 0x30 && alloc - freed > 0x10) enable = TRUE;
    else if (alloc - freed > 0x400000 / block_size) enable = TRUE;
    if (!enable) return;

    if (ReadNoFence( &heap->compat_info ) != HEAP_LFH)
    {
        ULONG info = HEAP_LFH;
        RtlSetHeapInformation( heap, HeapCompatibilityInformation, &info, sizeof(info) );
    }

    /* paired with ReadAcquire in heap_allocate_block_lfh.
     *
     * The acq/rel barrier on the enabled flag is protecting compat_info
     * (i.e. compat_info := LFH happens-before enabled := TRUE), so that
     * a caller that observes LFH block allocation (alloc request
     * succeeds without heap lock) will never observe HEAP_STD when it
     * queries the heap.
     */
    WriteRelease( &bin->enabled, TRUE );
}
/* Return the detaching thread's per-affinity groups to the shared bin lists
 * so other threads can reuse them. */
static void heap_thread_detach_bin_groups( struct heap *heap )
{
    ULONG i, affinity = NtCurrentTeb()->HeapVirtualAffinity;

    if (!heap->bins) return;  /* LFH never enabled on this heap */

    for (i = 0; i < BLOCK_SIZE_BIN_COUNT; ++i)
    {
        struct bin *bin = heap->bins + i;
        struct group *group;
        if (!(group = InterlockedExchangePointer( (void *)bin_get_affinity_group( bin, affinity ), NULL ))) continue;
        RtlInterlockedPushEntrySList( &bin->groups, &group->entry );
    }
}
/* Called on thread exit: hand every heap's thread-affine LFH groups back to
 * their shared lists.  Iterates all process heaps under the process heap lock. */
void heap_thread_detach(void)
{
    struct heap *heap;

    RtlEnterCriticalSection( &process_heap->cs );

    LIST_FOR_EACH_ENTRY( heap, &process_heap->entry, struct heap, entry )
        heap_thread_detach_bin_groups( heap );

    heap_thread_detach_bin_groups( process_heap );

    RtlLeaveCriticalSection( &process_heap->cs );
}
/***********************************************************************
 *           RtlAllocateHeap   (NTDLL.@)
 *
 * Allocate size bytes from the heap: large blocks go to the virtual-memory
 * path, LFH-eligible sizes try the lock-free front end, everything else
 * takes the heap lock.  Note: no NULL-heap check here — like native, an
 * invalid handle is not tolerated on this hot path.
 */
void *WINAPI DECLSPEC_HOTPATCH RtlAllocateHeap( HANDLE handle, ULONG flags, SIZE_T size )
{
    struct heap *heap;
    SIZE_T block_size;
    void *ptr = NULL;
    ULONG heap_flags;
    NTSTATUS status;

    heap = unsafe_heap_from_handle( handle, flags, &heap_flags );
    if ((block_size = heap_get_block_size( heap, heap_flags, size )) == ~0U)
        status = STATUS_NO_MEMORY;
    else if (block_size >= HEAP_MIN_LARGE_BLOCK_SIZE)
        status = heap_allocate_large( heap, heap_flags, block_size, size, &ptr );
    else if (heap->bins && !heap_allocate_block_lfh( heap, heap_flags, block_size, size, &ptr ))
        status = STATUS_SUCCESS;
    else
    {
        heap_lock( heap, heap_flags );
        status = heap_allocate_block( heap, heap_flags, block_size, size, &ptr );
        heap_unlock( heap, heap_flags );

        if (!status && heap->bins)
        {
            /* update LFH activation counters for this size class */
            SIZE_T bin = BLOCK_SIZE_BIN( block_get_size( (struct block *)ptr - 1 ) );
            InterlockedIncrement( &heap->bins[bin].count_alloc );
            if (!ReadNoFence( &heap->bins[bin].enabled )) bin_try_enable( heap, &heap->bins[bin] );
        }
    }

    if (!status) valgrind_notify_alloc( ptr, size, flags & HEAP_ZERO_MEMORY );

    TRACE( "handle %p, flags %#lx, size %#Ix, return %p, status %#lx.\n", handle, flags, size, ptr, status );
    heap_set_status( heap, flags, status );
    return ptr;
}
/***********************************************************************
 *           RtlFreeHeap   (NTDLL.@)
 *
 * Free a block previously returned by RtlAllocateHeap.  Freeing NULL
 * succeeds; large blocks, delayed frees and LFH blocks each take their
 * own path before falling back to the locked main-heap free.
 */
BOOLEAN WINAPI DECLSPEC_HOTPATCH RtlFreeHeap( HANDLE handle, ULONG flags, void *ptr )
{
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    NTSTATUS status;

    if (!ptr) return TRUE;

    valgrind_notify_free( ptr );

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_PARAMETER;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        status = STATUS_INVALID_PARAMETER;
    else if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
        status = heap_free_large( heap, heap_flags, block );
    else if (!(block = heap_delay_free( heap, heap_flags, block )))
        status = STATUS_SUCCESS;  /* free was deferred by the delay-free queue */
    else if (!heap_free_block_lfh( heap, heap_flags, block ))
        status = STATUS_SUCCESS;
    else
    {
        SIZE_T block_size = block_get_size( block ), bin = BLOCK_SIZE_BIN( block_size );

        heap_lock( heap, heap_flags );
        status = heap_free_block( heap, heap_flags, block );
        heap_unlock( heap, heap_flags );

        if (!status && heap->bins) InterlockedIncrement( &heap->bins[bin].count_freed );
    }

    TRACE( "handle %p, flags %#lx, ptr %p, return %u, status %#lx.\n", handle, flags, ptr, !status, status );
    heap_set_status( heap, flags, status );
    return !status;
}
/* Resize a large (VirtualAlloc-backed) block in place.  Only shrinking or
 * growing within the already-reserved block_size is supported; otherwise
 * STATUS_NO_MEMORY makes the caller reallocate and copy. */
static NTSTATUS heap_resize_large( struct heap *heap, ULONG flags, struct block *block, SIZE_T block_size,
                                   SIZE_T size, SIZE_T *old_size, void **ret )
{
    ARENA_LARGE *large = CONTAINING_RECORD( block, ARENA_LARGE, block );
    SIZE_T old_block_size = large->block_size;
    *old_size = large->data_size;

    if (old_block_size < block_size) return STATUS_NO_MEMORY;

    /* FIXME: we could remap zero-pages instead */
    valgrind_notify_resize( block + 1, *old_size, size );
    initialize_block( block, *old_size, size, flags );

    large->data_size = size;
    valgrind_make_noaccess( (char *)block + sizeof(*block) + large->data_size,
                            old_block_size - sizeof(*block) - large->data_size );

    *ret = block + 1;
    return STATUS_SUCCESS;
}
/* Resize a regular heap block in place, growing into the following free
 * block when possible and splitting off any leftover as a new free block.
 * Caller holds the heap lock. */
static NTSTATUS heap_resize_block( struct heap *heap, ULONG flags, struct block *block, SIZE_T block_size,
                                   SIZE_T size, SIZE_T old_block_size, SIZE_T *old_size, void **ret )
{
    SUBHEAP *subheap = block_get_subheap( heap, block );
    struct block *next;

    if (block_size > old_block_size)
    {
        /* need to grow block, make sure it's followed by large enough free block */
        if (!(next = next_block( subheap, block ))) return STATUS_NO_MEMORY;
        if (!(block_get_flags( next ) & BLOCK_FLAG_FREE)) return STATUS_NO_MEMORY;
        if (block_size > old_block_size + block_get_size( next )) return STATUS_NO_MEMORY;
        if (!subheap_commit( heap, subheap, block, block_size )) return STATUS_NO_MEMORY;
    }

    if ((next = next_block( subheap, block )) && (block_get_flags( next ) & BLOCK_FLAG_FREE))
    {
        /* merge with next block if it is free */
        struct entry *entry = (struct entry *)next;
        list_remove( &entry->entry );
        old_block_size += block_get_size( next );
    }

    if ((next = split_block( heap, flags, block, old_block_size, block_size )))
    {
        /* give the tail back to the free lists */
        block_init_free( next, flags, subheap, old_block_size - block_size );
        insert_free_block( heap, flags, subheap, next );
    }

    valgrind_notify_resize( block + 1, *old_size, size );
    block_set_flags( block, BLOCK_FLAG_USER_MASK & ~BLOCK_FLAG_USER_INFO, BLOCK_USER_FLAGS( flags ) );
    block->tail_size = block_get_size( block ) - sizeof(*block) - size;
    initialize_block( block, *old_size, size, flags );
    mark_block_tail( block, flags );

    /* the block that follows us is no longer preceded by a free block */
    if ((next = next_block( subheap, block ))) block_set_flags( next, BLOCK_FLAG_PREV_FREE, 0 );

    *ret = block + 1;
    return STATUS_SUCCESS;
}
/* Resize an LFH block in place.  Only a shrink within the same rounded size
 * is accepted, mirroring native LFH behaviour. */
static NTSTATUS heap_resize_block_lfh( struct block *block, ULONG flags, SIZE_T block_size, SIZE_T size, SIZE_T *old_size, void **ret )
{
    /* as native LFH does it with different block size: refuse to resize even though we could */
    if (ROUND_SIZE( *old_size, BLOCK_ALIGN - 1 ) != ROUND_SIZE( size, BLOCK_ALIGN - 1 )) return STATUS_NO_MEMORY;
    if (size >= *old_size) return STATUS_NO_MEMORY;

    block_set_flags( block, BLOCK_FLAG_USER_MASK & ~BLOCK_FLAG_USER_INFO, BLOCK_USER_FLAGS( flags ) );
    block->tail_size = block_size - sizeof(*block) - size;
    initialize_block( block, *old_size, size, flags );
    mark_block_tail( block, flags );

    *ret = block + 1;
    return STATUS_SUCCESS;
}
/* Dispatch an in-place resize to the large / LFH / regular block path.
 * On the regular path, updates the LFH activation counters of both the
 * old and new size classes. */
static NTSTATUS heap_resize_in_place( struct heap *heap, ULONG flags, struct block *block, SIZE_T block_size,
                                      SIZE_T size, SIZE_T *old_size, void **ret )
{
    SIZE_T old_bin, old_block_size;
    NTSTATUS status;

    if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
        return heap_resize_large( heap, flags, block, block_size, size, old_size, ret );

    old_block_size = block_get_size( block );
    *old_size = old_block_size - block_get_overhead( block );
    old_bin = BLOCK_SIZE_BIN( old_block_size );

    if (block_size >= HEAP_MIN_LARGE_BLOCK_SIZE) return STATUS_NO_MEMORY;  /* growing small block to large block */

    if (block_get_flags( block ) & BLOCK_FLAG_LFH)
        return heap_resize_block_lfh( block, flags, block_size, size, old_size, ret );

    heap_lock( heap, flags );
    status = heap_resize_block( heap, flags, block, block_size, size, old_block_size, old_size, ret );
    heap_unlock( heap, flags );

    if (!status && heap->bins)
    {
        /* account the resize as a free in the old bin and an alloc in the new one */
        SIZE_T new_bin = BLOCK_SIZE_BIN( block_size );
        InterlockedIncrement( &heap->bins[old_bin].count_freed );
        InterlockedIncrement( &heap->bins[new_bin].count_alloc );
        if (!ReadNoFence( &heap->bins[new_bin].enabled )) bin_try_enable( heap, &heap->bins[new_bin] );
    }

    return status;
}
/***********************************************************************
 *           RtlReAllocateHeap   (NTDLL.@)
 *
 * Resize an allocated block, first trying in place, then falling back to
 * allocate + copy + free unless HEAP_REALLOC_IN_PLACE_ONLY is set.
 * Returns NULL when ptr is NULL (native does not allocate in that case).
 */
void *WINAPI RtlReAllocateHeap( HANDLE handle, ULONG flags, void *ptr, SIZE_T size )
{
    SIZE_T block_size, old_size;
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    void *ret = NULL;
    NTSTATUS status;

    if (!ptr) return NULL;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_HANDLE;
    else if ((block_size = heap_get_block_size( heap, heap_flags, size )) == ~0U)
        status = STATUS_NO_MEMORY;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        status = STATUS_INVALID_PARAMETER;
    else if ((status = heap_resize_in_place( heap, heap_flags, block, block_size, size,
                                             &old_size, &ret )))
    {
        if (flags & HEAP_REALLOC_IN_PLACE_ONLY)
            status = STATUS_NO_MEMORY;
        else if (!(ret = RtlAllocateHeap( heap, flags, size )))
            status = STATUS_NO_MEMORY;
        else
        {
            /* move the data to the new block and release the old one */
            memcpy( ret, ptr, min( size, old_size ) );
            RtlFreeHeap( heap, flags, ptr );
            status = STATUS_SUCCESS;
        }
    }

    TRACE( "handle %p, flags %#lx, ptr %p, size %#Ix, return %p, status %#lx.\n", handle, flags, ptr, size, ret, status );
    heap_set_status( heap, flags, status );
    return ret;
}
/***********************************************************************
 *           RtlCompactHeap   (NTDLL.@)
 *
 * Compact the free space in a Heap.
 *
 * PARAMS
 *  heap  [I] Heap that block was allocated from
 *  flags [I] HEAP_ flags from "winnt.h"
 *
 * RETURNS
 *  The number of bytes compacted.
 *
 * NOTES
 *  This function is a harmless stub.
 */
ULONG WINAPI RtlCompactHeap( HANDLE handle, ULONG flags )
{
    static BOOL reported;
    /* warn only once to avoid log spam */
    if (!reported++) FIXME( "handle %p, flags %#lx stub!\n", handle, flags );
    return 0;
}
/***********************************************************************
 *           RtlLockHeap   (NTDLL.@)
 *
 * PARAMS
 *  heap  [I] Heap to lock
 *
 * RETURNS
 *  Success: TRUE. The Heap is locked.
 *  Failure: FALSE, if heap is invalid.
 */
BOOLEAN WINAPI RtlLockHeap( HANDLE handle )
{
    struct heap *heap;
    ULONG heap_flags;
    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags ))) return FALSE;
    heap_lock( heap, heap_flags );
    return TRUE;
}
/***********************************************************************
 *           RtlUnlockHeap   (NTDLL.@)
 *
 * PARAMS
 *  heap  [I] Heap to unlock
 *
 * RETURNS
 *  Success: TRUE. The Heap is unlocked.
 *  Failure: FALSE, if heap is invalid.
 */
BOOLEAN WINAPI RtlUnlockHeap( HANDLE handle )
{
    struct heap *heap;
    ULONG heap_flags;
    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags ))) return FALSE;
    heap_unlock( heap, heap_flags );
    return TRUE;
}
/* Return the usable data size of a block: the recorded data_size for large
 * blocks, block size minus overhead otherwise.  Caller holds the heap lock. */
static NTSTATUS heap_size( const struct heap *heap, struct block *block, SIZE_T *size )
{
    if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
    {
        const ARENA_LARGE *large_arena = CONTAINING_RECORD( block, ARENA_LARGE, block );
        *size = large_arena->data_size;
    }
    else *size = block_get_size( block ) - block_get_overhead( block );

    return STATUS_SUCCESS;
}
/***********************************************************************
 *           RtlSizeHeap   (NTDLL.@)
 *
 * Return the usable size of an allocated block, or ~(SIZE_T)0 on failure.
 */
SIZE_T WINAPI RtlSizeHeap( HANDLE handle, ULONG flags, const void *ptr )
{
    SIZE_T size = ~(SIZE_T)0;  /* error value returned on invalid heap/ptr */
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    NTSTATUS status;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_INVALID_PARAMETER;
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        status = STATUS_INVALID_PARAMETER;
    else
    {
        heap_lock( heap, heap_flags );
        status = heap_size( heap, block, &size );
        heap_unlock( heap, heap_flags );
    }

    TRACE( "handle %p, flags %#lx, ptr %p, return %#Ix, status %#lx.\n", handle, flags, ptr, size, status );
    heap_set_status( heap, flags, status );
    return size;
}
/***********************************************************************
 *           RtlValidateHeap   (NTDLL.@)
 *
 * Validate a single block (ptr != NULL) or the whole heap (ptr == NULL).
 */
BOOLEAN WINAPI RtlValidateHeap( HANDLE handle, ULONG flags, const void *ptr )
{
    struct heap *heap;
    ULONG heap_flags;
    BOOLEAN ret = FALSE;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        ret = FALSE;
    else
    {
        heap_lock( heap, heap_flags );
        if (ptr) ret = heap_validate_ptr( heap, ptr );
        else ret = heap_validate( heap );
        heap_unlock( heap, heap_flags );
    }

    TRACE( "handle %p, flags %#lx, ptr %p, return %u.\n", handle, flags, ptr, !!ret );
    return ret;
}
/* Advance a heap walk to the next block inside a subheap, filling *entry.
 * Returns STATUS_NO_MORE_ENTRIES once the subheap (including its trailing
 * uncommitted range) has been fully reported.  Caller holds the heap lock. */
static NTSTATUS heap_walk_blocks( const struct heap *heap, const SUBHEAP *subheap,
                                  const struct block *block, struct rtl_heap_entry *entry )
{
    const char *base = subheap_base( subheap ), *commit_end = subheap_commit_end( subheap ), *end = base + subheap_size( subheap );
    const struct block *blocks = first_block( subheap );

    if (entry->lpData == commit_end) return STATUS_NO_MORE_ENTRIES;
    if (entry->lpData == base) block = blocks;  /* walk restarts at the region header */
    else if (!(block = next_block( subheap, block )))
    {
        /* past the last block: report the uncommitted tail of the subheap */
        entry->lpData = (void *)commit_end;
        entry->cbData = end - commit_end;
        entry->cbOverhead = 0;
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_UNCOMMITTED;
        return STATUS_SUCCESS;
    }

    if (block_get_flags( block ) & BLOCK_FLAG_FREE)
    {
        entry->lpData = (char *)block + block_get_overhead( block );
        entry->cbData = block_get_size( block ) - block_get_overhead( block );
        /* FIXME: last free block should not include uncommitted range, which also has its own overhead */
        if (!contains( blocks, commit_end - 4 * BLOCK_ALIGN - (char *)blocks, block, block_get_size( block ) ))
            entry->cbData = commit_end - 4 * BLOCK_ALIGN - (char *)entry->lpData;
        entry->cbOverhead = 2 * BLOCK_ALIGN;
        entry->iRegionIndex = 0;
        entry->wFlags = 0;
    }
    else
    {
        entry->lpData = (void *)(block + 1);
        entry->cbData = block_get_size( block ) - block_get_overhead( block );
        entry->cbOverhead = block_get_overhead( block );
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_COMMITTED|RTL_HEAP_ENTRY_BLOCK|RTL_HEAP_ENTRY_BUSY;
    }

    return STATUS_SUCCESS;
}
/* One step of a heap walk: decode the block behind entry->lpData from the
 * previous step's flags, continue inside its subheap, then move on to the
 * next subheap region and finally to the large block list.
 * Caller holds the heap lock. */
static NTSTATUS heap_walk( const struct heap *heap, struct rtl_heap_entry *entry )
{
    const char *data = entry->lpData;
    const ARENA_LARGE *large = NULL;
    const struct block *block;
    const struct list *next;
    const SUBHEAP *subheap;
    NTSTATUS status;
    char *base;

    /* recover the block header from what the previous iteration reported */
    if (!data || entry->wFlags & RTL_HEAP_ENTRY_REGION) block = (struct block *)data;
    else if (entry->wFlags & RTL_HEAP_ENTRY_BUSY) block = (struct block *)data - 1;
    else block = (struct block *)(data - sizeof(struct list)) - 1;

    if ((large = find_arena_large( heap, block, TRUE )))
    {
        /* previous entry was a large block: continue in the large list */
        next = &large->entry;
    }
    else if ((subheap = find_subheap( heap, block, TRUE )))
    {
        if (!(status = heap_walk_blocks( heap, subheap, block, entry ))) return STATUS_SUCCESS;
        else if (status != STATUS_NO_MORE_ENTRIES) return status;
        next = &subheap->entry;
    }
    else
    {
        /* a non-NULL pointer that belongs to neither list is invalid */
        if (entry->lpData) return STATUS_INVALID_PARAMETER;
        next = &heap->subheap_list;
    }

    if (!large && (next = list_next( &heap->subheap_list, next )))
    {
        /* report the next subheap as a region entry */
        subheap = LIST_ENTRY( next, SUBHEAP, entry );
        base = subheap_base( subheap );
        entry->lpData = base;
        entry->cbData = subheap_overhead( subheap );
        entry->cbOverhead = 0;
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_REGION;
        entry->Region.dwCommittedSize = (char *)subheap_commit_end( subheap ) - base;
        entry->Region.dwUnCommittedSize = subheap_size( subheap ) - entry->Region.dwCommittedSize;
        entry->Region.lpFirstBlock = base + entry->cbData;
        entry->Region.lpLastBlock = base + subheap_size( subheap );
        return STATUS_SUCCESS;
    }

    if (!next) next = &heap->large_list;
    if ((next = list_next( &heap->large_list, next )))
    {
        large = LIST_ENTRY( next, ARENA_LARGE, entry );
        entry->lpData = (void *)(large + 1);
        entry->cbData = large->data_size;
        entry->cbOverhead = 0;
        entry->iRegionIndex = 64;  /* native reports large blocks with region index 64 */
        entry->wFlags = RTL_HEAP_ENTRY_COMMITTED|RTL_HEAP_ENTRY_BLOCK|RTL_HEAP_ENTRY_BUSY;
        return STATUS_SUCCESS;
    }

    return STATUS_NO_MORE_ENTRIES;
}
/***********************************************************************
 *           RtlWalkHeap    (NTDLL.@)
 *
 * Enumerate heap entries one at a time; entry_ptr carries the walk state
 * between calls (undocumented rtl_heap_entry layout).
 */
NTSTATUS WINAPI RtlWalkHeap( HANDLE handle, void *entry_ptr )
{
    struct rtl_heap_entry *entry = entry_ptr;
    struct heap *heap;
    ULONG heap_flags;
    NTSTATUS status;

    if (!entry) return STATUS_INVALID_PARAMETER;

    if (!(heap = unsafe_heap_from_handle( handle, 0, &heap_flags )))
        status = STATUS_INVALID_HANDLE;
    else
    {
        heap_lock( heap, heap_flags );
        status = heap_walk( heap, entry );
        heap_unlock( heap, heap_flags );
    }

    TRACE( "handle %p, entry %p %s, return %#lx\n", handle, entry,
           status ? "<empty>" : debugstr_heap_entry(entry), status );
    return status;
}
/***********************************************************************
 *           RtlGetProcessHeaps    (NTDLL.@)
 *
 * Get the Heaps belonging to the current process.
 *
 * PARAMS
 *  count [I] size of heaps
 *  heaps [O] Destination array for heap HANDLE's
 *
 * RETURNS
 *  Success: The number of Heaps allocated by the process.
 */
ULONG WINAPI RtlGetProcessHeaps( ULONG count, HANDLE *heaps )
{
    ULONG total = 1;  /* main heap */
    struct list *ptr;

    RtlEnterCriticalSection( &process_heap->cs );
    LIST_FOR_EACH( ptr, &process_heap->entry ) total++;
    if (total <= count)
    {
        /* main heap first, then the secondary heaps in list order */
        *heaps++ = process_heap;
        LIST_FOR_EACH( ptr, &process_heap->entry )
            *heaps++ = LIST_ENTRY( ptr, struct heap, entry );
    }
    RtlLeaveCriticalSection( &process_heap->cs );
    return total;
}
/***********************************************************************
 *           RtlQueryHeapInformation    (NTDLL.@)
 *
 * Only HeapCompatibilityInformation is implemented; it reports whether the
 * heap front end is standard (HEAP_STD) or low-fragmentation (HEAP_LFH).
 */
NTSTATUS WINAPI RtlQueryHeapInformation( HANDLE handle, HEAP_INFORMATION_CLASS info_class,
                                         void *info, SIZE_T size_in, SIZE_T *size_out )
{
    struct heap *heap;
    ULONG flags;

    TRACE( "handle %p, info_class %u, info %p, size_in %Iu, size_out %p.\n", handle, info_class, info, size_in, size_out );

    switch (info_class)
    {
    case HeapCompatibilityInformation:
        if (!(heap = unsafe_heap_from_handle( handle, 0, &flags ))) return STATUS_ACCESS_VIOLATION;
        if (size_out) *size_out = sizeof(ULONG);
        if (size_in < sizeof(ULONG)) return STATUS_BUFFER_TOO_SMALL;
        /* lock-free read: compat_info may be flipped concurrently by bin_try_enable */
        *(ULONG *)info = ReadNoFence( &heap->compat_info );
        return STATUS_SUCCESS;

    default:
        FIXME( "HEAP_INFORMATION_CLASS %u not implemented!\n", info_class );
        return STATUS_INVALID_INFO_CLASS;
    }
}
/***********************************************************************
 *           RtlSetHeapInformation    (NTDLL.@)
 *
 * Only HeapCompatibilityInformation is implemented: switch the heap from
 * HEAP_STD to HEAP_LFH (one-way; a second switch fails like on native).
 */
NTSTATUS WINAPI RtlSetHeapInformation( HANDLE handle, HEAP_INFORMATION_CLASS info_class, void *info, SIZE_T size )
{
    struct heap *heap;
    ULONG flags;

    TRACE( "handle %p, info_class %u, info %p, size %Iu.\n", handle, info_class, info, size );

    switch (info_class)
    {
    case HeapCompatibilityInformation:
    {
        ULONG compat_info;

        if (size < sizeof(ULONG)) return STATUS_BUFFER_TOO_SMALL;
        if (!(heap = unsafe_heap_from_handle( handle, 0, &flags ))) return STATUS_INVALID_HANDLE;
        /* LFH cannot be used together with HEAP_NO_SERIALIZE */
        if (heap->flags & HEAP_NO_SERIALIZE) return STATUS_INVALID_PARAMETER;

        compat_info = *(ULONG *)info;
        if (compat_info != HEAP_STD && compat_info != HEAP_LFH)
        {
            FIXME( "HeapCompatibilityInformation %lu not implemented!\n", compat_info );
            return STATUS_UNSUCCESSFUL;
        }
        /* only a transition away from HEAP_STD is allowed, atomically */
        if (InterlockedCompareExchange( &heap->compat_info, compat_info, HEAP_STD ) != HEAP_STD)
            return STATUS_UNSUCCESSFUL;
        return STATUS_SUCCESS;
    }

    default:
        FIXME( "HEAP_INFORMATION_CLASS %u not implemented!\n", info_class );
        return STATUS_SUCCESS;
    }
}
/***********************************************************************
 *           RtlGetUserInfoHeap    (NTDLL.@)
 *
 * Retrieve the user value and user flags stored with a block.  For regular
 * blocks the value lives in the block tail, after the unused bytes (and the
 * tail-check pattern when tail checking / valgrind is active); large blocks
 * keep it in their ARENA_LARGE header.
 */
BOOLEAN WINAPI RtlGetUserInfoHeap( HANDLE handle, ULONG flags, void *ptr, void **user_value, ULONG *user_flags )
{
    NTSTATUS status = STATUS_SUCCESS;
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    char *tmp;

    TRACE( "handle %p, flags %#lx, ptr %p, user_value %p, user_flags %p semi-stub!\n",
           handle, flags, ptr, user_value, user_flags );

    *user_flags = 0;

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        status = STATUS_SUCCESS;  /* invalid heap still reports success here */
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
    {
        status = STATUS_INVALID_PARAMETER;
        *user_value = 0;
    }
    else if (!(*user_flags = HEAP_USER_FLAGS(block_get_flags( block ))))
        WARN( "Block %p wasn't allocated with user info\n", ptr );
    else if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
    {
        const ARENA_LARGE *large = CONTAINING_RECORD( block, ARENA_LARGE, block );
        *user_flags = *user_flags & ~HEAP_ADD_USER_INFO;
        *user_value = large->user_value;
    }
    else
    {
        heap_lock( heap, heap_flags );

        /* user value is stored in the block tail, past the unused-size slot;
         * tail checking / valgrind shifts it by one alignment unit */
        tmp = (char *)block + block_get_size( block ) - block->tail_size + sizeof(void *);
        if ((heap_flags & HEAP_TAIL_CHECKING_ENABLED) || RUNNING_ON_VALGRIND) tmp += BLOCK_ALIGN;
        *user_flags = *user_flags & ~HEAP_ADD_USER_INFO;
        *user_value = *(void **)tmp;

        heap_unlock( heap, heap_flags );
    }

    heap_set_status( heap, flags, status );
    return !status;
}
/***********************************************************************
 *           RtlSetUserValueHeap    (NTDLL.@)
 *
 * Store a user value with a block allocated with HEAP_ADD_USER_INFO.
 * Fails (FALSE) when the pointer is invalid or the block was not
 * allocated with user info; uses the same tail slot layout as
 * RtlGetUserInfoHeap.
 */
BOOLEAN WINAPI RtlSetUserValueHeap( HANDLE handle, ULONG flags, void *ptr, void *user_value )
{
    struct block *block;
    struct heap *heap;
    ULONG heap_flags;
    BOOLEAN ret;
    char *tmp;

    TRACE( "handle %p, flags %#lx, ptr %p, user_value %p.\n", handle, flags, ptr, user_value );

    if (!(heap = unsafe_heap_from_handle( handle, flags, &heap_flags )))
        ret = TRUE;  /* invalid heap: native reports success without storing */
    else if (!(block = unsafe_block_from_ptr( heap, heap_flags, ptr )))
        ret = FALSE;
    else if (!(block_get_flags( block ) & BLOCK_FLAG_USER_INFO))
        ret = FALSE;
    else if (block_get_flags( block ) & BLOCK_FLAG_LARGE)
    {
        ARENA_LARGE *large = CONTAINING_RECORD( block, ARENA_LARGE, block );
        large->user_value = user_value;
        ret = TRUE;
    }
    else
    {
        heap_lock( heap, heap_flags );

        /* same tail slot as RtlGetUserInfoHeap reads */
        tmp = (char *)block + block_get_size( block ) - block->tail_size + sizeof(void *);
        if ((heap_flags & HEAP_TAIL_CHECKING_ENABLED) || RUNNING_ON_VALGRIND) tmp += BLOCK_ALIGN;
        *(void **)tmp = user_value;
        ret = TRUE;

        heap_unlock( heap, heap_flags );
    }

    return ret;
}
2723 /***********************************************************************
2724 * RtlSetUserFlagsHeap (NTDLL.@)
2726 BOOLEAN WINAPI
RtlSetUserFlagsHeap( HANDLE handle
, ULONG flags
, void *ptr
, ULONG clear
, ULONG set
)
2728 struct block
*block
;
2733 TRACE( "handle %p, flags %#lx, ptr %p, clear %#lx, set %#lx.\n", handle
, flags
, ptr
, clear
, set
);
2735 if ((clear
| set
) & ~(0xe00))
2737 SetLastError( ERROR_INVALID_PARAMETER
);
2741 if (!(heap
= unsafe_heap_from_handle( handle
, flags
, &heap_flags
)))
2743 else if (!(block
= unsafe_block_from_ptr( heap
, heap_flags
, ptr
)))
2745 else if (!(block_get_flags( block
) & BLOCK_FLAG_USER_INFO
))
2749 block_set_flags( block
, BLOCK_USER_FLAGS( clear
), BLOCK_USER_FLAGS( set
) );