4 * Copyright 1996 Alexandre Julliard
5 * Copyright 1998 Ulrich Weigand
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
28 #define RUNNING_ON_VALGRIND 0 /* FIXME */
31 #define WIN32_NO_STATUS
32 #define NONAMELESSUNION
36 #include "ntdll_misc.h"
37 #include "wine/list.h"
38 #include "wine/debug.h"
40 WINE_DEFAULT_DEBUG_CHANNEL(heap);
42 /* undocumented RtlWalkHeap structure */
47     SIZE_T cbData;              /* differs from PROCESS_HEAP_ENTRY */
50     WORD wFlags;                /* value differs from PROCESS_HEAP_ENTRY */
57     DWORD dwCommittedSize;
58     DWORD dwUnCommittedSize;
65 /* rtl_heap_entry flags, names made up */
67 #define RTL_HEAP_ENTRY_BUSY 0x0001
68 #define RTL_HEAP_ENTRY_REGION 0x0002
69 #define RTL_HEAP_ENTRY_BLOCK 0x0010
70 #define RTL_HEAP_ENTRY_UNCOMMITTED 0x1000
71 #define RTL_HEAP_ENTRY_COMMITTED 0x4000
72 #define RTL_HEAP_ENTRY_LFH 0x8000
75 /* header for heap blocks */
79     DWORD size;                 /* Block size; must be the first field */
80     DWORD magic : 24;           /* Magic number */
81     DWORD unused_bytes : 8;     /* Number of bytes in the block not used by user data (max value is HEAP_MIN_DATA_SIZE+HEAP_MIN_SHRINK_SIZE) */
84 C_ASSERT( sizeof(struct block) == 8 );
87 /* entry to link free blocks in free lists */
91     DWORD size;                 /* Block size; must be the first field */
92     DWORD magic;                /* Magic number */
93     struct list entry;          /* Entry in free list */
98     struct list entry;          /* entry in heap large blocks list */
99     SIZE_T data_size;           /* size of user data */
100    SIZE_T block_size;          /* total size of virtual memory block */
101    DWORD pad[2];               /* padding to ensure 16-byte alignment of data */
102    DWORD size;                 /* fields for compatibility with normal arenas */
103    DWORD magic;                /* these must remain at the end of the structure */
106 #define ARENA_FLAG_FREE 0x00000001 /* flags OR'ed with arena size */
107 #define ARENA_FLAG_PREV_FREE 0x00000002
108 #define ARENA_SIZE_MASK (~3)
109 #define ARENA_LARGE_SIZE 0xfedcba90 /* magic value for 'size' field in large blocks */
111 /* Value for arena 'magic' field */
112 #define ARENA_INUSE_MAGIC 0x455355
113 #define ARENA_PENDING_MAGIC 0xbedead
114 #define ARENA_FREE_MAGIC 0x45455246
115 #define ARENA_LARGE_MAGIC 0x6752614c
117 #define ARENA_INUSE_FILLER 0x55
118 #define ARENA_TAIL_FILLER 0xab
119 #define ARENA_FREE_FILLER 0xfeeefeee
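120 /* Illustrative note (added, not in the original): the low two bits of an arena's 'size'
 *  field hold the ARENA_FLAG_* bits above and the rest holds the data size, so a free block
 *  with 0x40 bytes of data stores 0x40 | ARENA_FLAG_FREE == 0x41; ARENA_SIZE_MASK recovers
 *  the plain size again. */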
121 /* everything is aligned on 8 byte boundaries (16 for Win64) */
122 #define ALIGNMENT (2*sizeof(void*))
123 #define LARGE_ALIGNMENT 16 /* large blocks have stricter alignment */
124 #define ARENA_OFFSET (ALIGNMENT - sizeof(ARENA_INUSE))
126 C_ASSERT( sizeof(ARENA_LARGE) % LARGE_ALIGNMENT == 0 );
128 #define ROUND_SIZE(size) ((((size) + ALIGNMENT - 1) & ~(ALIGNMENT-1)) + ARENA_OFFSET)
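129 /* Worked example (illustrative 64-bit values): with ALIGNMENT == 16 and
 *  ARENA_OFFSET == 16 - sizeof(ARENA_INUSE) == 8, ROUND_SIZE(24) == ((24 + 15) & ~15) + 8 == 40. */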
130 /* minimum data size (without arenas) of an allocated block */
131 /* make sure that it's larger than a free list entry */
132 #define HEAP_MIN_DATA_SIZE ROUND_SIZE(2 * sizeof(struct list))
133 #define HEAP_MIN_ARENA_SIZE (HEAP_MIN_DATA_SIZE + sizeof(ARENA_INUSE))
134 /* minimum size that must remain to shrink an allocated block */
135 #define HEAP_MIN_SHRINK_SIZE (HEAP_MIN_DATA_SIZE+sizeof(ARENA_FREE))
136 /* minimum size to start allocating large blocks */
137 #define HEAP_MIN_LARGE_BLOCK_SIZE (0x10000 * ALIGNMENT - 0x1000)
138 /* extra size to add at the end of block for tail checking */
139 #define HEAP_TAIL_EXTRA_SIZE(flags) \
140 ((flags & HEAP_TAIL_CHECKING_ENABLED) || RUNNING_ON_VALGRIND ? ALIGNMENT : 0)
142 /* There will be a free list bucket for every arena size up to and including this value */
143 #define HEAP_MAX_SMALL_FREE_LIST 0x100
144 C_ASSERT( HEAP_MAX_SMALL_FREE_LIST % ALIGNMENT == 0 );
145 #define HEAP_NB_SMALL_FREE_LISTS (((HEAP_MAX_SMALL_FREE_LIST - HEAP_MIN_ARENA_SIZE) / ALIGNMENT) + 1)
147 /* Max size of the blocks on the free lists above HEAP_MAX_SMALL_FREE_LIST */
148 static const SIZE_T HEAP_freeListSizes[] =
150     0x200, 0x400, 0x1000, ~(SIZE_T)0
152 #define HEAP_NB_FREE_LISTS (ARRAY_SIZE( HEAP_freeListSizes ) + HEAP_NB_SMALL_FREE_LISTS)
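153 /* Illustrative mapping (added; assumes 64-bit values, HEAP_MIN_ARENA_SIZE == 0x30, ALIGNMENT == 16):
 *  get_freelist_index() puts block sizes 0x30..0x100 into small list (size - 0x30) / 16, and larger
 *  sizes into the 0x200 / 0x400 / 0x1000 / unlimited buckets of HEAP_freeListSizes above. */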
162 typedef struct tagSUBHEAP
164     void *base;                 /* Base address of the sub-heap memory block */
165     SIZE_T size;                /* Size of the whole sub-heap */
166     SIZE_T min_commit;          /* Minimum committed size */
167     SIZE_T commitSize;          /* Committed size of the sub-heap */
168     struct list entry;          /* Entry in sub-heap list */
169     struct tagHEAP *heap;       /* Main heap structure */
170     DWORD headerSize;           /* Size of the heap header */
171     DWORD magic;                /* Magic number */
174 #define SUBHEAP_MAGIC ((DWORD)('S' | ('U'<<8) | ('B'<<16) | ('H'<<24)))
176 typedef struct tagHEAP
178     DWORD_PTR unknown1[2];      /* 0000/0000 */
179     DWORD ffeeffee;             /* 0008/0010 */
180     DWORD auto_flags;           /* 000c/0014 */
181     DWORD_PTR unknown2[7];      /* 0010/0018 */
182     DWORD unknown3[2];          /* 002c/0050 */
183     DWORD_PTR unknown4[3];      /* 0034/0058 */
184     DWORD flags;                /* 0040/0070 */
185     DWORD force_flags;          /* 0044/0074 */
186 /* end of the Windows 10 compatible struct layout */
188     BOOL shared;                /* System shared heap */
189     SUBHEAP subheap;            /* First sub-heap */
190     struct list entry;          /* Entry in process heap list */
191     struct list subheap_list;   /* Sub-heap list */
192     struct list large_list;     /* Large blocks list */
193     SIZE_T grow_size;           /* Size of next subheap for growing heap */
194     DWORD magic;                /* Magic number */
195     DWORD pending_pos;          /* Position in pending free requests ring */
196     ARENA_INUSE **pending_free; /* Ring buffer for pending free requests */
197     RTL_CRITICAL_SECTION cs;
198     FREE_LIST_ENTRY *freeList;  /* Free lists */
201 #define HEAP_MAGIC ((DWORD)('H' | ('E'<<8) | ('A'<<16) | ('P'<<24)))
203 #define HEAP_DEF_SIZE 0x110000 /* Default heap size = 1Mb + 64Kb */
204 #define COMMIT_MASK 0xffff /* bitmask for commit/decommit granularity */
205 #define MAX_FREE_PENDING 1024 /* max number of free requests to delay */
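206 /* Note (added for clarity): when free checking is enabled, freed arenas are first parked in
 *  the heap->pending_free ring (MAX_FREE_PENDING entries) with ARENA_PENDING_MAGIC before being
 *  really released, so writes to already-freed memory can still be caught by validation. */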
207 /* some undocumented flags (names are made up) */
208 #define HEAP_PRIVATE 0x00001000
209 #define HEAP_PAGE_ALLOCS 0x01000000
210 #define HEAP_VALIDATE 0x10000000
211 #define HEAP_VALIDATE_ALL 0x20000000
212 #define HEAP_VALIDATE_PARAMS 0x40000000
214 static HEAP *processHeap;  /* main process heap */
216 /* check if memory range a contains memory range b */
217 static inline BOOL contains( const void *a, SIZE_T a_size, const void *b, SIZE_T b_size )
219     const void *a_end = (char *)a + a_size, *b_end = (char *)b + b_size;
220     return a <= b && b <= b_end && b_end <= a_end;
223 static inline UINT block_get_flags( const struct block *block )
225     return block->size & ~ARENA_SIZE_MASK;
228 static inline UINT block_get_type( const struct block *block )
230     if (block_get_flags( block ) & ARENA_FLAG_FREE) return (block->unused_bytes << 24)|block->magic;
234 static inline UINT block_get_overhead( const struct block *block )
236     if (block_get_flags( block ) & ARENA_FLAG_FREE) return sizeof(struct entry);
237     return sizeof(*block) + block->unused_bytes;
240 /* return the size of a block, including its header */
241 static inline UINT block_get_size( const struct block *block )
243     UINT data_size = block->size & ARENA_SIZE_MASK, size = data_size;
244     if (block_get_flags( block ) & ARENA_FLAG_FREE) size += sizeof(struct entry);
245     else size += sizeof(*block);
246     if (size < data_size) return ~0u;
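247 /* Example (added, illustrative): a used block whose size field holds 0x40 reports
 *  0x40 + sizeof(struct block) == 0x48 here, while a free block with the same data size
 *  reports 0x40 + sizeof(struct entry), since free blocks carry the larger list header. */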
250 static inline void *subheap_base( const SUBHEAP *subheap )
252     return subheap->base;
255 static inline SIZE_T subheap_size( const SUBHEAP *subheap )
257     return subheap->size;
260 static inline const void *subheap_commit_end( const SUBHEAP *subheap )
262     return (char *)subheap_base( subheap ) + subheap->commitSize;
265 static inline void *first_block( const SUBHEAP *subheap )
267     return (char *)subheap_base( subheap ) + subheap->headerSize;
270 static inline const void *last_block( const SUBHEAP *subheap )
272     return (char *)subheap_commit_end( subheap ) - sizeof(struct block);
275 static inline struct block *next_block( const SUBHEAP *subheap, const struct block *block )
277     const char *data = (char *)(block + 1), *next, *last = last_block( subheap );
278     next = (char *)block + block_get_size( block );
279     if (!contains( data, last - (char *)data, next, sizeof(*block) )) return NULL;
280     return (struct block *)next;
283 static inline BOOL check_subheap( const SUBHEAP *subheap )
285     const char *base = subheap->base;
286     return contains( base, subheap->size, base + subheap->headerSize, subheap->commitSize - subheap->headerSize );
289 static BOOL heap_validate( const HEAP *heap );
291 /* mark a block of memory as free for debugging purposes */
292 static inline void mark_block_free( void *ptr, SIZE_T size, DWORD flags )
294     if (flags & HEAP_FREE_CHECKING_ENABLED)
297         for (i = 0; i < size / sizeof(DWORD); i++) ((DWORD *)ptr)[i] = ARENA_FREE_FILLER;
299 #if defined(VALGRIND_MAKE_MEM_NOACCESS)
300     VALGRIND_DISCARD( VALGRIND_MAKE_MEM_NOACCESS( ptr, size ));
301 #elif defined( VALGRIND_MAKE_NOACCESS)
302     VALGRIND_DISCARD( VALGRIND_MAKE_NOACCESS( ptr, size ));
306 /* mark a block of memory as initialized for debugging purposes */
307 static inline void mark_block_initialized( void *ptr, SIZE_T size )
309 #if defined(VALGRIND_MAKE_MEM_DEFINED)
310     VALGRIND_DISCARD( VALGRIND_MAKE_MEM_DEFINED( ptr, size ));
311 #elif defined(VALGRIND_MAKE_READABLE)
312     VALGRIND_DISCARD( VALGRIND_MAKE_READABLE( ptr, size ));
316 /* mark a block of memory as uninitialized for debugging purposes */
317 static inline void mark_block_uninitialized( void *ptr, SIZE_T size )
319 #if defined(VALGRIND_MAKE_MEM_UNDEFINED)
320     VALGRIND_DISCARD( VALGRIND_MAKE_MEM_UNDEFINED( ptr, size ));
321 #elif defined(VALGRIND_MAKE_WRITABLE)
322     VALGRIND_DISCARD( VALGRIND_MAKE_WRITABLE( ptr, size ));
326 /* mark a block of memory as a tail block */
327 static inline void mark_block_tail( void *ptr, SIZE_T size, DWORD flags )
329     if (flags & HEAP_TAIL_CHECKING_ENABLED)
331         mark_block_uninitialized( ptr, size );
332         memset( ptr, ARENA_TAIL_FILLER, size );
334 #if defined(VALGRIND_MAKE_MEM_NOACCESS)
335     VALGRIND_DISCARD( VALGRIND_MAKE_MEM_NOACCESS( ptr, size ));
336 #elif defined( VALGRIND_MAKE_NOACCESS)
337     VALGRIND_DISCARD( VALGRIND_MAKE_NOACCESS( ptr, size ));
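338 /* Note (added for clarity): with HEAP_TAIL_CHECKING_ENABLED the slack bytes after the user
 *  data are filled with ARENA_TAIL_FILLER (0xab); validation later reports "invalid block tail"
 *  if any of them were overwritten. */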
341 /* initialize contents of a newly created block of memory */
342 static inline void initialize_block( void *ptr, SIZE_T size, SIZE_T unused, DWORD flags )
344     if (flags & HEAP_ZERO_MEMORY)
346         mark_block_initialized( ptr, size );
347         memset( ptr, 0, size );
351         mark_block_uninitialized( ptr, size );
352         if (flags & HEAP_FREE_CHECKING_ENABLED)
354             memset( ptr, ARENA_INUSE_FILLER, size );
355             mark_block_uninitialized( ptr, size );
359     mark_block_tail( (char *)ptr + size, unused, flags );
362 /* notify that a new block of memory has been allocated for debugging purposes */
363 static inline void notify_alloc( void *ptr, SIZE_T size, BOOL init )
365 #ifdef VALGRIND_MALLOCLIKE_BLOCK
366     VALGRIND_MALLOCLIKE_BLOCK( ptr, size, 0, init );
370 /* notify that a block of memory has been freed for debugging purposes */
371 static inline void notify_free( void const *ptr )
373 #ifdef VALGRIND_FREELIKE_BLOCK
374     VALGRIND_FREELIKE_BLOCK( ptr, 0 );
378 static inline void notify_realloc( void const *ptr, SIZE_T size_old, SIZE_T size_new )
380 #ifdef VALGRIND_RESIZEINPLACE_BLOCK
381     /* zero is not a valid size */
382     VALGRIND_RESIZEINPLACE_BLOCK( ptr, size_old ? size_old : 1, size_new ? size_new : 1, 0 );
386 static void notify_free_all( SUBHEAP *subheap )
388 #ifdef VALGRIND_FREELIKE_BLOCK
391     if (!RUNNING_ON_VALGRIND) return;
392     if (!check_subheap( subheap )) return;
394     for (block = first_block( subheap ); block; block = next_block( subheap, block ))
396         if (block_get_flags( block ) & ARENA_FLAG_FREE) continue;
397         if (block_get_type( block ) == ARENA_INUSE_MAGIC) notify_free( block + 1 );
402 /* locate a free list entry of the appropriate size */
403 /* size is the size of the whole block including the arena header */
404 static inline unsigned int get_freelist_index( SIZE_T size )
408     if (size <= HEAP_MAX_SMALL_FREE_LIST)
409         return (size - HEAP_MIN_ARENA_SIZE) / ALIGNMENT;
411     for (i = HEAP_NB_SMALL_FREE_LISTS; i < HEAP_NB_FREE_LISTS - 1; i++)
412         if (size <= HEAP_freeListSizes[i - HEAP_NB_SMALL_FREE_LISTS]) break;
416 /* get the memory protection type to use for a given heap */
417 static inline ULONG get_protection_type( DWORD flags )
419     return (flags & HEAP_CREATE_ENABLE_EXECUTE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
422 static RTL_CRITICAL_SECTION_DEBUG process_heap_cs_debug =
424     0, 0, NULL,  /* will be set later */
425     { &process_heap_cs_debug.ProcessLocksList, &process_heap_cs_debug.ProcessLocksList },
426       0, 0, { (DWORD_PTR)(__FILE__ ": main process heap section") }
429 static inline ULONG heap_get_flags( const HEAP *heap, ULONG flags )
431     flags &= HEAP_GENERATE_EXCEPTIONS | HEAP_NO_SERIALIZE | HEAP_ZERO_MEMORY | HEAP_REALLOC_IN_PLACE_ONLY;
432     return heap->flags | flags;
435 static void heap_lock( HEAP *heap, ULONG flags )
437     if (heap_get_flags( heap, flags ) & HEAP_NO_SERIALIZE) return;
438     RtlEnterCriticalSection( &heap->cs );
441 static void heap_unlock( HEAP *heap, ULONG flags )
443     if (heap_get_flags( heap, flags ) & HEAP_NO_SERIALIZE) return;
444     RtlLeaveCriticalSection( &heap->cs );
447 static void heap_set_status( const HEAP *heap, ULONG flags, NTSTATUS status )
449     if (status == STATUS_NO_MEMORY && (flags & HEAP_GENERATE_EXCEPTIONS)) RtlRaiseStatus( status );
450     if (status) RtlSetLastWin32ErrorAndNtStatusFromNtStatus( status );
453 static void heap_dump( const HEAP *heap )
455     const struct block *block;
456     const ARENA_LARGE *large;
457     const SUBHEAP *subheap;
461     TRACE( "heap: %p\n", heap );
462     TRACE( " next %p\n", LIST_ENTRY( heap->entry.next, HEAP, entry ) );
464     TRACE( " free_lists: %p\n", heap->freeList );
465     for (i = 0; i < HEAP_NB_FREE_LISTS; i++)
467         if (i < HEAP_NB_SMALL_FREE_LISTS) size = HEAP_MIN_ARENA_SIZE + i * ALIGNMENT;
468         else size = HEAP_freeListSizes[i - HEAP_NB_SMALL_FREE_LISTS];
469         TRACE( " %p: size %8Ix, prev %p, next %p\n", heap->freeList + i, size,
470                LIST_ENTRY( heap->freeList[i].arena.entry.prev, struct entry, entry ),
471                LIST_ENTRY( heap->freeList[i].arena.entry.next, struct entry, entry ) );
474     TRACE( " subheaps: %p\n", &heap->subheap_list );
475     LIST_FOR_EACH_ENTRY( subheap, &heap->subheap_list, SUBHEAP, entry )
477         SIZE_T free_size = 0, used_size = 0, overhead = 0;
478         const char *base = subheap_base( subheap );
480         TRACE( " %p: base %p first %p last %p end %p\n", subheap, base, first_block( subheap ),
481                last_block( subheap ), base + subheap_size( subheap ) );
483         if (!check_subheap( subheap )) return;
485         overhead += (char *)first_block( subheap ) - base;
486         for (block = first_block( subheap ); block; block = next_block( subheap, block ))
488             if (block_get_flags( block ) & ARENA_FLAG_FREE)
490                 TRACE( " %p: (free) type %#10x, size %#8x, flags %#4x, prev %p, next %p\n", block,
491                        block_get_type( block ), block_get_size( block ), block_get_flags( block ),
492                        LIST_ENTRY( ((struct entry *)block)->entry.prev, struct entry, entry ),
493                        LIST_ENTRY( ((struct entry *)block)->entry.next, struct entry, entry ) );
495                 overhead += block_get_overhead( block );
496                 free_size += block_get_size( block ) - block_get_overhead( block );
500                 TRACE( " %p: (used) type %#10x, size %#8x, flags %#4x, unused %#4x", block,
501                        block_get_type( block ), block_get_size( block ), block_get_flags( block ),
502                        block->unused_bytes );
503                 if (!(block_get_flags( block ) & ARENA_FLAG_PREV_FREE)) TRACE( "\n" );
504                 else TRACE( ", back %p\n", *((struct block **)block - 1) );
506                 overhead += block_get_overhead( block );
507                 used_size += block_get_size( block ) - block_get_overhead( block );
511         TRACE( " total %#Ix, used %#Ix, free %#Ix, overhead %#Ix (%Iu%%)\n", used_size + free_size + overhead,
512                used_size, free_size, overhead, (overhead * 100) / subheap_size( subheap ) );
515     TRACE( " large blocks: %p\n", &heap->large_list );
516     LIST_FOR_EACH_ENTRY( large, &heap->large_list, ARENA_LARGE, entry )
518         block = (struct block *)(large + 1) - 1;
519         TRACE( " %p: (large) type %#10x, size %#8x, flags %#4x, total_size %#10Ix, alloc_size %#10Ix, prev %p, next %p\n",
520                large, block_get_type( block ), block_get_size( block ), block_get_flags( block ), large->block_size, large->data_size,
521                LIST_ENTRY( large->entry.prev, ARENA_LARGE, entry ), LIST_ENTRY( large->entry.next, ARENA_LARGE, entry ) );
524     if (heap->pending_free)
526         TRACE( " pending blocks: %p\n", heap->pending_free );
527         for (i = 0; i < MAX_FREE_PENDING; ++i)
529             if (!(block = heap->pending_free[i])) break;
531             TRACE( " %c%p: (pend) type %#10x, size %#8x, flags %#4x, unused %#4x", i == heap->pending_pos ? '*' : ' ',
532                    block, block_get_type( block ), block_get_size( block ), block_get_flags( block ), block->unused_bytes );
533             if (!(block_get_flags( block ) & ARENA_FLAG_PREV_FREE)) TRACE( "\n" );
534             else TRACE( ", back %p\n", *((struct block **)block - 1) );
539 static const char *debugstr_heap_entry( struct rtl_heap_entry *entry )
541     const char *str = wine_dbg_sprintf( "data %p, size %#Ix, overhead %#x, region %#x, flags %#x", entry->lpData,
542                                         entry->cbData, entry->cbOverhead, entry->iRegionIndex, entry->wFlags );
543     if (!(entry->wFlags & RTL_HEAP_ENTRY_REGION)) return str;
544     return wine_dbg_sprintf( "%s, commit %#x, uncommit %#x, first %p, last %p", str, entry->Region.dwCommittedSize,
545                              entry->Region.dwUnCommittedSize, entry->Region.lpFirstBlock, entry->Region.lpLastBlock );
548 /***********************************************************************
551 * Pointer to the heap
554 static HEAP *HEAP_GetPtr(
555              HANDLE heap /* [in] Handle to the heap */
557     HEAP *heapPtr = heap;
560     if (!heapPtr || (heapPtr->magic != HEAP_MAGIC))
562         ERR("Invalid heap %p!\n", heap );
565     if (heapPtr->flags & HEAP_VALIDATE_ALL)
567         heap_lock( heapPtr, 0 );
568         valid = heap_validate( heapPtr );
569         heap_unlock( heapPtr, 0 );
571         if (!valid && TRACE_ON(heap))
573             heap_dump( heapPtr );
578     return valid ? heapPtr : NULL;
582 /***********************************************************************
583 * HEAP_InsertFreeBlock
585 * Insert a free block into the free list.
587 static inline void HEAP_InsertFreeBlock( HEAP *heap, ARENA_FREE *pArena, BOOL last )
589     FREE_LIST_ENTRY *pEntry = heap->freeList + get_freelist_index( pArena->size + sizeof(*pArena) );
592         /* insert at end of free list, i.e. before the next free list entry */
594         if (pEntry == &heap->freeList[HEAP_NB_FREE_LISTS]) pEntry = heap->freeList;
595         list_add_before( &pEntry->arena.entry, &pArena->entry );
599         /* insert at head of free list */
600         list_add_after( &pEntry->arena.entry, &pArena->entry );
602     pArena->size |= ARENA_FLAG_FREE;
606 static SUBHEAP *find_subheap( const HEAP *heap, const struct block *block, BOOL heap_walk )
610     LIST_FOR_EACH_ENTRY( subheap, &heap->subheap_list, SUBHEAP, entry )
612         SIZE_T blocks_size = (char *)last_block( subheap ) - (char *)first_block( subheap );
613         if (!check_subheap( subheap )) return NULL;
614         if (contains( first_block( subheap ), blocks_size, block, sizeof(*block) )) return subheap;
615         /* outside of blocks region, possible corruption or heap_walk */
616         if (contains( subheap_base( subheap ), subheap_size( subheap ), block, 0 )) return heap_walk ? subheap : NULL;
623 /***********************************************************************
626 * Make sure the heap storage is committed for a given size in the specified arena.
628 static inline BOOL HEAP_Commit( SUBHEAP *subheap, ARENA_INUSE *pArena, SIZE_T data_size )
630     void *ptr = (char *)(pArena + 1) + data_size + sizeof(ARENA_FREE);
631     SIZE_T size = (char *)ptr - (char *)subheap->base;
632     size = (size + COMMIT_MASK) & ~COMMIT_MASK;
633     if (size > subheap->size) size = subheap->size;
634     if (size <= subheap->commitSize) return TRUE;
635     size -= subheap->commitSize;
636     ptr = (char *)subheap->base + subheap->commitSize;
637     if (NtAllocateVirtualMemory( NtCurrentProcess(), &ptr, 0,
638                                  &size, MEM_COMMIT, get_protection_type( subheap->heap->flags ) ))
640         WARN("Could not commit %08lx bytes at %p for heap %p\n",
641              size, ptr, subheap->heap );
644     subheap->commitSize += size;
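645 /* Worked example (added, illustrative): COMMIT_MASK is 0xffff, so a required size of 0x12345
 *  bytes is rounded up to (0x12345 + 0xffff) & ~0xffff == 0x20000, i.e. commits grow in 64KiB steps. */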
649 /***********************************************************************
652 * If possible, decommit the heap storage from (including) 'ptr'.
654 static inline BOOL HEAP_Decommit( SUBHEAP *subheap, void *ptr )
657     SIZE_T decommit_size;
658     SIZE_T size = (char *)ptr - (char *)subheap->base;
660     /* round to next block and add one full block */
661     size = ((size + COMMIT_MASK) & ~COMMIT_MASK) + COMMIT_MASK + 1;
662     size = max( size, subheap->min_commit );
663     if (size >= subheap->commitSize) return TRUE;
664     decommit_size = subheap->commitSize - size;
665     addr = (char *)subheap->base + size;
667     if (NtFreeVirtualMemory( NtCurrentProcess(), &addr, &decommit_size, MEM_DECOMMIT ))
669         WARN("Could not decommit %08lx bytes at %p for heap %p\n",
670              decommit_size, (char *)subheap->base + size, subheap->heap );
673     subheap->commitSize -= decommit_size;
678 /***********************************************************************
679 * HEAP_CreateFreeBlock
681 * Create a free block at a specified address. 'size' is the size of the
682 * whole block, including the new arena.
684 static void HEAP_CreateFreeBlock( SUBHEAP *subheap, void *ptr, SIZE_T size )
689     DWORD flags = subheap->heap->flags;
691     /* Create a free arena */
692     mark_block_uninitialized( ptr, sizeof(ARENA_FREE) );
694     pFree->magic = ARENA_FREE_MAGIC;
696     /* If debugging, erase the freed block content */
698     pEnd = (char *)ptr + size;
699     if (pEnd > (char *)subheap->base + subheap->commitSize)
700         pEnd = (char *)subheap->base + subheap->commitSize;
701     if (pEnd > (char *)(pFree + 1)) mark_block_free( pFree + 1, pEnd - (char *)(pFree + 1), flags );
703     /* Check if next block is free also */
705     if (((char *)ptr + size < (char *)subheap->base + subheap->size) &&
706         (*(DWORD *)((char *)ptr + size) & ARENA_FLAG_FREE))
708         /* Remove the next arena from the free list */
709         ARENA_FREE *pNext = (ARENA_FREE *)((char *)ptr + size);
710         list_remove( &pNext->entry );
711         size += (pNext->size & ARENA_SIZE_MASK) + sizeof(*pNext);
712         mark_block_free( pNext, sizeof(ARENA_FREE), flags );
715     /* Set the next block PREV_FREE flag and pointer */
717     last = ((char *)ptr + size >= (char *)subheap->base + subheap->size);
720         DWORD *pNext = (DWORD *)((char *)ptr + size);
721         *pNext |= ARENA_FLAG_PREV_FREE;
722         mark_block_initialized( (ARENA_FREE **)pNext - 1, sizeof( ARENA_FREE * ) );
723         *((ARENA_FREE **)pNext - 1) = pFree;
726     /* Last, insert the new block into the free list */
728     pFree->size = size - sizeof(*pFree);
729     HEAP_InsertFreeBlock( subheap->heap, pFree, last );
733 /***********************************************************************
734 * HEAP_MakeInUseBlockFree
736 * Turn an in-use block into a free block. Can also decommit the end of
737 * the heap, and possibly even free the sub-heap altogether.
739 static void HEAP_MakeInUseBlockFree( SUBHEAP *subheap, ARENA_INUSE *pArena )
741     HEAP *heap = subheap->heap;
745     if (heap->pending_free)
747         ARENA_INUSE *prev = heap->pending_free[heap->pending_pos];
748         heap->pending_free[heap->pending_pos] = pArena;
749         heap->pending_pos = (heap->pending_pos + 1) % MAX_FREE_PENDING;
750         pArena->magic = ARENA_PENDING_MAGIC;
751         mark_block_free( pArena + 1, pArena->size & ARENA_SIZE_MASK, heap->flags );
754         subheap = find_subheap( heap, pArena, FALSE );
757     /* Check if we can merge with previous block */
759     size = (pArena->size & ARENA_SIZE_MASK) + sizeof(*pArena);
760     if (pArena->size & ARENA_FLAG_PREV_FREE)
762         pFree = *((ARENA_FREE **)pArena - 1);
763         size += (pFree->size & ARENA_SIZE_MASK) + sizeof(ARENA_FREE);
764         /* Remove it from the free list */
765         list_remove( &pFree->entry );
767     else pFree = (ARENA_FREE *)pArena;
769     /* Create a free block */
771     HEAP_CreateFreeBlock( subheap, pFree, size );
772     size = (pFree->size & ARENA_SIZE_MASK) + sizeof(ARENA_FREE);
773     if ((char *)pFree + size < (char *)subheap->base + subheap->size)
774         return;  /* Not the last block, so nothing more to do */
776     /* Free the whole sub-heap if it's empty and not the original one */
778     if (((char *)pFree == (char *)subheap->base + subheap->headerSize) &&
779         (subheap != &subheap->heap->subheap))
781         void *addr = subheap->base;
784         /* Remove the free block from the list */
785         list_remove( &pFree->entry );
786         /* Remove the subheap from the list */
787         list_remove( &subheap->entry );
788         /* Free the memory */
790         NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );
794     /* Decommit the end of the heap */
796     if (!(subheap->heap->shared)) HEAP_Decommit( subheap, pFree + 1 );
800 static void shrink_used_block( SUBHEAP *subheap, struct block *block, SIZE_T data_size, SIZE_T size )
802     SIZE_T old_data_size = block_get_size( block ) - sizeof(*block);
803     if (old_data_size >= data_size + HEAP_MIN_SHRINK_SIZE)
805         block->size = data_size | block_get_flags( block );
806         block->unused_bytes = data_size - size;
807         HEAP_CreateFreeBlock( subheap, next_block( subheap, block ), old_data_size - data_size );
812         block->unused_bytes = old_data_size - size;
813         if ((next = next_block( subheap, block ))) next->size &= ~ARENA_FLAG_PREV_FREE;
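814 /* Note (added for clarity): the block is only split when the leftover is at least
 *  HEAP_MIN_SHRINK_SIZE, i.e. large enough to hold a free arena header plus the minimum data
 *  size; otherwise the slack is simply recorded in unused_bytes. */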
818 /***********************************************************************
819 * allocate_large_block
821 static void *allocate_large_block( HEAP *heap, DWORD flags, SIZE_T size )
824     SIZE_T block_size = sizeof(*arena) + ROUND_SIZE(size) + HEAP_TAIL_EXTRA_SIZE(flags);
825     LPVOID address = NULL;
827     if (!(flags & HEAP_GROWABLE)) return NULL;
828     if (block_size < size) return NULL;  /* overflow */
829     if (NtAllocateVirtualMemory( NtCurrentProcess(), &address, 0, &block_size,
830                                  MEM_COMMIT, get_protection_type( flags )))
832         WARN("Could not allocate block for %08lx bytes\n", size );
836     arena->data_size = size;
837     arena->block_size = block_size;
838     arena->size = ARENA_LARGE_SIZE;
839     arena->magic = ARENA_LARGE_MAGIC;
840     mark_block_tail( (char *)(arena + 1) + size, block_size - sizeof(*arena) - size, flags );
841     list_add_tail( &heap->large_list, &arena->entry );
842     notify_alloc( arena + 1, size, flags & HEAP_ZERO_MEMORY );
847 /***********************************************************************
850 static void free_large_block( HEAP *heap, void *ptr )
852     ARENA_LARGE *arena = (ARENA_LARGE *)ptr - 1;
853     LPVOID address = arena;
856     list_remove( &arena->entry );
857     NtFreeVirtualMemory( NtCurrentProcess(), &address, &size, MEM_RELEASE );
861 /***********************************************************************
862 * realloc_large_block
864 static void *realloc_large_block( HEAP *heap, DWORD flags, void *ptr, SIZE_T size )
866     ARENA_LARGE *arena = (ARENA_LARGE *)ptr - 1;
869     if (arena->block_size - sizeof(*arena) >= size)
871         SIZE_T unused = arena->block_size - sizeof(*arena) - size;
873         /* FIXME: we could remap zero-pages instead */
874 #ifdef VALGRIND_RESIZEINPLACE_BLOCK
875         if (RUNNING_ON_VALGRIND)
876             notify_realloc( arena + 1, arena->data_size, size );
879         if (size > arena->data_size)
880             initialize_block( (char *)ptr + arena->data_size, size - arena->data_size, unused, flags );
882             mark_block_tail( (char *)ptr + size, unused, flags );
883         arena->data_size = size;
886     if (flags & HEAP_REALLOC_IN_PLACE_ONLY) return NULL;
887     if (!(new_ptr = allocate_large_block( heap, flags, size )))
889         WARN("Could not allocate block for %08lx bytes\n", size );
892     memcpy( new_ptr, ptr, arena->data_size );
893     free_large_block( heap, ptr );
899 /***********************************************************************
902 static ARENA_LARGE *find_large_block( const HEAP *heap, const void *ptr )
906     LIST_FOR_EACH_ENTRY( arena, &heap->large_list, ARENA_LARGE, entry )
907         if (ptr == arena + 1) return arena;
912 static BOOL validate_large_arena( const HEAP *heap, const ARENA_LARGE *arena )
914     const char *err = NULL;
916     if ((ULONG_PTR)arena & COMMIT_MASK)
917         err = "invalid block alignment";
918     else if (arena->size != ARENA_LARGE_SIZE || arena->magic != ARENA_LARGE_MAGIC)
919         err = "invalid block header";
920     else if (!contains( arena, arena->block_size, arena + 1, arena->data_size ))
921         err = "invalid block size";
922     else if (heap->flags & HEAP_TAIL_CHECKING_ENABLED)
924         SIZE_T i, unused = arena->block_size - sizeof(*arena) - arena->data_size;
925         const unsigned char *data = (const unsigned char *)(arena + 1) + arena->data_size;
926         for (i = 0; i < unused && !err; i++) if (data[i] != ARENA_TAIL_FILLER) err = "invalid block tail";
931         ERR( "heap %p, block %p: %s\n", heap, arena, err );
932         if (TRACE_ON(heap)) heap_dump( heap );
938 /***********************************************************************
941 static SUBHEAP
*HEAP_CreateSubHeap( HEAP
*heap
, LPVOID address
, DWORD flags
,
942 SIZE_T commitSize
, SIZE_T totalSize
)
945 FREE_LIST_ENTRY
*pEntry
;
950 if (!commitSize
) commitSize
= COMMIT_MASK
+ 1;
951 totalSize
= min( totalSize
, 0xffff0000 ); /* don't allow a heap larger than 4GB */
952 if (totalSize
< commitSize
) totalSize
= commitSize
;
953 if (flags
& HEAP_SHARED
) commitSize
= totalSize
; /* always commit everything in a shared heap */
954 commitSize
= min( totalSize
, (commitSize
+ COMMIT_MASK
) & ~COMMIT_MASK
);
956 /* allocate the memory block */
957 if (NtAllocateVirtualMemory( NtCurrentProcess(), &address
, 0, &totalSize
,
958 MEM_RESERVE
, get_protection_type( flags
) ))
960 WARN("Could not allocate %08lx bytes\n", totalSize
);
963 if (NtAllocateVirtualMemory( NtCurrentProcess(), &address
, 0,
964 &commitSize
, MEM_COMMIT
, get_protection_type( flags
) ))
966 WARN("Could not commit %08lx bytes for sub-heap %p\n", commitSize
, address
);
973 /* If this is a secondary subheap, insert it into list */
976 subheap
->base
= address
;
977 subheap
->heap
= heap
;
978 subheap
->size
= totalSize
;
979 subheap
->min_commit
= 0x10000;
980 subheap
->commitSize
= commitSize
;
981 subheap
->magic
= SUBHEAP_MAGIC
;
982 subheap
->headerSize
= ROUND_SIZE( sizeof(SUBHEAP
) );
983 list_add_head( &heap
->subheap_list
, &subheap
->entry
);
987 /* If this is a primary subheap, initialize main heap */
990 heap
->ffeeffee
= 0xffeeffee;
991 heap
->auto_flags
= (flags
& HEAP_GROWABLE
);
992 heap
->flags
= (flags
& ~HEAP_SHARED
);
993 heap
->shared
= (flags
& HEAP_SHARED
) != 0;
994 heap
->magic
= HEAP_MAGIC
;
995 heap
->grow_size
= max( HEAP_DEF_SIZE
, totalSize
);
996 list_init( &heap
->subheap_list
);
997 list_init( &heap
->large_list
);
999 subheap
= &heap
->subheap
;
1000 subheap
->base
= address
;
1001 subheap
->heap
= heap
;
1002 subheap
->size
= totalSize
;
1003 subheap
->min_commit
= commitSize
;
1004 subheap
->commitSize
= commitSize
;
1005 subheap
->magic
= SUBHEAP_MAGIC
;
1006 subheap
->headerSize
= ROUND_SIZE( sizeof(HEAP
) );
1007 list_add_head( &heap
->subheap_list
, &subheap
->entry
);
1009 /* Build the free lists */
1011 heap
->freeList
= (FREE_LIST_ENTRY
*)((char *)heap
+ subheap
->headerSize
);
1012 subheap
->headerSize
+= HEAP_NB_FREE_LISTS
* sizeof(FREE_LIST_ENTRY
);
1013 list_init( &heap
->freeList
[0].arena
.entry
);
1014 for (i
= 0, pEntry
= heap
->freeList
; i
< HEAP_NB_FREE_LISTS
; i
++, pEntry
++)
1016 pEntry
->arena
.size
= 0 | ARENA_FLAG_FREE
;
1017 pEntry
->arena
.magic
= ARENA_FREE_MAGIC
;
1018 if (i
) list_add_after( &pEntry
[-1].arena
.entry
, &pEntry
->arena
.entry
);
1021 /* Initialize critical section */
1023 if (!processHeap
) /* do it by hand to avoid memory allocations */
1025 heap
->cs
.DebugInfo
= &process_heap_cs_debug
;
1026 heap
->cs
.LockCount
= -1;
1027 heap
->cs
.RecursionCount
= 0;
1028 heap
->cs
.OwningThread
= 0;
1029 heap
->cs
.LockSemaphore
= 0;
1030 heap
->cs
.SpinCount
= 0;
1031 process_heap_cs_debug
.CriticalSection
= &heap
->cs
;
1035 RtlInitializeCriticalSection( &heap
->cs
);
1036 heap
->cs
.DebugInfo
->Spare
[0] = (DWORD_PTR
)(__FILE__
": heap.cs");
1041 /* let's assume that only one thread at a time will try to do this */
1042 HANDLE sem
= heap
->cs
.LockSemaphore
;
1043 if (!sem
) NtCreateSemaphore( &sem
, SEMAPHORE_ALL_ACCESS
, NULL
, 0, 1 );
1045 NtDuplicateObject( NtCurrentProcess(), sem
, NtCurrentProcess(), &sem
, 0, 0,
1046 DUPLICATE_MAKE_GLOBAL
| DUPLICATE_SAME_ACCESS
| DUPLICATE_CLOSE_SOURCE
);
1047 heap
->cs
.LockSemaphore
= sem
;
1048 RtlFreeHeap( processHeap
, 0, heap
->cs
.DebugInfo
);
1049 heap
->cs
.DebugInfo
= NULL
;
1053 /* Create the first free block */
1055 HEAP_CreateFreeBlock( subheap
, (LPBYTE
)subheap
->base
+ subheap
->headerSize
,
1056 subheap
->size
- subheap
->headerSize
);
1062 /***********************************************************************
1063 * HEAP_FindFreeBlock
1065 * Find a free block at least as large as the requested size, and make sure
1066 * the requested size is committed.
1068 static ARENA_FREE
*HEAP_FindFreeBlock( HEAP
*heap
, SIZE_T data_size
, SUBHEAP
**ppSubHeap
)
1073 FREE_LIST_ENTRY
*pEntry
= heap
->freeList
+ get_freelist_index( data_size
+ sizeof(ARENA_INUSE
) );
1075 /* Find a suitable free list, and in it find a block large enough */
1077 ptr
= &pEntry
->arena
.entry
;
1078 while ((ptr
= list_next( &heap
->freeList
[0].arena
.entry
, ptr
)))
1080 ARENA_FREE
*pArena
= LIST_ENTRY( ptr
, ARENA_FREE
, entry
);
1081 SIZE_T arena_size
= (pArena
->size
& ARENA_SIZE_MASK
) +
1082 sizeof(ARENA_FREE
) - sizeof(ARENA_INUSE
);
1083 if (arena_size
>= data_size
)
1085 subheap
= find_subheap( heap
, (struct block
*)pArena
, FALSE
);
1086 if (!HEAP_Commit( subheap
, (ARENA_INUSE
*)pArena
, data_size
)) return NULL
;
1087 *ppSubHeap
= subheap
;
1092 /* If no block was found, attempt to grow the heap */
1094 if (!(heap
->flags
& HEAP_GROWABLE
))
1096 WARN("Not enough space in heap %p for %08lx bytes\n", heap
, data_size
);
1099 /* make sure that we have a big enough size *committed* to fit another
1100 * last free arena in !
1101 * So just one heap struct, one first free arena which will eventually
1102 * get used, and a second free arena that might get assigned all remaining
1103 * free space in shrink_used_block() */
1104 total_size
= data_size
+ ROUND_SIZE(sizeof(SUBHEAP
)) + sizeof(ARENA_INUSE
) + sizeof(ARENA_FREE
);
1105 if (total_size
< data_size
) return NULL
; /* overflow */
1107 if ((subheap
= HEAP_CreateSubHeap( heap
, NULL
, heap
->flags
, total_size
,
1108 max( heap
->grow_size
, total_size
) )))
1110 if (heap
->grow_size
< 128 * 1024 * 1024) heap
->grow_size
*= 2;
1112 else while (!subheap
) /* shrink the grow size again if we are running out of space */
1114 if (heap
->grow_size
<= total_size
|| heap
->grow_size
<= 4 * 1024 * 1024) return NULL
;
1115 heap
->grow_size
/= 2;
1116 subheap
= HEAP_CreateSubHeap( heap
, NULL
, heap
->flags
, total_size
,
1117 max( heap
->grow_size
, total_size
) );
1120 TRACE("created new sub-heap %p of %08lx bytes for heap %p\n",
1121 subheap
, subheap
->size
, heap
);
1123 *ppSubHeap
= subheap
;
1124 return (ARENA_FREE
*)((char *)subheap
->base
+ subheap
->headerSize
);
1128 static BOOL is_valid_free_block( const HEAP *heap, const struct block *block )
1130     const SUBHEAP *subheap;
1133     if ((subheap = find_subheap( heap, block, FALSE ))) return TRUE;
1134     for (i = 0; i < HEAP_NB_FREE_LISTS; i++) if (block == (struct block *)&heap->freeList[i].arena) return TRUE;
1138 static BOOL
validate_free_block( const SUBHEAP
*subheap
, const struct block
*block
)
1140 const char *err
= NULL
, *base
= subheap_base( subheap
), *commit_end
= subheap_commit_end( subheap
);
1141 const struct entry
*entry
= (struct entry
*)block
;
1142 const struct block
*prev
, *next
;
1143 HEAP
*heap
= subheap
->heap
;
1144 DWORD flags
= heap
->flags
;
1146 if ((ULONG_PTR
)(block
+ 1) % ALIGNMENT
)
1147 err
= "invalid block alignment";
1148 else if (block_get_type( block
) != ARENA_FREE_MAGIC
)
1149 err
= "invalid block header";
1150 else if (!(block_get_flags( block
) & ARENA_FLAG_FREE
) || (block_get_flags( block
) & ARENA_FLAG_PREV_FREE
))
1151 err
= "invalid block flags";
1152 else if (!contains( base
, subheap_size( subheap
), block
, block_get_size( block
) ))
1153 err
= "invalid block size";
1154 else if (!is_valid_free_block( heap
, (next
= (struct block
*)LIST_ENTRY( entry
->entry
.next
, struct entry
, entry
)) ))
1155 err
= "invalid next free block pointer";
1156 else if (!(block_get_flags( next
) & ARENA_FLAG_FREE
) || block_get_type( next
) != ARENA_FREE_MAGIC
)
1157 err
= "invalid next free block header";
1158 else if (!is_valid_free_block( heap
, (prev
= (struct block
*)LIST_ENTRY( entry
->entry
.prev
, struct entry
, entry
)) ))
1159 err
= "invalid previous free block pointer";
1160 else if (!(block_get_flags( prev
) & ARENA_FLAG_FREE
) || block_get_type( prev
) != ARENA_FREE_MAGIC
)
1161 err
= "invalid previous free block header";
1162 else if ((next
= next_block( subheap
, (struct block
*)block
)))
1164 if (!(block_get_flags( next
) & ARENA_FLAG_PREV_FREE
))
1165 err
= "invalid next block flags";
1166 if (*((struct block
**)next
- 1) != block
)
1167 err
= "invalid next block back pointer";
1170 if (!err
&& (flags
& HEAP_FREE_CHECKING_ENABLED
))
1172 const char *ptr
= (char *)(entry
+ 1), *end
= (char *)block
+ block_get_size( block
);
1173 if (next
) end
-= sizeof(struct block
*);
1174 if (end
> commit_end
) end
= commit_end
;
1175 while (!err
&& ptr
< end
)
1177 if (*(DWORD
*)ptr
!= ARENA_FREE_FILLER
) err
= "free block overwritten";
1178 ptr
+= sizeof(DWORD
);
1184 ERR( "heap %p, block %p: %s\n", heap
, block
, err
);
1185 if (TRACE_ON(heap
)) heap_dump( heap
);
1192 static BOOL
validate_used_block( const SUBHEAP
*subheap
, const struct block
*block
)
1194 const char *err
= NULL
, *base
= subheap_base( subheap
), *commit_end
= subheap_commit_end( subheap
);
1195 const HEAP
*heap
= subheap
->heap
;
1196 DWORD flags
= heap
->flags
;
1197 const struct block
*next
;
1200 if ((ULONG_PTR
)(block
+ 1) % ALIGNMENT
)
1201 err
= "invalid block alignment";
1202 else if (block_get_type( block
) != ARENA_INUSE_MAGIC
&& block_get_type( block
) != ARENA_PENDING_MAGIC
)
1203 err
= "invalid block header";
1204 else if (block_get_flags( block
) & ARENA_FLAG_FREE
)
1205 err
= "invalid block flags";
1206 else if (!contains( base
, commit_end
- base
, block
, block_get_size( block
) ))
1207 err
= "invalid block size";
1208 else if (block
->unused_bytes
> block_get_size( block
) - sizeof(*block
))
1209 err
= "invalid block unused size";
1210 else if ((next
= next_block( subheap
, block
)) && (block_get_flags( next
) & ARENA_FLAG_PREV_FREE
))
1211 err
= "invalid next block flags";
1212 else if (block_get_flags( block
) & ARENA_FLAG_PREV_FREE
)
1214 const struct block
*prev
= *((struct block
**)block
- 1);
1215 if (!is_valid_free_block( heap
, prev
))
1216 err
= "invalid previous block pointer";
1217 else if (!(block_get_flags( prev
) & ARENA_FLAG_FREE
) || block_get_type( prev
) != ARENA_FREE_MAGIC
)
1218 err
= "invalid previous block flags";
1219 if ((char *)prev
+ block_get_size( prev
) != (char *)block
)
1220 err
= "invalid previous block size";
1223 if (!err
&& block_get_type( block
) == ARENA_PENDING_MAGIC
)
1225 const char *ptr
= (char *)(block
+ 1), *end
= (char *)block
+ block_get_size( block
);
1226 while (!err
&& ptr
< end
)
1228 if (*(DWORD
*)ptr
!= ARENA_FREE_FILLER
) err
= "free block overwritten";
1229 ptr
+= sizeof(DWORD
);
1232 else if (!err
&& (flags
& HEAP_TAIL_CHECKING_ENABLED
))
1234 const unsigned char *tail
= (unsigned char *)block
+ block_get_size( block
) - block
->unused_bytes
;
1235 for (i
= 0; !err
&& i
< block
->unused_bytes
; i
++) if (tail
[i
] != ARENA_TAIL_FILLER
) err
= "invalid block tail";
1240 ERR( "heap %p, block %p: %s\n", heap
, block
, err
);
1241 if (TRACE_ON(heap
)) heap_dump( heap
);
1248 static BOOL heap_validate_ptr( const HEAP *heap, const void *ptr, SUBHEAP **subheap )
1250     const struct block *arena = (struct block *)ptr - 1;
1251     const ARENA_LARGE *large_arena;
1253     if (!(*subheap = find_subheap( heap, arena, FALSE )))
1255         if (!(large_arena = find_large_block( heap, ptr )))
1257             if (WARN_ON(heap)) WARN("heap %p, ptr %p: block region not found\n", heap, ptr );
1261         return validate_large_arena( heap, large_arena );
1264     return validate_used_block( *subheap, arena );
1267 static BOOL heap_validate( const HEAP *heap )
1269     const ARENA_LARGE *large_arena;
1270     const struct block *block;
1271     const SUBHEAP *subheap;
1273     LIST_FOR_EACH_ENTRY( subheap, &heap->subheap_list, SUBHEAP, entry )
1275         if (!check_subheap( subheap ))
1277             ERR( "heap %p, subheap %p corrupted sizes\n", heap, subheap );
1278             if (TRACE_ON(heap)) heap_dump( heap );
1282         for (block = first_block( subheap ); block; block = next_block( subheap, block ))
1284             if (block_get_flags( block ) & ARENA_FLAG_FREE)
1286                 if (!validate_free_block( subheap, block )) return FALSE;
1290                 if (!validate_used_block( subheap, block )) return FALSE;
1295     LIST_FOR_EACH_ENTRY( large_arena, &heap->large_list, ARENA_LARGE, entry )
1296         if (!validate_large_arena( heap, large_arena )) return FALSE;
1301 static inline struct block *unsafe_block_from_ptr( const HEAP *heap, const void *ptr, SUBHEAP **subheap )
1303     struct block *block = (struct block *)ptr - 1;
1304     const char *err = NULL, *base, *commit_end;
1306     if (heap->flags & HEAP_VALIDATE)
1308         if (!heap_validate_ptr( heap, ptr, subheap )) return NULL;
1312     if ((*subheap = find_subheap( heap, block, FALSE )))
1314         base = subheap_base( *subheap );
1315         commit_end = subheap_commit_end( *subheap );
1320         if (find_large_block( heap, ptr )) return block;
1321         err = "block region not found";
1323     else if ((ULONG_PTR)ptr % ALIGNMENT)
1324         err = "invalid ptr alignment";
1325     else if (block_get_type( block ) == ARENA_PENDING_MAGIC || (block_get_flags( block ) & ARENA_FLAG_FREE))
1326         err = "already freed block";
1327     else if (block_get_type( block ) != ARENA_INUSE_MAGIC)
1328         err = "invalid block header";
1329     else if (!contains( base, commit_end - base, block, block_get_size( block ) ))
1330         err = "invalid block size";
1332     if (err) WARN( "heap %p, block %p: %s\n", heap, block, err );
1333     return err ? NULL : block;
1336 static DWORD heap_flags_from_global_flag( DWORD flag )
1340     if (flag & FLG_HEAP_ENABLE_TAIL_CHECK)
1341         ret |= HEAP_TAIL_CHECKING_ENABLED;
1342     if (flag & FLG_HEAP_ENABLE_FREE_CHECK)
1343         ret |= HEAP_FREE_CHECKING_ENABLED;
1344     if (flag & FLG_HEAP_VALIDATE_PARAMETERS)
1345         ret |= HEAP_VALIDATE_PARAMS | HEAP_TAIL_CHECKING_ENABLED | HEAP_FREE_CHECKING_ENABLED;
1346     if (flag & FLG_HEAP_VALIDATE_ALL)
1347         ret |= HEAP_VALIDATE_ALL | HEAP_TAIL_CHECKING_ENABLED | HEAP_FREE_CHECKING_ENABLED;
1348     if (flag & FLG_HEAP_DISABLE_COALESCING)
1349         ret |= HEAP_DISABLE_COALESCE_ON_FREE;
1350     if (flag & FLG_HEAP_PAGE_ALLOCS)
1351         ret |= HEAP_PAGE_ALLOCS;
1356 /***********************************************************************
1357 * heap_set_debug_flags
1359 static void heap_set_debug_flags( HANDLE handle
)
1361 HEAP
*heap
= HEAP_GetPtr( handle
);
1362 ULONG global_flags
= RtlGetNtGlobalFlags();
1363 DWORD flags
, force_flags
;
1365 if (TRACE_ON(heap
)) global_flags
|= FLG_HEAP_VALIDATE_ALL
;
1366 if (WARN_ON(heap
)) global_flags
|= FLG_HEAP_VALIDATE_PARAMETERS
;
1368 flags
= heap_flags_from_global_flag( global_flags
);
1369 force_flags
= (heap
->flags
| flags
) & ~(HEAP_SHARED
|HEAP_DISABLE_COALESCE_ON_FREE
);
1371 if (global_flags
& FLG_HEAP_ENABLE_TAGGING
) flags
|= HEAP_SHARED
;
1372 if (!(global_flags
& FLG_HEAP_PAGE_ALLOCS
)) force_flags
&= ~(HEAP_GROWABLE
|HEAP_PRIVATE
);
1374 if (RUNNING_ON_VALGRIND
) flags
= 0; /* no sense in validating since Valgrind catches accesses */
1376 heap
->flags
|= flags
;
1377 heap
->force_flags
|= force_flags
;
1379 if (flags
& (HEAP_FREE_CHECKING_ENABLED
| HEAP_TAIL_CHECKING_ENABLED
)) /* fix existing blocks */
1381 struct block
*block
;
1385 LIST_FOR_EACH_ENTRY( subheap
, &heap
->subheap_list
, SUBHEAP
, entry
)
1387 const char *commit_end
= subheap_commit_end( subheap
);
1389 if (!check_subheap( subheap
)) break;
1391 for (block
= first_block( subheap
); block
; block
= next_block( subheap
, block
))
1393 if (block_get_flags( block
) & ARENA_FLAG_FREE
)
1395 char *data
= (char *)block
+ block_get_overhead( block
), *end
= (char *)block
+ block_get_size( block
);
1396 if (end
>= commit_end
) mark_block_free( data
, commit_end
- data
, flags
);
1397 else mark_block_free( data
, end
- sizeof(struct block
*) - data
, flags
);
1401 if (block_get_type( block
) == ARENA_PENDING_MAGIC
) mark_block_free( block
+ 1, block_get_size( block
) - sizeof(*block
), flags
);
1402 else mark_block_tail( (char *)block
+ block_get_size( block
) - block
->unused_bytes
, block
->unused_bytes
, flags
);
1407 LIST_FOR_EACH_ENTRY( large
, &heap
->large_list
, ARENA_LARGE
, entry
)
1408 mark_block_tail( (char *)(large
+ 1) + large
->data_size
,
1409 large
->block_size
- sizeof(*large
) - large
->data_size
, flags
);
1412 if ((heap
->flags
& HEAP_GROWABLE
) && !heap
->pending_free
&&
1413 ((flags
& HEAP_FREE_CHECKING_ENABLED
) || RUNNING_ON_VALGRIND
))
1415 heap
->pending_free
= RtlAllocateHeap( handle
, HEAP_ZERO_MEMORY
,
1416 MAX_FREE_PENDING
* sizeof(*heap
->pending_free
) );
1417 heap
->pending_pos
= 0;
1422 /***********************************************************************
1423 * RtlCreateHeap (NTDLL.@)
1425 * Create a new Heap.
1428 * flags [I] HEAP_ flags from "winnt.h"
1429 * addr [I] Desired base address
1430 * totalSize [I] Total size of the heap, or 0 for a growable heap
1431 * commitSize [I] Amount of heap space to commit
1432 * unknown [I] Not yet understood
1433 * definition [I] Heap definition
1436 * Success: A HANDLE to the newly created heap.
1437 * Failure: a NULL HANDLE.
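1438 /* Usage sketch (added, illustrative only):
 *      HANDLE h = RtlCreateHeap( HEAP_GROWABLE, NULL, 0, 0, NULL, NULL );
 *      void *p  = RtlAllocateHeap( h, HEAP_ZERO_MEMORY, 0x100 );
 *      RtlFreeHeap( h, 0, p );
 *      RtlDestroyHeap( h );
 */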
1439 HANDLE WINAPI RtlCreateHeap( ULONG flags, PVOID addr, SIZE_T totalSize, SIZE_T commitSize,
1440                              PVOID unknown, PRTL_HEAP_DEFINITION definition )
1444     /* Allocate the heap block */
1446     flags &= ~(HEAP_TAIL_CHECKING_ENABLED|HEAP_FREE_CHECKING_ENABLED);
1447     if (processHeap) flags |= HEAP_PRIVATE;
1448     if (!processHeap || !totalSize || (flags & HEAP_SHARED)) flags |= HEAP_GROWABLE;
1449     if (!totalSize) totalSize = HEAP_DEF_SIZE;
1451     if (!(subheap = HEAP_CreateSubHeap( NULL, addr, flags, commitSize, totalSize ))) return 0;
1453     heap_set_debug_flags( subheap->heap );
1455     /* link it into the per-process heap list */
1458         HEAP *heapPtr = subheap->heap;
1459         RtlEnterCriticalSection( &processHeap->cs );
1460         list_add_head( &processHeap->entry, &heapPtr->entry );
1461         RtlLeaveCriticalSection( &processHeap->cs );
1465         processHeap = subheap->heap;  /* assume the first heap we create is the process main heap */
1466         list_init( &processHeap->entry );
1469     return subheap->heap;
1473 /***********************************************************************
1474 * RtlDestroyHeap (NTDLL.@)
1476 * Destroy a Heap created with RtlCreateHeap().
1479 * heap [I] Heap to destroy.
1482 * Success: A NULL HANDLE, if heap is NULL or it was destroyed
1483 * Failure: The Heap handle, if heap is the process heap.
1485 HANDLE WINAPI
RtlDestroyHeap( HANDLE heap
)
1487 HEAP
*heapPtr
= HEAP_GetPtr( heap
);
1488 SUBHEAP
*subheap
, *next
;
1489 ARENA_LARGE
*arena
, *arena_next
;
1493 TRACE("%p\n", heap
);
1494 if (!heapPtr
&& heap
&& (((HEAP
*)heap
)->flags
& HEAP_VALIDATE_PARAMS
) &&
1495 NtCurrentTeb()->Peb
->BeingDebugged
)
1497 DbgPrint( "Attempt to destroy an invalid heap\n" );
1500 if (!heapPtr
) return heap
;
1502 if (heap
== processHeap
) return heap
; /* cannot delete the main process heap */
1504 /* remove it from the per-process list */
1505 RtlEnterCriticalSection( &processHeap
->cs
);
1506 list_remove( &heapPtr
->entry
);
1507 RtlLeaveCriticalSection( &processHeap
->cs
);
1509 heapPtr
->cs
.DebugInfo
->Spare
[0] = 0;
1510 RtlDeleteCriticalSection( &heapPtr
->cs
);
1512 LIST_FOR_EACH_ENTRY_SAFE( arena
, arena_next
, &heapPtr
->large_list
, ARENA_LARGE
, entry
)
1514 list_remove( &arena
->entry
);
1517 NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_RELEASE
);
1519 LIST_FOR_EACH_ENTRY_SAFE( subheap
, next
, &heapPtr
->subheap_list
, SUBHEAP
, entry
)
1521 if (subheap
== &heapPtr
->subheap
) continue; /* do this one last */
1522 notify_free_all( subheap
);
1523 list_remove( &subheap
->entry
);
1525 addr
= subheap
->base
;
1526 NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_RELEASE
);
1528 notify_free_all( &heapPtr
->subheap
);
1529 RtlFreeHeap( GetProcessHeap(), 0, heapPtr
->pending_free
);
1531 addr
= heapPtr
->subheap
.base
;
1532 NtFreeVirtualMemory( NtCurrentProcess(), &addr
, &size
, MEM_RELEASE
);
1536 static NTSTATUS heap_allocate( HEAP *heap, ULONG flags, SIZE_T size, void **ret )
1538     struct block *block;
1539     struct entry *entry;
1543     data_size = ROUND_SIZE(size) + HEAP_TAIL_EXTRA_SIZE(flags);
1544     if (data_size < size) return STATUS_NO_MEMORY;  /* overflow */
1545     if (data_size < HEAP_MIN_DATA_SIZE) data_size = HEAP_MIN_DATA_SIZE;
1547     if (data_size >= HEAP_MIN_LARGE_BLOCK_SIZE)
1549         if (!(*ret = allocate_large_block( heap, flags, size ))) return STATUS_NO_MEMORY;
1550         return STATUS_SUCCESS;
1553     /* Locate a suitable free block */
1555     if (!(entry = HEAP_FindFreeBlock( heap, data_size, &subheap ))) return STATUS_NO_MEMORY;
1557     /* Remove the arena from the free list */
1559     list_remove( &entry->entry );
1561     /* Build the in-use arena */
1563     block = (struct block *)entry;
1565     /* in-use arena is smaller than free arena,
1566      * so we have to add the difference to the size */
1567     block->size = (block->size & ~ARENA_FLAG_FREE) + sizeof(*entry) - sizeof(*block);
1568     block->magic = ARENA_INUSE_MAGIC;
1570     /* Shrink the block */
1572     shrink_used_block( subheap, block, data_size, size );
1574     notify_alloc( block + 1, size, flags & HEAP_ZERO_MEMORY );
1575     initialize_block( block + 1, size, block->unused_bytes, flags );
1578     return STATUS_SUCCESS;
1581 /***********************************************************************
1582 * RtlAllocateHeap (NTDLL.@)
1584 void *WINAPI DECLSPEC_HOTPATCH RtlAllocateHeap( HANDLE heap, ULONG flags, SIZE_T size )
1590     if (!(heapPtr = HEAP_GetPtr( heap )))
1591         status = STATUS_INVALID_HANDLE;
1594         heap_lock( heapPtr, flags );
1595         status = heap_allocate( heapPtr, heap_get_flags( heapPtr, flags ), size, &ptr );
1596         heap_unlock( heapPtr, flags );
1599     TRACE( "heap %p, flags %#x, size %#Ix, return %p, status %#x.\n", heap, flags, size, ptr, status );
1600     heap_set_status( heapPtr, flags, status );
1605 static NTSTATUS heap_free( HEAP *heap, void *ptr )
1610     /* Inform valgrind we are trying to free memory, so it can throw up an error message */
1613     if (!(block = unsafe_block_from_ptr( heap, ptr, &subheap ))) return STATUS_INVALID_PARAMETER;
1614     if (!subheap) free_large_block( heap, ptr );
1615     else HEAP_MakeInUseBlockFree( subheap, block );
1617     return STATUS_SUCCESS;
1620 /***********************************************************************
1621 * RtlFreeHeap (NTDLL.@)
1623 BOOLEAN WINAPI DECLSPEC_HOTPATCH RtlFreeHeap( HANDLE heap, ULONG flags, void *ptr )
1628     if (!ptr) return TRUE;
1630     if (!(heapPtr = HEAP_GetPtr( heap )))
1631         status = STATUS_INVALID_PARAMETER;
1634         heap_lock( heapPtr, flags );
1635         status = heap_free( heapPtr, ptr );
1636         heap_unlock( heapPtr, flags );
1639     TRACE( "heap %p, flags %#x, ptr %p, return %u, status %#x.\n", heap, flags, ptr, !status, status );
1640     heap_set_status( heapPtr, flags, status );
1645 static NTSTATUS
heap_reallocate( HEAP
*heap
, ULONG flags
, void *ptr
, SIZE_T size
, void **ret
)
1647 SIZE_T old_data_size
, old_size
, data_size
;
1648 struct block
*next
, *block
;
1652 data_size
= ROUND_SIZE(size
) + HEAP_TAIL_EXTRA_SIZE(flags
);
1653 if (data_size
< size
) return STATUS_NO_MEMORY
; /* overflow */
1654 if (data_size
< HEAP_MIN_DATA_SIZE
) data_size
= HEAP_MIN_DATA_SIZE
;
1656 if (!(block
= unsafe_block_from_ptr( heap
, ptr
, &subheap
))) return STATUS_INVALID_PARAMETER
;
1659 if (!(*ret
= realloc_large_block( heap
, flags
, ptr
, size
))) return STATUS_NO_MEMORY
;
1660 return STATUS_SUCCESS
;
1663 /* Check if we need to grow the block */
1665 old_data_size
= block_get_size( block
) - sizeof(*block
);
1666 old_size
= block_get_size( block
) - block_get_overhead( block
);
1667 if (data_size
> old_data_size
)
1669 if ((next
= next_block( subheap
, block
)) && (block_get_flags( next
) & ARENA_FLAG_FREE
) &&
1670 data_size
< HEAP_MIN_LARGE_BLOCK_SIZE
&& data_size
<= old_data_size
+ block_get_size( next
))
1672 /* The next block is free and large enough */
1673 struct entry
*entry
= (struct entry
*)next
;
1674 list_remove( &entry
->entry
);
1675 block
->size
+= block_get_size( next
);
1676 if (!HEAP_Commit( subheap
, block
, data_size
)) return STATUS_NO_MEMORY
;
1677 notify_realloc( block
+ 1, old_size
, size
);
1678 shrink_used_block( subheap
, block
, data_size
, size
);
1682 if (flags
& HEAP_REALLOC_IN_PLACE_ONLY
) return STATUS_NO_MEMORY
;
1683 if ((status
= heap_allocate( heap
, flags
& ~HEAP_ZERO_MEMORY
, size
, ret
))) return status
;
1684 memcpy( *ret
, block
+ 1, old_size
);
1685 if (flags
& HEAP_ZERO_MEMORY
) memset( (char *)*ret
+ old_size
, 0, size
- old_size
);
1687 HEAP_MakeInUseBlockFree( subheap
, block
);
1688 return STATUS_SUCCESS
;
1693 notify_realloc( block
+ 1, old_size
, size
);
1694 shrink_used_block( subheap
, block
, data_size
, size
);
1697 /* Clear the extra bytes if needed */
1699 if (size
<= old_size
) mark_block_tail( (char *)(block
+ 1) + size
, block
->unused_bytes
, flags
);
1700 else initialize_block( (char *)(block
+ 1) + old_size
, size
- old_size
, block
->unused_bytes
, flags
);
1702 /* Return the new arena */
1705 return STATUS_SUCCESS
;
1708 /***********************************************************************
1709 * RtlReAllocateHeap (NTDLL.@)
1711 void *WINAPI RtlReAllocateHeap( HANDLE heap, ULONG flags, void *ptr, SIZE_T size )
1717     if (!ptr) return NULL;
1719     if (!(heapPtr = HEAP_GetPtr( heap )))
1720         status = STATUS_INVALID_HANDLE;
1723         heap_lock( heapPtr, flags );
1724         status = heap_reallocate( heapPtr, heap_get_flags( heapPtr, flags ), ptr, size, &ret );
1725         heap_unlock( heapPtr, flags );
1728     TRACE( "heap %p, flags %#x, ptr %p, size %#Ix, return %p, status %#x.\n", heap, flags, ptr, size, ret, status );
1729     heap_set_status( heap, flags, status );
1734 /***********************************************************************
1735 * RtlCompactHeap (NTDLL.@)
1737 * Compact the free space in a Heap.
1740 * heap [I] Heap that block was allocated from
1741 * flags [I] HEAP_ flags from "winnt.h"
1744 * The number of bytes compacted.
1747 * This function is a harmless stub.
1749 ULONG WINAPI RtlCompactHeap( HANDLE heap, ULONG flags )
1751     static BOOL reported;
1752     if (!reported++) FIXME( "(%p, 0x%x) stub\n", heap, flags );
1757 /***********************************************************************
1758 * RtlLockHeap (NTDLL.@)
1763 * heap [I] Heap to lock
1766 * Success: TRUE. The Heap is locked.
1767 * Failure: FALSE, if heap is invalid.
1769 BOOLEAN WINAPI RtlLockHeap( HANDLE heap )
1771     HEAP *heapPtr = HEAP_GetPtr( heap );
1772     if (!heapPtr) return FALSE;
1773     heap_lock( heapPtr, 0 );
1778 /***********************************************************************
1779 * RtlUnlockHeap (NTDLL.@)
1784 * heap [I] Heap to unlock
1787 * Success: TRUE. The Heap is unlocked.
1788 * Failure: FALSE, if heap is invalid.
1790 BOOLEAN WINAPI RtlUnlockHeap( HANDLE heap )
1792     HEAP *heapPtr = HEAP_GetPtr( heap );
1793     if (!heapPtr) return FALSE;
1794     heap_unlock( heapPtr, 0 );
1799 static NTSTATUS heap_size( HEAP *heap, const void *ptr, SIZE_T *size )
1801     const ARENA_INUSE *block;
1804     if (!(block = unsafe_block_from_ptr( heap, ptr, &subheap ))) return STATUS_INVALID_PARAMETER;
1807         const ARENA_LARGE *large_arena = (const ARENA_LARGE *)ptr - 1;
1808         *size = large_arena->data_size;
1810     else *size = block_get_size( block ) - block_get_overhead( block );
1812     return STATUS_SUCCESS;
/***********************************************************************
 *           RtlSizeHeap   (NTDLL.@)
 */
SIZE_T WINAPI RtlSizeHeap( HANDLE heap, ULONG flags, const void *ptr )
{
    SIZE_T size = ~(SIZE_T)0;
    NTSTATUS status;
    HEAP *heapPtr;

    if (!(heapPtr = HEAP_GetPtr( heap )))
        status = STATUS_INVALID_PARAMETER;
    else
    {
        heap_lock( heapPtr, flags );
        status = heap_size( heapPtr, ptr, &size );
        heap_unlock( heapPtr, flags );
    }

    TRACE( "heap %p, flags %#x, ptr %p, return %#Ix, status %#x.\n", heap, flags, ptr, size, status );
    heap_set_status( heapPtr, flags, status );
    return size;
}

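/* Usage sketch (illustrative only, not part of the implementation): heap_size() above
 * reports the data size recorded in the block header, or the data_size of the
 * ARENA_LARGE header for large blocks; for an invalid pointer RtlSizeHeap returns
 * ~(SIZE_T)0 and reports the failure through heap_set_status():
 *
 *   void *p = RtlAllocateHeap( heap, 0, 10 );
 *   SIZE_T n = RtlSizeHeap( heap, 0, p );
 *   if (n == ~(SIZE_T)0) ERR( "invalid block %p\n", p );
 */
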
/***********************************************************************
 *           RtlValidateHeap   (NTDLL.@)
 */
BOOLEAN WINAPI RtlValidateHeap( HANDLE heap, ULONG flags, const void *ptr )
{
    SUBHEAP *subheap;
    HEAP *heapPtr;
    BOOLEAN ret;

    if (!(heapPtr = HEAP_GetPtr( heap )))
        ret = FALSE;
    else
    {
        heap_lock( heapPtr, flags );
        if (ptr) ret = heap_validate_ptr( heapPtr, ptr, &subheap );
        else ret = heap_validate( heapPtr );
        heap_unlock( heapPtr, flags );
    }

    TRACE( "heap %p, flags %#x, ptr %p, return %u.\n", heap, flags, ptr, !!ret );
    return !!ret;
}

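/* Usage sketch (illustrative only, not part of the implementation): a NULL pointer
 * validates the whole heap, a non-NULL pointer validates only that block:
 *
 *   if (!RtlValidateHeap( heap, 0, NULL )) ERR( "heap %p is corrupt\n", heap );
 *   if (!RtlValidateHeap( heap, 0, ptr )) ERR( "%p is not a valid in-use block\n", ptr );
 */
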
static NTSTATUS heap_walk_blocks( const HEAP *heap, const SUBHEAP *subheap, struct rtl_heap_entry *entry )
{
    const char *base = subheap_base( subheap ), *commit_end = subheap_commit_end( subheap ), *end = base + subheap_size( subheap );
    const struct block *block, *blocks = first_block( subheap );

    if (entry->lpData == commit_end) return STATUS_NO_MORE_ENTRIES;

    if (entry->lpData == base) block = blocks;
    else if (!(block = next_block( subheap, (struct block *)entry->lpData - 1 )))
    {
        entry->lpData = (void *)commit_end;
        entry->cbData = end - commit_end;
        entry->cbOverhead = 0;
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_UNCOMMITTED;
        return STATUS_SUCCESS;
    }

    if (block_get_flags( block ) & ARENA_FLAG_FREE)
    {
        entry->lpData = (char *)block + block_get_overhead( block );
        entry->cbData = block_get_size( block ) - block_get_overhead( block );
        /* FIXME: last free block should not include uncommitted range, which also has its own overhead */
        if (!contains( blocks, commit_end - (char *)blocks, block, block_get_size( block ) ))
            entry->cbData = commit_end - (char *)entry->lpData - 8 * sizeof(void *);
        entry->cbOverhead = 4 * sizeof(void *);
        entry->iRegionIndex = 0;
    }
    else
    {
        entry->lpData = (void *)(block + 1);
        entry->cbData = block_get_size( block ) - block_get_overhead( block );
        entry->cbOverhead = block_get_overhead( block );
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_COMMITTED|RTL_HEAP_ENTRY_BLOCK|RTL_HEAP_ENTRY_BUSY;
    }

    return STATUS_SUCCESS;
}

static NTSTATUS heap_walk( const HEAP *heap, struct rtl_heap_entry *entry )
{
    const ARENA_LARGE *large;
    const struct list *next;
    const SUBHEAP *subheap;
    NTSTATUS status;
    char *base;

    if ((large = find_large_block( heap, entry->lpData )))
        next = &large->entry;
    else if ((subheap = find_subheap( heap, entry->lpData, TRUE )))
    {
        if (!(status = heap_walk_blocks( heap, subheap, entry ))) return STATUS_SUCCESS;
        else if (status != STATUS_NO_MORE_ENTRIES) return status;
        next = &subheap->entry;
    }
    else
    {
        if (entry->lpData) return STATUS_INVALID_PARAMETER;
        next = &heap->subheap_list;
    }

    if (!large && (next = list_next( &heap->subheap_list, next )))
    {
        subheap = LIST_ENTRY( next, SUBHEAP, entry );
        base = subheap_base( subheap );
        entry->lpData = base;
        entry->cbData = (char *)first_block( subheap ) - base;
        entry->cbOverhead = 0;
        entry->iRegionIndex = 0;
        entry->wFlags = RTL_HEAP_ENTRY_REGION;
        entry->Region.dwCommittedSize = (char *)subheap_commit_end( subheap ) - base;
        entry->Region.dwUnCommittedSize = subheap_size( subheap ) - entry->Region.dwCommittedSize;
        entry->Region.lpFirstBlock = base + entry->cbData;
        entry->Region.lpLastBlock = base + subheap_size( subheap );
        return STATUS_SUCCESS;
    }

    if (!next) next = &heap->large_list;
    if ((next = list_next( &heap->large_list, next )))
    {
        large = LIST_ENTRY( next, ARENA_LARGE, entry );
        entry->lpData = (void *)(large + 1);
        entry->cbData = large->data_size;
        entry->cbOverhead = 0;
        entry->iRegionIndex = 64;
        entry->wFlags = RTL_HEAP_ENTRY_COMMITTED|RTL_HEAP_ENTRY_BLOCK|RTL_HEAP_ENTRY_BUSY;
        return STATUS_SUCCESS;
    }

    return STATUS_NO_MORE_ENTRIES;
}

/***********************************************************************
 *           RtlWalkHeap    (NTDLL.@)
 */
NTSTATUS WINAPI RtlWalkHeap( HANDLE heap, void *entry_ptr )
{
    struct rtl_heap_entry *entry = entry_ptr;
    NTSTATUS status;
    HEAP *heapPtr;

    if (!entry) return STATUS_INVALID_PARAMETER;

    if (!(heapPtr = HEAP_GetPtr(heap)))
        status = STATUS_INVALID_HANDLE;
    else
    {
        heap_lock( heapPtr, 0 );
        status = heap_walk( heapPtr, entry );
        heap_unlock( heapPtr, 0 );
    }

    TRACE( "heap %p, entry %p %s, return %#x\n", heap, entry,
           status ? "<empty>" : debugstr_heap_entry(entry), status );
    return status;
}

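/* Usage sketch (illustrative only, not part of the implementation): heap_walk() uses
 * entry->lpData as its iteration cursor, so callers zero the entry once and keep
 * calling until STATUS_NO_MORE_ENTRIES, locking the heap around the loop to get a
 * consistent view:
 *
 *   struct rtl_heap_entry entry = {0};
 *   RtlLockHeap( heap );
 *   while (!RtlWalkHeap( heap, &entry ))
 *       TRACE( "entry %p, size %#Ix, flags %#x\n", entry.lpData, entry.cbData, entry.wFlags );
 *   RtlUnlockHeap( heap );
 */
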
/***********************************************************************
 *           RtlGetProcessHeaps    (NTDLL.@)
 *
 * Get the Heaps belonging to the current process.
 *
 * PARAMS
 *  count [I] size of heaps
 *  heaps [O] Destination array for heap HANDLE's
 *
 * RETURNS
 *  Success: The number of Heaps allocated by the process.
 */
ULONG WINAPI RtlGetProcessHeaps( ULONG count, HANDLE *heaps )
{
    ULONG total = 1;  /* main heap */
    struct list *ptr;

    RtlEnterCriticalSection( &processHeap->cs );
    LIST_FOR_EACH( ptr, &processHeap->entry ) total++;
    if (total <= count)
    {
        *heaps++ = processHeap;
        LIST_FOR_EACH( ptr, &processHeap->entry )
            *heaps++ = LIST_ENTRY( ptr, HEAP, entry );
    }
    RtlLeaveCriticalSection( &processHeap->cs );
    return total;
}

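/* Usage sketch (illustrative only, not part of the implementation): the usual two-call
 * pattern, first querying the number of heaps, then fetching the handles; when the
 * total exceeds the count passed in, the destination array is left untouched:
 *
 *   ULONG count = RtlGetProcessHeaps( 0, NULL );
 *   HANDLE *heaps = RtlAllocateHeap( NtCurrentTeb()->Peb->ProcessHeap, 0, count * sizeof(HANDLE) );
 *   if (heaps && RtlGetProcessHeaps( count, heaps ) <= count)
 *       ...  heaps[0] is the process heap, followed by the private heaps
 */
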
/***********************************************************************
 *           RtlQueryHeapInformation    (NTDLL.@)
 */
NTSTATUS WINAPI RtlQueryHeapInformation( HANDLE heap, HEAP_INFORMATION_CLASS info_class,
                                         PVOID info, SIZE_T size_in, PSIZE_T size_out )
{
    switch (info_class)
    {
    case HeapCompatibilityInformation:
        if (size_out) *size_out = sizeof(ULONG);

        if (size_in < sizeof(ULONG))
            return STATUS_BUFFER_TOO_SMALL;

        *(ULONG *)info = 0; /* standard heap */
        return STATUS_SUCCESS;

    default:
        FIXME("Unknown heap information class %u\n", info_class);
        return STATUS_INVALID_INFO_CLASS;
    }
}

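/* Usage sketch (illustrative only, not part of the implementation): querying the
 * compatibility information; 0 means a standard heap, 2 would indicate a
 * low-fragmentation heap, which this implementation never reports:
 *
 *   ULONG compat;
 *   SIZE_T len;
 *   if (!RtlQueryHeapInformation( heap, HeapCompatibilityInformation,
 *                                 &compat, sizeof(compat), &len ))
 *       TRACE( "heap %p compatibility %u\n", heap, compat );
 */
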
/***********************************************************************
 *           RtlSetHeapInformation    (NTDLL.@)
 */
NTSTATUS WINAPI RtlSetHeapInformation( HANDLE heap, HEAP_INFORMATION_CLASS info_class, PVOID info, SIZE_T size )
{
    FIXME("%p %d %p %ld stub\n", heap, info_class, info, size);
    return STATUS_SUCCESS;
}

/***********************************************************************
 *           RtlGetUserInfoHeap    (NTDLL.@)
 */
BOOLEAN WINAPI RtlGetUserInfoHeap( HANDLE heap, ULONG flags, void *ptr, void **user_value, ULONG *user_flags )
{
    FIXME( "heap %p, flags %#x, ptr %p, user_value %p, user_flags %p semi-stub!\n",
           heap, flags, ptr, user_value, user_flags );
    *user_value = NULL;
    *user_flags = 0;
    return TRUE;
}

/***********************************************************************
 *           RtlSetUserValueHeap    (NTDLL.@)
 */
BOOLEAN WINAPI RtlSetUserValueHeap( HANDLE heap, ULONG flags, void *ptr, void *user_value )
{
    FIXME( "heap %p, flags %#x, ptr %p, user_value %p stub!\n", heap, flags, ptr, user_value );
    return FALSE;
}

/***********************************************************************
 *           RtlSetUserFlagsHeap    (NTDLL.@)
 */
BOOLEAN WINAPI RtlSetUserFlagsHeap( HANDLE heap, ULONG flags, void *ptr, ULONG clear, ULONG set )
{
    FIXME( "heap %p, flags %#x, ptr %p, clear %#x, set %#x stub!\n", heap, flags, ptr, clear, set );