4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
27 * Copyright (c) 2012 by Delphix. All rights reserved.
28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
32 * Big Theory Statement for the virtual memory allocator.
34 * For a more complete description of the main ideas, see:
36 * Jeff Bonwick and Jonathan Adams,
38 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
39 * Arbitrary Resources.
41 * Proceedings of the 2001 Usenix Conference.
42 * Available as http://www.usenix.org/event/usenix01/bonwick.html
 * 1.1 Overview
 * ------------
 50 * We divide the kernel address space into a number of logically distinct
51 * pieces, or *arenas*: text, data, heap, stack, and so on. Within these
52 * arenas we often subdivide further; for example, we use heap addresses
53 * not only for the kernel heap (kmem_alloc() space), but also for DVMA,
54 * bp_mapin(), /dev/kmem, and even some device mappings like the TOD chip.
55 * The kernel address space, therefore, is most accurately described as
56 * a tree of arenas in which each node of the tree *imports* some subset
57 * of its parent. The virtual memory allocator manages these arenas and
58 * supports their natural hierarchical structure.
 * 1.2 Arenas
 * ----------
 62 * An arena is nothing more than a set of integers. These integers most
63 * commonly represent virtual addresses, but in fact they can represent
64 * anything at all. For example, we could use an arena containing the
65 * integers minpid through maxpid to allocate process IDs. vmem_create()
66 * and vmem_destroy() create and destroy vmem arenas. In order to
 67 * differentiate between arenas used for addresses and arenas used for
68 * identifiers, the VMC_IDENTIFIER flag is passed to vmem_create(). This
 69 * prevents identifier exhaustion from being diagnosed as general memory
 * exhaustion.
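 *
 * As an illustration only (hypothetical names, not code from this file),
 * an identifier arena for process IDs could be created and used like this:
 *
 *	vmem_t *pid_arena = vmem_create("pid", (void *)(uintptr_t)minpid,
 *	    maxpid - minpid + 1, 1, NULL, NULL, NULL, 0,
 *	    VM_SLEEP | VMC_IDENTIFIER);
 *	pid_t pid = (pid_t)(uintptr_t)vmem_alloc(pid_arena, 1, VM_SLEEP);
 *	...
 *	vmem_free(pid_arena, (void *)(uintptr_t)pid, 1);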
 * 1.3 Spans
 * ---------
 74 * We represent the integers in an arena as a collection of *spans*, or
75 * contiguous ranges of integers. For example, the kernel heap consists
76 * of just one span: [kernelheap, ekernelheap). Spans can be added to an
77 * arena in two ways: explicitly, by vmem_add(), or implicitly, by
78 * importing, as described in Section 1.5 below.
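 *
 * For example (hypothetical names), an explicit span is handed to an
 * existing arena with:
 *
 *	(void) vmem_add(arena, (void *)span_base, span_size, VM_SLEEP);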
 * 1.4 Segments
 * ------------
 82 * Spans are subdivided into *segments*, each of which is either allocated
83 * or free. A segment, like a span, is a contiguous range of integers.
84 * Each allocated segment [addr, addr + size) represents exactly one
85 * vmem_alloc(size) that returned addr. Free segments represent the space
86 * between allocated segments. If two free segments are adjacent, we
87 * coalesce them into one larger segment; that is, if segments [a, b) and
88 * [b, c) are both free, we merge them into a single segment [a, c).
89 * The segments within a span are linked together in increasing-address order
90 * so we can easily determine whether coalescing is possible.
92 * Segments never cross span boundaries. When all segments within
93 * an imported span become free, we return the span to its source.
 * 1.5 Imported Memory
 * -------------------
 97 * As mentioned in the overview, some arenas are logical subsets of
98 * other arenas. For example, kmem_va_arena (a virtual address cache
99 * that satisfies most kmem_slab_create() requests) is just a subset
100 * of heap_arena (the kernel heap) that provides caching for the most
101 * common slab sizes. When kmem_va_arena runs out of virtual memory,
102 * it *imports* more from the heap; we say that heap_arena is the
103 * *vmem source* for kmem_va_arena. vmem_create() allows you to
104 * specify any existing vmem arena as the source for your new arena.
105 * Topologically, since every arena is a child of at most one source,
106 * the set of all arenas forms a collection of trees.
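 *
 * A sketch (the arena name is made up; heap_arena is the kernel heap
 * mentioned above) of creating an arena that starts empty and imports
 * on demand from a source, in the same style vmem_init() uses below:
 *
 *	vmem_t *sub_arena = vmem_create("sub_arena", NULL, 0, PAGESIZE,
 *	    vmem_alloc, vmem_free, heap_arena, 0, VM_SLEEP);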
108 * 1.6 Constrained Allocations
109 * ---------------------------
110 * Some vmem clients are quite picky about the kind of address they want.
111 * For example, the DVMA code may need an address that is at a particular
112 * phase with respect to some alignment (to get good cache coloring), or
113 * that lies within certain limits (the addressable range of a device),
114 * or that doesn't cross some boundary (a DMA counter restriction) --
115 * or all of the above. vmem_xalloc() allows the client to specify any
116 * or all of these constraints.
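 *
 * For example (a sketch with made-up constraint values), an 8K DVMA-style
 * request that must be 64K-aligned (phase 0), must not cross a 1M
 * boundary, and must lie below 16M could be expressed as:
 *
 *	addr = vmem_xalloc(dvma_arena, 8192, 65536, 0, 0x100000,
 *	    NULL, (void *)0x1000000, VM_SLEEP);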
118 * 1.7 The Vmem Quantum
119 * --------------------
120 * Every arena has a notion of 'quantum', specified at vmem_create() time,
121 * that defines the arena's minimum unit of currency. Most commonly the
122 * quantum is either 1 or PAGESIZE, but any power of 2 is legal.
123 * All vmem allocations are guaranteed to be quantum-aligned.
125 * 1.8 Quantum Caching
126 * -------------------
127 * A vmem arena may be so hot (frequently used) that the scalability of vmem
128 * allocation is a significant concern. We address this by allowing the most
129 * common allocation sizes to be serviced by the kernel memory allocator,
130 * which provides low-latency per-cpu caching. The qcache_max argument to
131 * vmem_create() specifies the largest allocation size to cache.
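 *
 * A sketch (sizes are illustrative): an arena whose quantum is PAGESIZE
 * and whose allocations of up to 8 pages should be served by per-cpu
 * kmem caches would pass qcache_max = 8 * PAGESIZE:
 *
 *	vmem_t *va_arena = vmem_create("va_arena", NULL, 0, PAGESIZE,
 *	    vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE, VM_SLEEP);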
133 * 1.9 Relationship to Kernel Memory Allocator
134 * -------------------------------------------
135 * Every kmem cache has a vmem arena as its slab supplier. The kernel memory
136 * allocator uses vmem_alloc() and vmem_free() to create and destroy slabs.
142 * 2.1 Segment lists and markers
143 * -----------------------------
144 * The segment structure (vmem_seg_t) contains two doubly-linked lists.
146 * The arena list (vs_anext/vs_aprev) links all segments in the arena.
147 * In addition to the allocated and free segments, the arena contains
148 * special marker segments at span boundaries. Span markers simplify
149 * coalescing and importing logic by making it easy to tell both when
150 * we're at a span boundary (so we don't coalesce across it), and when
151 * a span is completely free (its neighbors will both be span markers).
153 * Imported spans will have vs_import set.
155 * The next-of-kin list (vs_knext/vs_kprev) links segments of the same type:
156 * (1) for allocated segments, vs_knext is the hash chain linkage;
157 * (2) for free segments, vs_knext is the freelist linkage;
158 * (3) for span marker segments, vs_knext is the next span marker.
160 * 2.2 Allocation hashing
161 * ----------------------
162 * We maintain a hash table of all allocated segments, hashed by address.
163 * This allows vmem_free() to discover the target segment in constant time.
164 * vmem_update() periodically resizes hash tables to keep hash chains short.
166 * 2.3 Freelist management
167 * -----------------------
168 * We maintain power-of-2 freelists for free segments, i.e. free segments
169 * of size >= 2^n reside in vmp->vm_freelist[n]. To ensure constant-time
170 * allocation, vmem_xalloc() looks not in the first freelist that *might*
171 * satisfy the allocation, but in the first freelist that *definitely*
172 * satisfies the allocation (unless VM_BESTFIT is specified, or all larger
173 * freelists are empty). For example, a 1000-byte allocation will be
174 * satisfied not from the 512..1023-byte freelist, whose members *might*
 175 * contain a 1000-byte segment, but from a 1024-byte or larger freelist,
176 * the first member of which will *definitely* satisfy the allocation.
177 * This ensures that vmem_xalloc() works in constant time.
179 * We maintain a bit map to determine quickly which freelists are non-empty.
180 * vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
182 * The different freelists are linked together into one large freelist,
183 * with the freelist heads serving as markers. Freelist markers simplify
184 * the maintenance of vm_freemap by making it easy to tell when we're taking
185 * the last member of a freelist (both of its neighbors will be markers).
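 *
 * As a worked example of the instant-fit lookup used by vmem_alloc() and
 * vmem_canalloc() below: for a 1000-byte request, highbit(1000) is 10, so
 * only freelists whose members are at least 2^10 = 1024 bytes are
 * considered, and vm_freemap yields the smallest non-empty one:
 *
 *	hb = highbit(size);
 *	flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));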
 * 2.4 Vmem Locking
 * ----------------
 189 * For simplicity, all arena state is protected by a per-arena lock.
190 * For very hot arenas, use quantum caching for scalability.
192 * 2.5 Vmem Population
193 * -------------------
194 * Any internal vmem routine that might need to allocate new segment
195 * structures must prepare in advance by calling vmem_populate(), which
 196 * will preallocate enough vmem_seg_t's to get it through the entire
197 * operation without dropping the arena lock.
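 *
 * The calling pattern, as used by the allocation paths later in this file:
 *
 *	mutex_enter(&vmp->vm_lock);
 *	if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
 *		mutex_exit(&vmp->vm_lock);
 *		return (NULL);
 *	}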
 * 2.6 Auditing
 * ------------
 201 * If KMF_AUDIT is set in kmem_flags, we audit vmem allocations as well.
202 * Since virtual addresses cannot be scribbled on, there is no equivalent
203 * in vmem to redzone checking, deadbeef, or other kmem debugging features.
204 * Moreover, we do not audit frees because segment coalescing destroys the
205 * association between an address and its segment structure. Auditing is
206 * thus intended primarily to keep track of who's consuming the arena.
207 * Debugging support could certainly be extended in the future if it proves
208 * necessary, but we do so much live checking via the allocation hash table
209 * that even non-DEBUG systems get quite a bit of sanity checking already.
212 #include <sys/vmem_impl.h>
213 #include <sys/kmem.h>
214 #include <sys/kstat.h>
215 #include <sys/param.h>
216 #include <sys/systm.h>
217 #include <sys/atomic.h>
218 #include <sys/bitmap.h>
219 #include <sys/sysmacros.h>
220 #include <sys/cmn_err.h>
221 #include <sys/debug.h>
222 #include <sys/panic.h>
224 #define VMEM_INITIAL 10 /* early vmem arenas */
225 #define VMEM_SEG_INITIAL 200 /* early segments */
228 * Adding a new span to an arena requires two segment structures: one to
229 * represent the span, and one to represent the free segment it contains.
231 #define VMEM_SEGS_PER_SPAN_CREATE 2
234 * Allocating a piece of an existing segment requires 0-2 segment structures
235 * depending on how much of the segment we're allocating.
237 * To allocate the entire segment, no new segment structures are needed; we
238 * simply move the existing segment structure from the freelist to the
239 * allocation hash table.
241 * To allocate a piece from the left or right end of the segment, we must
242 * split the segment into two pieces (allocated part and remainder), so we
243 * need one new segment structure to represent the remainder.
 245 * To allocate from the middle of a segment, we need two new segment structures
246 * to represent the remainders on either side of the allocated part.
248 #define VMEM_SEGS_PER_EXACT_ALLOC 0
249 #define VMEM_SEGS_PER_LEFT_ALLOC 1
250 #define VMEM_SEGS_PER_RIGHT_ALLOC 1
251 #define VMEM_SEGS_PER_MIDDLE_ALLOC 2
254 * vmem_populate() preallocates segment structures for vmem to do its work.
255 * It must preallocate enough for the worst case, which is when we must import
256 * a new span and then allocate from the middle of it.
258 #define VMEM_SEGS_PER_ALLOC_MAX \
259 (VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
262 * The segment structures themselves are allocated from vmem_seg_arena, so
263 * we have a recursion problem when vmem_seg_arena needs to populate itself.
264 * We address this by working out the maximum number of segment structures
265 * this act will require, and multiplying by the maximum number of threads
266 * that we'll allow to do it simultaneously.
268 * The worst-case segment consumption to populate vmem_seg_arena is as
269 * follows (depicted as a stack trace to indicate why events are occurring):
271 * (In order to lower the fragmentation in the heap_arena, we specify a
272 * minimum import size for the vmem_metadata_arena which is the same size
273 * as the kmem_va quantum cache allocations. This causes the worst-case
274 * allocation from the vmem_metadata_arena to be 3 segments.)
276 * vmem_alloc(vmem_seg_arena) -> 2 segs (span create + exact alloc)
277 * segkmem_alloc(vmem_metadata_arena)
278 * vmem_alloc(vmem_metadata_arena) -> 3 segs (span create + left alloc)
279 * vmem_alloc(heap_arena) -> 1 seg (left alloc)
284 * vmem_alloc(hat_memload_arena) -> 2 segs (span create + exact alloc)
285 * segkmem_alloc(heap_arena)
286 * vmem_alloc(heap_arena) -> 1 seg (left alloc)
288 * hat_memload() -> (hat layer won't recurse further)
290 * The worst-case consumption for each arena is 3 segment structures.
291 * Of course, a 3-seg reserve could easily be blown by multiple threads.
292 * Therefore, we serialize all allocations from vmem_seg_arena (which is OK
293 * because they're rare). We cannot allow a non-blocking allocation to get
294 * tied up behind a blocking allocation, however, so we use separate locks
295 * for VM_SLEEP and VM_NOSLEEP allocations. Similarly, VM_PUSHPAGE allocations
296 * must not block behind ordinary VM_SLEEPs. In addition, if the system is
297 * panicking then we must keep enough resources for panic_thread to do its
298 * work. Thus we have at most four threads trying to allocate from
299 * vmem_seg_arena, and each thread consumes at most three segment structures,
300 * so we must maintain a 12-seg reserve.
302 #define VMEM_POPULATE_RESERVE 12
305 * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
306 * so that it can satisfy the worst-case allocation *and* participate in
307 * worst-case allocation from vmem_seg_arena.
309 #define VMEM_MINFREE (VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
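/*
 * With the constants above this works out to 12 + (2 + 2) = 16
 * preallocated segment structures per arena.
 */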
static vmem_t vmem0[VMEM_INITIAL];
static vmem_t *vmem_populator[VMEM_INITIAL];
static uint32_t vmem_id;
static uint32_t vmem_populators;
static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
static vmem_seg_t *vmem_segfree;
static kmutex_t vmem_list_lock;
static kmutex_t vmem_segfree_lock;
static kmutex_t vmem_sleep_lock;
static kmutex_t vmem_nosleep_lock;
static kmutex_t vmem_pushpage_lock;
static kmutex_t vmem_panic_lock;
static vmem_t *vmem_list;
static vmem_t *vmem_metadata_arena;
static vmem_t *vmem_seg_arena;
static vmem_t *vmem_hash_arena;
static vmem_t *vmem_vmem_arena;
static long vmem_update_interval = 15;	/* vmem_update() every 15 seconds */
uint32_t vmem_mtbf;		/* mean time between failures [default: off] */
size_t vmem_seg_size = sizeof (vmem_seg_t);
static vmem_kstat_t vmem_kstat_template = {
	{ "mem_inuse",		KSTAT_DATA_UINT64 },
	{ "mem_import",		KSTAT_DATA_UINT64 },
	{ "mem_total",		KSTAT_DATA_UINT64 },
	{ "vmem_source",	KSTAT_DATA_UINT32 },
	{ "alloc",		KSTAT_DATA_UINT64 },
	{ "free",		KSTAT_DATA_UINT64 },
	{ "wait",		KSTAT_DATA_UINT64 },
	{ "fail",		KSTAT_DATA_UINT64 },
	{ "lookup",		KSTAT_DATA_UINT64 },
	{ "search",		KSTAT_DATA_UINT64 },
	{ "populate_wait",	KSTAT_DATA_UINT64 },
	{ "populate_fail",	KSTAT_DATA_UINT64 },
	{ "contains",		KSTAT_DATA_UINT64 },
	{ "contains_search",	KSTAT_DATA_UINT64 },
};
/*
 * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
 */
#define	VMEM_INSERT(vprev, vsp, type)					\
{									\
	vmem_seg_t *vnext = (vprev)->vs_##type##next;			\
	(vsp)->vs_##type##next = (vnext);				\
	(vsp)->vs_##type##prev = (vprev);				\
	(vprev)->vs_##type##next = (vsp);				\
	(vnext)->vs_##type##prev = (vsp);				\
}

#define	VMEM_DELETE(vsp, type)						\
{									\
	vmem_seg_t *vprev = (vsp)->vs_##type##prev;			\
	vmem_seg_t *vnext = (vsp)->vs_##type##next;			\
	(vprev)->vs_##type##next = (vnext);				\
	(vnext)->vs_##type##prev = (vprev);				\
}
/*
 * Get a vmem_seg_t from the global segfree list.
 */
static vmem_seg_t *
vmem_getseg_global(void)
{
	vmem_seg_t *vsp;

	mutex_enter(&vmem_segfree_lock);
	if ((vsp = vmem_segfree) != NULL)
		vmem_segfree = vsp->vs_knext;
	mutex_exit(&vmem_segfree_lock);

	return (vsp);
}
/*
 * Put a vmem_seg_t on the global segfree list.
 */
static void
vmem_putseg_global(vmem_seg_t *vsp)
{
	mutex_enter(&vmem_segfree_lock);
	vsp->vs_knext = vmem_segfree;
	vmem_segfree = vsp;
	mutex_exit(&vmem_segfree_lock);
}
/*
 * Get a vmem_seg_t from vmp's segfree list.
 */
static vmem_seg_t *
vmem_getseg(vmem_t *vmp)
{
	vmem_seg_t *vsp;

	ASSERT(vmp->vm_nsegfree > 0);

	vsp = vmp->vm_segfree;
	vmp->vm_segfree = vsp->vs_knext;
	vmp->vm_nsegfree--;

	return (vsp);
}
/*
 * Put a vmem_seg_t on vmp's segfree list.
 */
static void
vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
{
	vsp->vs_knext = vmp->vm_segfree;
	vmp->vm_segfree = vsp;
	vmp->vm_nsegfree++;
}
/*
 * Add vsp to the appropriate freelist.
 */
static void
vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t *vprev;

	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);

	vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
	vsp->vs_type = VMEM_FREE;
	vmp->vm_freemap |= VS_SIZE(vprev);
	VMEM_INSERT(vprev, vsp, k);

	cv_broadcast(&vmp->vm_cv);
}
/*
 * Take vsp from the freelist.
 */
static void
vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
{
	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
	ASSERT(vsp->vs_type == VMEM_FREE);

	if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
		/*
		 * The segments on both sides of 'vsp' are freelist heads,
		 * so taking vsp leaves the freelist at vsp->vs_kprev empty.
		 */
		ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
		vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
	}
	VMEM_DELETE(vsp, k);
}
/*
 * Add vsp to the allocated-segment hash table and update kstats.
 */
static void
vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t **bucket;

	vsp->vs_type = VMEM_ALLOC;
	bucket = VMEM_HASH(vmp, vsp->vs_start);
	vsp->vs_knext = *bucket;
	*bucket = vsp;

	if (vmem_seg_size == sizeof (vmem_seg_t)) {
		vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
		    VMEM_STACK_DEPTH);
		vsp->vs_thread = curthread;
		vsp->vs_timestamp = gethrtime();
	} else {
		vsp->vs_depth = 0;
	}

	vmp->vm_kstat.vk_alloc.value.ui64++;
	vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
}
/*
 * Remove vsp from the allocated-segment hash table and update kstats.
 */
static vmem_seg_t *
vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
{
	vmem_seg_t *vsp, **prev_vspp;

	prev_vspp = VMEM_HASH(vmp, addr);
	while ((vsp = *prev_vspp) != NULL) {
		if (vsp->vs_start == addr) {
			*prev_vspp = vsp->vs_knext;
			break;
		}
		vmp->vm_kstat.vk_lookup.value.ui64++;
		prev_vspp = &vsp->vs_knext;
	}

	if (vsp == NULL)
		panic("vmem_hash_delete(%p, %lx, %lu): bad free",
		    (void *)vmp, addr, size);
	if (VS_SIZE(vsp) != size)
		panic("vmem_hash_delete(%p, %lx, %lu): wrong size (expect %lu)",
		    (void *)vmp, addr, size, VS_SIZE(vsp));

	vmp->vm_kstat.vk_free.value.ui64++;
	vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;

	return (vsp);
}
/*
 * Create a segment spanning the range [start, end) and add it to the arena.
 */
static vmem_seg_t *
vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
{
	vmem_seg_t *newseg = vmem_getseg(vmp);

	newseg->vs_start = start;
	newseg->vs_end = end;
	newseg->vs_type = 0;
	newseg->vs_import = 0;

	VMEM_INSERT(vprev, newseg, a);

	return (newseg);
}
/*
 * Remove segment vsp from the arena.
 */
static void
vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
	ASSERT(vsp->vs_type != VMEM_ROTOR);

	VMEM_DELETE(vsp, a);
	vmem_putseg(vmp, vsp);
}
/*
 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
 */
static vmem_seg_t *
vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
{
	vmem_seg_t *newseg, *span;
	uintptr_t start = (uintptr_t)vaddr;
	uintptr_t end = start + size;

	ASSERT(MUTEX_HELD(&vmp->vm_lock));

	if ((start | end) & (vmp->vm_quantum - 1))
		panic("vmem_span_create(%p, %p, %lu): misaligned",
		    (void *)vmp, vaddr, size);

	span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
	span->vs_type = VMEM_SPAN;
	span->vs_import = import;
	VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);

	newseg = vmem_seg_create(vmp, span, start, end);
	vmem_freelist_insert(vmp, newseg);

	if (import)
		vmp->vm_kstat.vk_mem_import.value.ui64 += size;
	vmp->vm_kstat.vk_mem_total.value.ui64 += size;

	return (newseg);
}
/*
 * Remove span vsp from vmp and update kstats.
 */
static void
vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t *span = vsp->vs_aprev;
	size_t size = VS_SIZE(vsp);

	ASSERT(MUTEX_HELD(&vmp->vm_lock));
	ASSERT(span->vs_type == VMEM_SPAN);

	if (span->vs_import)
		vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
	vmp->vm_kstat.vk_mem_total.value.ui64 -= size;

	VMEM_DELETE(span, k);

	vmem_seg_destroy(vmp, vsp);
	vmem_seg_destroy(vmp, span);
}
/*
 * Allocate the subrange [addr, addr + size) from segment vsp.
 * If there are leftovers on either side, place them on the freelist.
 * Returns a pointer to the segment representing [addr, addr + size).
 */
static vmem_seg_t *
vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
{
	uintptr_t vs_start = vsp->vs_start;
	uintptr_t vs_end = vsp->vs_end;
	size_t vs_size = vs_end - vs_start;
	size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
	uintptr_t addr_end = addr + realsize;

	ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
	ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
	ASSERT(vsp->vs_type == VMEM_FREE);
	ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
	ASSERT(addr - 1 <= addr_end - 1);

	/*
	 * If we're allocating from the start of the segment, and the
	 * remainder will be on the same freelist, we can save quite
	 * a bit of work.
	 */
	if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
		ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
		vsp->vs_start = addr_end;
		vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
		vmem_hash_insert(vmp, vsp);
		return (vsp);
	}

	vmem_freelist_delete(vmp, vsp);

	if (vs_end != addr_end)
		vmem_freelist_insert(vmp,
		    vmem_seg_create(vmp, vsp, addr_end, vs_end));

	if (vs_start != addr)
		vmem_freelist_insert(vmp,
		    vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));

	vsp->vs_start = addr;
	vsp->vs_end = addr + size;

	vmem_hash_insert(vmp, vsp);
	return (vsp);
}
/*
 * Returns 1 if we are populating, 0 otherwise.
 * Call it if we want to prevent recursion from HAT.
 */
int
vmem_is_populator()
{
	return (mutex_owner(&vmem_sleep_lock) == curthread ||
	    mutex_owner(&vmem_nosleep_lock) == curthread ||
	    mutex_owner(&vmem_pushpage_lock) == curthread ||
	    mutex_owner(&vmem_panic_lock) == curthread);
}
667 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
670 vmem_populate(vmem_t
*vmp
, int vmflag
)
679 while (vmp
->vm_nsegfree
< VMEM_MINFREE
&&
680 (vsp
= vmem_getseg_global()) != NULL
)
681 vmem_putseg(vmp
, vsp
);
683 if (vmp
->vm_nsegfree
>= VMEM_MINFREE
)
687 * If we're already populating, tap the reserve.
689 if (vmem_is_populator()) {
690 ASSERT(vmp
->vm_cflags
& VMC_POPULATOR
);
694 mutex_exit(&vmp
->vm_lock
);
696 if (panic_thread
== curthread
)
697 lp
= &vmem_panic_lock
;
698 else if (vmflag
& VM_NOSLEEP
)
699 lp
= &vmem_nosleep_lock
;
700 else if (vmflag
& VM_PUSHPAGE
)
701 lp
= &vmem_pushpage_lock
;
703 lp
= &vmem_sleep_lock
;
707 nseg
= VMEM_MINFREE
+ vmem_populators
* VMEM_POPULATE_RESERVE
;
708 size
= P2ROUNDUP(nseg
* vmem_seg_size
, vmem_seg_arena
->vm_quantum
);
709 nseg
= size
/ vmem_seg_size
;
712 * The following vmem_alloc() may need to populate vmem_seg_arena
713 * and all the things it imports from. When doing so, it will tap
714 * each arena's reserve to prevent recursion (see the block comment
715 * above the definition of VMEM_POPULATE_RESERVE).
717 p
= vmem_alloc(vmem_seg_arena
, size
, vmflag
& VM_KMFLAGS
);
720 mutex_enter(&vmp
->vm_lock
);
721 vmp
->vm_kstat
.vk_populate_fail
.value
.ui64
++;
726 * Restock the arenas that may have been depleted during population.
728 for (i
= 0; i
< vmem_populators
; i
++) {
729 mutex_enter(&vmem_populator
[i
]->vm_lock
);
730 while (vmem_populator
[i
]->vm_nsegfree
< VMEM_POPULATE_RESERVE
)
731 vmem_putseg(vmem_populator
[i
],
732 (vmem_seg_t
*)(p
+ --nseg
* vmem_seg_size
));
733 mutex_exit(&vmem_populator
[i
]->vm_lock
);
737 mutex_enter(&vmp
->vm_lock
);
740 * Now take our own segments.
742 ASSERT(nseg
>= VMEM_MINFREE
);
743 while (vmp
->vm_nsegfree
< VMEM_MINFREE
)
744 vmem_putseg(vmp
, (vmem_seg_t
*)(p
+ --nseg
* vmem_seg_size
));
747 * Give the remainder to charity.
750 vmem_putseg_global((vmem_seg_t
*)(p
+ --nseg
* vmem_seg_size
));
756 * Advance a walker from its previous position to 'afterme'.
757 * Note: may drop and reacquire vmp->vm_lock.
760 vmem_advance(vmem_t
*vmp
, vmem_seg_t
*walker
, vmem_seg_t
*afterme
)
762 vmem_seg_t
*vprev
= walker
->vs_aprev
;
763 vmem_seg_t
*vnext
= walker
->vs_anext
;
764 vmem_seg_t
*vsp
= NULL
;
766 VMEM_DELETE(walker
, a
);
769 VMEM_INSERT(afterme
, walker
, a
);
772 * The walker segment's presence may have prevented its neighbors
773 * from coalescing. If so, coalesce them now.
775 if (vprev
->vs_type
== VMEM_FREE
) {
776 if (vnext
->vs_type
== VMEM_FREE
) {
777 ASSERT(vprev
->vs_end
== vnext
->vs_start
);
778 vmem_freelist_delete(vmp
, vnext
);
779 vmem_freelist_delete(vmp
, vprev
);
780 vprev
->vs_end
= vnext
->vs_end
;
781 vmem_freelist_insert(vmp
, vprev
);
782 vmem_seg_destroy(vmp
, vnext
);
785 } else if (vnext
->vs_type
== VMEM_FREE
) {
790 * vsp could represent a complete imported span,
791 * in which case we must return it to the source.
793 if (vsp
!= NULL
&& vsp
->vs_aprev
->vs_import
&&
794 vmp
->vm_source_free
!= NULL
&&
795 vsp
->vs_aprev
->vs_type
== VMEM_SPAN
&&
796 vsp
->vs_anext
->vs_type
== VMEM_SPAN
) {
797 void *vaddr
= (void *)vsp
->vs_start
;
798 size_t size
= VS_SIZE(vsp
);
799 ASSERT(size
== VS_SIZE(vsp
->vs_aprev
));
800 vmem_freelist_delete(vmp
, vsp
);
801 vmem_span_destroy(vmp
, vsp
);
802 mutex_exit(&vmp
->vm_lock
);
803 vmp
->vm_source_free(vmp
->vm_source
, vaddr
, size
);
804 mutex_enter(&vmp
->vm_lock
);
809 * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
810 * in an arena, so that we avoid reusing addresses for as long as possible.
811 * This helps to catch used-after-freed bugs. It's also the perfect policy
812 * for allocating things like process IDs, where we want to cycle through
813 * all values in order.
816 vmem_nextfit_alloc(vmem_t
*vmp
, size_t size
, int vmflag
)
818 vmem_seg_t
*vsp
, *rotor
;
820 size_t realsize
= P2ROUNDUP(size
, vmp
->vm_quantum
);
823 mutex_enter(&vmp
->vm_lock
);
825 if (vmp
->vm_nsegfree
< VMEM_MINFREE
&& !vmem_populate(vmp
, vmflag
)) {
826 mutex_exit(&vmp
->vm_lock
);
831 * The common case is that the segment right after the rotor is free,
832 * and large enough that extracting 'size' bytes won't change which
833 * freelist it's on. In this case we can avoid a *lot* of work.
834 * Instead of the normal vmem_seg_alloc(), we just advance the start
835 * address of the victim segment. Instead of moving the rotor, we
836 * create the new segment structure *behind the rotor*, which has
837 * the same effect. And finally, we know we don't have to coalesce
838 * the rotor's neighbors because the new segment lies between them.
840 rotor
= &vmp
->vm_rotor
;
841 vsp
= rotor
->vs_anext
;
842 if (vsp
->vs_type
== VMEM_FREE
&& (vs_size
= VS_SIZE(vsp
)) > realsize
&&
843 P2SAMEHIGHBIT(vs_size
, vs_size
- realsize
)) {
844 ASSERT(highbit(vs_size
) == highbit(vs_size
- realsize
));
845 addr
= vsp
->vs_start
;
846 vsp
->vs_start
= addr
+ realsize
;
847 vmem_hash_insert(vmp
,
848 vmem_seg_create(vmp
, rotor
->vs_aprev
, addr
, addr
+ size
));
849 mutex_exit(&vmp
->vm_lock
);
850 return ((void *)addr
);
854 * Starting at the rotor, look for a segment large enough to
855 * satisfy the allocation.
858 vmp
->vm_kstat
.vk_search
.value
.ui64
++;
859 if (vsp
->vs_type
== VMEM_FREE
&& VS_SIZE(vsp
) >= size
)
864 * We've come full circle. One possibility is that the
865 * there's actually enough space, but the rotor itself
866 * is preventing the allocation from succeeding because
867 * it's sitting between two free segments. Therefore,
868 * we advance the rotor and see if that liberates a
871 vmem_advance(vmp
, rotor
, rotor
->vs_anext
);
872 vsp
= rotor
->vs_aprev
;
873 if (vsp
->vs_type
== VMEM_FREE
&& VS_SIZE(vsp
) >= size
)
876 * If there's a lower arena we can import from, or it's
877 * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
878 * Otherwise, wait until another thread frees something.
880 if (vmp
->vm_source_alloc
!= NULL
||
881 (vmflag
& VM_NOSLEEP
)) {
882 mutex_exit(&vmp
->vm_lock
);
883 return (vmem_xalloc(vmp
, size
, vmp
->vm_quantum
,
884 0, 0, NULL
, NULL
, vmflag
& VM_KMFLAGS
));
886 vmp
->vm_kstat
.vk_wait
.value
.ui64
++;
887 cv_wait(&vmp
->vm_cv
, &vmp
->vm_lock
);
888 vsp
= rotor
->vs_anext
;
893 * We found a segment. Extract enough space to satisfy the allocation.
895 addr
= vsp
->vs_start
;
896 vsp
= vmem_seg_alloc(vmp
, vsp
, addr
, size
);
897 ASSERT(vsp
->vs_type
== VMEM_ALLOC
&&
898 vsp
->vs_start
== addr
&& vsp
->vs_end
== addr
+ size
);
901 * Advance the rotor to right after the newly-allocated segment.
902 * That's where the next VM_NEXTFIT allocation will begin searching.
904 vmem_advance(vmp
, rotor
, vsp
);
905 mutex_exit(&vmp
->vm_lock
);
906 return ((void *)addr
);
/*
 * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
 * freelist.  If size is not a power-of-2, it can return a false-negative.
 *
 * Used to decide if a newly imported span is superfluous after re-acquiring
 * the arena lock.
 */
static int
vmem_canalloc(vmem_t *vmp, size_t size)
{
	int hb;
	int flist = 0;
	ASSERT(MUTEX_HELD(&vmp->vm_lock));

	if ((size & (size - 1)) == 0)
		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
	else if ((hb = highbit(size)) < VMEM_FREELISTS)
		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));

	return (flist);
}
932 * Allocate size bytes at offset phase from an align boundary such that the
933 * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
934 * that does not straddle a nocross-aligned boundary.
937 vmem_xalloc(vmem_t
*vmp
, size_t size
, size_t align_arg
, size_t phase
,
938 size_t nocross
, void *minaddr
, void *maxaddr
, int vmflag
)
941 vmem_seg_t
*vbest
= NULL
;
942 uintptr_t addr
, taddr
, start
, end
;
943 uintptr_t align
= (align_arg
!= 0) ? align_arg
: vmp
->vm_quantum
;
944 void *vaddr
, *xvaddr
= NULL
;
949 if ((align
| phase
| nocross
) & (vmp
->vm_quantum
- 1))
950 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
951 "parameters not vm_quantum aligned",
952 (void *)vmp
, size
, align_arg
, phase
, nocross
,
953 minaddr
, maxaddr
, vmflag
);
956 (align
> nocross
|| P2ROUNDUP(phase
+ size
, align
) > nocross
))
957 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
958 "overconstrained allocation",
959 (void *)vmp
, size
, align_arg
, phase
, nocross
,
960 minaddr
, maxaddr
, vmflag
);
962 if (phase
>= align
|| (align
& (align
- 1)) != 0 ||
963 (nocross
& (nocross
- 1)) != 0)
964 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
965 "parameters inconsistent or invalid",
966 (void *)vmp
, size
, align_arg
, phase
, nocross
,
967 minaddr
, maxaddr
, vmflag
);
969 if ((mtbf
= vmem_mtbf
| vmp
->vm_mtbf
) != 0 && gethrtime() % mtbf
== 0 &&
970 (vmflag
& (VM_NOSLEEP
| VM_PANIC
)) == VM_NOSLEEP
)
973 mutex_enter(&vmp
->vm_lock
);
975 if (vmp
->vm_nsegfree
< VMEM_MINFREE
&&
976 !vmem_populate(vmp
, vmflag
))
980 * highbit() returns the highest bit + 1, which is exactly
981 * what we want: we want to search the first freelist whose
982 * members are *definitely* large enough to satisfy our
983 * allocation. However, there are certain cases in which we
984 * want to look at the next-smallest freelist (which *might*
985 * be able to satisfy the allocation):
987 * (1) The size is exactly a power of 2, in which case
988 * the smaller freelist is always big enough;
990 * (2) All other freelists are empty;
992 * (3) We're in the highest possible freelist, which is
993 * always empty (e.g. the 4GB freelist on 32-bit systems);
995 * (4) We're doing a best-fit or first-fit allocation.
997 if ((size
& (size
- 1)) == 0) {
998 flist
= lowbit(P2ALIGN(vmp
->vm_freemap
, size
));
1001 if ((vmp
->vm_freemap
>> hb
) == 0 ||
1002 hb
== VMEM_FREELISTS
||
1003 (vmflag
& (VM_BESTFIT
| VM_FIRSTFIT
)))
1005 flist
= lowbit(P2ALIGN(vmp
->vm_freemap
, 1UL << hb
));
1008 for (vbest
= NULL
, vsp
= (flist
== 0) ? NULL
:
1009 vmp
->vm_freelist
[flist
- 1].vs_knext
;
1010 vsp
!= NULL
; vsp
= vsp
->vs_knext
) {
1011 vmp
->vm_kstat
.vk_search
.value
.ui64
++;
1012 if (vsp
->vs_start
== 0) {
1014 * We're moving up to a larger freelist,
1015 * so if we've already found a candidate,
1016 * the fit can't possibly get any better.
1021 * Find the next non-empty freelist.
1023 flist
= lowbit(P2ALIGN(vmp
->vm_freemap
,
1027 vsp
= (vmem_seg_t
*)&vmp
->vm_freelist
[flist
];
1028 ASSERT(vsp
->vs_knext
->vs_type
== VMEM_FREE
);
1031 if (vsp
->vs_end
- 1 < (uintptr_t)minaddr
)
1033 if (vsp
->vs_start
> (uintptr_t)maxaddr
- 1)
1035 start
= MAX(vsp
->vs_start
, (uintptr_t)minaddr
);
1036 end
= MIN(vsp
->vs_end
- 1, (uintptr_t)maxaddr
- 1) + 1;
1037 taddr
= P2PHASEUP(start
, align
, phase
);
1038 if (P2BOUNDARY(taddr
, size
, nocross
))
1040 P2ROUNDUP(P2NPHASE(taddr
, nocross
), align
);
1041 if ((taddr
- start
) + size
> end
- start
||
1042 (vbest
!= NULL
&& VS_SIZE(vsp
) >= VS_SIZE(vbest
)))
1046 if (!(vmflag
& VM_BESTFIT
) || VS_SIZE(vbest
) == size
)
1051 ASSERT(xvaddr
== NULL
);
1053 panic("vmem_xalloc(): size == 0");
1054 if (vmp
->vm_source_alloc
!= NULL
&& nocross
== 0 &&
1055 minaddr
== NULL
&& maxaddr
== NULL
) {
1056 size_t aneeded
, asize
;
1057 size_t aquantum
= MAX(vmp
->vm_quantum
,
1058 vmp
->vm_source
->vm_quantum
);
1059 size_t aphase
= phase
;
1060 if ((align
> aquantum
) &&
1061 !(vmp
->vm_cflags
& VMC_XALIGN
)) {
1062 aphase
= (P2PHASE(phase
, aquantum
) != 0) ?
1063 align
- vmp
->vm_quantum
: align
- aquantum
;
1064 ASSERT(aphase
>= phase
);
1066 aneeded
= MAX(size
+ aphase
, vmp
->vm_min_import
);
1067 asize
= P2ROUNDUP(aneeded
, aquantum
);
1071 * The rounding induced overflow; return NULL
1072 * if we are permitted to fail the allocation
1073 * (and explicitly panic if we aren't).
1075 if ((vmflag
& VM_NOSLEEP
) &&
1076 !(vmflag
& VM_PANIC
)) {
1077 mutex_exit(&vmp
->vm_lock
);
1081 panic("vmem_xalloc(): size overflow");
1085 * Determine how many segment structures we'll consume.
1086 * The calculation must be precise because if we're
1087 * here on behalf of vmem_populate(), we are taking
1088 * segments from a very limited reserve.
1090 if (size
== asize
&& !(vmp
->vm_cflags
& VMC_XALLOC
))
1091 resv
= VMEM_SEGS_PER_SPAN_CREATE
+
1092 VMEM_SEGS_PER_EXACT_ALLOC
;
1093 else if (phase
== 0 &&
1094 align
<= vmp
->vm_source
->vm_quantum
)
1095 resv
= VMEM_SEGS_PER_SPAN_CREATE
+
1096 VMEM_SEGS_PER_LEFT_ALLOC
;
1098 resv
= VMEM_SEGS_PER_ALLOC_MAX
;
1100 ASSERT(vmp
->vm_nsegfree
>= resv
);
1101 vmp
->vm_nsegfree
-= resv
; /* reserve our segs */
1102 mutex_exit(&vmp
->vm_lock
);
1103 if (vmp
->vm_cflags
& VMC_XALLOC
) {
1104 size_t oasize
= asize
;
1105 vaddr
= ((vmem_ximport_t
*)
1106 vmp
->vm_source_alloc
)(vmp
->vm_source
,
1107 &asize
, align
, vmflag
& VM_KMFLAGS
);
1108 ASSERT(asize
>= oasize
);
1109 ASSERT(P2PHASE(asize
,
1110 vmp
->vm_source
->vm_quantum
) == 0);
1111 ASSERT(!(vmp
->vm_cflags
& VMC_XALIGN
) ||
1112 IS_P2ALIGNED(vaddr
, align
));
1114 vaddr
= vmp
->vm_source_alloc(vmp
->vm_source
,
1115 asize
, vmflag
& VM_KMFLAGS
);
1117 mutex_enter(&vmp
->vm_lock
);
1118 vmp
->vm_nsegfree
+= resv
; /* claim reservation */
1119 aneeded
= size
+ align
- vmp
->vm_quantum
;
1120 aneeded
= P2ROUNDUP(aneeded
, vmp
->vm_quantum
);
1121 if (vaddr
!= NULL
) {
1123 * Since we dropped the vmem lock while
1124 * calling the import function, other
1125 * threads could have imported space
1126 * and made our import unnecessary. In
1127 * order to save space, we return
1128 * excess imports immediately.
1130 if (asize
> aneeded
&&
1131 vmp
->vm_source_free
!= NULL
&&
1132 vmem_canalloc(vmp
, aneeded
)) {
1134 VMEM_SEGS_PER_MIDDLE_ALLOC
);
1139 vbest
= vmem_span_create(vmp
, vaddr
, asize
, 1);
1140 addr
= P2PHASEUP(vbest
->vs_start
, align
, phase
);
1142 } else if (vmem_canalloc(vmp
, aneeded
)) {
1144 * Our import failed, but another thread
1145 * added sufficient free memory to the arena
1146 * to satisfy our request. Go back and
1149 ASSERT(resv
>= VMEM_SEGS_PER_MIDDLE_ALLOC
);
1155 * If the requestor chooses to fail the allocation attempt
1156 * rather than reap wait and retry - get out of the loop.
1158 if (vmflag
& VM_ABORT
)
1160 mutex_exit(&vmp
->vm_lock
);
1161 if (vmp
->vm_cflags
& VMC_IDENTIFIER
)
1162 kmem_reap_idspace();
1165 mutex_enter(&vmp
->vm_lock
);
1166 if (vmflag
& VM_NOSLEEP
)
1168 vmp
->vm_kstat
.vk_wait
.value
.ui64
++;
1169 cv_wait(&vmp
->vm_cv
, &vmp
->vm_lock
);
1171 if (vbest
!= NULL
) {
1172 ASSERT(vbest
->vs_type
== VMEM_FREE
);
1173 ASSERT(vbest
->vs_knext
!= vbest
);
1174 /* re-position to end of buffer */
1175 if (vmflag
& VM_ENDALLOC
) {
1176 addr
+= ((vbest
->vs_end
- (addr
+ size
)) / align
) *
1179 (void) vmem_seg_alloc(vmp
, vbest
, addr
, size
);
1180 mutex_exit(&vmp
->vm_lock
);
1182 vmp
->vm_source_free(vmp
->vm_source
, xvaddr
, xsize
);
1183 ASSERT(P2PHASE(addr
, align
) == phase
);
1184 ASSERT(!P2BOUNDARY(addr
, size
, nocross
));
1185 ASSERT(addr
>= (uintptr_t)minaddr
);
1186 ASSERT(addr
+ size
- 1 <= (uintptr_t)maxaddr
- 1);
1187 return ((void *)addr
);
1189 vmp
->vm_kstat
.vk_fail
.value
.ui64
++;
1190 mutex_exit(&vmp
->vm_lock
);
1191 if (vmflag
& VM_PANIC
)
1192 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
1193 "cannot satisfy mandatory allocation",
1194 (void *)vmp
, size
, align_arg
, phase
, nocross
,
1195 minaddr
, maxaddr
, vmflag
);
1196 ASSERT(xvaddr
== NULL
);
1201 * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
1202 * allocation. vmem_xalloc() and vmem_xfree() must always be paired because
1203 * both routines bypass the quantum caches.
1206 vmem_xfree(vmem_t
*vmp
, void *vaddr
, size_t size
)
1208 vmem_seg_t
*vsp
, *vnext
, *vprev
;
1210 mutex_enter(&vmp
->vm_lock
);
1212 vsp
= vmem_hash_delete(vmp
, (uintptr_t)vaddr
, size
);
1213 vsp
->vs_end
= P2ROUNDUP(vsp
->vs_end
, vmp
->vm_quantum
);
1216 * Attempt to coalesce with the next segment.
1218 vnext
= vsp
->vs_anext
;
1219 if (vnext
->vs_type
== VMEM_FREE
) {
1220 ASSERT(vsp
->vs_end
== vnext
->vs_start
);
1221 vmem_freelist_delete(vmp
, vnext
);
1222 vsp
->vs_end
= vnext
->vs_end
;
1223 vmem_seg_destroy(vmp
, vnext
);
1227 * Attempt to coalesce with the previous segment.
1229 vprev
= vsp
->vs_aprev
;
1230 if (vprev
->vs_type
== VMEM_FREE
) {
1231 ASSERT(vprev
->vs_end
== vsp
->vs_start
);
1232 vmem_freelist_delete(vmp
, vprev
);
1233 vprev
->vs_end
= vsp
->vs_end
;
1234 vmem_seg_destroy(vmp
, vsp
);
1239 * If the entire span is free, return it to the source.
1241 if (vsp
->vs_aprev
->vs_import
&& vmp
->vm_source_free
!= NULL
&&
1242 vsp
->vs_aprev
->vs_type
== VMEM_SPAN
&&
1243 vsp
->vs_anext
->vs_type
== VMEM_SPAN
) {
1244 vaddr
= (void *)vsp
->vs_start
;
1245 size
= VS_SIZE(vsp
);
1246 ASSERT(size
== VS_SIZE(vsp
->vs_aprev
));
1247 vmem_span_destroy(vmp
, vsp
);
1248 mutex_exit(&vmp
->vm_lock
);
1249 vmp
->vm_source_free(vmp
->vm_source
, vaddr
, size
);
1251 vmem_freelist_insert(vmp
, vsp
);
1252 mutex_exit(&vmp
->vm_lock
);
1257 * Allocate size bytes from arena vmp. Returns the allocated address
1258 * on success, NULL on failure. vmflag specifies VM_SLEEP or VM_NOSLEEP,
1259 * and may also specify best-fit, first-fit, or next-fit allocation policy
1260 * instead of the default instant-fit policy. VM_SLEEP allocations are
1261 * guaranteed to succeed.
1264 vmem_alloc(vmem_t
*vmp
, size_t size
, int vmflag
)
1272 if (size
- 1 < vmp
->vm_qcache_max
)
1273 return (kmem_cache_alloc(vmp
->vm_qcache
[(size
- 1) >>
1274 vmp
->vm_qshift
], vmflag
& VM_KMFLAGS
));
1276 if ((mtbf
= vmem_mtbf
| vmp
->vm_mtbf
) != 0 && gethrtime() % mtbf
== 0 &&
1277 (vmflag
& (VM_NOSLEEP
| VM_PANIC
)) == VM_NOSLEEP
)
1280 if (vmflag
& VM_NEXTFIT
)
1281 return (vmem_nextfit_alloc(vmp
, size
, vmflag
));
1283 if (vmflag
& (VM_BESTFIT
| VM_FIRSTFIT
))
1284 return (vmem_xalloc(vmp
, size
, vmp
->vm_quantum
, 0, 0,
1285 NULL
, NULL
, vmflag
));
1288 * Unconstrained instant-fit allocation from the segment list.
1290 mutex_enter(&vmp
->vm_lock
);
1292 if (vmp
->vm_nsegfree
>= VMEM_MINFREE
|| vmem_populate(vmp
, vmflag
)) {
1293 if ((size
& (size
- 1)) == 0)
1294 flist
= lowbit(P2ALIGN(vmp
->vm_freemap
, size
));
1295 else if ((hb
= highbit(size
)) < VMEM_FREELISTS
)
1296 flist
= lowbit(P2ALIGN(vmp
->vm_freemap
, 1UL << hb
));
1300 mutex_exit(&vmp
->vm_lock
);
1301 return (vmem_xalloc(vmp
, size
, vmp
->vm_quantum
,
1302 0, 0, NULL
, NULL
, vmflag
));
1305 ASSERT(size
<= (1UL << flist
));
1306 vsp
= vmp
->vm_freelist
[flist
].vs_knext
;
1307 addr
= vsp
->vs_start
;
1308 if (vmflag
& VM_ENDALLOC
) {
1309 addr
+= vsp
->vs_end
- (addr
+ size
);
1311 (void) vmem_seg_alloc(vmp
, vsp
, addr
, size
);
1312 mutex_exit(&vmp
->vm_lock
);
1313 return ((void *)addr
);
/*
 * Free the segment [vaddr, vaddr + size).
 */
void
vmem_free(vmem_t *vmp, void *vaddr, size_t size)
{
	if (size - 1 < vmp->vm_qcache_max)
		kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
		    vaddr);
	else
		vmem_xfree(vmp, vaddr, size);
}
/*
 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
 */
int
vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
{
	uintptr_t start = (uintptr_t)vaddr;
	uintptr_t end = start + size;
	vmem_seg_t *vsp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;

	mutex_enter(&vmp->vm_lock);
	vmp->vm_kstat.vk_contains.value.ui64++;
	for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
		vmp->vm_kstat.vk_contains_search.value.ui64++;
		ASSERT(vsp->vs_type == VMEM_SPAN);
		if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
			break;
	}
	mutex_exit(&vmp->vm_lock);
	return (vsp != seg0);
}
/*
 * Add the span [vaddr, vaddr + size) to arena vmp.
 */
void *
vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
{
	if (vaddr == NULL || size == 0)
		panic("vmem_add(%p, %p, %lu): bad arguments",
		    (void *)vmp, vaddr, size);

	ASSERT(!vmem_contains(vmp, vaddr, size));

	mutex_enter(&vmp->vm_lock);
	if (vmem_populate(vmp, vmflag))
		(void) vmem_span_create(vmp, vaddr, size, 0);
	else
		vaddr = NULL;
	mutex_exit(&vmp->vm_lock);
	return (vaddr);
}
1374 * Walk the vmp arena, applying func to each segment matching typemask.
1375 * If VMEM_REENTRANT is specified, the arena lock is dropped across each
1376 * call to func(); otherwise, it is held for the duration of vmem_walk()
1377 * to ensure a consistent snapshot. Note that VMEM_REENTRANT callbacks
1378 * are *not* necessarily consistent, so they may only be used when a hint
1382 vmem_walk(vmem_t
*vmp
, int typemask
,
1383 void (*func
)(void *, void *, size_t), void *arg
)
1386 vmem_seg_t
*seg0
= &vmp
->vm_seg0
;
1389 if (typemask
& VMEM_WALKER
)
1392 bzero(&walker
, sizeof (walker
));
1393 walker
.vs_type
= VMEM_WALKER
;
1395 mutex_enter(&vmp
->vm_lock
);
1396 VMEM_INSERT(seg0
, &walker
, a
);
1397 for (vsp
= seg0
->vs_anext
; vsp
!= seg0
; vsp
= vsp
->vs_anext
) {
1398 if (vsp
->vs_type
& typemask
) {
1399 void *start
= (void *)vsp
->vs_start
;
1400 size_t size
= VS_SIZE(vsp
);
1401 if (typemask
& VMEM_REENTRANT
) {
1402 vmem_advance(vmp
, &walker
, vsp
);
1403 mutex_exit(&vmp
->vm_lock
);
1404 func(arg
, start
, size
);
1405 mutex_enter(&vmp
->vm_lock
);
1408 func(arg
, start
, size
);
1412 vmem_advance(vmp
, &walker
, NULL
);
1413 mutex_exit(&vmp
->vm_lock
);
/*
 * Return the total amount of memory whose type matches typemask.  Thus:
 *
 *	typemask VMEM_ALLOC yields total memory allocated (in use).
 *	typemask VMEM_FREE yields total memory free (available).
 *	typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
 */
size_t
vmem_size(vmem_t *vmp, int typemask)
{
	uint64_t size = 0;

	if (typemask & VMEM_ALLOC)
		size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
	if (typemask & VMEM_FREE)
		size += vmp->vm_kstat.vk_mem_total.value.ui64 -
		    vmp->vm_kstat.vk_mem_inuse.value.ui64;
	return ((size_t)size);
}
1437 * Create an arena called name whose initial span is [base, base + size).
1438 * The arena's natural unit of currency is quantum, so vmem_alloc()
1439 * guarantees quantum-aligned results. The arena may import new spans
1440 * by invoking afunc() on source, and may return those spans by invoking
1441 * ffunc() on source. To make small allocations fast and scalable,
1442 * the arena offers high-performance caching for each integer multiple
1443 * of quantum up to qcache_max.
1446 vmem_create_common(const char *name
, void *base
, size_t size
, size_t quantum
,
1447 void *(*afunc
)(vmem_t
*, size_t, int),
1448 void (*ffunc
)(vmem_t
*, void *, size_t),
1449 vmem_t
*source
, size_t qcache_max
, int vmflag
)
1453 vmem_t
*vmp
, *cur
, **vmpp
;
1455 vmem_freelist_t
*vfp
;
1456 uint32_t id
= atomic_inc_32_nv(&vmem_id
);
1458 if (vmem_vmem_arena
!= NULL
) {
1459 vmp
= vmem_alloc(vmem_vmem_arena
, sizeof (vmem_t
),
1460 vmflag
& VM_KMFLAGS
);
1462 ASSERT(id
<= VMEM_INITIAL
);
1463 vmp
= &vmem0
[id
- 1];
1466 /* An identifier arena must inherit from another identifier arena */
1467 ASSERT(source
== NULL
|| ((source
->vm_cflags
& VMC_IDENTIFIER
) ==
1468 (vmflag
& VMC_IDENTIFIER
)));
1472 bzero(vmp
, sizeof (vmem_t
));
1474 (void) snprintf(vmp
->vm_name
, VMEM_NAMELEN
, "%s", name
);
1475 mutex_init(&vmp
->vm_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1476 cv_init(&vmp
->vm_cv
, NULL
, CV_DEFAULT
, NULL
);
1477 vmp
->vm_cflags
= vmflag
;
1478 vmflag
&= VM_KMFLAGS
;
1480 vmp
->vm_quantum
= quantum
;
1481 vmp
->vm_qshift
= highbit(quantum
) - 1;
1482 nqcache
= MIN(qcache_max
>> vmp
->vm_qshift
, VMEM_NQCACHE_MAX
);
1484 for (i
= 0; i
<= VMEM_FREELISTS
; i
++) {
1485 vfp
= &vmp
->vm_freelist
[i
];
1486 vfp
->vs_end
= 1UL << i
;
1487 vfp
->vs_knext
= (vmem_seg_t
*)(vfp
+ 1);
1488 vfp
->vs_kprev
= (vmem_seg_t
*)(vfp
- 1);
1491 vmp
->vm_freelist
[0].vs_kprev
= NULL
;
1492 vmp
->vm_freelist
[VMEM_FREELISTS
].vs_knext
= NULL
;
1493 vmp
->vm_freelist
[VMEM_FREELISTS
].vs_end
= 0;
1494 vmp
->vm_hash_table
= vmp
->vm_hash0
;
1495 vmp
->vm_hash_mask
= VMEM_HASH_INITIAL
- 1;
1496 vmp
->vm_hash_shift
= highbit(vmp
->vm_hash_mask
);
1498 vsp
= &vmp
->vm_seg0
;
1499 vsp
->vs_anext
= vsp
;
1500 vsp
->vs_aprev
= vsp
;
1501 vsp
->vs_knext
= vsp
;
1502 vsp
->vs_kprev
= vsp
;
1503 vsp
->vs_type
= VMEM_SPAN
;
1505 vsp
= &vmp
->vm_rotor
;
1506 vsp
->vs_type
= VMEM_ROTOR
;
1507 VMEM_INSERT(&vmp
->vm_seg0
, vsp
, a
);
1509 bcopy(&vmem_kstat_template
, &vmp
->vm_kstat
, sizeof (vmem_kstat_t
));
1513 vmp
->vm_kstat
.vk_source_id
.value
.ui32
= source
->vm_id
;
1514 vmp
->vm_source
= source
;
1515 vmp
->vm_source_alloc
= afunc
;
1516 vmp
->vm_source_free
= ffunc
;
1519 * Some arenas (like vmem_metadata and kmem_metadata) cannot
1520 * use quantum caching to lower fragmentation. Instead, we
1521 * increase their imports, giving a similar effect.
1523 if (vmp
->vm_cflags
& VMC_NO_QCACHE
) {
1524 vmp
->vm_min_import
=
1525 VMEM_QCACHE_SLABSIZE(nqcache
<< vmp
->vm_qshift
);
1530 ASSERT(!(vmflag
& VM_NOSLEEP
));
1531 vmp
->vm_qcache_max
= nqcache
<< vmp
->vm_qshift
;
1532 for (i
= 0; i
< nqcache
; i
++) {
1533 char buf
[VMEM_NAMELEN
+ 21];
1534 (void) sprintf(buf
, "%s_%lu", vmp
->vm_name
,
1536 vmp
->vm_qcache
[i
] = kmem_cache_create(buf
,
1537 (i
+ 1) * quantum
, quantum
, NULL
, NULL
, NULL
,
1538 NULL
, vmp
, KMC_QCACHE
| KMC_NOTOUCH
);
1542 if ((vmp
->vm_ksp
= kstat_create("vmem", vmp
->vm_id
, vmp
->vm_name
,
1543 "vmem", KSTAT_TYPE_NAMED
, sizeof (vmem_kstat_t
) /
1544 sizeof (kstat_named_t
), KSTAT_FLAG_VIRTUAL
)) != NULL
) {
1545 vmp
->vm_ksp
->ks_data
= &vmp
->vm_kstat
;
1546 kstat_install(vmp
->vm_ksp
);
1549 mutex_enter(&vmem_list_lock
);
1551 while ((cur
= *vmpp
) != NULL
)
1552 vmpp
= &cur
->vm_next
;
1554 mutex_exit(&vmem_list_lock
);
1556 if (vmp
->vm_cflags
& VMC_POPULATOR
) {
1557 ASSERT(vmem_populators
< VMEM_INITIAL
);
1558 vmem_populator
[atomic_inc_32_nv(&vmem_populators
) - 1] = vmp
;
1559 mutex_enter(&vmp
->vm_lock
);
1560 (void) vmem_populate(vmp
, vmflag
| VM_PANIC
);
1561 mutex_exit(&vmp
->vm_lock
);
1564 if ((base
|| size
) && vmem_add(vmp
, base
, size
, vmflag
) == NULL
) {
1573 vmem_xcreate(const char *name
, void *base
, size_t size
, size_t quantum
,
1574 vmem_ximport_t
*afunc
, vmem_free_t
*ffunc
, vmem_t
*source
,
1575 size_t qcache_max
, int vmflag
)
1577 ASSERT(!(vmflag
& (VMC_POPULATOR
| VMC_XALLOC
)));
1578 vmflag
&= ~(VMC_POPULATOR
| VMC_XALLOC
);
1580 return (vmem_create_common(name
, base
, size
, quantum
,
1581 (vmem_alloc_t
*)afunc
, ffunc
, source
, qcache_max
,
1582 vmflag
| VMC_XALLOC
));
1586 vmem_create(const char *name
, void *base
, size_t size
, size_t quantum
,
1587 vmem_alloc_t
*afunc
, vmem_free_t
*ffunc
, vmem_t
*source
,
1588 size_t qcache_max
, int vmflag
)
1590 ASSERT(!(vmflag
& (VMC_XALLOC
| VMC_XALIGN
)));
1591 vmflag
&= ~(VMC_XALLOC
| VMC_XALIGN
);
1593 return (vmem_create_common(name
, base
, size
, quantum
,
1594 afunc
, ffunc
, source
, qcache_max
, vmflag
));
1598 * Destroy arena vmp.
1601 vmem_destroy(vmem_t
*vmp
)
1603 vmem_t
*cur
, **vmpp
;
1604 vmem_seg_t
*seg0
= &vmp
->vm_seg0
;
1605 vmem_seg_t
*vsp
, *anext
;
1609 mutex_enter(&vmem_list_lock
);
1611 while ((cur
= *vmpp
) != vmp
)
1612 vmpp
= &cur
->vm_next
;
1613 *vmpp
= vmp
->vm_next
;
1614 mutex_exit(&vmem_list_lock
);
1616 for (i
= 0; i
< VMEM_NQCACHE_MAX
; i
++)
1617 if (vmp
->vm_qcache
[i
])
1618 kmem_cache_destroy(vmp
->vm_qcache
[i
]);
1620 leaked
= vmem_size(vmp
, VMEM_ALLOC
);
1622 cmn_err(CE_WARN
, "vmem_destroy('%s'): leaked %lu %s",
1623 vmp
->vm_name
, leaked
, (vmp
->vm_cflags
& VMC_IDENTIFIER
) ?
1624 "identifiers" : "bytes");
1626 if (vmp
->vm_hash_table
!= vmp
->vm_hash0
)
1627 vmem_free(vmem_hash_arena
, vmp
->vm_hash_table
,
1628 (vmp
->vm_hash_mask
+ 1) * sizeof (void *));
1631 * Give back the segment structures for anything that's left in the
1632 * arena, e.g. the primary spans and their free segments.
1634 VMEM_DELETE(&vmp
->vm_rotor
, a
);
1635 for (vsp
= seg0
->vs_anext
; vsp
!= seg0
; vsp
= anext
) {
1636 anext
= vsp
->vs_anext
;
1637 vmem_putseg_global(vsp
);
1640 while (vmp
->vm_nsegfree
> 0)
1641 vmem_putseg_global(vmem_getseg(vmp
));
1643 kstat_delete(vmp
->vm_ksp
);
1645 mutex_destroy(&vmp
->vm_lock
);
1646 cv_destroy(&vmp
->vm_cv
);
1647 vmem_free(vmem_vmem_arena
, vmp
, sizeof (vmem_t
));
1651 * Resize vmp's hash table to keep the average lookup depth near 1.0.
1654 vmem_hash_rescale(vmem_t
*vmp
)
1656 vmem_seg_t
**old_table
, **new_table
, *vsp
;
1657 size_t old_size
, new_size
, h
, nseg
;
1659 nseg
= (size_t)(vmp
->vm_kstat
.vk_alloc
.value
.ui64
-
1660 vmp
->vm_kstat
.vk_free
.value
.ui64
);
1662 new_size
= MAX(VMEM_HASH_INITIAL
, 1 << (highbit(3 * nseg
+ 4) - 2));
1663 old_size
= vmp
->vm_hash_mask
+ 1;
1665 if ((old_size
>> 1) <= new_size
&& new_size
<= (old_size
<< 1))
1668 new_table
= vmem_alloc(vmem_hash_arena
, new_size
* sizeof (void *),
1670 if (new_table
== NULL
)
1672 bzero(new_table
, new_size
* sizeof (void *));
1674 mutex_enter(&vmp
->vm_lock
);
1676 old_size
= vmp
->vm_hash_mask
+ 1;
1677 old_table
= vmp
->vm_hash_table
;
1679 vmp
->vm_hash_mask
= new_size
- 1;
1680 vmp
->vm_hash_table
= new_table
;
1681 vmp
->vm_hash_shift
= highbit(vmp
->vm_hash_mask
);
1683 for (h
= 0; h
< old_size
; h
++) {
1685 while (vsp
!= NULL
) {
1686 uintptr_t addr
= vsp
->vs_start
;
1687 vmem_seg_t
*next_vsp
= vsp
->vs_knext
;
1688 vmem_seg_t
**hash_bucket
= VMEM_HASH(vmp
, addr
);
1689 vsp
->vs_knext
= *hash_bucket
;
1695 mutex_exit(&vmp
->vm_lock
);
1697 if (old_table
!= vmp
->vm_hash0
)
1698 vmem_free(vmem_hash_arena
, old_table
,
1699 old_size
* sizeof (void *));
1703 * Perform periodic maintenance on all vmem arenas.
1706 vmem_update(void *dummy
)
1710 mutex_enter(&vmem_list_lock
);
1711 for (vmp
= vmem_list
; vmp
!= NULL
; vmp
= vmp
->vm_next
) {
1713 * If threads are waiting for resources, wake them up
1714 * periodically so they can issue another kmem_reap()
1715 * to reclaim resources cached by the slab allocator.
1717 cv_broadcast(&vmp
->vm_cv
);
1720 * Rescale the hash table to keep the hash chains short.
1722 vmem_hash_rescale(vmp
);
1724 mutex_exit(&vmem_list_lock
);
1726 (void) timeout(vmem_update
, dummy
, vmem_update_interval
* hz
);
void
vmem_qcache_reap(vmem_t *vmp)
{
	int i;

	/*
	 * Reap any quantum caches that may be part of this vmem.
	 */
	for (i = 0; i < VMEM_NQCACHE_MAX; i++)
		if (vmp->vm_qcache[i])
			kmem_cache_reap_now(vmp->vm_qcache[i]);
}
1743 * Prepare vmem for use.
1746 vmem_init(const char *heap_name
,
1747 void *heap_start
, size_t heap_size
, size_t heap_quantum
,
1748 void *(*heap_alloc
)(vmem_t
*, size_t, int),
1749 void (*heap_free
)(vmem_t
*, void *, size_t))
1752 int nseg
= VMEM_SEG_INITIAL
;
1756 vmem_putseg_global(&vmem_seg0
[nseg
]);
1758 heap
= vmem_create(heap_name
,
1759 heap_start
, heap_size
, heap_quantum
,
1760 NULL
, NULL
, NULL
, 0,
1761 VM_SLEEP
| VMC_POPULATOR
);
1763 vmem_metadata_arena
= vmem_create("vmem_metadata",
1764 NULL
, 0, heap_quantum
,
1765 vmem_alloc
, vmem_free
, heap
, 8 * heap_quantum
,
1766 VM_SLEEP
| VMC_POPULATOR
| VMC_NO_QCACHE
);
1768 vmem_seg_arena
= vmem_create("vmem_seg",
1769 NULL
, 0, heap_quantum
,
1770 heap_alloc
, heap_free
, vmem_metadata_arena
, 0,
1771 VM_SLEEP
| VMC_POPULATOR
);
1773 vmem_hash_arena
= vmem_create("vmem_hash",
1775 heap_alloc
, heap_free
, vmem_metadata_arena
, 0,
1778 vmem_vmem_arena
= vmem_create("vmem_vmem",
1779 vmem0
, sizeof (vmem0
), 1,
1780 heap_alloc
, heap_free
, vmem_metadata_arena
, 0,
1783 for (id
= 0; id
< vmem_id
; id
++)
1784 (void) vmem_xalloc(vmem_vmem_arena
, sizeof (vmem_t
),
1785 1, 0, 0, &vmem0
[id
], &vmem0
[id
+ 1],
1786 VM_NOSLEEP
| VM_BESTFIT
| VM_PANIC
);