/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>
#include <sys/ivintr.h>
#include <sys/panic.h>
/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized.
 *
 * Pages which belong to seg_kmem are hashed into &kvp vnode at
 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot time allocated memory as
 * well as static memory is considered off limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 * heap32_arena is for allocations that require 32-bit absolute
 * virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 * heap_core is for allocations that require 2GB *relative*
 * offsets; in other words all memory from heap_core is within
 * 2GB of all other memory from the same arena.  This is a requirement
 * of the addressing modes of some processors in supervisor code.
 *
 * heap_arena is the general heap arena.
 *
 * static_arena is the static memory arena.  Allocations from it
 * are not subject to relocation so it is safe to use the memory
 * physical address as well as the virtual address (e.g. the VA to
 * PA translations are static).  Caches may import from static_arena;
 * all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */
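/*
 * Illustrative sketch (not part of the original source): a subsystem that
 * needs memory whose VA->PA translation must never change could allocate
 * from static_alloc_arena, while ordinary wired kernel memory can come
 * from the general heap arena.  The arena names are the globals declared
 * below; the variable names are made up for the example.
 *
 *	void *stable_buf = vmem_alloc(static_alloc_arena, PAGESIZE, VM_SLEEP);
 *	void *heap_buf = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
 *	...
 *	vmem_free(heap_arena, heap_buf, PAGESIZE);
 *	vmem_free(static_alloc_arena, stable_buf, PAGESIZE);
 */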
extern ulong_t *segkp_bitmap;	/* Is set if segkp is from the kernel heap */

char *kernelheap;		/* start of primary kernel heap */
char *ekernelheap;		/* end of primary kernel heap */
struct seg kvseg;		/* primary kernel heap segment */
struct seg kvseg_core;		/* "core" kernel heap segment */
struct seg kzioseg;		/* Segment for zio mappings */
vmem_t *heap_arena;		/* primary kernel heap arena */
vmem_t *heap_core_arena;	/* core kernel heap arena */
char *heap_core_base;		/* start of core kernel heap arena */
char *heap_lp_base;		/* start of kernel large page heap arena */
char *heap_lp_end;		/* end of kernel large page heap arena */
vmem_t *hat_memload_arena;	/* HAT translation data */
struct seg kvseg32;		/* 32-bit kernel heap segment */
vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
vmem_t *heaptext_arena;		/* heaptext arena */
struct as kas;			/* kernel address space */
int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
vmem_t *static_arena;		/* arena for caches to import static memory */
vmem_t *static_alloc_arena;	/* arena for allocating static memory */
vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */
/*
 * seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for kernel heap is selected in the
 * platform specific code.  It can also be modified via /etc/system file.
 * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
 * pages for kernel heap.  "segkmem_lpshift" is adjusted appropriately to
 * match segkmem_lpsize.
 *
 * At boot time we carve from kernel heap arena a range of virtual addresses
 * that will be used for large page mappings.  This range [heap_lp_base,
 * heap_lp_end) is set up as a separate vmem arena - "heap_lp_arena".  We also
 * create "kmem_lp_arena" that caches memory already backed up by large
 * pages.  kmem_lp_arena imports virtual segments from heap_lp_arena.
 */
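/*
 * Example (illustrative, not from the original source): the large page
 * kernel heap can be disabled by forcing segkmem_lpsize down to the base
 * page size from /etc/system, as described above.  The value 0x2000
 * assumes an 8K base page (sparc); use the platform's PAGESIZE value.
 *
 *	set segkmem_lpsize = 0x2000
 */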
size_t	segkmem_lpsize;
static uint_t	segkmem_lpshift = PAGESHIFT;
int	segkmem_lpszc = 0;

size_t  segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
size_t  segkmem_heaplp_quantum;
vmem_t *heap_lp_arena;
static  vmem_t *kmem_lp_arena;
static  vmem_t *segkmem_ppa_arena;
static	segkmem_lpcb_t segkmem_lpcb;
/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap.  By default this parameter is set to 1/8
 * of physmem but can be adjusted through /etc/system either directly or
 * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
 * we allow for large page heap.
 */
size_t  segkmem_kmemlp_max;
static uint_t  segkmem_kmemlp_pcnt;
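/*
 * Illustrative /etc/system tuning (the tunable names are the variables
 * above; the values are examples only): cap the large page heap either
 * directly in bytes or as a percentage of physical memory.
 *
 *	set segkmem_kmemlp_max = 0x40000000
 *	set segkmem_kmemlp_pcnt = 10
 */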
/*
 * Getting large pages for kernel heap could be problematic due to
 * physical memory fragmentation.  That's why we allow preallocating
 * "segkmem_kmemlp_min" bytes at boot time.
 */
static size_t  segkmem_kmemlp_min;
/*
 * Throttling is used to avoid expensive tries to allocate large pages
 * for kernel heap when a lot of successive attempts to do so fail.
 */
static ulong_t segkmem_lpthrottle_max = 0x400000;
static ulong_t segkmem_lpthrottle_start = 0x40;
static ulong_t segkmem_use_lpthrottle = 1;
/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
	struct segkmem_gc_list	*gc_next;
	vmem_t			*gc_arena;
	size_t			gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;
/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
	flags |= (VM_MEMLOAD | VM_NORELOC);
	return (segkmem_alloc(vmp, size, flags));
}
/*
 * Allocations from the static_arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}
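/*
 * Illustrative sketch (not part of the original source): because memory
 * from static_alloc_arena is permanently wired, a driver could safely
 * cache its physical frame number once.  Variable names below are made up.
 *
 *	void *va = vmem_alloc(static_alloc_arena, PAGESIZE, VM_SLEEP);
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, (caddr_t)va);
 *	(the translation stays valid for the lifetime of the buffer)
 */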
/*
 * Initialize kernel heap boundaries.
 */
void
kernelheap_init(
	void *heap_start,
	void *heap_end,
	char *first_avail,
	void *core_start,
	void *core_end)
{
	uintptr_t textbase;
	size_t core_size;
	size_t heap_size;
	vmem_t *heaptext_parent;
	size_t	heap_lp_size = 0;
#ifdef __sparc
	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
#endif	/* __sparc */

	kernelheap = heap_start;
	ekernelheap = heap_end;

#ifdef __sparc
	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
	/*
	 * Bias heap_lp start address by kmem64_sz to reduce collisions
	 * in 4M kernel TSB between kmem64 area and heap_lp
	 */
	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
	if (kmem64_sz <= heap_lp_size / 2)
		heap_lp_size -= kmem64_sz;
	heap_lp_base = ekernelheap - heap_lp_size;
	heap_lp_end = heap_lp_base + heap_lp_size;
#endif	/* __sparc */
	/*
	 * If this platform has a 'core' heap area, then the space for
	 * overflow module text should be carved out of the end of that
	 * heap.  Otherwise, it gets carved out of the general purpose
	 * heap.
	 */
	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
	if (core_size > 0) {
		ASSERT(core_size >= HEAPTEXT_SIZE);
		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
		core_size -= HEAPTEXT_SIZE;
	} else {
		ekernelheap -= HEAPTEXT_SIZE;
		textbase = (uintptr_t)ekernelheap;
	}

	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
	    segkmem_alloc, segkmem_free);

	if (core_size > 0) {
		heap_core_arena = vmem_create("heap_core", core_start,
		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
		heap_core_base = core_start;
	} else {
		heap_core_arena = heap_arena;
		heap_core_base = kernelheap;
	}
#ifdef __sparc
	/*
	 * Reserve space for the large page heap.  If large pages for the
	 * kernel heap are enabled, the large page heap arena will be created
	 * later in the boot sequence in segkmem_heap_lp_init().  Otherwise
	 * the allocated range will be returned to the heap_arena.
	 */
	(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
	    heap_lp_base, heap_lp_end,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
#endif	/* __sparc */

	/*
	 * Remove the already-spoken-for memory range [kernelheap, first_avail).
	 */
	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
#ifdef __sparc
	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
	    NULL, NULL, 0, VM_SLEEP);
	/*
	 * Prom claims the physical and virtual resources used by panicbuf
	 * and intr_vec_table.  So reserve space for panicbuf, intr_vec_table,
	 * reserved interrupt vector data structures from 32-bit heap.
	 */
	(void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
	    panicbuf, panicbuf + PANICBUFSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	(void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
	    intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
	heaptext_parent = NULL;
#else	/* __sparc */
	heap32_arena = heap_core_arena;
	heaptext_parent = heap_core_arena;
#endif	/* __sparc */

	heaptext_arena = vmem_create("heaptext", (void *)textbase,
	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);
	/*
	 * Create a set of arenas for memory with static translations
	 * (e.g. VA -> PA translations cannot change).  Since using
	 * kernel pages by physical address implies it isn't safe to
	 * walk across page boundaries, the static_arena quantum must
	 * be PAGESIZE.  Any kmem caches that require static memory
	 * should source from static_arena, while direct allocations
	 * should only use static_alloc_arena.
	 */
	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
	    0, VM_SLEEP);
	/*
	 * Create an arena for translation data (ptes, hmes, or hblks).
	 * We need an arena for this because hat_memload() is essential
	 * to vmem_populate() (see comments in common/os/vmem.c).
	 *
	 * Note: any kmem cache that allocates from hat_memload_arena
	 * must be created as a KMC_NOHASH cache (i.e. no external slab
	 * and bufctl structures to allocate) so that slab creation doesn't
	 * require anything more than a single vmem_alloc().
	 */
	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
	    hat_memload_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
}
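/*
 * Illustrative sketch (not from the original source): a HAT implementation
 * might create its translation-data cache like this, honoring the
 * KMC_NOHASH requirement described above.  The cache name and buffer size
 * are made-up placeholders.
 *
 *	kmem_cache_t *hme_cache = kmem_cache_create("example_hme_cache",
 *	    64, 0, NULL, NULL, NULL, NULL, hat_memload_arena, KMC_NOHASH);
 */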
static void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t eaddr;
	page_t *pp;
	pfn_t pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if (pfnum == PFN_INVALID)
			continue;
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * must break up any large pages that may have constituent
		 * pages being utilized for BOP_ALLOC()'s before calling
		 * page_numtopp().  The locking code (i.e. page_reclaim())
		 * can't handle them.
		 */
		page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_alloc: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp)) {
			PP_SETNORELOC(pp);
			PLCNT_XFER_NORELOC(pp);
		}

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
	}
}
/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
#ifdef __sparc
	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
		panic("boot_alloc: bop_alloc_chunk failed");
#else
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
#endif
	boot_mapin((caddr_t)addr, size);
	return (addr);
}
static void
segkmem_badop()
{
	panic("segkmem_badop");
}

#define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop
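/*
 * Illustrative note (not in the original source): SEGKMEM_BADOP(int)
 * expands to (int (*)())segkmem_badop, i.e. segkmem_badop cast to a
 * pointer to a function returning int, so any segment operation that
 * seg_kmem does not support simply panics if it is ever called:
 *
 *	int (*op)() = SEGKMEM_BADOP(int);
 *	op();		// panics with "segkmem_badop"
 */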
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
    enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;
	page_t *pp;
	struct vnode *vp = seg->s_data;

	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	/*
	 * If it is one of segkp pages, call segkp_fault.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));

	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
		return (FC_NOSUPPORT);

	npages = btopr(size);

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		for (pg = 0; pg < npages; pg++) {
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_SHARED);
			if (pp == NULL) {
				/*
				 * Hmm, no page.  Does a kernel mapping
				 * exist for it?
				 */
				if (!hat_probe(kas.a_hat, addr)) {
					addr -= PAGESIZE;
					while (--pg >= 0) {
						pp = page_find(vp, (u_offset_t)
						    (uintptr_t)addr);
						if (pp)
							page_unlock(pp);
						addr -= PAGESIZE;
					}
					return (FC_NOMAP);
				}
			}
			addr += PAGESIZE;
		}
		if (rw == S_OTHER)
			hat_reserve(seg->s_as, addr, size);
		return (0);
	case F_SOFTUNLOCK:
		while (npages--) {
			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
			if (pp)
				page_unlock(pp);
			addr += PAGESIZE;
		}
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
}
static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	/*
	 * If it is one of segkp pages, call segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_SETPROT(segkp, addr, size, prot));

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}
/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_CHECKPROT(segkp, addr, size, prot));

	segkmem_badop();
	return (0);
}
/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_KLUSTER(segkp, addr, delta));

	segkmem_badop();
	return (0);
}
static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
	struct as *as = arg;
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	while (addr < addr_end) {
		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
			dump_addpage(as, addr, pfn);
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}
}
static void
segkmem_dump_range(void *arg, void *start, size_t size)
{
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start dumping the range of addresses we
	 * carved out of the kernel heap for the large page heap walk
	 * heap_lp_arena to find what segments are actually populated
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_xdump_range, arg);
	} else {
		segkmem_xdump_range(arg, start, size);
	}
}
static void
segkmem_dump(struct seg *seg)
{
	/*
	 * The kernel's heap_arena (represented by kvseg) is a very large
	 * VA space, most of which is typically unused.  To speed up dumping
	 * we use vmem_walk() to quickly find the pieces of heap_arena that
	 * are actually in use.  We do the same for heap32_arena and
	 * heap_core.
	 *
	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
	 * may ultimately need to allocate memory.  Reentrant walks are
	 * necessarily imperfect snapshots.  The kernel heap continues
	 * to change during a live crash dump, for example.  For a normal
	 * crash dump, however, we know that there won't be any other threads
	 * messing with the heap.  Therefore, at worst, we may fail to dump
	 * the pages that get allocated by the act of dumping; but we will
	 * always dump every page that was allocated when the walk began.
	 *
	 * The other segkmem segments are dense (fully populated), so there's
	 * no need to use this technique when dumping them.
	 *
	 * Note: when adding special dump handling for any new sparsely-
	 * populated segments, be sure to add similar handling to the ::kgrep
	 * code in mdb.
	 */
	if (seg == &kvseg) {
		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kvseg_core) {
		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kvseg32) {
		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kzioseg) {
		/*
		 * We don't want to dump pages attached to kzioseg since they
		 * contain file data from ZFS.  If this page's segment is
		 * kzioseg return instead of writing it to the dump device.
		 */
		return;
	} else {
		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
	}
}
/*
 * lock/unlock kmem pages over a given range [addr, addr+len).
 * Returns a shadow list of pages in ppp. If there are holes
 * in the range (e.g. some of the kernel mappings do not have
 * underlying page_ts) returns ENOTSUP so that as_pagelock()
 * will handle the range via as_fault(F_SOFTLOCK).
 */
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
    page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	spgcnt_t pg;
	size_t nb;
	struct vnode *vp = seg->s_data;

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		pplist = *ppp;
		ASSERT(pplist != NULL);

		for (pg = 0; pg < npages; pg++) {
			pp = pplist[pg];
			page_unlock(pp);
		}
		kmem_free(pplist, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = kmem_alloc(nb, KM_NOSLEEP);
	if (pplist == NULL) {
		*ppp = NULL;
		return (ENOTSUP);	/* take the slow path */
	}

	for (pg = 0; pg < npages; pg++) {
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		if (pp == NULL) {
			while (--pg >= 0)
				page_unlock(pplist[pg]);
			kmem_free(pplist, nb);
			*ppp = NULL;
			return (ENOTSUP);
		}
		pplist[pg] = pp;
		addr += PAGESIZE;
	}

	*ppp = pplist;
	return (0);
}
/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_GETMEMID(segkp, addr, memidp));

	segkmem_badop();
	return (0);
}
/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkmem_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkmem_capable(struct seg *seg, segcapability_t capability)
{
	if (capability == S_CAPABILITY_NOMINFLT)
		return (1);
	return (0);
}
static struct seg_ops segkmem_ops = {
	SEGKMEM_BADOP(int),		/* dup */
	SEGKMEM_BADOP(int),		/* unmap */
	SEGKMEM_BADOP(void),		/* free */
	segkmem_fault,
	SEGKMEM_BADOP(faultcode_t),	/* faulta */
	segkmem_setprot,
	segkmem_checkprot,
	segkmem_kluster,
	SEGKMEM_BADOP(size_t),		/* swapout */
	SEGKMEM_BADOP(int),		/* sync */
	SEGKMEM_BADOP(size_t),		/* incore */
	SEGKMEM_BADOP(int),		/* lockop */
	SEGKMEM_BADOP(int),		/* getprot */
	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
	SEGKMEM_BADOP(int),		/* gettype */
	SEGKMEM_BADOP(int),		/* getvp */
	SEGKMEM_BADOP(int),		/* advise */
	segkmem_dump,
	segkmem_pagelock,
	SEGKMEM_BADOP(int),		/* setpgsz */
	segkmem_getmemid,
	segkmem_getpolicy,		/* getpolicy */
	segkmem_capable,		/* capable */
	seg_inherit_notsup		/* inherit */
};
static int
segkmem_zio_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &zvp;
	kas.a_size += seg->s_size;
	return (0);
}

int
segkmem_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &kvp;
	kas.a_size += seg->s_size;
	return (0);
}
/*ARGSUSED*/
static page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
	struct seg kseg;
	int pgflags;
	struct vnode *vp = arg;

	kseg.s_as = &kas;
	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;
	if (vmflag & VM_NORMALPRI) {
		ASSERT(vmflag & VM_NOSLEEP);
		pgflags |= PG_NORMALPRI;
	}

	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kseg, addr));
}
/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
    page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int allocflag;

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		return (NULL);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		return (NULL);
	}

	ppl = page_create_func(addr, size, vmflag, pcarg);
	if (ppl == NULL) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	/*
	 * Under certain conditions, we need to let the HAT layer know
	 * that it cannot safely allocate memory.  Allocations from
	 * the hat_memload vmem arena always need this, to prevent
	 * infinite recursion.
	 *
	 * In addition, the x86 hat cannot safely do memory
	 * allocations while in vmem_populate(), because there
	 * is no simple bound on its usage.
	 */
	if (vmflag & VM_MEMLOAD)
		allocflag = HAT_NO_KALLOC;
	else if (vmem_is_populator())
		allocflag = HAT_NO_KALLOC;
	else
		allocflag = 0;

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK | allocflag);
		pp->p_lckcnt = 1;
		if (vmflag & SEGKMEM_SHARELOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
	}

	return (addr);
}
static void *
segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
{
	void *addr;
	segkmem_gc_list_t *gcp, **prev_gcpp;

	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		if (bootops->bsys_alloc == NULL)
			halt("Memory allocation between bop_alloc() and "
			    "kmem_ready()");

		/*
		 * There's not a lot of memory to go around during boot,
		 * so recycle it if we can.
		 */
		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
		    prev_gcpp = &gcp->gc_next) {
			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
				*prev_gcpp = gcp->gc_next;
				return (gcp);
			}
		}

		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
			panic("segkmem_alloc: boot_alloc failed");
		return (addr);
	}
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create, vp));
}

void *
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
}

void *
segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
}
/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver.  This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
    void (*func)(page_t *))
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock. Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		if (func)
			func(pp);
		else
			page_destroy(pp, 0);
	}
	if (func == NULL)
		page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}

void
segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
{
	segkmem_free_vn(vmp, inaddr, size, &kvp, func);
}

void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
}

void
segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
}
void
segkmem_gc(void)
{
	ASSERT(kvseg.s_base != NULL);
	while (segkmem_gc_list != NULL) {
		segkmem_gc_list_t *gc = segkmem_gc_list;
		segkmem_gc_list = gc->gc_next;
		segkmem_free(gc->gc_arena, gc, gc->gc_size);
	}
}
/*
 * Legacy entry points from here to end of file.
 */
void
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
	    flags | HAT_LOAD_LOCK);
}

void
segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}

void *
kmem_getpages(pgcnt_t npages, int kmflag)
{
	return (kmem_alloc(ptob(npages), kmflag));
}

void
kmem_freepages(void *addr, pgcnt_t npages)
{
	kmem_free(addr, ptob(npages));
}
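/*
 * Illustrative usage of the legacy entry points above (sketch only;
 * callers would normally use kmem_alloc()/kmem_free() directly):
 *
 *	void *buf = kmem_getpages(2, KM_SLEEP);
 *	...
 *	kmem_freepages(buf, 2);
 */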
/*
 * segkmem_page_create_large() allocates a large page to be used for the kmem
 * caches.  If kpr is enabled we ask for a relocatable page unless requested
 * otherwise.  If kpr is disabled we have to ask for a non-reloc page.
 */
static page_t *
segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
{
	int pgflags;

	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if (!(vmflag & VM_NOSLEEP))
		pgflags |= PG_WAIT;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;
	if (vmflag & VM_NORMALPRI)
		pgflags |= PG_NORMALPRI;

	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, arg));
}
/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size).  If addr is NULL, allocate the virtual address
 * space as well.
 */
static void *
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
	caddr_t addr = inaddr, pa;
	size_t  lpsize = segkmem_lpsize;
	pgcnt_t npages = btopr(size);
	pgcnt_t nbpages = btop(lpsize);
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t  ppasize = nbpages * sizeof (page_t *);
	page_t *pp, *rootpp, **ppa, *pplist = NULL;
	int i;

	vmflag |= VM_NOSLEEP;

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		return (NULL);
	}

	/*
	 * allocate an array we need for hat_memload_array.
	 * we use a separate arena to avoid recursion.
	 * we will not need this array when hat_memload_array learns pp++
	 */
	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
		goto fail_array_alloc;
	}

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		goto fail_vmem_alloc;

	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

	/* create all the pages */
	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
			goto fail_page_create;
		page_list_concat(&pplist, &pp);
	}

	/* at this point we have all the resource to complete the request */
	while ((rootpp = pplist) != NULL) {
		for (i = 0; i < nbpages; i++) {
			ASSERT(pplist != NULL);
			pp = pplist;
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
			ppa[i] = pp;
		}
		/*
		 * Load the locked entry. It's OK to preload the entry into the
		 * TSB since we now support large mappings in the kernel TSB.
		 */
		hat_memload_array(kas.a_hat,
		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK);

		for (--i; i >= 0; --i) {
			ppa[i]->p_lckcnt = 1;
			page_unlock(ppa[i]);
		}
	}

	vmem_free(segkmem_ppa_arena, ppa, ppasize);
	return (addr);

fail_page_create:
	while ((rootpp = pplist) != NULL) {
		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
			ASSERT(pp != NULL);
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
		}
		page_destroy_pages(rootpp);
	}

	if (inaddr == NULL)
		vmem_free(vmp, addr, size);

fail_vmem_alloc:
	vmem_free(segkmem_ppa_arena, ppa, ppasize);

fail_array_alloc:
	page_unresv(npages);

	return (NULL);
}
static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t *pp, *rootpp = NULL;
	pgcnt_t pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("segkmem_free_one_lp: page not found");
		ASSERT(PAGE_EXCL(pp));
		pp->p_lckcnt = 0;
		if (rootpp == NULL)
			rootpp = pp;
	}
	ASSERT(rootpp != NULL);
	page_destroy_pages(rootpp);

	/* page_unresv() is done by the caller */
}
/*
 * This function is called to import new spans into the vmem arenas like
 * kmem_default_arena and kmem_oversize_arena.  It first tries to import
 * spans from large page arena - kmem_lp_arena.  In order to do this it might
 * have to "upgrade the requested size" to kmem_lp_arena quantum.  If
 * it was not able to satisfy the upgraded request it then calls regular
 * segkmem_alloc() that satisfies the request by importing from "*vmp" arena
 */
static void *
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	size_t size;
	kthread_t *t = curthread;
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;

	ASSERT(sizep != NULL);

	size = *sizep;

	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
	    !(vmflag & SEGKMEM_SHARELOCKED)) {

		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
		void *addr = NULL;
		ulong_t *lpthrtp = &lpcb->lp_throttle;
		ulong_t lpthrt = *lpthrtp;
		int	dowakeup = 0;
		int	doalloc = 1;

		ASSERT(kmem_lp_arena != NULL);
		ASSERT(asize >= size);

		if (lpthrt != 0) {
			/* try to update the throttle value */
			lpthrt = atomic_inc_ulong_nv(lpthrtp);
			if (lpthrt >= segkmem_lpthrottle_max) {
				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
				    segkmem_lpthrottle_max / 4);
			}

			/*
			 * when we get above throttle start do an exponential
			 * backoff at trying large pages and reaping
			 */
			if (lpthrt > segkmem_lpthrottle_start &&
			    !IS_P2ALIGNED(lpthrt, segkmem_lpthrottle_start)) {
				lpcb->allocs_throttled++;
				lpthrt--;
				if (lpthrt == segkmem_lpthrottle_start)
					kmem_reap();
				return (segkmem_alloc(vmp, size, vmflag));
			}
		}

		if (!(vmflag & VM_NOSLEEP) &&
		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

			/*
			 * we are low on free memory in kmem_lp_arena
			 * we let only one guy to allocate heap_lp
			 * quantum size chunk that everybody is going to
			 * share
			 */
			mutex_enter(&lpcb->lp_lock);

			if (lpcb->lp_wait) {

				/* we are not the first one - wait */
				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
				    kmemlp_qnt) {
					doalloc = 0;
				}
			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
			    kmemlp_qnt) {

				/*
				 * we are the first one, make sure we import
				 * a large page
				 */
				if (asize == kmemlp_qnt)
					asize += kmemlp_qnt;
				dowakeup = 1;
				lpcb->lp_wait = 1;
			}

			mutex_exit(&lpcb->lp_lock);
		}

		/*
		 * VM_ABORT flag prevents sleeps in vmem_xalloc when
		 * large pages are not available. In that case this allocation
		 * attempt will fail and we will retry allocation with small
		 * pages. We also do not want to panic if this allocation fails
		 * because we are going to retry.
		 */
		if (doalloc) {
			addr = vmem_alloc(kmem_lp_arena, asize,
			    (vmflag | VM_ABORT) & ~VM_PANIC);

			if (dowakeup) {
				mutex_enter(&lpcb->lp_lock);
				ASSERT(lpcb->lp_wait != 0);
				lpcb->lp_wait = 0;
				cv_broadcast(&lpcb->lp_cv);
				mutex_exit(&lpcb->lp_lock);
			}
		}

		if (addr != NULL) {
			*sizep = asize;
			*lpthrtp = 0;
			return (addr);
		}

		if (vmflag & VM_NOSLEEP)
			lpcb->nosleep_allocs_failed++;
		else
			lpcb->sleep_allocs_failed++;
		lpcb->alloc_bytes_failed += size;

		/* if large page throttling is not started yet do it */
		if (segkmem_use_lpthrottle && lpthrt == 0) {
			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
		}
	}
	return (segkmem_alloc(vmp, size, vmflag));
}
static void
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
		segkmem_free(vmp, inaddr, size);
	} else {
		vmem_free(kmem_lp_arena, inaddr, size);
	}
}
/*
 * segkmem_alloc_lpi() imports virtual memory from large page heap arena
 * into kmem_lp arena.  In the process it maps the imported segment with
 * large pages.
 */
static void *
segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	void *addr;

	ASSERT(size != 0);
	ASSERT(vmp == heap_lp_arena);

	/* do not allow large page heap to grow beyond limits */
	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
		lpcb->allocs_limited++;
		return (NULL);
	}

	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create_large, NULL);
	return (addr);
}
/*
 * segkmem_free_lpi() returns virtual memory back into large page heap arena
 * from kmem_lp arena.  Before doing this it unmaps the segment and frees
 * the large pages used to map it.
 */
static void
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
	pgcnt_t	nlpages = size >> segkmem_lpshift;
	size_t	lpsize = segkmem_lpsize;
	caddr_t	addr = inaddr;
	pgcnt_t	npages = btopr(size);
	int	i;

	ASSERT(vmp == heap_lp_arena);
	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

	for (i = 0; i < nlpages; i++) {
		segkmem_free_one_lp(addr, lpsize);
		addr += lpsize;
	}

	page_unresv(npages);

	vmem_free(vmp, inaddr, size);
}
/*
 * This function is called at system boot time by kmem_init right after
 * /etc/system file has been read.  It checks based on hardware configuration
 * and /etc/system settings if system is going to use large pages.  The
 * initialization necessary to actually start using large pages
 * happens later in the process after segkmem_heap_lp_init() is called.
 */
int
segkmem_lpsetup()
{
	int use_large_pages = 0;

#ifdef __sparc

	size_t memtotal = physmem * PAGESIZE;

	if (heap_lp_base == NULL) {
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* get a platform dependent value of large page size for kernel heap */
	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);

	if (segkmem_lpsize <= PAGESIZE) {
		/*
		 * put virtual space reserved for the large page kernel
		 * heap back to the regular heap
		 */
		vmem_xfree(heap_arena, heap_lp_base,
		    heap_lp_end - heap_lp_base);
		heap_lp_base = NULL;
		heap_lp_end = NULL;
		segkmem_lpsize = PAGESIZE;
	} else {

		/* set heap_lp quantum if necessary */
		if (segkmem_heaplp_quantum == 0 ||
		    !ISP2(segkmem_heaplp_quantum) ||
		    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
			segkmem_heaplp_quantum = segkmem_lpsize;
		}

		/* set kmem_lp quantum if necessary */
		if (segkmem_kmemlp_quantum == 0 ||
		    !ISP2(segkmem_kmemlp_quantum) ||
		    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
			segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
		}

		/* set total amount of memory allowed for large page kernel heap */
		if (segkmem_kmemlp_max == 0) {
			if (segkmem_kmemlp_pcnt == 0 ||
			    segkmem_kmemlp_pcnt > 100)
				segkmem_kmemlp_pcnt = 12;
			segkmem_kmemlp_max =
			    (memtotal * segkmem_kmemlp_pcnt) / 100;
		}
		segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
		    segkmem_heaplp_quantum);

		/* fix lp kmem preallocation request if necessary */
		if (segkmem_kmemlp_min) {
			segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
			    segkmem_heaplp_quantum);
			if (segkmem_kmemlp_min > segkmem_kmemlp_max)
				segkmem_kmemlp_min = segkmem_kmemlp_max;
		}

		use_large_pages = 1;
		segkmem_lpszc = page_szc(segkmem_lpsize);
		segkmem_lpshift = page_get_shift(segkmem_lpszc);
	}
#endif
	return (use_large_pages);
}
void
segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
{
	ASSERT(zio_mem_base != NULL);
	ASSERT(zio_mem_size != 0);

	/*
	 * To reduce VA space fragmentation, we set up quantum caches for the
	 * smaller sizes;  we chose 32k because that translates to 128k VA
	 * slabs, which matches nicely with the common 128k zio_data bufs.
	 */
	zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
	    PAGESIZE, NULL, NULL, NULL, 32 * 1024, VM_SLEEP);

	zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);

	ASSERT(zio_arena != NULL);
	ASSERT(zio_alloc_arena != NULL);
}
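/*
 * Illustrative sketch (not part of the original source): once
 * segkmem_zio_init() has run, a consumer such as the ZFS file data cache
 * could carve buffers out of zio_alloc_arena; the 128k size below is only
 * an example, chosen to match the comment above.
 *
 *	void *buf = vmem_alloc(zio_alloc_arena, 128 * 1024, VM_SLEEP);
 *	...
 *	vmem_free(zio_alloc_arena, buf, 128 * 1024);
 */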
static void *
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
	void  *addr;

	if (ppaquantum <= PAGESIZE)
		return (segkmem_alloc(vmp, size, vmflag));

	ASSERT((size & (ppaquantum - 1)) == 0);

	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
	    segkmem_page_create, NULL) == NULL) {
		vmem_xfree(vmp, addr, size);
		addr = NULL;
	}

	return (addr);
}
*vmp
, void *addr
, size_t size
)
1563 size_t ppaquantum
= btopr(segkmem_lpsize
) * sizeof (page_t
*);
1565 ASSERT(addr
!= NULL
);
1567 if (ppaquantum
<= PAGESIZE
) {
1568 segkmem_free(vmp
, addr
, size
);
1570 segkmem_free(NULL
, addr
, size
);
1571 vmem_xfree(vmp
, addr
, size
);
void
segkmem_heap_lp_init()
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	size_t heap_lp_size = heap_lp_end - heap_lp_base;
	size_t lpsize = segkmem_lpsize;
	size_t ppaquantum;
	void  *addr;

	if (segkmem_lpsize <= PAGESIZE) {
		ASSERT(heap_lp_base == NULL);
		ASSERT(heap_lp_end == NULL);
		return;
	}

	ASSERT(segkmem_heaplp_quantum >= lpsize);
	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
	ASSERT(lpcb->lp_uselp == 0);
	ASSERT(heap_lp_base != NULL);
	ASSERT(heap_lp_end != NULL);
	ASSERT(heap_lp_base < heap_lp_end);
	ASSERT(heap_lp_arena == NULL);
	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);

	/* create large page heap arena */
	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);

	ASSERT(heap_lp_arena != NULL);

	/* This arena caches memory already mapped by large pages */
	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);

	ASSERT(kmem_lp_arena != NULL);

	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * this arena is used for the array of page_t pointers necessary
	 * to call hat_memload_array
	 */
	ppaquantum = btopr(lpsize) * sizeof (page_t *);
	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
	    VM_SLEEP);

	ASSERT(segkmem_ppa_arena != NULL);

	/* preallocate some memory for the lp kernel heap */
	if (segkmem_kmemlp_min) {

		ASSERT(P2PHASE(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum) == 0);

		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {

			addr = vmem_add(kmem_lp_arena, addr,
			    segkmem_kmemlp_min, VM_SLEEP);
			ASSERT(addr != NULL);
		}
	}
}