/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2016 Joyent, Inc.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>
#include <sys/ivintr.h>
#include <sys/panic.h>
/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized.
 *
 * Pages which belong to seg_kmem are hashed into &kvp vnode at
 * an offset equal to (uoff_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot time allocated memory as
 * well as static memory is considered off limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 * heap32_arena is for allocations that require 32-bit absolute
 * virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 * heap_core is for allocations that require 2GB *relative*
 * offsets; in other words all memory from heap_core is within
 * 2GB of all other memory from the same arena.  This is a requirement
 * of the addressing modes of some processors in supervisor code.
 *
 * heap_arena is the general heap arena.
 *
 * static_arena is the static memory arena.  Allocations from it
 * are not subject to relocation so it is safe to use the memory
 * physical address as well as the virtual address (e.g. the VA to
 * PA translations are static).  Caches may import from static_arena;
 * all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */
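
/*
 * Hedged usage sketch (added for illustration, not part of the original
 * source): a caller that needs permanently wired memory whose physical
 * address may be used directly would allocate from static_alloc_arena,
 * while ordinary wired kernel memory can be obtained by backing heap_arena
 * VA through segkmem_alloc(); the sizes and flags below are examples only.
 *
 *      void *sbuf = vmem_alloc(static_alloc_arena, 64, VM_SLEEP);
 *      void *kbuf = segkmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
 *      ...
 *      segkmem_free(heap_arena, kbuf, PAGESIZE);
 *      vmem_free(static_alloc_arena, sbuf, 64);
 */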
extern ulong_t *segkp_bitmap;   /* Is set if segkp is from the kernel heap */

char *kernelheap;               /* start of primary kernel heap */
char *ekernelheap;              /* end of primary kernel heap */
struct seg kvseg;               /* primary kernel heap segment */
struct seg kvseg_core;          /* "core" kernel heap segment */
struct seg kzioseg;             /* Segment for zio mappings */
vmem_t *heap_arena;             /* primary kernel heap arena */
vmem_t *heap_core_arena;        /* core kernel heap arena */
char *heap_core_base;           /* start of core kernel heap arena */
char *heap_lp_base;             /* start of kernel large page heap arena */
char *heap_lp_end;              /* end of kernel large page heap arena */
vmem_t *hat_memload_arena;      /* HAT translation data */
struct seg kvseg32;             /* 32-bit kernel heap segment */
vmem_t *heap32_arena;           /* 32-bit kernel heap arena */
vmem_t *heaptext_arena;         /* heaptext arena */
struct as kas;                  /* kernel address space */
int segkmem_reloc;              /* enable/disable relocatable segkmem pages */
vmem_t *static_arena;           /* arena for caches to import static memory */
vmem_t *static_alloc_arena;     /* arena for allocating static memory */
vmem_t *zio_arena = NULL;       /* arena for allocating zio memory */
vmem_t *zio_alloc_arena = NULL; /* arena for allocating zio memory */
/*
 * seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for kernel heap is selected in the
 * platform specific code. It can also be modified via /etc/system file.
 * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
 * pages for kernel heap. "segkmem_lpshift" is adjusted appropriately to
 * match segkmem_lpsize.
 *
 * At boot time we carve from kernel heap arena a range of virtual addresses
 * that will be used for large page mappings. This range [heap_lp_base,
 * heap_lp_end) is set up as a separate vmem arena - "heap_lp_arena". We also
 * create "kmem_lp_arena" that caches memory already backed up by large
 * pages. kmem_lp_arena imports virtual segments from heap_lp_arena.
 */
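
/*
 * Note (added for clarity): the import path described above appears later in
 * this file: segkmem_alloc_lp() is the import function used by arenas such
 * as kmem_default_arena, kmem_lp_arena imports from heap_lp_arena via
 * segkmem_alloc_lpi()/segkmem_free_lpi(), and heap_lp_arena itself is
 * created in segkmem_heap_lp_init().
 */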
size_t  segkmem_lpsize;
static uint_t   segkmem_lpshift = PAGESHIFT;
int     segkmem_lpszc = 0;

size_t  segkmem_kmemlp_quantum = 0x400000;      /* 4MB */
size_t  segkmem_heaplp_quantum;
vmem_t *heap_lp_arena;
static vmem_t *kmem_lp_arena;
static vmem_t *segkmem_ppa_arena;
static segkmem_lpcb_t segkmem_lpcb;
/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap. By default this parameter is set to 1/8 of
 * physmem but can be adjusted through /etc/system either directly or
 * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
 * we allow for large page heap.
 */
size_t  segkmem_kmemlp_max;
static uint_t   segkmem_kmemlp_pcnt;
/*
 * Getting large pages for the kernel heap could be problematic due to
 * physical memory fragmentation. That's why we allow "segkmem_kmemlp_min"
 * bytes to be preallocated at boot time.
 */
static size_t segkmem_kmemlp_min;
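
/*
 * Hedged example (added for illustration): the tunables above are normally
 * adjusted via /etc/system, e.g.
 *
 *      set segkmem_lpsize = 0x2000
 *      set segkmem_kmemlp_pcnt = 25
 *
 * The first line disables large kernel heap pages on a platform whose
 * PAGESIZE is 8K; the second allows the large page heap to consume up to
 * 25% of physmem.  The values shown are illustrative only.
 */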
/*
 * Throttling is used to avoid expensive attempts to allocate large pages
 * for the kernel heap when many successive attempts to do so have failed.
 */
static ulong_t segkmem_lpthrottle_max = 0x400000;
static ulong_t segkmem_lpthrottle_start = 0x40;
static ulong_t segkmem_use_lpthrottle = 1;
/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
        struct segkmem_gc_list  *gc_next;
        vmem_t                  *gc_arena;
        size_t                  gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;
/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
        flags |= (VM_MEMLOAD | VM_NORELOC);
        return (segkmem_alloc(vmp, size, flags));
}
/*
 * Allocations from the static_arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
        return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}
/*
 * Initialize kernel heap boundaries.
 */
        vmem_t *heaptext_parent;
        size_t heap_lp_size = 0;
        size_t kmem64_sz = kmem64_aligned_end - kmem64_base;

        kernelheap = heap_start;
        ekernelheap = heap_end;

        heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);

        /*
         * Bias heap_lp start address by kmem64_sz to reduce collisions
         * in 4M kernel TSB between kmem64 area and heap_lp
         */
        kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
        if (kmem64_sz <= heap_lp_size / 2)
                heap_lp_size -= kmem64_sz;
        heap_lp_base = ekernelheap - heap_lp_size;
        heap_lp_end = heap_lp_base + heap_lp_size;
        /*
         * If this platform has a 'core' heap area, then the space for
         * overflow module text should be carved out of the end of that
         * heap.  Otherwise, it gets carved out of the general purpose
         * heap.
         */
        core_size = (uintptr_t)core_end - (uintptr_t)core_start;
        ASSERT(core_size >= HEAPTEXT_SIZE);
        textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
        core_size -= HEAPTEXT_SIZE;

        ekernelheap -= HEAPTEXT_SIZE;
        textbase = (uintptr_t)ekernelheap;
        heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
        heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
            segkmem_alloc, segkmem_free);

        heap_core_arena = vmem_create("heap_core", core_start,
            core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
        heap_core_base = core_start;

        heap_core_arena = heap_arena;
        heap_core_base = kernelheap;
        /*
         * Reserve space for the large page heap.  If large pages for the
         * kernel heap are enabled, the large page heap arena will be created
         * later in the boot sequence in segkmem_heap_lp_init().  Otherwise
         * the allocated range will be returned back to the heap_arena.
         */
        (void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
            heap_lp_base, heap_lp_end,
            VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
        /*
         * Remove the already-spoken-for memory range [kernelheap, first_avail).
         */
        (void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
            0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
        heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
            SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
            NULL, NULL, 0, VM_SLEEP);
        /*
         * The prom claims the physical and virtual resources used by panicbuf
         * and intr_vec_table.  So reserve space for panicbuf, intr_vec_table,
         * and the reserved interrupt vector data structures from the 32-bit
         * heap.
         */
        (void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
            panicbuf, panicbuf + PANICBUFSIZE,
            VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

        (void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
            intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
            VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
        textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
        heaptext_parent = NULL;

        heap32_arena = heap_core_arena;
        heaptext_parent = heap_core_arena;

        heaptext_arena = vmem_create("heaptext", (void *)textbase,
            HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);
        /*
         * Create a set of arenas for memory with static translations
         * (e.g. VA -> PA translations cannot change).  Since using
         * kernel pages by physical address implies it isn't safe to
         * walk across page boundaries, the static_arena quantum must
         * be PAGESIZE.  Any kmem caches that require static memory
         * should source from static_arena, while direct allocations
         * should only use static_alloc_arena.
         */
        static_arena = vmem_create("static", NULL, 0, PAGESIZE,
            segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
        static_alloc_arena = vmem_create("static_alloc", NULL, 0,
            sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
            0, VM_SLEEP);
        /*
         * Create an arena for translation data (ptes, hmes, or hblks).
         * We need an arena for this because hat_memload() is essential
         * to vmem_populate() (see comments in kernel/os/vmem.c).
         *
         * Note: any kmem cache that allocates from hat_memload_arena
         * must be created as a KMC_NOHASH cache (i.e. no external slab
         * and bufctl structures to allocate) so that slab creation doesn't
         * require anything more than a single vmem_alloc().
         */
        hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
            hat_memload_alloc, segkmem_free, heap_arena, 0,
            VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
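
        /*
         * Illustrative note (not part of the original source): a cache that
         * sources its buffers from hat_memload_arena must pass KMC_NOHASH,
         * for example:
         *
         *      cp = kmem_cache_create("my_hme_cache", sizeof (struct my_hme),
         *          0, NULL, NULL, NULL, NULL, hat_memload_arena, KMC_NOHASH);
         *
         * "my_hme_cache" and struct my_hme are hypothetical names; only the
         * KMC_NOHASH flag and the hat_memload_arena vmem source matter here.
         */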
boot_mapin(caddr_t addr, size_t size)
{
        if (page_resv(btop(size), KM_NOSLEEP) == 0)
                panic("boot_mapin: page_resv failed");

        for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
                pfnum = va_to_pfn(addr);
                if (pfnum == PFN_INVALID)
                        continue;
                if ((pp = page_numtopp_nolock(pfnum)) == NULL)
                        panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

                /*
                 * Must break up any large pages that may have constituent
                 * pages being utilized for BOP_ALLOC()'s before calling
                 * page_numtopp().  The locking code (i.e. page_reclaim())
                 * can't handle them.
                 */
                page_boot_demote(pp);

                pp = page_numtopp(pfnum, SE_EXCL);
                if (pp == NULL || PP_ISFREE(pp))
                        panic("boot_alloc: pp is NULL or free");

                /*
                 * If the cage is on but doesn't yet contain this page,
                 * mark it as non-relocatable.
                 */
                if (kcage_on && !PP_ISNORELOC(pp)) {
                        PLCNT_XFER_NORELOC(pp);
                }

                (void) page_hashin(pp, &kvp.v_object, (uoff_t)(uintptr_t)addr,
/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
boot_alloc(void *inaddr, size_t size, uint_t align)
{
        caddr_t addr = inaddr;

                prom_panic("boot_alloc: attempt to allocate memory after "

        size = ptob(btopr(size));

        if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
                panic("boot_alloc: bop_alloc_chunk failed");

        if (BOP_ALLOC(bootops, addr, size, align) != addr)
                panic("boot_alloc: BOP_ALLOC failed");

        boot_mapin((caddr_t)addr, size);
        panic("segkmem_badop");

#define SEGKMEM_BADOP(t)        (t(*)())segkmem_badop
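
/*
 * Note (added for clarity): SEGKMEM_BADOP() casts segkmem_badop(), which
 * simply panics, to a function pointer returning the given type.  It is
 * used below to fill the seg_ops slots that must never be called for a
 * kernel segment.
 */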
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
    enum fault_type type, enum seg_rw rw)
{
        struct vnode *vp = seg->s_data;

        ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas || size > seg->s_size ||
            addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
                panic("segkmem_fault: bad args");

        /*
         * If it is one of segkp pages, call segkp_fault.
         */
        if (segkp_bitmap && seg == &kvseg &&
            BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
                return (segop_fault(hat, segkp, addr, size, type, rw));

        if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
                return (FC_NOSUPPORT);

        npages = btopr(size);

        case F_SOFTLOCK:        /* lock down already-loaded translations */
                for (pg = 0; pg < npages; pg++) {
                        pp = page_lookup(&vp->v_object, (uoff_t)(uintptr_t)addr,

                                /*
                                 * Hmm, no page.  Does a kernel mapping
                                 */
                                if (!hat_probe(kas.a_hat, addr)) {

                                pp = page_find(&vp->v_object,
                                    (uoff_t)(uintptr_t)addr);

                hat_reserve(seg->s_as, addr, size);

                pp = page_find(&vp->v_object, (uoff_t)(uintptr_t)addr);

        return (FC_NOSUPPORT);
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
        ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas || size > seg->s_size ||
            addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
                panic("segkmem_setprot: bad args");

        /*
         * If it is one of segkp pages, call segkp.
         */
        if (segkp_bitmap && seg == &kvseg &&
            BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
                return (segop_setprot(segkp, addr, size, prot));

                hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);

                hat_chgprot(kas.a_hat, addr, size, prot);
/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
        ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas)

        /*
         * If it is one of segkp pages, call into segkp.
         */
        if (segkp_bitmap && seg == &kvseg &&
            BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
                return (segop_checkprot(segkp, addr, size, prot));
/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
        ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas)

        /*
         * If it is one of segkp pages, call into segkp.
         */
        if (segkp_bitmap && seg == &kvseg &&
            BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
                return (segop_kluster(segkp, addr, delta));
segkmem_xdump_range(void *arg, void *start, size_t size)
{
        caddr_t addr = start;
        caddr_t addr_end = addr + size;

        while (addr < addr_end) {
                pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
                if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
                        dump_addpage(as, addr, pfn);

                dump_timeleft = dump_timeout;
segkmem_dump_range(void *arg, void *start, size_t size)
{
        caddr_t addr = start;
        caddr_t addr_end = addr + size;

        /*
         * If we are about to start dumping the range of addresses we
         * carved out of the kernel heap for the large page heap, walk
         * heap_lp_arena to find what segments are actually populated.
         */
        if (SEGKMEM_USE_LARGEPAGES &&
            addr == heap_lp_base && addr_end == heap_lp_end &&
            vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
                vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_xdump_range, arg);
        } else {
                segkmem_xdump_range(arg, start, size);
        }
segkmem_dump(struct seg *seg)
{
        /*
         * The kernel's heap_arena (represented by kvseg) is a very large
         * VA space, most of which is typically unused.  To speed up dumping
         * we use vmem_walk() to quickly find the pieces of heap_arena that
         * are actually in use.  We do the same for heap32_arena and
         * heap_core.
         *
         * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
         * may ultimately need to allocate memory.  Reentrant walks are
         * necessarily imperfect snapshots.  The kernel heap continues
         * to change during a live crash dump, for example.  For a normal
         * crash dump, however, we know that there won't be any other threads
         * messing with the heap.  Therefore, at worst, we may fail to dump
         * the pages that get allocated by the act of dumping; but we will
         * always dump every page that was allocated when the walk began.
         *
         * The other segkmem segments are dense (fully populated), so there's
         * no need to use this technique when dumping them.
         *
         * Note: when adding special dump handling for any new sparsely-
         * populated segments, be sure to add similar handling to the ::kgrep
         * code in mdb.
         */
        if (seg == &kvseg) {
                vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
                vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
        } else if (seg == &kvseg_core) {
                vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
        } else if (seg == &kvseg32) {
                vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
                vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
        } else if (seg == &kzioseg) {
                /*
                 * We don't want to dump pages attached to kzioseg since they
                 * contain file data from ZFS.  If this page's segment is
                 * kzioseg, return instead of writing it to the dump device.
                 */
                return;
        } else {
                segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
        }
/*
 * lock/unlock kmem pages over a given range [addr, addr+len).
 * Returns a shadow list of pages in ppp.  If there are holes
 * in the range (e.g. some of the kernel mappings do not have
 * underlying page_ts) returns ENOTSUP so that as_pagelock()
 * will handle the range via as_fault(F_SOFTLOCK).
 */
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
    page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
        page_t **pplist, *pp;

        struct vnode *vp = seg->s_data;

        /*
         * If it is one of segkp pages, call into segkp.
         */
        if (segkp_bitmap && seg == &kvseg &&
            BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
                return (segop_pagelock(segkp, addr, len, ppp, type, rw));

        nb = sizeof (page_t *) * npages;

        if (type == L_PAGEUNLOCK) {

                ASSERT(pplist != NULL);

                for (pg = 0; pg < npages; pg++) {

                kmem_free(pplist, nb);

        ASSERT(type == L_PAGELOCK);

        pplist = kmem_alloc(nb, KM_NOSLEEP);
        if (pplist == NULL) {
                return (ENOTSUP);       /* take the slow path */

        for (pg = 0; pg < npages; pg++) {
                pp = page_lookup(&vp->v_object, (uoff_t)(uintptr_t)addr,

                        page_unlock(pplist[pg]);
                        kmem_free(pplist, nb);
/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
        ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas)

        /*
         * If it is one of segkp pages, call into segkp.
         */
        if (segkp_bitmap && seg == &kvseg &&
            BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
                return (segop_getmemid(segkp, addr, memidp));

segkmem_capable(struct seg *seg, segcapability_t capability)
{
        if (capability == S_CAPABILITY_NOMINFLT)
const struct seg_ops segkmem_ops = {
        .dup            = SEGKMEM_BADOP(int),
        .unmap          = SEGKMEM_BADOP(int),
        .free           = SEGKMEM_BADOP(void),
        .fault          = segkmem_fault,
        .faulta         = SEGKMEM_BADOP(faultcode_t),
        .setprot        = segkmem_setprot,
        .checkprot      = segkmem_checkprot,
        .kluster        = segkmem_kluster,
        .sync           = SEGKMEM_BADOP(int),
        .incore         = SEGKMEM_BADOP(size_t),
        .lockop         = SEGKMEM_BADOP(int),
        .getprot        = SEGKMEM_BADOP(int),
        .getoffset      = SEGKMEM_BADOP(uoff_t),
        .gettype        = SEGKMEM_BADOP(int),
        .getvp          = SEGKMEM_BADOP(int),
        .advise         = SEGKMEM_BADOP(int),
        .dump           = segkmem_dump,
        .pagelock       = segkmem_pagelock,
        .setpagesize    = SEGKMEM_BADOP(int),
        .getmemid       = segkmem_getmemid,
        .capable        = segkmem_capable,
};
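
/*
 * Note (added for clarity): the create routines below install segkmem_ops
 * as a segment's s_ops and add the segment's size to kas.a_size;
 * segkmem_zio_create() is the variant used for zio segments.
 */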
segkmem_zio_create(struct seg *seg)
{
        ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
        seg->s_ops = &segkmem_ops;

        kas.a_size += seg->s_size;

segkmem_create(struct seg *seg)
{
        ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
        seg->s_ops = &segkmem_ops;

        kas.a_size += seg->s_size;
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
        struct vnode *vp = arg;

        if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
                pgflags |= PG_NORELOC;
        if ((vmflag & VM_NOSLEEP) == 0)

        if (vmflag & VM_PANIC)

        if (vmflag & VM_PUSHPAGE)
                pgflags |= PG_PUSHPAGE;
        if (vmflag & VM_NORMALPRI) {
                ASSERT(vmflag & VM_NOSLEEP);
                pgflags |= PG_NORMALPRI;
        }

        return (page_create_va(&vp->v_object, (uoff_t)(uintptr_t)addr, size,
            pgflags, &kseg, addr));
/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
    page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
        caddr_t addr = inaddr;
        pgcnt_t npages = btopr(size);

        if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)

        ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

        if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
                vmem_free(vmp, addr, size);

        ppl = page_create_func(addr, size, vmflag, pcarg);

                vmem_free(vmp, addr, size);

        /*
         * Under certain conditions, we need to let the HAT layer know
         * that it cannot safely allocate memory.  Allocations from
         * the hat_memload vmem arena always need this, to prevent
         * infinite recursion.
         *
         * In addition, the x86 hat cannot safely do memory
         * allocations while in vmem_populate(), because there
         * is no simple bound on its usage.
         */
        if (vmflag & VM_MEMLOAD)
                allocflag = HAT_NO_KALLOC;
        else if (vmem_is_populator())
                allocflag = HAT_NO_KALLOC;

        while (ppl != NULL) {

                ASSERT(page_iolock_assert(pp));
                ASSERT(PAGE_EXCL(pp));

                hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
                    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
                    HAT_LOAD_LOCK | allocflag);

        if (vmflag & SEGKMEM_SHARELOCKED)
*vmp
, size_t size
, int vmflag
, struct vnode
*vp
)
925 segkmem_gc_list_t
*gcp
, **prev_gcpp
;
929 if (kvseg
.s_base
== NULL
) {
931 if (bootops
->bsys_alloc
== NULL
)
932 halt("Memory allocation between bop_alloc() and "
937 * There's not a lot of memory to go around during boot,
938 * so recycle it if we can.
940 for (prev_gcpp
= &segkmem_gc_list
; (gcp
= *prev_gcpp
) != NULL
;
941 prev_gcpp
= &gcp
->gc_next
) {
942 if (gcp
->gc_arena
== vmp
&& gcp
->gc_size
== size
) {
943 *prev_gcpp
= gcp
->gc_next
;
948 addr
= vmem_alloc(vmp
, size
, vmflag
| VM_PANIC
);
949 if (boot_alloc(addr
, size
, BO_NO_ALIGN
) != addr
)
950 panic("segkmem_alloc: boot_alloc failed");
953 return (segkmem_xalloc(vmp
, NULL
, size
, vmflag
, 0,
954 segkmem_page_create
, vp
));
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
        return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
}

segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
{
        return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
}
/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver.  This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
    void (*func)(page_t *))
{
        caddr_t addr = inaddr;

        pgcnt_t npages = btopr(size);

        ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

        if (kvseg.s_base == NULL) {
                segkmem_gc_list_t *gc = inaddr;

                gc->gc_next = segkmem_gc_list;
                segkmem_gc_list = gc;

        hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

        for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {

                pp = page_find(&vp->v_object, (uoff_t)(uintptr_t)addr);
                        panic("segkmem_free: page not found");
                if (!page_tryupgrade(pp)) {
                        /*
                         * Some other thread has a sharelock. Wait for
                         * it to drop the lock so we can free this page.
                         */
                        pp = page_lookup(&vp->v_object, (uoff_t)(uintptr_t)addr,

                pp = page_lookup(&vp->v_object, (uoff_t)(uintptr_t)addr,

                        panic("segkmem_free: page not found");

                /* Clear p_lckcnt so page_destroy() doesn't update availrmem */

                page_destroy(pp, 0);

        page_unresv(npages);

        vmem_free(vmp, inaddr, size);
segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
{
        segkmem_free_vn(vmp, inaddr, size, &kvp, func);
}

segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
        segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
}

segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
{
        segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
}
        ASSERT(kvseg.s_base != NULL);
        while (segkmem_gc_list != NULL) {
                segkmem_gc_list_t *gc = segkmem_gc_list;
                segkmem_gc_list = gc->gc_next;
                segkmem_free(gc->gc_arena, gc, gc->gc_size);
        }
/*
 * Legacy entry points from here to end of file.
 */
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
        hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
        hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
            flags | HAT_LOAD_LOCK);
}

segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
        hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}

kmem_getpages(pgcnt_t npages, int kmflag)
{
        return (kmem_alloc(ptob(npages), kmflag));
}

kmem_freepages(void *addr, pgcnt_t npages)
{
        kmem_free(addr, ptob(npages));
}
/*
 * segkmem_page_create_large() allocates a large page to be used for the kmem
 * caches. If kpr is enabled we ask for a relocatable page unless requested
 * otherwise. If kpr is disabled we have to ask for a non-reloc page.
 */
segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
{
        if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
                pgflags |= PG_NORELOC;
        if (!(vmflag & VM_NOSLEEP))

        if (vmflag & VM_PUSHPAGE)
                pgflags |= PG_PUSHPAGE;
        if (vmflag & VM_NORMALPRI)
                pgflags |= PG_NORMALPRI;

        return (page_create_va_large(&kvp.v_object, (uoff_t)(uintptr_t)addr,
            size, pgflags, &kvseg, addr, arg));
/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size).  If addr is NULL, allocate the virtual address
 * space as well.
 */
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
        caddr_t addr = inaddr, pa;
        size_t  lpsize = segkmem_lpsize;
        pgcnt_t npages = btopr(size);
        pgcnt_t nbpages = btop(lpsize);
        pgcnt_t nlpages = size >> segkmem_lpshift;
        size_t  ppasize = nbpages * sizeof (page_t *);
        page_t *pp, *rootpp, **ppa, *pplist = NULL;

        vmflag |= VM_NOSLEEP;

        if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {

        /*
         * allocate an array we need for hat_memload_array.
         * we use a separate arena to avoid recursion.
         * we will not need this array when hat_memload_array learns pp++
         */
        if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
                goto fail_array_alloc;
        }

        if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
                goto fail_vmem_alloc;

        ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

        /* create all the pages */
        for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
                if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
                        goto fail_page_create;
                page_list_concat(&pplist, &pp);
        }

        /* at this point we have all the resource to complete the request */
        while ((rootpp = pplist) != NULL) {
                for (i = 0; i < nbpages; i++) {
                        ASSERT(pplist != NULL);
                        page_sub(&pplist, pp);
                        ASSERT(page_iolock_assert(pp));
                }

                /*
                 * Load the locked entry. It's OK to preload the entry into the
                 * TSB since we now support large mappings in the kernel TSB.
                 */
                hat_memload_array(kas.a_hat,
                    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
                    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,

                for (--i; i >= 0; --i) {
                        ppa[i]->p_lckcnt = 1;
                        page_unlock(ppa[i]);
                }
        }

        vmem_free(segkmem_ppa_arena, ppa, ppasize);

        while ((rootpp = pplist) != NULL) {
                for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
                        page_sub(&pplist, pp);
                        ASSERT(page_iolock_assert(pp));
                }
                page_destroy_pages(rootpp);
        }

        vmem_free(vmp, addr, size);

        vmem_free(segkmem_ppa_arena, ppa, ppasize);

        page_unresv(npages);
segkmem_free_one_lp(caddr_t addr, size_t size)
{
        page_t *pp, *rootpp = NULL;
        pgcnt_t pgs_left = btopr(size);

        ASSERT(size == segkmem_lpsize);

        hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

        for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
                pp = page_lookup(&kvp.v_object, (uoff_t)(uintptr_t)addr, SE_EXCL);
                        panic("segkmem_free_one_lp: page not found");
                ASSERT(PAGE_EXCL(pp));

        ASSERT(rootpp != NULL);
        page_destroy_pages(rootpp);

        /* page_unresv() is done by the caller */
/*
 * This function is called to import new spans into the vmem arenas like
 * kmem_default_arena and kmem_oversize_arena. It first tries to import
 * spans from large page arena - kmem_lp_arena. In order to do this it might
 * have to "upgrade the requested size" to kmem_lp_arena quantum. If
 * it was not able to satisfy the upgraded request it then calls regular
 * segkmem_alloc() that satisfies the request by importing from "*vmp" arena
 */
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
        kthread_t *t = curthread;
        segkmem_lpcb_t *lpcb = &segkmem_lpcb;

        ASSERT(sizep != NULL);

        if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
            !(vmflag & SEGKMEM_SHARELOCKED)) {

                size_t kmemlp_qnt = segkmem_kmemlp_quantum;
                size_t asize = P2ROUNDUP(size, kmemlp_qnt);

                ulong_t *lpthrtp = &lpcb->lp_throttle;
                ulong_t lpthrt = *lpthrtp;

                ASSERT(kmem_lp_arena != NULL);
                ASSERT(asize >= size);

                        /* try to update the throttle value */
                        lpthrt = atomic_inc_ulong_nv(lpthrtp);
                        if (lpthrt >= segkmem_lpthrottle_max) {
                                lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
                                    segkmem_lpthrottle_max / 4);
                        }

                        /*
                         * when we get above throttle start do an exponential
                         * backoff at trying large pages and reaping
                         */
                        if (lpthrt > segkmem_lpthrottle_start &&

                                lpcb->allocs_throttled++;

                                return (segkmem_alloc(vmp, size, vmflag));

                if (!(vmflag & VM_NOSLEEP) &&
                    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
                    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
                    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

                        /*
                         * we are low on free memory in kmem_lp_arena
                         * we let only one guy to allocate heap_lp
                         * quantum size chunk that everybody is going to
                         */
                        mutex_enter(&lpcb->lp_lock);

                        if (lpcb->lp_wait) {

                                /* we are not the first one - wait */
                                cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
                                if (vmem_size(kmem_lp_arena, VMEM_FREE) <

                        } else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=

                                /*
                                 * we are the first one, make sure we import
                                 */
                                if (asize == kmemlp_qnt)
                                        asize += kmemlp_qnt;

                        mutex_exit(&lpcb->lp_lock);

                /*
                 * VM_ABORT flag prevents sleeps in vmem_xalloc when
                 * large pages are not available. In that case this allocation
                 * attempt will fail and we will retry allocation with small
                 * pages. We also do not want to panic if this allocation fails
                 * because we are going to retry.
                 */
                addr = vmem_alloc(kmem_lp_arena, asize,
                    (vmflag | VM_ABORT) & ~VM_PANIC);

                        mutex_enter(&lpcb->lp_lock);
                        ASSERT(lpcb->lp_wait != 0);
                        cv_broadcast(&lpcb->lp_cv);
                        mutex_exit(&lpcb->lp_lock);

                if (vmflag & VM_NOSLEEP)
                        lpcb->nosleep_allocs_failed++;

                        lpcb->sleep_allocs_failed++;
                lpcb->alloc_bytes_failed += size;

                /* if large page throttling is not started yet do it */
                if (segkmem_use_lpthrottle && lpthrt == 0) {
                        lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
                }

        return (segkmem_alloc(vmp, size, vmflag));
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
        if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
                segkmem_free(vmp, inaddr, size);
        } else {
                vmem_free(kmem_lp_arena, inaddr, size);
        }
/*
 * segkmem_alloc_lpi() imports virtual memory from the large page heap arena
 * into the kmem_lp arena. In the process it maps the imported segment with
 * large pages.
 */
segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
{
        segkmem_lpcb_t *lpcb = &segkmem_lpcb;

        ASSERT(vmp == heap_lp_arena);

        /* do not allow the large page heap to grow beyond its limits */
        if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
                lpcb->allocs_limited++;

        addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
            segkmem_page_create_large, NULL);
/*
 * segkmem_free_lpi() returns virtual memory back into the large page heap
 * arena from the kmem_lp arena. Before doing this it unmaps the segment and
 * frees the large pages used to map it.
 */
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
        pgcnt_t nlpages = size >> segkmem_lpshift;
        size_t  lpsize = segkmem_lpsize;
        caddr_t addr = inaddr;
        pgcnt_t npages = btopr(size);

        ASSERT(vmp == heap_lp_arena);
        ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
        ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

        for (i = 0; i < nlpages; i++) {
                segkmem_free_one_lp(addr, lpsize);
        }

        page_unresv(npages);

        vmem_free(vmp, inaddr, size);
/*
 * This function is called at system boot time by kmem_init right after
 * the /etc/system file has been read. It checks, based on hardware
 * configuration and /etc/system settings, whether the system is going to use
 * large pages. The initialization necessary to actually start using large
 * pages happens later in the process after segkmem_heap_lp_init() is called.
 */
        int use_large_pages = 0;

        size_t memtotal = physmem * PAGESIZE;

        if (heap_lp_base == NULL) {
                segkmem_lpsize = PAGESIZE;

        /* get a platform dependent value of large page size for kernel heap */
        segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);

        if (segkmem_lpsize <= PAGESIZE) {
                /*
                 * put virtual space reserved for the large page kernel
                 * heap back to the regular heap
                 */
                vmem_xfree(heap_arena, heap_lp_base,
                    heap_lp_end - heap_lp_base);
                heap_lp_base = NULL;

                segkmem_lpsize = PAGESIZE;

        /* set heap_lp quantum if necessary */
        if (segkmem_heaplp_quantum == 0 || !ISP2(segkmem_heaplp_quantum) ||
            P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
                segkmem_heaplp_quantum = segkmem_lpsize;
        }

        /* set kmem_lp quantum if necessary */
        if (segkmem_kmemlp_quantum == 0 || !ISP2(segkmem_kmemlp_quantum) ||
            segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
                segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
        }

        /* set total amount of memory allowed for large page kernel heap */
        if (segkmem_kmemlp_max == 0) {
                if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
                        segkmem_kmemlp_pcnt = 12;
                segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
        }
        segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
            segkmem_heaplp_quantum);

        /* fix lp kmem preallocation request if necessary */
        if (segkmem_kmemlp_min) {
                segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
                    segkmem_heaplp_quantum);
                if (segkmem_kmemlp_min > segkmem_kmemlp_max)
                        segkmem_kmemlp_min = segkmem_kmemlp_max;
        }

        use_large_pages = 1;
        segkmem_lpszc = page_szc(segkmem_lpsize);
        segkmem_lpshift = page_get_shift(segkmem_lpszc);

        return (use_large_pages);
segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
{
        ASSERT(zio_mem_base != NULL);
        ASSERT(zio_mem_size != 0);

        /*
         * To reduce VA space fragmentation, we set up quantum caches for the
         * smaller sizes; we chose 32k because that translates to 128k VA
         * slabs, which matches nicely with the common 128k zio_data bufs.
         */
        zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
            PAGESIZE, NULL, NULL, NULL, 32 * 1024, VM_SLEEP);

        zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
            segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);

        ASSERT(zio_arena != NULL);
        ASSERT(zio_alloc_arena != NULL);
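
        /*
         * Note (added for clarity): after segkmem_zio_init(), ZFS file data
         * buffers come from zio_alloc_arena, which backs its imports with
         * pages through segkmem_zio_alloc()/segkmem_zio_free() (and therefore
         * the &zvp vnode), while zio_arena itself only manages the reserved
         * VA range, with 32K quantum caches to limit fragmentation.
         */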
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
{
        size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);

        if (ppaquantum <= PAGESIZE)
                return (segkmem_alloc(vmp, size, vmflag));

        ASSERT((size & (ppaquantum - 1)) == 0);

        addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
        if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
            segkmem_page_create, NULL) == NULL) {
                vmem_xfree(vmp, addr, size);

segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
{
        size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);

        ASSERT(addr != NULL);

        if (ppaquantum <= PAGESIZE) {
                segkmem_free(vmp, addr, size);
        } else {
                segkmem_free(NULL, addr, size);
                vmem_xfree(vmp, addr, size);
        }
segkmem_heap_lp_init()
{
        segkmem_lpcb_t *lpcb = &segkmem_lpcb;
        size_t heap_lp_size = heap_lp_end - heap_lp_base;
        size_t lpsize = segkmem_lpsize;

        if (segkmem_lpsize <= PAGESIZE) {
                ASSERT(heap_lp_base == NULL);
                ASSERT(heap_lp_end == NULL);

        ASSERT(segkmem_heaplp_quantum >= lpsize);
        ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
        ASSERT(lpcb->lp_uselp == 0);
        ASSERT(heap_lp_base != NULL);
        ASSERT(heap_lp_end != NULL);
        ASSERT(heap_lp_base < heap_lp_end);
        ASSERT(heap_lp_arena == NULL);
        ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
        ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);

        /* create large page heap arena */
        heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
            segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);

        ASSERT(heap_lp_arena != NULL);

        /* This arena caches memory already mapped by large pages */
        kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
            segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);

        ASSERT(kmem_lp_arena != NULL);

        mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);

        /*
         * this arena is used for the array of page_t pointers necessary
         * to call hat_memload_array
         */
        ppaquantum = btopr(lpsize) * sizeof (page_t *);
        segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
            segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,

        ASSERT(segkmem_ppa_arena != NULL);

        /* preallocate some memory for the lp kernel heap */
        if (segkmem_kmemlp_min) {

                ASSERT(P2PHASE(segkmem_kmemlp_min,
                    segkmem_heaplp_quantum) == 0);

                if ((addr = segkmem_alloc_lpi(heap_lp_arena,
                    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {

                        addr = vmem_add(kmem_lp_arena, addr,
                            segkmem_kmemlp_min, VM_SLEEP);

                        ASSERT(addr != NULL);