/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */
/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable variable size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or locked in memory.
 */
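
/*
 * Illustrative sketch only (the caller, stack size and flag combination
 * below are assumptions for the example, not statements about any real
 * caller): a client such as thread_create() obtains a pageable kernel
 * stack from the global segkp segment and later releases it:
 *
 *	caddr_t stk;
 *
 *	stk = segkp_get(segkp, DEFAULTSTKSZ,
 *	    KPD_HASREDZONE | KPD_ZERO | KPD_LOCKED);
 *	if (stk == NULL)
 *		... back out: no VM resources available ...
 *	...
 *	segkp_release(segkp, stk);
 */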
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>
/*
 * Private seg op routines
 */
static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segkp_getpolicy(struct seg *seg,
			caddr_t addr);
static int	segkp_capable(struct seg *seg, segcapability_t capability);
/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

#define	SEGKP_BADOP(t)	(t(*)())segkp_badop
/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;
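
/*
 * Tuning note (an assumption about standard kernel-tunable handling, not
 * something this file arranges itself): since red_minavail is an ordinary
 * kernel variable, it can be overridden at boot via /etc/system, e.g.
 *
 *	set red_minavail = 8192
 *
 * to make segkp_map_red() map the redzone in earlier.
 */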
/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;
/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */
static struct seg_ops segkp_ops = {
	SEGKP_BADOP(int),		/* dup */
	SEGKP_BADOP(int),		/* unmap */
	SEGKP_BADOP(void),		/* free */
	segkp_fault,
	SEGKP_BADOP(faultcode_t),	/* faulta */
	SEGKP_BADOP(int),		/* setprot */
	segkp_checkprot,
	segkp_kluster,
	SEGKP_BADOP(size_t),		/* swapout */
	SEGKP_BADOP(int),		/* sync */
	SEGKP_BADOP(size_t),		/* incore */
	SEGKP_BADOP(int),		/* lockop */
	SEGKP_BADOP(int),		/* getprot */
	SEGKP_BADOP(u_offset_t),	/* getoffset */
	SEGKP_BADOP(int),		/* gettype */
	SEGKP_BADOP(int),		/* getvp */
	SEGKP_BADOP(int),		/* advise */
	segkp_dump,			/* dump */
	segkp_pagelock,			/* pagelock */
	SEGKP_BADOP(int),		/* setpgsz */
	segkp_getmemid,			/* getmemid */
	segkp_getpolicy,		/* getpolicy */
	segkp_capable,			/* capable */
	seg_inherit_notsup		/* inherit */
};

static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;
/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}
/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}
/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;

		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}
/*
 * There are 2 entries into segkp_get_internal. The first includes a cookie
 * used to access a pool of cached segkp resources. The second does not
 * use a cookie.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	struct seg *seg;
	size_t len;
	uint_t flags;
	int index = (int)(uintptr_t)cookie;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		kpd->kp_next = NULL;
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;
	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
/*
 * This does the real work of segkp allocation.
 * Return to client base addr. len must be page-aligned. A null value is
 * returned if there are no more vm resources (e.g. pages, swap). The len
 * and base recorded in the private data structure include the redzone
 * and the redzone length (if applicable). If the user requests a redzone
 * either the first or last page is left unmapped depending whether stacks
 * grow to low or high memory.
 *
 * The client may also specify a no-wait flag. If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is make the client wait.
 */
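
/*
 * Redzone layout sketch, assuming the stom() and SEGKP_MAPLEN() macros in
 * <vm/seg_kp.h> behave as the comment above describes for downward-growing
 * stacks: a KPD_HASREDZONE request for len bytes occupies len + PAGESIZE
 * bytes of segkp virtual space, the page nearest the stack limit is left
 * unmapped, and the client is handed back the mapped portion:
 *
 *	kp_base                                        kp_base + kp_len
 *	| redzone (unmapped) |   mapped portion, returned by stom()   |
 *	      PAGESIZE              SEGKP_MAPLEN(kp_len, kp_flags)
 */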
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity.  The
			 * page is returned with the "shared" lock held.
			 */
			err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred, NULL);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp,
			    (u_offset_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp,
			    (u_offset_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
			page_io_unlock(pp);
		}

		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED) {
			/*
			 * Indicate to page_retire framework that this
			 * page can only be retired when it is freed.
			 */
			PP_SETRAF(pp);
			page_downgrade(pp);
		} else
			page_unlock(pp);
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}
/*
 * Release the resource to cache if the pool (designated by the cookie)
 * has less than the maximum allowable. If inserted in cache,
 * segkp_delete insures element is taken off of active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}
/*
 * Free the entire resource. segkp_unlock gets called with the start of the
 * mapped portion of the resource. The length is the size of the mapped
 * portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);

	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one. Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_dec_ulong(&anon_segkp_pages_resv);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}
/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when:
 *
 *   - it is safe to sleep on page_create_va().
 *   - the caller is non-swappable.
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.  Note that the caller must _remain_ non-swappable until after
 * calling segkp_unmap_red().
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
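
/*
 * Caller-pattern sketch (illustrative; the variable name and surrounding
 * control flow are assumptions, not lifted from pagefault()):
 *
 *	int mapped_red = segkp_map_red();
 *	...
 *	(handle the fault, with the redzone page available if mapped)
 *	...
 *	if (mapped_red)
 *		segkp_unmap_red();
 */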
#if defined(STACK_GROWTH_DOWN)

int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef	_LP64
	caddr_t stkbase;
#endif

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else /* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical for the red page.
		 */
		/*
		 * No PG_NORELOC here to avoid waits. Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_inc_32(&red_nmapped);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_inc_32(&red_ndoubles);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now:  if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif /* _LP64 */
}

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up;  we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error Red stacks only supported with downwards stack growth.
#endif
/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat	*hat,
	struct seg	*seg,
	caddr_t		vaddr,
	size_t		len,
	enum fault_type	type,
	enum seg_rw	rw)
{
	struct segkp_data	*kpd = NULL;
	int	err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	/*
	 * Sanity checks.
	 */
	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion. Note reference to
		 * redzone is handled since vaddr would not equal base
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd,
		    KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone. Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a 2nd segkp_load is unnecessary and also would
		 * result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}
/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}
/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/* ARGSUSED */
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}
/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2];	/* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}
/*
 * At the very least unload the mmu-translations and unlock the range if locked
 * Can be called with the following flag value KPD_WRITEDIRTY which specifies
 * any dirty pages should be written to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range. It is assumed
	 * segkp_unlock is called with page aligned base
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			if ((pp = page_find(vp, off)) == NULL) {
				if (flags & KPD_LOCKED) {
					panic("segkp_softunlock: missing page");
					/*NOTREACHED*/
				}
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want most powerful credentials we can get so
			 * use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}
/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}
/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}
/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true in which case we have to (potentially) scan
 * the whole table looking for it. This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int	i;
	int	stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}
/*
 * returns size of swappable area.
 */
ssize_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (NULL);
}
/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}
/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t	*
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_dec_32(&segkp_indel);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}