/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */
/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable variable size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or locked in memory.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>

#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <sys/bitmap.h>
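
/*
 * Illustrative sketch of the allocation interface described in the block
 * comment at the top of this file.  The helper below is hypothetical (it is
 * not part of this driver); the global 'segkp' segment, segkp_get(),
 * segkp_release() and the KPD_* flags are the real interfaces from
 * <vm/seg_kp.h>.
 */
#if 0	/* example only */
static void
example_use_segkp(size_t size)
{
	/* Zeroed, redzone-protected, pageable kernel virtual memory. */
	caddr_t va = segkp_get(segkp, P2ROUNDUP(size, PAGESIZE),
	    KPD_HASREDZONE | KPD_ZERO);

	if (va == NULL)
		return;		/* no VA or backing store available */

	/* ... use [va, va + size) ... */

	segkp_release(segkp, va);
}
#endif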
/*
 * Private seg op routines
 */
static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

#define	SEGKP_BADOP(t)	(t(*)())segkp_badop
/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;

/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;
/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious;  if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */
static const struct seg_ops segkp_ops = {
	.dup		= SEGKP_BADOP(int),
	.unmap		= SEGKP_BADOP(int),
	.free		= SEGKP_BADOP(void),
	.fault		= segkp_fault,
	.faulta		= SEGKP_BADOP(faultcode_t),
	.setprot	= SEGKP_BADOP(int),
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.sync		= SEGKP_BADOP(int),
	.incore		= SEGKP_BADOP(size_t),
	.lockop		= SEGKP_BADOP(int),
	.getprot	= SEGKP_BADOP(int),
	.getoffset	= SEGKP_BADOP(uoff_t),
	.gettype	= SEGKP_BADOP(int),
	.getvp		= SEGKP_BADOP(int),
	.advise		= SEGKP_BADOP(int),
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.setpagesize	= SEGKP_BADOP(int),
};

static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;
/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}
/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}
/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;

		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}
/*
 * There are 2 entries into segkp_get_internal. The first includes a cookie
 * used to access a pool of cached segkp resources. The second does not
 * use the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	int index = (int)(uintptr_t)cookie;
	struct seg *seg;
	size_t len;
	uint_t flags;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
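
/*
 * Illustrative sketch (hypothetical caller, not part of this driver): a
 * subsystem that repeatedly allocates fixed-size resources can set up a
 * private freelist once with segkp_cache_init() and then draw from it with
 * segkp_cache_get().  Because segkp_cache_get() records the cookie in
 * kp_cookie, a later segkp_release() returns the resource to this cache
 * rather than freeing it.
 */
#if 0	/* example only */
static void *example_cookie;

static void
example_cache_setup(size_t fixed_len)
{
	/* Keep at most 16 cached entries of fixed_len bytes each. */
	example_cookie = segkp_cache_init(segkp, 16, fixed_len,
	    KPD_HASREDZONE | KPD_LOCKED);
}

static caddr_t
example_cache_alloc(void)
{
	return (segkp_cache_get(example_cookie));
}
#endif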
caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;

	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
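
/*
 * Illustrative sketch (hypothetical caller): segkp_get_withanonmap() is for
 * clients that supply their own anon_map, so the backing anon slots live in
 * the caller's amp rather than in segkp's private anon array.  As noted in
 * segkp_get_internal(), any required swap reservation for the amp is assumed
 * to have been done already by the caller.
 */
#if 0	/* example only */
static caddr_t
example_get_with_amp(struct anon_map *amp, size_t len)
{
	/* KPD_HASAMP is added internally; KPD_LOCKED keeps pages resident. */
	return (segkp_get_withanonmap(segkp, P2ROUNDUP(len, PAGESIZE),
	    KPD_LOCKED, amp));
}
#endif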
/*
 * This does the real work of segkp allocation.
 * Return to client base addr. len must be page-aligned. A null value is
 * returned if there are no more vm resources (e.g. pages, swap). The len
 * and base recorded in the private data structure include the redzone
 * and the redzone length (if applicable). If the user requests a redzone
 * either the first or last page is left unmapped depending on whether
 * stacks grow toward low or high memory.
 *
 * The client may also specify a no-wait flag. If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex = 0;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;
	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity.  The
			 * page is returned with the "shared" lock held.
			 */
			err = fop_getpage(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred, NULL);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp.v_object,
			    (uoff_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp.v_object,
			    (uoff_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
			page_io_unlock(pp);
		}

		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED) {
			/*
			 * Indicate to page_retire framework that this
			 * page can only be retired when it is freed.
			 */
			PP_SETRAF(pp);
			page_downgrade(pp);
		} else {
			page_unlock(pp);
		}
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}
/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has less than the maximum allowable.  If inserted in cache,
 * segkp_delete ensures the element is taken off of the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}
/*
 * Free the entire resource. segkp_unlock gets called with the start of the
 * mapped portion of the resource. The length is the size of the mapped
 * portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex = 0;

	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);

	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one. Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(&vp->v_object, (uoff_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_dec_ulong(&anon_segkp_pages_resv);
			}
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp.v_object,
				    (uoff_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&(&kvp)->v_object,
			    (uoff_t)(uintptr_t)va, SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}
/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when it is safe to sleep on page_create_va().
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time (i.e. when the stack is less deep).
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
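
/*
 * Illustrative sketch of the calling pattern described above (hypothetical
 * caller, modeled on the pagefault() path): record whether the redzone was
 * mapped and unmap it again once the deep stack run is over.
 */
#if 0	/* example only */
static void
example_deep_path(void)
{
	int mapped_red = segkp_map_red();

	/* ... code that may run with very little stack left ... */

	if (mapped_red)
		segkp_unmap_red();
}
#endif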
#if defined(STACK_GROWTH_DOWN)

int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef	_LP64
	caddr_t stkbase;
#endif

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else	/* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp.v_object, (uoff_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical for the red page.
		 */
		/*
		 * No PG_NORELOC here to avoid waits. Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp.v_object, (uoff_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_inc_32(&red_nmapped);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_inc_32(&red_ndoubles);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now:  if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif	/* _LP64 */
}
void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up;  we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp.v_object, (uoff_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&(&kvp)->v_object,
		    (uoff_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error	Red stacks only supported with downwards stack growth.
#endif
/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	enum fault_type type,
	enum seg_rw rw)
{
	struct segkp_data	*kpd = NULL;
	uint_t	flags;
	int	err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion. Note reference to
		 * redzone is handled since vaddr would not equal base
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone. Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a 2nd segkp_load is unnecessary and also would
		 * result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}
/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}
/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}
/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2];	/* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = fop_getpage(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}
/*
 * At the very least, unload the mmu-translations and unlock the range if
 * locked.  Can be called with the flag value KPD_WRITEDIRTY, which specifies
 * that any dirty pages should be written to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range. It is assumed
	 * segkp_unlock is called with page aligned base
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			if ((pp = page_find(&vp->v_object, off)) == NULL) {
				if (flags & KPD_LOCKED) {
					panic("segkp_softunlock: missing page");
					/*NOTREACHED*/
				}
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(&vp->v_object, off,
			    SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want most powerful credentials we can get so
			 * use kcred.
			 */
			(void) fop_putpage(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}
/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}
/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}
/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true in which case we have to (potentially) scan
 * the whole table looking for it. This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int	i;
	int	stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}
1326 segkp_dump(struct seg
*seg
)
1329 struct segkp_data
*kpd
;
1330 struct segkp_segdata
*kpsd
= (struct segkp_segdata
*)seg
->s_data
;
1332 for (i
= 0; i
< SEGKP_HASHSZ
; i
++) {
1333 for (kpd
= kpsd
->kpsd_hash
[i
];
1334 kpd
!= NULL
; kpd
= kpd
->kp_next
) {
1339 addr
= kpd
->kp_base
;
1340 eaddr
= addr
+ kpd
->kp_len
;
1341 while (addr
< eaddr
) {
1342 ASSERT(seg
->s_as
== &kas
);
1343 pfn
= hat_getpfnum(seg
->s_as
->a_hat
, addr
);
1344 if (pfn
!= PFN_INVALID
)
1345 dump_addpage(seg
->s_as
, addr
, pfn
);
1347 dump_timeleft
= dump_timeout
;
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}
#include <sys/mem_config.h>

static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{
}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}

static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_dec_32(&segkp_indel);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}