/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable variable size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or locked in memory.
 */
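/*
 * A minimal usage sketch, for orientation only (the 32K size and the
 * error handling below are assumptions for the example, not taken from
 * any particular caller):
 *
 *	caddr_t va;
 *
 *	va = segkp_get(segkp, 32 * 1024,
 *	    KPD_HASREDZONE | KPD_ZERO | KPD_LOCKED);
 *	if (va == NULL)
 *		return (ENOMEM);	(no more VA, pages, or swap)
 *	...use the resource...
 *	segkp_release(segkp, va);
 */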
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <sys/bitmap.h>
/*
 * Private seg op routines
 */
static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***page, enum lock_type type,
		    enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
		    struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
		    struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
		    size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
		    size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segkp_getpolicy(struct seg *seg,
		    caddr_t addr);
static int	segkp_capable(struct seg *seg, segcapability_t capability);
/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
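/*
 * SEGKP_BADOP casts segkp_badop to a function pointer with the given
 * return type, so every unsupported slot in the seg_ops vector below
 * funnels into the one panicking routine if it is ever called.
 */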
#define	SEGKP_BADOP(t)	(t(*)())segkp_badop
/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long	red_minavail = 5000;
/*
 * Will be set to 1 for 32-bit x86 systems only, in startup.c.
 */
int	segkp_fromheap = 0;
ulong_t	*segkp_bitmap;
/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record lbolt and the calling thread
 * in red_deep_lbolt and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

clock_t		red_deep_lbolt;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */
static struct seg_ops segkp_ops = {
	SEGKP_BADOP(int),		/* dup */
	SEGKP_BADOP(int),		/* unmap */
	SEGKP_BADOP(void),		/* free */
	segkp_fault,
	SEGKP_BADOP(faultcode_t),	/* faulta */
	SEGKP_BADOP(int),		/* setprot */
	segkp_checkprot,
	segkp_kluster,
	SEGKP_BADOP(size_t),		/* swapout */
	SEGKP_BADOP(int),		/* sync */
	SEGKP_BADOP(size_t),		/* incore */
	SEGKP_BADOP(int),		/* lockop */
	SEGKP_BADOP(int),		/* getprot */
	SEGKP_BADOP(u_offset_t),	/* getoffset */
	SEGKP_BADOP(int),		/* gettype */
	SEGKP_BADOP(int),		/* getvp */
	SEGKP_BADOP(int),		/* advise */
	segkp_dump,			/* dump */
	segkp_pagelock,			/* pagelock */
	SEGKP_BADOP(int),		/* setpgsz */
	segkp_getmemid,			/* getmemid */
	segkp_getpolicy,		/* getpolicy */
	segkp_capable,			/* capable */
};

static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}
static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;
/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}
/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}
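/*
 * Sketch of the cache interface (the freelist depth and sizes below are
 * illustrative assumptions, not values from a real caller): a client that
 * repeatedly allocates identical resources sets up a freelist once, then
 * recycles entries through segkp_cache_get()/segkp_release().
 *
 *	void *cookie;
 *	caddr_t va;
 *
 *	cookie = segkp_cache_init(segkp, 24, 16 * 1024,
 *	    KPD_HASREDZONE | KPD_LOCKED);
 *	va = segkp_cache_get(cookie);
 *	...
 *	segkp_release(segkp, va);	(returns the entry to the cache)
 */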
/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;

		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}
/*
 * There are 2 entries into segkp_get_internal. The first includes a cookie
 * used to access a pool of cached segkp resources. The second does not
 * use a cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	int index = (int)(uintptr_t)cookie;
	struct seg *seg;
	size_t len;
	uint_t flags;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		kpd->kp_next = NULL;
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;

	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
/*
 * This does the real work of segkp allocation.
 * Return to client base addr. len must be page-aligned. A null value is
 * returned if there are no more vm resources (e.g. pages, swap). The len
 * and base recorded in the private data structure include the redzone
 * and the redzone length (if applicable). If the user requests a redzone
 * either the first or last page is left unmapped depending on whether
 * stacks grow to low or high memory.
 *
 * The client may also specify a no-wait flag. If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;
	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}
	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}
	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;
		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity.  The
			 * page is returned with the "shared" lock held.
			 */
			err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred, NULL);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp,
			    (u_offset_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp,
			    (u_offset_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
		}
		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}
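/*
 * Layout sketch for a redzone request (sizes illustrative, assuming 4K
 * pages): a 16K KPD_HASREDZONE request is grown to 20K above; the redzone
 * page (first or last, per the block comment before segkp_get_internal)
 * is skipped by the loop and stays unmapped; stom() steps the returned
 * client address past it, and SEGKP_MAPLEN() excludes it from the mapped
 * length, so the client still sees exactly 16K of usable memory.
 */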
/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has fewer than the maximum allowable entries. If inserted in the cache,
 * segkp_delete ensures the element is taken off of the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}
/*
 * Free the entire resource. segkp_unlock gets called with the start of the
 * mapped portion of the resource. The length is the size of the mapped
 * portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);

	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {
		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one. Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_add_long(&anon_segkp_pages_resv,
				    -1);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}
/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when:
 *
 *   - it is safe to sleep on page_create_va().
 *   - the caller is non-swappable.
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.  Note that the caller must _remain_ non-swappable until after
 * calling segkp_unmap_red().
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
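/*
 * A sketch of the expected caller pattern, distilled from the contract
 * above (not copied from pagefault(); the local variable name is an
 * assumption):
 *
 *	int mapped_red = segkp_map_red();
 *	...handle the fault, possibly running deep on the stack...
 *	if (mapped_red)
 *		segkp_unmap_red();
 */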
#if defined(STACK_GROWTH_DOWN)

int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else /* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical for the red page.
		 */
		/*
		 * No PG_NORELOC here to avoid waits. Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */
		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_add_32(&red_nmapped, 1);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) cas32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_add_32(&red_ndoubles, 1);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now:  if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record lbolt and the calling
		 * thread.
		 */
		red_deep_lbolt = lbolt;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif /* _LP64 */
}
void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up;  we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no waiting upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error	Red stacks only supported with downwards stack growth.
#endif
/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	enum fault_type type,
	enum seg_rw rw)
{
	struct segkp_data	*kpd = NULL;
	int	err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion. Note reference to
		 * redzone is handled since vaddr would not equal base
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone. Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a 2nd segkp_load is unnecessary and also would
		 * result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}
/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}
/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}
/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2];	/* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}
/*
 * At the very least unload the mmu-translations and unlock the range if locked
 * Can be called with the following flag value KPD_WRITEDIRTY which specifies
 * any dirty pages should be written to disk.
 */
/*ARGSUSED*/
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range. It is assumed
	 * segkp_unlock is called with page aligned base
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			if ((pp = page_find(vp, off)) == NULL) {
				if (flags & KPD_LOCKED) {
					panic("segkp_softunlock: missing page");
					/*NOTREACHED*/
				}
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want most powerful credentials we can get so
			 * use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}
/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}
/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}
/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true in which case we have to (potentially) scan
 * the whole table looking for it. This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int	i;
	int	stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}
/*
 * Returns the size of the swappable area.
 */
ssize_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (0);
}
/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}
/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t	*
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_add_32(&segkp_indel, 1);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_add_32(&segkp_indel, -1);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}