/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm_impl.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/project.h>
#include <sys/tnf_probe.h>
#define	SEGSPTADDR	(caddr_t)0x0

/*
 * # pages used for spt
 */

/*
 * segspt_minfree is the memory left for system after ISM
 * locked its pages; it is set up to 5% of availrmem in
 * sptcreate when ISM is created. ISM should not use more
 * than ~90% of availrmem; if it does, then the performance
 * of the system may decrease. Machines with large memories may
 * be able to use up more memory for ISM so we set the default
 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
 * If somebody wants even more memory for ISM (risking hanging
 * the system) they can patch segspt_minfree to a smaller number.
 */
pgcnt_t segspt_minfree = 0;
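/*
 * Worked example (illustrative numbers only, not from the source): on a
 * machine where availrmem is 1,048,576 pages (4 GB of 4 KB pages) when the
 * first sptcreate() runs, segspt_minfree = availrmem / 20 = 52,428 pages,
 * i.e. roughly 5% of available memory is held back from ISM for the system.
 */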
static int segspt_create(struct seg **segpp, void *argsp);
static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_free(struct seg *seg);
static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);

static void
segspt_badop(void)
{
	panic("segspt_badop called");
}

#define	SEGSPT_BADOP(t)	(t(*)())segspt_badop
static const struct seg_ops segspt_ops = {
	.dup		= SEGSPT_BADOP(int),
	.unmap		= segspt_unmap,
	.fault		= SEGSPT_BADOP(int),
	.faulta		= SEGSPT_BADOP(faultcode_t),
	.setprot	= SEGSPT_BADOP(int),
	.checkprot	= SEGSPT_BADOP(int),
	.kluster	= SEGSPT_BADOP(int),
	.sync		= SEGSPT_BADOP(int),
	.incore		= SEGSPT_BADOP(size_t),
	.lockop		= SEGSPT_BADOP(int),
	.getprot	= SEGSPT_BADOP(int),
	.getoffset	= SEGSPT_BADOP(uoff_t),
	.gettype	= SEGSPT_BADOP(int),
	.getvp		= SEGSPT_BADOP(int),
	.advise		= SEGSPT_BADOP(int),
	.dump		= SEGSPT_BADOP(void),
	.pagelock	= SEGSPT_BADOP(int),
	.setpagesize	= SEGSPT_BADOP(int),
	.getmemid	= SEGSPT_BADOP(int),
	.getpolicy	= segspt_getpolicy,
	.capable	= SEGSPT_BADOP(int),
};
static int segspt_shmdup(struct seg *seg, struct seg *newseg);
static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_shmfree(struct seg *seg);
static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
    caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
    register size_t len, register uint_t prot);
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
    uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
    char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
    int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
static uoff_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
    struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
const struct seg_ops segspt_shmops = {
	.dup		= segspt_shmdup,
	.unmap		= segspt_shmunmap,
	.free		= segspt_shmfree,
	.fault		= segspt_shmfault,
	.faulta		= segspt_shmfaulta,
	.setprot	= segspt_shmsetprot,
	.checkprot	= segspt_shmcheckprot,
	.kluster	= segspt_shmkluster,
	.sync		= segspt_shmsync,
	.incore		= segspt_shmincore,
	.lockop		= segspt_shmlockop,
	.getprot	= segspt_shmgetprot,
	.getoffset	= segspt_shmgetoffset,
	.gettype	= segspt_shmgettype,
	.getvp		= segspt_shmgetvp,
	.advise		= segspt_shmadvise,
	.pagelock	= segspt_shmpagelock,
	.getmemid	= segspt_shmgetmemid,
	.getpolicy	= segspt_shmgetpolicy,
};
static void segspt_purge(struct seg *seg);
static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
    page_t **ppa);
int
sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
    uint_t prot, uint_t flags, uint_t share_szc)
{
	struct segspt_crargs sptcargs;

	if (segspt_minfree == 0)	/* leave min 5% of availrmem for */
		segspt_minfree = availrmem/20;	/* for the system */

	if (!hat_supported(HAT_SHARED_PT, NULL))

	/*
	 * get a new as for this shared memory segment
	 */
	newas->a_proc = NULL;
	sptcargs.prot = prot;
	sptcargs.flags = flags;
	sptcargs.szc = share_szc;
	/*
	 * create a shared page table (spt) segment
	 */
	if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
	*sptseg = sptcargs.seg_spt;
void
sptdestroy(struct as *as, struct anon_map *amp)
{
	(void) as_unmap(as, SEGSPTADDR, amp->size);
}
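/*
 * Illustrative sketch only (not part of this file): a hypothetical caller,
 * such as the sysv shm code, creating and later tearing down the shared page
 * table segment. The identifiers amp, size, prot and share_szc are assumed
 * inputs; error handling is elided.
 */
#if 0	/* example only */
	struct seg *sptseg;
	int err;

	err = sptcreate(size, &sptseg, amp, prot, SHM_PAGEABLE, share_szc);
	if (err == 0) {
		/* ... users attach via segspt_shmattach() ... */
		sptdestroy(sptseg->s_as, amp);
	}
#endif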
/*
 * called from seg_free().
 * free (i.e., unlock, unmap, return to free list)
 * all the pages in the given seg.
 */
static void
segspt_free(struct seg *seg)
{
	struct spt_data *sptd = (struct spt_data *)seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if (sptd->spt_realsize)
		segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);

	if (sptd->spt_ppa_lckcnt) {
		kmem_free(sptd->spt_ppa_lckcnt,
		    sizeof (*sptd->spt_ppa_lckcnt)
		    * btopr(sptd->spt_amp->size));
	kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
	cv_destroy(&sptd->spt_cv);
	mutex_destroy(&sptd->spt_lock);
	kmem_free(sptd, sizeof (*sptd));
static int
segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
    uint_t flags)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
static size_t
segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		while (addr < eo_seg) {
			/* page exists, and it's locked. */
			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
		struct anon_map *amp = shmd->shm_amp;
		anon_sync_obj_t cookie;

		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		anon_index = seg_page(seg, addr);
		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, anon_index++) {
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup_nowait(&vp->v_object, off,
					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
				anon_array_exit(&cookie);
			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
				ret |= SEG_PAGE_LOCKED;
		ANON_LOCK_EXIT(&amp->a_rwlock);
static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	/*
	 * seg.s_size may have been rounded up to the largest page size
	 * XXX This should be cleaned up. sptdestroy should take a length
	 * argument which should be the same as sptcreate. Then
	 * this rounding would not be needed (or is done in shm.c)
	 * Only the check for full segment will be needed.
	 *
	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
	 * to be useful at all.
	 */
	share_size = page_get_pagesize(seg->s_szc);
	ssize = P2ROUNDUP(ssize, share_size);

	if (raddr == seg->s_base && ssize == seg->s_size) {
static int
segspt_create(struct seg **segpp, void *argsp)
{
	struct seg *seg = *segpp;
	caddr_t addr = seg->s_base;
	struct spt_data *sptd;
	struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
	struct anon_map *amp = sptcargs->amp;
	struct kshmid *sp = amp->a_sp;
	struct cred *cred = CRED();
	ulong_t i, j, anon_index = 0;
	pgcnt_t npages = btopr(amp->size);
	proc_t *procp = curproc;
	rctl_qty_t lockedbytes = 0;

	/*
	 * We are holding the a_lock on the underlying dummy as,
	 * so we can make calls to the HAT layer.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if (err = anon_swap_adjust(npages))

	if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)

	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
		    KM_NOSLEEP)) == NULL)

	mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);

	if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)

	seg->s_ops = &segspt_ops;
	sptd->spt_prot = sptcargs->prot;
	sptd->spt_flags = sptcargs->flags;
	seg->s_data = (caddr_t)sptd;
	sptd->spt_ppa = NULL;
	sptd->spt_ppa_lckcnt = NULL;
	seg->s_szc = sptcargs->szc;
	cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	if (seg->s_szc > amp->a_szc) {
		amp->a_szc = seg->s_szc;
	ANON_LOCK_EXIT(&amp->a_rwlock);

	/*
	 * Set policy to affect initial allocation of pages in
	 * anon_map_createpages()
	 */
	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
	    NULL, 0, ptob(npages));
	if (sptcargs->flags & SHM_PAGEABLE) {
		pgcnt_t new_npgs, more_pgs;
		struct anon_hdr *nahp;

		share_sz = page_get_pagesize(seg->s_szc);
		if (!IS_P2ALIGNED(amp->size, share_sz)) {
			/*
			 * We are rounding up the size of the anon array
			 * on 4 M boundary because we always create 4 M
			 * of page(s) when locking, faulting pages and we
			 * don't have to check for all corner cases e.g.
			 * if there is enough space to allocate 4 M
			 */
			new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
			more_pgs = new_npgs - npages;

			/*
			 * The zone will never be NULL, as a fully created
			 * shm always has an owning zone.
			 */
			zone = sp->shm_perm.ipc_zone_ref.zref_zone;
			ASSERT(zone != NULL);
			if (anon_resv_zone(ptob(more_pgs), zone) == 0) {

			nahp = anon_create(new_npgs, ANON_SLEEP);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
			anon_release(amp->ahp, npages);

			ASSERT(amp->swresv == ptob(npages));
			amp->swresv = amp->size = ptob(new_npgs);
			ANON_LOCK_EXIT(&amp->a_rwlock);

		sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
		    sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
		sptd->spt_pcachecnt = 0;
		sptd->spt_realsize = ptob(npages);
		sptcargs->seg_spt = seg;
	/*
	 * get array of pages for each anon slot in amp
	 */
	if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
	    seg, addr, S_CREATE, cred)) != 0)

	mutex_enter(&sp->shm_mlock);

	/* May be partially locked, so, count bytes to charge for locking */
	for (i = 0; i < npages; i++)
		if (ppa[i]->p_lckcnt == 0)
			lockedbytes += PAGESIZE;

	proj = sp->shm_perm.ipc_proj;

	if (lockedbytes > 0) {
		mutex_enter(&procp->p_lock);
		if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
			mutex_exit(&procp->p_lock);
			mutex_exit(&sp->shm_mlock);
			for (i = 0; i < npages; i++)
		mutex_exit(&procp->p_lock);

	/*
	 * addr is initial address corresponding to the first page on ppa list
	 */
	for (i = 0; i < npages; i++) {
		/* attempt to lock all pages */
		if (page_pp_lock(ppa[i], 0, 1) == 0) {
			/*
			 * if unable to lock any page, unlock all
			 * of them and return error
			 */
			for (j = 0; j < i; j++)
				page_pp_unlock(ppa[j], 0, 1);
			for (i = 0; i < npages; i++)
			rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
			mutex_exit(&sp->shm_mlock);
	mutex_exit(&sp->shm_mlock);
	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * for the entire life of the segment. For example platforms
	 * that do not support Dynamic Reconfiguration.
	 */
	hat_flags = HAT_LOAD_SHARE;
	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
		hat_flags |= HAT_LOAD_LOCK;

	/*
	 * Load translations one large page at a time
	 * to make sure we don't create mappings bigger than
	 * segment's size code in case underlying pages
	 * are shared with segvn's segment that uses bigger
	 * size code than we do.
	 */
	pgsz = page_get_pagesize(seg->s_szc);
	pgcnt = page_get_pagecnt(seg->s_szc);
	for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
		sz = MIN(pgsz, ptob(npages - pidx));
		hat_memload_array(seg->s_as->a_hat, a, sz,
		    &ppa[pidx], sptd->spt_prot, hat_flags);

	/*
	 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
	 * we will leave the pages locked SE_SHARED for the life
	 * of the ISM segment. This will prevent any calls to
	 * hat_pageunload() on this ISM segment for those platforms.
	 */
	if (!(hat_flags & HAT_LOAD_LOCK)) {
		/*
		 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
		 * we no longer need to hold the SE_SHARED lock on the pages,
		 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
		 * SE_SHARED lock on the pages as necessary.
		 */
		for (i = 0; i < npages; i++)

	sptd->spt_pcachecnt = 0;
	kmem_free(ppa, ((sizeof (page_t *)) * npages));
	sptd->spt_realsize = ptob(npages);
	atomic_add_long(&spt_used, npages);
	sptcargs->seg_spt = seg;
	kmem_free(vp, sizeof (*vp));
	cv_destroy(&sptd->spt_cv);
	mutex_destroy(&sptd->spt_lock);
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		kmem_free(ppa, (sizeof (*ppa) * npages));
	kmem_free(sptd, sizeof (*sptd));
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		anon_swap_restore(npages);
static void
segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
{
	struct spt_data *sptd = (struct spt_data *)seg->s_data;
	struct anon_map *amp;
	pgcnt_t pgs, curnpgs = 0;
	rctl_qty_t unlocked_bytes = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	len = P2ROUNDUP(len, PAGESIZE);

	hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
	if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) ||
	    (sptd->spt_flags & SHM_PAGEABLE)) {
		hat_flags = HAT_UNLOAD_UNMAP;

	hat_unload(seg->s_as->a_hat, addr, len, hat_flags);

	if (sptd->spt_flags & SHM_PAGEABLE)
		npages = btop(amp->size);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		proj = sp->shm_perm.ipc_proj;
		mutex_enter(&sp->shm_mlock);

	for (anon_idx = 0; anon_idx < npages; anon_idx++) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
				panic("segspt_free_pages: null app");
			if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
		ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
		swap_xlate(ap, &vp, &off);
		/*
		 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
		 * the pages won't be having SE_SHARED lock at this
		 *
		 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
		 * the pages are still held SE_SHARED locked from the
		 * original segspt_create()
		 *
		 * Our goal is to get SE_EXCL lock on each page, remove
		 * permanent lock on it and invalidate the page.
		 */
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if (hat_flags == HAT_UNLOAD_UNMAP)
				pp = page_lookup(&vp->v_object, off, SE_EXCL);
			if ((pp = page_find(&vp->v_object, off)) == NULL) {
				panic("segspt_free_pages: "
			if (!page_tryupgrade(pp)) {
				pp = page_lookup(&vp->v_object, off,
				panic("segspt_free_pages: "
				    "page not in the system");
			ASSERT(pp->p_lckcnt > 0);
			page_pp_unlock(pp, 0, 1);
			if (pp->p_lckcnt == 0)
				unlocked_bytes += PAGESIZE;
			if ((pp = page_lookup(&vp->v_object, off, SE_EXCL)) == NULL)

		/*
		 * It's logical to invalidate the pages here as in most cases
		 * these were created by segspt.
		 */
		if (pp->p_szc != 0) {
				ASSERT(curnpgs == 0);
				pgs = curnpgs = page_get_pagecnt(pp->p_szc);
				ASSERT(IS_P2ALIGNED(pgs, pgs));
				ASSERT(!(page_pptonum(pp) & (pgs - 1)));
			} else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
				ASSERT(curnpgs == 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - 1));
				page_destroy_pages(rootpp);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - curnpgs));
			if (root != 0 || curnpgs != 0) {
				panic("segspt_free_pages: bad large page");
			/*
			 * Before destroying the pages, we need to take care
			 * of the rctl locked memory accounting. For that
			 * we need to calculate the unlocked_bytes.
			 */
			if (pp->p_lckcnt > 0)
				unlocked_bytes += PAGESIZE;
			VN_DISPOSE(pp, B_INVAL, 0, kcred);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		if (unlocked_bytes > 0)
			rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
		mutex_exit(&sp->shm_mlock);
	if (root != 0 || curnpgs != 0) {
		panic("segspt_free_pages: bad large page");
	/*
	 * mark that pages have been released
	 */
	sptd->spt_realsize = 0;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		atomic_add_long(&spt_used, -npages);
		anon_swap_restore(npages);
/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_getpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	lgrp_mem_policy_info_t *policy_info;
	struct spt_data *spt_data;

	/*
	 * Get anon_map from segspt
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	spt_data = (struct spt_data *)seg->s_data;
	if (spt_data == NULL)
	amp = spt_data->spt_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
/*
 * Return locked pages over a given range.
 *
 * We will cache all DISM locked pages and save the pplist for the
 * entire segment in the ppa field of the underlying DISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
static int
segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t pg_idx, npages, tot_npages, npgs;
	struct page **pplist, **pl, **ppa, *pp;
	struct anon_map *amp;
	pgcnt_t claim_availrmem = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);

	/*
	 * We want to lock/unlock the entire ISM segment. Therefore,
	 * we will be using the underlying sptseg and its base address
	 * and length for the caching arguments.
	 */
	pg_idx = seg_page(seg, addr);

	/*
	 * check if the request is larger than number of pages covered
	 */
	if (pg_idx + npages > btopr(sptd->spt_amp->size)) {

	if (type == L_PAGEUNLOCK) {
		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called, while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
		    (AS_ISUNMAPWAIT(seg->s_as) &&
		    shmd->shm_softlockcnt > 0)) {
	/* The L_PAGELOCK case ... */

	if (sptd->spt_flags & DISM_PPA_CHANGED) {
		/*
		 * for DISM the ppa needs to be rebuilt since the
		 * number of locked pages could have changed
		 */

	/*
	 * First try to find pages in segment page cache, without
	 * holding the segment lock.
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				seg_pinactive(seg, NULL, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		*ppp = &(sptd->spt_ppa[pg_idx]);
	mutex_enter(&sptd->spt_lock);
	/*
	 * try to find pages in segment page cache with mutex
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				mutex_exit(&sptd->spt_lock);
				seg_pinactive(seg, NULL, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[pg_idx]);

	if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    SEGP_FORCE_WIRED) == SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);
	/*
	 * No need to worry about protections because DISM pages are always rw.
	 */

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		tot_npages = btopr(sptd->spt_amp->size);

		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (an_idx = 0; an_idx < tot_npages; ) {
			ap = anon_get_ptr(amp->ahp, an_idx);
			/*
			 * Cache only mlocked pages. For large pages
			 * if one (constituent) page is mlocked
			 * all pages for that large page
			 * are cached also. This is for quick
			 * lookups of ppa array;
			 */
			if ((ap != NULL) && (lpg_cnt != 0 ||
			    (sptd->spt_ppa_lckcnt[an_idx] != 0))) {

				swap_xlate(ap, &vp, &off);
				pp = page_lookup(&vp->v_object, off,
				/*
				 * For a small page, we are done --
				 * lpg_count is reset to 0 below.
				 *
				 * For a large page, we are guaranteed
				 * to find the anon structures of all
				 * constituent pages and a non-zero
				 * lpg_cnt ensures that we don't test
				 * for mlock for these. We are done
				 * when lpg_count reaches (npgs + 1).
				 * If we are not the first constituent
				 * page, restart at the first one.
				 */
				npgs = page_get_pagecnt(pp->p_szc);
				if (!IS_P2ALIGNED(an_idx, npgs)) {
					an_idx = P2ALIGN(an_idx, npgs);
				if (++lpg_cnt > npgs)
				/*
				 * availrmem is decremented only
				 * for unlocked pages
				 */
				if (sptd->spt_ppa_lckcnt[an_idx] == 0)
				pplist[an_idx] = pp;
		ANON_LOCK_EXIT(&amp->a_rwlock);

		if (claim_availrmem) {
			mutex_enter(&freemem_lock);
			if (availrmem < tune.t_minarmem + claim_availrmem) {
				mutex_exit(&freemem_lock);
				claim_availrmem = 0;
			availrmem -= claim_availrmem;
			mutex_exit(&freemem_lock);

		/*
		 * We already have a valid ppa[].
		 */

	ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
	if (ret == SEGP_FAIL) {
		/*
		 * seg_pinsert failed. We return
		 * ENOTSUP, so that the as_pagelock() code will
		 * then try the slower F_SOFTLOCK path.
		 */
			/*
			 * No one else has referenced the ppa[].
			 * We created it and we need to destroy it.
			 */
			sptd->spt_ppa = NULL;
	/*
	 * In either case, we increment softlockcnt on the 'real' segment.
	 */
	sptd->spt_pcachecnt++;
	atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));

	ppa = sptd->spt_ppa;
	for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
		if (ppa[an_idx] == NULL) {
			mutex_exit(&sptd->spt_lock);
			seg_pinactive(seg, NULL, seg->s_base,
			    sptd->spt_amp->size,
			    pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
		if ((szc = ppa[an_idx]->p_szc) != 0) {
			npgs = page_get_pagecnt(szc);
			an_idx = P2ROUNDUP(an_idx + 1, npgs);
	/*
	 * We can now drop the sptd->spt_lock since the ppa[]
	 * exists and we have incremented pcachecnt.
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * Since we cache the entire segment, we want to
	 * set ppp to point to the first slot that corresponds
	 * to the requested addr, i.e. pg_idx.
	 */
	*ppp = &(sptd->spt_ppa[pg_idx]);
	/*
	 * We will only reach this code if we tried and failed.
	 *
	 * And we can drop the lock on the dummy seg, once we've failed
	 * to set up a new ppa[].
	 */
	mutex_exit(&sptd->spt_lock);

	if (claim_availrmem) {
		mutex_enter(&freemem_lock);
		availrmem += claim_availrmem;
		mutex_exit(&freemem_lock);

	/*
	 * We created pl and we need to destroy it.
	 */
	for (an_idx = 0; an_idx < tot_npages; an_idx++) {
		if (pplist[an_idx] != NULL)
			page_unlock(pplist[an_idx]);
	kmem_free(pl, sizeof (page_t *) * tot_npages);

	if (shmd->shm_softlockcnt <= 0) {
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			mutex_exit(&seg->s_as->a_contents);
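/*
 * Illustrative sketch only (not part of this driver): the expected pairing
 * of L_PAGELOCK and L_PAGEUNLOCK as a hypothetical caller, such as the
 * as_pagelock()/as_pageunlock() path, would issue them against a DISM
 * segment. The identifiers seg, addr, len and rw are assumed inputs.
 */
#if 0	/* example only */
	struct page **plist;

	/* Lock: on success plist points at the cached ppa[] slice. */
	if (segspt_dismpagelock(seg, addr, len, &plist,
	    L_PAGELOCK, rw) == 0) {
		/* ... perform I/O against the pages in plist ... */

		/* Unlock: drop this caller's hold on the cached ppa[]. */
		(void) segspt_dismpagelock(seg, addr, len, &plist,
		    L_PAGEUNLOCK, rw);
	}
#endif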
/*
 * return locked pages over a given range.
 *
 * We will cache the entire ISM segment and save the pplist for the
 * entire segment in the ppa field of the underlying ISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
static int
segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t np, page_index, npages;
	caddr_t a, spt_base;
	struct page **pplist, **pl, *pp;
	struct anon_map *amp;
	uint_t pl_built = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);

	/*
	 * We want to lock/unlock the entire ISM segment. Therefore,
	 * we will be using the underlying sptseg and its base address
	 * and length for the caching arguments.
	 */
	if (sptd->spt_flags & SHM_PAGEABLE) {
		return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));

	page_index = seg_page(seg, addr);
	npages = btopr(len);

	/*
	 * check if the request is larger than number of pages covered
	 */
	if (page_index + npages > btopr(sptd->spt_amp->size)) {

	if (type == L_PAGEUNLOCK) {
		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called, while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
	/* The L_PAGELOCK case... */

	/*
	 * First try to find pages in segment page cache, without
	 * holding the segment lock.
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa == pplist);
		ASSERT(sptd->spt_ppa[page_index]);
		/*
		 * Since we cache the entire ISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. page_index.
		 */
		*ppp = &(sptd->spt_ppa[page_index]);

	mutex_enter(&sptd->spt_lock);

	/*
	 * try to find pages in segment page cache
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa == pplist);
		/*
		 * Since we cache the entire segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. page_index.
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[page_index]);

	if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    SEGP_FORCE_WIRED) == SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);

	/*
	 * No need to worry about protections because ISM pages
	 */

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		ASSERT(sptd->spt_ppa == pplist);

		spt_base = sptseg->s_base;
		/*
		 * availrmem is decremented once during anon_swap_adjust()
		 * and is incremented during the anon_unresv(), which is
		 * called from shm_rm_amp() when the segment is destroyed.
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);

		/* pcachecnt is protected by sptd->spt_lock */
		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *)
		    * btopr(sptd->spt_amp->size), KM_SLEEP);

		anon_index = seg_page(sptseg, spt_base);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
		    a += PAGESIZE, anon_index++, pplist++) {
			ap = anon_get_ptr(amp->ahp, anon_index);
			swap_xlate(ap, &vp, &off);
			pp = page_lookup(&vp->v_object, off, SE_SHARED);
		ANON_LOCK_EXIT(&amp->a_rwlock);

		if (a < (spt_base + sptd->spt_amp->size)) {

		/*
		 * We already have a valid ppa[].
		 */

	ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
	if (ret == SEGP_FAIL) {
		/*
		 * seg_pinsert failed. We return
		 * ENOTSUP, so that the as_pagelock() code will
		 * then try the slower F_SOFTLOCK path.
		 */
		/*
		 * No one else has referenced the ppa[].
		 * We created it and we need to destroy it.
		 */
		sptd->spt_ppa = NULL;

	/*
	 * In either case, we increment softlockcnt on the 'real' segment.
	 */
	sptd->spt_pcachecnt++;
	atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
	/*
	 * We can now drop the sptd->spt_lock since the ppa[]
	 * exists and we have incremented pcachecnt.
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * Since we cache the entire segment, we want to
	 * set ppp to point to the first slot that corresponds
	 * to the requested addr, i.e. page_index.
	 */
	*ppp = &(sptd->spt_ppa[page_index]);

	/*
	 * We will only reach this code if we tried and failed.
	 *
	 * And we can drop the lock on the dummy seg, once we've failed
	 * to set up a new ppa[].
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * We created pl and we need to destroy it.
	 */
	np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
	page_unlock(*pplist);
	kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));

	if (shmd->shm_softlockcnt <= 0) {
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			mutex_exit(&seg->s_as->a_contents);
/*
 * purge any cached pages in the I/O page cache
 */
static void
segspt_purge(struct seg *seg)
{
	seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
}
static int
segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
	struct seg *seg = (struct seg *)ptag;
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd;
	pgcnt_t npages, i, free_availrmem = 0;

	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;
	npages = (len >> PAGESHIFT);

	ASSERT(sptd->spt_pcachecnt != 0);
	ASSERT(sptd->spt_ppa == pplist);
	ASSERT(npages == btopr(sptd->spt_amp->size));
	ASSERT(async || AS_LOCK_HELD(seg->s_as));

	/*
	 * Acquire the lock on the dummy seg and destroy the
	 * ppa array IF this is the last pcachecnt.
	 */
	mutex_enter(&sptd->spt_lock);
	if (--sptd->spt_pcachecnt == 0) {
		for (i = 0; i < npages; i++) {
			if (pplist[i] == NULL) {
			if (rw == S_WRITE) {
				hat_setrefmod(pplist[i]);
				hat_setref(pplist[i]);
			if ((sptd->spt_flags & SHM_PAGEABLE) &&
			    (sptd->spt_ppa_lckcnt[i] == 0))
			page_unlock(pplist[i]);

		if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
			mutex_enter(&freemem_lock);
			availrmem += free_availrmem;
			mutex_exit(&freemem_lock);

		/*
		 * Since we want to cache/uncache the entire ISM segment,
		 * we will track the pplist in a segspt specific field
		 * ppa, that is initialized at the time we add an entry to
		 */
		ASSERT(sptd->spt_pcachecnt == 0);
		kmem_free(pplist, sizeof (page_t *) * npages);
		sptd->spt_ppa = NULL;
		sptd->spt_flags &= ~DISM_PPA_CHANGED;
		cv_broadcast(&sptd->spt_cv);
	mutex_exit(&sptd->spt_lock);
	/*
	 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
	 * may not hold AS lock (in this case async argument is not 0). This
	 * means if softlockcnt drops to 0 after the decrement below address
	 * space may get freed. We can't allow it since after softlock
	 * decrement to 0 we still need to access as structure for possible
	 * wakeup of unmap waiters. To prevent the disappearance of as we take
	 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
	 * this mutex as a barrier to make sure this routine completes before
	 *
	 * The second complication we have to deal with in async case is a
	 * possibility of missed wake up of unmap wait thread. When we don't
	 * hold as lock here we may take a_contents lock before unmap wait
	 * thread that was first to see softlockcnt was still not 0. As a
	 * result we'll fail to wake up an unmap wait thread. To avoid this
	 * race we set nounmapwait flag in as structure if we drop softlockcnt
	 * to 0 when async is not 0. The unmapwait thread will not block if
	 * this flag is set.
	 */
	mutex_enter(&shmd->shm_segfree_syncmtx);

	/*
	 * Now decrement softlockcnt.
	 */
	ASSERT(shmd->shm_softlockcnt > 0);
	atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));

	if (shmd->shm_softlockcnt <= 0) {
		if (async || AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			AS_SETNOUNMAPWAIT(seg->s_as);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			mutex_exit(&seg->s_as->a_contents);

	mutex_exit(&shmd->shm_segfree_syncmtx);
/*
 * Do a F_SOFTUNLOCK call over the range requested.
 * The range must have already been F_SOFTLOCK'ed.
 *
 * The calls to acquire and release the anon map lock mutex were
 * removed in order to avoid a deadly embrace during a DR
 * memory delete operation. (Eg. DR blocks while waiting for an
 * exclusive lock on a page that is being used for kaio; the
 * thread that will complete the kaio and call segspt_softunlock
 * blocks on the anon map lock; another thread holding the anon
 * map lock blocks on another page lock via the segspt_shmfault
 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
 *
 * The appropriateness of the removal is based upon the following:
 * 1. If we are holding a segment's reader lock and the page is held
 * shared, then the corresponding element in anonmap which points to
 * anon struct cannot change and there is no need to acquire the
 * anonymous map lock.
 * 2. Threads in segspt_softunlock have a reader lock on the segment
 * and already have the shared page lock, so we are guaranteed that
 * the anon map slot cannot change and therefore can call anon_get_ptr()
 * without grabbing the anonymous map lock.
 * 3. Threads that softlock a shared page break copy-on-write, even if
 * it's a read. Thus cow faults can be ignored with respect to soft
 * unlocking, since the breaking of cow means that the anon slot(s) will
 */
static void
segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
    size_t len, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * and therefore their pages are SE_SHARED locked
	 * for the entire life of the segment.
	 */
	if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) &&
	    ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
		goto softlock_decrement;

	/*
	 * Any thread is free to do a page_find and
	 * page_unlock() on the pages within this seg.
	 *
	 * We are already holding the as->a_lock on the user's
	 * real segment, but we need to hold the a_lock on the
	 * underlying dummy as. This is mostly to satisfy the
	 * underlying HAT layer.
	 */
	AS_LOCK_ENTER(sptseg->s_as, RW_READER);
	hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
	AS_LOCK_EXIT(sptseg->s_as);

	amp = sptd->spt_amp;
	ASSERT(amp != NULL);
	anon_index = seg_page(sptseg, sptseg_addr);

	for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
		ap = anon_get_ptr(amp->ahp, anon_index++);
		swap_xlate(ap, &vp, &offset);

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it has a
		 */
		pp = page_find(&vp->v_object, offset);
		ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
			panic("segspt_softunlock: "
			    "addr %p, ap %p, vp %p, off %llx",
			    (void *)adr, (void *)ap, (void *)vp, offset);
		if (rw == S_WRITE) {
		} else if (rw != S_OTHER) {

	npages = btopr(len);
	ASSERT(shmd->shm_softlockcnt >= npages);
	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
	if (shmd->shm_softlockcnt == 0) {
		/*
		 * All SOFTLOCKS are gone. Wakeup any waiting
		 * unmappers so they can try again to unmap.
		 * Check for waiters first without the mutex
		 * held so we don't always grab the mutex on
		 */
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			mutex_exit(&seg->s_as->a_contents);
int
segspt_shmattach(struct seg **segpp, void *argsp)
{
	struct seg *seg = *segpp;
	struct shm_data *shmd_arg = (struct shm_data *)argsp;
	struct shm_data *shmd;
	struct anon_map *shm_amp = shmd_arg->shm_amp;
	struct spt_data *sptd;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);

	shmd->shm_sptas = shmd_arg->shm_sptas;
	shmd->shm_amp = shm_amp;
	shmd->shm_sptseg = shmd_arg->shm_sptseg;

	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
	    NULL, 0, seg->s_size);

	mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);

	seg->s_data = (void *)shmd;
	seg->s_ops = &segspt_shmops;
	seg->s_szc = shmd->shm_sptseg->s_szc;
	sptd = shmd->shm_sptseg->s_data;

	if (sptd->spt_flags & SHM_PAGEABLE) {
		if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
		    KM_NOSLEEP)) == NULL) {
			kmem_free(shmd, (sizeof (*shmd)));
		shmd->shm_lckpgs = 0;
		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
			if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
			    shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
			    seg->s_size, seg->s_szc)) != 0) {
				kmem_free(shmd->shm_vpage,
				    btopr(shm_amp->size));
		error = hat_share(seg->s_as->a_hat, seg->s_base,
		    shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
		    seg->s_size, seg->s_szc);

		kmem_free(shmd, (sizeof (*shmd)));

	ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
	ANON_LOCK_EXIT(&shm_amp->a_rwlock);
static int
segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if (shmd->shm_softlockcnt > 0) {

	if (ssize != seg->s_size) {
		cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
		    ssize, seg->s_size);

	(void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,

	hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
static void
segspt_shmfree(struct seg *seg)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *shm_amp = shmd->shm_amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	(void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
	    MC_UNLOCK, NULL, 0);

	/*
	 * Need to increment refcnt when attaching
	 * and decrement when detaching because of dup().
	 */
	ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
	ANON_LOCK_EXIT(&shm_amp->a_rwlock);

	if (shmd->shm_vpage) {	/* only for DISM */
		kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
		shmd->shm_vpage = NULL;

	/*
	 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
	 * still working with this segment without holding as lock.
	 */
	ASSERT(shmd->shm_softlockcnt == 0);
	mutex_enter(&shmd->shm_segfree_syncmtx);
	mutex_destroy(&shmd->shm_segfree_syncmtx);

	kmem_free(shmd, sizeof (*shmd));
static int
segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * Shared page table is more than shared mapping.
	 * Individual process sharing page tables can't change prot
	 * because there is only one set of page tables.
	 * This will be allowed after private page table is
	 */
	/* need to return correct status error? */
static faultcode_t
segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct as *curspt = shmd->shm_sptas;
	struct spt_data *sptd = sptseg->s_data;
	caddr_t segspt_addr, shm_addr;
	int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL);

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself. The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested. This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)

	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported. Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
	 */
	pgsz = page_get_pagesize(sptseg->s_szc);
	pgcnt = page_get_pagecnt(sptseg->s_szc);
	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
	npages = btopr(size);
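	/*
	 * Worked example (illustrative numbers only, not from the source):
	 * with a 4 MB preferred page size (pgsz = 0x400000), a fault at
	 * addr = segment base + 0x123000 for len = 0x2000 gives
	 *   shm_addr = P2ALIGN(addr, pgsz)                    -> base + 0x0
	 *   size     = P2ROUNDUP(addr + len - shm_addr, pgsz) -> 0x400000
	 *   npages   = btopr(size)                            -> 1024 4 KB pages
	 * so the fault is always expanded to whole large-page chunks.
	 */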
	/*
	 * Now we need to convert from addr in segshm to addr in segspt.
	 */
	an_idx = seg_page(seg, shm_addr);
	segspt_addr = sptseg->s_base + ptob(an_idx);

	ASSERT((segspt_addr + ptob(npages)) <=
	    (sptseg->s_base + sptd->spt_realsize));
	ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));

		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
		/*
		 * Fall through to the F_INVAL case to load up the hat layer
		 * entries with the HAT_LOAD_LOCK flag.
		 */

		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))

		ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);

		err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
			if (type == F_SOFTLOCK) {
				atomic_add_long((ulong_t *)(
				    &(shmd->shm_softlockcnt)), -npages);

		AS_LOCK_ENTER(sptseg->s_as, RW_READER);

		if (type == F_SOFTLOCK) {
			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				hat_memload_array(sptseg->s_as->a_hat,
				    a, pgsz, &ppa[pidx], sptd->spt_prot,
				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
			/*
			 * Migrate pages marked for migration
			 */
			if (lgrp_optimizations())
				page_migrate(seg, shm_addr, ppa, npages);

			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				hat_memload_array(sptseg->s_as->a_hat,
				    a, pgsz, &ppa[pidx],
				    sptd->spt_prot, HAT_LOAD_SHARE);

		/*
		 * And now drop the SE_SHARED lock(s).
		 */
		if (dyn_ism_unmap) {
			for (i = 0; i < npages; i++) {
				page_unlock(ppa[i]);

		if (!dyn_ism_unmap) {
			if (hat_share(seg->s_as->a_hat, shm_addr,
			    curspt->a_hat, segspt_addr, ptob(npages),
				panic("hat_share err in DISM fault");
			if (type == F_INVAL) {
				for (i = 0; i < npages; i++) {
					page_unlock(ppa[i]);

		AS_LOCK_EXIT(sptseg->s_as);

		kmem_free(ppa, npages * sizeof (page_t *));

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the segspt_addr is the virtual address within the
		 */
		segspt_softunlock(seg, segspt_addr, size, rw);

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */

		panic("segspt_dismfault default type?");
static faultcode_t
segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct as *curspt = shmd->shm_sptas;
	struct spt_data *sptd = sptseg->s_data;
	caddr_t sptseg_addr, shm_addr;
	ulong_t anon_index = 0;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if (sptd->spt_flags & SHM_PAGEABLE) {
		return (segspt_dismfault(hat, seg, addr, len, type, rw));

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself. The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested. This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)

	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported. Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
	 */
	pgsz = page_get_pagesize(sptseg->s_szc);
	pgcnt = page_get_pagecnt(sptseg->s_szc);
	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
	npages = btopr(size);
	/*
	 * Now we need to convert from addr in segshm to addr in segspt.
	 */
	anon_index = seg_page(seg, shm_addr);
	sptseg_addr = sptseg->s_base + ptob(anon_index);

	/*
	 * And now we may have to adjust npages downward if we have
	 * exceeded the realsize of the segment or initial anon
	 */
	if ((sptseg_addr + ptob(npages)) >
	    (sptseg->s_base + sptd->spt_realsize))
		size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;

	npages = btopr(size);

	ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
	ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);

		/*
		 * availrmem is decremented once during anon_swap_adjust()
		 * and is incremented during the anon_unresv(), which is
		 * called from shm_rm_amp() when the segment is destroyed.
		 */
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
		/*
		 * Some platforms assume that ISM pages are SE_SHARED
		 * locked for the entire life of the segment.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
		/*
		 * Fall through to the F_INVAL case to load up the hat layer
		 * entries with the HAT_LOAD_LOCK flag.
		 */

		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))

		/*
		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
		 * may still rely on this call to hat_share(). That
		 * would imply that those hat's can fault on a
		 * HAT_LOAD_LOCK translation, which would seem
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
			if (hat_share(seg->s_as->a_hat, seg->s_base,
			    curspt->a_hat, sptseg->s_base,
			    sptseg->s_size, sptseg->s_szc) != 0) {
				panic("hat_share error in ISM fault");

		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);

		/*
		 * I see no need to lock the real seg,
		 * here, because all of our work will be on the underlying
		 *
		 * sptseg_addr and npages now account for large pages.
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);
		anon_index = seg_page(sptseg, sptseg_addr);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++) {
			ap = anon_get_ptr(amp->ahp, anon_index++);
			swap_xlate(ap, &vp, &offset);
			pp = page_lookup(&vp->v_object, offset, SE_SHARED);
		ANON_LOCK_EXIT(&amp->a_rwlock);
		ASSERT(i == npages);

		/*
		 * We are already holding the as->a_lock on the user's
		 * real segment, but we need to hold the a_lock on the
		 * underlying dummy as. This is mostly to satisfy the
		 * underlying HAT layer.
		 */
		AS_LOCK_ENTER(sptseg->s_as, RW_READER);

		if (type == F_SOFTLOCK) {
			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				sz = MIN(pgsz, ptob(npages - pidx));
				hat_memload_array(sptseg->s_as->a_hat, a,
				    sz, &ppa[pidx], sptd->spt_prot,
				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
			/*
			 * Migrate pages marked for migration.
			 */
			if (lgrp_optimizations())
				page_migrate(seg, shm_addr, ppa, npages);

			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				sz = MIN(pgsz, ptob(npages - pidx));
				hat_memload_array(sptseg->s_as->a_hat,
				    a, sz, &ppa[pidx],
				    sptd->spt_prot, HAT_LOAD_SHARE);

		/*
		 * And now drop the SE_SHARED lock(s).
		 */
		for (i = 0; i < npages; i++)
			page_unlock(ppa[i]);

		AS_LOCK_EXIT(sptseg->s_as);

		kmem_free(ppa, sizeof (page_t *) * npages);

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the sptseg_addr is the virtual address within the
		 */
		segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */

		cmn_err(CE_WARN, "segspt_shmfault default type?");
static faultcode_t
segspt_shmfaulta(struct seg *seg, caddr_t addr)

static int
segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
/*
 * duplicate the shared page tables
 */
static int
segspt_shmdup(struct seg *seg, struct seg *newseg)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *amp = shmd->shm_amp;
	struct shm_data *shmd_new;
	struct seg *spt_seg = shmd->shm_sptseg;
	struct spt_data *sptd = spt_seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
	newseg->s_data = (void *)shmd_new;
	shmd_new->shm_sptas = shmd->shm_sptas;
	shmd_new->shm_amp = amp;
	shmd_new->shm_sptseg = shmd->shm_sptseg;
	newseg->s_ops = &segspt_shmops;
	newseg->s_szc = seg->s_szc;
	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
		shmd_new->shm_lckpgs = 0;
		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
			if ((error = hat_share(newseg->s_as->a_hat,
			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
			    seg->s_size, seg->s_szc)) != 0) {
				kmem_free(shmd_new->shm_vpage,
		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
static int
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * ISM segment is always rw.
	 */
	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
}
/*
 * Return an array of locked large pages, for empty slots allocate
 * private zero-filled anon pages.
 */
static int
spt_anon_getpages(struct seg *sptseg, caddr_t sptaddr, size_t len,
    page_t *ppa[])
{
	struct	spt_data *sptd = sptseg->s_data;
	struct	anon_map *amp = sptd->spt_amp;
	enum	seg_rw rw = sptd->spt_prot;
	uint_t	szc = sptseg->s_szc;
	size_t	pg_sz, share_sz = page_get_pagesize(szc);
	pgcnt_t	lp_npgs;
	caddr_t	lp_addr, e_sptaddr;
	uint_t	vpprot, ppa_szc = 0;
	struct	vpage *vpage = NULL;
	ulong_t	j, ppa_idx;
	int	err, ierr = 0;
	pgcnt_t	an_idx, amp_pgs;
	anon_sync_obj_t cookie;
	int anon_locked = 0;

	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
	ASSERT(len != 0);

	pg_sz = share_sz;
	lp_npgs = btop(pg_sz);
	lp_addr = sptaddr;
	e_sptaddr = sptaddr + len;
	an_idx = seg_page(sptseg, sptaddr);
	ppa_idx = 0;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	amp_pgs = page_get_pagecnt(amp->a_szc);

	/*CONSTCOND*/
	while (1) {
		for (; lp_addr < e_sptaddr;
		    an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {

			/*
			 * If we're currently locked, and we get to a new
			 * page, unlock our current anon chunk.
			 */
			if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
				anon_array_exit(&cookie);
				anon_locked = 0;
			}
			if (!anon_locked) {
				anon_array_enter(amp, an_idx, &cookie);
				anon_locked = 1;
			}
			ppa_szc = (uint_t)-1;
			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);

			if (ierr != 0) {
				if (ierr > 0) {
					err = FC_MAKE_ERR(ierr);
					goto lpgs_err;
				}
				break;
			}
		}
		if (lp_addr == e_sptaddr) {
			break;
		}
		ASSERT(lp_addr < e_sptaddr);

		/*
		 * ierr == -1 means we failed to allocate a large page.
		 * so do a size down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages. So do a size up
		 * operation. This relies on the fact that large pages are
		 * never partially shared i.e. if we share any constituent
		 * page of a large page with another process we must share the
		 * entire large page. Note this cannot happen for SOFTLOCK
		 * case, unless current address (lpaddr) is at the beginning
		 * of the next page size boundary because the other process
		 * couldn't have relocated locked pages.
		 */
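		/*
		 * Illustrative example of the retry ladder (hypothetical
		 * sizes): with segvn_anypgsz set and sptseg->s_szc == 2, a
		 * -1 failure retries at szc 1 and then szc 0, where base
		 * pages cannot fail for this reason; a -2 result walks szc
		 * back up toward sptseg->s_szc.
		 */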
		ASSERT(ierr == -1 || ierr == -2);
		if (segvn_anypgsz) {
			ASSERT(ierr == -2 || szc != 0);
			ASSERT(ierr == -1 || szc < sptseg->s_szc);
			szc = (ierr == -1) ? szc - 1 : szc + 1;
		} else {
			/*
			 * For faults and segvn_anypgsz == 0
			 * we need to be careful not to loop forever
			 * if existing page is found with szc other
			 * than 0 or seg->s_szc. This could be due
			 * to page relocations on behalf of DR or
			 * more likely large page creation. For this
			 * case simply re-size to existing page's szc
			 * if returned by anon_map_getpages().
			 */
			if (ppa_szc == (uint_t)-1) {
				szc = (ierr == -1) ? 0 : sptseg->s_szc;
			} else {
				ASSERT(ppa_szc <= sptseg->s_szc);
				ASSERT(ierr == -2 || ppa_szc < szc);
				ASSERT(ierr == -1 || ppa_szc > szc);
				szc = ppa_szc;
			}
		}
		pg_sz = page_get_pagesize(szc);
		lp_npgs = btop(pg_sz);
		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
	}
	if (anon_locked) {
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	return (0);

lpgs_err:
	if (anon_locked) {
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	for (j = 0; j < ppa_idx; j++)
		page_unlock(ppa[j]);
	return (err);
}
/*
 * count the number of bytes in a set of spt pages that are currently not
 * locked
 */
static rctl_qty_t
spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
{
	rctl_qty_t	unlocked = 0;
	pgcnt_t		i;

	for (i = 0; i < npages; i++) {
		if (ppa[i]->p_lckcnt == 0)
			unlocked += PAGESIZE;
	}
	return (unlocked);
}
extern	u_longlong_t randtick(void);

/* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
#define	NLCK	(NCPU_P2)

/* Random number with a range [0, n-1], n must be power of two */
#define	RAND_P2(n)	\
	((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
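/*
 * For example (illustrative value; NLCK follows NCPU_P2 at build/boot time):
 * if NLCK is 8, RAND_P2(NLCK) evaluates to a value in [0, 7], so the batch
 * size NLCK + RAND_P2(NLCK) used below falls in [8, 15].
 */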
static int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos,
    rctl_qty_t *locked)
{
	struct shm_data	*shmd = seg->s_data;
	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
	ulong_t		i;
	int		kernel = 0;
	pgcnt_t		nlck = 0;
	int		rv = 0;
	int		use_reserved = 1;

	/* return the number of bytes actually locked */
	*locked = 0;

	/*
	 * To avoid contention on freemem_lock, availrmem and pages_locked
	 * global counters are updated only every nlck locked pages instead of
	 * every time. Reserve nlck locks up front and deduct from this
	 * reservation for each page that requires a lock. When the reservation
	 * is consumed, reserve again. nlck is randomized, so the competing
	 * threads do not fall into a cyclic lock contention pattern. When
	 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
	 * is used to lock pages.
	 */
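	/*
	 * For example, with NLCK == 8 each reservation covers 8 to 15 pages,
	 * so freemem_lock is typically taken once per batch rather than once
	 * per locked page (illustrative numbers only).
	 */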
	for (i = 0; i < npages; anon_index++, pos++, i++) {
		if (nlck == 0 && use_reserved == 1) {
			nlck = NLCK + RAND_P2(NLCK);
			/* if fewer loops left, decrease nlck */
			nlck = MIN(nlck, npages - i);
			/*
			 * Reserve nlck locks up front and deduct from this
			 * reservation for each page that requires a lock. When
			 * the reservation is consumed, reserve again.
			 */
			mutex_enter(&freemem_lock);
			if ((availrmem - nlck) < pages_pp_maximum) {
				/* Do not do advance memory reserves */
				use_reserved = 0;
			} else {
				availrmem	-= nlck;
				pages_locked	+= nlck;
			}
			mutex_exit(&freemem_lock);
		}
		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
			if (sptd->spt_ppa_lckcnt[anon_index] <
			    (ushort_t)DISM_LOCK_MAX) {
				if (++sptd->spt_ppa_lckcnt[anon_index] ==
				    (ushort_t)DISM_LOCK_MAX) {
					cmn_err(CE_WARN,
					    "DISM page lock limit "
					    "reached on DISM offset 0x%lx\n",
					    anon_index << PAGESHIFT);
				}
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[anon_index]);
				if (!page_pp_lock(ppa[i], 0, kernel ||
				    use_reserved)) {
					sptd->spt_ppa_lckcnt[anon_index]--;
					rv = EAGAIN;
					break;
				}
				/* if this is a newly locked page, count it */
				if (ppa[i]->p_lckcnt == 1) {
					if (kernel == 0 && use_reserved == 1)
						nlck--;
					*locked += PAGESIZE;
				}
				shmd->shm_lckpgs++;
				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
				if (lockmap != NULL)
					BT_SET(lockmap, pos);
			}
		}
	}
	/* Return unused lock reservation */
	if (nlck != 0 && use_reserved == 1) {
		mutex_enter(&freemem_lock);
		availrmem	+= nlck;
		pages_locked	-= nlck;
		mutex_exit(&freemem_lock);
	}

	return (rv);
}
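/*
 * Undo spt_lockpages() over a range: drop each page's p_lckcnt via
 * page_pp_unlock(), clear DISM_PG_LOCKED in the shadow vpage array and
 * return the number of bytes actually unlocked in *unlocked.  availrmem
 * and pages_locked are returned in batches rather than per page to limit
 * freemem_lock contention (see spt_lockpages() above).
 */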
static int
spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    rctl_qty_t *unlocked)
{
	struct shm_data	*shmd = seg->s_data;
	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
	struct anon_map	*amp = sptd->spt_amp;
	struct anon	*ap;
	struct vnode	*vp;
	u_offset_t	off;
	struct page	*pp;
	int		kernel;
	anon_sync_obj_t	cookie;
	ulong_t		i;
	pgcnt_t		nlck = 0;
	pgcnt_t		nlck_limit = NLCK;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	for (i = 0; i < npages; i++, anon_index++) {
		if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			ASSERT(ap);

			swap_xlate(ap, &vp, &off);
			anon_array_exit(&cookie);
			pp = page_lookup(&vp->v_object, off, SE_SHARED);
			ASSERT(pp);
			/*
			 * availrmem is decremented only for pages which are not
			 * in seg pcache, for pages in seg pcache availrmem was
			 * decremented in _dismpagelock()
			 */
			kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
			ASSERT(pp->p_lckcnt > 0);

			/*
			 * unlock page but do not change availrmem, we do it
			 * ourselves every nlck loops.
			 */
			page_pp_unlock(pp, 0, 1);
			if (pp->p_lckcnt == 0) {
				if (kernel == 0)
					nlck++;
				*unlocked += PAGESIZE;
			}
			page_unlock(pp);
			shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
			sptd->spt_ppa_lckcnt[anon_index]--;
			shmd->shm_lckpgs--;
		}

		/*
		 * To reduce freemem_lock contention, do not update availrmem
		 * until at least NLCK pages have been unlocked.
		 * 1. No need to update if nlck is zero
		 * 2. Always update if the last iteration
		 */
		if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
			mutex_enter(&freemem_lock);
			availrmem	+= nlck;
			pages_locked	-= nlck;
			mutex_exit(&freemem_lock);
			nlck = 0;
			nlck_limit = NLCK + RAND_P2(NLCK);
		}
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	return (0);
}
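/*
 * Handle MC_LOCK and MC_UNLOCK memory-control requests on a DISM
 * (SHM_PAGEABLE) segment: bring the affected large pages in with
 * spt_anon_getpages(), charge the locked-memory rctl for the bytes that
 * were not already locked, then lock or unlock them with spt_lockpages()
 * or spt_unlockpages().  ISM segments are locked at creation, so the
 * request succeeds without doing anything for them.
 */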
/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct shm_data	*shmd = seg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	struct spt_data	*sptd = sptseg->s_data;
	struct kshmid	*sp = sptd->spt_amp->a_sp;
	pgcnt_t		npages, a_npages;
	page_t		**ppa;
	pgcnt_t		an_idx, a_an_idx, ppa_idx;
	caddr_t		spt_addr, a_addr;	/* spt and aligned address */
	size_t		a_len;			/* aligned len */
	size_t		share_sz;
	ulong_t		i;
	int		sts = 0;
	rctl_qty_t	unlocked = 0;
	rctl_qty_t	locked = 0;
	struct proc	*p = curproc;
	kproject_t	*proj;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(sp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		return (0);
	}

	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	an_idx = seg_page(seg, addr);
	npages = btopr(len);

	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
		return (ENOMEM);
	}

	/*
	 * A shm's project never changes, so no lock needed.
	 * The shm has a hold on the project, so it will not go away.
	 * Since we have a mapping to shm within this zone, we know
	 * that the zone will not go away.
	 */
	proj = sp->shm_perm.ipc_proj;

	if (op == MC_LOCK) {

		/*
		 * Need to align addr and size request if they are not
		 * aligned so we can always allocate large page(s) however
		 * we only lock what was requested in initial request.
		 */
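		/*
		 * For example (illustrative numbers only): with a 4MB
		 * underlying page size, a request for addr = base + 0x5000,
		 * len = 0x3000 is widened to a_addr = base and a_len = 4MB,
		 * but only the originally requested npages are locked below.
		 */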
		share_sz = page_get_pagesize(sptseg->s_szc);
		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
		    share_sz);
		a_npages = btop(a_len);
		a_an_idx = seg_page(seg, a_addr);
		spt_addr = sptseg->s_base + ptob(a_an_idx);
		ppa_idx = an_idx - a_an_idx;

		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
		    KM_NOSLEEP)) == NULL) {
			return (ENOMEM);
		}

		/*
		 * Don't cache any new pages for IO and
		 * flush any cached pages.
		 */
		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
		if (sts != 0) {
			mutex_exit(&sptd->spt_lock);
			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
			return (sts);
		}

		mutex_enter(&sp->shm_mlock);
		/* enforce locked memory rctl */
		unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);

		mutex_enter(&p->p_lock);
		if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
			mutex_exit(&p->p_lock);
			sts = EAGAIN;
		} else {
			mutex_exit(&p->p_lock);
			sts = spt_lockpages(seg, an_idx, npages,
			    &ppa[ppa_idx], lockmap, pos, &locked);

			/*
			 * correct locked count if not all pages could be
			 * locked
			 */
			if ((unlocked - locked) > 0) {
				rctl_decr_locked_mem(NULL, proj,
				    (unlocked - locked), 0);
			}
		}
		/*
		 * unlock pages
		 */
		for (i = 0; i < a_npages; i++)
			page_unlock(ppa[i]);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sp->shm_mlock);
		mutex_exit(&sptd->spt_lock);

		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

	} else if (op == MC_UNLOCK) { /* unlock */

		mutex_enter(&sptd->spt_lock);
		if (shmd->shm_lckpgs == 0) {
			mutex_exit(&sptd->spt_lock);
			return (0);
		}
		/*
		 * Don't cache new IO pages.
		 */
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		mutex_enter(&sp->shm_mlock);
		sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
		if ((ppa = sptd->spt_ppa) != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		rctl_decr_locked_mem(NULL, proj, unlocked, 0);
		mutex_exit(&sp->shm_mlock);

		if (ppa != NULL)
			seg_ppurge_wiredpp(ppa);
	}
	return (sts);
}
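/*
 * Report the protections for a range of user addresses.  Every page of an
 * ISM/DISM segment carries the same protections, so each slot of protv is
 * simply filled with spt_prot.
 */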
static int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	spgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * ISM segment is always rw.
	 */
	while (--pgno >= 0)
		*protv++ = sptd->spt_prot;
	return (0);
}
/*ARGSUSED*/
static uoff_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Offset does not matter in ISM memory */
	return ((uoff_t)0);
}
/*ARGSUSED*/
static int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * The shared memory mapping is always MAP_SHARED, SWAP is only
	 * reserved for DISM
	 */
	return (MAP_SHARED |
	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}
/*ARGSUSED*/
static int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	*vpp = sptd->spt_vp;
	return (0);
}
/*
 * We need to wait for pending IO to complete to a DISM segment in order for
 * pages to get kicked out of the seg_pcache. 120 seconds should be more
 * than enough time to wait.
 */
static clock_t spt_pcache_wait = 120;
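/*
 * spt_pcache_wait is expressed in seconds; segspt_shmadvise() below
 * multiplies it by hz to build the tick deadline passed to
 * cv_timedwait_sig().
 */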
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct shm_data	*shmd = (struct shm_data *)seg->s_data;
	struct spt_data	*sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	struct anon_map	*amp;
	pgcnt_t		pg_idx;
	ushort_t	gen;
	clock_t		end_lbolt;
	int		writer;
	page_t		**ppa;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if (behav == MADV_FREE || behav == MADV_PURGE) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
			return (0);

		amp = sptd->spt_amp;
		pg_idx = seg_page(seg, addr);

		mutex_enter(&sptd->spt_lock);
		if ((ppa = sptd->spt_ppa) == NULL) {
			mutex_exit(&sptd->spt_lock);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			(void) anon_disclaim(amp, pg_idx, len, behav, NULL);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (0);
		}

		sptd->spt_flags |= DISM_PPA_CHANGED;
		gen = sptd->spt_gen;

		mutex_exit(&sptd->spt_lock);

		/*
		 * Purge all DISM cached pages
		 */
		seg_ppurge_wiredpp(ppa);

		/*
		 * Drop the AS_LOCK so that other threads can grab it
		 * in the as_pageunlock path and hopefully get the segment
		 * kicked out of the seg_pcache. We bump the shm_softlockcnt
		 * to keep this segment resident.
		 */
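		/*
		 * While the AS lock is dropped, the raised shm_softlockcnt
		 * keeps the unmap path from tearing the segment down; any
		 * waiters that blocked meanwhile are woken up below once
		 * the count drops again.
		 */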
		writer = AS_WRITE_HELD(seg->s_as);
		atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
		AS_LOCK_EXIT(seg->s_as);

		mutex_enter(&sptd->spt_lock);

		end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);

		/*
		 * Try to wait for pages to get kicked out of the seg_pcache.
		 */
		while (sptd->spt_gen == gen &&
		    (sptd->spt_flags & DISM_PPA_CHANGED) &&
		    ddi_get_lbolt() < end_lbolt) {
			if (!cv_timedwait_sig(&sptd->spt_cv,
			    &sptd->spt_lock, end_lbolt)) {
				break;
			}
		}

		mutex_exit(&sptd->spt_lock);

		/* Regrab the AS_LOCK and release our hold on the segment */
		AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER);
		atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
		if (shmd->shm_softlockcnt <= 0) {
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				mutex_enter(&seg->s_as->a_contents);
				if (AS_ISUNMAPWAIT(seg->s_as)) {
					AS_CLRUNMAPWAIT(seg->s_as);
					cv_broadcast(&seg->s_as->a_cv);
				}
				mutex_exit(&seg->s_as->a_contents);
			}
		}

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		(void) anon_disclaim(amp, pg_idx, len, behav, NULL);
		ANON_LOCK_EXIT(&amp->a_rwlock);
	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
		int			already_set;
		ulong_t			anon_index;
		lgrp_mem_policy_t	policy;
		caddr_t			shm_addr;
		size_t			share_size;
		size_t			size;
		struct seg		*sptseg = shmd->shm_sptseg;
		caddr_t			sptseg_addr;

		/*
		 * Align address and length to page size of underlying segment
		 */
		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
		    share_size);

		amp = shmd->shm_amp;
		anon_index = seg_page(seg, shm_addr);

		/*
		 * And now we may have to adjust size downward if we have
		 * exceeded the realsize of the segment or initial anon
		 * allocations.
		 */
		sptseg_addr = sptseg->s_base + ptob(anon_index);
		if ((sptseg_addr + size) >
		    (sptseg->s_base + sptd->spt_realsize))
			size = (sptseg->s_base + sptd->spt_realsize) -
			    sptseg_addr;

		/*
		 * Set memory allocation policy for this segment
		 */
		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
		    NULL, 0, len);

		/*
		 * If random memory allocation policy set already,
		 * don't bother reapplying it.
		 */
		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
			return (0);

		/*
		 * Mark any existing pages in the given range for
		 * migration, flushing the I/O page cache, and using
		 * underlying segment to calculate anon index and get
		 * anonmap and vnode pointer from
		 */
		if (shmd->shm_softlockcnt > 0)
			segspt_purge(seg);

		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
	}

	return (0);
}
/*
 * get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct shm_data	*shmd = (struct shm_data *)seg->s_data;
	struct anon	*ap;
	size_t		anon_index;
	struct anon_map	*amp = shmd->shm_amp;
	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	anon_sync_obj_t	cookie;

	anon_index = seg_page(seg, addr);

	if (addr > (seg->s_base + sptd->spt_realsize)) {
		return (EFAULT);
	}

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	anon_array_enter(amp, anon_index, &cookie);
	ap = anon_get_ptr(amp->ahp, anon_index);
	if (ap == NULL) {
		struct page *pp;
		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
		if (pp == NULL) {
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (ENOMEM);
		}
		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
		page_unlock(pp);
	}
	anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
	memidp->val[0] = (uintptr_t)ap;
	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
	return (0);
}
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map		*amp;
	ulong_t			anon_index;
	lgrp_mem_policy_info_t	*policy_info;
	struct shm_data		*shm_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segshm
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	shm_data = (struct shm_data *)seg->s_data;
	if (shm_data == NULL)
		return (NULL);
	amp = shm_data->shm_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);