/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm_impl.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/project.h>
#include <sys/tnf_probe.h>

#define SEGSPTADDR      (caddr_t)0x0
/*
 * # pages used for spt
 */

/*
 * segspt_minfree is the memory left for the system after ISM
 * locked its pages; it is set up to 5% of availrmem in
 * sptcreate when ISM is created. ISM should not use more
 * than ~90% of availrmem; if it does, then the performance
 * of the system may decrease. Machines with large memories may
 * be able to use up more memory for ISM so we set the default
 * segspt_minfree to 5% (which gives ISM a maximum of 95% of availrmem).
 * If somebody wants even more memory for ISM (risking hanging
 * the system) they can patch segspt_minfree to a smaller number.
 */
pgcnt_t segspt_minfree = 0;
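/*
 * Added worked example (not from the original source): with availrmem at
 * 1,000,000 pages, sptcreate() below sets segspt_minfree to
 * availrmem / 20 = 50,000 pages, so ISM/DISM can lock at most
 * 950,000 pages (95% of availrmem).
 */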
static int segspt_create(struct seg *seg, caddr_t argsp);
static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_free(struct seg *seg);
static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
static void
segspt_badop(void)
{
    panic("segspt_badop called");
}

#define SEGSPT_BADOP(t) (t(*)())segspt_badop
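/*
 * Added note: SEGSPT_BADOP(t) casts segspt_badop to a pointer to a function
 * returning type t, so it can fill the seg_ops slots below that must never
 * be invoked on the dummy spt segment; any such call panics in
 * segspt_badop().
 */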
struct seg_ops segspt_ops = {
    SEGSPT_BADOP(int),          /* dup */
    segspt_unmap,
    segspt_free,
    SEGSPT_BADOP(int),          /* fault */
    SEGSPT_BADOP(faultcode_t),  /* faulta */
    SEGSPT_BADOP(int),          /* setprot */
    SEGSPT_BADOP(int),          /* checkprot */
    SEGSPT_BADOP(int),          /* kluster */
    SEGSPT_BADOP(size_t),       /* swapout */
    SEGSPT_BADOP(int),          /* sync */
    SEGSPT_BADOP(size_t),       /* incore */
    SEGSPT_BADOP(int),          /* lockop */
    SEGSPT_BADOP(int),          /* getprot */
    SEGSPT_BADOP(u_offset_t),   /* getoffset */
    SEGSPT_BADOP(int),          /* gettype */
    SEGSPT_BADOP(int),          /* getvp */
    SEGSPT_BADOP(int),          /* advise */
    SEGSPT_BADOP(void),         /* dump */
    SEGSPT_BADOP(int),          /* pagelock */
    SEGSPT_BADOP(int),          /* setpgsz */
    SEGSPT_BADOP(int),          /* getmemid */
    segspt_getpolicy,           /* getpolicy */
    SEGSPT_BADOP(int),          /* capable */
};
static int segspt_shmdup(struct seg *seg, struct seg *newseg);
static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_shmfree(struct seg *seg);
static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
    caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
    register size_t len, register uint_t prot);
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
    uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmswapout(struct seg *seg);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
    char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
    int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
    uint_t *protv);
static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
    uint_t behav);
static void segspt_shmdump(struct seg *seg);
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
    struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
static int segspt_shmcapable(struct seg *, segcapability_t);
struct seg_ops segspt_shmops = {
    segspt_shmdup,
    segspt_shmunmap,
    segspt_shmfree,
    segspt_shmfault,
    segspt_shmfaulta,
    segspt_shmsetprot,
    segspt_shmcheckprot,
    segspt_shmkluster,
    segspt_shmswapout,
    segspt_shmsync,
    segspt_shmincore,
    segspt_shmlockop,
    segspt_shmgetprot,
    segspt_shmgetoffset,
    segspt_shmgettype,
    segspt_shmgetvp,
    segspt_shmadvise,   /* advise */
    segspt_shmdump,
    segspt_shmpagelock,
    segspt_shmsetpgsz,
    segspt_shmgetmemid,
    segspt_shmgetpolicy,
    segspt_shmcapable,
};
static void segspt_purge(struct seg *seg);
static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
    page_t *ppa[]);
int
sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
    uint_t prot, uint_t flags, uint_t share_szc)
{
    int err;
    struct as *newas;
    struct segspt_crargs sptcargs;

    TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
        tnf_ulong, size, size);

    if (segspt_minfree == 0)    /* leave min 5% of availrmem for */
        segspt_minfree = availrmem/20;  /* for the system */

    if (!hat_supported(HAT_SHARED_PT, (void *)0))
        return (EINVAL);

    /*
     * get a new as for this shared memory segment
     */
    newas = as_alloc();
    newas->a_proc = NULL;
    sptcargs.amp = amp;
    sptcargs.prot = prot;
    sptcargs.flags = flags;
    sptcargs.szc = share_szc;

    /*
     * create a shared page table (spt) segment
     */
    if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
        as_free(newas);
        return (err);
    }
    *sptseg = sptcargs.seg_spt;
    return (0);
}
void
sptdestroy(struct as *as, struct anon_map *amp)
{

    TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
    (void) as_unmap(as, SEGSPTADDR, amp->size);
    as_free(as);
}
/*
 * called from seg_free().
 * free (i.e., unlock, unmap, return to free list)
 * all the pages in the given seg.
 */
void
segspt_free(struct seg *seg)
{
    struct spt_data *sptd = (struct spt_data *)seg->s_data;

    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    if (sptd->spt_realsize)
        segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);

    if (sptd->spt_ppa_lckcnt)
        kmem_free(sptd->spt_ppa_lckcnt,
            sizeof (*sptd->spt_ppa_lckcnt)
            * btopr(sptd->spt_amp->size));
    kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
    cv_destroy(&sptd->spt_cv);
    mutex_destroy(&sptd->spt_lock);
    kmem_free(sptd, sizeof (*sptd));
}
static int
segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
    uint_t flags)
{
    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    return (0);
}
static size_t
segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct spt_data *sptd;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    sptseg = shmd->shm_sptseg;
    sptd = sptseg->s_data;

    if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
        while (addr < eo_seg) {
            /* page exists, and it's locked. */
            *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
    } else {
        struct anon_map *amp = shmd->shm_amp;
        anon_sync_obj_t cookie;

        addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
        anon_index = seg_page(seg, addr);
        if (anon_index + npages > btopr(shmd->shm_amp->size)) {

        ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
        for (i = 0; i < npages; i++, anon_index++) {
            anon_array_enter(amp, anon_index, &cookie);
            ap = anon_get_ptr(amp->ahp, anon_index);
            swap_xlate(ap, &vp, &off);
            anon_array_exit(&cookie);
            pp = page_lookup_nowait(vp, off, SE_SHARED);
            ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
            anon_array_exit(&cookie);
            if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
                ret |= SEG_PAGE_LOCKED;
        ANON_LOCK_EXIT(&amp->a_rwlock);
static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    /*
     * seg.s_size may have been rounded up to the largest page size
     * XXX This should be cleaned up. sptdestroy should take a length
     * argument which should be the same as sptcreate. Then
     * this rounding would not be needed (or is done in shm.c)
     * Only the check for full segment will be needed.
     *
     * XXX -- shouldn't raddr == 0 always? These tests don't seem
     * to be useful at all.
     */
    share_size = page_get_pagesize(seg->s_szc);
    ssize = P2ROUNDUP(ssize, share_size);
== seg
->s_base
&& ssize
== seg
->s_size
) {
static int
segspt_create(struct seg *seg, caddr_t argsp)
{
    caddr_t addr = seg->s_base;
    struct spt_data *sptd;
    struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
    struct anon_map *amp = sptcargs->amp;
    struct kshmid *sp = amp->a_sp;
    struct cred *cred = CRED();
    ulong_t i, j, anon_index = 0;
    pgcnt_t npages = btopr(amp->size);
    proc_t *procp = curproc;
    rctl_qty_t lockedbytes = 0;

    /*
     * We are holding the a_lock on the underlying dummy as,
     * so we can make calls to the HAT layer.
     */
    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
        tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);

    if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
        if (err = anon_swap_adjust(npages))

    if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)

    if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
        if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
            KM_NOSLEEP)) == NULL)

    mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);

    if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)

    seg->s_ops = &segspt_ops;
    sptd->spt_prot = sptcargs->prot;
    sptd->spt_flags = sptcargs->flags;
    seg->s_data = (caddr_t)sptd;
    sptd->spt_ppa = NULL;
    sptd->spt_ppa_lckcnt = NULL;
    seg->s_szc = sptcargs->szc;
    cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);

    ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
    if (seg->s_szc > amp->a_szc) {
        amp->a_szc = seg->s_szc;
    }
    ANON_LOCK_EXIT(&amp->a_rwlock);

    /*
     * Set policy to affect initial allocation of pages in
     * anon_map_createpages()
     */
    (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
        NULL, 0, ptob(npages));

    if (sptcargs->flags & SHM_PAGEABLE) {
        pgcnt_t new_npgs, more_pgs;
        struct anon_hdr *nahp;

        share_sz = page_get_pagesize(seg->s_szc);
        if (!IS_P2ALIGNED(amp->size, share_sz)) {
            /*
             * We are rounding up the size of the anon array
             * on 4 M boundary because we always create 4 M
             * of page(s) when locking, faulting pages and we
             * don't have to check for all corner cases e.g.
             * if there is enough space to allocate 4 M
             */
            new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
            more_pgs = new_npgs - npages;

            /*
             * The zone will never be NULL, as a fully created
             * shm always has an owning zone.
             */
            zone = sp->shm_perm.ipc_zone_ref.zref_zone;
            ASSERT(zone != NULL);
            if (anon_resv_zone(ptob(more_pgs), zone) == 0) {

            nahp = anon_create(new_npgs, ANON_SLEEP);
            ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
            (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
                ANON_SLEEP);
            anon_release(amp->ahp, npages);

            ASSERT(amp->swresv == ptob(npages));
            amp->swresv = amp->size = ptob(new_npgs);
            ANON_LOCK_EXIT(&amp->a_rwlock);
        }
        sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
            sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
        sptd->spt_pcachecnt = 0;
        sptd->spt_realsize = ptob(npages);
        sptcargs->seg_spt = seg;

    /*
     * get array of pages for each anon slot in amp
     */
    if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
        seg, addr, S_CREATE, cred)) != 0)

    mutex_enter(&sp->shm_mlock);

    /* May be partially locked, so, count bytes to charge for locking */
    for (i = 0; i < npages; i++)
        if (ppa[i]->p_lckcnt == 0)
            lockedbytes += PAGESIZE;
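    /*
     * Added note: only pages whose p_lckcnt is still 0 are charged here,
     * since already-locked pages have been accounted for previously;
     * lockedbytes is then passed to rctl_incr_locked_mem() below to charge
     * the project's locked-memory resource control.
     */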
    proj = sp->shm_perm.ipc_proj;

    if (lockedbytes > 0) {
        mutex_enter(&procp->p_lock);
        if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
            mutex_exit(&procp->p_lock);
            mutex_exit(&sp->shm_mlock);
            for (i = 0; i < npages; i++)

        mutex_exit(&procp->p_lock);

    /*
     * addr is initial address corresponding to the first page on ppa list
     */
    for (i = 0; i < npages; i++) {
        /* attempt to lock all pages */
        if (page_pp_lock(ppa[i], 0, 1) == 0) {
            /*
             * if unable to lock any page, unlock all
             * of them and return error
             */
            for (j = 0; j < i; j++)
                page_pp_unlock(ppa[j], 0, 1);
            for (i = 0; i < npages; i++)

            rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
            mutex_exit(&sp->shm_mlock);

    mutex_exit(&sp->shm_mlock);

    /*
     * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
     * for the entire life of the segment. For example platforms
     * that do not support Dynamic Reconfiguration.
     */
    hat_flags = HAT_LOAD_SHARE;
    if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
        hat_flags |= HAT_LOAD_LOCK;

    /*
     * Load translations one large page at a time
     * to make sure we don't create mappings bigger than
     * segment's size code in case underlying pages
     * are shared with segvn's segment that uses bigger
     * size code than we do.
     */
    pgsz = page_get_pagesize(seg->s_szc);
    pgcnt = page_get_pagecnt(seg->s_szc);
    for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
        sz = MIN(pgsz, ptob(npages - pidx));
        hat_memload_array(seg->s_as->a_hat, a, sz,
            &ppa[pidx], sptd->spt_prot, hat_flags);
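    /*
     * Added worked example (not from the original source): with 4 MB shared
     * pages and an 8 KB PAGESIZE, pgsz is 4 MB and pgcnt is 512, so each
     * hat_memload_array() call above loads one 512-page chunk; MIN() trims
     * the final chunk when npages is not a multiple of pgcnt.
     */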
    /*
     * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
     * we will leave the pages locked SE_SHARED for the life
     * of the ISM segment. This will prevent any calls to
     * hat_pageunload() on this ISM segment for those platforms.
     */
    if (!(hat_flags & HAT_LOAD_LOCK)) {
        /*
         * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
         * we no longer need to hold the SE_SHARED lock on the pages,
         * since L_PAGELOCK and F_SOFTLOCK calls will grab the
         * SE_SHARED lock on the pages as necessary.
         */
        for (i = 0; i < npages; i++)

    sptd->spt_pcachecnt = 0;
    kmem_free(ppa, ((sizeof (page_t *)) * npages));
    sptd->spt_realsize = ptob(npages);
    atomic_add_long(&spt_used, npages);
    sptcargs->seg_spt = seg;

    kmem_free(vp, sizeof (*vp));
    cv_destroy(&sptd->spt_cv);
    mutex_destroy(&sptd->spt_lock);
    if ((sptcargs->flags & SHM_PAGEABLE) == 0)
        kmem_free(ppa, (sizeof (*ppa) * npages));
    kmem_free(sptd, sizeof (*sptd));
    if ((sptcargs->flags & SHM_PAGEABLE) == 0)
        anon_swap_restore(npages);
void
segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
{
    struct spt_data *sptd = (struct spt_data *)seg->s_data;
    struct anon_map *amp;
    pgcnt_t pgs, curnpgs = 0;
    rctl_qty_t unlocked_bytes = 0;

    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    len = P2ROUNDUP(len, PAGESIZE);

    hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
    if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
        (sptd->spt_flags & SHM_PAGEABLE)) {
        hat_flags = HAT_UNLOAD_UNMAP;
    }

    hat_unload(seg->s_as->a_hat, addr, len, hat_flags);

    if (sptd->spt_flags & SHM_PAGEABLE)
        npages = btop(amp->size);

    if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
        proj = sp->shm_perm.ipc_proj;
        mutex_enter(&sp->shm_mlock);

    for (anon_idx = 0; anon_idx < npages; anon_idx++) {
        if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
            if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
                panic("segspt_free_pages: null app");
            if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))

        ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
        swap_xlate(ap, &vp, &off);

        /*
         * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
         * the pages won't be having SE_SHARED lock at this
         *
         * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
         * the pages are still held SE_SHARED locked from the
         * original segspt_create()
         *
         * Our goal is to get SE_EXCL lock on each page, remove
         * permanent lock on it and invalidate the page.
         */
        if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
            if (hat_flags == HAT_UNLOAD_UNMAP)
                pp = page_lookup(vp, off, SE_EXCL);
            if ((pp = page_find(vp, off)) == NULL) {
                panic("segspt_free_pages: "
            if (!page_tryupgrade(pp)) {
                pp = page_lookup(vp, off, SE_EXCL);
                panic("segspt_free_pages: "
                    "page not in the system");
            ASSERT(pp->p_lckcnt > 0);
            page_pp_unlock(pp, 0, 1);
            if (pp->p_lckcnt == 0)
                unlocked_bytes += PAGESIZE;

            if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)

        /*
         * It's logical to invalidate the pages here as in most cases
         * these were created by segspt.
         */
        if (pp->p_szc != 0) {
            ASSERT(curnpgs == 0);
            pgs = curnpgs = page_get_pagecnt(pp->p_szc);
            ASSERT(IS_P2ALIGNED(pgs, pgs));
            ASSERT(!(page_pptonum(pp) & (pgs - 1)));
        } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
            ASSERT(curnpgs == 1);
            ASSERT(page_pptonum(pp) ==
                page_pptonum(rootpp) + (pgs - 1));
            page_destroy_pages(rootpp);
            ASSERT(page_pptonum(pp) ==
                page_pptonum(rootpp) + (pgs - curnpgs));
        if (root != 0 || curnpgs != 0) {
            panic("segspt_free_pages: bad large page");
        /*
         * Before destroying the pages, we need to take care
         * of the rctl locked memory accounting. For that
         * we need to calculate the unlocked_bytes.
         */
        if (pp->p_lckcnt > 0)
            unlocked_bytes += PAGESIZE;
        /*LINTED: constant in conditional context */
        VN_DISPOSE(pp, B_INVAL, 0, kcred);

    if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
        if (unlocked_bytes > 0)
            rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
        mutex_exit(&sp->shm_mlock);

    if (root != 0 || curnpgs != 0) {
        panic("segspt_free_pages: bad large page");

    /*
     * mark that pages have been released
     */
    sptd->spt_realsize = 0;

    if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
        atomic_add_long(&spt_used, -npages);
        anon_swap_restore(npages);
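    /*
     * Added note: the atomic_add_long(&spt_used, -npages) above undoes the
     * matching atomic_add_long(&spt_used, npages) done in segspt_create(),
     * and anon_swap_restore() releases the swap that anon_swap_adjust()
     * reserved there.
     */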
/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_getpolicy(struct seg *seg, caddr_t addr)
{
    struct anon_map *amp;
    lgrp_mem_policy_info_t *policy_info;
    struct spt_data *spt_data;

    /*
     * Get anon_map from segspt
     *
     * Assume that no lock needs to be held on anon_map, since
     * it should be protected by its reference count which must be
     * nonzero for an existing segment
     * Need to grab readers lock on policy tree though
     */
    spt_data = (struct spt_data *)seg->s_data;
    if (spt_data == NULL)

    amp = spt_data->spt_amp;
    ASSERT(amp->refcnt != 0);

    /*
     * Assume starting anon index of 0
     */
    anon_index = seg_page(seg, addr);
    policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

    return (policy_info);
/*
 * Return locked pages over a given range.
 *
 * We will cache all DISM locked pages and save the pplist for the
 * entire segment in the ppa field of the underlying DISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
static int
segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct seg *sptseg = shmd->shm_sptseg;
    struct spt_data *sptd = sptseg->s_data;
    pgcnt_t pg_idx, npages, tot_npages, npgs;
    struct page **pplist, **pl, **ppa, *pp;
    struct anon_map *amp;
    pgcnt_t claim_availrmem = 0;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
    ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);

    /*
     * We want to lock/unlock the entire ISM segment. Therefore,
     * we will be using the underlying sptseg and its base address
     * and length for the caching arguments.
     */
    pg_idx = seg_page(seg, addr);

    /*
     * check if the request is larger than number of pages covered
     */
    if (pg_idx + npages > btopr(sptd->spt_amp->size)) {

    if (type == L_PAGEUNLOCK) {
        ASSERT(sptd->spt_ppa != NULL);

        seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
            sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

        /*
         * If someone is blocked while unmapping, we purge
         * segment page cache and thus reclaim pplist synchronously
         * without waiting for seg_pasync_thread. This speeds up
         * unmapping in cases where munmap(2) is called, while
         * raw async i/o is still in progress or where a thread
         * exits on data fault in a multithreaded application.
         */
        if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
            (AS_ISUNMAPWAIT(seg->s_as) &&
            shmd->shm_softlockcnt > 0)) {

    /* The L_PAGELOCK case ... */

    if (sptd->spt_flags & DISM_PPA_CHANGED) {
        /*
         * for DISM ppa needs to be rebuilt since
         * number of locked pages could be changed
         */

    /*
     * First try to find pages in segment page cache, without
     * holding the segment lock.
     */
    pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
        S_WRITE, SEGP_FORCE_WIRED);
    if (pplist != NULL) {
        ASSERT(sptd->spt_ppa != NULL);
        ASSERT(sptd->spt_ppa == pplist);

        for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
            if (ppa[an_idx] == NULL) {
                seg_pinactive(seg, NULL, seg->s_base,
                    sptd->spt_amp->size, ppa,
                    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

            if ((szc = ppa[an_idx]->p_szc) != 0) {
                npgs = page_get_pagecnt(szc);
                an_idx = P2ROUNDUP(an_idx + 1, npgs);

        /*
         * Since we cache the entire DISM segment, we want to
         * set ppp to point to the first slot that corresponds
         * to the requested addr, i.e. pg_idx.
         */
        *ppp = &(sptd->spt_ppa[pg_idx]);

    mutex_enter(&sptd->spt_lock);
    /*
     * try to find pages in segment page cache with mutex
     */
    pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
        S_WRITE, SEGP_FORCE_WIRED);
    if (pplist != NULL) {
        ASSERT(sptd->spt_ppa != NULL);
        ASSERT(sptd->spt_ppa == pplist);

        for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
            if (ppa[an_idx] == NULL) {
                mutex_exit(&sptd->spt_lock);
                seg_pinactive(seg, NULL, seg->s_base,
                    sptd->spt_amp->size, ppa,
                    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

            if ((szc = ppa[an_idx]->p_szc) != 0) {
                npgs = page_get_pagecnt(szc);
                an_idx = P2ROUNDUP(an_idx + 1, npgs);

        /*
         * Since we cache the entire DISM segment, we want to
         * set ppp to point to the first slot that corresponds
         * to the requested addr, i.e. pg_idx.
         */
        mutex_exit(&sptd->spt_lock);
        *ppp = &(sptd->spt_ppa[pg_idx]);

    if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
        SEGP_FORCE_WIRED) == SEGP_FAIL) {
        mutex_exit(&sptd->spt_lock);

    /*
     * No need to worry about protections because DISM pages are always rw.
     */

    /*
     * Do we need to build the ppa array?
     */
    if (sptd->spt_ppa == NULL) {

        tot_npages = btopr(sptd->spt_amp->size);

        ASSERT(sptd->spt_pcachecnt == 0);
        pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);

        ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
        for (an_idx = 0; an_idx < tot_npages; ) {
            ap = anon_get_ptr(amp->ahp, an_idx);
            /*
             * Cache only mlocked pages. For large pages
             * if one (constituent) page is mlocked
             * all pages for that large page
             * are cached also. This is for quick
             * lookups of ppa array;
             */
            if ((ap != NULL) && (lpg_cnt != 0 ||
                (sptd->spt_ppa_lckcnt[an_idx] != 0))) {

                swap_xlate(ap, &vp, &off);
                pp = page_lookup(vp, off, SE_SHARED);

                /*
                 * For a small page, we are done --
                 * lpg_count is reset to 0 below.
                 *
                 * For a large page, we are guaranteed
                 * to find the anon structures of all
                 * constituent pages and a non-zero
                 * lpg_cnt ensures that we don't test
                 * for mlock for these. We are done
                 * when lpg_count reaches (npgs + 1).
                 * If we are not the first constituent
                 * page, restart at the first one.
                 */
                npgs = page_get_pagecnt(pp->p_szc);
                if (!IS_P2ALIGNED(an_idx, npgs)) {
                    an_idx = P2ALIGN(an_idx, npgs);

                if (++lpg_cnt > npgs)

                /*
                 * availrmem is decremented only
                 * for unlocked pages
                 */
                if (sptd->spt_ppa_lckcnt[an_idx] == 0)

                pplist[an_idx] = pp;

        ANON_LOCK_EXIT(&amp->a_rwlock);

        if (claim_availrmem) {
            mutex_enter(&freemem_lock);
            if (availrmem < tune.t_minarmem + claim_availrmem) {
                mutex_exit(&freemem_lock);
                claim_availrmem = 0;

            availrmem -= claim_availrmem;

            mutex_exit(&freemem_lock);
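        /*
         * Added note: claim_availrmem counts only the DISM pages that are
         * not mlocked, and the check above refuses the claim if it would
         * push availrmem below tune.t_minarmem; on failure claim_availrmem
         * is reset to 0 so the error path does not return pages it never
         * took.
         */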
        /*
         * We already have a valid ppa[].
         */

        ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
            sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
            segspt_reclaim);

        if (ret == SEGP_FAIL) {
            /*
             * seg_pinsert failed. We return
             * ENOTSUP, so that the as_pagelock() code will
             * then try the slower F_SOFTLOCK path.
             */

            /*
             * No one else has referenced the ppa[].
             * We created it and we need to destroy it.
             */
            sptd->spt_ppa = NULL;

    /*
     * In either case, we increment softlockcnt on the 'real' segment.
     */
    sptd->spt_pcachecnt++;
    atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));

    ppa = sptd->spt_ppa;
    for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
        if (ppa[an_idx] == NULL) {
            mutex_exit(&sptd->spt_lock);
            seg_pinactive(seg, NULL, seg->s_base,
                sptd->spt_amp->size,
                pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

        if ((szc = ppa[an_idx]->p_szc) != 0) {
            npgs = page_get_pagecnt(szc);
            an_idx = P2ROUNDUP(an_idx + 1, npgs);

    /*
     * We can now drop the sptd->spt_lock since the ppa[]
     * exists and we have incremented pcachecnt.
     */
    mutex_exit(&sptd->spt_lock);

    /*
     * Since we cache the entire segment, we want to
     * set ppp to point to the first slot that corresponds
     * to the requested addr, i.e. pg_idx.
     */
    *ppp = &(sptd->spt_ppa[pg_idx]);

    /*
     * We will only reach this code if we tried and failed.
     *
     * And we can drop the lock on the dummy seg, once we've failed
     * to set up a new ppa[].
     */
    mutex_exit(&sptd->spt_lock);

    if (claim_availrmem) {
        mutex_enter(&freemem_lock);
        availrmem += claim_availrmem;
        mutex_exit(&freemem_lock);

    /*
     * We created pl and we need to destroy it.
     */
    for (an_idx = 0; an_idx < tot_npages; an_idx++) {
        if (pplist[an_idx] != NULL)
            page_unlock(pplist[an_idx]);

    kmem_free(pl, sizeof (page_t *) * tot_npages);

    if (shmd->shm_softlockcnt <= 0) {
        if (AS_ISUNMAPWAIT(seg->s_as)) {
            mutex_enter(&seg->s_as->a_contents);
            if (AS_ISUNMAPWAIT(seg->s_as)) {
                AS_CLRUNMAPWAIT(seg->s_as);
                cv_broadcast(&seg->s_as->a_cv);
            }
            mutex_exit(&seg->s_as->a_contents);
/*
 * return locked pages over a given range.
 *
 * We will cache the entire ISM segment and save the pplist for the
 * entire segment in the ppa field of the underlying ISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
static int
segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct seg *sptseg = shmd->shm_sptseg;
    struct spt_data *sptd = sptseg->s_data;
    pgcnt_t np, page_index, npages;
    caddr_t a, spt_base;
    struct page **pplist, **pl, *pp;
    struct anon_map *amp;
    uint_t pl_built = 0;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
    ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);

    /*
     * We want to lock/unlock the entire ISM segment. Therefore,
     * we will be using the underlying sptseg and its base address
     * and length for the caching arguments.
     */
    if (sptd->spt_flags & SHM_PAGEABLE) {
        return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));

    page_index = seg_page(seg, addr);
    npages = btopr(len);

    /*
     * check if the request is larger than number of pages covered
     */
    if (page_index + npages > btopr(sptd->spt_amp->size)) {

    if (type == L_PAGEUNLOCK) {

        ASSERT(sptd->spt_ppa != NULL);

        seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
            sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

        /*
         * If someone is blocked while unmapping, we purge
         * segment page cache and thus reclaim pplist synchronously
         * without waiting for seg_pasync_thread. This speeds up
         * unmapping in cases where munmap(2) is called, while
         * raw async i/o is still in progress or where a thread
         * exits on data fault in a multithreaded application.
         */
        if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {

    /* The L_PAGELOCK case... */

    /*
     * First try to find pages in segment page cache, without
     * holding the segment lock.
     */
    pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
        S_WRITE, SEGP_FORCE_WIRED);
    if (pplist != NULL) {
        ASSERT(sptd->spt_ppa == pplist);
        ASSERT(sptd->spt_ppa[page_index]);
        /*
         * Since we cache the entire ISM segment, we want to
         * set ppp to point to the first slot that corresponds
         * to the requested addr, i.e. page_index.
         */
        *ppp = &(sptd->spt_ppa[page_index]);

    mutex_enter(&sptd->spt_lock);

    /*
     * try to find pages in segment page cache
     */
    pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
        S_WRITE, SEGP_FORCE_WIRED);
    if (pplist != NULL) {
        ASSERT(sptd->spt_ppa == pplist);
        /*
         * Since we cache the entire segment, we want to
         * set ppp to point to the first slot that corresponds
         * to the requested addr, i.e. page_index.
         */
        mutex_exit(&sptd->spt_lock);
        *ppp = &(sptd->spt_ppa[page_index]);

    if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
        SEGP_FORCE_WIRED) == SEGP_FAIL) {
        mutex_exit(&sptd->spt_lock);

    /*
     * No need to worry about protections because ISM pages
     * are always rw.
     */

    /*
     * Do we need to build the ppa array?
     */
    if (sptd->spt_ppa == NULL) {
        ASSERT(sptd->spt_ppa == pplist);

        spt_base = sptseg->s_base;

        /*
         * availrmem is decremented once during anon_swap_adjust()
         * and is incremented during the anon_unresv(), which is
         * called from shm_rm_amp() when the segment is destroyed.
         */
        amp = sptd->spt_amp;
        ASSERT(amp != NULL);

        /* pcachecnt is protected by sptd->spt_lock */
        ASSERT(sptd->spt_pcachecnt == 0);
        pplist = kmem_zalloc(sizeof (page_t *)
            * btopr(sptd->spt_amp->size), KM_SLEEP);

        anon_index = seg_page(sptseg, spt_base);

        ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
        for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
            a += PAGESIZE, anon_index++, pplist++) {
            ap = anon_get_ptr(amp->ahp, anon_index);
            swap_xlate(ap, &vp, &off);
            pp = page_lookup(vp, off, SE_SHARED);

        ANON_LOCK_EXIT(&amp->a_rwlock);

        if (a < (spt_base + sptd->spt_amp->size)) {

        /*
         * We already have a valid ppa[].
         */

        ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
            sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
            segspt_reclaim);

        if (ret == SEGP_FAIL) {
            /*
             * seg_pinsert failed. We return
             * ENOTSUP, so that the as_pagelock() code will
             * then try the slower F_SOFTLOCK path.
             */

            /*
             * No one else has referenced the ppa[].
             * We created it and we need to destroy it.
             */
            sptd->spt_ppa = NULL;

    /*
     * In either case, we increment softlockcnt on the 'real' segment.
     */
    sptd->spt_pcachecnt++;
    atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));

    /*
     * We can now drop the sptd->spt_lock since the ppa[]
     * exists and we have incremented pcachecnt.
     */
    mutex_exit(&sptd->spt_lock);

    /*
     * Since we cache the entire segment, we want to
     * set ppp to point to the first slot that corresponds
     * to the requested addr, i.e. page_index.
     */
    *ppp = &(sptd->spt_ppa[page_index]);

    /*
     * We will only reach this code if we tried and failed.
     *
     * And we can drop the lock on the dummy seg, once we've failed
     * to set up a new ppa[].
     */
    mutex_exit(&sptd->spt_lock);

    /*
     * We created pl and we need to destroy it.
     */
    np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
    page_unlock(*pplist);

    kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));

    if (shmd->shm_softlockcnt <= 0) {
        if (AS_ISUNMAPWAIT(seg->s_as)) {
            mutex_enter(&seg->s_as->a_contents);
            if (AS_ISUNMAPWAIT(seg->s_as)) {
                AS_CLRUNMAPWAIT(seg->s_as);
                cv_broadcast(&seg->s_as->a_cv);
            }
            mutex_exit(&seg->s_as->a_contents);
/*
 * purge any cached pages in the I/O page cache
 */
static void
segspt_purge(struct seg *seg)
{
    seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
}
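/*
 * Added note: seg_ppurge() with SEGP_FORCE_WIRED forces the wired entry for
 * this segment out of the pagelock cache, which calls back into
 * segspt_reclaim() to unlock the cached ppa[] without waiting for the
 * asynchronous pcache thread.
 */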
static int
segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
    struct seg *seg = (struct seg *)ptag;
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct spt_data *sptd;
    pgcnt_t npages, i, free_availrmem = 0;

    sptseg = shmd->shm_sptseg;
    sptd = sptseg->s_data;
    npages = (len >> PAGESHIFT);

    ASSERT(sptd->spt_pcachecnt != 0);
    ASSERT(sptd->spt_ppa == pplist);
    ASSERT(npages == btopr(sptd->spt_amp->size));
    ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    /*
     * Acquire the lock on the dummy seg and destroy the
     * ppa array IF this is the last pcachecnt.
     */
    mutex_enter(&sptd->spt_lock);
    if (--sptd->spt_pcachecnt == 0) {
        for (i = 0; i < npages; i++) {
            if (pplist[i] == NULL) {

            if (rw == S_WRITE) {
                hat_setrefmod(pplist[i]);
            } else {
                hat_setref(pplist[i]);
            }
            if ((sptd->spt_flags & SHM_PAGEABLE) &&
                (sptd->spt_ppa_lckcnt[i] == 0))

            page_unlock(pplist[i]);

        if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
            mutex_enter(&freemem_lock);
            availrmem += free_availrmem;
            mutex_exit(&freemem_lock);

        /*
         * Since we want to cache/uncache the entire ISM segment,
         * we will track the pplist in a segspt specific field
         * ppa, that is initialized at the time we add an entry to
         * the cache.
         */
        ASSERT(sptd->spt_pcachecnt == 0);
        kmem_free(pplist, sizeof (page_t *) * npages);
        sptd->spt_ppa = NULL;
        sptd->spt_flags &= ~DISM_PPA_CHANGED;

        cv_broadcast(&sptd->spt_cv);

    mutex_exit(&sptd->spt_lock);

    /*
     * If we are pcache async thread or called via seg_ppurge_wiredpp() we
     * may not hold AS lock (in this case async argument is not 0). This
     * means if softlockcnt drops to 0 after the decrement below address
     * space may get freed. We can't allow it since after softlock
     * decrement to 0 we still need to access as structure for possible
     * wakeup of unmap waiters. To prevent the disappearance of as we take
     * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
     * this mutex as a barrier to make sure this routine completes before
     *
     * The second complication we have to deal with in async case is a
     * possibility of missed wake up of unmap wait thread. When we don't
     * hold as lock here we may take a_contents lock before unmap wait
     * thread that was first to see softlockcnt was still not 0. As a
     * result we'll fail to wake up an unmap wait thread. To avoid this
     * race we set nounmapwait flag in as structure if we drop softlockcnt
     * to 0 if async is not 0. unmapwait thread
     * will not block if this flag is set.
     */
    mutex_enter(&shmd->shm_segfree_syncmtx);

    /*
     * Now decrement softlockcnt.
     */
    ASSERT(shmd->shm_softlockcnt > 0);
    atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));

    if (shmd->shm_softlockcnt <= 0) {
        if (async || AS_ISUNMAPWAIT(seg->s_as)) {
            mutex_enter(&seg->s_as->a_contents);
            AS_SETNOUNMAPWAIT(seg->s_as);
            if (AS_ISUNMAPWAIT(seg->s_as)) {
                AS_CLRUNMAPWAIT(seg->s_as);
                cv_broadcast(&seg->s_as->a_cv);
            }
            mutex_exit(&seg->s_as->a_contents);

    mutex_exit(&shmd->shm_segfree_syncmtx);
/*
 * Do a F_SOFTUNLOCK call over the range requested.
 * The range must have already been F_SOFTLOCK'ed.
 *
 * The calls to acquire and release the anon map lock mutex were
 * removed in order to avoid a deadly embrace during a DR
 * memory delete operation. (Eg. DR blocks while waiting for a
 * exclusive lock on a page that is being used for kaio; the
 * thread that will complete the kaio and call segspt_softunlock
 * blocks on the anon map lock; another thread holding the anon
 * map lock blocks on another page lock via the segspt_shmfault
 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
 *
 * The appropriateness of the removal is based upon the following:
 * 1. If we are holding a segment's reader lock and the page is held
 * shared, then the corresponding element in anonmap which points to
 * anon struct cannot change and there is no need to acquire the
 * anonymous map lock.
 * 2. Threads in segspt_softunlock have a reader lock on the segment
 * and already have the shared page lock, so we are guaranteed that
 * the anon map slot cannot change and therefore can call anon_get_ptr()
 * without grabbing the anonymous map lock.
 * 3. Threads that softlock a shared page break copy-on-write, even if
 * it's a read. Thus cow faults can be ignored with respect to soft
 * unlocking, since the breaking of cow means that the anon slot(s) will
 */
static void
segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
    size_t len, enum seg_rw rw)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct spt_data *sptd;
    struct anon_map *amp;       /* XXX - for locknest */
    struct anon *ap = NULL;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    sptseg = shmd->shm_sptseg;
    sptd = sptseg->s_data;

    /*
     * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
     * and therefore their pages are SE_SHARED locked
     * for the entire life of the segment.
     */
    if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
        ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
        goto softlock_decrement;

    /*
     * Any thread is free to do a page_find and
     * page_unlock() on the pages within this seg.
     *
     * We are already holding the as->a_lock on the user's
     * real segment, but we need to hold the a_lock on the
     * underlying dummy as. This is mostly to satisfy the
     * underlying HAT layer.
     */
    AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
    hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
    AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

    amp = sptd->spt_amp;
    ASSERT(amp != NULL);
    anon_index = seg_page(sptseg, sptseg_addr);

    for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
        ap = anon_get_ptr(amp->ahp, anon_index++);
        swap_xlate(ap, &vp, &offset);

        /*
         * Use page_find() instead of page_lookup() to
         * find the page since we know that it has a
         */
        pp = page_find(vp, offset);
        ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
            panic("segspt_softunlock: "
                "addr %p, ap %p, vp %p, off %llx",
                (void *)adr, (void *)ap, (void *)vp, offset);

        if (rw == S_WRITE) {
        } else if (rw != S_OTHER) {

softlock_decrement:
    npages = btopr(len);
    ASSERT(shmd->shm_softlockcnt >= npages);
    atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
    if (shmd->shm_softlockcnt == 0) {
        /*
         * All SOFTLOCKS are gone. Wakeup any waiting
         * unmappers so they can try again to unmap.
         * Check for waiters first without the mutex
         * held so we don't always grab the mutex on
         */
        if (AS_ISUNMAPWAIT(seg->s_as)) {
            mutex_enter(&seg->s_as->a_contents);
            if (AS_ISUNMAPWAIT(seg->s_as)) {
                AS_CLRUNMAPWAIT(seg->s_as);
                cv_broadcast(&seg->s_as->a_cv);
            }
            mutex_exit(&seg->s_as->a_contents);
static int
segspt_shmattach(struct seg *seg, caddr_t *argsp)
{
    struct shm_data *shmd_arg = (struct shm_data *)argsp;
    struct shm_data *shmd;
    struct anon_map *shm_amp = shmd_arg->shm_amp;
    struct spt_data *sptd;

    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);

    shmd->shm_sptas = shmd_arg->shm_sptas;
    shmd->shm_amp = shm_amp;
    shmd->shm_sptseg = shmd_arg->shm_sptseg;

    (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
        NULL, 0, seg->s_size);

    mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);

    seg->s_data = (void *)shmd;
    seg->s_ops = &segspt_shmops;
    seg->s_szc = shmd->shm_sptseg->s_szc;
    sptd = shmd->shm_sptseg->s_data;

    if (sptd->spt_flags & SHM_PAGEABLE) {
        if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
            KM_NOSLEEP)) == NULL) {
            seg->s_data = (void *)NULL;
            kmem_free(shmd, (sizeof (*shmd)));

        shmd->shm_lckpgs = 0;
        if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
            if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
                shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
                seg->s_size, seg->s_szc)) != 0) {
                kmem_free(shmd->shm_vpage,
                    btopr(shm_amp->size));

        error = hat_share(seg->s_as->a_hat, seg->s_base,
            shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
            seg->s_size, seg->s_szc);

        seg->s_data = (void *)NULL;
        kmem_free(shmd, (sizeof (*shmd)));

    ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
    ANON_LOCK_EXIT(&shm_amp->a_rwlock);
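    /*
     * Added note (assumption): the writer lock on shm_amp->a_rwlock taken
     * here protects the anon_map reference count adjustment for this
     * attach; see the comment in segspt_shmfree() about incrementing the
     * refcnt on attach and decrementing it on detach because of dup().
     */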
static int
segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;

    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    if (shmd->shm_softlockcnt > 0) {

    if (ssize != seg->s_size) {
        cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
            ssize, seg->s_size);

    (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
        NULL, 0);
    hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
static void
segspt_shmfree(struct seg *seg)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct anon_map *shm_amp = shmd->shm_amp;

    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
        MC_UNLOCK, NULL, 0);

    /*
     * Need to increment refcnt when attaching
     * and decrement when detaching because of dup().
     */
    ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
    ANON_LOCK_EXIT(&shm_amp->a_rwlock);

    if (shmd->shm_vpage) {  /* only for DISM */
        kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
        shmd->shm_vpage = NULL;
    }

    /*
     * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
     * still working with this segment without holding as lock.
     */
    ASSERT(shmd->shm_softlockcnt == 0);
    mutex_enter(&shmd->shm_segfree_syncmtx);
    mutex_destroy(&shmd->shm_segfree_syncmtx);

    kmem_free(shmd, sizeof (*shmd));
}
static int
segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    /*
     * Shared page table is more than shared mapping.
     * Individual process sharing page tables can't change prot
     * because there is only one set of page tables.
     * This will be allowed after private page table is
     * supported.
     */
    /* need to return correct status error? */
static faultcode_t
segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct seg *sptseg = shmd->shm_sptseg;
    struct as *curspt = shmd->shm_sptas;
    struct spt_data *sptd = sptseg->s_data;
    caddr_t segspt_addr, shm_addr;
    int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    /*
     * Because of the way spt is implemented
     * the realsize of the segment does not have to be
     * equal to the segment size itself. The segment size is
     * often in multiples of a page size larger than PAGESIZE.
     * The realsize is rounded up to the nearest PAGESIZE
     * based on what the user requested. This is a bit of
     * ugliness that is historical but not easily fixed
     * without re-designing the higher levels of ISM.
     */
    ASSERT(addr >= seg->s_base);
    if (((addr + len) - seg->s_base) > sptd->spt_realsize)

    /*
     * For all of the following cases except F_PROT, we need to
     * make any necessary adjustments to addr and len
     * and get all of the necessary page_t's into an array called ppa[].
     *
     * The code in shmat() forces base addr and len of ISM segment
     * to be aligned to largest page size supported. Therefore,
     * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
     * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
     * in large pagesize chunks, or else we will screw up the HAT
     * layer by calling hat_memload_array() with differing page sizes
     * over a given virtual range.
     */
    pgsz = page_get_pagesize(sptseg->s_szc);
    pgcnt = page_get_pagecnt(sptseg->s_szc);
    shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
    size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
    npages = btopr(size);
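    /*
     * Added worked example (not from the original source): with 4 MB shared
     * pages, a fault at addr = base + 0x40a000 for len = 0x2000 yields
     * shm_addr = base + 0x400000 (P2ALIGN) and size = 0x400000 (P2ROUNDUP),
     * so the whole 4 MB large page containing the fault is handled at once.
     */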
    /*
     * Now we need to convert from addr in segshm to addr in segspt.
     */
    an_idx = seg_page(seg, shm_addr);
    segspt_addr = sptseg->s_base + ptob(an_idx);

    ASSERT((segspt_addr + ptob(npages)) <=
        (sptseg->s_base + sptd->spt_realsize));
    ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));

        atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
        /*
         * Fall through to the F_INVAL case to load up the hat layer
         * entries with the HAT_LOAD_LOCK flag.
         */

        if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))

        ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);

        err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);

            if (type == F_SOFTLOCK) {
                atomic_add_long((ulong_t *)(
                    &(shmd->shm_softlockcnt)), -npages);

        AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);

        if (type == F_SOFTLOCK) {
            /*
             * Load up the translation keeping it
             * locked and don't unlock the page.
             */
            for (; pidx < npages; a += pgsz, pidx += pgcnt) {
                hat_memload_array(sptseg->s_as->a_hat,
                    a, pgsz, &ppa[pidx], sptd->spt_prot,
                    HAT_LOAD_LOCK | HAT_LOAD_SHARE);

            if (hat == seg->s_as->a_hat) {
                /*
                 * Migrate pages marked for migration
                 */
                if (lgrp_optimizations())
                    page_migrate(seg, shm_addr, ppa,

                for (; pidx < npages;
                    a += pgsz, pidx += pgcnt) {
                    hat_memload_array(sptseg->s_as->a_hat,
                        a, pgsz, &ppa[pidx],

                /* XHAT. Pass real address */
                hat_memload_array(hat, shm_addr,
                    size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);

        /*
         * And now drop the SE_SHARED lock(s).
         */
        if (dyn_ism_unmap) {
            for (i = 0; i < npages; i++) {
                page_unlock(ppa[i]);

        if (!dyn_ism_unmap) {
            if (hat_share(seg->s_as->a_hat, shm_addr,
                curspt->a_hat, segspt_addr, ptob(npages),
                panic("hat_share err in DISM fault");

            if (type == F_INVAL) {
                for (i = 0; i < npages; i++) {
                    page_unlock(ppa[i]);

        AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

        kmem_free(ppa, npages * sizeof (page_t *));

        /*
         * This is a bit ugly, we pass in the real seg pointer,
         * but the segspt_addr is the virtual address within the
         */
        segspt_softunlock(seg, segspt_addr, size, rw);

        /*
         * This takes care of the unusual case where a user
         * allocates a stack in shared memory and a register
         * window overflow is written to that stack page before
         * it is otherwise modified.
         *
         * We can get away with this because ISM segments are
         * always rw. Other than this unusual case, there
         * should be no instances of protection violations.
         */

        panic("segspt_dismfault default type?");
static faultcode_t
segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct seg *sptseg = shmd->shm_sptseg;
    struct as *curspt = shmd->shm_sptas;
    struct spt_data *sptd = sptseg->s_data;
    caddr_t sptseg_addr, shm_addr;
    ulong_t anon_index = 0;
    struct anon_map *amp;       /* XXX - for locknest */
    struct anon *ap = NULL;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    if (sptd->spt_flags & SHM_PAGEABLE) {
        return (segspt_dismfault(hat, seg, addr, len, type, rw));

    /*
     * Because of the way spt is implemented
     * the realsize of the segment does not have to be
     * equal to the segment size itself. The segment size is
     * often in multiples of a page size larger than PAGESIZE.
     * The realsize is rounded up to the nearest PAGESIZE
     * based on what the user requested. This is a bit of
     * ugliness that is historical but not easily fixed
     * without re-designing the higher levels of ISM.
     */
    ASSERT(addr >= seg->s_base);
    if (((addr + len) - seg->s_base) > sptd->spt_realsize)

    /*
     * For all of the following cases except F_PROT, we need to
     * make any necessary adjustments to addr and len
     * and get all of the necessary page_t's into an array called ppa[].
     *
     * The code in shmat() forces base addr and len of ISM segment
     * to be aligned to largest page size supported. Therefore,
     * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
     * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
     * in large pagesize chunks, or else we will screw up the HAT
     * layer by calling hat_memload_array() with differing page sizes
     * over a given virtual range.
     */
    pgsz = page_get_pagesize(sptseg->s_szc);
    pgcnt = page_get_pagecnt(sptseg->s_szc);
    shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
    size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
    npages = btopr(size);

    /*
     * Now we need to convert from addr in segshm to addr in segspt.
     */
    anon_index = seg_page(seg, shm_addr);
    sptseg_addr = sptseg->s_base + ptob(anon_index);
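    /*
     * Added note: the segshm-to-segspt conversion works because both
     * segments map the same anon_map slots; for example, a shm_addr one
     * page past seg->s_base gives anon_index 1, so the fault is serviced at
     * sptseg->s_base + ptob(1) in the dummy spt address space.
     */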
    /*
     * And now we may have to adjust npages downward if we have
     * exceeded the realsize of the segment or initial anon
     */
    if ((sptseg_addr + ptob(npages)) >
        (sptseg->s_base + sptd->spt_realsize))
        size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;

    npages = btopr(size);

    ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
    ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);

        /*
         * availrmem is decremented once during anon_swap_adjust()
         * and is incremented during the anon_unresv(), which is
         * called from shm_rm_amp() when the segment is destroyed.
         */
        atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
        /*
         * Some platforms assume that ISM pages are SE_SHARED
         * locked for the entire life of the segment.
         */
        if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
        /*
         * Fall through to the F_INVAL case to load up the hat layer
         * entries with the HAT_LOAD_LOCK flag.
         */

        if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))

        /*
         * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
         * may still rely on this call to hat_share(). That
         * would imply that those hat's can fault on a
         * HAT_LOAD_LOCK translation, which would seem
         */
        if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
            if (hat_share(seg->s_as->a_hat, seg->s_base,
                curspt->a_hat, sptseg->s_base,
                sptseg->s_size, sptseg->s_szc) != 0) {
                panic("hat_share error in ISM fault");

        ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);

        /*
         * I see no need to lock the real seg,
         * here, because all of our work will be on the underlying
         *
         * sptseg_addr and npages now account for large pages.
         */
        amp = sptd->spt_amp;
        ASSERT(amp != NULL);
        anon_index = seg_page(sptseg, sptseg_addr);

        ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
        for (i = 0; i < npages; i++) {
            ap = anon_get_ptr(amp->ahp, anon_index++);
            swap_xlate(ap, &vp, &offset);
            pp = page_lookup(vp, offset, SE_SHARED);

        ANON_LOCK_EXIT(&amp->a_rwlock);
        ASSERT(i == npages);

        /*
         * We are already holding the as->a_lock on the user's
         * real segment, but we need to hold the a_lock on the
         * underlying dummy as. This is mostly to satisfy the
         * underlying HAT layer.
         */
        AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);

        if (type == F_SOFTLOCK) {
            /*
             * Load up the translation keeping it
             * locked and don't unlock the page.
             */
            for (; pidx < npages; a += pgsz, pidx += pgcnt) {
                sz = MIN(pgsz, ptob(npages - pidx));
                hat_memload_array(sptseg->s_as->a_hat, a,
                    sz, &ppa[pidx], sptd->spt_prot,
                    HAT_LOAD_LOCK | HAT_LOAD_SHARE);

            if (hat == seg->s_as->a_hat) {
                /*
                 * Migrate pages marked for migration.
                 */
                if (lgrp_optimizations())
                    page_migrate(seg, shm_addr, ppa,

                for (; pidx < npages;
                    a += pgsz, pidx += pgcnt) {
                    sz = MIN(pgsz, ptob(npages - pidx));
                    hat_memload_array(sptseg->s_as->a_hat,
                        sptd->spt_prot, HAT_LOAD_SHARE);

                /* XHAT. Pass real address */
                hat_memload_array(hat, shm_addr,
                    ptob(npages), ppa, sptd->spt_prot,

        /*
         * And now drop the SE_SHARED lock(s).
         */
        for (i = 0; i < npages; i++)
            page_unlock(ppa[i]);

        AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

        kmem_free(ppa, sizeof (page_t *) * npages);

        /*
         * This is a bit ugly, we pass in the real seg pointer,
         * but the sptseg_addr is the virtual address within the
         */
        segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);

        /*
         * This takes care of the unusual case where a user
         * allocates a stack in shared memory and a register
         * window overflow is written to that stack page before
         * it is otherwise modified.
         *
         * We can get away with this because ISM segments are
         * always rw. Other than this unusual case, there
         * should be no instances of protection violations.
         */

        cmn_err(CE_WARN, "segspt_shmfault default type?");
static faultcode_t
segspt_shmfaulta(struct seg *seg, caddr_t addr)
{
    return (0);
}

static int
segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
    return (0);
}

static size_t
segspt_shmswapout(struct seg *seg)
{
    return (0);
}
/*
 * duplicate the shared page tables
 */
static int
segspt_shmdup(struct seg *seg, struct seg *newseg)
{
	struct shm_data		*shmd = (struct shm_data *)seg->s_data;
	struct anon_map		*amp = shmd->shm_amp;
	struct shm_data		*shmd_new;
	struct seg		*spt_seg = shmd->shm_sptseg;
	struct spt_data		*sptd = spt_seg->s_data;
	int			error = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
	newseg->s_data = (void *)shmd_new;
	shmd_new->shm_sptas = shmd->shm_sptas;
	shmd_new->shm_amp = amp;
	shmd_new->shm_sptseg = shmd->shm_sptseg;
	newseg->s_ops = &segspt_shmops;
	newseg->s_szc = seg->s_szc;
	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	amp->refcnt++;
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
		shmd_new->shm_lckpgs = 0;
		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if ((error = hat_share(newseg->s_as->a_hat,
			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
			    seg->s_size, seg->s_szc)) != 0) {
				kmem_free(shmd_new->shm_vpage,
				    btopr(amp->size));
			}
		}
		return (error);
	} else {
		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
		    seg->s_szc));
	}
}
/*ARGSUSED*/
static int
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
}
/*
 * Return an array of locked large pages, for empty slots allocate
 * private zero-filled anon pages.
 */
static int
spt_anon_getpages(
	struct seg *sptseg,
	caddr_t sptaddr,
	size_t len,
	page_t *ppa[])
{
	struct spt_data *sptd = sptseg->s_data;
	struct anon_map *amp = sptd->spt_amp;
	enum seg_rw rw = sptd->spt_prot;
	uint_t	szc = sptseg->s_szc;
	size_t	pg_sz, share_sz = page_get_pagesize(szc);
	pgcnt_t	lp_npgs;
	caddr_t	lp_addr, e_sptaddr;
	uint_t	vpprot, ppa_szc = 0;
	struct vpage *vpage = NULL;
	ulong_t	j, ppa_idx;
	int	err, ierr = 0;
	pgcnt_t	an_idx;
	anon_sync_obj_t cookie;
	int anon_locked = 0;
	pgcnt_t amp_pgs;

	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
	ASSERT(len != 0);

	pg_sz = share_sz;
	lp_npgs = btop(pg_sz);
	lp_addr = sptaddr;
	e_sptaddr = sptaddr + len;
	an_idx = seg_page(sptseg, sptaddr);
	ppa_idx = 0;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	amp_pgs = page_get_pagecnt(amp->a_szc);

	/*CONSTCOND*/
	while (1) {
		for (; lp_addr < e_sptaddr;
		    an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {

			/*
			 * If we're currently locked, and we get to a new
			 * page, unlock our current anon chunk.
			 */
			if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
				anon_array_exit(&cookie);
				anon_locked = 0;
			}
			if (!anon_locked) {
				anon_array_enter(amp, an_idx, &cookie);
				anon_locked = 1;
			}
			ppa_szc = (uint_t)-1;
			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);

			if (ierr != 0) {
				if (ierr > 0) {
					err = FC_MAKE_ERR(ierr);
					goto lpgs_err;
				}
				break;
			}
		}
		if (lp_addr == e_sptaddr) {
			break;
		}
		ASSERT(lp_addr < e_sptaddr);

		/*
		 * ierr == -1 means we failed to allocate a large page.
		 * so do a size down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages. So do a size up
		 * operation. This relies on the fact that large pages are
		 * never partially shared i.e. if we share any constituent
		 * page of a large page with another process we must share the
		 * entire large page. Note this cannot happen for SOFTLOCK
		 * case, unless current address (lpaddr) is at the beginning
		 * of the next page size boundary because the other process
		 * couldn't have relocated locked pages.
		 */
		ASSERT(ierr == -1 || ierr == -2);
		if (segvn_anypgsz) {
			ASSERT(ierr == -2 || szc != 0);
			ASSERT(ierr == -1 || szc < sptseg->s_szc);
			szc = (ierr == -1) ? szc - 1 : szc + 1;
		} else {
			/*
			 * For faults and segvn_anypgsz == 0
			 * we need to be careful not to loop forever
			 * if existing page is found with szc other
			 * than 0 or seg->s_szc. This could be due
			 * to page relocations on behalf of DR or
			 * more likely large page creation. For this
			 * case simply re-size to existing page's szc
			 * if returned by anon_map_getpages().
			 */
			if (ppa_szc == (uint_t)-1) {
				szc = (ierr == -1) ? 0 : sptseg->s_szc;
			} else {
				ASSERT(ppa_szc <= sptseg->s_szc);
				ASSERT(ierr == -2 || ppa_szc < szc);
				ASSERT(ierr == -1 || ppa_szc > szc);
				szc = ppa_szc;
			}
		}
		pg_sz = page_get_pagesize(szc);
		lp_npgs = btop(pg_sz);
		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
	}
	if (anon_locked) {
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	return (0);

lpgs_err:
	if (anon_locked) {
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	for (j = 0; j < ppa_idx; j++)
		page_unlock(ppa[j]);
	return (err);
}
/*
 * count the number of bytes in a set of spt pages that are currently not
 * locked
 */
static rctl_qty_t
spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
{
	ulong_t	i;
	rctl_qty_t unlocked = 0;

	for (i = 0; i < npages; i++) {
		if (ppa[i]->p_lckcnt == 0)
			unlocked += PAGESIZE;
	}
	return (unlocked);
}

extern	u_longlong_t randtick(void);
/* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
#define	NLCK	(NCPU_P2)
/* Random number with a range [0, n-1], n must be power of two */
#define	RAND_P2(n)	\
	((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
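/*
 * Worked example with hypothetical numbers: on a machine where NCPU_P2 is 64,
 * RAND_P2(NLCK) falls in [0, 63], so each batch reserves between 64 and 127
 * page locks.  The per-thread randomization keeps competing threads from
 * lining up on freemem_lock in lockstep.
 */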
static int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos,
    rctl_qty_t *locked)
{
	struct shm_data *shmd = seg->s_data;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	ulong_t	i;
	int	kernel;
	pgcnt_t	nlck = 0;
	int	rv = 0;
	int	use_reserved = 1;

	/* return the number of bytes actually locked */
	*locked = 0;

	/*
	 * To avoid contention on freemem_lock, availrmem and pages_locked
	 * global counters are updated only every nlck locked pages instead of
	 * every time.  Reserve nlck locks up front and deduct from this
	 * reservation for each page that requires a lock.  When the
	 * reservation is consumed, reserve again.  nlck is randomized, so the
	 * competing threads do not fall into a cyclic lock contention pattern.
	 * When memory is low, the lock ahead is disabled, and instead
	 * page_pp_lock() is used to lock pages.
	 */
	for (i = 0; i < npages; anon_index++, pos++, i++) {
		if (nlck == 0 && use_reserved == 1) {
			nlck = NLCK + RAND_P2(NLCK);
			/* if fewer loops left, decrease nlck */
			nlck = MIN(nlck, npages - i);
			/*
			 * Reserve nlck locks up front and deduct from this
			 * reservation for each page that requires a lock. When
			 * the reservation is consumed, reserve again.
			 */
			mutex_enter(&freemem_lock);
			if ((availrmem - nlck) < pages_pp_maximum) {
				/* Do not do advance memory reserves */
				use_reserved = 0;
			} else {
				availrmem	-= nlck;
				pages_locked	+= nlck;
			}
			mutex_exit(&freemem_lock);
		}
		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
			if (sptd->spt_ppa_lckcnt[anon_index] <
			    (ushort_t)DISM_LOCK_MAX) {
				if (++sptd->spt_ppa_lckcnt[anon_index] ==
				    (ushort_t)DISM_LOCK_MAX) {
					cmn_err(CE_WARN,
					    "DISM page lock limit "
					    "reached on DISM offset 0x%lx\n",
					    anon_index << PAGESHIFT);
				}
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[anon_index]);
				if (!page_pp_lock(ppa[i], 0, kernel ||
				    use_reserved)) {
					sptd->spt_ppa_lckcnt[anon_index]--;
					rv = EAGAIN;
					break;
				}
				/* if this is a newly locked page, count it */
				if (ppa[i]->p_lckcnt == 1) {
					if (kernel == 0 && use_reserved == 1)
						nlck--;
					*locked += PAGESIZE;
				}
				shmd->shm_lckpgs++;
				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
				if (lockmap != NULL)
					BT_SET(lockmap, pos);
			}
		}
	}
	/* Return unused lock reservation */
	if (nlck != 0 && use_reserved == 1) {
		mutex_enter(&freemem_lock);
		availrmem	+= nlck;
		pages_locked	-= nlck;
		mutex_exit(&freemem_lock);
	}

	return (rv);
}
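/*
 * spt_lockpages() above charges availrmem and pages_locked in randomized
 * batches of nlck pages so that freemem_lock is taken only occasionally; any
 * reservation left over when the loop finishes, or when page_pp_lock() fails
 * and the loop breaks out early, is credited back before returning.
 */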
static int
spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    rctl_qty_t *unlocked)
{
	struct shm_data	*shmd = seg->s_data;
	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
	struct anon_map	*amp = sptd->spt_amp;
	struct anon	*ap;
	struct vnode	*vp;
	u_offset_t	off;
	struct page	*pp;
	int		kernel;
	anon_sync_obj_t	cookie;
	ulong_t		i;
	pgcnt_t		nlck = 0;
	pgcnt_t		nlck_limit = NLCK;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	for (i = 0; i < npages; i++, anon_index++) {
		if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			ASSERT(ap);

			swap_xlate(ap, &vp, &off);
			anon_array_exit(&cookie);
			pp = page_lookup(vp, off, SE_SHARED);
			ASSERT(pp);
			/*
			 * availrmem is decremented only for pages which are not
			 * in seg pcache, for pages in seg pcache availrmem was
			 * decremented in _dismpagelock()
			 */
			kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
			ASSERT(pp->p_lckcnt > 0);

			/*
			 * unlock page but do not change availrmem, we do it
			 * ourselves every nlck loops.
			 */
			page_pp_unlock(pp, 0, 1);
			if (pp->p_lckcnt == 0) {
				if (kernel == 0)
					nlck++;
				*unlocked += PAGESIZE;
			}
			page_unlock(pp);
			shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
			sptd->spt_ppa_lckcnt[anon_index]--;
			shmd->shm_lckpgs--;
		}

		/*
		 * To reduce freemem_lock contention, do not update availrmem
		 * until at least NLCK pages have been unlocked.
		 * 1. No need to update if nlck is zero
		 * 2. Always update if the last iteration
		 */
		if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
			mutex_enter(&freemem_lock);
			availrmem	+= nlck;
			pages_locked	-= nlck;
			mutex_exit(&freemem_lock);
			nlck = 0;
			nlck_limit = NLCK + RAND_P2(NLCK);
		}
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	return (0);
}
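/*
 * spt_unlockpages() is the mirror image: pages whose p_lckcnt drops to zero
 * accumulate in nlck and are credited back to availrmem and pages_locked only
 * once nlck reaches the current randomized limit or the loop is on its last
 * iteration, again bounding how often freemem_lock is taken.
 */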
/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	struct kshmid	*sp = sptd->spt_amp->a_sp;
	pgcnt_t		npages, a_npages;
	page_t		**ppa;
	pgcnt_t		an_idx, a_an_idx, ppa_idx;
	caddr_t		spt_addr, a_addr;	/* spt and aligned address */
	size_t		a_len;			/* aligned len */
	size_t		share_sz;
	ulong_t		i;
	int		sts = 0;
	rctl_qty_t	unlocked = 0;
	rctl_qty_t	locked = 0;
	struct proc	*p = curproc;
	kproject_t	*proj;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(sp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		return (0);
	}

	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	an_idx = seg_page(seg, addr);
	npages = btopr(len);

	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
		return (ENOMEM);
	}

	/*
	 * A shm's project never changes, so no lock needed.
	 * The shm has a hold on the project, so it will not go away.
	 * Since we have a mapping to shm within this zone, we know
	 * that the zone will not go away.
	 */
	proj = sp->shm_perm.ipc_proj;

	if (op == MC_LOCK) {

		/*
		 * Need to align addr and size request if they are not
		 * aligned so we can always allocate large page(s) however
		 * we only lock what was requested in initial request.
		 */
		share_sz = page_get_pagesize(sptseg->s_szc);
		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
		    share_sz);
		a_npages = btop(a_len);
		a_an_idx = seg_page(seg, a_addr);
		spt_addr = sptseg->s_base + ptob(a_an_idx);
		ppa_idx = an_idx - a_an_idx;

		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
		    KM_NOSLEEP)) == NULL) {
			return (ENOMEM);
		}

		/*
		 * Don't cache any new pages for IO and
		 * flush any cached pages.
		 */
		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
		if (sts != 0) {
			mutex_exit(&sptd->spt_lock);
			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
			return (sts);
		}

		mutex_enter(&sp->shm_mlock);
		/* enforce locked memory rctl */
		unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);

		mutex_enter(&p->p_lock);
		if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
			mutex_exit(&p->p_lock);
			sts = EAGAIN;
		} else {
			mutex_exit(&p->p_lock);
			sts = spt_lockpages(seg, an_idx, npages,
			    &ppa[ppa_idx], lockmap, pos, &locked);

			/*
			 * correct locked count if not all pages could be
			 * locked
			 */
			if ((unlocked - locked) > 0) {
				rctl_decr_locked_mem(NULL, proj,
				    (unlocked - locked), 0);
			}
		}
		/*
		 * unlock pages
		 */
		for (i = 0; i < a_npages; i++)
			page_unlock(ppa[i]);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sp->shm_mlock);
		mutex_exit(&sptd->spt_lock);

		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

	} else if (op == MC_UNLOCK) { /* unlock */

		mutex_enter(&sptd->spt_lock);
		if (shmd->shm_lckpgs == 0) {
			mutex_exit(&sptd->spt_lock);
			return (0);
		}
		/*
		 * Don't cache new IO pages.
		 */
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		mutex_enter(&sp->shm_mlock);
		sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
		if ((ppa = sptd->spt_ppa) != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		rctl_decr_locked_mem(NULL, proj, unlocked, 0);
		mutex_exit(&sp->shm_mlock);

		if (ppa != NULL)
			seg_ppurge_wiredpp(ppa);
	}
	return (sts);
}
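/*
 * Illustrative user-level sketch (not part of the original file): the MC_LOCK
 * and MC_UNLOCK paths above are reached through mlock(3C)/munlock(3C) on a
 * pageable DISM segment attached with SHM_PAGEABLE.  Error handling omitted.
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>

static void
lock_dism_example(size_t len)
{
	int id = shmget(IPC_PRIVATE, len, IPC_CREAT | 0600);
	caddr_t va = shmat(id, NULL, SHM_PAGEABLE);	/* DISM mapping */

	(void) mlock(va, len);		/* drives segspt_shmlockop(MC_LOCK) */
	(void) munlock(va, len);	/* drives segspt_shmlockop(MC_UNLOCK) */
}
#endif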
/*ARGSUSED*/
static int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	spgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	while (--pgno >= 0)
		*protv++ = sptd->spt_prot;
	return (0);
}
/*ARGSUSED*/
static u_offset_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/* Offset does not matter in ISM memory */

	return ((u_offset_t)0);
}
/*ARGSUSED*/
static int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * The shared memory mapping is always MAP_SHARED, SWAP is only
	 * reserved for DISM
	 */
	return (MAP_SHARED |
	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}
/*ARGSUSED*/
static int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	*vpp = sptd->spt_vp;
	return (0);
}
/*
 * We need to wait for pending IO to complete to a DISM segment in order for
 * pages to get kicked out of the seg_pcache.  120 seconds should be more
 * than enough time to wait.
 */
static clock_t spt_pcache_wait = 120;
/*ARGSUSED*/
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct shm_data	*shmd = (struct shm_data *)seg->s_data;
	struct spt_data	*sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	struct anon_map	*amp;
	pgcnt_t		pg_idx;
	uint_t		gen;
	clock_t		end_lbolt;
	int		writer;
	page_t		**ppa;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (behav == MADV_FREE) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
			return (0);

		amp = sptd->spt_amp;
		pg_idx = seg_page(seg, addr);

		mutex_enter(&sptd->spt_lock);
		if ((ppa = sptd->spt_ppa) == NULL) {
			mutex_exit(&sptd->spt_lock);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_disclaim(amp, pg_idx, len);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (0);
		}

		sptd->spt_flags |= DISM_PPA_CHANGED;
		gen = sptd->spt_gen;

		mutex_exit(&sptd->spt_lock);

		/*
		 * Purge all DISM cached pages
		 */
		seg_ppurge_wiredpp(ppa);

		/*
		 * Drop the AS_LOCK so that other threads can grab it
		 * in the as_pageunlock path and hopefully get the segment
		 * kicked out of the seg_pcache.  We bump the shm_softlockcnt
		 * to keep this segment resident.
		 */
		writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
		atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
		AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);

		mutex_enter(&sptd->spt_lock);

		end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);

		/*
		 * Try to wait for pages to get kicked out of the seg_pcache.
		 */
		while (sptd->spt_gen == gen &&
		    (sptd->spt_flags & DISM_PPA_CHANGED) &&
		    ddi_get_lbolt() < end_lbolt) {
			if (!cv_timedwait_sig(&sptd->spt_cv,
			    &sptd->spt_lock, end_lbolt)) {
				break;
			}
		}

		mutex_exit(&sptd->spt_lock);

		/* Regrab the AS_LOCK and release our hold on the segment */
		AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
		    writer ? RW_WRITER : RW_READER);
		atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
		if (shmd->shm_softlockcnt <= 0) {
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				mutex_enter(&seg->s_as->a_contents);
				if (AS_ISUNMAPWAIT(seg->s_as)) {
					AS_CLRUNMAPWAIT(seg->s_as);
					cv_broadcast(&seg->s_as->a_cv);
				}
				mutex_exit(&seg->s_as->a_contents);
			}
		}

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		anon_disclaim(amp, pg_idx, len);
		ANON_LOCK_EXIT(&amp->a_rwlock);
	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
		int			already_set;
		ulong_t			anon_index;
		lgrp_mem_policy_t	policy;
		caddr_t			shm_addr;
		size_t			share_size;
		size_t			size;
		struct seg		*sptseg = shmd->shm_sptseg;
		caddr_t			sptseg_addr;

		/*
		 * Align address and length to page size of underlying segment
		 */
		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
		    share_size);

		amp = shmd->shm_amp;
		anon_index = seg_page(seg, shm_addr);

		/*
		 * And now we may have to adjust size downward if we have
		 * exceeded the realsize of the segment or initial anon
		 * allocations.
		 */
		sptseg_addr = sptseg->s_base + ptob(anon_index);
		if ((sptseg_addr + size) >
		    (sptseg->s_base + sptd->spt_realsize))
			size = (sptseg->s_base + sptd->spt_realsize) -
			    sptseg_addr;

		/*
		 * Set memory allocation policy for this segment
		 */
		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
		    NULL, 0, len);

		/*
		 * If random memory allocation policy set already,
		 * don't bother reapplying it.
		 */
		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
			return (0);

		/*
		 * Mark any existing pages in the given range for
		 * migration, flushing the I/O page cache, and using
		 * underlying segment to calculate anon index and get
		 * anonmap and vnode pointer from
		 */
		if (shmd->shm_softlockcnt > 0)
			segspt_purge(seg);

		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
	}

	return (0);
}
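/*
 * Illustrative user-level sketch (not part of the original file): the
 * MADV_FREE path above is reached via madvise(3C) on a DISM mapping, after
 * which the kernel purges its cached page lists and disclaims the anon
 * pages.  Error handling omitted.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>

static void
free_dism_range_example(caddr_t va, size_t len)
{
	/* Tell the kernel the range no longer needs its contents. */
	(void) madvise(va, len, MADV_FREE);
}
#endif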
/*ARGSUSED*/
static void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}

/*ARGSUSED*/
static int
segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}
/*
 * get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct shm_data	*shmd = (struct shm_data *)seg->s_data;
	struct anon	*ap;
	size_t		anon_index;
	struct anon_map	*amp = shmd->shm_amp;
	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	anon_sync_obj_t	cookie;

	anon_index = seg_page(seg, addr);

	if (addr > (seg->s_base + sptd->spt_realsize)) {
		return (EFAULT);
	}

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	anon_array_enter(amp, anon_index, &cookie);
	ap = anon_get_ptr(amp->ahp, anon_index);
	if (ap == NULL) {
		struct page *pp;
		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
		if (pp == NULL) {
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (ENOMEM);
		}
		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
		page_unlock(pp);
	}
	anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
	memidp->val[0] = (uintptr_t)ap;
	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
	return (0);
}
/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map		*amp;
	ulong_t			anon_index;
	lgrp_mem_policy_info_t	*policy_info;
	struct shm_data		*shm_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segshm
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	shm_data = (struct shm_data *)seg->s_data;
	if (shm_data == NULL)
		return (NULL);
	amp = shm_data->shm_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}
/*ARGSUSED*/
static int
segspt_shmcapable(struct seg *seg, segcapability_t capability)