/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Joyent, Inc. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm_impl.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/project.h>
#include <sys/tnf_probe.h>
#define	SEGSPTADDR	(caddr_t)0x0

/*
 * # pages used for spt
 */

/*
 * segspt_minfree is the memory left for system after ISM
 * locked its pages; it is set up to 5% of availrmem in
 * sptcreate when ISM is created. ISM should not use more
 * than ~90% of availrmem; if it does, then the performance
 * of the system may decrease. Machines with large memories may
 * be able to use up more memory for ISM so we set the default
 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
 * If somebody wants even more memory for ISM (risking hanging
 * the system) they can patch segspt_minfree to a smaller number.
 */
pgcnt_t segspt_minfree = 0;
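
/*
 * Illustrative numbers only (not taken from the code): with availrmem at
 * 1,000,000 pages, sptcreate() below would set segspt_minfree to
 * availrmem/20 = 50,000 pages, leaving ISM able to lock at most ~950,000
 * pages, i.e. roughly 95% of availrmem.
 */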
static int segspt_create(struct seg *seg, caddr_t argsp);
static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_free(struct seg *seg);
static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
	panic("segspt_badop called");

#define	SEGSPT_BADOP(t)	(t(*)())segspt_badop
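
/*
 * As the ops table below shows, the spt "dummy" segment is only ever
 * manipulated through the shm segments that attach to it, so most of its
 * seg_ops entries are wired to the panicking SEGSPT_BADOP() stub; only a
 * few entries (such as .unmap and .getpolicy) point at real handlers.
 * User-visible operations go through segspt_shmops instead.
 */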
static const struct seg_ops segspt_ops = {
	.dup		= SEGSPT_BADOP(int),
	.unmap		= segspt_unmap,
	.fault		= SEGSPT_BADOP(int),
	.faulta		= SEGSPT_BADOP(faultcode_t),
	.setprot	= SEGSPT_BADOP(int),
	.checkprot	= SEGSPT_BADOP(int),
	.kluster	= SEGSPT_BADOP(int),
	.sync		= SEGSPT_BADOP(int),
	.incore		= SEGSPT_BADOP(size_t),
	.lockop		= SEGSPT_BADOP(int),
	.getprot	= SEGSPT_BADOP(int),
	.getoffset	= SEGSPT_BADOP(uoff_t),
	.gettype	= SEGSPT_BADOP(int),
	.getvp		= SEGSPT_BADOP(int),
	.advise		= SEGSPT_BADOP(int),
	.dump		= SEGSPT_BADOP(void),
	.pagelock	= SEGSPT_BADOP(int),
	.setpagesize	= SEGSPT_BADOP(int),
	.getmemid	= SEGSPT_BADOP(int),
	.getpolicy	= segspt_getpolicy,
	.capable	= SEGSPT_BADOP(int),
};
static int segspt_shmdup(struct seg *seg, struct seg *newseg);
static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_shmfree(struct seg *seg);
static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
    caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
    register size_t len, register uint_t prot);
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
    uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
    char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
    int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
    uint_t *protv);
static uoff_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
    uint_t behav);
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
    struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
const struct seg_ops segspt_shmops = {
	.dup		= segspt_shmdup,
	.unmap		= segspt_shmunmap,
	.free		= segspt_shmfree,
	.fault		= segspt_shmfault,
	.faulta		= segspt_shmfaulta,
	.setprot	= segspt_shmsetprot,
	.checkprot	= segspt_shmcheckprot,
	.kluster	= segspt_shmkluster,
	.sync		= segspt_shmsync,
	.incore		= segspt_shmincore,
	.lockop		= segspt_shmlockop,
	.getprot	= segspt_shmgetprot,
	.getoffset	= segspt_shmgetoffset,
	.gettype	= segspt_shmgettype,
	.getvp		= segspt_shmgetvp,
	.advise		= segspt_shmadvise,
	.pagelock	= segspt_shmpagelock,
	.getmemid	= segspt_shmgetmemid,
	.getpolicy	= segspt_shmgetpolicy,
};
static void segspt_purge(struct seg *seg);
static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
    uint_t prot, uint_t flags, uint_t share_szc)
	struct segspt_crargs sptcargs;

	if (segspt_minfree == 0)	/* leave min 5% of availrmem for */
		segspt_minfree = availrmem/20;	/* for the system */

	if (!hat_supported(HAT_SHARED_PT, NULL))

	/*
	 * get a new as for this shared memory segment
	 */
	newas->a_proc = NULL;

	sptcargs.prot = prot;
	sptcargs.flags = flags;
	sptcargs.szc = share_szc;

	/*
	 * create a shared page table (spt) segment
	 */
	if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {

	*sptseg = sptcargs.seg_spt;
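
/*
 * A hedged usage sketch (the names and error handling here are illustrative,
 * not lifted from shm.c): the System V shared memory layer is the expected
 * caller, pairing sptcreate()/sptdestroy() per shared memory identifier:
 *
 *	struct seg *sptseg;
 *	if (sptcreate(amp->size, &sptseg, amp, prot, flags, share_szc) == 0) {
 *		// remember sptseg (and the dummy as it lives in) for shmat()
 *	}
 *	...
 *	sptdestroy(sptseg->s_as, amp);	// tears the whole segment down
 */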
sptdestroy(struct as *as, struct anon_map *amp)
	(void) as_unmap(as, SEGSPTADDR, amp->size);
/*
 * called from seg_free().
 * free (i.e., unlock, unmap, return to free list)
 * all the pages in the given seg.
 */
segspt_free(struct seg *seg)
	struct spt_data *sptd = (struct spt_data *)seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if (sptd->spt_realsize)
		segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);

	if (sptd->spt_ppa_lckcnt) {
		kmem_free(sptd->spt_ppa_lckcnt,
		    sizeof (*sptd->spt_ppa_lckcnt)
		    * btopr(sptd->spt_amp->size));
	kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
	cv_destroy(&sptd->spt_cv);
	mutex_destroy(&sptd->spt_lock);
	kmem_free(sptd, sizeof (*sptd));
segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		while (addr < eo_seg) {
			/* page exists, and it's locked. */
			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
		struct anon_map *amp = shmd->shm_amp;
		anon_sync_obj_t cookie;

		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		anon_index = seg_page(seg, addr);
		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, anon_index++) {
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup_nowait(vp, off, SE_SHARED);
					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
				anon_array_exit(&cookie);
			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
				ret |= SEG_PAGE_LOCKED;
		ANON_LOCK_EXIT(&amp->a_rwlock);
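
/*
 * Illustrative reading of the result (not additional driver code): each
 * byte written to *vec above is a bit mask, so for a DISM segment a
 * resident, locked anonymous page reports
 * (SEG_PAGE_INCORE | SEG_PAGE_ANON | SEG_PAGE_LOCKED), while a paged-out
 * slot reports 0 for that page.
 */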
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	/*
	 * seg.s_size may have been rounded up to the largest page size
	 *
	 * XXX This should be cleaned up. sptdestroy should take a length
	 * argument which should be the same as sptcreate. Then
	 * this rounding would not be needed (or is done in shm.c)
	 * Only the check for full segment will be needed.
	 *
	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
	 * to be useful at all.
	 */
	share_size = page_get_pagesize(seg->s_szc);
	ssize = P2ROUNDUP(ssize, share_size);

	if (raddr == seg->s_base && ssize == seg->s_size) {
segspt_create(struct seg *seg, caddr_t argsp)
	caddr_t addr = seg->s_base;
	struct spt_data *sptd;
	struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
	struct anon_map *amp = sptcargs->amp;
	struct kshmid *sp = amp->a_sp;
	struct cred *cred = CRED();
	ulong_t i, j, anon_index = 0;
	pgcnt_t npages = btopr(amp->size);
	proc_t *procp = curproc;
	rctl_qty_t lockedbytes = 0;

	/*
	 * We are holding the a_lock on the underlying dummy as,
	 * so we can make calls to the HAT layer.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if (err = anon_swap_adjust(npages))

	if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)

	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
		    KM_NOSLEEP)) == NULL)

	mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);

	if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)

	seg->s_ops = &segspt_ops;
	sptd->spt_prot = sptcargs->prot;
	sptd->spt_flags = sptcargs->flags;
	seg->s_data = (caddr_t)sptd;
	sptd->spt_ppa = NULL;
	sptd->spt_ppa_lckcnt = NULL;
	seg->s_szc = sptcargs->szc;
	cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	if (seg->s_szc > amp->a_szc) {
		amp->a_szc = seg->s_szc;
	ANON_LOCK_EXIT(&amp->a_rwlock);

	/*
	 * Set policy to affect initial allocation of pages in
	 * anon_map_createpages()
	 */
	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
	    NULL, 0, ptob(npages));
	if (sptcargs->flags & SHM_PAGEABLE) {
		pgcnt_t new_npgs, more_pgs;
		struct anon_hdr *nahp;

		share_sz = page_get_pagesize(seg->s_szc);
		if (!IS_P2ALIGNED(amp->size, share_sz)) {
			/*
			 * We are rounding up the size of the anon array
			 * on 4 M boundary because we always create 4 M
			 * of page(s) when locking, faulting pages and we
			 * don't have to check for all corner cases e.g.
			 * if there is enough space to allocate 4 M
			 */
			new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
			more_pgs = new_npgs - npages;
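			/*
			 * Worked example with made-up numbers: for an
			 * amp->size of 6 MB and a 4 MB share_sz,
			 * P2ROUNDUP() gives 8 MB, so new_npgs covers
			 * 8 MB worth of PAGESIZE pages and more_pgs is
			 * the extra 2 MB that still needs swap reserved
			 * against the owning zone below.
			 */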
			/*
			 * The zone will never be NULL, as a fully created
			 * shm always has an owning zone.
			 */
			zone = sp->shm_perm.ipc_zone_ref.zref_zone;
			ASSERT(zone != NULL);
			if (anon_resv_zone(ptob(more_pgs), zone) == 0) {

			nahp = anon_create(new_npgs, ANON_SLEEP);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
			anon_release(amp->ahp, npages);
			ASSERT(amp->swresv == ptob(npages));
			amp->swresv = amp->size = ptob(new_npgs);
			ANON_LOCK_EXIT(&amp->a_rwlock);

		sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
		    sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
		sptd->spt_pcachecnt = 0;
		sptd->spt_realsize = ptob(npages);
		sptcargs->seg_spt = seg;

	/*
	 * get array of pages for each anon slot in amp
	 */
	if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
	    seg, addr, S_CREATE, cred)) != 0)

	mutex_enter(&sp->shm_mlock);

	/* May be partially locked, so, count bytes to charge for locking */
	for (i = 0; i < npages; i++)
		if (ppa[i]->p_lckcnt == 0)
			lockedbytes += PAGESIZE;

	proj = sp->shm_perm.ipc_proj;

	if (lockedbytes > 0) {
		mutex_enter(&procp->p_lock);
		if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
			mutex_exit(&procp->p_lock);
			mutex_exit(&sp->shm_mlock);
			for (i = 0; i < npages; i++)
		mutex_exit(&procp->p_lock);
	/*
	 * addr is initial address corresponding to the first page on ppa list
	 */
	for (i = 0; i < npages; i++) {
		/* attempt to lock all pages */
		if (page_pp_lock(ppa[i], 0, 1) == 0) {
			/*
			 * if unable to lock any page, unlock all
			 * of them and return error
			 */
			for (j = 0; j < i; j++)
				page_pp_unlock(ppa[j], 0, 1);
			for (i = 0; i < npages; i++)
			rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
			mutex_exit(&sp->shm_mlock);
	mutex_exit(&sp->shm_mlock);

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * for the entire life of the segment. For example platforms
	 * that do not support Dynamic Reconfiguration.
	 */
	hat_flags = HAT_LOAD_SHARE;
	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
		hat_flags |= HAT_LOAD_LOCK;

	/*
	 * Load translations one large page at a time
	 * to make sure we don't create mappings bigger than
	 * segment's size code in case underlying pages
	 * are shared with segvn's segment that uses bigger
	 * size code than we do.
	 */
	pgsz = page_get_pagesize(seg->s_szc);
	pgcnt = page_get_pagecnt(seg->s_szc);
	for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
		sz = MIN(pgsz, ptob(npages - pidx));
		hat_memload_array(seg->s_as->a_hat, a, sz,
		    &ppa[pidx], sptd->spt_prot, hat_flags);

	/*
	 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
	 * we will leave the pages locked SE_SHARED for the life
	 * of the ISM segment. This will prevent any calls to
	 * hat_pageunload() on this ISM segment for those platforms.
	 */
	if (!(hat_flags & HAT_LOAD_LOCK)) {
		/*
		 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
		 * we no longer need to hold the SE_SHARED lock on the pages,
		 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
		 * SE_SHARED lock on the pages as necessary.
		 */
		for (i = 0; i < npages; i++)

	sptd->spt_pcachecnt = 0;
	kmem_free(ppa, ((sizeof (page_t *)) * npages));
	sptd->spt_realsize = ptob(npages);
	atomic_add_long(&spt_used, npages);
	sptcargs->seg_spt = seg;
	kmem_free(vp, sizeof (*vp));
	cv_destroy(&sptd->spt_cv);
	mutex_destroy(&sptd->spt_lock);
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		kmem_free(ppa, (sizeof (*ppa) * npages));
	kmem_free(sptd, sizeof (*sptd));
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		anon_swap_restore(npages);
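
/*
 * Note on the error path above: the cleanup undoes the earlier allocations
 * in reverse order -- the vnode, the condition variable and mutex, the ppa
 * array (ISM only), the spt_data itself, and finally the anon_swap_adjust()
 * reservation taken at the top of segspt_create().
 */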
segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
	struct spt_data *sptd = (struct spt_data *)seg->s_data;
	struct anon_map *amp;
	pgcnt_t pgs, curnpgs = 0;
	rctl_qty_t unlocked_bytes = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	len = P2ROUNDUP(len, PAGESIZE);

	hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
	if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) ||
	    (sptd->spt_flags & SHM_PAGEABLE)) {
		hat_flags = HAT_UNLOAD_UNMAP;

	hat_unload(seg->s_as->a_hat, addr, len, hat_flags);

	if (sptd->spt_flags & SHM_PAGEABLE)
		npages = btop(amp->size);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		proj = sp->shm_perm.ipc_proj;
		mutex_enter(&sp->shm_mlock);

	for (anon_idx = 0; anon_idx < npages; anon_idx++) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
				panic("segspt_free_pages: null app");
			if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
		ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
		swap_xlate(ap, &vp, &off);

		/*
		 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
		 * the pages won't be having SE_SHARED lock at this
		 *
		 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
		 * the pages are still held SE_SHARED locked from the
		 * original segspt_create()
		 *
		 * Our goal is to get SE_EXCL lock on each page, remove
		 * permanent lock on it and invalidate the page.
		 */
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if (hat_flags == HAT_UNLOAD_UNMAP)
				pp = page_lookup(vp, off, SE_EXCL);
				if ((pp = page_find(vp, off)) == NULL) {
					panic("segspt_free_pages: "
				if (!page_tryupgrade(pp)) {
					pp = page_lookup(vp, off, SE_EXCL);
					panic("segspt_free_pages: "
					    "page not in the system");
			ASSERT(pp->p_lckcnt > 0);
			page_pp_unlock(pp, 0, 1);
			if (pp->p_lckcnt == 0)
				unlocked_bytes += PAGESIZE;
			if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)

		/*
		 * It's logical to invalidate the pages here as in most cases
		 * these were created by segspt.
		 */
		if (pp->p_szc != 0) {
				ASSERT(curnpgs == 0);
				pgs = curnpgs = page_get_pagecnt(pp->p_szc);
				ASSERT(IS_P2ALIGNED(pgs, pgs));
				ASSERT(!(page_pptonum(pp) & (pgs - 1)));
			} else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
				ASSERT(curnpgs == 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - 1));
				page_destroy_pages(rootpp);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - curnpgs));
			if (root != 0 || curnpgs != 0) {
				panic("segspt_free_pages: bad large page");
			/*
			 * Before destroying the pages, we need to take care
			 * of the rctl locked memory accounting. For that
			 * we need to calculate the unlocked_bytes.
			 */
			if (pp->p_lckcnt > 0)
				unlocked_bytes += PAGESIZE;
			VN_DISPOSE(pp, B_INVAL, 0, kcred);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		if (unlocked_bytes > 0)
			rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
		mutex_exit(&sp->shm_mlock);

	if (root != 0 || curnpgs != 0) {
		panic("segspt_free_pages: bad large page");

	/*
	 * mark that pages have been released
	 */
	sptd->spt_realsize = 0;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		atomic_add_long(&spt_used, -npages);
		anon_swap_restore(npages);
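
/*
 * The accounting here mirrors segspt_create(): spt_used was bumped by
 * npages and swap was adjusted via anon_swap_adjust() when the ISM segment
 * was built, so teardown subtracts the same npages and calls
 * anon_swap_restore(), for non-pageable (ISM) segments only.
 */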
/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_getpolicy(struct seg *seg, caddr_t addr)
	struct anon_map *amp;
	lgrp_mem_policy_info_t *policy_info;
	struct spt_data *spt_data;

	/*
	 * Get anon_map from segspt
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	spt_data = (struct spt_data *)seg->s_data;
	if (spt_data == NULL)
	amp = spt_data->spt_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
/*
 * Return locked pages over a given range.
 *
 * We will cache all DISM locked pages and save the pplist for the
 * entire segment in the ppa field of the underlying DISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t pg_idx, npages, tot_npages, npgs;
	struct page **pplist, **pl, **ppa, *pp;
	struct anon_map *amp;
	pgcnt_t claim_availrmem = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
	/*
	 * We want to lock/unlock the entire ISM segment. Therefore,
	 * we will be using the underlying sptseg and its base address
	 * and length for the caching arguments.
	 */
	pg_idx = seg_page(seg, addr);

	/*
	 * check if the request is larger than number of pages covered
	 */
	if (pg_idx + npages > btopr(sptd->spt_amp->size)) {

	if (type == L_PAGEUNLOCK) {
		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called, while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
		    (AS_ISUNMAPWAIT(seg->s_as) &&
		    shmd->shm_softlockcnt > 0)) {

	/* The L_PAGELOCK case ... */

	if (sptd->spt_flags & DISM_PPA_CHANGED) {
		/*
		 * for DISM the ppa needs to be rebuilt since the
		 * number of locked pages could be changed
		 */

	/*
	 * First try to find pages in segment page cache, without
	 * holding the segment lock.
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				seg_pinactive(seg, NULL, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		*ppp = &(sptd->spt_ppa[pg_idx]);
	mutex_enter(&sptd->spt_lock);
	/*
	 * try to find pages in segment page cache with mutex
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				mutex_exit(&sptd->spt_lock);
				seg_pinactive(seg, NULL, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[pg_idx]);

	if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    SEGP_FORCE_WIRED) == SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);
	/*
	 * No need to worry about protections because DISM pages are always rw.
	 */

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		tot_npages = btopr(sptd->spt_amp->size);

		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (an_idx = 0; an_idx < tot_npages; ) {
			ap = anon_get_ptr(amp->ahp, an_idx);
			/*
			 * Cache only mlocked pages. For large pages
			 * if one (constituent) page is mlocked
			 * all pages for that large page
			 * are cached also. This is for quick
			 * lookups of ppa array;
			 */
			if ((ap != NULL) && (lpg_cnt != 0 ||
			    (sptd->spt_ppa_lckcnt[an_idx] != 0))) {

				swap_xlate(ap, &vp, &off);
				pp = page_lookup(vp, off, SE_SHARED);
				/*
				 * For a small page, we are done --
				 * lpg_count is reset to 0 below.
				 *
				 * For a large page, we are guaranteed
				 * to find the anon structures of all
				 * constituent pages and a non-zero
				 * lpg_cnt ensures that we don't test
				 * for mlock for these. We are done
				 * when lpg_count reaches (npgs + 1).
				 * If we are not the first constituent
				 * page, restart at the first one.
				 */
				npgs = page_get_pagecnt(pp->p_szc);
				if (!IS_P2ALIGNED(an_idx, npgs)) {
					an_idx = P2ALIGN(an_idx, npgs);
				if (++lpg_cnt > npgs)
				/*
				 * availrmem is decremented only
				 * for unlocked pages
				 */
				if (sptd->spt_ppa_lckcnt[an_idx] == 0)
				pplist[an_idx] = pp;
		ANON_LOCK_EXIT(&amp->a_rwlock);

		if (claim_availrmem) {
			mutex_enter(&freemem_lock);
			if (availrmem < tune.t_minarmem + claim_availrmem) {
				mutex_exit(&freemem_lock);
				claim_availrmem = 0;
			availrmem -= claim_availrmem;
			mutex_exit(&freemem_lock);
		/*
		 * We already have a valid ppa[].
		 */

	ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
	if (ret == SEGP_FAIL) {
		/*
		 * seg_pinsert failed. We return
		 * ENOTSUP, so that the as_pagelock() code will
		 * then try the slower F_SOFTLOCK path.
		 *
		 * No one else has referenced the ppa[].
		 * We created it and we need to destroy it.
		 */
		sptd->spt_ppa = NULL;

	/*
	 * In either case, we increment softlockcnt on the 'real' segment.
	 */
	sptd->spt_pcachecnt++;
	atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));

	ppa = sptd->spt_ppa;
	for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
		if (ppa[an_idx] == NULL) {
			mutex_exit(&sptd->spt_lock);
			seg_pinactive(seg, NULL, seg->s_base,
			    sptd->spt_amp->size,
			    pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
		if ((szc = ppa[an_idx]->p_szc) != 0) {
			npgs = page_get_pagecnt(szc);
			an_idx = P2ROUNDUP(an_idx + 1, npgs);

	/*
	 * We can now drop the sptd->spt_lock since the ppa[]
	 * exists and we have incremented pcachecnt.
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * Since we cache the entire segment, we want to
	 * set ppp to point to the first slot that corresponds
	 * to the requested addr, i.e. pg_idx.
	 */
	*ppp = &(sptd->spt_ppa[pg_idx]);

	/*
	 * We will only reach this code if we tried and failed.
	 *
	 * And we can drop the lock on the dummy seg, once we've failed
	 * to set up a new ppa[].
	 */
	mutex_exit(&sptd->spt_lock);

	if (claim_availrmem) {
		mutex_enter(&freemem_lock);
		availrmem += claim_availrmem;
		mutex_exit(&freemem_lock);

	/*
	 * We created pl and we need to destroy it.
	 */
	for (an_idx = 0; an_idx < tot_npages; an_idx++) {
		if (pplist[an_idx] != NULL)
			page_unlock(pplist[an_idx]);
	kmem_free(pl, sizeof (page_t *) * tot_npages);

	if (shmd->shm_softlockcnt <= 0) {
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			mutex_exit(&seg->s_as->a_contents);
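
/*
 * A hedged sketch of how the pagelock entry points above and below are
 * driven (the caller is shown for orientation only):
 *
 *	page_t **pplist;
 *	if (as_pagelock(as, &pplist, addr, len, S_WRITE) == 0) {
 *		// ... do I/O against the locked pages ...
 *		as_pageunlock(as, pplist, addr, len, S_WRITE);
 *	}
 *
 * as_pagelock() reaches this driver as an L_PAGELOCK call and
 * as_pageunlock() as the matching L_PAGEUNLOCK; an ENOTSUP return from the
 * L_PAGELOCK side makes the caller fall back to F_SOFTLOCK faults.
 */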
/*
 * return locked pages over a given range.
 *
 * We will cache the entire ISM segment and save the pplist for the
 * entire segment in the ppa field of the underlying ISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t np, page_index, npages;
	caddr_t a, spt_base;
	struct page **pplist, **pl, *pp;
	struct anon_map *amp;
	uint_t pl_built = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);

	/*
	 * We want to lock/unlock the entire ISM segment. Therefore,
	 * we will be using the underlying sptseg and its base address
	 * and length for the caching arguments.
	 */
	if (sptd->spt_flags & SHM_PAGEABLE) {
		return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));

	page_index = seg_page(seg, addr);
	npages = btopr(len);

	/*
	 * check if the request is larger than number of pages covered
	 */
	if (page_index + npages > btopr(sptd->spt_amp->size)) {

	if (type == L_PAGEUNLOCK) {
		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called, while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
	/* The L_PAGELOCK case... */

	/*
	 * First try to find pages in segment page cache, without
	 * holding the segment lock.
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa == pplist);
		ASSERT(sptd->spt_ppa[page_index]);
		/*
		 * Since we cache the entire ISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. page_index.
		 */
		*ppp = &(sptd->spt_ppa[page_index]);

	mutex_enter(&sptd->spt_lock);

	/*
	 * try to find pages in segment page cache
	 */
	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    S_WRITE, SEGP_FORCE_WIRED);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa == pplist);
		/*
		 * Since we cache the entire segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. page_index.
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[page_index]);

	if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    SEGP_FORCE_WIRED) == SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);

	/*
	 * No need to worry about protections because ISM pages
	 */

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		ASSERT(sptd->spt_ppa == pplist);

		spt_base = sptseg->s_base;

		/*
		 * availrmem is decremented once during anon_swap_adjust()
		 * and is incremented during the anon_unresv(), which is
		 * called from shm_rm_amp() when the segment is destroyed.
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);

		/* pcachecnt is protected by sptd->spt_lock */
		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *)
		    * btopr(sptd->spt_amp->size), KM_SLEEP);

		anon_index = seg_page(sptseg, spt_base);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
		    a += PAGESIZE, anon_index++, pplist++) {
			ap = anon_get_ptr(amp->ahp, anon_index);
			swap_xlate(ap, &vp, &off);
			pp = page_lookup(vp, off, SE_SHARED);
		ANON_LOCK_EXIT(&amp->a_rwlock);

		if (a < (spt_base + sptd->spt_amp->size)) {
		/*
		 * We already have a valid ppa[].
		 */

	ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
	if (ret == SEGP_FAIL) {
		/*
		 * seg_pinsert failed. We return
		 * ENOTSUP, so that the as_pagelock() code will
		 * then try the slower F_SOFTLOCK path.
		 *
		 * No one else has referenced the ppa[].
		 * We created it and we need to destroy it.
		 */
		sptd->spt_ppa = NULL;

	/*
	 * In either case, we increment softlockcnt on the 'real' segment.
	 */
	sptd->spt_pcachecnt++;
	atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));

	/*
	 * We can now drop the sptd->spt_lock since the ppa[]
	 * exists and we have incremented pcachecnt.
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * Since we cache the entire segment, we want to
	 * set ppp to point to the first slot that corresponds
	 * to the requested addr, i.e. page_index.
	 */
	*ppp = &(sptd->spt_ppa[page_index]);

	/*
	 * We will only reach this code if we tried and failed.
	 *
	 * And we can drop the lock on the dummy seg, once we've failed
	 * to set up a new ppa[].
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * We created pl and we need to destroy it.
	 */
	np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
		page_unlock(*pplist);
	kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));

	if (shmd->shm_softlockcnt <= 0) {
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			mutex_exit(&seg->s_as->a_contents);
/*
 * purge any cached pages in the I/O page cache
 */
segspt_purge(struct seg *seg)
	seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
	struct seg *seg = (struct seg *)ptag;
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd;
	pgcnt_t npages, i, free_availrmem = 0;

	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;
	npages = (len >> PAGESHIFT);

	ASSERT(sptd->spt_pcachecnt != 0);
	ASSERT(sptd->spt_ppa == pplist);
	ASSERT(npages == btopr(sptd->spt_amp->size));
	ASSERT(async || AS_LOCK_HELD(seg->s_as));

	/*
	 * Acquire the lock on the dummy seg and destroy the
	 * ppa array IF this is the last pcachecnt.
	 */
	mutex_enter(&sptd->spt_lock);
	if (--sptd->spt_pcachecnt == 0) {
		for (i = 0; i < npages; i++) {
			if (pplist[i] == NULL) {
			if (rw == S_WRITE) {
				hat_setrefmod(pplist[i]);
				hat_setref(pplist[i]);
			if ((sptd->spt_flags & SHM_PAGEABLE) &&
			    (sptd->spt_ppa_lckcnt[i] == 0))
			page_unlock(pplist[i]);
		if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
			mutex_enter(&freemem_lock);
			availrmem += free_availrmem;
			mutex_exit(&freemem_lock);
		/*
		 * Since we want to cache/uncache the entire ISM segment,
		 * we will track the pplist in a segspt specific field
		 * ppa, that is initialized at the time we add an entry to
		 */
		ASSERT(sptd->spt_pcachecnt == 0);
		kmem_free(pplist, sizeof (page_t *) * npages);
		sptd->spt_ppa = NULL;
		sptd->spt_flags &= ~DISM_PPA_CHANGED;
		cv_broadcast(&sptd->spt_cv);
	mutex_exit(&sptd->spt_lock);
	/*
	 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
	 * may not hold AS lock (in this case async argument is not 0). This
	 * means if softlockcnt drops to 0 after the decrement below address
	 * space may get freed. We can't allow it since after softlock
	 * decrement to 0 we still need to access as structure for possible
	 * wakeup of unmap waiters. To prevent the disappearance of as we take
	 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
	 * this mutex as a barrier to make sure this routine completes before
	 *
	 * The second complication we have to deal with in async case is a
	 * possibility of missed wake up of unmap wait thread. When we don't
	 * hold as lock here we may take a_contents lock before unmap wait
	 * thread that was first to see softlockcnt was still not 0. As a
	 * result we'll fail to wake up an unmap wait thread. To avoid this
	 * race we set nounmapwait flag in as structure if we drop softlockcnt
	 * to 0 if async is not 0. The unmapwait thread
	 * will not block if this flag is set.
	 */
	mutex_enter(&shmd->shm_segfree_syncmtx);

	/*
	 * Now decrement softlockcnt.
	 */
	ASSERT(shmd->shm_softlockcnt > 0);
	atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));

	if (shmd->shm_softlockcnt <= 0) {
		if (async || AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			AS_SETNOUNMAPWAIT(seg->s_as);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			mutex_exit(&seg->s_as->a_contents);

	mutex_exit(&shmd->shm_segfree_syncmtx);
/*
 * Do a F_SOFTUNLOCK call over the range requested.
 * The range must have already been F_SOFTLOCK'ed.
 *
 * The calls to acquire and release the anon map lock mutex were
 * removed in order to avoid a deadly embrace during a DR
 * memory delete operation. (Eg. DR blocks while waiting for an
 * exclusive lock on a page that is being used for kaio; the
 * thread that will complete the kaio and call segspt_softunlock
 * blocks on the anon map lock; another thread holding the anon
 * map lock blocks on another page lock via the segspt_shmfault
 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
 *
 * The appropriateness of the removal is based upon the following:
 * 1. If we are holding a segment's reader lock and the page is held
 * shared, then the corresponding element in anonmap which points to
 * anon struct cannot change and there is no need to acquire the
 * anonymous map lock.
 * 2. Threads in segspt_softunlock have a reader lock on the segment
 * and already have the shared page lock, so we are guaranteed that
 * the anon map slot cannot change and therefore can call anon_get_ptr()
 * without grabbing the anonymous map lock.
 * 3. Threads that softlock a shared page break copy-on-write, even if
 * it's a read. Thus cow faults can be ignored with respect to soft
 * unlocking, since the breaking of cow means that the anon slot(s) will
 */
segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
    size_t len, enum seg_rw rw)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * and therefore their pages are SE_SHARED locked
	 * for the entire life of the segment.
	 */
	if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) &&
	    ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
		goto softlock_decrement;

	/*
	 * Any thread is free to do a page_find and
	 * page_unlock() on the pages within this seg.
	 *
	 * We are already holding the as->a_lock on the user's
	 * real segment, but we need to hold the a_lock on the
	 * underlying dummy as. This is mostly to satisfy the
	 * underlying HAT layer.
	 */
	AS_LOCK_ENTER(sptseg->s_as, RW_READER);
	hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
	AS_LOCK_EXIT(sptseg->s_as);

	amp = sptd->spt_amp;
	ASSERT(amp != NULL);
	anon_index = seg_page(sptseg, sptseg_addr);

	for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
		ap = anon_get_ptr(amp->ahp, anon_index++);
		swap_xlate(ap, &vp, &offset);

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it has a
		 */
		pp = page_find(vp, offset);
		ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
			panic("segspt_softunlock: "
			    "addr %p, ap %p, vp %p, off %llx",
			    (void *)adr, (void *)ap, (void *)vp, offset);
		if (rw == S_WRITE) {
		} else if (rw != S_OTHER) {

	npages = btopr(len);
	ASSERT(shmd->shm_softlockcnt >= npages);
	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
	if (shmd->shm_softlockcnt == 0) {
		/*
		 * All SOFTLOCKS are gone. Wakeup any waiting
		 * unmappers so they can try again to unmap.
		 * Check for waiters first without the mutex
		 * held so we don't always grab the mutex on
		 */
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			mutex_exit(&seg->s_as->a_contents);
segspt_shmattach(struct seg *seg, caddr_t *argsp)
	struct shm_data *shmd_arg = (struct shm_data *)argsp;
	struct shm_data *shmd;
	struct anon_map *shm_amp = shmd_arg->shm_amp;
	struct spt_data *sptd;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);

	shmd->shm_sptas = shmd_arg->shm_sptas;
	shmd->shm_amp = shm_amp;
	shmd->shm_sptseg = shmd_arg->shm_sptseg;

	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
	    NULL, 0, seg->s_size);

	mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);

	seg->s_data = (void *)shmd;
	seg->s_ops = &segspt_shmops;
	seg->s_szc = shmd->shm_sptseg->s_szc;
	sptd = shmd->shm_sptseg->s_data;

	if (sptd->spt_flags & SHM_PAGEABLE) {
		if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
		    KM_NOSLEEP)) == NULL) {
			kmem_free(shmd, (sizeof (*shmd)));
		shmd->shm_lckpgs = 0;
		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
			if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
			    shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
			    seg->s_size, seg->s_szc)) != 0) {
				kmem_free(shmd->shm_vpage,
				    btopr(shm_amp->size));
		error = hat_share(seg->s_as->a_hat, seg->s_base,
		    shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
		    seg->s_size, seg->s_szc);
		kmem_free(shmd, (sizeof (*shmd)));
	ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
	ANON_LOCK_EXIT(&shm_amp->a_rwlock);
segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if (shmd->shm_softlockcnt > 0) {

	if (ssize != seg->s_size) {
		cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
		    ssize, seg->s_size);

	(void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
	hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
segspt_shmfree(struct seg *seg)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *shm_amp = shmd->shm_amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	(void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
	    MC_UNLOCK, NULL, 0);

	/*
	 * Need to increment refcnt when attaching
	 * and decrement when detaching because of dup().
	 */
	ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
	ANON_LOCK_EXIT(&shm_amp->a_rwlock);

	if (shmd->shm_vpage) {	/* only for DISM */
		kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
		shmd->shm_vpage = NULL;

	/*
	 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
	 * still working with this segment without holding as lock.
	 */
	ASSERT(shmd->shm_softlockcnt == 0);
	mutex_enter(&shmd->shm_segfree_syncmtx);
	mutex_destroy(&shmd->shm_segfree_syncmtx);

	kmem_free(shmd, sizeof (*shmd));
segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * Shared page table is more than shared mapping.
	 * Individual process sharing page tables can't change prot
	 * because there is only one set of page tables.
	 * This will be allowed after private page table is
	 */
	/* need to return correct status error? */
segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct as *curspt = shmd->shm_sptas;
	struct spt_data *sptd = sptseg->s_data;
	caddr_t segspt_addr, shm_addr;
	int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL);

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself. The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested. This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)

	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported. Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
	 */
	pgsz = page_get_pagesize(sptseg->s_szc);
	pgcnt = page_get_pagecnt(sptseg->s_szc);
	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
	npages = btopr(size);

	/*
	 * Now we need to convert from addr in segshm to addr in segspt.
	 */
	an_idx = seg_page(seg, shm_addr);
	segspt_addr = sptseg->s_base + ptob(an_idx);

	ASSERT((segspt_addr + ptob(npages)) <=
	    (sptseg->s_base + sptd->spt_realsize));
	ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));

		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
		/*
		 * Fall through to the F_INVAL case to load up the hat layer
		 * entries with the HAT_LOAD_LOCK flag.
		 */
		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))

		ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);

		err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
			if (type == F_SOFTLOCK) {
				atomic_add_long((ulong_t *)(
				    &(shmd->shm_softlockcnt)), -npages);

		AS_LOCK_ENTER(sptseg->s_as, RW_READER);

		if (type == F_SOFTLOCK) {
			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				hat_memload_array(sptseg->s_as->a_hat,
				    a, pgsz, &ppa[pidx], sptd->spt_prot,
				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
			/*
			 * Migrate pages marked for migration
			 */
			if (lgrp_optimizations())
				page_migrate(seg, shm_addr, ppa, npages);

			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				hat_memload_array(sptseg->s_as->a_hat,
				    a, pgsz, &ppa[pidx],

		/*
		 * And now drop the SE_SHARED lock(s).
		 */
		if (dyn_ism_unmap) {
			for (i = 0; i < npages; i++) {
				page_unlock(ppa[i]);

		if (!dyn_ism_unmap) {
			if (hat_share(seg->s_as->a_hat, shm_addr,
			    curspt->a_hat, segspt_addr, ptob(npages),
				panic("hat_share err in DISM fault");
		if (type == F_INVAL) {
			for (i = 0; i < npages; i++) {
				page_unlock(ppa[i]);

		AS_LOCK_EXIT(sptseg->s_as);

		kmem_free(ppa, npages * sizeof (page_t *));

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the segspt_addr is the virtual address within the
		 */
		segspt_softunlock(seg, segspt_addr, size, rw);

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */

		panic("segspt_dismfault default type?");
segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct as *curspt = shmd->shm_sptas;
	struct spt_data *sptd = sptseg->s_data;
	caddr_t sptseg_addr, shm_addr;
	ulong_t anon_index = 0;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if (sptd->spt_flags & SHM_PAGEABLE) {
		return (segspt_dismfault(hat, seg, addr, len, type, rw));

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself. The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested. This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)

	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported. Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
	 */
	pgsz = page_get_pagesize(sptseg->s_szc);
	pgcnt = page_get_pagecnt(sptseg->s_szc);
	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
	npages = btopr(size);

	/*
	 * Now we need to convert from addr in segshm to addr in segspt.
	 */
	anon_index = seg_page(seg, shm_addr);
	sptseg_addr = sptseg->s_base + ptob(anon_index);

	/*
	 * And now we may have to adjust npages downward if we have
	 * exceeded the realsize of the segment or initial anon
	 */
	if ((sptseg_addr + ptob(npages)) >
	    (sptseg->s_base + sptd->spt_realsize))
		size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;

	npages = btopr(size);

	ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
	ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
		/*
		 * availrmem is decremented once during anon_swap_adjust()
		 * and is incremented during the anon_unresv(), which is
		 * called from shm_rm_amp() when the segment is destroyed.
		 */
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);

		/*
		 * Some platforms assume that ISM pages are SE_SHARED
		 * locked for the entire life of the segment.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))

		/*
		 * Fall through to the F_INVAL case to load up the hat layer
		 * entries with the HAT_LOAD_LOCK flag.
		 */

		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))

		/*
		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
		 * may still rely on this call to hat_share(). That
		 * would imply that those hat's can fault on a
		 * HAT_LOAD_LOCK translation, which would seem
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
			if (hat_share(seg->s_as->a_hat, seg->s_base,
			    curspt->a_hat, sptseg->s_base,
			    sptseg->s_size, sptseg->s_szc) != 0) {
				panic("hat_share error in ISM fault");

		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);

		/*
		 * I see no need to lock the real seg,
		 * here, because all of our work will be on the underlying
		 *
		 * sptseg_addr and npages now account for large pages.
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);
		anon_index = seg_page(sptseg, sptseg_addr);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++) {
			ap = anon_get_ptr(amp->ahp, anon_index++);
			swap_xlate(ap, &vp, &offset);
			pp = page_lookup(vp, offset, SE_SHARED);
		ANON_LOCK_EXIT(&amp->a_rwlock);
		ASSERT(i == npages);

		/*
		 * We are already holding the as->a_lock on the user's
		 * real segment, but we need to hold the a_lock on the
		 * underlying dummy as. This is mostly to satisfy the
		 * underlying HAT layer.
		 */
		AS_LOCK_ENTER(sptseg->s_as, RW_READER);

		if (type == F_SOFTLOCK) {
			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				sz = MIN(pgsz, ptob(npages - pidx));
				hat_memload_array(sptseg->s_as->a_hat, a,
				    sz, &ppa[pidx], sptd->spt_prot,
				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
			/*
			 * Migrate pages marked for migration.
			 */
			if (lgrp_optimizations())
				page_migrate(seg, shm_addr, ppa, npages);

			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				sz = MIN(pgsz, ptob(npages - pidx));
				hat_memload_array(sptseg->s_as->a_hat,
				    sptd->spt_prot, HAT_LOAD_SHARE);

		/*
		 * And now drop the SE_SHARED lock(s).
		 */
		for (i = 0; i < npages; i++)
			page_unlock(ppa[i]);

		AS_LOCK_EXIT(sptseg->s_as);

		kmem_free(ppa, sizeof (page_t *) * npages);

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the sptseg_addr is the virtual address within the
		 */
		segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */

		cmn_err(CE_WARN, "segspt_shmfault default type?");
segspt_shmfaulta(struct seg *seg, caddr_t addr)

segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
/*
 * duplicate the shared page tables
 */
segspt_shmdup(struct seg *seg, struct seg *newseg)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *amp = shmd->shm_amp;
	struct shm_data *shmd_new;
	struct seg *spt_seg = shmd->shm_sptseg;
	struct spt_data *sptd = spt_seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
	newseg->s_data = (void *)shmd_new;
	shmd_new->shm_sptas = shmd->shm_sptas;
	shmd_new->shm_amp = amp;
	shmd_new->shm_sptseg = shmd->shm_sptseg;
	newseg->s_ops = &segspt_shmops;
	newseg->s_szc = seg->s_szc;
	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
		shmd_new->shm_lckpgs = 0;
		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
			if ((error = hat_share(newseg->s_as->a_hat,
			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
			    seg->s_size, seg->s_szc)) != 0) {
				kmem_free(shmd_new->shm_vpage,
		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * ISM segment is always rw.
	 */
	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
/*
 * Return an array of locked large pages, for empty slots allocate
 * private zero-filled anon pages.
 */
	struct spt_data *sptd = sptseg->s_data;
	struct anon_map *amp = sptd->spt_amp;
	enum seg_rw rw = sptd->spt_prot;
	uint_t szc = sptseg->s_szc;
	size_t pg_sz, share_sz = page_get_pagesize(szc);
	caddr_t lp_addr, e_sptaddr;
	uint_t vpprot, ppa_szc = 0;
	struct vpage *vpage = NULL;
	anon_sync_obj_t cookie;
	int anon_locked = 0;

	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));

	lp_npgs = btop(pg_sz);

	e_sptaddr = sptaddr + len;
	an_idx = seg_page(sptseg, sptaddr);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	amp_pgs = page_get_pagecnt(amp->a_szc);

	for (; lp_addr < e_sptaddr;
	    an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
		/*
		 * If we're currently locked, and we get to a new
		 * page, unlock our current anon chunk.
		 */
		if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
			anon_array_exit(&cookie);
		anon_array_enter(amp, an_idx, &cookie);

		ppa_szc = (uint_t)-1;
		ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
		    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
		    &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2343 err
= FC_MAKE_ERR(ierr
);
2349 if (lp_addr
== e_sptaddr
) {
2352 ASSERT(lp_addr
< e_sptaddr
);
2355 * ierr == -1 means we failed to allocate a large page.
2356 * so do a size down operation.
2358 * ierr == -2 means some other process that privately shares
2359 * pages with this process has allocated a larger page and we
2360 * need to retry with larger pages. So do a size up
2361 * operation. This relies on the fact that large pages are
2362 * never partially shared i.e. if we share any constituent
2363 * page of a large page with another process we must share the
2364 * entire large page. Note this cannot happen for SOFTLOCK
2365 * case, unless current address (lpaddr) is at the beginning
2366 * of the next page size boundary because the other process
2367 * couldn't have relocated locked pages.
2369 ASSERT(ierr
== -1 || ierr
== -2);
2370 if (segvn_anypgsz
) {
2371 ASSERT(ierr
== -2 || szc
!= 0);
2372 ASSERT(ierr
== -1 || szc
< sptseg
->s_szc
);
2373 szc
= (ierr
== -1) ? szc
- 1 : szc
+ 1;
2376 * For faults and segvn_anypgsz == 0
2377 * we need to be careful not to loop forever
2378 * if existing page is found with szc other
2379 * than 0 or seg->s_szc. This could be due
2380 * to page relocations on behalf of DR or
2381 * more likely large page creation. For this
2382 * case simply re-size to existing page's szc
2383 * if returned by anon_map_getpages().
2385 if (ppa_szc
== (uint_t
)-1) {
2386 szc
= (ierr
== -1) ? 0 : sptseg
->s_szc
;
2388 ASSERT(ppa_szc
<= sptseg
->s_szc
);
2389 ASSERT(ierr
== -2 || ppa_szc
< szc
);
2390 ASSERT(ierr
== -1 || ppa_szc
> szc
);
2394 pg_sz
= page_get_pagesize(szc
);
2395 lp_npgs
= btop(pg_sz
);
2396 ASSERT(IS_P2ALIGNED(lp_addr
, pg_sz
));
2399 anon_array_exit(&cookie
);
2401 ANON_LOCK_EXIT(&
->a_rwlock
);
2406 anon_array_exit(&cookie
);
2408 ANON_LOCK_EXIT(&
->a_rwlock
);
2409 for (j
= 0; j
< ppa_idx
; j
++)
2410 page_unlock(ppa
[j
]);

/*
 * count the number of bytes in a set of spt pages that are currently not
 * locked
 */
static rctl_qty_t
spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
{
	ulong_t	i;
	rctl_qty_t unlocked = 0;

	for (i = 0; i < npages; i++) {
		if (ppa[i]->p_lckcnt == 0)
			unlocked += PAGESIZE;
	}
	return (unlocked);
}

extern	u_longlong_t randtick(void);
/* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
#define	NLCK	(NCPU_P2)
/* Random number with a range [0, n-1], n must be power of two */
#define	RAND_P2(n)	\
	((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
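
/*
 * A quick worked example of the two macros above, assuming a machine where
 * NCPU_P2 is 8 (the real value depends on the platform):
 *
 *	RAND_P2(8)		-> a pseudo-random value in [0, 7]
 *	NLCK + RAND_P2(NLCK)	-> a batch size in [8, 15]
 *
 * spt_lockpages() uses that batch size for its next freemem_lock
 * reservation, so competing threads tend to reach the lock at different
 * times rather than in lock-step.
 */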

int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos,
    rctl_qty_t *locked)
{
	struct shm_data *shmd = seg->s_data;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	ulong_t	i;
	int	kernel;
	pgcnt_t	nlck = 0;
	int	rv = 0;
	int	use_reserved = 1;

	/* return the number of bytes actually locked */
	*locked = 0;

	/*
	 * To avoid contention on freemem_lock, availrmem and pages_locked
	 * global counters are updated only every nlck locked pages instead of
	 * every time. Reserve nlck locks up front and deduct from this
	 * reservation for each page that requires a lock. When the reservation
	 * is consumed, reserve again. nlck is randomized, so the competing
	 * threads do not fall into a cyclic lock contention pattern. When
	 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
	 * is used to lock pages.
	 */
	for (i = 0; i < npages; anon_index++, pos++, i++) {
		if (nlck == 0 && use_reserved == 1) {
			nlck = NLCK + RAND_P2(NLCK);
			/* if fewer loops left, decrease nlck */
			nlck = MIN(nlck, npages - i);
			/*
			 * Reserve nlck locks up front and deduct from this
			 * reservation for each page that requires a lock. When
			 * the reservation is consumed, reserve again.
			 */
			mutex_enter(&freemem_lock);
			if ((availrmem - nlck) < pages_pp_maximum) {
				/* Do not do advance memory reserves */
				use_reserved = 0;
			} else {
				availrmem	-= nlck;
				pages_locked	+= nlck;
			}
			mutex_exit(&freemem_lock);
		}
		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
			if (sptd->spt_ppa_lckcnt[anon_index] <
			    (ushort_t)DISM_LOCK_MAX) {
				if (++sptd->spt_ppa_lckcnt[anon_index] ==
				    (ushort_t)DISM_LOCK_MAX) {
					cmn_err(CE_WARN,
					    "DISM page lock limit "
					    "reached on DISM offset 0x%lx\n",
					    anon_index << PAGESHIFT);
				}
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[anon_index]);
				if (!page_pp_lock(ppa[i], 0, kernel ||
				    use_reserved)) {
					sptd->spt_ppa_lckcnt[anon_index]--;
					rv = EAGAIN;
					break;
				}
				/* if this is a newly locked page, count it */
				if (ppa[i]->p_lckcnt == 1) {
					if (kernel == 0 && use_reserved == 1)
						nlck--;
					*locked += PAGESIZE;
				}
				shmd->shm_lckpgs++;
				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
				if (lockmap != NULL)
					BT_SET(lockmap, pos);
			}
		}
	}
	/* Return unused lock reservation */
	if (nlck != 0 && use_reserved == 1) {
		mutex_enter(&freemem_lock);
		availrmem	+= nlck;
		pages_locked	-= nlck;
		mutex_exit(&freemem_lock);
	}

	return (rv);
}

int
spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    rctl_qty_t *unlocked)
{
	struct shm_data *shmd = seg->s_data;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	struct anon_map *amp = sptd->spt_amp;
	struct anon	*ap;
	struct vnode	*vp;
	u_offset_t	off;
	struct page	*pp;
	int		kernel;
	anon_sync_obj_t	cookie;
	ulong_t		i;
	pgcnt_t		nlck = 0;
	pgcnt_t		nlck_limit = NLCK;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	for (i = 0; i < npages; i++, anon_index++) {
		if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			ASSERT(ap);

			swap_xlate(ap, &vp, &off);
			anon_array_exit(&cookie);
			pp = page_lookup(vp, off, SE_SHARED);
			ASSERT(pp);
			/*
			 * availrmem is decremented only for pages which are not
			 * in seg pcache, for pages in seg pcache availrmem was
			 * decremented in _dismpagelock()
			 */
			kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
			ASSERT(pp->p_lckcnt > 0);

			/*
			 * unlock page but do not change availrmem, we do it
			 * ourselves every nlck loops.
			 */
			page_pp_unlock(pp, 0, 1);
			if (pp->p_lckcnt == 0) {
				if (kernel == 0)
					nlck++;
				*unlocked += PAGESIZE;
			}
			page_unlock(pp);
			shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
			sptd->spt_ppa_lckcnt[anon_index]--;
			shmd->shm_lckpgs--;
		}

		/*
		 * To reduce freemem_lock contention, do not update availrmem
		 * until at least NLCK pages have been unlocked.
		 * 1. No need to update if nlck is zero
		 * 2. Always update if the last iteration
		 */
		if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
			mutex_enter(&freemem_lock);
			availrmem	+= nlck;
			pages_locked	-= nlck;
			mutex_exit(&freemem_lock);
			nlck = 0;
			nlck_limit = NLCK + RAND_P2(NLCK);
		}
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	return (0);
}

/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	struct kshmid	*sp = sptd->spt_amp->a_sp;
	pgcnt_t		npages, a_npages;
	page_t		**ppa;
	pgcnt_t		an_idx, a_an_idx, ppa_idx;
	caddr_t		spt_addr, a_addr;	/* spt and aligned address */
	size_t		a_len;			/* aligned len */
	size_t		share_sz;
	ulong_t		i;
	int		sts = 0;
	rctl_qty_t	unlocked = 0;
	rctl_qty_t	locked = 0;
	struct proc	*p = curproc;
	kproject_t	*proj;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(sp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		return (0);
	}

	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	an_idx = seg_page(seg, addr);
	npages = btopr(len);

	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
		return (ENOMEM);
	}

	/*
	 * A shm's project never changes, so no lock needed.
	 * The shm has a hold on the project, so it will not go away.
	 * Since we have a mapping to shm within this zone, we know
	 * that the zone will not go away.
	 */
	proj = sp->shm_perm.ipc_proj;

	if (op == MC_LOCK) {

		/*
		 * Need to align addr and size request if they are not
		 * aligned so we can always allocate large page(s) however
		 * we only lock what was requested in initial request.
		 */
		share_sz = page_get_pagesize(sptseg->s_szc);
		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
		    share_sz);
		a_npages = btop(a_len);
		a_an_idx = seg_page(seg, a_addr);
		spt_addr = sptseg->s_base + ptob(a_an_idx);
		ppa_idx = an_idx - a_an_idx;
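
		/*
		 * A small worked example of the alignment above (hypothetical
		 * numbers, assuming a 4 MB underlying page size):
		 *
		 *	share_sz = 0x400000
		 *	addr     = seg->s_base + 0x401000, len = 0x2000
		 *	a_addr   = P2ALIGN(addr, share_sz)
		 *	         -> seg->s_base + 0x400000
		 *	a_len    = P2ROUNDUP(addr + len - a_addr, share_sz)
		 *	         -> 0x400000
		 *	ppa_idx  = an_idx - a_an_idx -> 1
		 *
		 * Only the npages pages starting at ppa[ppa_idx] are locked;
		 * the rest of the aligned range is brought in only so whole
		 * large pages can be allocated.
		 */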

		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
		    KM_NOSLEEP)) == NULL) {
			return (ENOMEM);
		}

		/*
		 * Don't cache any new pages for IO and
		 * flush any cached pages.
		 */
		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
		if (sts != 0) {
			mutex_exit(&sptd->spt_lock);
			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
			return (sts);
		}

		mutex_enter(&sp->shm_mlock);
		/* enforce locked memory rctl */
		unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);

		mutex_enter(&p->p_lock);
		if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
			mutex_exit(&p->p_lock);
			sts = EAGAIN;
		} else {
			mutex_exit(&p->p_lock);
			sts = spt_lockpages(seg, an_idx, npages,
			    &ppa[ppa_idx], lockmap, pos, &locked);

			/*
			 * correct locked count if not all pages could be
			 * locked
			 */
			if ((unlocked - locked) > 0) {
				rctl_decr_locked_mem(NULL, proj,
				    (unlocked - locked), 0);
			}
		}
		/*
		 * unlock pages
		 */
		for (i = 0; i < a_npages; i++)
			page_unlock(ppa[i]);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sp->shm_mlock);
		mutex_exit(&sptd->spt_lock);

		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

	} else if (op == MC_UNLOCK) { /* unlock */

		mutex_enter(&sptd->spt_lock);
		if (shmd->shm_lckpgs == 0) {
			mutex_exit(&sptd->spt_lock);
			return (0);
		}
		/*
		 * Don't cache new IO pages.
		 */
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		mutex_enter(&sp->shm_mlock);
		sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
		if ((ppa = sptd->spt_ppa) != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		rctl_decr_locked_mem(NULL, proj, unlocked, 0);
		mutex_exit(&sp->shm_mlock);

		if (ppa != NULL)
			seg_ppurge_wiredpp(ppa);
	}
	return (sts);
}

/*ARGSUSED*/
int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	spgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * ISM segment is always rw.
	 */
	while (--pgno >= 0)
		*protv++ = sptd->spt_prot;
	return (0);
}

/*ARGSUSED*/
uoff_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Offset does not matter in ISM memory */

	return ((uoff_t)0);
}

/*ARGSUSED*/
int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * The shared memory mapping is always MAP_SHARED, SWAP is only
	 * reserved for DISM
	 */
	return (MAP_SHARED |
	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}
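
/*
 * Concretely: a classic ISM segment (SHM_PAGEABLE clear) reports
 * MAP_SHARED | MAP_NORESERVE since its pages are locked down and no swap is
 * reserved for them, while a pageable DISM segment reports plain MAP_SHARED
 * because swap is reserved for it.
 */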

/*ARGSUSED*/
int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	*vpp = sptd->spt_vp;
	return (0);
}

/*
 * We need to wait for pending IO to complete to a DISM segment in order for
 * pages to get kicked out of the seg_pcache. 120 seconds should be more
 * than enough time to wait.
 */
static clock_t spt_pcache_wait = 120;
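
/*
 * A quick sanity check of the timeout arithmetic used below (hz is system
 * dependent; 100 is used here purely for illustration):
 *
 *	end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait)
 *	          = now + 100 * 120
 *	          = now + 12000 clock ticks, i.e. 120 seconds
 */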

/*ARGSUSED*/
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct shm_data	*shmd = (struct shm_data *)seg->s_data;
	struct spt_data	*sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	struct anon_map	*amp;
	pgcnt_t		pg_idx;
	ushort_t	gen;
	clock_t		end_lbolt;
	int		writer;
	page_t		**ppa;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if (behav == MADV_FREE || behav == MADV_PURGE) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
			return (0);

		amp = sptd->spt_amp;
		pg_idx = seg_page(seg, addr);

		mutex_enter(&sptd->spt_lock);
		if ((ppa = sptd->spt_ppa) == NULL) {
			mutex_exit(&sptd->spt_lock);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			(void) anon_disclaim(amp, pg_idx, len, behav, NULL);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (0);
		}

		sptd->spt_flags |= DISM_PPA_CHANGED;
		gen = sptd->spt_gen;

		mutex_exit(&sptd->spt_lock);

		/*
		 * Purge all DISM cached pages
		 */
		seg_ppurge_wiredpp(ppa);

		/*
		 * Drop the AS_LOCK so that other threads can grab it
		 * in the as_pageunlock path and hopefully get the segment
		 * kicked out of the seg_pcache. We bump the shm_softlockcnt
		 * to keep this segment resident.
		 */
		writer = AS_WRITE_HELD(seg->s_as);
		atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
		AS_LOCK_EXIT(seg->s_as);

		mutex_enter(&sptd->spt_lock);

		end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);

		/*
		 * Try to wait for pages to get kicked out of the seg_pcache.
		 */
		while (sptd->spt_gen == gen &&
		    (sptd->spt_flags & DISM_PPA_CHANGED) &&
		    ddi_get_lbolt() < end_lbolt) {
			if (!cv_timedwait_sig(&sptd->spt_cv,
			    &sptd->spt_lock, end_lbolt)) {
				break;
			}
		}

		mutex_exit(&sptd->spt_lock);

		/* Regrab the AS_LOCK and release our hold on the segment */
		AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER);
		atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
		if (shmd->shm_softlockcnt <= 0) {
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				mutex_enter(&seg->s_as->a_contents);
				if (AS_ISUNMAPWAIT(seg->s_as)) {
					AS_CLRUNMAPWAIT(seg->s_as);
					cv_broadcast(&seg->s_as->a_cv);
				}
				mutex_exit(&seg->s_as->a_contents);
			}
		}

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		(void) anon_disclaim(amp, pg_idx, len, behav, NULL);
		ANON_LOCK_EXIT(&amp->a_rwlock);
	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
		int			already_set;
		ulong_t			anon_index;
		lgrp_mem_policy_t	policy;
		caddr_t			shm_addr;
		size_t			share_size;
		size_t			size;
		struct seg		*sptseg = shmd->shm_sptseg;
		caddr_t			sptseg_addr;

		/*
		 * Align address and length to page size of underlying segment
		 */
		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
		    share_size);

		amp = shmd->shm_amp;
		anon_index = seg_page(seg, shm_addr);

		/*
		 * And now we may have to adjust size downward if we have
		 * exceeded the realsize of the segment or initial anon
		 * allocations.
		 */
		sptseg_addr = sptseg->s_base + ptob(anon_index);
		if ((sptseg_addr + size) >
		    (sptseg->s_base + sptd->spt_realsize))
			size = (sptseg->s_base + sptd->spt_realsize) -
			    sptseg_addr;

		/*
		 * Set memory allocation policy for this segment
		 */
		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
		    NULL, 0, len);

		/*
		 * If random memory allocation policy set already,
		 * don't bother reapplying it.
		 */
		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
			return (0);

		/*
		 * Mark any existing pages in the given range for
		 * migration, flushing the I/O page cache, and using
		 * underlying segment to calculate anon index and get
		 * anonmap and vnode pointer from
		 */
		if (shmd->shm_softlockcnt > 0)
			segspt_purge(seg);

		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
	}

	return (0);
}
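
/*
 * For orientation, an illustrative sketch of how the advice handled above is
 * typically reached from user level (error handling omitted):
 *
 *	void *a = shmat(shmid, NULL, SHM_SHARE_MMU);
 *	(void) madvise(a, len, MADV_ACCESS_LWP);
 *
 * madvise() is implemented via memcntl(MC_ADVISE), which calls into as_ctl()
 * and from there into the segment's advise op, i.e. segspt_shmadvise() above.
 * MADV_FREE/MADV_PURGE only affect DISM (SHM_PAGEABLE) segments, while the
 * MADV_ACCESS_* hints adjust lgroup memory placement policy.
 */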

/*
 * get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon	*ap;
	size_t		anon_index;
	struct anon_map	*amp = shmd->shm_amp;
	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	anon_sync_obj_t	cookie;

	anon_index = seg_page(seg, addr);

	if (addr > (seg->s_base + sptd->spt_realsize)) {
		return (EFAULT);
	}

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	anon_array_enter(amp, anon_index, &cookie);
	ap = anon_get_ptr(amp->ahp, anon_index);
	if (ap == NULL) {
		struct page *pp;
		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
		if (pp == NULL) {
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (ENOMEM);
		}
		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
		page_unlock(pp);
	}
	anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
	memidp->val[0] = (uintptr_t)ap;
	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
	return (0);
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map		*amp;
	ulong_t			anon_index;
	lgrp_mem_policy_info_t	*policy_info;
	struct shm_data		*shm_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segshm
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	shm_data = (struct shm_data *)seg->s_data;
	if (shm_data == NULL)
		return (NULL);
	amp = shm_data->shm_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}