6345 remove xhat support
[illumos-gate.git] / usr / src / uts / common / vm / seg_spt.c
blob1573e1726b72e6827db1c30f5996bba6698324a9
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/param.h>
26 #include <sys/user.h>
27 #include <sys/mman.h>
28 #include <sys/kmem.h>
29 #include <sys/sysmacros.h>
30 #include <sys/cmn_err.h>
31 #include <sys/systm.h>
32 #include <sys/tuneable.h>
33 #include <vm/hat.h>
34 #include <vm/seg.h>
35 #include <vm/as.h>
36 #include <vm/anon.h>
37 #include <vm/page.h>
38 #include <sys/buf.h>
39 #include <sys/swap.h>
40 #include <sys/atomic.h>
41 #include <vm/seg_spt.h>
42 #include <sys/debug.h>
43 #include <sys/vtrace.h>
44 #include <sys/shm.h>
45 #include <sys/shm_impl.h>
46 #include <sys/lgrp.h>
47 #include <sys/vmsystm.h>
48 #include <sys/policy.h>
49 #include <sys/project.h>
50 #include <sys/tnf_probe.h>
51 #include <sys/zone.h>
53 #define SEGSPTADDR (caddr_t)0x0
56 * # pages used for spt
58 size_t spt_used;
61 * segspt_minfree is the memory left for system after ISM
62 * locked its pages; it is set up to 5% of availrmem in
63 * sptcreate when ISM is created. ISM should not use more
64 * than ~90% of availrmem; if it does, then the performance
65 * of the system may decrease. Machines with large memories may
66 * be able to use up more memory for ISM so we set the default
67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem.
68 * If somebody wants even more memory for ISM (risking hanging
69 * the system) they can patch the segspt_minfree to smaller number.
71 pgcnt_t segspt_minfree = 0;
73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 static void segspt_free(struct seg *seg);
76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
79 static void
80 segspt_badop()
82 panic("segspt_badop called");
83 /*NOTREACHED*/
86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
88 struct seg_ops segspt_ops = {
89 SEGSPT_BADOP(int), /* dup */
90 segspt_unmap,
91 segspt_free,
92 SEGSPT_BADOP(int), /* fault */
93 SEGSPT_BADOP(faultcode_t), /* faulta */
94 SEGSPT_BADOP(int), /* setprot */
95 SEGSPT_BADOP(int), /* checkprot */
96 SEGSPT_BADOP(int), /* kluster */
97 SEGSPT_BADOP(size_t), /* swapout */
98 SEGSPT_BADOP(int), /* sync */
99 SEGSPT_BADOP(size_t), /* incore */
100 SEGSPT_BADOP(int), /* lockop */
101 SEGSPT_BADOP(int), /* getprot */
102 SEGSPT_BADOP(u_offset_t), /* getoffset */
103 SEGSPT_BADOP(int), /* gettype */
104 SEGSPT_BADOP(int), /* getvp */
105 SEGSPT_BADOP(int), /* advise */
106 SEGSPT_BADOP(void), /* dump */
107 SEGSPT_BADOP(int), /* pagelock */
108 SEGSPT_BADOP(int), /* setpgsz */
109 SEGSPT_BADOP(int), /* getmemid */
110 segspt_getpolicy, /* getpolicy */
111 SEGSPT_BADOP(int), /* capable */
112 seg_inherit_notsup /* inherit */
115 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
116 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
117 static void segspt_shmfree(struct seg *seg);
118 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
119 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
120 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
121 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
122 register size_t len, register uint_t prot);
123 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
124 uint_t prot);
125 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
126 static size_t segspt_shmswapout(struct seg *seg);
127 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
128 register char *vec);
129 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
130 int attr, uint_t flags);
131 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
132 int attr, int op, ulong_t *lockmap, size_t pos);
133 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
134 uint_t *protv);
135 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
136 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
137 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
138 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
139 uint_t behav);
140 static void segspt_shmdump(struct seg *seg);
141 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
142 struct page ***, enum lock_type, enum seg_rw);
143 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
144 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
145 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
146 static int segspt_shmcapable(struct seg *, segcapability_t);
148 struct seg_ops segspt_shmops = {
149 segspt_shmdup,
150 segspt_shmunmap,
151 segspt_shmfree,
152 segspt_shmfault,
153 segspt_shmfaulta,
154 segspt_shmsetprot,
155 segspt_shmcheckprot,
156 segspt_shmkluster,
157 segspt_shmswapout,
158 segspt_shmsync,
159 segspt_shmincore,
160 segspt_shmlockop,
161 segspt_shmgetprot,
162 segspt_shmgetoffset,
163 segspt_shmgettype,
164 segspt_shmgetvp,
165 segspt_shmadvise, /* advise */
166 segspt_shmdump,
167 segspt_shmpagelock,
168 segspt_shmsetpgsz,
169 segspt_shmgetmemid,
170 segspt_shmgetpolicy,
171 segspt_shmcapable,
172 seg_inherit_notsup
175 static void segspt_purge(struct seg *seg);
176 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
177 enum seg_rw, int);
178 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
179 page_t **ppa);
183 /*ARGSUSED*/
185 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
186 uint_t prot, uint_t flags, uint_t share_szc)
188 int err;
189 struct as *newas;
190 struct segspt_crargs sptcargs;
192 #ifdef DEBUG
193 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
194 tnf_ulong, size, size );
195 #endif
196 if (segspt_minfree == 0) /* leave min 5% of availrmem for */
197 segspt_minfree = availrmem/20; /* for the system */
199 if (!hat_supported(HAT_SHARED_PT, (void *)0))
200 return (EINVAL);
203 * get a new as for this shared memory segment
205 newas = as_alloc();
206 newas->a_proc = NULL;
207 sptcargs.amp = amp;
208 sptcargs.prot = prot;
209 sptcargs.flags = flags;
210 sptcargs.szc = share_szc;
212 * create a shared page table (spt) segment
215 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
216 as_free(newas);
217 return (err);
219 *sptseg = sptcargs.seg_spt;
220 return (0);
223 void
224 sptdestroy(struct as *as, struct anon_map *amp)
227 #ifdef DEBUG
228 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
229 #endif
230 (void) as_unmap(as, SEGSPTADDR, amp->size);
231 as_free(as);
235 * called from seg_free().
236 * free (i.e., unlock, unmap, return to free list)
237 * all the pages in the given seg.
239 void
240 segspt_free(struct seg *seg)
242 struct spt_data *sptd = (struct spt_data *)seg->s_data;
244 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
246 if (sptd != NULL) {
247 if (sptd->spt_realsize)
248 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
250 if (sptd->spt_ppa_lckcnt)
251 kmem_free(sptd->spt_ppa_lckcnt,
252 sizeof (*sptd->spt_ppa_lckcnt)
253 * btopr(sptd->spt_amp->size));
254 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
255 cv_destroy(&sptd->spt_cv);
256 mutex_destroy(&sptd->spt_lock);
257 kmem_free(sptd, sizeof (*sptd));
261 /*ARGSUSED*/
262 static int
263 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
264 uint_t flags)
266 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
268 return (0);
271 /*ARGSUSED*/
272 static size_t
273 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
275 caddr_t eo_seg;
276 pgcnt_t npages;
277 struct shm_data *shmd = (struct shm_data *)seg->s_data;
278 struct seg *sptseg;
279 struct spt_data *sptd;
281 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
282 #ifdef lint
283 seg = seg;
284 #endif
285 sptseg = shmd->shm_sptseg;
286 sptd = sptseg->s_data;
288 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
289 eo_seg = addr + len;
290 while (addr < eo_seg) {
291 /* page exists, and it's locked. */
292 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
293 SEG_PAGE_ANON;
294 addr += PAGESIZE;
296 return (len);
297 } else {
298 struct anon_map *amp = shmd->shm_amp;
299 struct anon *ap;
300 page_t *pp;
301 pgcnt_t anon_index;
302 struct vnode *vp;
303 u_offset_t off;
304 ulong_t i;
305 int ret;
306 anon_sync_obj_t cookie;
308 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
309 anon_index = seg_page(seg, addr);
310 npages = btopr(len);
311 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
312 return (EINVAL);
314 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
315 for (i = 0; i < npages; i++, anon_index++) {
316 ret = 0;
317 anon_array_enter(amp, anon_index, &cookie);
318 ap = anon_get_ptr(amp->ahp, anon_index);
319 if (ap != NULL) {
320 swap_xlate(ap, &vp, &off);
321 anon_array_exit(&cookie);
322 pp = page_lookup_nowait(vp, off, SE_SHARED);
323 if (pp != NULL) {
324 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
325 page_unlock(pp);
327 } else {
328 anon_array_exit(&cookie);
330 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
331 ret |= SEG_PAGE_LOCKED;
333 *vec++ = (char)ret;
335 ANON_LOCK_EXIT(&amp->a_rwlock);
336 return (len);
340 static int
341 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
343 size_t share_size;
345 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
348 * seg.s_size may have been rounded up to the largest page size
349 * in shmat().
350 * XXX This should be cleanedup. sptdestroy should take a length
351 * argument which should be the same as sptcreate. Then
352 * this rounding would not be needed (or is done in shm.c)
353 * Only the check for full segment will be needed.
355 * XXX -- shouldn't raddr == 0 always? These tests don't seem
356 * to be useful at all.
358 share_size = page_get_pagesize(seg->s_szc);
359 ssize = P2ROUNDUP(ssize, share_size);
361 if (raddr == seg->s_base && ssize == seg->s_size) {
362 seg_free(seg);
363 return (0);
364 } else
365 return (EINVAL);
369 segspt_create(struct seg *seg, caddr_t argsp)
371 int err;
372 caddr_t addr = seg->s_base;
373 struct spt_data *sptd;
374 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
375 struct anon_map *amp = sptcargs->amp;
376 struct kshmid *sp = amp->a_sp;
377 struct cred *cred = CRED();
378 ulong_t i, j, anon_index = 0;
379 pgcnt_t npages = btopr(amp->size);
380 struct vnode *vp;
381 page_t **ppa;
382 uint_t hat_flags;
383 size_t pgsz;
384 pgcnt_t pgcnt;
385 caddr_t a;
386 pgcnt_t pidx;
387 size_t sz;
388 proc_t *procp = curproc;
389 rctl_qty_t lockedbytes = 0;
390 kproject_t *proj;
393 * We are holding the a_lock on the underlying dummy as,
394 * so we can make calls to the HAT layer.
396 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
397 ASSERT(sp != NULL);
399 #ifdef DEBUG
400 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
401 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
402 #endif
403 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
404 if (err = anon_swap_adjust(npages))
405 return (err);
407 err = ENOMEM;
409 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
410 goto out1;
412 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
413 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
414 KM_NOSLEEP)) == NULL)
415 goto out2;
418 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
420 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
421 goto out3;
423 seg->s_ops = &segspt_ops;
424 sptd->spt_vp = vp;
425 sptd->spt_amp = amp;
426 sptd->spt_prot = sptcargs->prot;
427 sptd->spt_flags = sptcargs->flags;
428 seg->s_data = (caddr_t)sptd;
429 sptd->spt_ppa = NULL;
430 sptd->spt_ppa_lckcnt = NULL;
431 seg->s_szc = sptcargs->szc;
432 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
433 sptd->spt_gen = 0;
435 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
436 if (seg->s_szc > amp->a_szc) {
437 amp->a_szc = seg->s_szc;
439 ANON_LOCK_EXIT(&amp->a_rwlock);
442 * Set policy to affect initial allocation of pages in
443 * anon_map_createpages()
445 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
446 NULL, 0, ptob(npages));
448 if (sptcargs->flags & SHM_PAGEABLE) {
449 size_t share_sz;
450 pgcnt_t new_npgs, more_pgs;
451 struct anon_hdr *nahp;
452 zone_t *zone;
454 share_sz = page_get_pagesize(seg->s_szc);
455 if (!IS_P2ALIGNED(amp->size, share_sz)) {
457 * We are rounding up the size of the anon array
458 * on 4 M boundary because we always create 4 M
459 * of page(s) when locking, faulting pages and we
460 * don't have to check for all corner cases e.g.
461 * if there is enough space to allocate 4 M
462 * page.
464 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
465 more_pgs = new_npgs - npages;
468 * The zone will never be NULL, as a fully created
469 * shm always has an owning zone.
471 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
472 ASSERT(zone != NULL);
473 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
474 err = ENOMEM;
475 goto out4;
478 nahp = anon_create(new_npgs, ANON_SLEEP);
479 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
480 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
481 ANON_SLEEP);
482 anon_release(amp->ahp, npages);
483 amp->ahp = nahp;
484 ASSERT(amp->swresv == ptob(npages));
485 amp->swresv = amp->size = ptob(new_npgs);
486 ANON_LOCK_EXIT(&amp->a_rwlock);
487 npages = new_npgs;
490 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
491 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
492 sptd->spt_pcachecnt = 0;
493 sptd->spt_realsize = ptob(npages);
494 sptcargs->seg_spt = seg;
495 return (0);
499 * get array of pages for each anon slot in amp
501 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
502 seg, addr, S_CREATE, cred)) != 0)
503 goto out4;
505 mutex_enter(&sp->shm_mlock);
507 /* May be partially locked, so, count bytes to charge for locking */
508 for (i = 0; i < npages; i++)
509 if (ppa[i]->p_lckcnt == 0)
510 lockedbytes += PAGESIZE;
512 proj = sp->shm_perm.ipc_proj;
514 if (lockedbytes > 0) {
515 mutex_enter(&procp->p_lock);
516 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
517 mutex_exit(&procp->p_lock);
518 mutex_exit(&sp->shm_mlock);
519 for (i = 0; i < npages; i++)
520 page_unlock(ppa[i]);
521 err = ENOMEM;
522 goto out4;
524 mutex_exit(&procp->p_lock);
528 * addr is initial address corresponding to the first page on ppa list
530 for (i = 0; i < npages; i++) {
531 /* attempt to lock all pages */
532 if (page_pp_lock(ppa[i], 0, 1) == 0) {
534 * if unable to lock any page, unlock all
535 * of them and return error
537 for (j = 0; j < i; j++)
538 page_pp_unlock(ppa[j], 0, 1);
539 for (i = 0; i < npages; i++)
540 page_unlock(ppa[i]);
541 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
542 mutex_exit(&sp->shm_mlock);
543 err = ENOMEM;
544 goto out4;
547 mutex_exit(&sp->shm_mlock);
550 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
551 * for the entire life of the segment. For example platforms
552 * that do not support Dynamic Reconfiguration.
554 hat_flags = HAT_LOAD_SHARE;
555 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
556 hat_flags |= HAT_LOAD_LOCK;
559 * Load translations one lare page at a time
560 * to make sure we don't create mappings bigger than
561 * segment's size code in case underlying pages
562 * are shared with segvn's segment that uses bigger
563 * size code than we do.
565 pgsz = page_get_pagesize(seg->s_szc);
566 pgcnt = page_get_pagecnt(seg->s_szc);
567 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
568 sz = MIN(pgsz, ptob(npages - pidx));
569 hat_memload_array(seg->s_as->a_hat, a, sz,
570 &ppa[pidx], sptd->spt_prot, hat_flags);
574 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
575 * we will leave the pages locked SE_SHARED for the life
576 * of the ISM segment. This will prevent any calls to
577 * hat_pageunload() on this ISM segment for those platforms.
579 if (!(hat_flags & HAT_LOAD_LOCK)) {
581 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
582 * we no longer need to hold the SE_SHARED lock on the pages,
583 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
584 * SE_SHARED lock on the pages as necessary.
586 for (i = 0; i < npages; i++)
587 page_unlock(ppa[i]);
589 sptd->spt_pcachecnt = 0;
590 kmem_free(ppa, ((sizeof (page_t *)) * npages));
591 sptd->spt_realsize = ptob(npages);
592 atomic_add_long(&spt_used, npages);
593 sptcargs->seg_spt = seg;
594 return (0);
596 out4:
597 seg->s_data = NULL;
598 kmem_free(vp, sizeof (*vp));
599 cv_destroy(&sptd->spt_cv);
600 out3:
601 mutex_destroy(&sptd->spt_lock);
602 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
603 kmem_free(ppa, (sizeof (*ppa) * npages));
604 out2:
605 kmem_free(sptd, sizeof (*sptd));
606 out1:
607 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
608 anon_swap_restore(npages);
609 return (err);
612 /*ARGSUSED*/
613 void
614 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
616 struct page *pp;
617 struct spt_data *sptd = (struct spt_data *)seg->s_data;
618 pgcnt_t npages;
619 ulong_t anon_idx;
620 struct anon_map *amp;
621 struct anon *ap;
622 struct vnode *vp;
623 u_offset_t off;
624 uint_t hat_flags;
625 int root = 0;
626 pgcnt_t pgs, curnpgs = 0;
627 page_t *rootpp;
628 rctl_qty_t unlocked_bytes = 0;
629 kproject_t *proj;
630 kshmid_t *sp;
632 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
634 len = P2ROUNDUP(len, PAGESIZE);
636 npages = btop(len);
638 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
639 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
640 (sptd->spt_flags & SHM_PAGEABLE)) {
641 hat_flags = HAT_UNLOAD_UNMAP;
644 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
646 amp = sptd->spt_amp;
647 if (sptd->spt_flags & SHM_PAGEABLE)
648 npages = btop(amp->size);
650 ASSERT(amp != NULL);
652 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
653 sp = amp->a_sp;
654 proj = sp->shm_perm.ipc_proj;
655 mutex_enter(&sp->shm_mlock);
657 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
658 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
659 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
660 panic("segspt_free_pages: null app");
661 /*NOTREACHED*/
663 } else {
664 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
665 == NULL)
666 continue;
668 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
669 swap_xlate(ap, &vp, &off);
672 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
673 * the pages won't be having SE_SHARED lock at this
674 * point.
676 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
677 * the pages are still held SE_SHARED locked from the
678 * original segspt_create()
680 * Our goal is to get SE_EXCL lock on each page, remove
681 * permanent lock on it and invalidate the page.
683 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
684 if (hat_flags == HAT_UNLOAD_UNMAP)
685 pp = page_lookup(vp, off, SE_EXCL);
686 else {
687 if ((pp = page_find(vp, off)) == NULL) {
688 panic("segspt_free_pages: "
689 "page not locked");
690 /*NOTREACHED*/
692 if (!page_tryupgrade(pp)) {
693 page_unlock(pp);
694 pp = page_lookup(vp, off, SE_EXCL);
697 if (pp == NULL) {
698 panic("segspt_free_pages: "
699 "page not in the system");
700 /*NOTREACHED*/
702 ASSERT(pp->p_lckcnt > 0);
703 page_pp_unlock(pp, 0, 1);
704 if (pp->p_lckcnt == 0)
705 unlocked_bytes += PAGESIZE;
706 } else {
707 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
708 continue;
711 * It's logical to invalidate the pages here as in most cases
712 * these were created by segspt.
714 if (pp->p_szc != 0) {
715 if (root == 0) {
716 ASSERT(curnpgs == 0);
717 root = 1;
718 rootpp = pp;
719 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
720 ASSERT(pgs > 1);
721 ASSERT(IS_P2ALIGNED(pgs, pgs));
722 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
723 curnpgs--;
724 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
725 ASSERT(curnpgs == 1);
726 ASSERT(page_pptonum(pp) ==
727 page_pptonum(rootpp) + (pgs - 1));
728 page_destroy_pages(rootpp);
729 root = 0;
730 curnpgs = 0;
731 } else {
732 ASSERT(curnpgs > 1);
733 ASSERT(page_pptonum(pp) ==
734 page_pptonum(rootpp) + (pgs - curnpgs));
735 curnpgs--;
737 } else {
738 if (root != 0 || curnpgs != 0) {
739 panic("segspt_free_pages: bad large page");
740 /*NOTREACHED*/
743 * Before destroying the pages, we need to take care
744 * of the rctl locked memory accounting. For that
745 * we need to calculte the unlocked_bytes.
747 if (pp->p_lckcnt > 0)
748 unlocked_bytes += PAGESIZE;
749 /*LINTED: constant in conditional context */
750 VN_DISPOSE(pp, B_INVAL, 0, kcred);
753 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
754 if (unlocked_bytes > 0)
755 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
756 mutex_exit(&sp->shm_mlock);
758 if (root != 0 || curnpgs != 0) {
759 panic("segspt_free_pages: bad large page");
760 /*NOTREACHED*/
764 * mark that pages have been released
766 sptd->spt_realsize = 0;
768 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
769 atomic_add_long(&spt_used, -npages);
770 anon_swap_restore(npages);
775 * Get memory allocation policy info for specified address in given segment
777 static lgrp_mem_policy_info_t *
778 segspt_getpolicy(struct seg *seg, caddr_t addr)
780 struct anon_map *amp;
781 ulong_t anon_index;
782 lgrp_mem_policy_info_t *policy_info;
783 struct spt_data *spt_data;
785 ASSERT(seg != NULL);
788 * Get anon_map from segspt
790 * Assume that no lock needs to be held on anon_map, since
791 * it should be protected by its reference count which must be
792 * nonzero for an existing segment
793 * Need to grab readers lock on policy tree though
795 spt_data = (struct spt_data *)seg->s_data;
796 if (spt_data == NULL)
797 return (NULL);
798 amp = spt_data->spt_amp;
799 ASSERT(amp->refcnt != 0);
802 * Get policy info
804 * Assume starting anon index of 0
806 anon_index = seg_page(seg, addr);
807 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
809 return (policy_info);
813 * DISM only.
814 * Return locked pages over a given range.
816 * We will cache all DISM locked pages and save the pplist for the
817 * entire segment in the ppa field of the underlying DISM segment structure.
818 * Later, during a call to segspt_reclaim() we will use this ppa array
819 * to page_unlock() all of the pages and then we will free this ppa list.
821 /*ARGSUSED*/
822 static int
823 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
824 struct page ***ppp, enum lock_type type, enum seg_rw rw)
826 struct shm_data *shmd = (struct shm_data *)seg->s_data;
827 struct seg *sptseg = shmd->shm_sptseg;
828 struct spt_data *sptd = sptseg->s_data;
829 pgcnt_t pg_idx, npages, tot_npages, npgs;
830 struct page **pplist, **pl, **ppa, *pp;
831 struct anon_map *amp;
832 spgcnt_t an_idx;
833 int ret = ENOTSUP;
834 uint_t pl_built = 0;
835 struct anon *ap;
836 struct vnode *vp;
837 u_offset_t off;
838 pgcnt_t claim_availrmem = 0;
839 uint_t szc;
841 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
842 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
845 * We want to lock/unlock the entire ISM segment. Therefore,
846 * we will be using the underlying sptseg and it's base address
847 * and length for the caching arguments.
849 ASSERT(sptseg);
850 ASSERT(sptd);
852 pg_idx = seg_page(seg, addr);
853 npages = btopr(len);
856 * check if the request is larger than number of pages covered
857 * by amp
859 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
860 *ppp = NULL;
861 return (ENOTSUP);
864 if (type == L_PAGEUNLOCK) {
865 ASSERT(sptd->spt_ppa != NULL);
867 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
868 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
871 * If someone is blocked while unmapping, we purge
872 * segment page cache and thus reclaim pplist synchronously
873 * without waiting for seg_pasync_thread. This speeds up
874 * unmapping in cases where munmap(2) is called, while
875 * raw async i/o is still in progress or where a thread
876 * exits on data fault in a multithreaded application.
878 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
879 (AS_ISUNMAPWAIT(seg->s_as) &&
880 shmd->shm_softlockcnt > 0)) {
881 segspt_purge(seg);
883 return (0);
886 /* The L_PAGELOCK case ... */
888 if (sptd->spt_flags & DISM_PPA_CHANGED) {
889 segspt_purge(seg);
891 * for DISM ppa needs to be rebuild since
892 * number of locked pages could be changed
894 *ppp = NULL;
895 return (ENOTSUP);
899 * First try to find pages in segment page cache, without
900 * holding the segment lock.
902 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
903 S_WRITE, SEGP_FORCE_WIRED);
904 if (pplist != NULL) {
905 ASSERT(sptd->spt_ppa != NULL);
906 ASSERT(sptd->spt_ppa == pplist);
907 ppa = sptd->spt_ppa;
908 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
909 if (ppa[an_idx] == NULL) {
910 seg_pinactive(seg, NULL, seg->s_base,
911 sptd->spt_amp->size, ppa,
912 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
913 *ppp = NULL;
914 return (ENOTSUP);
916 if ((szc = ppa[an_idx]->p_szc) != 0) {
917 npgs = page_get_pagecnt(szc);
918 an_idx = P2ROUNDUP(an_idx + 1, npgs);
919 } else {
920 an_idx++;
924 * Since we cache the entire DISM segment, we want to
925 * set ppp to point to the first slot that corresponds
926 * to the requested addr, i.e. pg_idx.
928 *ppp = &(sptd->spt_ppa[pg_idx]);
929 return (0);
932 mutex_enter(&sptd->spt_lock);
934 * try to find pages in segment page cache with mutex
936 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
937 S_WRITE, SEGP_FORCE_WIRED);
938 if (pplist != NULL) {
939 ASSERT(sptd->spt_ppa != NULL);
940 ASSERT(sptd->spt_ppa == pplist);
941 ppa = sptd->spt_ppa;
942 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
943 if (ppa[an_idx] == NULL) {
944 mutex_exit(&sptd->spt_lock);
945 seg_pinactive(seg, NULL, seg->s_base,
946 sptd->spt_amp->size, ppa,
947 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
948 *ppp = NULL;
949 return (ENOTSUP);
951 if ((szc = ppa[an_idx]->p_szc) != 0) {
952 npgs = page_get_pagecnt(szc);
953 an_idx = P2ROUNDUP(an_idx + 1, npgs);
954 } else {
955 an_idx++;
959 * Since we cache the entire DISM segment, we want to
960 * set ppp to point to the first slot that corresponds
961 * to the requested addr, i.e. pg_idx.
963 mutex_exit(&sptd->spt_lock);
964 *ppp = &(sptd->spt_ppa[pg_idx]);
965 return (0);
967 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
968 SEGP_FORCE_WIRED) == SEGP_FAIL) {
969 mutex_exit(&sptd->spt_lock);
970 *ppp = NULL;
971 return (ENOTSUP);
975 * No need to worry about protections because DISM pages are always rw.
977 pl = pplist = NULL;
978 amp = sptd->spt_amp;
981 * Do we need to build the ppa array?
983 if (sptd->spt_ppa == NULL) {
984 pgcnt_t lpg_cnt = 0;
986 pl_built = 1;
987 tot_npages = btopr(sptd->spt_amp->size);
989 ASSERT(sptd->spt_pcachecnt == 0);
990 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
991 pl = pplist;
993 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
994 for (an_idx = 0; an_idx < tot_npages; ) {
995 ap = anon_get_ptr(amp->ahp, an_idx);
997 * Cache only mlocked pages. For large pages
998 * if one (constituent) page is mlocked
999 * all pages for that large page
1000 * are cached also. This is for quick
1001 * lookups of ppa array;
1003 if ((ap != NULL) && (lpg_cnt != 0 ||
1004 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1006 swap_xlate(ap, &vp, &off);
1007 pp = page_lookup(vp, off, SE_SHARED);
1008 ASSERT(pp != NULL);
1009 if (lpg_cnt == 0) {
1010 lpg_cnt++;
1012 * For a small page, we are done --
1013 * lpg_count is reset to 0 below.
1015 * For a large page, we are guaranteed
1016 * to find the anon structures of all
1017 * constituent pages and a non-zero
1018 * lpg_cnt ensures that we don't test
1019 * for mlock for these. We are done
1020 * when lpg_count reaches (npgs + 1).
1021 * If we are not the first constituent
1022 * page, restart at the first one.
1024 npgs = page_get_pagecnt(pp->p_szc);
1025 if (!IS_P2ALIGNED(an_idx, npgs)) {
1026 an_idx = P2ALIGN(an_idx, npgs);
1027 page_unlock(pp);
1028 continue;
1031 if (++lpg_cnt > npgs)
1032 lpg_cnt = 0;
1035 * availrmem is decremented only
1036 * for unlocked pages
1038 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1039 claim_availrmem++;
1040 pplist[an_idx] = pp;
1042 an_idx++;
1044 ANON_LOCK_EXIT(&amp->a_rwlock);
1046 if (claim_availrmem) {
1047 mutex_enter(&freemem_lock);
1048 if (availrmem < tune.t_minarmem + claim_availrmem) {
1049 mutex_exit(&freemem_lock);
1050 ret = ENOTSUP;
1051 claim_availrmem = 0;
1052 goto insert_fail;
1053 } else {
1054 availrmem -= claim_availrmem;
1056 mutex_exit(&freemem_lock);
1059 sptd->spt_ppa = pl;
1060 } else {
1062 * We already have a valid ppa[].
1064 pl = sptd->spt_ppa;
1067 ASSERT(pl != NULL);
1069 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1070 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1071 segspt_reclaim);
1072 if (ret == SEGP_FAIL) {
1074 * seg_pinsert failed. We return
1075 * ENOTSUP, so that the as_pagelock() code will
1076 * then try the slower F_SOFTLOCK path.
1078 if (pl_built) {
1080 * No one else has referenced the ppa[].
1081 * We created it and we need to destroy it.
1083 sptd->spt_ppa = NULL;
1085 ret = ENOTSUP;
1086 goto insert_fail;
1090 * In either case, we increment softlockcnt on the 'real' segment.
1092 sptd->spt_pcachecnt++;
1093 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1095 ppa = sptd->spt_ppa;
1096 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1097 if (ppa[an_idx] == NULL) {
1098 mutex_exit(&sptd->spt_lock);
1099 seg_pinactive(seg, NULL, seg->s_base,
1100 sptd->spt_amp->size,
1101 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1102 *ppp = NULL;
1103 return (ENOTSUP);
1105 if ((szc = ppa[an_idx]->p_szc) != 0) {
1106 npgs = page_get_pagecnt(szc);
1107 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1108 } else {
1109 an_idx++;
1113 * We can now drop the sptd->spt_lock since the ppa[]
1114 * exists and he have incremented pacachecnt.
1116 mutex_exit(&sptd->spt_lock);
1119 * Since we cache the entire segment, we want to
1120 * set ppp to point to the first slot that corresponds
1121 * to the requested addr, i.e. pg_idx.
1123 *ppp = &(sptd->spt_ppa[pg_idx]);
1124 return (0);
1126 insert_fail:
1128 * We will only reach this code if we tried and failed.
1130 * And we can drop the lock on the dummy seg, once we've failed
1131 * to set up a new ppa[].
1133 mutex_exit(&sptd->spt_lock);
1135 if (pl_built) {
1136 if (claim_availrmem) {
1137 mutex_enter(&freemem_lock);
1138 availrmem += claim_availrmem;
1139 mutex_exit(&freemem_lock);
1143 * We created pl and we need to destroy it.
1145 pplist = pl;
1146 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1147 if (pplist[an_idx] != NULL)
1148 page_unlock(pplist[an_idx]);
1150 kmem_free(pl, sizeof (page_t *) * tot_npages);
1153 if (shmd->shm_softlockcnt <= 0) {
1154 if (AS_ISUNMAPWAIT(seg->s_as)) {
1155 mutex_enter(&seg->s_as->a_contents);
1156 if (AS_ISUNMAPWAIT(seg->s_as)) {
1157 AS_CLRUNMAPWAIT(seg->s_as);
1158 cv_broadcast(&seg->s_as->a_cv);
1160 mutex_exit(&seg->s_as->a_contents);
1163 *ppp = NULL;
1164 return (ret);
1170 * return locked pages over a given range.
1172 * We will cache the entire ISM segment and save the pplist for the
1173 * entire segment in the ppa field of the underlying ISM segment structure.
1174 * Later, during a call to segspt_reclaim() we will use this ppa array
1175 * to page_unlock() all of the pages and then we will free this ppa list.
1177 /*ARGSUSED*/
1178 static int
1179 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1180 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1182 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1183 struct seg *sptseg = shmd->shm_sptseg;
1184 struct spt_data *sptd = sptseg->s_data;
1185 pgcnt_t np, page_index, npages;
1186 caddr_t a, spt_base;
1187 struct page **pplist, **pl, *pp;
1188 struct anon_map *amp;
1189 ulong_t anon_index;
1190 int ret = ENOTSUP;
1191 uint_t pl_built = 0;
1192 struct anon *ap;
1193 struct vnode *vp;
1194 u_offset_t off;
1196 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1197 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1201 * We want to lock/unlock the entire ISM segment. Therefore,
1202 * we will be using the underlying sptseg and it's base address
1203 * and length for the caching arguments.
1205 ASSERT(sptseg);
1206 ASSERT(sptd);
1208 if (sptd->spt_flags & SHM_PAGEABLE) {
1209 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1212 page_index = seg_page(seg, addr);
1213 npages = btopr(len);
1216 * check if the request is larger than number of pages covered
1217 * by amp
1219 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1220 *ppp = NULL;
1221 return (ENOTSUP);
1224 if (type == L_PAGEUNLOCK) {
1226 ASSERT(sptd->spt_ppa != NULL);
1228 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1229 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1232 * If someone is blocked while unmapping, we purge
1233 * segment page cache and thus reclaim pplist synchronously
1234 * without waiting for seg_pasync_thread. This speeds up
1235 * unmapping in cases where munmap(2) is called, while
1236 * raw async i/o is still in progress or where a thread
1237 * exits on data fault in a multithreaded application.
1239 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1240 segspt_purge(seg);
1242 return (0);
1245 /* The L_PAGELOCK case... */
1248 * First try to find pages in segment page cache, without
1249 * holding the segment lock.
1251 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1252 S_WRITE, SEGP_FORCE_WIRED);
1253 if (pplist != NULL) {
1254 ASSERT(sptd->spt_ppa == pplist);
1255 ASSERT(sptd->spt_ppa[page_index]);
1257 * Since we cache the entire ISM segment, we want to
1258 * set ppp to point to the first slot that corresponds
1259 * to the requested addr, i.e. page_index.
1261 *ppp = &(sptd->spt_ppa[page_index]);
1262 return (0);
1265 mutex_enter(&sptd->spt_lock);
1268 * try to find pages in segment page cache
1270 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1271 S_WRITE, SEGP_FORCE_WIRED);
1272 if (pplist != NULL) {
1273 ASSERT(sptd->spt_ppa == pplist);
1275 * Since we cache the entire segment, we want to
1276 * set ppp to point to the first slot that corresponds
1277 * to the requested addr, i.e. page_index.
1279 mutex_exit(&sptd->spt_lock);
1280 *ppp = &(sptd->spt_ppa[page_index]);
1281 return (0);
1284 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1285 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1286 mutex_exit(&sptd->spt_lock);
1287 *ppp = NULL;
1288 return (ENOTSUP);
1292 * No need to worry about protections because ISM pages
1293 * are always rw.
1295 pl = pplist = NULL;
1298 * Do we need to build the ppa array?
1300 if (sptd->spt_ppa == NULL) {
1301 ASSERT(sptd->spt_ppa == pplist);
1303 spt_base = sptseg->s_base;
1304 pl_built = 1;
1307 * availrmem is decremented once during anon_swap_adjust()
1308 * and is incremented during the anon_unresv(), which is
1309 * called from shm_rm_amp() when the segment is destroyed.
1311 amp = sptd->spt_amp;
1312 ASSERT(amp != NULL);
1314 /* pcachecnt is protected by sptd->spt_lock */
1315 ASSERT(sptd->spt_pcachecnt == 0);
1316 pplist = kmem_zalloc(sizeof (page_t *)
1317 * btopr(sptd->spt_amp->size), KM_SLEEP);
1318 pl = pplist;
1320 anon_index = seg_page(sptseg, spt_base);
1322 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1323 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1324 a += PAGESIZE, anon_index++, pplist++) {
1325 ap = anon_get_ptr(amp->ahp, anon_index);
1326 ASSERT(ap != NULL);
1327 swap_xlate(ap, &vp, &off);
1328 pp = page_lookup(vp, off, SE_SHARED);
1329 ASSERT(pp != NULL);
1330 *pplist = pp;
1332 ANON_LOCK_EXIT(&amp->a_rwlock);
1334 if (a < (spt_base + sptd->spt_amp->size)) {
1335 ret = ENOTSUP;
1336 goto insert_fail;
1338 sptd->spt_ppa = pl;
1339 } else {
1341 * We already have a valid ppa[].
1343 pl = sptd->spt_ppa;
1346 ASSERT(pl != NULL);
1348 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1349 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1350 segspt_reclaim);
1351 if (ret == SEGP_FAIL) {
1353 * seg_pinsert failed. We return
1354 * ENOTSUP, so that the as_pagelock() code will
1355 * then try the slower F_SOFTLOCK path.
1357 if (pl_built) {
1359 * No one else has referenced the ppa[].
1360 * We created it and we need to destroy it.
1362 sptd->spt_ppa = NULL;
1364 ret = ENOTSUP;
1365 goto insert_fail;
1369 * In either case, we increment softlockcnt on the 'real' segment.
1371 sptd->spt_pcachecnt++;
1372 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1375 * We can now drop the sptd->spt_lock since the ppa[]
1376 * exists and he have incremented pacachecnt.
1378 mutex_exit(&sptd->spt_lock);
1381 * Since we cache the entire segment, we want to
1382 * set ppp to point to the first slot that corresponds
1383 * to the requested addr, i.e. page_index.
1385 *ppp = &(sptd->spt_ppa[page_index]);
1386 return (0);
1388 insert_fail:
1390 * We will only reach this code if we tried and failed.
1392 * And we can drop the lock on the dummy seg, once we've failed
1393 * to set up a new ppa[].
1395 mutex_exit(&sptd->spt_lock);
1397 if (pl_built) {
1399 * We created pl and we need to destroy it.
1401 pplist = pl;
1402 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1403 while (np) {
1404 page_unlock(*pplist);
1405 np--;
1406 pplist++;
1408 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1410 if (shmd->shm_softlockcnt <= 0) {
1411 if (AS_ISUNMAPWAIT(seg->s_as)) {
1412 mutex_enter(&seg->s_as->a_contents);
1413 if (AS_ISUNMAPWAIT(seg->s_as)) {
1414 AS_CLRUNMAPWAIT(seg->s_as);
1415 cv_broadcast(&seg->s_as->a_cv);
1417 mutex_exit(&seg->s_as->a_contents);
1420 *ppp = NULL;
1421 return (ret);
1425 * purge any cached pages in the I/O page cache
1427 static void
1428 segspt_purge(struct seg *seg)
1430 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1433 static int
1434 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1435 enum seg_rw rw, int async)
1437 struct seg *seg = (struct seg *)ptag;
1438 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1439 struct seg *sptseg;
1440 struct spt_data *sptd;
1441 pgcnt_t npages, i, free_availrmem = 0;
1442 int done = 0;
1444 #ifdef lint
1445 addr = addr;
1446 #endif
1447 sptseg = shmd->shm_sptseg;
1448 sptd = sptseg->s_data;
1449 npages = (len >> PAGESHIFT);
1450 ASSERT(npages);
1451 ASSERT(sptd->spt_pcachecnt != 0);
1452 ASSERT(sptd->spt_ppa == pplist);
1453 ASSERT(npages == btopr(sptd->spt_amp->size));
1454 ASSERT(async || AS_LOCK_HELD(seg->s_as));
1457 * Acquire the lock on the dummy seg and destroy the
1458 * ppa array IF this is the last pcachecnt.
1460 mutex_enter(&sptd->spt_lock);
1461 if (--sptd->spt_pcachecnt == 0) {
1462 for (i = 0; i < npages; i++) {
1463 if (pplist[i] == NULL) {
1464 continue;
1466 if (rw == S_WRITE) {
1467 hat_setrefmod(pplist[i]);
1468 } else {
1469 hat_setref(pplist[i]);
1471 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1472 (sptd->spt_ppa_lckcnt[i] == 0))
1473 free_availrmem++;
1474 page_unlock(pplist[i]);
1476 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1477 mutex_enter(&freemem_lock);
1478 availrmem += free_availrmem;
1479 mutex_exit(&freemem_lock);
1482 * Since we want to cach/uncache the entire ISM segment,
1483 * we will track the pplist in a segspt specific field
1484 * ppa, that is initialized at the time we add an entry to
1485 * the cache.
1487 ASSERT(sptd->spt_pcachecnt == 0);
1488 kmem_free(pplist, sizeof (page_t *) * npages);
1489 sptd->spt_ppa = NULL;
1490 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1491 sptd->spt_gen++;
1492 cv_broadcast(&sptd->spt_cv);
1493 done = 1;
1495 mutex_exit(&sptd->spt_lock);
1498 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1499 * may not hold AS lock (in this case async argument is not 0). This
1500 * means if softlockcnt drops to 0 after the decrement below address
1501 * space may get freed. We can't allow it since after softlock
1502 * derement to 0 we still need to access as structure for possible
1503 * wakeup of unmap waiters. To prevent the disappearance of as we take
1504 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1505 * this mutex as a barrier to make sure this routine completes before
1506 * segment is freed.
1508 * The second complication we have to deal with in async case is a
1509 * possibility of missed wake up of unmap wait thread. When we don't
1510 * hold as lock here we may take a_contents lock before unmap wait
1511 * thread that was first to see softlockcnt was still not 0. As a
1512 * result we'll fail to wake up an unmap wait thread. To avoid this
1513 * race we set nounmapwait flag in as structure if we drop softlockcnt
1514 * to 0 if async is not 0. unmapwait thread
1515 * will not block if this flag is set.
1517 if (async)
1518 mutex_enter(&shmd->shm_segfree_syncmtx);
1521 * Now decrement softlockcnt.
1523 ASSERT(shmd->shm_softlockcnt > 0);
1524 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1526 if (shmd->shm_softlockcnt <= 0) {
1527 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1528 mutex_enter(&seg->s_as->a_contents);
1529 if (async)
1530 AS_SETNOUNMAPWAIT(seg->s_as);
1531 if (AS_ISUNMAPWAIT(seg->s_as)) {
1532 AS_CLRUNMAPWAIT(seg->s_as);
1533 cv_broadcast(&seg->s_as->a_cv);
1535 mutex_exit(&seg->s_as->a_contents);
1539 if (async)
1540 mutex_exit(&shmd->shm_segfree_syncmtx);
1542 return (done);
1546 * Do a F_SOFTUNLOCK call over the range requested.
1547 * The range must have already been F_SOFTLOCK'ed.
1549 * The calls to acquire and release the anon map lock mutex were
1550 * removed in order to avoid a deadly embrace during a DR
1551 * memory delete operation. (Eg. DR blocks while waiting for a
1552 * exclusive lock on a page that is being used for kaio; the
1553 * thread that will complete the kaio and call segspt_softunlock
1554 * blocks on the anon map lock; another thread holding the anon
1555 * map lock blocks on another page lock via the segspt_shmfault
1556 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1558 * The appropriateness of the removal is based upon the following:
1559 * 1. If we are holding a segment's reader lock and the page is held
1560 * shared, then the corresponding element in anonmap which points to
1561 * anon struct cannot change and there is no need to acquire the
1562 * anonymous map lock.
1563 * 2. Threads in segspt_softunlock have a reader lock on the segment
1564 * and already have the shared page lock, so we are guaranteed that
1565 * the anon map slot cannot change and therefore can call anon_get_ptr()
1566 * without grabbing the anonymous map lock.
1567 * 3. Threads that softlock a shared page break copy-on-write, even if
1568 * its a read. Thus cow faults can be ignored with respect to soft
1569 * unlocking, since the breaking of cow means that the anon slot(s) will
1570 * not be shared.
1572 static void
1573 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1574 size_t len, enum seg_rw rw)
1576 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1577 struct seg *sptseg;
1578 struct spt_data *sptd;
1579 page_t *pp;
1580 caddr_t adr;
1581 struct vnode *vp;
1582 u_offset_t offset;
1583 ulong_t anon_index;
1584 struct anon_map *amp; /* XXX - for locknest */
1585 struct anon *ap = NULL;
1586 pgcnt_t npages;
1588 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1590 sptseg = shmd->shm_sptseg;
1591 sptd = sptseg->s_data;
1594 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1595 * and therefore their pages are SE_SHARED locked
1596 * for the entire life of the segment.
1598 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1599 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1600 goto softlock_decrement;
1604 * Any thread is free to do a page_find and
1605 * page_unlock() on the pages within this seg.
1607 * We are already holding the as->a_lock on the user's
1608 * real segment, but we need to hold the a_lock on the
1609 * underlying dummy as. This is mostly to satisfy the
1610 * underlying HAT layer.
1612 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1613 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1614 AS_LOCK_EXIT(sptseg->s_as);
1616 amp = sptd->spt_amp;
1617 ASSERT(amp != NULL);
1618 anon_index = seg_page(sptseg, sptseg_addr);
1620 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1621 ap = anon_get_ptr(amp->ahp, anon_index++);
1622 ASSERT(ap != NULL);
1623 swap_xlate(ap, &vp, &offset);
1626 * Use page_find() instead of page_lookup() to
1627 * find the page since we know that it has a
1628 * "shared" lock.
1630 pp = page_find(vp, offset);
1631 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1632 if (pp == NULL) {
1633 panic("segspt_softunlock: "
1634 "addr %p, ap %p, vp %p, off %llx",
1635 (void *)adr, (void *)ap, (void *)vp, offset);
1636 /*NOTREACHED*/
1639 if (rw == S_WRITE) {
1640 hat_setrefmod(pp);
1641 } else if (rw != S_OTHER) {
1642 hat_setref(pp);
1644 page_unlock(pp);
1647 softlock_decrement:
1648 npages = btopr(len);
1649 ASSERT(shmd->shm_softlockcnt >= npages);
1650 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1651 if (shmd->shm_softlockcnt == 0) {
1653 * All SOFTLOCKS are gone. Wakeup any waiting
1654 * unmappers so they can try again to unmap.
1655 * Check for waiters first without the mutex
1656 * held so we don't always grab the mutex on
1657 * softunlocks.
1659 if (AS_ISUNMAPWAIT(seg->s_as)) {
1660 mutex_enter(&seg->s_as->a_contents);
1661 if (AS_ISUNMAPWAIT(seg->s_as)) {
1662 AS_CLRUNMAPWAIT(seg->s_as);
1663 cv_broadcast(&seg->s_as->a_cv);
1665 mutex_exit(&seg->s_as->a_contents);
1671 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1673 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1674 struct shm_data *shmd;
1675 struct anon_map *shm_amp = shmd_arg->shm_amp;
1676 struct spt_data *sptd;
1677 int error = 0;
1679 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1681 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1682 if (shmd == NULL)
1683 return (ENOMEM);
1685 shmd->shm_sptas = shmd_arg->shm_sptas;
1686 shmd->shm_amp = shm_amp;
1687 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1689 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1690 NULL, 0, seg->s_size);
1692 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1694 seg->s_data = (void *)shmd;
1695 seg->s_ops = &segspt_shmops;
1696 seg->s_szc = shmd->shm_sptseg->s_szc;
1697 sptd = shmd->shm_sptseg->s_data;
1699 if (sptd->spt_flags & SHM_PAGEABLE) {
1700 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1701 KM_NOSLEEP)) == NULL) {
1702 seg->s_data = (void *)NULL;
1703 kmem_free(shmd, (sizeof (*shmd)));
1704 return (ENOMEM);
1706 shmd->shm_lckpgs = 0;
1707 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1708 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1709 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1710 seg->s_size, seg->s_szc)) != 0) {
1711 kmem_free(shmd->shm_vpage,
1712 btopr(shm_amp->size));
1715 } else {
1716 error = hat_share(seg->s_as->a_hat, seg->s_base,
1717 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1718 seg->s_size, seg->s_szc);
1720 if (error) {
1721 seg->s_szc = 0;
1722 seg->s_data = (void *)NULL;
1723 kmem_free(shmd, (sizeof (*shmd)));
1724 } else {
1725 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1726 shm_amp->refcnt++;
1727 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1729 return (error);
1733 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1735 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1736 int reclaim = 1;
1738 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1739 retry:
1740 if (shmd->shm_softlockcnt > 0) {
1741 if (reclaim == 1) {
1742 segspt_purge(seg);
1743 reclaim = 0;
1744 goto retry;
1746 return (EAGAIN);
1749 if (ssize != seg->s_size) {
1750 #ifdef DEBUG
1751 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1752 ssize, seg->s_size);
1753 #endif
1754 return (EINVAL);
1757 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1758 NULL, 0);
1759 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1761 seg_free(seg);
1763 return (0);
1766 void
1767 segspt_shmfree(struct seg *seg)
1769 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1770 struct anon_map *shm_amp = shmd->shm_amp;
1772 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1774 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1775 MC_UNLOCK, NULL, 0);
1778 * Need to increment refcnt when attaching
1779 * and decrement when detaching because of dup().
1781 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1782 shm_amp->refcnt--;
1783 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1785 if (shmd->shm_vpage) { /* only for DISM */
1786 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1787 shmd->shm_vpage = NULL;
1791 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1792 * still working with this segment without holding as lock.
1794 ASSERT(shmd->shm_softlockcnt == 0);
1795 mutex_enter(&shmd->shm_segfree_syncmtx);
1796 mutex_destroy(&shmd->shm_segfree_syncmtx);
1798 kmem_free(shmd, sizeof (*shmd));
1801 /*ARGSUSED*/
1803 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1805 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1808 * Shared page table is more than shared mapping.
1809 * Individual process sharing page tables can't change prot
1810 * because there is only one set of page tables.
1811 * This will be allowed after private page table is
1812 * supported.
1814 /* need to return correct status error? */
1815 return (0);
1819 faultcode_t
1820 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1821 size_t len, enum fault_type type, enum seg_rw rw)
1823 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1824 struct seg *sptseg = shmd->shm_sptseg;
1825 struct as *curspt = shmd->shm_sptas;
1826 struct spt_data *sptd = sptseg->s_data;
1827 pgcnt_t npages;
1828 size_t size;
1829 caddr_t segspt_addr, shm_addr;
1830 page_t **ppa;
1831 int i;
1832 ulong_t an_idx = 0;
1833 int err = 0;
1834 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1835 size_t pgsz;
1836 pgcnt_t pgcnt;
1837 caddr_t a;
1838 pgcnt_t pidx;
1840 #ifdef lint
1841 hat = hat;
1842 #endif
1843 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1846 * Because of the way spt is implemented
1847 * the realsize of the segment does not have to be
1848 * equal to the segment size itself. The segment size is
1849 * often in multiples of a page size larger than PAGESIZE.
1850 * The realsize is rounded up to the nearest PAGESIZE
1851 * based on what the user requested. This is a bit of
1852 * ungliness that is historical but not easily fixed
1853 * without re-designing the higher levels of ISM.
1855 ASSERT(addr >= seg->s_base);
1856 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1857 return (FC_NOMAP);
1859 * For all of the following cases except F_PROT, we need to
1860 * make any necessary adjustments to addr and len
1861 * and get all of the necessary page_t's into an array called ppa[].
1863 * The code in shmat() forces base addr and len of ISM segment
1864 * to be aligned to largest page size supported. Therefore,
1865 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1866 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1867 * in large pagesize chunks, or else we will screw up the HAT
1868 * layer by calling hat_memload_array() with differing page sizes
1869 * over a given virtual range.
1871 pgsz = page_get_pagesize(sptseg->s_szc);
1872 pgcnt = page_get_pagecnt(sptseg->s_szc);
1873 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1874 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1875 npages = btopr(size);
1878 * Now we need to convert from addr in segshm to addr in segspt.
1880 an_idx = seg_page(seg, shm_addr);
1881 segspt_addr = sptseg->s_base + ptob(an_idx);
1883 ASSERT((segspt_addr + ptob(npages)) <=
1884 (sptseg->s_base + sptd->spt_realsize));
1885 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1887 switch (type) {
1889 case F_SOFTLOCK:
1891 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1893 * Fall through to the F_INVAL case to load up the hat layer
1894 * entries with the HAT_LOAD_LOCK flag.
1896 /* FALLTHRU */
1897 case F_INVAL:
1899 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1900 return (FC_NOMAP);
1902 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1904 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1905 if (err != 0) {
1906 if (type == F_SOFTLOCK) {
1907 atomic_add_long((ulong_t *)(
1908 &(shmd->shm_softlockcnt)), -npages);
1910 goto dism_err;
1912 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1913 a = segspt_addr;
1914 pidx = 0;
1915 if (type == F_SOFTLOCK) {
1918 * Load up the translation keeping it
1919 * locked and don't unlock the page.
1921 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1922 hat_memload_array(sptseg->s_as->a_hat,
1923 a, pgsz, &ppa[pidx], sptd->spt_prot,
1924 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1926 } else {
1928 * Migrate pages marked for migration
1930 if (lgrp_optimizations())
1931 page_migrate(seg, shm_addr, ppa, npages);
1933 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1934 hat_memload_array(sptseg->s_as->a_hat,
1935 a, pgsz, &ppa[pidx],
1936 sptd->spt_prot,
1937 HAT_LOAD_SHARE);
1941 * And now drop the SE_SHARED lock(s).
1943 if (dyn_ism_unmap) {
1944 for (i = 0; i < npages; i++) {
1945 page_unlock(ppa[i]);
1950 if (!dyn_ism_unmap) {
1951 if (hat_share(seg->s_as->a_hat, shm_addr,
1952 curspt->a_hat, segspt_addr, ptob(npages),
1953 seg->s_szc) != 0) {
1954 panic("hat_share err in DISM fault");
1955 /* NOTREACHED */
1957 if (type == F_INVAL) {
1958 for (i = 0; i < npages; i++) {
1959 page_unlock(ppa[i]);
1963 AS_LOCK_EXIT(sptseg->s_as);
1964 dism_err:
1965 kmem_free(ppa, npages * sizeof (page_t *));
1966 return (err);
1968 case F_SOFTUNLOCK:
1971 * This is a bit ugly, we pass in the real seg pointer,
1972 * but the segspt_addr is the virtual address within the
1973 * dummy seg.
1975 segspt_softunlock(seg, segspt_addr, size, rw);
1976 return (0);
1978 case F_PROT:
1981 * This takes care of the unusual case where a user
1982 * allocates a stack in shared memory and a register
1983 * window overflow is written to that stack page before
1984 * it is otherwise modified.
1986 * We can get away with this because ISM segments are
1987 * always rw. Other than this unusual case, there
1988 * should be no instances of protection violations.
1990 return (0);
1992 default:
1993 #ifdef DEBUG
1994 panic("segspt_dismfault default type?");
1995 #else
1996 return (FC_NOMAP);
1997 #endif
2002 faultcode_t
2003 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2004 size_t len, enum fault_type type, enum seg_rw rw)
2006 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2007 struct seg *sptseg = shmd->shm_sptseg;
2008 struct as *curspt = shmd->shm_sptas;
2009 struct spt_data *sptd = sptseg->s_data;
2010 pgcnt_t npages;
2011 size_t size;
2012 caddr_t sptseg_addr, shm_addr;
2013 page_t *pp, **ppa;
2014 int i;
2015 u_offset_t offset;
2016 ulong_t anon_index = 0;
2017 struct vnode *vp;
2018 struct anon_map *amp; /* XXX - for locknest */
2019 struct anon *ap = NULL;
2020 size_t pgsz;
2021 pgcnt_t pgcnt;
2022 caddr_t a;
2023 pgcnt_t pidx;
2024 size_t sz;
2026 #ifdef lint
2027 hat = hat;
2028 #endif
2030 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2032 if (sptd->spt_flags & SHM_PAGEABLE) {
2033 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2037 * Because of the way spt is implemented
2038 * the realsize of the segment does not have to be
2039 * equal to the segment size itself. The segment size is
2040 * often in multiples of a page size larger than PAGESIZE.
2041 * The realsize is rounded up to the nearest PAGESIZE
2042 * based on what the user requested. This is a bit of
2043 * ungliness that is historical but not easily fixed
2044 * without re-designing the higher levels of ISM.
2046 ASSERT(addr >= seg->s_base);
2047 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2048 return (FC_NOMAP);
2050 * For all of the following cases except F_PROT, we need to
2051 * make any necessary adjustments to addr and len
2052 * and get all of the necessary page_t's into an array called ppa[].
2054 * The code in shmat() forces base addr and len of ISM segment
2055 * to be aligned to largest page size supported. Therefore,
2056 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2057 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2058 * in large pagesize chunks, or else we will screw up the HAT
2059 * layer by calling hat_memload_array() with differing page sizes
2060 * over a given virtual range.
2062 pgsz = page_get_pagesize(sptseg->s_szc);
2063 pgcnt = page_get_pagecnt(sptseg->s_szc);
2064 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2065 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2066 npages = btopr(size);
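	/*
	 * For illustration (assuming a 4MB underlying page size and a
	 * 4MB-aligned seg->s_base, as shmat() arranges): a fault at
	 * addr = s_base + 0x401000 with len = 0x2000 gives
	 * shm_addr = s_base + 0x400000 (P2ALIGN rounds down),
	 * size = 0x400000 (P2ROUNDUP rounds 0x3000 up to one large
	 * page), and npages = 1024 with a 4KB PAGESIZE, so the entire
	 * large page containing the fault is handled as one unit.
	 */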
2069 * Now we need to convert from addr in segshm to addr in segspt.
2071 anon_index = seg_page(seg, shm_addr);
2072 sptseg_addr = sptseg->s_base + ptob(anon_index);
2075 * And now we may have to adjust npages downward if we have
2076 * exceeded the realsize of the segment or initial anon
2077 * allocations.
2079 if ((sptseg_addr + ptob(npages)) >
2080 (sptseg->s_base + sptd->spt_realsize))
2081 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2083 npages = btopr(size);
2085 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2086 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2088 switch (type) {
2090 case F_SOFTLOCK:
2093 * availrmem is decremented once during anon_swap_adjust()
2094 * and is incremented during the anon_unresv(), which is
2095 * called from shm_rm_amp() when the segment is destroyed.
2097 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2099 * Some platforms assume that ISM pages are SE_SHARED
2100 * locked for the entire life of the segment.
2102 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2103 return (0);
2105 * Fall through to the F_INVAL case to load up the hat layer
2106 * entries with the HAT_LOAD_LOCK flag.
2109 /* FALLTHRU */
2110 case F_INVAL:
2112 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2113 return (FC_NOMAP);
2116 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2117 * may still rely on this call to hat_share(). That
2118 * would imply that those HATs can fault on a
2119 * HAT_LOAD_LOCK translation, which would seem
2120 * contradictory.
2122 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2123 if (hat_share(seg->s_as->a_hat, seg->s_base,
2124 curspt->a_hat, sptseg->s_base,
2125 sptseg->s_size, sptseg->s_szc) != 0) {
2126 panic("hat_share error in ISM fault");
2127 /*NOTREACHED*/
2129 return (0);
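	/*
	 * The hat_share() call above covers HATs without
	 * DYNAMIC_ISM_UNMAP: the whole dummy segment is presumably
	 * (re)shared into the faulting address space in one call
	 * instead of loading individual translations below, and any
	 * failure to do so is fatal.
	 */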
2131 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2134 * I see no need to lock the real seg
2135 * here, because all of our work will be on the underlying
2136 * dummy seg.
2138 * sptseg_addr and npages now account for large pages.
2140 amp = sptd->spt_amp;
2141 ASSERT(amp != NULL);
2142 anon_index = seg_page(sptseg, sptseg_addr);
2144 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2145 for (i = 0; i < npages; i++) {
2146 ap = anon_get_ptr(amp->ahp, anon_index++);
2147 ASSERT(ap != NULL);
2148 swap_xlate(ap, &vp, &offset);
2149 pp = page_lookup(vp, offset, SE_SHARED);
2150 ASSERT(pp != NULL);
2151 ppa[i] = pp;
2153 ANON_LOCK_EXIT(&amp->a_rwlock);
2154 ASSERT(i == npages);
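	/*
	 * At this point ppa[] holds every constituent page of the
	 * faulting range, each looked up SE_SHARED via its anon slot's
	 * swap vnode/offset.  The pages are expected to already exist
	 * (hence the ASSERTs above), presumably having been allocated
	 * when the ISM segment was created.
	 */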
2157 * We are already holding the as->a_lock on the user's
2158 * real segment, but we need to hold the a_lock on the
2159 * underlying dummy as. This is mostly to satisfy the
2160 * underlying HAT layer.
2162 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
2163 a = sptseg_addr;
2164 pidx = 0;
2165 if (type == F_SOFTLOCK) {
2167 * Load up the translation, keeping it
2168 * locked, and don't unlock the page.
2170 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2171 sz = MIN(pgsz, ptob(npages - pidx));
2172 hat_memload_array(sptseg->s_as->a_hat, a,
2173 sz, &ppa[pidx], sptd->spt_prot,
2174 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2176 } else {
2178 * Migrate pages marked for migration.
2180 if (lgrp_optimizations())
2181 page_migrate(seg, shm_addr, ppa, npages);
2183 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2184 sz = MIN(pgsz, ptob(npages - pidx));
2185 hat_memload_array(sptseg->s_as->a_hat,
2186 a, sz, &ppa[pidx],
2187 sptd->spt_prot, HAT_LOAD_SHARE);
2191 * And now drop the SE_SHARED lock(s).
2193 for (i = 0; i < npages; i++)
2194 page_unlock(ppa[i]);
2196 AS_LOCK_EXIT(sptseg->s_as);
2198 kmem_free(ppa, sizeof (page_t *) * npages);
2199 return (0);
2200 case F_SOFTUNLOCK:
2203 * This is a bit ugly: we pass in the real seg pointer,
2204 * but sptseg_addr is the virtual address within the
2205 * dummy seg.
2207 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2208 return (0);
2210 case F_PROT:
2213 * This takes care of the unusual case where a user
2214 * allocates a stack in shared memory and a register
2215 * window overflow is written to that stack page before
2216 * it is otherwise modified.
2218 * We can get away with this because ISM segments are
2219 * always rw. Other than this unusual case, there
2220 * should be no instances of protection violations.
2222 return (0);
2224 default:
2225 #ifdef DEBUG
2226 cmn_err(CE_WARN, "segspt_shmfault default type?");
2227 #endif
2228 return (FC_NOMAP);
2232 /*ARGSUSED*/
2233 static faultcode_t
2234 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2236 return (0);
2239 /*ARGSUSED*/
2240 static int
2241 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2243 return (0);
2246 /*ARGSUSED*/
2247 static size_t
2248 segspt_shmswapout(struct seg *seg)
2250 return (0);
2254 * duplicate the shared page tables
2257 segspt_shmdup(struct seg *seg, struct seg *newseg)
2259 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2260 struct anon_map *amp = shmd->shm_amp;
2261 struct shm_data *shmd_new;
2262 struct seg *spt_seg = shmd->shm_sptseg;
2263 struct spt_data *sptd = spt_seg->s_data;
2264 int error = 0;
2266 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
2268 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2269 newseg->s_data = (void *)shmd_new;
2270 shmd_new->shm_sptas = shmd->shm_sptas;
2271 shmd_new->shm_amp = amp;
2272 shmd_new->shm_sptseg = shmd->shm_sptseg;
2273 newseg->s_ops = &segspt_shmops;
2274 newseg->s_szc = seg->s_szc;
2275 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2277 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2278 amp->refcnt++;
2279 ANON_LOCK_EXIT(&amp->a_rwlock);
2281 if (sptd->spt_flags & SHM_PAGEABLE) {
2282 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2283 shmd_new->shm_lckpgs = 0;
2284 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2285 if ((error = hat_share(newseg->s_as->a_hat,
2286 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2287 seg->s_size, seg->s_szc)) != 0) {
2288 kmem_free(shmd_new->shm_vpage,
2289 btopr(amp->size));
2292 return (error);
2293 } else {
2294 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2295 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2296 seg->s_szc));
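/*
 * Note on segspt_shmdup() above: the child gets its own shm_data but
 * shares the parent's dummy segment, spt address space and anon_map
 * (whose refcnt is bumped).  For DISM a fresh, zeroed shm_vpage lock
 * bitmap is allocated, apparently because DISM lock accounting is kept
 * per attach; finally the child's HAT is wired to the dummy segment
 * via hat_share().
 */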
2301 /*ARGSUSED*/
2303 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2305 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2306 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2308 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2311 * ISM segment is always rw.
2313 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2317 * Return an array of locked large pages; for empty slots, allocate
2318 * private zero-filled anon pages.
2320 static int
2321 spt_anon_getpages(
2322 struct seg *sptseg,
2323 caddr_t sptaddr,
2324 size_t len,
2325 page_t *ppa[])
2327 struct spt_data *sptd = sptseg->s_data;
2328 struct anon_map *amp = sptd->spt_amp;
2329 enum seg_rw rw = sptd->spt_prot;
2330 uint_t szc = sptseg->s_szc;
2331 size_t pg_sz, share_sz = page_get_pagesize(szc);
2332 pgcnt_t lp_npgs;
2333 caddr_t lp_addr, e_sptaddr;
2334 uint_t vpprot, ppa_szc = 0;
2335 struct vpage *vpage = NULL;
2336 ulong_t j, ppa_idx;
2337 int err, ierr = 0;
2338 pgcnt_t an_idx;
2339 anon_sync_obj_t cookie;
2340 int anon_locked = 0;
2341 pgcnt_t amp_pgs;
2344 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2345 ASSERT(len != 0);
2347 pg_sz = share_sz;
2348 lp_npgs = btop(pg_sz);
2349 lp_addr = sptaddr;
2350 e_sptaddr = sptaddr + len;
2351 an_idx = seg_page(sptseg, sptaddr);
2352 ppa_idx = 0;
2354 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2356 amp_pgs = page_get_pagecnt(amp->a_szc);
2358 /*CONSTCOND*/
2359 while (1) {
2360 for (; lp_addr < e_sptaddr;
2361 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2364 * If we're currently locked, and we get to a new
2365 * page, unlock our current anon chunk.
2367 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2368 anon_array_exit(&cookie);
2369 anon_locked = 0;
2371 if (!anon_locked) {
2372 anon_array_enter(amp, an_idx, &cookie);
2373 anon_locked = 1;
2375 ppa_szc = (uint_t)-1;
2376 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2377 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2378 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2380 if (ierr != 0) {
2381 if (ierr > 0) {
2382 err = FC_MAKE_ERR(ierr);
2383 goto lpgs_err;
2385 break;
2388 if (lp_addr == e_sptaddr) {
2389 break;
2391 ASSERT(lp_addr < e_sptaddr);
2394 * ierr == -1 means we failed to allocate a large page,
2395 * so do a size down operation.
2397 * ierr == -2 means some other process that privately shares
2398 * pages with this process has allocated a larger page and we
2399 * need to retry with larger pages. So do a size up
2400 * operation. This relies on the fact that large pages are
2401 * never partially shared i.e. if we share any constituent
2402 * page of a large page with another process we must share the
2403 * entire large page. Note this cannot happen for SOFTLOCK
2404 * case, unless the current address (lp_addr) is at the beginning
2405 * of the next page size boundary because the other process
2406 * couldn't have relocated locked pages.
2408 ASSERT(ierr == -1 || ierr == -2);
2409 if (segvn_anypgsz) {
2410 ASSERT(ierr == -2 || szc != 0);
2411 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2412 szc = (ierr == -1) ? szc - 1 : szc + 1;
2413 } else {
2415 * For faults and segvn_anypgsz == 0
2416 * we need to be careful not to loop forever
2417 * if existing page is found with szc other
2418 * than 0 or seg->s_szc. This could be due
2419 * to page relocations on behalf of DR or
2420 * more likely large page creation. For this
2421 * case simply re-size to existing page's szc
2422 * if returned by anon_map_getpages().
2424 if (ppa_szc == (uint_t)-1) {
2425 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2426 } else {
2427 ASSERT(ppa_szc <= sptseg->s_szc);
2428 ASSERT(ierr == -2 || ppa_szc < szc);
2429 ASSERT(ierr == -1 || ppa_szc > szc);
2430 szc = ppa_szc;
2433 pg_sz = page_get_pagesize(szc);
2434 lp_npgs = btop(pg_sz);
2435 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2437 if (anon_locked) {
2438 anon_array_exit(&cookie);
2440 ANON_LOCK_EXIT(&amp->a_rwlock);
2441 return (0);
2443 lpgs_err:
2444 if (anon_locked) {
2445 anon_array_exit(&cookie);
2447 ANON_LOCK_EXIT(&amp->a_rwlock);
2448 for (j = 0; j < ppa_idx; j++)
2449 page_unlock(ppa[j]);
2450 return (err);
2454 * count the number of bytes in a set of spt pages that are currently not
2455 * locked
2457 static rctl_qty_t
2458 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2460 ulong_t i;
2461 rctl_qty_t unlocked = 0;
2463 for (i = 0; i < npages; i++) {
2464 if (ppa[i]->p_lckcnt == 0)
2465 unlocked += PAGESIZE;
2467 return (unlocked);
2470 extern u_longlong_t randtick(void);
2471 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2472 #define NLCK (NCPU_P2)
2473 /* Random number with a range [0, n-1], n must be power of two */
2474 #define RAND_P2(n) \
2475 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
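/*
 * RAND_P2(n) mixes bits of the current thread pointer with randtick()
 * and masks the result, so for a power-of-two n it yields a value in
 * [0, n-1] that differs between threads; e.g. RAND_P2(8) is some value
 * from 0 through 7.
 */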
2478 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2479 page_t **ppa, ulong_t *lockmap, size_t pos,
2480 rctl_qty_t *locked)
2482 struct shm_data *shmd = seg->s_data;
2483 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2484 ulong_t i;
2485 int kernel;
2486 pgcnt_t nlck = 0;
2487 int rv = 0;
2488 int use_reserved = 1;
2490 /* return the number of bytes actually locked */
2491 *locked = 0;
2494 * To avoid contention on freemem_lock, availrmem and pages_locked
2495 * global counters are updated only every nlck locked pages instead of
2496 * every time. Reserve nlck locks up front and deduct from this
2497 * reservation for each page that requires a lock. When the reservation
2498 * is consumed, reserve again. nlck is randomized, so the competing
2499 * threads do not fall into a cyclic lock contention pattern. When
2500 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2501 * is used to lock pages.
2503 for (i = 0; i < npages; anon_index++, pos++, i++) {
2504 if (nlck == 0 && use_reserved == 1) {
2505 nlck = NLCK + RAND_P2(NLCK);
2506 /* if fewer loops left, decrease nlck */
2507 nlck = MIN(nlck, npages - i);
2509 * Reserve nlck locks up front and deduct from this
2510 * reservation for each page that requires a lock. When
2511 * the reservation is consumed, reserve again.
2513 mutex_enter(&freemem_lock);
2514 if ((availrmem - nlck) < pages_pp_maximum) {
2515 /* Do not do advance memory reserves */
2516 use_reserved = 0;
2517 } else {
2518 availrmem -= nlck;
2519 pages_locked += nlck;
2521 mutex_exit(&freemem_lock);
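		/*
		 * Roughly: each refill reserves between NLCK and
		 * 2*NLCK-1 locks (clamped to the pages left), debiting
		 * availrmem for the whole batch up front.  If that
		 * would push availrmem below pages_pp_maximum, advance
		 * reservation is turned off and page_pp_lock() falls
		 * back to per-page accounting.
		 */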
2523 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2524 if (sptd->spt_ppa_lckcnt[anon_index] <
2525 (ushort_t)DISM_LOCK_MAX) {
2526 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2527 (ushort_t)DISM_LOCK_MAX) {
2528 cmn_err(CE_WARN,
2529 "DISM page lock limit "
2530 "reached on DISM offset 0x%lx\n",
2531 anon_index << PAGESHIFT);
2533 kernel = (sptd->spt_ppa &&
2534 sptd->spt_ppa[anon_index]);
2535 if (!page_pp_lock(ppa[i], 0, kernel ||
2536 use_reserved)) {
2537 sptd->spt_ppa_lckcnt[anon_index]--;
2538 rv = EAGAIN;
2539 break;
2541 /* if this is a newly locked page, count it */
2542 if (ppa[i]->p_lckcnt == 1) {
2543 if (kernel == 0 && use_reserved == 1)
2544 nlck--;
2545 *locked += PAGESIZE;
2547 shmd->shm_lckpgs++;
2548 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2549 if (lockmap != NULL)
2550 BT_SET(lockmap, pos);
2554 /* Return unused lock reservation */
2555 if (nlck != 0 && use_reserved == 1) {
2556 mutex_enter(&freemem_lock);
2557 availrmem += nlck;
2558 pages_locked -= nlck;
2559 mutex_exit(&freemem_lock);
2562 return (rv);
2566 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2567 rctl_qty_t *unlocked)
2569 struct shm_data *shmd = seg->s_data;
2570 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2571 struct anon_map *amp = sptd->spt_amp;
2572 struct anon *ap;
2573 struct vnode *vp;
2574 u_offset_t off;
2575 struct page *pp;
2576 int kernel;
2577 anon_sync_obj_t cookie;
2578 ulong_t i;
2579 pgcnt_t nlck = 0;
2580 pgcnt_t nlck_limit = NLCK;
2582 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2583 for (i = 0; i < npages; i++, anon_index++) {
2584 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2585 anon_array_enter(amp, anon_index, &cookie);
2586 ap = anon_get_ptr(amp->ahp, anon_index);
2587 ASSERT(ap);
2589 swap_xlate(ap, &vp, &off);
2590 anon_array_exit(&cookie);
2591 pp = page_lookup(vp, off, SE_SHARED);
2592 ASSERT(pp);
2594 * availrmem is decremented only for pages which are not
2595 * in seg pcache; for pages in seg pcache, availrmem was
2596 * decremented in _dismpagelock().
2598 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2599 ASSERT(pp->p_lckcnt > 0);
2602 * unlock the page but do not change availrmem; we do it
2603 * ourselves every nlck loops.
2605 page_pp_unlock(pp, 0, 1);
2606 if (pp->p_lckcnt == 0) {
2607 if (kernel == 0)
2608 nlck++;
2609 *unlocked += PAGESIZE;
2611 page_unlock(pp);
2612 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2613 sptd->spt_ppa_lckcnt[anon_index]--;
2614 shmd->shm_lckpgs--;
2618 * To reduce freemem_lock contention, do not update availrmem
2619 * until at least NLCK pages have been unlocked.
2620 * 1. No need to update if nlck is zero
2621 * 2. Always update on the last iteration
2623 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2624 mutex_enter(&freemem_lock);
2625 availrmem += nlck;
2626 pages_locked -= nlck;
2627 mutex_exit(&freemem_lock);
2628 nlck = 0;
2629 nlck_limit = NLCK + RAND_P2(NLCK);
2632 ANON_LOCK_EXIT(&amp->a_rwlock);
2634 return (0);
2637 /*ARGSUSED*/
2638 static int
2639 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2640 int attr, int op, ulong_t *lockmap, size_t pos)
2642 struct shm_data *shmd = seg->s_data;
2643 struct seg *sptseg = shmd->shm_sptseg;
2644 struct spt_data *sptd = sptseg->s_data;
2645 struct kshmid *sp = sptd->spt_amp->a_sp;
2646 pgcnt_t npages, a_npages;
2647 page_t **ppa;
2648 pgcnt_t an_idx, a_an_idx, ppa_idx;
2649 caddr_t spt_addr, a_addr; /* spt and aligned address */
2650 size_t a_len; /* aligned len */
2651 size_t share_sz;
2652 ulong_t i;
2653 int sts = 0;
2654 rctl_qty_t unlocked = 0;
2655 rctl_qty_t locked = 0;
2656 struct proc *p = curproc;
2657 kproject_t *proj;
2659 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2660 ASSERT(sp != NULL);
2662 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2663 return (0);
2666 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2667 an_idx = seg_page(seg, addr);
2668 npages = btopr(len);
2670 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2671 return (ENOMEM);
2675 * A shm's project never changes, so no lock needed.
2676 * The shm has a hold on the project, so it will not go away.
2677 * Since we have a mapping to shm within this zone, we know
2678 * that the zone will not go away.
2680 proj = sp->shm_perm.ipc_proj;
2682 if (op == MC_LOCK) {
2685 * Need to align the addr and size request if they are not
2686 * aligned, so we can always allocate large page(s); however,
2687 * we only lock what was requested in the initial request.
2689 share_sz = page_get_pagesize(sptseg->s_szc);
2690 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2691 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2692 share_sz);
2693 a_npages = btop(a_len);
2694 a_an_idx = seg_page(seg, a_addr);
2695 spt_addr = sptseg->s_base + ptob(a_an_idx);
2696 ppa_idx = an_idx - a_an_idx;
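		/*
		 * ppa_idx is the offset of the first page the caller
		 * actually asked to lock within the large-page-aligned
		 * ppa[] array, so the whole aligned range is faulted in
		 * below but only the requested npages are locked and
		 * charged.
		 */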
2698 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2699 KM_NOSLEEP)) == NULL) {
2700 return (ENOMEM);
2704 * Don't cache any new pages for IO and
2705 * flush any cached pages.
2707 mutex_enter(&sptd->spt_lock);
2708 if (sptd->spt_ppa != NULL)
2709 sptd->spt_flags |= DISM_PPA_CHANGED;
2711 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2712 if (sts != 0) {
2713 mutex_exit(&sptd->spt_lock);
2714 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2715 return (sts);
2718 mutex_enter(&sp->shm_mlock);
2719 /* enforce locked memory rctl */
2720 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
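		/*
		 * "unlocked" is the number of bytes in the requested
		 * range that are not yet locked; that amount is charged
		 * against the project's locked-memory rctl before any
		 * locking is attempted, and any portion that could not
		 * be locked is credited back afterwards.
		 */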
2722 mutex_enter(&p->p_lock);
2723 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2724 mutex_exit(&p->p_lock);
2725 sts = EAGAIN;
2726 } else {
2727 mutex_exit(&p->p_lock);
2728 sts = spt_lockpages(seg, an_idx, npages,
2729 &ppa[ppa_idx], lockmap, pos, &locked);
2732 * correct locked count if not all pages could be
2733 * locked
2735 if ((unlocked - locked) > 0) {
2736 rctl_decr_locked_mem(NULL, proj,
2737 (unlocked - locked), 0);
2741 * unlock pages
2743 for (i = 0; i < a_npages; i++)
2744 page_unlock(ppa[i]);
2745 if (sptd->spt_ppa != NULL)
2746 sptd->spt_flags |= DISM_PPA_CHANGED;
2747 mutex_exit(&sp->shm_mlock);
2748 mutex_exit(&sptd->spt_lock);
2750 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2752 } else if (op == MC_UNLOCK) { /* unlock */
2753 page_t **ppa;
2755 mutex_enter(&sptd->spt_lock);
2756 if (shmd->shm_lckpgs == 0) {
2757 mutex_exit(&sptd->spt_lock);
2758 return (0);
2761 * Don't cache new IO pages.
2763 if (sptd->spt_ppa != NULL)
2764 sptd->spt_flags |= DISM_PPA_CHANGED;
2766 mutex_enter(&sp->shm_mlock);
2767 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2768 if ((ppa = sptd->spt_ppa) != NULL)
2769 sptd->spt_flags |= DISM_PPA_CHANGED;
2770 mutex_exit(&sptd->spt_lock);
2772 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2773 mutex_exit(&sp->shm_mlock);
2775 if (ppa != NULL)
2776 seg_ppurge_wiredpp(ppa);
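		/*
		 * If a cached page list exists (spt_ppa != NULL), it is
		 * purged from the seg_pcache here, presumably so a
		 * stale list is not reused after the unlock;
		 * DISM_PPA_CHANGED was set above for the same reason.
		 */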
2778 return (sts);
2781 /*ARGSUSED*/
2783 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2785 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2786 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2787 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2789 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2792 * ISM segment is always rw.
2794 while (--pgno >= 0)
2795 *protv++ = sptd->spt_prot;
2796 return (0);
2799 /*ARGSUSED*/
2800 u_offset_t
2801 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2803 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2805 /* Offset does not matter in ISM memory */
2807 return ((u_offset_t)0);
2810 /* ARGSUSED */
2812 segspt_shmgettype(struct seg *seg, caddr_t addr)
2814 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2815 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2817 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2820 * The shared memory mapping is always MAP_SHARED; SWAP is only
2821 * reserved for DISM.
2823 return (MAP_SHARED |
2824 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2827 /*ARGSUSED*/
2829 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2831 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2832 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2834 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2836 *vpp = sptd->spt_vp;
2837 return (0);
2841 * We need to wait for pending IO to complete to a DISM segment in order for
2842 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2843 * than enough time to wait.
2845 static clock_t spt_pcache_wait = 120;
2847 /*ARGSUSED*/
2848 static int
2849 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2851 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2852 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2853 struct anon_map *amp;
2854 pgcnt_t pg_idx;
2855 ushort_t gen;
2856 clock_t end_lbolt;
2857 int writer;
2858 page_t **ppa;
2860 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2862 if (behav == MADV_FREE) {
2863 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2864 return (0);
2866 amp = sptd->spt_amp;
2867 pg_idx = seg_page(seg, addr);
2869 mutex_enter(&sptd->spt_lock);
2870 if ((ppa = sptd->spt_ppa) == NULL) {
2871 mutex_exit(&sptd->spt_lock);
2872 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2873 anon_disclaim(amp, pg_idx, len);
2874 ANON_LOCK_EXIT(&amp->a_rwlock);
2875 return (0);
2878 sptd->spt_flags |= DISM_PPA_CHANGED;
2879 gen = sptd->spt_gen;
2881 mutex_exit(&sptd->spt_lock);
2884 * Purge all DISM cached pages
2886 seg_ppurge_wiredpp(ppa);
2889 * Drop the AS_LOCK so that other threads can grab it
2890 * in the as_pageunlock path and hopefully get the segment
2891 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2892 * to keep this segment resident.
2894 writer = AS_WRITE_HELD(seg->s_as);
2895 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2896 AS_LOCK_EXIT(seg->s_as);
2898 mutex_enter(&sptd->spt_lock);
2900 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
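	/*
	 * end_lbolt is a deadline in clock ticks: hz ticks per second
	 * times the spt_pcache_wait tunable (120 seconds).  The loop
	 * below waits on spt_cv until the segment generation changes,
	 * DISM_PPA_CHANGED clears, a signal arrives, or the deadline
	 * passes.
	 */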
2903 * Try to wait for pages to get kicked out of the seg_pcache.
2905 while (sptd->spt_gen == gen &&
2906 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2907 ddi_get_lbolt() < end_lbolt) {
2908 if (!cv_timedwait_sig(&sptd->spt_cv,
2909 &sptd->spt_lock, end_lbolt)) {
2910 break;
2914 mutex_exit(&sptd->spt_lock);
2916 /* Regrab the AS_LOCK and release our hold on the segment */
2917 AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER);
2918 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2919 if (shmd->shm_softlockcnt <= 0) {
2920 if (AS_ISUNMAPWAIT(seg->s_as)) {
2921 mutex_enter(&seg->s_as->a_contents);
2922 if (AS_ISUNMAPWAIT(seg->s_as)) {
2923 AS_CLRUNMAPWAIT(seg->s_as);
2924 cv_broadcast(&seg->s_as->a_cv);
2926 mutex_exit(&seg->s_as->a_contents);
2930 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2931 anon_disclaim(amp, pg_idx, len);
2932 ANON_LOCK_EXIT(&amp->a_rwlock);
2933 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2934 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2935 int already_set;
2936 ulong_t anon_index;
2937 lgrp_mem_policy_t policy;
2938 caddr_t shm_addr;
2939 size_t share_size;
2940 size_t size;
2941 struct seg *sptseg = shmd->shm_sptseg;
2942 caddr_t sptseg_addr;
2945 * Align address and length to page size of underlying segment
2947 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2948 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2949 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2950 share_size);
2952 amp = shmd->shm_amp;
2953 anon_index = seg_page(seg, shm_addr);
2956 * And now we may have to adjust size downward if we have
2957 * exceeded the realsize of the segment or initial anon
2958 * allocations.
2960 sptseg_addr = sptseg->s_base + ptob(anon_index);
2961 if ((sptseg_addr + size) >
2962 (sptseg->s_base + sptd->spt_realsize))
2963 size = (sptseg->s_base + sptd->spt_realsize) -
2964 sptseg_addr;
2967 * Set memory allocation policy for this segment
2969 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2970 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2971 NULL, 0, len);
2974 * If random memory allocation policy set already,
2975 * don't bother reapplying it.
2977 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2978 return (0);
2981 * Mark any existing pages in the given range for
2982 * migration, flush the I/O page cache, and use the
2983 * underlying segment to calculate the anon index and to
2984 * get the anonmap and vnode pointer.
2986 if (shmd->shm_softlockcnt > 0)
2987 segspt_purge(seg);
2989 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
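		/*
		 * page_mark_migrate() only marks the pages; the actual
		 * migration appears to happen later, e.g. via the
		 * page_migrate() calls made from the fault paths when
		 * lgrp_optimizations() is enabled.
		 */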
2992 return (0);
2995 /*ARGSUSED*/
2996 void
2997 segspt_shmdump(struct seg *seg)
2999 /* no-op for ISM segment */
3002 /*ARGSUSED*/
3003 static faultcode_t
3004 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3006 return (ENOTSUP);
3010 * get a memory ID for an addr in a given segment
3012 static int
3013 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3015 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3016 struct anon *ap;
3017 size_t anon_index;
3018 struct anon_map *amp = shmd->shm_amp;
3019 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3020 struct seg *sptseg = shmd->shm_sptseg;
3021 anon_sync_obj_t cookie;
3023 anon_index = seg_page(seg, addr);
3025 if (addr > (seg->s_base + sptd->spt_realsize)) {
3026 return (EFAULT);
3029 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3030 anon_array_enter(amp, anon_index, &cookie);
3031 ap = anon_get_ptr(amp->ahp, anon_index);
3032 if (ap == NULL) {
3033 struct page *pp;
3034 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3036 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3037 if (pp == NULL) {
3038 anon_array_exit(&cookie);
3039 ANON_LOCK_EXIT(&amp->a_rwlock);
3040 return (ENOMEM);
3042 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3043 page_unlock(pp);
3045 anon_array_exit(&cookie);
3046 ANON_LOCK_EXIT(&amp->a_rwlock);
3047 memidp->val[0] = (uintptr_t)ap;
3048 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3049 return (0);
3053 * Get memory allocation policy info for specified address in given segment
3055 static lgrp_mem_policy_info_t *
3056 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3058 struct anon_map *amp;
3059 ulong_t anon_index;
3060 lgrp_mem_policy_info_t *policy_info;
3061 struct shm_data *shm_data;
3063 ASSERT(seg != NULL);
3066 * Get anon_map from segshm
3068 * Assume that no lock needs to be held on the anon_map, since
3069 * it should be protected by its reference count, which must be
3070 * nonzero for an existing segment.
3071 * We do need to grab the readers lock on the policy tree, though.
3073 shm_data = (struct shm_data *)seg->s_data;
3074 if (shm_data == NULL)
3075 return (NULL);
3076 amp = shm_data->shm_amp;
3077 ASSERT(amp->refcnt != 0);
3080 * Get policy info
3082 * Assume starting anon index of 0
3084 anon_index = seg_page(seg, addr);
3085 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3087 return (policy_info);
3090 /*ARGSUSED*/
3091 static int
3092 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3094 return (0);