6801244 ps takes too long when many AS segments because of rm_assize
[illumos-gate.git] / usr/src/uts/common/vm/seg_vn.c
blob: 9ba274a2ebc912616f4aaebc8bb4e70e6b4d9dd5
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - shared or copy-on-write from a vnode/anonymous memory.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/debug.h>
#include <sys/cred.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/bitmap.h>
#include <sys/swap.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/vm.h>
#include <sys/dumphdr.h>
#include <sys/lgrp.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>
#include <vm/pvn.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <sys/proc.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/zone.h>
#include <sys/shm_impl.h>
/*
 * Private seg op routines.
 */
static int	segvn_dup(struct seg *seg, struct seg *newseg);
static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segvn_free(struct seg *seg);
static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
		    caddr_t addr, size_t len, enum fault_type type,
		    enum seg_rw rw);
static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
static int	segvn_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t	segvn_swapout(struct seg *seg);
static int	segvn_sync(struct seg *seg, caddr_t addr, size_t len,
		    int attr, uint_t flags);
static size_t	segvn_incore(struct seg *seg, caddr_t addr, size_t len,
		    char *vec);
static int	segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
		    int attr, int op, ulong_t *lockmap, size_t pos);
static int	segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t *protv);
static u_offset_t	segvn_getoffset(struct seg *seg, caddr_t addr);
static int	segvn_gettype(struct seg *seg, caddr_t addr);
static int	segvn_getvp(struct seg *seg, caddr_t addr,
		    struct vnode **vpp);
static int	segvn_advise(struct seg *seg, caddr_t addr, size_t len,
		    uint_t behav);
static void	segvn_dump(struct seg *seg);
static int	segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segvn_getmemid(struct seg *seg, caddr_t addr,
		    memid_t *memidp);
static lgrp_mem_policy_info_t	*segvn_getpolicy(struct seg *, caddr_t);
static int	segvn_capable(struct seg *seg, segcapability_t capable);
struct seg_ops segvn_ops = {
	segvn_dup,
	segvn_unmap,
	segvn_free,
	segvn_fault,
	segvn_faulta,
	segvn_setprot,
	segvn_checkprot,
	segvn_kluster,
	segvn_swapout,
	segvn_sync,
	segvn_incore,
	segvn_lockop,
	segvn_getprot,
	segvn_getoffset,
	segvn_gettype,
	segvn_getvp,
	segvn_advise,
	segvn_dump,
	segvn_pagelock,
	segvn_setpagesize,
	segvn_getmemid,
	segvn_getpolicy,
	segvn_capable,
};
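
/*
 * Illustrative note (added for clarity, not in the original file): generic
 * VM code never calls the routines above directly; it dispatches through
 * this ops vector.  For example the SEGOP_FAULT() wrapper in <vm/seg.h>
 * expands roughly to
 *
 *	(*seg->s_ops->fault)(hat, seg, addr, len, type, rw)
 *
 * so a page fault on a segvn segment lands in segvn_fault().
 */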
/*
 * Common zfod structures, provided as a shorthand for others to use.
 */
static segvn_crargs_t zfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
static segvn_crargs_t kzfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
	PROT_ALL & ~PROT_USER);
static segvn_crargs_t stack_noexec_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);

caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs;	/* user zfod argsp */
caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs;	/* kernel zfod argsp */
caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs;	/* executable stack */
caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
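
/*
 * Illustrative sketch (added for clarity, not in the original file): a
 * caller typically maps anonymous zero-fill-on-demand memory by handing
 * one of the argsp pointers above to as_map() along with segvn_create,
 * e.g.
 *
 *	error = as_map(as, addr, len, segvn_create, zfod_argsp);
 *
 * The crargs select the initial and maximum protections of the mapping
 * (kzfod for kernel-only access, stack_noexec for non-executable stacks).
 */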
#define	vpgtob(n)	((n) * sizeof (struct vpage))	/* For brevity */

size_t	segvn_comb_thrshld = UINT_MAX;	/* patchable -- see 1196681 */

size_t	segvn_pglock_comb_thrshld = (1UL << 16);	/* 64K */
size_t	segvn_pglock_comb_balign = (1UL << 16);		/* 64K */
uint_t	segvn_pglock_comb_bshift;
size_t	segvn_pglock_comb_palign;
static int	segvn_concat(struct seg *, struct seg *, int);
static int	segvn_extend_prev(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static int	segvn_extend_next(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static void	segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
static void	segvn_pagelist_rele(page_t **);
static void	segvn_setvnode_mpss(vnode_t *);
static void	segvn_relocate_pages(page_t **, page_t *);
static int	segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
static int	segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
    uint_t, page_t **, page_t **, uint_t *, int *);
static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
    u_offset_t, struct vpage *, page_t **, uint_t,
    enum fault_type, enum seg_rw, int);
static void	segvn_vpage(struct seg *);
static size_t	segvn_count_swap_by_vpages(struct seg *);

static void segvn_purge(struct seg *seg);
static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);

static int sameprot(struct seg *, caddr_t, size_t);

static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
static int segvn_clrszc(struct seg *);
static struct seg *segvn_split_seg(struct seg *, caddr_t);
static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
    ulong_t, uint_t);

static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

static struct kmem_cache *segvn_cache;
static struct kmem_cache **segvn_szc_cache;

#ifdef VM_STATS
static struct segvnvmstats_str {
	ulong_t	fill_vp_pages[31];
	ulong_t	fltvnpages[49];
	ulong_t	fullszcpages[10];
	ulong_t	relocatepages[3];
	ulong_t	fltanpages[17];
	ulong_t	pagelock[2];
	ulong_t	demoterange[3];
} segvnvmstats;
#endif /* VM_STATS */

#define	SDR_RANGE	1		/* demote entire range */
#define	SDR_END		2		/* demote non aligned ends only */

#define	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) {	\
	if ((len) != 0) {						\
		lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);	\
		ASSERT(lpgaddr >= (seg)->s_base);			\
		lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) +	\
		    (len)), pgsz);					\
		ASSERT(lpgeaddr > lpgaddr);				\
		ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size);	\
	} else {							\
		lpgeaddr = lpgaddr = (addr);				\
	}								\
}
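
/*
 * Worked example (added for clarity, not in the original file): with a 4M
 * large page (pgsz == 0x400000), addr == 0x12345000 and len == 0x2000, the
 * macro computes
 *
 *	lpgaddr  = P2ALIGN(0x12345000, 0x400000)   = 0x12000000
 *	lpgeaddr = P2ROUNDUP(0x12347000, 0x400000) = 0x12400000
 *
 * i.e. the smallest large-page-aligned region covering [addr, addr + len).
 */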

/*ARGSUSED*/
static int
segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct segvn_data *svd = buf;

	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
	svd->svn_trnext = svd->svn_trprev = NULL;
	return (0);
}

/*ARGSUSED1*/
static void
segvn_cache_destructor(void *buf, void *cdrarg)
{
	struct segvn_data *svd = buf;

	rw_destroy(&svd->lock);
	mutex_destroy(&svd->segfree_syncmtx);
}

/*ARGSUSED*/
static int
svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (svntr_t));
	return (0);
}

/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * most everything works.  The exceptions are generally some interpreters
 * and debuggers that create executable code on the stack and jump
 * into it (without explicitly mprotecting the address range to include
 * PROT_EXEC).
 *
 * One important class of applications that are disabled are those
 * that have been transformed into malicious agents using one of the
 * numerous "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;
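
/*
 * Illustrative example (added for clarity, not in the original file):
 * administrators usually flip these tunables from /etc/system, e.g.
 *
 *	set noexec_user_stack = 1
 *	set noexec_user_stack_log = 1
 *
 * which marks user stacks non-executable at exec time and logs attempts
 * to execute code from the stack.
 */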

int segvn_lpg_disable = 0;
uint_t segvn_maxpgszc = 0;

ulong_t segvn_vmpss_clrszc_cnt;
ulong_t segvn_vmpss_clrszc_err;
ulong_t segvn_fltvnpages_clrszc_cnt;
ulong_t segvn_fltvnpages_clrszc_err;
ulong_t segvn_setpgsz_align_err;
ulong_t segvn_setpgsz_anon_align_err;
ulong_t segvn_setpgsz_getattr_err;
ulong_t segvn_setpgsz_eof_err;
ulong_t segvn_faultvnmpss_align_err1;
ulong_t segvn_faultvnmpss_align_err2;
ulong_t segvn_faultvnmpss_align_err3;
ulong_t segvn_faultvnmpss_align_err4;
ulong_t segvn_faultvnmpss_align_err5;
ulong_t	segvn_vmpss_pageio_deadlk_err;

int segvn_use_regions = 1;

/*
 * Segvn supports text replication optimization for NUMA platforms. Text
 * replicas are represented by anon maps (amp). There's one amp per text file
 * region per lgroup. A process chooses the amp for each of its text mappings
 * based on the lgroup assignment of its main thread (t_tid = 1). All
 * processes that want a replica on a particular lgroup for the same text file
 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table
 * with vp,off,size,szc used as a key. Text replication segments are read only
 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
 * pages. Replication amp is assigned to a segment when it gets its first
 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread
 * rechecks periodically if the process still maps an amp local to the main
 * thread. If not, the async thread forces the process to remap to an amp in
 * the new home lgroup of the main thread. The current text replication
 * implementation only provides the benefit to workloads that do most of their
 * work in the main thread of a process or whose threads all run in the same
 * lgroup. To extend text replication benefit to different types of
 * multithreaded workloads further work would be needed in the hat layer to
 * allow the same virtual address in the same hat to simultaneously map
 * different physical addresses (i.e. page table replication would be needed
 * for x86).
 *
 * amp pages are used instead of vnode pages as long as segment has a very
 * simple life cycle.  It's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped.  If anything more complicated
 * happens such as protection is changed, real COW fault happens, pagesize is
 * changed, MC_LOCK is requested or segment is partially unmapped we turn off
 * text replication by converting the segment back to vnode only segment
 * (unmap segment's address range and set svd->amp to NULL).
 *
 * The original file can be changed after amp is inserted into
 * svntr_hashtab. Processes that are launched after the file is already
 * changed can't use the replicas created prior to the file change. To
 * implement this functionality hash entries are timestamped. Replicas can
 * only be used if current file modification time is the same as the timestamp
 * saved when hash entry was created. However just timestamps alone are not
 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
 * deal with file changes via MAP_SHARED mappings differently. When writable
 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
 * existing replicas for this vnode as not usable for future text
 * mappings. And we don't create new replicas for files that currently have
 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
 * true).
 */
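
/*
 * Illustrative sketch (added for clarity, not in the original file) of the
 * per-segment text replication state machine driven by svd->tr_state:
 *
 *	SEGVN_TR_INIT --(first fault, replica amp chosen)---> SEGVN_TR_ON
 *	SEGVN_TR_INIT --(replication not possible)----------> SEGVN_TR_OFF
 *	SEGVN_TR_ON   --(mprotect/MC_LOCK/partial unmap/...)-> SEGVN_TR_OFF
 *
 * Once a segment drops to SEGVN_TR_OFF it maps vnode pages directly again.
 */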

#define	SEGVN_TEXTREPL_MAXBYTES_FACTOR	(20)
size_t	segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;

static ulong_t			svntr_hashtab_sz = 512;
static svntr_bucket_t		*svntr_hashtab = NULL;
static struct kmem_cache	*svntr_cache;
static svntr_stats_t		*segvn_textrepl_stats;
static ksema_t			segvn_trasync_sem;

int	segvn_disable_textrepl = 1;
size_t	textrepl_size_thresh = (size_t)-1;
size_t	segvn_textrepl_bytes = 0;
size_t	segvn_textrepl_max_bytes = 0;
clock_t	segvn_update_textrepl_interval = 0;
int	segvn_update_tr_time = 10;
int	segvn_disable_textrepl_update = 0;

static void segvn_textrepl(struct seg *);
static void segvn_textunrepl(struct seg *, int);
static void segvn_inval_trcache(vnode_t *);
static void segvn_trasync_thread(void);
static void segvn_trupdate_wakeup(void *);
static void segvn_trupdate(void);
static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
    ulong_t);

/*
 * Initialize segvn data structures
 */
void
segvn_init(void)
{
	uint_t maxszc;
	uint_t szc;
	size_t pgsz;

	segvn_cache = kmem_cache_create("segvn_cache",
	    sizeof (struct segvn_data), 0,
	    segvn_cache_constructor, segvn_cache_destructor, NULL,
	    NULL, NULL, 0);

	if (segvn_lpg_disable == 0) {
		szc = maxszc = page_num_pagesizes() - 1;
		if (szc == 0) {
			segvn_lpg_disable = 1;
		}
		if (page_get_pagesize(0) != PAGESIZE) {
			panic("segvn_init: bad szc 0");
			/*NOTREACHED*/
		}
		while (szc != 0) {
			pgsz = page_get_pagesize(szc);
			if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
				panic("segvn_init: bad szc %d", szc);
				/*NOTREACHED*/
			}
			szc--;
		}
		if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
			segvn_maxpgszc = maxszc;
	}

	if (segvn_maxpgszc) {
		segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
		    (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
		    KM_SLEEP);
	}

	for (szc = 1; szc <= segvn_maxpgszc; szc++) {
		char	str[32];

		(void) sprintf(str, "segvn_szc_cache%d", szc);
		segvn_szc_cache[szc] = kmem_cache_create(str,
		    page_get_pagecnt(szc) * sizeof (page_t *), 0,
		    NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
	}

	if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
		segvn_use_regions = 0;

	/*
	 * For now shared regions and text replication segvn support
	 * are mutually exclusive. This is acceptable because
	 * currently significant benefit from text replication was
	 * only observed on AMD64 NUMA platforms (due to relatively
	 * small L2$ size) and currently we don't support shared
	 * regions on x86.
	 */
	if (segvn_use_regions && !segvn_disable_textrepl) {
		segvn_disable_textrepl = 1;
	}

#if defined(_LP64)
	if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
	    !segvn_disable_textrepl) {
		ulong_t i;
		size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);

		svntr_cache = kmem_cache_create("svntr_cache",
		    sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
		    NULL, NULL, NULL, 0);
		svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
		for (i = 0; i < svntr_hashtab_sz; i++) {
			mutex_init(&svntr_hashtab[i].tr_lock, NULL,
			    MUTEX_DEFAULT, NULL);
		}
		segvn_textrepl_max_bytes = ptob(physmem) /
		    segvn_textrepl_max_bytes_factor;
		segvn_textrepl_stats = kmem_zalloc(NCPU *
		    sizeof (svntr_stats_t), KM_SLEEP);
		sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
		(void) thread_create(NULL, 0, segvn_trasync_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}
#endif

	if (!ISP2(segvn_pglock_comb_balign) ||
	    segvn_pglock_comb_balign < PAGESIZE) {
		segvn_pglock_comb_balign = 1UL << 16; /* 64K */
	}
	segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
	segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
}

#define	SEGVN_PAGEIO	((void *)0x1)
#define	SEGVN_NOPAGEIO	((void *)0x2)

static void
segvn_setvnode_mpss(vnode_t *vp)
{
	int err;

	ASSERT(vp->v_mpssdata == NULL ||
	    vp->v_mpssdata == SEGVN_PAGEIO ||
	    vp->v_mpssdata == SEGVN_NOPAGEIO);

	if (vp->v_mpssdata == NULL) {
		if (vn_vmpss_usepageio(vp)) {
			err = VOP_PAGEIO(vp, (page_t *)NULL,
			    (u_offset_t)0, 0, 0, CRED(), NULL);
		} else {
			err = ENOSYS;
		}
		/*
		 * set v_mpssdata just once per vnode life
		 * so that it never changes.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_mpssdata == NULL) {
			if (err == EINVAL) {
				vp->v_mpssdata = SEGVN_PAGEIO;
			} else {
				vp->v_mpssdata = SEGVN_NOPAGEIO;
			}
		}
		mutex_exit(&vp->v_lock);
	}
}
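
/*
 * Note (added for clarity, not in the original file): the zero-length
 * VOP_PAGEIO() call above acts as a probe.  A filesystem that implements
 * pageio rejects the degenerate arguments with EINVAL, which is taken to
 * mean pageio is usable; any other result (e.g. ENOSYS) marks the vnode
 * SEGVN_NOPAGEIO.
 */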

int
segvn_create(struct seg *seg, void *argsp)
{
	struct segvn_crargs *a = (struct segvn_crargs *)argsp;
	struct segvn_data *svd;
	size_t swresv = 0;
	struct cred *cred;
	struct anon_map *amp;
	int error = 0;
	size_t pgsz;
	lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
	int use_rgn = 0;
	int trok = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
		panic("segvn_create type");
		/*NOTREACHED*/
	}

	/*
	 * Check arguments.  If a shared anon structure is given then
	 * it is illegal to also specify a vp.
	 */
	if (a->amp != NULL && a->vp != NULL) {
		panic("segvn_create anon_map");
		/*NOTREACHED*/
	}

	if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
	    a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
	    segvn_use_regions) {
		use_rgn = 1;
	}

	/* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
	if (a->type == MAP_SHARED)
		a->flags &= ~MAP_NORESERVE;

	if (a->szc != 0) {
		if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
		    (a->amp != NULL && a->type == MAP_PRIVATE) ||
		    (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
			a->szc = 0;
		} else {
			if (a->szc > segvn_maxpgszc)
				a->szc = segvn_maxpgszc;
			pgsz = page_get_pagesize(a->szc);
			if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
			    !IS_P2ALIGNED(seg->s_size, pgsz)) {
				a->szc = 0;
			} else if (a->vp != NULL) {
				extern struct vnode kvp;
				if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
					/*
					 * paranoid check.
					 * hat_page_demote() is not supported
					 * on swapfs pages.
					 */
					a->szc = 0;
				} else if (map_addr_vacalign_check(seg->s_base,
				    a->offset & PAGEMASK)) {
					a->szc = 0;
				}
			} else if (a->amp != NULL) {
				pgcnt_t anum = btopr(a->offset);
				pgcnt_t pgcnt = page_get_pagecnt(a->szc);
				if (!IS_P2ALIGNED(anum, pgcnt)) {
					a->szc = 0;
				}
			}
		}
	}

	/*
	 * If segment may need private pages, reserve them now.
	 */
	if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
	    (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
		if (anon_resv_zone(seg->s_size,
		    seg->s_as->a_proc->p_zone) == 0)
			return (EAGAIN);
		swresv = seg->s_size;
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, swresv, 1);
	}

	/*
	 * Reserve any mapping structures that may be required.
	 *
	 * Don't do it for segments that may use regions. It's currently a
	 * noop in the hat implementations anyway.
	 */
	if (!use_rgn) {
		hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	}

	if (a->cred) {
		cred = a->cred;
		crhold(cred);
	} else {
		crhold(cred = CRED());
	}

	/* Inform the vnode of the new mapping */
	if (a->vp != NULL) {
		error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
		    seg->s_as, seg->s_base, seg->s_size, a->prot,
		    a->maxprot, a->type, cred, NULL);
		if (error) {
			if (swresv != 0) {
				anon_unresv_zone(swresv,
				    seg->s_as->a_proc->p_zone);
				TRACE_3(TR_FAC_VM, TR_ANON_PROC,
				    "anon proc:%p %lu %u", seg, swresv, 0);
			}
			crfree(cred);
			if (!use_rgn) {
				hat_unload(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, HAT_UNLOAD_UNMAP);
			}
			return (error);
		}
		/*
		 * svntr_hashtab will be NULL if we support shared regions.
		 */
		trok = ((a->flags & MAP_TEXT) &&
		    (seg->s_size > textrepl_size_thresh ||
		    (a->flags & _MAP_TEXTREPL)) &&
		    lgrp_optimizations() && svntr_hashtab != NULL &&
		    a->type == MAP_PRIVATE && swresv == 0 &&
		    !(a->flags & MAP_NORESERVE) &&
		    seg->s_as != &kas && a->vp->v_type == VREG);

		ASSERT(!trok || !use_rgn);
	}

	/*
	 * If more than one segment in the address space, and they're adjacent
	 * virtually, try to concatenate them.  Don't concatenate if an
	 * explicit anon_map structure was supplied (e.g., SystemV shared
	 * memory) or if we'll use text replication for this segment.
	 */
	if (a->amp == NULL && !use_rgn && !trok) {
		struct seg *pseg, *nseg;
		struct segvn_data *psvd, *nsvd;
		lgrp_mem_policy_t ppolicy, npolicy;
		uint_t	lgrp_mem_policy_flags = 0;
		extern lgrp_mem_policy_t lgrp_mem_default_policy;

		/*
		 * Memory policy flags (lgrp_mem_policy_flags) is valid when
		 * extending stack/heap segments.
		 */
		if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
		    !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
			lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
		} else {
			/*
			 * Get policy when not extending it from another segment
			 */
			mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
		}

		/*
		 * First, try to concatenate the previous and new segments
		 */
		pseg = AS_SEGPREV(seg->s_as, seg);
		if (pseg != NULL &&
		    pseg->s_base + pseg->s_size == seg->s_base &&
		    pseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from previous segment.
			 * When extension is specified (e.g. for heap) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			psvd = (struct segvn_data *)pseg->s_data;
			ppolicy = psvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_UP) {
				if (ppolicy != lgrp_mem_default_policy) {
					mpolicy = ppolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    pseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == ppolicy &&
			    (pseg->s_size + seg->s_size <=
			    segvn_comb_thrshld || psvd->amp == NULL) &&
			    segvn_extend_prev(pseg, seg, a, swresv) == 0) {
				/*
				 * success! now try to concatenate
				 * with following seg
				 */
				crfree(cred);
				nseg = AS_SEGNEXT(pseg->s_as, pseg);
				if (nseg != NULL &&
				    nseg != pseg &&
				    nseg->s_ops == &segvn_ops &&
				    pseg->s_base + pseg->s_size ==
				    nseg->s_base)
					(void) segvn_concat(pseg, nseg, 0);
				ASSERT(pseg->s_szc == 0 ||
				    (a->szc == pseg->s_szc &&
				    IS_P2ALIGNED(pseg->s_base, pgsz) &&
				    IS_P2ALIGNED(pseg->s_size, pgsz)));
				return (0);
			}
		}

		/*
		 * Failed, so try to concatenate with following seg
		 */
		nseg = AS_SEGNEXT(seg->s_as, seg);
		if (nseg != NULL &&
		    seg->s_base + seg->s_size == nseg->s_base &&
		    nseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from next segment.
			 * When extension is specified (e.g. for stack) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			nsvd = (struct segvn_data *)nseg->s_data;
			npolicy = nsvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_DOWN) {
				if (npolicy != lgrp_mem_default_policy) {
					mpolicy = npolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    nseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == npolicy &&
			    segvn_extend_next(seg, nseg, a, swresv) == 0) {
				crfree(cred);
				ASSERT(nseg->s_szc == 0 ||
				    (a->szc == nseg->s_szc &&
				    IS_P2ALIGNED(nseg->s_base, pgsz) &&
				    IS_P2ALIGNED(nseg->s_size, pgsz)));
				return (0);
			}
		}
	}

	if (a->vp != NULL) {
		VN_HOLD(a->vp);
		if (a->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, a->vp);
	}
	svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	seg->s_ops = &segvn_ops;
	seg->s_data = (void *)svd;
	seg->s_szc = a->szc;

	svd->seg = seg;
	svd->vp = a->vp;
	/*
	 * Anonymous mappings have no backing file so the offset is meaningless.
	 */
	svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
	svd->prot = a->prot;
	svd->maxprot = a->maxprot;
	svd->pageprot = 0;
	svd->type = a->type;
	svd->vpage = NULL;
	svd->cred = cred;
	svd->advice = MADV_NORMAL;
	svd->pageadvice = 0;
	svd->flags = (ushort_t)a->flags;
	svd->softlockcnt = 0;
	svd->softlockcnt_sbase = 0;
	svd->softlockcnt_send = 0;
	svd->rcookie = HAT_INVALID_REGION_COOKIE;
	svd->pageswap = 0;

	if (a->szc != 0 && a->vp != NULL) {
		segvn_setvnode_mpss(a->vp);
	}
	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
		ASSERT(vn_is_mapped(svd->vp, V_WRITE));
		segvn_inval_trcache(svd->vp);
	}

	amp = a->amp;
	if ((svd->amp = amp) == NULL) {
		svd->anon_index = 0;
		if (svd->type == MAP_SHARED) {
			svd->swresv = 0;
			/*
			 * Shared mappings to a vp need no other setup.
			 * If we have a shared mapping to an anon_map object
			 * which hasn't been allocated yet,  allocate the
			 * struct now so that it will be properly shared
			 * by remembering the swap reservation there.
			 */
			if (a->vp == NULL) {
				svd->amp = anonmap_alloc(seg->s_size, swresv,
				    ANON_SLEEP);
				svd->amp->a_szc = seg->s_szc;
			}
		} else {
			/*
			 * Private mapping (with or without a vp).
			 * Allocate anon_map when needed.
			 */
			svd->swresv = swresv;
		}
	} else {
		pgcnt_t anon_num;

		/*
		 * Mapping to an existing anon_map structure without a vp.
		 * For now we will insure that the segment size isn't larger
		 * than the size - offset gives us.  Later on we may wish to
		 * have the anon array dynamically allocated itself so that
		 * we don't always have to allocate all the anon pointer slots.
		 * This of course involves adding extra code to check that we
		 * aren't trying to use an anon pointer slot beyond the end
		 * of the currently allocated anon array.
		 */
		if ((amp->size - a->offset) < seg->s_size) {
			panic("segvn_create anon_map size");
			/*NOTREACHED*/
		}

		anon_num = btopr(a->offset);

		if (a->type == MAP_SHARED) {
			/*
			 * SHARED mapping to a given anon_map.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			if (a->szc > amp->a_szc) {
				amp->a_szc = a->szc;
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index = anon_num;
			svd->swresv = 0;
		} else {
			/*
			 * PRIVATE mapping to a given anon_map.
			 * Make sure that all the needed anon
			 * structures are created (so that we will
			 * share the underlying pages if nothing
			 * is written by this mapping) and then
			 * duplicate the anon array as is done
			 * when a privately mapped segment is dup'ed.
			 */
			struct anon *ap;
			caddr_t addr;
			caddr_t eaddr;
			ulong_t anon_idx;
			int hat_flag = HAT_LOAD;

			if (svd->flags & MAP_TEXT) {
				hat_flag |= HAT_LOAD_TEXT;
			}

			svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
			svd->amp->a_szc = seg->s_szc;
			svd->anon_index = 0;
			svd->swresv = swresv;

			/*
			 * Prevent 2 threads from allocating anon
			 * slots simultaneously.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			eaddr = seg->s_base + seg->s_size;

			for (anon_idx = anon_num, addr = seg->s_base;
			    addr < eaddr; addr += PAGESIZE, anon_idx++) {
				page_t *pp;

				if ((ap = anon_get_ptr(amp->ahp,
				    anon_idx)) != NULL)
					continue;

				/*
				 * Allocate the anon struct now.
				 * Might as well load up translation
				 * to the page while we're at it...
				 */
				pp = anon_zero(seg, addr, &ap, cred);
				if (ap == NULL || pp == NULL) {
					panic("segvn_create anon_zero");
					/*NOTREACHED*/
				}

				/*
				 * Re-acquire the anon_map lock and
				 * initialize the anon array entry.
				 */
				ASSERT(anon_get_ptr(amp->ahp,
				    anon_idx) == NULL);
				(void) anon_set_ptr(amp->ahp, anon_idx, ap,
				    ANON_SLEEP);

				ASSERT(seg->s_szc == 0);
				ASSERT(!IS_VMODSORT(pp->p_vnode));

				ASSERT(use_rgn == 0);
				hat_memload(seg->s_as->a_hat, addr, pp,
				    svd->prot & ~PROT_WRITE, hat_flag);

				page_unlock(pp);
			}
			ASSERT(seg->s_szc == 0);
			anon_dup(amp->ahp, anon_num, svd->amp->ahp,
			    0, seg->s_size);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Set default memory allocation policy for segment
	 *
	 * Always set policy for private memory at least for initialization
	 * even if this is a shared memory segment
	 */
	(void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);

	if (svd->type == MAP_SHARED)
		(void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
		    svd->vp, svd->offset, seg->s_size);

	if (use_rgn) {
		ASSERT(!trok);
		ASSERT(svd->amp == NULL);
		svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
		    seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
		    (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
		    HAT_REGION_TEXT);
	}

	ASSERT(!trok || !(svd->prot & PROT_WRITE));
	svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;

	return (0);
}

/*
 * Concatenate two existing segments, if possible.
 * Return 0 on success, -1 if two segments are not compatible
 * or -2 on memory allocation failure.
 * If amp_cat == 1 then try and concat segments with anon maps
 */
static int
segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
{
	struct segvn_data *svd1 = seg1->s_data;
	struct segvn_data *svd2 = seg2->s_data;
	struct anon_map *amp1 = svd1->amp;
	struct anon_map *amp2 = svd2->amp;
	struct vpage *vpage1 = svd1->vpage;
	struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
	size_t size, nvpsize;
	pgcnt_t npages1, npages2;

	ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
	ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
	ASSERT(seg1->s_ops == seg2->s_ops);

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
	    HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* both segments exist, try to merge them */
#define	incompat(x)	(svd1->x != svd2->x)
	if (incompat(vp) || incompat(maxprot) ||
	    (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
	    (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
	    incompat(type) || incompat(cred) || incompat(flags) ||
	    seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
	    (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
		return (-1);
#undef incompat

	/*
	 * vp == NULL implies zfod, offset doesn't matter
	 */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != svd2->offset) {
		return (-1);
	}

	/*
	 * Don't concatenate if either segment uses text replication.
	 */
	if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	/*
	 * Fail early if we're not supposed to concatenate
	 * segments with non NULL amp.
	 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != amp2) {
			return (-1);
		}
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}

	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp, *evp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			evp = nvpage + npages1 + npages2;
			for (vp = nvpage + npages1; vp < evp; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}

		if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
			ASSERT(svd1->swresv == seg1->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}

		if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
			ASSERT(svd2->swresv == seg2->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			vp = nvpage + npages1;
			evp = vp + npages2;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT((vpage1 != NULL || vpage2 != NULL) ||
	    (svd1->pageswap == 0 && svd2->pageswap == 0));

	/*
	 * If either segment has private pages, create a new merged anon
	 * array. If merging shared anon segments just decrement anon map's
	 * refcnt.
	 */
	if (amp1 != NULL && svd1->type == MAP_SHARED) {
		ASSERT(amp1 == amp2 && svd1->vp == NULL);
		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		ASSERT(amp1->refcnt >= 2);
		amp1->refcnt--;
		ANON_LOCK_EXIT(&amp1->a_rwlock);
		svd2->amp = NULL;
	} else if (amp1 != NULL || amp2 != NULL) {
		struct anon_hdr *nahp;
		struct anon_map *namp = NULL;
		size_t asize;

		ASSERT(svd1->type == MAP_PRIVATE);

		asize = seg1->s_size + seg2->s_size;
		if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
			if (nvpage != NULL) {
				kmem_free(nvpage, nvpsize);
			}
			return (-2);
		}
		if (amp1 != NULL) {
			/*
			 * XXX anon rwlock is not really needed because
			 * this is a private segment and we are writers.
			 */
			ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
			ASSERT(amp1->refcnt == 1);
			if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
			    nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp1->a_rwlock);
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp2 != NULL) {
			ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
			ASSERT(amp2->refcnt == 1);
			if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
			    nahp, btop(seg1->s_size), btop(seg2->s_size),
			    ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				if (amp1 != NULL) {
					ANON_LOCK_EXIT(&amp1->a_rwlock);
				}
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp1 != NULL) {
			namp = amp1;
			anon_release(amp1->ahp, btop(amp1->size));
		}
		if (amp2 != NULL) {
			if (namp == NULL) {
				ASSERT(amp1 == NULL);
				namp = amp2;
				anon_release(amp2->ahp, btop(amp2->size));
			} else {
				amp2->refcnt--;
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				anonmap_free(amp2);
			}
			svd2->amp = NULL; /* needed for seg_free */
		}
		namp->ahp = nahp;
		namp->size = asize;
		svd1->amp = namp;
		svd1->anon_index = 0;
		ANON_LOCK_EXIT(&namp->a_rwlock);
	}
	/*
	 * Now free the old vpage structures.
	 */
	if (nvpage != NULL) {
		if (vpage1 != NULL) {
			kmem_free(vpage1, vpgtob(npages1));
		}
		if (vpage2 != NULL) {
			svd2->vpage = NULL;
			kmem_free(vpage2, vpgtob(npages2));
		}
		if (svd2->pageprot) {
			svd1->pageprot = 1;
		}
		if (svd2->pageadvice) {
			svd1->pageadvice = 1;
		}
		if (svd2->pageswap) {
			svd1->pageswap = 1;
		}
		svd1->vpage = nvpage;
	}

	/* all looks ok, merge segments */
	svd1->swresv += svd2->swresv;
	svd2->swresv = 0;  /* so seg_free doesn't release swap space */
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	return (0);
}
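
/*
 * Illustrative note (added for clarity, not in the original file): on
 * success the two adjacent segments collapse into one.  For example, two
 * neighboring 8K segvn segments over the same vnode become a single 16K
 * segment: seg2 is freed via seg_free() and seg1->s_size grows to cover
 * both ranges.  Callers such as segvn_create() simply ignore a -1/-2
 * failure and leave the segments separate.
 */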

/*
 * Extend the previous segment (seg1) to include the
 * new segment (seg2 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_prev(struct seg *seg1, struct seg *seg2, struct segvn_crargs *a,
    size_t swresv)
{
	struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
	size_t size;
	struct anon_map *amp1;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
		return (-1);
	}

	/* second segment is new, try to extend first */
	/* XXX - should also check cred */
	if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
	    (!svd1->pageprot && (svd1->prot != a->prot)) ||
	    svd1->type != a->type || svd1->flags != a->flags ||
	    seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
		return (-1);

	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
		return (-1);

	if (svd1->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp1 = svd1->amp;
	if (amp1) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 *
		 * Can't grow a MAP_SHARED segment with an anonmap because
		 * there may be existing anon slots where we want to extend
		 * the segment and we wouldn't know what to do with them
		 * (e.g., for tmpfs right thing is to just leave them there,
		 * for /dev/zero they should be cleared out).
		 */
		if (svd1->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		if (amp1->refcnt > 1) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
		    btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		amp1->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp1->a_rwlock);
	}
	if (svd1->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL)
			return (-1);
		bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
		kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
		svd1->vpage = new_vpage;

		vp = new_vpage + seg_pages(seg1);
		evp = vp + seg_pages(seg2);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd1->pageswap && swresv) {
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(swresv == seg2->s_size);
			vp = new_vpage + seg_pages(seg1);
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	svd1->swresv += swresv;
	if (svd1->pageprot && (a->prot & PROT_WRITE) &&
	    svd1->type == MAP_SHARED && svd1->vp != NULL &&
	    (svd1->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
		segvn_inval_trcache(svd1->vp);
	}
	return (0);
}

/*
 * Extend the next segment (seg2) to include the
 * new segment (seg1 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_next(
	struct seg *seg1,
	struct seg *seg2,
	struct segvn_crargs *a,
	size_t swresv)
{
	struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
	size_t size;
	struct anon_map *amp2;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));

	if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* first segment is new, try to extend second */
	/* XXX - should also check cred */
	if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
	    (!svd2->pageprot && (svd2->prot != a->prot)) ||
	    svd2->type != a->type || svd2->flags != a->flags ||
	    seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
		return (-1);
	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd2->vp != NULL &&
	    (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
		return (-1);

	if (svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp2 = svd2->amp;
	if (amp2) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 *
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 */
		if (svd2->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
		if (amp2->refcnt > 1) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
		    btop(seg2->s_size), btop(seg1->s_size),
		    ANON_NOSLEEP | ANON_GROWDOWN);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		amp2->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp2->a_rwlock);
	}
	if (svd2->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL) {
			/* Not merging segments so adjust anon_index back */
			if (amp2)
				svd2->anon_index += seg_pages(seg1);
			return (-1);
		}
		bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
		    vpgtob(seg_pages(seg2)));
		kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
		svd2->vpage = new_vpage;

		vp = new_vpage;
		evp = vp + seg_pages(seg1);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd2->pageswap && swresv) {
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			ASSERT(swresv == seg1->s_size);
			vp = new_vpage;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
	size = seg1->s_size;
	seg_free(seg1);
	seg2->s_size += size;
	seg2->s_base -= size;
	svd2->offset -= size;
	svd2->swresv += swresv;
	if (svd2->pageprot && (a->prot & PROT_WRITE) &&
	    svd2->type == MAP_SHARED && svd2->vp != NULL &&
	    (svd2->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
		segvn_inval_trcache(svd2->vp);
	}
	return (0);
}

static int
segvn_dup(struct seg *seg, struct seg *newseg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd;
	pgcnt_t npages = seg_pages(seg);
	int error = 0;
	uint_t prot;
	size_t len;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(newseg->s_as->a_proc->p_parent == curproc);

	/*
	 * If segment has anon reserved, reserve more for the new seg.
	 * For a MAP_NORESERVE segment swresv will be a count of all the
	 * allocated anon slots; thus we reserve for the child as many slots
	 * as the parent has allocated. This semantic prevents the child or
	 * parent from dying during a copy-on-write fault caused by trying
	 * to write a shared pre-existing anon page.
	 */
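	/*
	 * Worked example (added for clarity, not in the original file): if a
	 * MAP_NORESERVE parent has faulted in 10 anon pages, its swresv
	 * covers those 10 pages, so the child reserves 10 pages of swap here
	 * as well; a COW fault in either process then always has reservation
	 * backing it.
	 */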
	if ((len = svd->swresv) != 0) {
		if (anon_resv(svd->swresv) == 0)
			return (ENOMEM);

		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, len, 0);
	}

	newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	newseg->s_ops = &segvn_ops;
	newseg->s_data = (void *)newsvd;
	newseg->s_szc = seg->s_szc;

	newsvd->seg = newseg;
	if ((newsvd->vp = svd->vp) != NULL) {
		VN_HOLD(svd->vp);
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, svd->vp);
	}
	newsvd->offset = svd->offset;
	newsvd->prot = svd->prot;
	newsvd->maxprot = svd->maxprot;
	newsvd->pageprot = svd->pageprot;
	newsvd->type = svd->type;
	newsvd->cred = svd->cred;
	crhold(newsvd->cred);
	newsvd->advice = svd->advice;
	newsvd->pageadvice = svd->pageadvice;
	newsvd->swresv = svd->swresv;
	newsvd->pageswap = svd->pageswap;
	newsvd->flags = svd->flags;
	newsvd->softlockcnt = 0;
	newsvd->softlockcnt_sbase = 0;
	newsvd->softlockcnt_send = 0;
	newsvd->policy_info = svd->policy_info;
	newsvd->rcookie = HAT_INVALID_REGION_COOKIE;

	if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
		/*
		 * Not attaching to a shared anon object.
		 */
		ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
		    svd->tr_state == SEGVN_TR_OFF);
		if (svd->tr_state == SEGVN_TR_ON) {
			ASSERT(newsvd->vp != NULL && amp != NULL);
			newsvd->tr_state = SEGVN_TR_INIT;
		} else {
			newsvd->tr_state = svd->tr_state;
		}
		newsvd->amp = NULL;
		newsvd->anon_index = 0;
	} else {
		/* regions for now are only used on pure vnode segments */
		ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		newsvd->tr_state = SEGVN_TR_OFF;
		if (svd->type == MAP_SHARED) {
			newsvd->amp = amp;
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			newsvd->anon_index = svd->anon_index;
		} else {
			int reclaim = 1;

			/*
			 * Allocate and initialize new anon_map structure.
			 */
			newsvd->amp = anonmap_alloc(newseg->s_size, 0,
			    ANON_SLEEP);
			newsvd->amp->a_szc = newseg->s_szc;
			newsvd->anon_index = 0;

			/*
			 * We don't have to acquire the anon_map lock
			 * for the new segment (since it belongs to an
			 * address space that is still not associated
			 * with any process), or the segment in the old
			 * address space (since all threads in it
			 * are stopped while duplicating the address space).
			 */

			/*
			 * The goal of the following code is to make sure that
			 * softlocked pages do not end up as copy on write
			 * pages.  This would cause problems where one
			 * thread writes to a page that is COW and a different
			 * thread in the same process has softlocked it.  The
			 * softlock lock would move away from this process
			 * because the write would cause this process to get
			 * a copy (without the softlock).
			 *
			 * The strategy here is to just break the
			 * sharing on pages that could possibly be
			 * softlocked.
			 */
retry:
			if (svd->softlockcnt) {
				struct anon *ap, *newap;
				size_t i;
				uint_t vpprot;
				page_t *anon_pl[1+1], *pp;
				caddr_t addr;
				ulong_t old_idx = svd->anon_index;
				ulong_t new_idx = 0;

				/*
				 * The softlock count might be non zero
				 * because some pages are still stuck in the
				 * cache for lazy reclaim. Flush the cache
				 * now. This should drop the count to zero.
				 * [or there is really I/O going on to these
				 * pages]. Note, we have the writers lock so
				 * nothing gets inserted during the flush.
				 */
				if (reclaim == 1) {
					segvn_purge(seg);
					reclaim = 0;
					goto retry;
				}
				i = btopr(seg->s_size);
				addr = seg->s_base;
				/*
				 * XXX break cow sharing using PAGESIZE
				 * pages. They will be relocated into larger
				 * pages at fault time.
				 */
				while (i-- > 0) {
					if (ap = anon_get_ptr(amp->ahp,
					    old_idx)) {
						error = anon_getpage(&ap,
						    &vpprot, anon_pl, PAGESIZE,
						    seg, addr, S_READ,
						    svd->cred);
						if (error) {
							newsvd->vpage = NULL;
							goto out;
						}
						/*
						 * prot need not be computed
						 * below 'cause anon_private is
						 * going to ignore it anyway
						 * as child doesn't inherit
						 * pagelock from parent.
						 */
						prot = svd->pageprot ?
						    VPP_PROT(
						    &svd->vpage[
						    seg_page(seg, addr)])
						    : svd->prot;
						pp = anon_private(&newap,
						    newseg, addr, prot,
						    anon_pl[0], 0,
						    newsvd->cred);
						if (pp == NULL) {
							/* no mem abort */
							newsvd->vpage = NULL;
							error = ENOMEM;
							goto out;
						}
						(void) anon_set_ptr(
						    newsvd->amp->ahp, new_idx,
						    newap, ANON_SLEEP);
						page_unlock(pp);
					}
					addr += PAGESIZE;
					old_idx++;
					new_idx++;
				}
			} else {	/* common case */
				if (seg->s_szc != 0) {
					/*
					 * If at least one of anon slots of a
					 * large page exists then make sure
					 * all anon slots of a large page
					 * exist to avoid partial cow sharing
					 * of a large page in the future.
					 */
					anon_dup_fill_holes(amp->ahp,
					    svd->anon_index, newsvd->amp->ahp,
					    0, seg->s_size, seg->s_szc,
					    svd->vp != NULL);
				} else {
					anon_dup(amp->ahp, svd->anon_index,
					    newsvd->amp->ahp, 0, seg->s_size);
				}

				hat_clrattr(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, PROT_WRITE);
			}
		}
	}
	/*
	 * If necessary, create a vpage structure for the new segment.
	 * Do not copy any page lock indications.
	 */
	if (svd->vpage != NULL) {
		uint_t i;
		struct vpage *ovp = svd->vpage;
		struct vpage *nvp;

		nvp = newsvd->vpage =
		    kmem_alloc(vpgtob(npages), KM_SLEEP);
		for (i = 0; i < npages; i++) {
			*nvp = *ovp++;
			VPP_CLRPPLOCK(nvp++);
		}
	} else
		newsvd->vpage = NULL;

	/* Inform the vnode of the new mapping */
	if (newsvd->vp != NULL) {
		error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
		    newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
		    newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
	}
out:
	if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(newsvd->amp == NULL);
		ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
		newsvd->rcookie = svd->rcookie;
		hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
	}
	return (error);
}
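
/*
 * Note (added for clarity, not in the original file): segvn_dup() is
 * reached through as_dup() when a process forks, which is why the code
 * above asserts that the new segment's address space belongs to a child
 * of the current process.
 */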

/*
 * callback function to invoke free_vp_pages() for only those pages actually
 * processed by the HAT when a shared region is destroyed.
 */
extern int free_pages;

static void
segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
    size_t r_size, void *r_obj, u_offset_t r_objoff)
{
	u_offset_t off;
	size_t len;
	vnode_t *vp = (vnode_t *)r_obj;

	ASSERT(eaddr > saddr);
	ASSERT(saddr >= r_saddr);
	ASSERT(saddr < r_saddr + r_size);
	ASSERT(eaddr > r_saddr);
	ASSERT(eaddr <= r_saddr + r_size);
	ASSERT(vp != NULL);

	if (!free_pages) {
		return;
	}

	len = eaddr - saddr;
	off = (saddr - r_saddr) + r_objoff;
	free_vp_pages(vp, off, len);
}

/*
 * callback function used by segvn_unmap to invoke free_vp_pages() for only
 * those pages actually processed by the HAT
 */
static void
segvn_hat_unload_callback(hat_callback_t *cb)
{
	struct seg		*seg = cb->hcb_data;
	struct segvn_data	*svd = (struct segvn_data *)seg->s_data;
	size_t			len;
	u_offset_t		off;

	ASSERT(svd->vp != NULL);
	ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
	ASSERT(cb->hcb_start_addr >= seg->s_base);

	len = cb->hcb_end_addr - cb->hcb_start_addr;
	off = cb->hcb_start_addr - seg->s_base;
	free_vp_pages(svd->vp, svd->offset + off, len);
}

/*
 * This function determines the number of bytes of swap reserved by
 * a segment for which per-page accounting is present. It is used to
 * calculate the correct value of a segvn_data's swresv.
 */
static size_t
segvn_count_swap_by_vpages(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	size_t nswappages = 0;

	ASSERT(svd->pageswap);
	ASSERT(svd->vpage != NULL);

	evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];

	for (vp = svd->vpage; vp < evp; vp++) {
		if (VPP_ISSWAPRES(vp))
			nswappages++;
	}

	return (nswappages << PAGESHIFT);
}
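
/*
 * Worked example (added for clarity, not in the original file): with 4K
 * pages (PAGESHIFT == 12), a segment whose vpage array has three entries
 * marked VPP_ISSWAPRES yields 3 << 12 == 12288 bytes (0x3000) of reserved
 * swap.
 */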

static int
segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *nsvd;
	struct seg *nseg;
	struct anon_map *amp;
	pgcnt_t	opages;		/* old segment size in pages */
	pgcnt_t	npages;		/* new segment size in pages */
	pgcnt_t	dpages;		/* pages being deleted (unmapped) */
	hat_callback_t callback;	/* used for free_vp_pages() */
	hat_callback_t *cbp = NULL;
	caddr_t nbase;
	size_t nsize;
	size_t oswresv;
	int reclaim = 1;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Fail the unmap if pages are SOFTLOCKed through this mapping.
	 * softlockcnt is protected from change by the as write lock.
	 */
retry:
	if (svd->softlockcnt > 0) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);

		/*
		 * If this is shared segment non 0 softlockcnt
		 * means locked pages are still in use.
		 */
		if (svd->type == MAP_SHARED) {
			return (EAGAIN);
		}

		/*
		 * since we do have the writers lock nobody can fill
		 * the cache during the purge. The flush either succeeds
		 * or we still have pending I/Os.
		 */
		if (reclaim == 1) {
			segvn_purge(seg);
			reclaim = 0;
			goto retry;
		}
		return (EAGAIN);
	}

	/*
	 * Check for bad sizes
	 */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
		panic("segvn_unmap");
		/*NOTREACHED*/
	}

	if (seg->s_szc != 0) {
		size_t pgsz = page_get_pagesize(seg->s_szc);
		int err;
		if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
			ASSERT(seg->s_base != addr || seg->s_size != len);
			if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
				hat_leave_region(seg->s_as->a_hat,
				    svd->rcookie, HAT_REGION_TEXT);
				svd->rcookie = HAT_INVALID_REGION_COOKIE;
				/*
				 * could pass a flag to segvn_demote_range()
				 * below to tell it not to do any unloads but
				 * this case is rare enough to not bother for
				 * now.
				 */
			} else if (svd->tr_state == SEGVN_TR_INIT) {
				svd->tr_state = SEGVN_TR_OFF;
			} else if (svd->tr_state == SEGVN_TR_ON) {
				ASSERT(svd->amp != NULL);
				segvn_textunrepl(seg, 1);
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
			}
			VM_STAT_ADD(segvnvmstats.demoterange[0]);
			err = segvn_demote_range(seg, addr, len, SDR_END, 0);
			if (err == 0) {
				return (IE_RETRY);
			}
			return (err);
		}
	}

	/* Inform the vnode of the unmapping. */
	if (svd->vp) {
		int error;

		error = VOP_DELMAP(svd->vp,
		    (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
		    seg->s_as, addr, len, svd->prot, svd->maxprot,
		    svd->type, svd->cred, NULL);

		if (error == EAGAIN)
			return (error);
	}

	/*
	 * Remove any page locks set through this mapping.
	 * If text replication is not off no page locks could have been
	 * established via this mapping.
	 */
	if (svd->tr_state == SEGVN_TR_OFF) {
		(void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
	}

	if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(svd->amp == NULL);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		ASSERT(svd->type == MAP_PRIVATE);
		hat_leave_region(seg->s_as->a_hat, svd->rcookie,
		    HAT_REGION_TEXT);
		svd->rcookie = HAT_INVALID_REGION_COOKIE;
	} else if (svd->tr_state == SEGVN_TR_ON) {
		ASSERT(svd->amp != NULL);
		ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
		segvn_textunrepl(seg, 1);
		ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
	} else {
		if (svd->tr_state != SEGVN_TR_OFF) {
			ASSERT(svd->tr_state == SEGVN_TR_INIT);
			svd->tr_state = SEGVN_TR_OFF;
		}
		/*
		 * Unload any hardware translations in the range to be taken
		 * out. Use a callback to invoke free_vp_pages() effectively.
		 */
		if (svd->vp != NULL && free_pages != 0) {
			callback.hcb_data = seg;
			callback.hcb_function = segvn_hat_unload_callback;
			cbp = &callback;
		}
		hat_unload_callback(seg->s_as->a_hat, addr, len,
		    HAT_UNLOAD_UNMAP, cbp);

		if (svd->type == MAP_SHARED && svd->vp != NULL &&
		    (svd->vp->v_flag & VVMEXEC) &&
		    ((svd->prot & PROT_WRITE) || svd->pageprot)) {
			segvn_inval_trcache(svd->vp);
		}
	}

	/*
	 * Check for entire segment
	 */
	if (addr == seg->s_base && len == seg->s_size) {
		seg_free(seg);
		return (0);
	}

	opages = seg_pages(seg);
	dpages = btop(len);
	npages = opages - dpages;
	amp = svd->amp;
	ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);

	/*
	 * Check for beginning of segment
	 */
	if (addr == seg->s_base) {
		if (svd->vpage != NULL) {
			size_t nbytes;
			struct vpage *ovpage;

			ovpage = svd->vpage;	/* keep pointer to vpage */

			nbytes = vpgtob(npages);
			svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
			bcopy(&ovpage[dpages], svd->vpage, nbytes);

			/* free up old vpage */
			kmem_free(ovpage, vpgtob(opages));
		}
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				if (svd->type == MAP_SHARED) {
					ASSERT(amp->refcnt == 1);
					ASSERT(svd->softlockcnt == 0);
					anonmap_purge(amp);
				}
				/*
				 * Free up now unused parts of anon_map array.
				 */
				if (amp->a_szc == seg->s_szc) {
					if (seg->s_szc != 0) {
						anon_free_pages(amp->ahp,
						    svd->anon_index, len,
						    seg->s_szc);
					} else {
						anon_free(amp->ahp,
						    svd->anon_index,
						    len);
					}
				} else {
					ASSERT(svd->type == MAP_SHARED);
					ASSERT(amp->a_szc > seg->s_szc);
					anon_shmap_free_pages(amp,
					    svd->anon_index, len);
				}

				/*
				 * Unreserve swap space for the
				 * unmapped chunk of this segment in
				 * case it's MAP_SHARED
				 */
				if (svd->type == MAP_SHARED) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
					amp->swresv -= len;
				}
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index += dpages;
		}
		if (svd->vp != NULL)
			svd->offset += len;

		seg->s_base += len;
		seg->s_size -= len;

		if (svd->swresv) {
			if (svd->flags & MAP_NORESERVE) {
				ASSERT(amp);
				oswresv = svd->swresv;

				svd->swresv = ptob(anon_pages(amp->ahp,
				    svd->anon_index, npages));
				anon_unresv_zone(oswresv - svd->swresv,
				    seg->s_as->a_proc->p_zone);
				if (SEG_IS_PARTIAL_RESV(seg))
					seg->s_as->a_resvsize -= oswresv -
					    svd->swresv;
			} else {
				size_t unlen;

				if (svd->pageswap) {
					oswresv = svd->swresv;
					svd->swresv =
					    segvn_count_swap_by_vpages(seg);
					ASSERT(oswresv >= svd->swresv);
					unlen = oswresv - svd->swresv;
				} else {
					svd->swresv -= len;
					ASSERT(svd->swresv == seg->s_size);
					unlen = len;
				}
				anon_unresv_zone(unlen,
				    seg->s_as->a_proc->p_zone);
2050 seg->s_as->a_proc->p_zone);
2052 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2053 seg, len, 0);
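/*
 * (Added note.) The swresv adjustment above picks one of three
 * accounting models: MAP_NORESERVE segments re-count the anon pages
 * still reserved, pageswap segments re-count per-vpage reservations
 * via segvn_count_swap_by_vpages(), and plain segments simply shrink
 * the reservation by len. The same three-way split recurs in the
 * end-of-segment and middle-of-segment cases below.
 */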
2056 return (0);
2060 * Check for end of segment
2062 if (addr + len == seg->s_base + seg->s_size) {
2063 if (svd->vpage != NULL) {
2064 size_t nbytes;
2065 struct vpage *ovpage;
2067 ovpage = svd->vpage; /* keep pointer to vpage */
2069 nbytes = vpgtob(npages);
2070 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2071 bcopy(ovpage, svd->vpage, nbytes);
2073 /* free up old vpage */
2074 kmem_free(ovpage, vpgtob(opages));
2077 if (amp != NULL) {
2078 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2079 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2081 * Free up now unused parts of anon_map array.
2083 ulong_t an_idx = svd->anon_index + npages;
2086 * Shared anon map is no longer in use. Before
2087 * freeing its pages purge all entries from
2088 * pcache that belong to this amp.
2090 if (svd->type == MAP_SHARED) {
2091 ASSERT(amp->refcnt == 1);
2092 ASSERT(svd->softlockcnt == 0);
2093 anonmap_purge(amp);
2096 if (amp->a_szc == seg->s_szc) {
2097 if (seg->s_szc != 0) {
2098 anon_free_pages(amp->ahp,
2099 an_idx, len,
2100 seg->s_szc);
2101 } else {
2102 anon_free(amp->ahp, an_idx,
2103 len);
2105 } else {
2106 ASSERT(svd->type == MAP_SHARED);
2107 ASSERT(amp->a_szc > seg->s_szc);
2108 anon_shmap_free_pages(amp,
2109 an_idx, len);
2113 * Unreserve swap space for the
2114 * unmapped chunk of this segment in
2115 * case it's MAP_SHARED
2117 if (svd->type == MAP_SHARED) {
2118 anon_unresv_zone(len,
2119 seg->s_as->a_proc->p_zone);
2120 amp->swresv -= len;
2123 ANON_LOCK_EXIT(&amp->a_rwlock);
2126 seg->s_size -= len;
2128 if (svd->swresv) {
2129 if (svd->flags & MAP_NORESERVE) {
2130 ASSERT(amp);
2131 oswresv = svd->swresv;
2132 svd->swresv = ptob(anon_pages(amp->ahp,
2133 svd->anon_index, npages));
2134 anon_unresv_zone(oswresv - svd->swresv,
2135 seg->s_as->a_proc->p_zone);
2136 if (SEG_IS_PARTIAL_RESV(seg))
2137 seg->s_as->a_resvsize -= oswresv -
2138 svd->swresv;
2139 } else {
2140 size_t unlen;
2142 if (svd->pageswap) {
2143 oswresv = svd->swresv;
2144 svd->swresv =
2145 segvn_count_swap_by_vpages(seg);
2146 ASSERT(oswresv >= svd->swresv);
2147 unlen = oswresv - svd->swresv;
2148 } else {
2149 svd->swresv -= len;
2150 ASSERT(svd->swresv == seg->s_size);
2151 unlen = len;
2153 anon_unresv_zone(unlen,
2154 seg->s_as->a_proc->p_zone);
2156 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2157 "anon proc:%p %lu %u", seg, len, 0);
2160 return (0);
2164 * The section to go is in the middle of the segment,
2165 * so we have to split it into two segments. nseg is made for
2166 * the high end while seg is cut down at the low end.
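/*
 * Worked example (added; assumes 4K pages): if seg covers
 * [0x10000, 0x18000) and we unmap [0x13000, 0x15000), then below
 *
 *	seg->s_size = 0x13000 - 0x10000 = 0x3000   (low piece keeps seg)
 *	nbase       = 0x13000 + 0x2000  = 0x15000  (high piece base)
 *	nsize       = 0x18000 - 0x15000 = 0x3000   (high piece size)
 */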
2168 nbase = addr + len; /* new seg base */
2169 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2170 seg->s_size = addr - seg->s_base; /* shrink old seg */
2171 nseg = seg_alloc(seg->s_as, nbase, nsize);
2172 if (nseg == NULL) {
2173 panic("segvn_unmap seg_alloc");
2174 /*NOTREACHED*/
2176 nseg->s_ops = seg->s_ops;
2177 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2178 nseg->s_data = (void *)nsvd;
2179 nseg->s_szc = seg->s_szc;
2180 *nsvd = *svd;
2181 nsvd->seg = nseg;
2182 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2183 nsvd->swresv = 0;
2184 nsvd->softlockcnt = 0;
2185 nsvd->softlockcnt_sbase = 0;
2186 nsvd->softlockcnt_send = 0;
2187 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2189 if (svd->vp != NULL) {
2190 VN_HOLD(nsvd->vp);
2191 if (nsvd->type == MAP_SHARED)
2192 lgrp_shm_policy_init(NULL, nsvd->vp);
2194 crhold(svd->cred);
2196 if (svd->vpage == NULL) {
2197 nsvd->vpage = NULL;
2198 } else {
2199 /* need to split vpage into two arrays */
2200 size_t nbytes;
2201 struct vpage *ovpage;
2203 ovpage = svd->vpage; /* keep pointer to vpage */
2205 npages = seg_pages(seg); /* seg has shrunk */
2206 nbytes = vpgtob(npages);
2207 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2209 bcopy(ovpage, svd->vpage, nbytes);
2211 npages = seg_pages(nseg);
2212 nbytes = vpgtob(npages);
2213 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2215 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2217 /* free up old vpage */
2218 kmem_free(ovpage, vpgtob(opages));
2221 if (amp == NULL) {
2222 nsvd->amp = NULL;
2223 nsvd->anon_index = 0;
2224 } else {
2226 * Need to create a new anon map for the new segment.
2227 * We'll also allocate a new smaller array for the old
2228 * smaller segment to save space.
2230 opages = btop((uintptr_t)(addr - seg->s_base));
2231 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2232 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2234 * Free up now unused parts of anon_map array.
2236 ulong_t an_idx = svd->anon_index + opages;
2239 * Shared anon map is no longer in use. Before
2240 * freeing its pages purge all entries from
2241 * pcache that belong to this amp.
2243 if (svd->type == MAP_SHARED) {
2244 ASSERT(amp->refcnt == 1);
2245 ASSERT(svd->softlockcnt == 0);
2246 anonmap_purge(amp);
2249 if (amp->a_szc == seg->s_szc) {
2250 if (seg->s_szc != 0) {
2251 anon_free_pages(amp->ahp, an_idx, len,
2252 seg->s_szc);
2253 } else {
2254 anon_free(amp->ahp, an_idx,
2255 len);
2257 } else {
2258 ASSERT(svd->type == MAP_SHARED);
2259 ASSERT(amp->a_szc > seg->s_szc);
2260 anon_shmap_free_pages(amp, an_idx, len);
2264 * Unreserve swap space for the
2265 * unmapped chunk of this segment in
2266 * case it's MAP_SHARED
2268 if (svd->type == MAP_SHARED) {
2269 anon_unresv_zone(len,
2270 seg->s_as->a_proc->p_zone);
2271 amp->swresv -= len;
2274 nsvd->anon_index = svd->anon_index +
2275 btop((uintptr_t)(nseg->s_base - seg->s_base));
2276 if (svd->type == MAP_SHARED) {
2277 amp->refcnt++;
2278 nsvd->amp = amp;
2279 } else {
2280 struct anon_map *namp;
2281 struct anon_hdr *nahp;
2283 ASSERT(svd->type == MAP_PRIVATE);
2284 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2285 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2286 namp->a_szc = seg->s_szc;
2287 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2288 0, btop(seg->s_size), ANON_SLEEP);
2289 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2290 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2291 anon_release(amp->ahp, btop(amp->size));
2292 svd->anon_index = 0;
2293 nsvd->anon_index = 0;
2294 amp->ahp = nahp;
2295 amp->size = seg->s_size;
2296 nsvd->amp = namp;
2298 ANON_LOCK_EXIT(&amp->a_rwlock);
2300 if (svd->swresv) {
2301 if (svd->flags & MAP_NORESERVE) {
2302 ASSERT(amp);
2303 oswresv = svd->swresv;
2304 svd->swresv = ptob(anon_pages(amp->ahp,
2305 svd->anon_index, btop(seg->s_size)));
2306 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2307 nsvd->anon_index, btop(nseg->s_size)));
2308 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2309 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2310 seg->s_as->a_proc->p_zone);
2311 if (SEG_IS_PARTIAL_RESV(seg))
2312 seg->s_as->a_resvsize -= oswresv -
2313 (svd->swresv + nsvd->swresv);
2314 } else {
2315 size_t unlen;
2317 if (svd->pageswap) {
2318 oswresv = svd->swresv;
2319 svd->swresv = segvn_count_swap_by_vpages(seg);
2320 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2321 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2322 unlen = oswresv - (svd->swresv + nsvd->swresv);
2323 } else {
2324 if (seg->s_size + nseg->s_size + len !=
2325 svd->swresv) {
2326 panic("segvn_unmap: cannot split "
2327 "swap reservation");
2328 /*NOTREACHED*/
2330 svd->swresv = seg->s_size;
2331 nsvd->swresv = nseg->s_size;
2332 unlen = len;
2334 anon_unresv_zone(unlen,
2335 seg->s_as->a_proc->p_zone);
2337 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2338 seg, len, 0);
2341 return (0); /* I'm glad that's all over with! */
2344 static void
2345 segvn_free(struct seg *seg)
2347 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2348 pgcnt_t npages = seg_pages(seg);
2349 struct anon_map *amp;
2350 size_t len;
2353 * We don't need any segment level locks for "segvn" data
2354 * since the address space is "write" locked.
2356 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2357 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2359 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2362 * Be sure to unlock pages. XXX Why do things get free'ed instead
2363 * of unmapped? XXX
2365 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2366 0, MC_UNLOCK, NULL, 0);
2369 * Deallocate the vpage and anon pointers if necessary and possible.
2371 if (svd->vpage != NULL) {
2372 kmem_free(svd->vpage, vpgtob(npages));
2373 svd->vpage = NULL;
2375 if ((amp = svd->amp) != NULL) {
2377 * If there are no more references to this anon_map
2378 * structure, then deallocate the structure after freeing
2379 * up all the anon slot pointers that we can.
2381 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2382 ASSERT(amp->a_szc >= seg->s_szc);
2383 if (--amp->refcnt == 0) {
2384 if (svd->type == MAP_PRIVATE) {
2386 * Private - we only need to anon_free
2387 * the part that this segment refers to.
2389 if (seg->s_szc != 0) {
2390 anon_free_pages(amp->ahp,
2391 svd->anon_index, seg->s_size,
2392 seg->s_szc);
2393 } else {
2394 anon_free(amp->ahp, svd->anon_index,
2395 seg->s_size);
2397 } else {
2400 * Shared anon map is no longer in use. Before
2401 * freeing its pages purge all entries from
2402 * pcache that belong to this amp.
2404 ASSERT(svd->softlockcnt == 0);
2405 anonmap_purge(amp);
2408 * Shared - anon_free the entire
2409 * anon_map's worth of stuff and
2410 * release any swap reservation.
2412 if (amp->a_szc != 0) {
2413 anon_shmap_free_pages(amp, 0,
2414 amp->size);
2415 } else {
2416 anon_free(amp->ahp, 0, amp->size);
2418 if ((len = amp->swresv) != 0) {
2419 anon_unresv_zone(len,
2420 seg->s_as->a_proc->p_zone);
2421 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2422 "anon proc:%p %lu %u", seg, len, 0);
2425 svd->amp = NULL;
2426 ANON_LOCK_EXIT(&amp->a_rwlock);
2427 anonmap_free(amp);
2428 } else if (svd->type == MAP_PRIVATE) {
2430 * We had a private mapping which still has
2431 * a held anon_map so just free up all the
2432 * anon slot pointers that we were using.
2434 if (seg->s_szc != 0) {
2435 anon_free_pages(amp->ahp, svd->anon_index,
2436 seg->s_size, seg->s_szc);
2437 } else {
2438 anon_free(amp->ahp, svd->anon_index,
2439 seg->s_size);
2441 ANON_LOCK_EXIT(&amp->a_rwlock);
2442 } else {
2443 ANON_LOCK_EXIT(&amp->a_rwlock);
2448 * Release swap reservation.
2450 if ((len = svd->swresv) != 0) {
2451 anon_unresv_zone(svd->swresv,
2452 seg->s_as->a_proc->p_zone);
2453 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2454 seg, len, 0);
2455 if (SEG_IS_PARTIAL_RESV(seg))
2456 seg->s_as->a_resvsize -= svd->swresv;
2457 svd->swresv = 0;
2460 * Release claim on vnode, credentials, and finally free the
2461 * private data.
2463 if (svd->vp != NULL) {
2464 if (svd->type == MAP_SHARED)
2465 lgrp_shm_policy_fini(NULL, svd->vp);
2466 VN_RELE(svd->vp);
2467 svd->vp = NULL;
2469 crfree(svd->cred);
2470 svd->pageprot = 0;
2471 svd->pageadvice = 0;
2472 svd->pageswap = 0;
2473 svd->cred = NULL;
2476 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2477 * still working with this segment without holding as lock (in case
2478 * it's called by pcache async thread).
2480 ASSERT(svd->softlockcnt == 0);
2481 mutex_enter(&svd->segfree_syncmtx);
2482 mutex_exit(&svd->segfree_syncmtx);
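/*
 * (Added note.) The empty mutex_enter()/mutex_exit() pair above is a
 * barrier rather than a critical section: it waits out any
 * segvn_reclaim() still running under segfree_syncmtx before svd is
 * freed below.
 */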
2484 seg->s_data = NULL;
2485 kmem_cache_free(segvn_cache, svd);
2489 * Do an F_SOFTUNLOCK call over the range requested. The range must have
2490 * already been F_SOFTLOCK'ed.
2491 * Caller must always match addr and len of a softunlock with a previous
2492 * softlock with exactly the same addr and len.
2494 static void
2495 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2497 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2498 page_t *pp;
2499 caddr_t adr;
2500 struct vnode *vp;
2501 u_offset_t offset;
2502 ulong_t anon_index;
2503 struct anon_map *amp;
2504 struct anon *ap = NULL;
2506 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2507 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2509 if ((amp = svd->amp) != NULL)
2510 anon_index = svd->anon_index + seg_page(seg, addr);
2512 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2513 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2514 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2515 } else {
2516 hat_unlock(seg->s_as->a_hat, addr, len);
2518 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2519 if (amp != NULL) {
2520 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2521 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2522 != NULL) {
2523 swap_xlate(ap, &vp, &offset);
2524 } else {
2525 vp = svd->vp;
2526 offset = svd->offset +
2527 (uintptr_t)(adr - seg->s_base);
2529 ANON_LOCK_EXIT(&amp->a_rwlock);
2530 } else {
2531 vp = svd->vp;
2532 offset = svd->offset +
2533 (uintptr_t)(adr - seg->s_base);
2537 * Use page_find() instead of page_lookup() to
2538 * find the page since we know that it is locked.
2540 pp = page_find(vp, offset);
2541 if (pp == NULL) {
2542 panic(
2543 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2544 (void *)adr, (void *)ap, (void *)vp, offset);
2545 /*NOTREACHED*/
2548 if (rw == S_WRITE) {
2549 hat_setrefmod(pp);
2550 if (seg->s_as->a_vbits)
2551 hat_setstat(seg->s_as, adr, PAGESIZE,
2552 P_REF | P_MOD);
2553 } else if (rw != S_OTHER) {
2554 hat_setref(pp);
2555 if (seg->s_as->a_vbits)
2556 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2558 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2559 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2560 page_unlock(pp);
2562 ASSERT(svd->softlockcnt >= btop(len));
2563 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2565 * All SOFTLOCKS are gone. Wakeup any waiting
2566 * unmappers so they can try again to unmap.
2567 * Check for waiters first without the mutex
2568 * held so we don't always grab the mutex on
2569 * softunlocks.
2571 if (AS_ISUNMAPWAIT(seg->s_as)) {
2572 mutex_enter(&seg->s_as->a_contents);
2573 if (AS_ISUNMAPWAIT(seg->s_as)) {
2574 AS_CLRUNMAPWAIT(seg->s_as);
2575 cv_broadcast(&seg->s_as->a_cv);
2577 mutex_exit(&seg->s_as->a_contents);
2582 #define PAGE_HANDLED ((page_t *)-1)
2585 * Release all the pages in the NULL terminated ppp list
2586 * which haven't already been converted to PAGE_HANDLED.
2588 static void
2589 segvn_pagelist_rele(page_t **ppp)
2591 for (; *ppp != NULL; ppp++) {
2592 if (*ppp != PAGE_HANDLED)
2593 page_unlock(*ppp);
2597 static int stealcow = 1;
2600 * Workaround for viking chip bug. See bug id 1220902.
2601 * To fix this down in pagefault() would require importing so
2602 * much of the as and segvn code as to be unmaintainable.
2604 int enable_mbit_wa = 0;
2607 * Handles all the dirty work of getting the right
2608 * anonymous pages and loading up the translations.
2609 * This routine is called only from segvn_fault()
2610 * when looping over the range of addresses requested.
2612 * The basic algorithm here is:
2613 * If this is an anon_zero case
2614 * Call anon_zero to allocate page
2615 * Load up translation
2616 * Return
2617 * endif
2618 * If this is an anon page
2619 * Use anon_getpage to get the page
2620 * else
2621 * Find page in pl[] list passed in
2622 * endif
2623 * If not a cow
2624 * Load up the translation to the page
2625 * return
2626 * endif
2627 * Call anon_private to handle cow
2628 * Load up (writable) translation to new page
2630 static faultcode_t
2631 segvn_faultpage(
2632 struct hat *hat, /* the hat to use for mapping */
2633 struct seg *seg, /* seg_vn of interest */
2634 caddr_t addr, /* address in as */
2635 u_offset_t off, /* offset in vp */
2636 struct vpage *vpage, /* pointer to vpage for vp, off */
2637 page_t *pl[], /* object source page pointer */
2638 uint_t vpprot, /* access allowed to object pages */
2639 enum fault_type type, /* type of fault */
2640 enum seg_rw rw, /* type of access at fault */
2641 int brkcow) /* we may need to break cow */
2643 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2644 page_t *pp, **ppp;
2645 uint_t pageflags = 0;
2646 page_t *anon_pl[1 + 1];
2647 page_t *opp = NULL; /* original page */
2648 uint_t prot;
2649 int err;
2650 int cow;
2651 int claim;
2652 int steal = 0;
2653 ulong_t anon_index;
2654 struct anon *ap, *oldap;
2655 struct anon_map *amp;
2656 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2657 int anon_lock = 0;
2658 anon_sync_obj_t cookie;
2660 if (svd->flags & MAP_TEXT) {
2661 hat_flag |= HAT_LOAD_TEXT;
2664 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2665 ASSERT(seg->s_szc == 0);
2666 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2669 * Initialize protection value for this page.
2670 * If we have per page protection values check it now.
2672 if (svd->pageprot) {
2673 uint_t protchk;
2675 switch (rw) {
2676 case S_READ:
2677 protchk = PROT_READ;
2678 break;
2679 case S_WRITE:
2680 protchk = PROT_WRITE;
2681 break;
2682 case S_EXEC:
2683 protchk = PROT_EXEC;
2684 break;
2685 case S_OTHER:
2686 default:
2687 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2688 break;
2691 prot = VPP_PROT(vpage);
2692 if ((prot & protchk) == 0)
2693 return (FC_PROT); /* illegal access type */
2694 } else {
2695 prot = svd->prot;
2698 if (type == F_SOFTLOCK) {
2699 atomic_add_long((ulong_t *)&svd->softlockcnt, 1);
2703 * Always acquire the anon array lock to prevent 2 threads from
2704 * allocating separate anon slots for the same "addr".
2707 if ((amp = svd->amp) != NULL) {
2708 ASSERT(RW_READ_HELD(&amp->a_rwlock));
2709 anon_index = svd->anon_index + seg_page(seg, addr);
2710 anon_array_enter(amp, anon_index, &cookie);
2711 anon_lock = 1;
2714 if (svd->vp == NULL && amp != NULL) {
2715 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2717 * Allocate a (normally) writable anonymous page of
2718 * zeroes. If no advance reservations, reserve now.
2720 if (svd->flags & MAP_NORESERVE) {
2721 if (anon_resv_zone(ptob(1),
2722 seg->s_as->a_proc->p_zone)) {
2723 atomic_add_long(&svd->swresv, ptob(1));
2724 atomic_add_long(&seg->s_as->a_resvsize,
2725 ptob(1));
2726 } else {
2727 err = ENOMEM;
2728 goto out;
2731 if ((pp = anon_zero(seg, addr, &ap,
2732 svd->cred)) == NULL) {
2733 err = ENOMEM;
2734 goto out; /* out of swap space */
2737 * Re-acquire the anon_map lock and
2738 * initialize the anon array entry.
2740 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2741 ANON_SLEEP);
2743 ASSERT(pp->p_szc == 0);
2746 * Handle pages that have been marked for migration
2748 if (lgrp_optimizations())
2749 page_migrate(seg, addr, &pp, 1);
2751 if (enable_mbit_wa) {
2752 if (rw == S_WRITE)
2753 hat_setmod(pp);
2754 else if (!hat_ismod(pp))
2755 prot &= ~PROT_WRITE;
2758 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2759 * with MC_LOCKAS, MCL_FUTURE) and this is a
2760 * MAP_NORESERVE segment, we may need to
2761 * permanently lock the page as it is being faulted
2762 * for the first time. The following text applies
2763 * only to MAP_NORESERVE segments:
2765 * As per memcntl(2), if this segment was created
2766 * after MCL_FUTURE was applied (a "future"
2767 * segment), its pages must be locked. If this
2768 * segment existed at MCL_FUTURE application (a
2769 * "past" segment), the interface is unclear.
2771 * We decide to lock only if vpage is present:
2773 * - "future" segments will have a vpage array (see
2774 * as_map), and so will be locked as required
2776 * - "past" segments may not have a vpage array,
2777 * depending on whether events (such as
2778 * mprotect) have occurred. Locking if vpage
2779 * exists will preserve legacy behavior. Not
2780 * locking if vpage is absent, will not break
2781 * the interface or legacy behavior. Note that
2782 * allocating vpage here if it's absent requires
2783 * upgrading the segvn reader lock, the cost of
2784 * which does not seem worthwhile.
2786 * Usually testing and setting VPP_ISPPLOCK and
2787 * VPP_SETPPLOCK requires holding the segvn lock as
2788 * writer, but in this case all readers are
2789 * serializing on the anon array lock.
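/*
 * Illustrative userland trigger (added; not part of this file, and a
 * sketch only): a process sets AS_PAGLCK on its address space via
 * memcntl(2) with MC_LOCKAS and MCL_FUTURE, after which first-touch
 * faults on its MAP_NORESERVE segments reach the locking code below.
 * Locking all future mappings requires the privilege to lock memory.
 *
 *	#include <sys/types.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		if (memcntl(NULL, 0, MC_LOCKAS, (caddr_t)MCL_FUTURE,
 *		    0, 0) == -1) {
 *			perror("memcntl");
 *			return (1);
 *		}
 *		return (0);
 *	}
 */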
2791 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2792 (svd->flags & MAP_NORESERVE) &&
2793 !VPP_ISPPLOCK(vpage)) {
2794 proc_t *p = seg->s_as->a_proc;
2795 ASSERT(svd->type == MAP_PRIVATE);
2796 mutex_enter(&p->p_lock);
2797 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2798 1) == 0) {
2799 claim = VPP_PROT(vpage) & PROT_WRITE;
2800 if (page_pp_lock(pp, claim, 0)) {
2801 VPP_SETPPLOCK(vpage);
2802 } else {
2803 rctl_decr_locked_mem(p, NULL,
2804 PAGESIZE, 1);
2807 mutex_exit(&p->p_lock);
2810 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2811 hat_memload(hat, addr, pp, prot, hat_flag);
2813 if (!(hat_flag & HAT_LOAD_LOCK))
2814 page_unlock(pp);
2816 anon_array_exit(&cookie);
2817 return (0);
2822 * Obtain the page structure via anon_getpage() if it is
2823 * a private copy of an object (the result of a previous
2824 * copy-on-write).
2826 if (amp != NULL) {
2827 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2828 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2829 seg, addr, rw, svd->cred);
2830 if (err)
2831 goto out;
2833 if (svd->type == MAP_SHARED) {
2835 * If this is a shared mapping to an
2836 * anon_map, then ignore the write
2837 * permissions returned by anon_getpage().
2838 * They apply to the private mappings
2839 * of this anon_map.
2841 vpprot |= PROT_WRITE;
2843 opp = anon_pl[0];
2848 * Search the pl[] list passed in if it is from the
2849 * original object (i.e., not a private copy).
2851 if (opp == NULL) {
2853 * Find original page. We must be bringing it in
2854 * from the list in pl[].
2856 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2857 if (opp == PAGE_HANDLED)
2858 continue;
2859 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2860 if (opp->p_offset == off)
2861 break;
2863 if (opp == NULL) {
2864 panic("segvn_faultpage not found");
2865 /*NOTREACHED*/
2867 *ppp = PAGE_HANDLED;
2871 ASSERT(PAGE_LOCKED(opp));
2873 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2874 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2877 * The fault is treated as a copy-on-write fault if a
2878 * write occurs on a private segment and the object
2879 * page (i.e., mapping) is write protected. We assume
2880 * that fatal protection checks have already been made.
2883 if (brkcow) {
2884 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2885 cow = !(vpprot & PROT_WRITE);
2886 } else if (svd->tr_state == SEGVN_TR_ON) {
2888 * If we are doing text replication COW on first touch.
2890 ASSERT(amp != NULL);
2891 ASSERT(svd->vp != NULL);
2892 ASSERT(rw != S_WRITE);
2893 cow = (ap == NULL);
2894 } else {
2895 cow = 0;
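/*
 * Summary of the cow decision above (added for clarity):
 *
 *	brkcow	tr_state		cow
 *	  1	TR_OFF (asserted)	!(vpprot & PROT_WRITE)
 *	  0	TR_ON			(ap == NULL), i.e. COW on first
 *					touch of replicated text
 *	  0	otherwise		0
 */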
2899 * If not a copy-on-write case load the translation
2900 * and return.
2902 if (cow == 0) {
2905 * Handle pages that have been marked for migration
2907 if (lgrp_optimizations())
2908 page_migrate(seg, addr, &opp, 1);
2910 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2911 if (rw == S_WRITE)
2912 hat_setmod(opp);
2913 else if (rw != S_OTHER && !hat_ismod(opp))
2914 prot &= ~PROT_WRITE;
2917 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2918 (!svd->pageprot && svd->prot == (prot & vpprot)));
2919 ASSERT(amp == NULL ||
2920 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2921 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2922 svd->rcookie);
2924 if (!(hat_flag & HAT_LOAD_LOCK))
2925 page_unlock(opp);
2927 if (anon_lock) {
2928 anon_array_exit(&cookie);
2930 return (0);
2933 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2935 hat_setref(opp);
2937 ASSERT(amp != NULL && anon_lock);
2940 * Steal the page only if it isn't a private page
2941 * since stealing a private page is not worth the effort.
2943 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
2944 steal = 1;
2947 * Steal the original page if the following conditions are true:
2949 * We are low on memory, the page is not private, page is not large,
2950 * not shared, not modified, not `locked' or if we have it `locked'
2951 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
2952 * that the page is not shared) and if it doesn't have any
2953 * translations. page_struct_lock isn't needed to look at p_cowcnt
2954 * and p_lckcnt because we first get exclusive lock on page.
2956 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
2958 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
2959 page_tryupgrade(opp) && !hat_ismod(opp) &&
2960 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
2961 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
2962 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
2964 * Check if this page has other translations
2965 * after unloading our translation.
2967 if (hat_page_is_mapped(opp)) {
2968 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2969 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
2970 HAT_UNLOAD);
2974 * hat_unload() might sync back someone else's recent
2975 * modification, so check again.
2977 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
2978 pageflags |= STEAL_PAGE;
2982 * If we have a vpage pointer, see if it indicates that we have
2983 * ``locked'' the page we map -- if so, tell anon_private to
2984 * transfer the locking resource to the new page.
2986 * See Statement at the beginning of segvn_lockop regarding
2987 * the way lockcnts/cowcnts are handled during COW.
2990 if (vpage != NULL && VPP_ISPPLOCK(vpage))
2991 pageflags |= LOCK_PAGE;
2994 * Allocate a private page and perform the copy.
2995 * For MAP_NORESERVE reserve swap space now, unless this
2996 * is a cow fault on an existing anon page in which case
2997 * MAP_NORESERVE will have made advance reservations.
2999 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3000 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3001 atomic_add_long(&svd->swresv, ptob(1));
3002 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3003 } else {
3004 page_unlock(opp);
3005 err = ENOMEM;
3006 goto out;
3009 oldap = ap;
3010 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3011 if (pp == NULL) {
3012 err = ENOMEM; /* out of swap space */
3013 goto out;
3017 * If we copied away from an anonymous page, then
3018 * we are one step closer to freeing up an anon slot.
3020 * NOTE: The original anon slot must be released while
3021 * holding the "anon_map" lock. This is necessary to prevent
3022 * other threads from obtaining a pointer to the anon slot
3023 * which may be freed if its "refcnt" is 1.
3025 if (oldap != NULL)
3026 anon_decref(oldap);
3028 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3031 * Handle pages that have been marked for migration
3033 if (lgrp_optimizations())
3034 page_migrate(seg, addr, &pp, 1);
3036 ASSERT(pp->p_szc == 0);
3038 ASSERT(!IS_VMODSORT(pp->p_vnode));
3039 if (enable_mbit_wa) {
3040 if (rw == S_WRITE)
3041 hat_setmod(pp);
3042 else if (!hat_ismod(pp))
3043 prot &= ~PROT_WRITE;
3046 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3047 hat_memload(hat, addr, pp, prot, hat_flag);
3049 if (!(hat_flag & HAT_LOAD_LOCK))
3050 page_unlock(pp);
3052 ASSERT(anon_lock);
3053 anon_array_exit(&cookie);
3054 return (0);
3055 out:
3056 if (anon_lock)
3057 anon_array_exit(&cookie);
3059 if (type == F_SOFTLOCK) {
3060 atomic_add_long((ulong_t *)&svd->softlockcnt, -1);
3062 return (FC_MAKE_ERR(err));
3066 * relocate a bunch of smaller targ pages into one large repl page. all targ
3067 * pages must be complete pages smaller than replacement pages.
3068 * it's assumed that no page's szc can change since they are all PAGESIZE or
3069 * complete large pages locked SHARED.
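/*
 * Illustrative picture (added): with repl_szc covering, say, 8
 * constituent pages, page_relocate() moves each targ page (or small
 * large-page group) into the matching constituents of "replacement",
 * and the final loop repoints targ[] at them, downgraded to SHARED:
 *
 *	targ:	[p0] [p1] ... [p7]	scattered, locked EXCL
 *	repl:	[R+0][R+1]...[R+7]	one physically contiguous
 *					szc page
 */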
3071 static void
3072 segvn_relocate_pages(page_t **targ, page_t *replacement)
3074 page_t *pp;
3075 pgcnt_t repl_npgs, curnpgs;
3076 pgcnt_t i;
3077 uint_t repl_szc = replacement->p_szc;
3078 page_t *first_repl = replacement;
3079 page_t *repl;
3080 spgcnt_t npgs;
3082 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3084 ASSERT(repl_szc != 0);
3085 npgs = repl_npgs = page_get_pagecnt(repl_szc);
3087 i = 0;
3088 while (repl_npgs) {
3089 spgcnt_t nreloc;
3090 int err;
3091 ASSERT(replacement != NULL);
3092 pp = targ[i];
3093 ASSERT(pp->p_szc < repl_szc);
3094 ASSERT(PAGE_EXCL(pp));
3095 ASSERT(!PP_ISFREE(pp));
3096 curnpgs = page_get_pagecnt(pp->p_szc);
3097 if (curnpgs == 1) {
3098 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3099 repl = replacement;
3100 page_sub(&replacement, repl);
3101 ASSERT(PAGE_EXCL(repl));
3102 ASSERT(!PP_ISFREE(repl));
3103 ASSERT(repl->p_szc == repl_szc);
3104 } else {
3105 page_t *repl_savepp;
3106 int j;
3107 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3108 repl_savepp = replacement;
3109 for (j = 0; j < curnpgs; j++) {
3110 repl = replacement;
3111 page_sub(&replacement, repl);
3112 ASSERT(PAGE_EXCL(repl));
3113 ASSERT(!PP_ISFREE(repl));
3114 ASSERT(repl->p_szc == repl_szc);
3115 ASSERT(page_pptonum(targ[i + j]) ==
3116 page_pptonum(targ[i]) + j);
3118 repl = repl_savepp;
3119 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3121 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3122 if (err || nreloc != curnpgs) {
3123 panic("segvn_relocate_pages: "
3124 "page_relocate failed err=%d curnpgs=%ld "
3125 "nreloc=%ld", err, curnpgs, nreloc);
3127 ASSERT(curnpgs <= repl_npgs);
3128 repl_npgs -= curnpgs;
3129 i += curnpgs;
3131 ASSERT(replacement == NULL);
3133 repl = first_repl;
3134 repl_npgs = npgs;
3135 for (i = 0; i < repl_npgs; i++) {
3136 ASSERT(PAGE_EXCL(repl));
3137 ASSERT(!PP_ISFREE(repl));
3138 targ[i] = repl;
3139 page_downgrade(targ[i]);
3140 repl++;
3145 * Check if all pages in the ppa array are complete pages smaller than szc,
3146 * and their roots will still be aligned relative to their current size if the
3147 * entire ppa array is relocated into one szc page. If these conditions are
3148 * not met return 0.
3150 * If all pages are properly aligned attempt to upgrade their locks
3151 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3152 * upgrdfail was set to 0 by caller.
3154 * Return 1 if all pages are aligned and locked exclusively.
3156 * If all pages in ppa array happen to be physically contiguous to make one
3157 * szc page and all exclusive locks are successfully obtained promote the page
3158 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
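/*
 * Illustrative caller pattern (added sketch; the real call site is in
 * the large-page fault path):
 *
 *	int upgrdfail = 0;
 *	uint_t pszc;
 *
 *	if (segvn_full_szcpages(ppa, szc, &upgrdfail, &pszc)) {
 *		use ppa as one szc page (EXCL, or SHARED if promoted)
 *	} else if (upgrdfail) {
 *		lock upgrade failed: retry, possibly at size pszc
 *	} else {
 *		pages misaligned: map the range with small pages
 *	}
 */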
3160 static int
3161 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3163 page_t *pp;
3164 pfn_t pfn;
3165 pgcnt_t totnpgs = page_get_pagecnt(szc);
3166 pfn_t first_pfn;
3167 int contig = 1;
3168 pgcnt_t i;
3169 pgcnt_t j;
3170 uint_t curszc;
3171 pgcnt_t curnpgs;
3172 int root = 0;
3174 ASSERT(szc > 0);
3176 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3178 for (i = 0; i < totnpgs; i++) {
3179 pp = ppa[i];
3180 ASSERT(PAGE_SHARED(pp));
3181 ASSERT(!PP_ISFREE(pp));
3182 pfn = page_pptonum(pp);
3183 if (i == 0) {
3184 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3185 contig = 0;
3186 } else {
3187 first_pfn = pfn;
3189 } else if (contig && pfn != first_pfn + i) {
3190 contig = 0;
3192 if (pp->p_szc == 0) {
3193 if (root) {
3194 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3195 return (0);
3197 } else if (!root) {
3198 if ((curszc = pp->p_szc) >= szc) {
3199 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3200 return (0);
3202 if (curszc == 0) {
3204 * p_szc changed means we don't have all pages
3205 * locked. return failure.
3207 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3208 return (0);
3210 curnpgs = page_get_pagecnt(curszc);
3211 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3212 !IS_P2ALIGNED(i, curnpgs)) {
3213 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3214 return (0);
3216 root = 1;
3217 } else {
3218 ASSERT(i > 0);
3219 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3220 if (pp->p_szc != curszc) {
3221 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3222 return (0);
3224 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3225 panic("segvn_full_szcpages: "
3226 "large page not physically contiguous");
3228 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3229 root = 0;
3234 for (i = 0; i < totnpgs; i++) {
3235 ASSERT(ppa[i]->p_szc < szc);
3236 if (!page_tryupgrade(ppa[i])) {
3237 for (j = 0; j < i; j++) {
3238 page_downgrade(ppa[j]);
3240 *pszc = ppa[i]->p_szc;
3241 *upgrdfail = 1;
3242 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3243 return (0);
3248 * When a page is put on a free cachelist its szc is set to 0. If the
3249 * file system reclaimed pages from the cachelist, the targ pages will
3250 * be physically contiguous with 0 p_szc. In this case just upgrade the
3251 * szc of the targ pages without any relocations.
3252 * To avoid any hat issues with previous small mappings
3253 * hat_pageunload() the target pages first.
3255 if (contig) {
3256 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3257 for (i = 0; i < totnpgs; i++) {
3258 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3260 for (i = 0; i < totnpgs; i++) {
3261 ppa[i]->p_szc = szc;
3263 for (i = 0; i < totnpgs; i++) {
3264 ASSERT(PAGE_EXCL(ppa[i]));
3265 page_downgrade(ppa[i]);
3267 if (pszc != NULL) {
3268 *pszc = szc;
3271 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3272 return (1);
3276 * Create physically contiguous pages for [vp, off] - [vp, off +
3277 * page_size(szc)) range and for private segment return them in ppa array.
3278 * Pages are created either via IO or relocations.
3280 * Return 1 on success and 0 on failure.
3282 * If physically contiguous pages already exist for this range return 1 without
3283 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3284 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
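/*
 * Illustrative caller pattern for the ppa[0] convention (added
 * sketch; the real caller is segvn_fault_vnodepages()):
 *
 *	ppa[0] = NULL;
 *	if (segvn_fill_vp_pages(svd, vp, off, szc, ppa, &pplist,
 *	    &pszc, &downsize)) {
 *		if (ppa[0] == NULL)
 *			contiguous pages already existed; fill ppa
 *			via VOP_GETPAGE() instead
 *	}
 */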
3287 static int
3288 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3289 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3290 int *downsize)
3293 page_t *pplist = *ppplist;
3294 size_t pgsz = page_get_pagesize(szc);
3295 pgcnt_t pages = btop(pgsz);
3296 ulong_t start_off = off;
3297 u_offset_t eoff = off + pgsz;
3298 spgcnt_t nreloc;
3299 u_offset_t io_off = off;
3300 size_t io_len;
3301 page_t *io_pplist = NULL;
3302 page_t *done_pplist = NULL;
3303 pgcnt_t pgidx = 0;
3304 page_t *pp;
3305 page_t *newpp;
3306 page_t *targpp;
3307 int io_err = 0;
3308 int i;
3309 pfn_t pfn;
3310 ulong_t ppages;
3311 page_t *targ_pplist = NULL;
3312 page_t *repl_pplist = NULL;
3313 page_t *tmp_pplist;
3314 int nios = 0;
3315 uint_t pszc;
3316 struct vattr va;
3318 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3320 ASSERT(szc != 0);
3321 ASSERT(pplist->p_szc == szc);
3324 * downsize will be set to 1 only if we fail to lock pages. this will
3325 * allow subsequent faults to try to relocate the page again. If we
3326 * fail due to misalignment don't downsize and let the caller map the
3327 * whole region with small mappings to avoid more faults into the area
3328 * where we can't get large pages anyway.
3330 *downsize = 0;
3332 while (off < eoff) {
3333 newpp = pplist;
3334 ASSERT(newpp != NULL);
3335 ASSERT(PAGE_EXCL(newpp));
3336 ASSERT(!PP_ISFREE(newpp));
3338 * we pass NULL for nrelocp to page_lookup_create()
3339 * so that it doesn't relocate. We relocate here
3340 * later only after we make sure we can lock all
3341 * pages in the range we handle and they are all
3342 * aligned.
3344 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3345 ASSERT(pp != NULL);
3346 ASSERT(!PP_ISFREE(pp));
3347 ASSERT(pp->p_vnode == vp);
3348 ASSERT(pp->p_offset == off);
3349 if (pp == newpp) {
3350 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3351 page_sub(&pplist, pp);
3352 ASSERT(PAGE_EXCL(pp));
3353 ASSERT(page_iolock_assert(pp));
3354 page_list_concat(&io_pplist, &pp);
3355 off += PAGESIZE;
3356 continue;
3358 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3359 pfn = page_pptonum(pp);
3360 pszc = pp->p_szc;
3361 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3362 IS_P2ALIGNED(pfn, pages)) {
3363 ASSERT(repl_pplist == NULL);
3364 ASSERT(done_pplist == NULL);
3365 ASSERT(pplist == *ppplist);
3366 page_unlock(pp);
3367 page_free_replacement_page(pplist);
3368 page_create_putback(pages);
3369 *ppplist = NULL;
3370 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3371 return (1);
3373 if (pszc >= szc) {
3374 page_unlock(pp);
3375 segvn_faultvnmpss_align_err1++;
3376 goto out;
3378 ppages = page_get_pagecnt(pszc);
3379 if (!IS_P2ALIGNED(pfn, ppages)) {
3380 ASSERT(pszc > 0);
3382 * sizing down to pszc won't help.
3384 page_unlock(pp);
3385 segvn_faultvnmpss_align_err2++;
3386 goto out;
3388 pfn = page_pptonum(newpp);
3389 if (!IS_P2ALIGNED(pfn, ppages)) {
3390 ASSERT(pszc > 0);
3392 * sizing down to pszc won't help.
3394 page_unlock(pp);
3395 segvn_faultvnmpss_align_err3++;
3396 goto out;
3398 if (!PAGE_EXCL(pp)) {
3399 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3400 page_unlock(pp);
3401 *downsize = 1;
3402 *ret_pszc = pp->p_szc;
3403 goto out;
3405 targpp = pp;
3406 if (io_pplist != NULL) {
3407 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3408 io_len = off - io_off;
3410 * Some file systems like NFS don't check EOF
3411 * conditions in VOP_PAGEIO(). Check it here
3412 * now that pages are locked SE_EXCL. Any file
3413 * truncation will wait until the pages are
3414 * unlocked so no need to worry that file will
3415 * be truncated after we check its size here.
3416 * XXX fix NFS to remove this check.
3418 va.va_mask = AT_SIZE;
3419 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3420 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3421 page_unlock(targpp);
3422 goto out;
3424 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3425 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3426 *downsize = 1;
3427 *ret_pszc = 0;
3428 page_unlock(targpp);
3429 goto out;
3431 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3432 B_READ, svd->cred, NULL);
3433 if (io_err) {
3434 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3435 page_unlock(targpp);
3436 if (io_err == EDEADLK) {
3437 segvn_vmpss_pageio_deadlk_err++;
3439 goto out;
3441 nios++;
3442 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3443 while (io_pplist != NULL) {
3444 pp = io_pplist;
3445 page_sub(&io_pplist, pp);
3446 ASSERT(page_iolock_assert(pp));
3447 page_io_unlock(pp);
3448 pgidx = (pp->p_offset - start_off) >>
3449 PAGESHIFT;
3450 ASSERT(pgidx < pages);
3451 ppa[pgidx] = pp;
3452 page_list_concat(&done_pplist, &pp);
3455 pp = targpp;
3456 ASSERT(PAGE_EXCL(pp));
3457 ASSERT(pp->p_szc <= pszc);
3458 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3459 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3460 page_unlock(pp);
3461 *downsize = 1;
3462 *ret_pszc = pp->p_szc;
3463 goto out;
3465 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3467 * page szc could have changed before the entire group was
3468 * locked. Reread page szc.
3470 pszc = pp->p_szc;
3471 ppages = page_get_pagecnt(pszc);
3473 /* link just the roots */
3474 page_list_concat(&targ_pplist, &pp);
3475 page_sub(&pplist, newpp);
3476 page_list_concat(&repl_pplist, &newpp);
3477 off += PAGESIZE;
3478 while (--ppages != 0) {
3479 newpp = pplist;
3480 page_sub(&pplist, newpp);
3481 off += PAGESIZE;
3483 io_off = off;
3485 if (io_pplist != NULL) {
3486 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3487 io_len = eoff - io_off;
3488 va.va_mask = AT_SIZE;
3489 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3490 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3491 goto out;
3493 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3494 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3495 *downsize = 1;
3496 *ret_pszc = 0;
3497 goto out;
3499 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3500 B_READ, svd->cred, NULL);
3501 if (io_err) {
3502 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3503 if (io_err == EDEADLK) {
3504 segvn_vmpss_pageio_deadlk_err++;
3506 goto out;
3508 nios++;
3509 while (io_pplist != NULL) {
3510 pp = io_pplist;
3511 page_sub(&io_pplist, pp);
3512 ASSERT(page_iolock_assert(pp));
3513 page_io_unlock(pp);
3514 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3515 ASSERT(pgidx < pages);
3516 ppa[pgidx] = pp;
3520 * we're now bound to succeed or panic.
3521 * remove pages from done_pplist. it's not needed anymore.
3523 while (done_pplist != NULL) {
3524 pp = done_pplist;
3525 page_sub(&done_pplist, pp);
3527 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3528 ASSERT(pplist == NULL);
3529 *ppplist = NULL;
3530 while (targ_pplist != NULL) {
3531 int ret;
3532 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3533 ASSERT(repl_pplist);
3534 pp = targ_pplist;
3535 page_sub(&targ_pplist, pp);
3536 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3537 newpp = repl_pplist;
3538 page_sub(&repl_pplist, newpp);
3539 #ifdef DEBUG
3540 pfn = page_pptonum(pp);
3541 pszc = pp->p_szc;
3542 ppages = page_get_pagecnt(pszc);
3543 ASSERT(IS_P2ALIGNED(pfn, ppages));
3544 pfn = page_pptonum(newpp);
3545 ASSERT(IS_P2ALIGNED(pfn, ppages));
3546 ASSERT(P2PHASE(pfn, pages) == pgidx);
3547 #endif
3548 nreloc = 0;
3549 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3550 if (ret != 0 || nreloc == 0) {
3551 panic("segvn_fill_vp_pages: "
3552 "page_relocate failed");
3554 pp = newpp;
3555 while (nreloc-- != 0) {
3556 ASSERT(PAGE_EXCL(pp));
3557 ASSERT(pp->p_vnode == vp);
3558 ASSERT(pgidx ==
3559 ((pp->p_offset - start_off) >> PAGESHIFT));
3560 ppa[pgidx++] = pp;
3561 pp++;
3565 if (svd->type == MAP_PRIVATE) {
3566 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3567 for (i = 0; i < pages; i++) {
3568 ASSERT(ppa[i] != NULL);
3569 ASSERT(PAGE_EXCL(ppa[i]));
3570 ASSERT(ppa[i]->p_vnode == vp);
3571 ASSERT(ppa[i]->p_offset ==
3572 start_off + (i << PAGESHIFT));
3573 page_downgrade(ppa[i]);
3575 ppa[pages] = NULL;
3576 } else {
3577 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3579 * the caller will still call VOP_GETPAGE() for shared segments
3580 * to check FS write permissions. For private segments we map
3581 * file read only anyway. so no VOP_GETPAGE is needed.
3583 for (i = 0; i < pages; i++) {
3584 ASSERT(ppa[i] != NULL);
3585 ASSERT(PAGE_EXCL(ppa[i]));
3586 ASSERT(ppa[i]->p_vnode == vp);
3587 ASSERT(ppa[i]->p_offset ==
3588 start_off + (i << PAGESHIFT));
3589 page_unlock(ppa[i]);
3591 ppa[0] = NULL;
3594 return (1);
3595 out:
3597 * Do the cleanup. Unlock target pages we didn't relocate. They are
3598 * linked on targ_pplist by root pages. reassemble unused replacement
3599 * and io pages back to pplist.
3601 if (io_pplist != NULL) {
3602 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3603 pp = io_pplist;
3604 do {
3605 ASSERT(pp->p_vnode == vp);
3606 ASSERT(pp->p_offset == io_off);
3607 ASSERT(page_iolock_assert(pp));
3608 page_io_unlock(pp);
3609 page_hashout(pp, NULL);
3610 io_off += PAGESIZE;
3611 } while ((pp = pp->p_next) != io_pplist);
3612 page_list_concat(&io_pplist, &pplist);
3613 pplist = io_pplist;
3615 tmp_pplist = NULL;
3616 while (targ_pplist != NULL) {
3617 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3618 pp = targ_pplist;
3619 ASSERT(PAGE_EXCL(pp));
3620 page_sub(&targ_pplist, pp);
3622 pszc = pp->p_szc;
3623 ppages = page_get_pagecnt(pszc);
3624 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3626 if (pszc != 0) {
3627 group_page_unlock(pp);
3629 page_unlock(pp);
3631 pp = repl_pplist;
3632 ASSERT(pp != NULL);
3633 ASSERT(PAGE_EXCL(pp));
3634 ASSERT(pp->p_szc == szc);
3635 page_sub(&repl_pplist, pp);
3637 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3639 /* relink replacement page */
3640 page_list_concat(&tmp_pplist, &pp);
3641 while (--ppages != 0) {
3642 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3643 pp++;
3644 ASSERT(PAGE_EXCL(pp));
3645 ASSERT(pp->p_szc == szc);
3646 page_list_concat(&tmp_pplist, &pp);
3649 if (tmp_pplist != NULL) {
3650 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3651 page_list_concat(&tmp_pplist, &pplist);
3652 pplist = tmp_pplist;
3655 * at this point all pages are either on done_pplist or
3656 * pplist. They can't be all on done_pplist otherwise
3657 * we'd've been done.
3659 ASSERT(pplist != NULL);
3660 if (nios != 0) {
3661 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3662 pp = pplist;
3663 do {
3664 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3665 ASSERT(pp->p_szc == szc);
3666 ASSERT(PAGE_EXCL(pp));
3667 ASSERT(pp->p_vnode != vp);
3668 pp->p_szc = 0;
3669 } while ((pp = pp->p_next) != pplist);
3671 pp = done_pplist;
3672 do {
3673 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3674 ASSERT(pp->p_szc == szc);
3675 ASSERT(PAGE_EXCL(pp));
3676 ASSERT(pp->p_vnode == vp);
3677 pp->p_szc = 0;
3678 } while ((pp = pp->p_next) != done_pplist);
3680 while (pplist != NULL) {
3681 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3682 pp = pplist;
3683 page_sub(&pplist, pp);
3684 page_free(pp, 0);
3687 while (done_pplist != NULL) {
3688 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3689 pp = done_pplist;
3690 page_sub(&done_pplist, pp);
3691 page_unlock(pp);
3693 *ppplist = NULL;
3694 return (0);
3696 ASSERT(pplist == *ppplist);
3697 if (io_err) {
3698 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3700 * don't downsize on io error.
3701 * see if vop_getpage succeeds.
3702 * pplist may still be used in this case
3703 * for relocations.
3705 return (0);
3707 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3708 page_free_replacement_page(pplist);
3709 page_create_putback(pages);
3710 *ppplist = NULL;
3711 return (0);
3714 int segvn_anypgsz = 0;
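/*
 * (Added notes on the two helper macros below.)
 * SEGVN_RESTORE_SOFTLOCK_VP undoes the softlockcnt bump taken for an
 * F_SOFTLOCK fault when the large-page path must bail out or retry.
 * SEGVN_UPDATE_MODBITS applies the same IS_VMODSORT logic as the
 * single-page segvn_faultpage() case to a whole ppa[] array: on
 * S_WRITE mark every page modified; otherwise, if any page is clean,
 * strip PROT_WRITE from the mapping so the first real write faults.
 */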
3716 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3717 if ((type) == F_SOFTLOCK) { \
3718 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3719 -(pages)); \
3722 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3723 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3724 if ((rw) == S_WRITE) { \
3725 for (i = 0; i < (pages); i++) { \
3726 ASSERT((ppa)[i]->p_vnode == \
3727 (ppa)[0]->p_vnode); \
3728 hat_setmod((ppa)[i]); \
3730 } else if ((rw) != S_OTHER && \
3731 ((prot) & (vpprot) & PROT_WRITE)) { \
3732 for (i = 0; i < (pages); i++) { \
3733 ASSERT((ppa)[i]->p_vnode == \
3734 (ppa)[0]->p_vnode); \
3735 if (!hat_ismod((ppa)[i])) { \
3736 prot &= ~PROT_WRITE; \
3737 break; \
3743 #ifdef VM_STATS
3745 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3746 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3748 #else /* VM_STATS */
3750 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3752 #endif
3754 static faultcode_t
3755 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3756 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3757 caddr_t eaddr, int brkcow)
3759 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3760 struct anon_map *amp = svd->amp;
3761 uchar_t segtype = svd->type;
3762 uint_t szc = seg->s_szc;
3763 size_t pgsz = page_get_pagesize(szc);
3764 size_t maxpgsz = pgsz;
3765 pgcnt_t pages = btop(pgsz);
3766 pgcnt_t maxpages = pages;
3767 size_t ppasize = (pages + 1) * sizeof (page_t *);
3768 caddr_t a = lpgaddr;
3769 caddr_t maxlpgeaddr = lpgeaddr;
3770 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3771 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3772 struct vpage *vpage = (svd->vpage != NULL) ?
3773 &svd->vpage[seg_page(seg, a)] : NULL;
3774 vnode_t *vp = svd->vp;
3775 page_t **ppa;
3776 uint_t pszc;
3777 size_t ppgsz;
3778 pgcnt_t ppages;
3779 faultcode_t err = 0;
3780 int ierr;
3781 int vop_size_err = 0;
3782 uint_t protchk, prot, vpprot;
3783 ulong_t i;
3784 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3785 anon_sync_obj_t an_cookie;
3786 enum seg_rw arw;
3787 int alloc_failed = 0;
3788 int adjszc_chk;
3789 struct vattr va;
3790 int xhat = 0;
3791 page_t *pplist;
3792 pfn_t pfn;
3793 int physcontig;
3794 int upgrdfail;
3795 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3796 int tron = (svd->tr_state == SEGVN_TR_ON);
3798 ASSERT(szc != 0);
3799 ASSERT(vp != NULL);
3800 ASSERT(brkcow == 0 || amp != NULL);
3801 ASSERT(tron == 0 || amp != NULL);
3802 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3803 ASSERT(!(svd->flags & MAP_NORESERVE));
3804 ASSERT(type != F_SOFTUNLOCK);
3805 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3806 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3807 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3808 ASSERT(seg->s_szc < NBBY * sizeof (int));
3809 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3810 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3812 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3813 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3815 if (svd->flags & MAP_TEXT) {
3816 hat_flag |= HAT_LOAD_TEXT;
3819 if (svd->pageprot) {
3820 switch (rw) {
3821 case S_READ:
3822 protchk = PROT_READ;
3823 break;
3824 case S_WRITE:
3825 protchk = PROT_WRITE;
3826 break;
3827 case S_EXEC:
3828 protchk = PROT_EXEC;
3829 break;
3830 case S_OTHER:
3831 default:
3832 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3833 break;
3835 } else {
3836 prot = svd->prot;
3837 /* caller has already done segment level protection check. */
3840 if (seg->s_as->a_hat != hat) {
3841 xhat = 1;
3844 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3845 SEGVN_VMSTAT_FLTVNPAGES(2);
3846 arw = S_READ;
3847 } else {
3848 arw = rw;
3851 ppa = kmem_alloc(ppasize, KM_SLEEP);
3853 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3855 for (;;) {
3856 adjszc_chk = 0;
3857 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3858 if (adjszc_chk) {
3859 while (szc < seg->s_szc) {
3860 uintptr_t e;
3861 uint_t tszc;
3862 tszc = segvn_anypgsz_vnode ? szc + 1 :
3863 seg->s_szc;
3864 ppgsz = page_get_pagesize(tszc);
3865 if (!IS_P2ALIGNED(a, ppgsz) ||
3866 ((alloc_failed >> tszc) & 0x1)) {
3867 break;
3869 SEGVN_VMSTAT_FLTVNPAGES(4);
3870 szc = tszc;
3871 pgsz = ppgsz;
3872 pages = btop(pgsz);
3873 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3874 lpgeaddr = (caddr_t)e;
3878 again:
3879 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3880 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3881 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3882 anon_array_enter(amp, aindx, &an_cookie);
3883 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3884 SEGVN_VMSTAT_FLTVNPAGES(5);
3885 ASSERT(anon_pages(amp->ahp, aindx,
3886 maxpages) == maxpages);
3887 anon_array_exit(&an_cookie);
3888 ANON_LOCK_EXIT(&amp->a_rwlock);
3889 err = segvn_fault_anonpages(hat, seg,
3890 a, a + maxpgsz, type, rw,
3891 MAX(a, addr),
3892 MIN(a + maxpgsz, eaddr), brkcow);
3893 if (err != 0) {
3894 SEGVN_VMSTAT_FLTVNPAGES(6);
3895 goto out;
3897 if (szc < seg->s_szc) {
3898 szc = seg->s_szc;
3899 pgsz = maxpgsz;
3900 pages = maxpages;
3901 lpgeaddr = maxlpgeaddr;
3903 goto next;
3904 } else {
3905 ASSERT(anon_pages(amp->ahp, aindx,
3906 maxpages) == 0);
3907 SEGVN_VMSTAT_FLTVNPAGES(7);
3908 anon_array_exit(&an_cookie);
3909 ANON_LOCK_EXIT(&amp->a_rwlock);
3912 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3913 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3915 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3916 ASSERT(vpage != NULL);
3917 prot = VPP_PROT(vpage);
3918 ASSERT(sameprot(seg, a, maxpgsz));
3919 if ((prot & protchk) == 0) {
3920 SEGVN_VMSTAT_FLTVNPAGES(8);
3921 err = FC_PROT;
3922 goto out;
3925 if (type == F_SOFTLOCK) {
3926 atomic_add_long((ulong_t *)&svd->softlockcnt,
3927 pages);
3930 pplist = NULL;
3931 physcontig = 0;
3932 ppa[0] = NULL;
3933 if (!brkcow && !tron && szc &&
3934 !page_exists_physcontig(vp, off, szc,
3935 segtype == MAP_PRIVATE ? ppa : NULL)) {
3936 SEGVN_VMSTAT_FLTVNPAGES(9);
3937 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
3938 szc, 0, 0) && type != F_SOFTLOCK) {
3939 SEGVN_VMSTAT_FLTVNPAGES(10);
3940 pszc = 0;
3941 ierr = -1;
3942 alloc_failed |= (1 << szc);
3943 break;
3945 if (pplist != NULL &&
3946 vp->v_mpssdata == SEGVN_PAGEIO) {
3947 int downsize;
3948 SEGVN_VMSTAT_FLTVNPAGES(11);
3949 physcontig = segvn_fill_vp_pages(svd,
3950 vp, off, szc, ppa, &pplist,
3951 &pszc, &downsize);
3952 ASSERT(!physcontig || pplist == NULL);
3953 if (!physcontig && downsize &&
3954 type != F_SOFTLOCK) {
3955 ASSERT(pplist == NULL);
3956 SEGVN_VMSTAT_FLTVNPAGES(12);
3957 ierr = -1;
3958 break;
3960 ASSERT(!physcontig ||
3961 segtype == MAP_PRIVATE ||
3962 ppa[0] == NULL);
3963 if (physcontig && ppa[0] == NULL) {
3964 physcontig = 0;
3967 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
3968 SEGVN_VMSTAT_FLTVNPAGES(13);
3969 ASSERT(segtype == MAP_PRIVATE);
3970 physcontig = 1;
3973 if (!physcontig) {
3974 SEGVN_VMSTAT_FLTVNPAGES(14);
3975 ppa[0] = NULL;
3976 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
3977 &vpprot, ppa, pgsz, seg, a, arw,
3978 svd->cred, NULL);
3979 #ifdef DEBUG
3980 if (ierr == 0) {
3981 for (i = 0; i < pages; i++) {
3982 ASSERT(PAGE_LOCKED(ppa[i]));
3983 ASSERT(!PP_ISFREE(ppa[i]));
3984 ASSERT(ppa[i]->p_vnode == vp);
3985 ASSERT(ppa[i]->p_offset ==
3986 off + (i << PAGESHIFT));
3989 #endif /* DEBUG */
3990 if (segtype == MAP_PRIVATE) {
3991 SEGVN_VMSTAT_FLTVNPAGES(15);
3992 vpprot &= ~PROT_WRITE;
3994 } else {
3995 ASSERT(segtype == MAP_PRIVATE);
3996 SEGVN_VMSTAT_FLTVNPAGES(16);
3997 vpprot = PROT_ALL & ~PROT_WRITE;
3998 ierr = 0;
4001 if (ierr != 0) {
4002 SEGVN_VMSTAT_FLTVNPAGES(17);
4003 if (pplist != NULL) {
4004 SEGVN_VMSTAT_FLTVNPAGES(18);
4005 page_free_replacement_page(pplist);
4006 page_create_putback(pages);
4008 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4009 if (a + pgsz <= eaddr) {
4010 SEGVN_VMSTAT_FLTVNPAGES(19);
4011 err = FC_MAKE_ERR(ierr);
4012 goto out;
4014 va.va_mask = AT_SIZE;
4015 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4016 SEGVN_VMSTAT_FLTVNPAGES(20);
4017 err = FC_MAKE_ERR(EIO);
4018 goto out;
4020 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4021 SEGVN_VMSTAT_FLTVNPAGES(21);
4022 err = FC_MAKE_ERR(ierr);
4023 goto out;
4025 if (btopr(va.va_size) <
4026 btopr(off + (eaddr - a))) {
4027 SEGVN_VMSTAT_FLTVNPAGES(22);
4028 err = FC_MAKE_ERR(ierr);
4029 goto out;
4031 if (brkcow || tron || type == F_SOFTLOCK) {
4032 /* can't reduce map area */
4033 SEGVN_VMSTAT_FLTVNPAGES(23);
4034 vop_size_err = 1;
4035 goto out;
4037 SEGVN_VMSTAT_FLTVNPAGES(24);
4038 ASSERT(szc != 0);
4039 pszc = 0;
4040 ierr = -1;
4041 break;
4044 if (amp != NULL) {
4045 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
4046 anon_array_enter(amp, aindx, &an_cookie);
4048 if (amp != NULL &&
4049 anon_get_ptr(amp->ahp, aindx) != NULL) {
4050 ulong_t taindx = P2ALIGN(aindx, maxpages);
4052 SEGVN_VMSTAT_FLTVNPAGES(25);
4053 ASSERT(anon_pages(amp->ahp, taindx,
4054 maxpages) == maxpages);
4055 for (i = 0; i < pages; i++) {
4056 page_unlock(ppa[i]);
4058 anon_array_exit(&an_cookie);
4059 ANON_LOCK_EXIT(&amp->a_rwlock);
4060 if (pplist != NULL) {
4061 page_free_replacement_page(pplist);
4062 page_create_putback(pages);
4064 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4065 if (szc < seg->s_szc) {
4066 SEGVN_VMSTAT_FLTVNPAGES(26);
4068 * For private segments SOFTLOCK
4069 * either always breaks cow (any rw
4070 * type except S_READ_NOCOW) or
4071 * address space is locked as writer
4072 * (S_READ_NOCOW case) and anon slots
4073 * can't show up on second check.
4074 * Therefore if we are here for the
4075 * SOFTLOCK case it must be a cow
4076 * break, but a cow break never reduces
4077 * szc. Text replication (tron) in
4078 * this case works as a cow break.
4079 * Thus the assert below.
4081 ASSERT(!brkcow && !tron &&
4082 type != F_SOFTLOCK);
4083 pszc = seg->s_szc;
4084 ierr = -2;
4085 break;
4087 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4088 goto again;
4090 #ifdef DEBUG
4091 if (amp != NULL) {
4092 ulong_t taindx = P2ALIGN(aindx, maxpages);
4093 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4095 #endif /* DEBUG */
4097 if (brkcow || tron) {
4098 ASSERT(amp != NULL);
4099 ASSERT(pplist == NULL);
4100 ASSERT(szc == seg->s_szc);
4101 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4102 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4103 SEGVN_VMSTAT_FLTVNPAGES(27);
4104 ierr = anon_map_privatepages(amp, aindx, szc,
4105 seg, a, prot, ppa, vpage, segvn_anypgsz,
4106 tron ? PG_LOCAL : 0, svd->cred);
4107 if (ierr != 0) {
4108 SEGVN_VMSTAT_FLTVNPAGES(28);
4109 anon_array_exit(&an_cookie);
4110 ANON_LOCK_EXIT(&amp->a_rwlock);
4111 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4112 err = FC_MAKE_ERR(ierr);
4113 goto out;
4116 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4118 * p_szc can't be changed for locked
4119 * swapfs pages.
4121 ASSERT(svd->rcookie ==
4122 HAT_INVALID_REGION_COOKIE);
4123 hat_memload_array(hat, a, pgsz, ppa, prot,
4124 hat_flag);
4126 if (!(hat_flag & HAT_LOAD_LOCK)) {
4127 SEGVN_VMSTAT_FLTVNPAGES(29);
4128 for (i = 0; i < pages; i++) {
4129 page_unlock(ppa[i]);
4132 anon_array_exit(&an_cookie);
4133 ANON_LOCK_EXIT(&amp->a_rwlock);
4134 goto next;
4137 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4138 (!svd->pageprot && svd->prot == (prot & vpprot)));
4140 pfn = page_pptonum(ppa[0]);
4142 * hat_page_demote() needs an SE_EXCL lock on one of
4143 * the constituent page_t's and it decreases the root's p_szc
4144 * last. This means that if the root's p_szc is equal to szc and
4145 * all its constituent pages are locked, any
4146 * hat_page_demote() that could have changed p_szc to
4147 * szc is already done and no new hat_page_demote()
4148 * can start for this large page.
4152 * We need to make sure the same mapping size is used for
4153 * the same address range if there's a possibility the
4154 * address is already mapped, because the hat layer panics
4155 * when a translation is loaded for a range already
4156 * mapped with a different page size. We achieve this
4157 * by always using the largest page size possible subject
4158 * to the constraints of page size, segment page size
4159 * and page alignment. Since mappings are invalidated
4160 * when those constraints change, making it
4161 * impossible to use the previously used mapping size, no
4162 * mapping size conflicts should happen.
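/*
 * Illustrative example (hypothetical values, not from the original
 * source): assuming PAGESIZE == 4K and pgsz == 4M (btop(pgsz) ==
 * 0x400), a mapping is eligible for the 4M size only when both the
 * virtual address and the physical frame are 4M aligned:
 *
 *	IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(pfn, btop(pgsz))
 *
 * a == 0x10400000 with pfn == 0x8400 qualifies; a == 0x10402000
 * can only be mapped with smaller pages.
 */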
4165 chkszc:
4166 if ((pszc = ppa[0]->p_szc) == szc &&
4167 IS_P2ALIGNED(pfn, pages)) {
4169 SEGVN_VMSTAT_FLTVNPAGES(30);
4170 #ifdef DEBUG
4171 for (i = 0; i < pages; i++) {
4172 ASSERT(PAGE_LOCKED(ppa[i]));
4173 ASSERT(!PP_ISFREE(ppa[i]));
4174 ASSERT(page_pptonum(ppa[i]) ==
4175 pfn + i);
4176 ASSERT(ppa[i]->p_szc == szc);
4177 ASSERT(ppa[i]->p_vnode == vp);
4178 ASSERT(ppa[i]->p_offset ==
4179 off + (i << PAGESHIFT));
4181 #endif /* DEBUG */
4183 * All pages are of the szc we need and they are
4184 * all locked so they can't change szc. Load
4185 * translations.
4187 * If the page got promoted since the last check
4188 * we don't need pplist.
4190 if (pplist != NULL) {
4191 page_free_replacement_page(pplist);
4192 page_create_putback(pages);
4194 if (PP_ISMIGRATE(ppa[0])) {
4195 page_migrate(seg, a, ppa, pages);
4197 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4198 prot, vpprot);
4199 if (!xhat) {
4200 hat_memload_array_region(hat, a, pgsz,
4201 ppa, prot & vpprot, hat_flag,
4202 svd->rcookie);
4203 } else {
4205 * Avoid large xhat mappings to FS
4206 * pages so that hat_page_demote()
4207 * doesn't need to check for xhat
4208 * large mappings.
4209 * Don't use regions with xhats.
4211 for (i = 0; i < pages; i++) {
4212 hat_memload(hat,
4213 a + (i << PAGESHIFT),
4214 ppa[i], prot & vpprot,
4215 hat_flag);
4219 if (!(hat_flag & HAT_LOAD_LOCK)) {
4220 for (i = 0; i < pages; i++) {
4221 page_unlock(ppa[i]);
4224 if (amp != NULL) {
4225 anon_array_exit(&an_cookie);
4226 ANON_LOCK_EXIT(&amp->a_rwlock);
4228 goto next;
4232 * See if upsize is possible.
4234 if (pszc > szc && szc < seg->s_szc &&
4235 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4236 pgcnt_t aphase;
4237 uint_t pszc1 = MIN(pszc, seg->s_szc);
4238 ppgsz = page_get_pagesize(pszc1);
4239 ppages = btop(ppgsz);
4240 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4242 ASSERT(type != F_SOFTLOCK);
4244 SEGVN_VMSTAT_FLTVNPAGES(31);
4245 if (aphase != P2PHASE(pfn, ppages)) {
4246 segvn_faultvnmpss_align_err4++;
4247 } else {
4248 SEGVN_VMSTAT_FLTVNPAGES(32);
4249 if (pplist != NULL) {
4250 page_t *pl = pplist;
4251 page_free_replacement_page(pl);
4252 page_create_putback(pages);
4254 for (i = 0; i < pages; i++) {
4255 page_unlock(ppa[i]);
4257 if (amp != NULL) {
4258 anon_array_exit(&an_cookie);
4259 ANON_LOCK_EXIT(&amp->a_rwlock);
4261 pszc = pszc1;
4262 ierr = -2;
4263 break;
4268 * Check whether we should use the smallest mapping size.
4270 upgrdfail = 0;
4271 if (szc == 0 || xhat ||
4272 (pszc >= szc &&
4273 !IS_P2ALIGNED(pfn, pages)) ||
4274 (pszc < szc &&
4275 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4276 &pszc))) {
4278 if (upgrdfail && type != F_SOFTLOCK) {
4280 * segvn_full_szcpages failed to lock
4281 * all pages EXCL. Size down.
4283 ASSERT(pszc < szc);
4285 SEGVN_VMSTAT_FLTVNPAGES(33);
4287 if (pplist != NULL) {
4288 page_t *pl = pplist;
4289 page_free_replacement_page(pl);
4290 page_create_putback(pages);
4293 for (i = 0; i < pages; i++) {
4294 page_unlock(ppa[i]);
4296 if (amp != NULL) {
4297 anon_array_exit(&an_cookie);
4298 ANON_LOCK_EXIT(&amp->a_rwlock);
4300 ierr = -1;
4301 break;
4303 if (szc != 0 && !xhat && !upgrdfail) {
4304 segvn_faultvnmpss_align_err5++;
4306 SEGVN_VMSTAT_FLTVNPAGES(34);
4307 if (pplist != NULL) {
4308 page_free_replacement_page(pplist);
4309 page_create_putback(pages);
4311 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4312 prot, vpprot);
4313 if (upgrdfail && segvn_anypgsz_vnode) {
4314 /* SOFTLOCK case */
4315 hat_memload_array_region(hat, a, pgsz,
4316 ppa, prot & vpprot, hat_flag,
4317 svd->rcookie);
4318 } else {
4319 for (i = 0; i < pages; i++) {
4320 hat_memload_region(hat,
4321 a + (i << PAGESHIFT),
4322 ppa[i], prot & vpprot,
4323 hat_flag, svd->rcookie);
4326 if (!(hat_flag & HAT_LOAD_LOCK)) {
4327 for (i = 0; i < pages; i++) {
4328 page_unlock(ppa[i]);
4331 if (amp != NULL) {
4332 anon_array_exit(&an_cookie);
4333 ANON_LOCK_EXIT(&amp->a_rwlock);
4335 goto next;
4338 if (pszc == szc) {
4340 * segvn_full_szcpages() upgraded the pages' szc.
4342 ASSERT(pszc == ppa[0]->p_szc);
4343 ASSERT(IS_P2ALIGNED(pfn, pages));
4344 goto chkszc;
4347 if (pszc > szc) {
4348 kmutex_t *szcmtx;
4349 SEGVN_VMSTAT_FLTVNPAGES(35);
4351 * p_szc of ppa[0] can change since we haven't
4352 * locked all constituent pages. Call
4353 * page_szc_lock() to prevent szc changes.
4354 * This should be a rare case that happens when
4355 * multiple segments use a different page size
4356 * to map the same file offsets.
4358 szcmtx = page_szc_lock(ppa[0]);
4359 pszc = ppa[0]->p_szc;
4360 ASSERT(szcmtx != NULL || pszc == 0);
4361 ASSERT(ppa[0]->p_szc <= pszc);
4362 if (pszc <= szc) {
4363 SEGVN_VMSTAT_FLTVNPAGES(36);
4364 if (szcmtx != NULL) {
4365 mutex_exit(szcmtx);
4367 goto chkszc;
4369 if (pplist != NULL) {
4371 * The page got promoted since the last check,
4372 * so we don't need the preallocated large
4373 * page.
4375 SEGVN_VMSTAT_FLTVNPAGES(37);
4376 page_free_replacement_page(pplist);
4377 page_create_putback(pages);
4379 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4380 prot, vpprot);
4381 hat_memload_array_region(hat, a, pgsz, ppa,
4382 prot & vpprot, hat_flag, svd->rcookie);
4383 mutex_exit(szcmtx);
4384 if (!(hat_flag & HAT_LOAD_LOCK)) {
4385 for (i = 0; i < pages; i++) {
4386 page_unlock(ppa[i]);
4389 if (amp != NULL) {
4390 anon_array_exit(&an_cookie);
4391 ANON_LOCK_EXIT(&amp->a_rwlock);
4393 goto next;
4397 * If the page got demoted since the last check
4398 * we may not have allocated a larger page.
4399 * Allocate it now.
4401 if (pplist == NULL &&
4402 page_alloc_pages(vp, seg, a, &pplist, NULL,
4403 szc, 0, 0) && type != F_SOFTLOCK) {
4404 SEGVN_VMSTAT_FLTVNPAGES(38);
4405 for (i = 0; i < pages; i++) {
4406 page_unlock(ppa[i]);
4408 if (amp != NULL) {
4409 anon_array_exit(&an_cookie);
4410 ANON_LOCK_EXIT(&amp->a_rwlock);
4412 ierr = -1;
4413 alloc_failed |= (1 << szc);
4414 break;
4417 SEGVN_VMSTAT_FLTVNPAGES(39);
4419 if (pplist != NULL) {
4420 segvn_relocate_pages(ppa, pplist);
4421 #ifdef DEBUG
4422 } else {
4423 ASSERT(type == F_SOFTLOCK);
4424 SEGVN_VMSTAT_FLTVNPAGES(40);
4425 #endif /* DEBUG */
4428 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4430 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4431 ASSERT(type == F_SOFTLOCK);
4432 for (i = 0; i < pages; i++) {
4433 ASSERT(ppa[i]->p_szc < szc);
4434 hat_memload_region(hat,
4435 a + (i << PAGESHIFT),
4436 ppa[i], prot & vpprot, hat_flag,
4437 svd->rcookie);
4439 } else {
4440 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4441 hat_memload_array_region(hat, a, pgsz, ppa,
4442 prot & vpprot, hat_flag, svd->rcookie);
4444 if (!(hat_flag & HAT_LOAD_LOCK)) {
4445 for (i = 0; i < pages; i++) {
4446 ASSERT(PAGE_SHARED(ppa[i]));
4447 page_unlock(ppa[i]);
4450 if (amp != NULL) {
4451 anon_array_exit(&an_cookie);
4452 ANON_LOCK_EXIT(&amp->a_rwlock);
4455 next:
4456 if (vpage != NULL) {
4457 vpage += pages;
4459 adjszc_chk = 1;
4461 if (a == lpgeaddr)
4462 break;
4463 ASSERT(a < lpgeaddr);
4465 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4468 * ierr == -1 means we failed to map with a large page
4469 * (either due to allocation/relocation failures or
4470 * misalignment with other mappings to this file).
4472 * ierr == -2 means some other thread allocated a large page
4473 * after we gave up trying to map with a large page. Retry
4474 * with a larger mapping.
4476 ASSERT(ierr == -1 || ierr == -2);
4477 ASSERT(ierr == -2 || szc != 0);
4478 ASSERT(ierr == -1 || szc < seg->s_szc);
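/*
 * Retry-size sketch for the logic below (hypothetical szc values):
 * a size-down retry (ierr == -1) walks szc 2 -> 1 -> 0 when
 * segvn_anypgsz_vnode is set and falls straight to 0 otherwise,
 * while a size-up retry (ierr == -2) jumps directly to the pszc
 * some other thread just created.
 */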
4479 if (ierr == -2) {
4480 SEGVN_VMSTAT_FLTVNPAGES(41);
4481 ASSERT(pszc > szc && pszc <= seg->s_szc);
4482 szc = pszc;
4483 } else if (segvn_anypgsz_vnode) {
4484 SEGVN_VMSTAT_FLTVNPAGES(42);
4485 szc--;
4486 } else {
4487 SEGVN_VMSTAT_FLTVNPAGES(43);
4488 ASSERT(pszc < szc);
4490 * Another process created a pszc large page,
4491 * but we still have to drop to szc 0.
4493 szc = 0;
4496 pgsz = page_get_pagesize(szc);
4497 pages = btop(pgsz);
4498 if (ierr == -2) {
4500 * Size up case. Note lpgaddr may only be needed for
4501 * softlock case so we don't adjust it here.
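/*
 * For example (hypothetical addresses, pgsz == 4M): the size-up
 * below turns a == 0x10706000 into P2ALIGN(a, pgsz) == 0x10400000
 * and eaddr == 0x10801000 into P2ROUNDUP(eaddr, pgsz) ==
 * 0x10c00000, so the retried range is fully large-page aligned.
 */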
4503 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4504 ASSERT(a >= lpgaddr);
4505 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4506 off = svd->offset + (uintptr_t)(a - seg->s_base);
4507 aindx = svd->anon_index + seg_page(seg, a);
4508 vpage = (svd->vpage != NULL) ?
4509 &svd->vpage[seg_page(seg, a)] : NULL;
4510 } else {
4512 * Size down case. Note lpgaddr may only be needed for
4513 * softlock case so we don't adjust it here.
4515 ASSERT(IS_P2ALIGNED(a, pgsz));
4516 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4517 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4518 ASSERT(a < lpgeaddr);
4519 if (a < addr) {
4520 SEGVN_VMSTAT_FLTVNPAGES(44);
4522 * The beginning of the large page region can
4523 * be pulled to the right to make a smaller
4524 * region. We haven't yet faulted a single
4525 * page.
4527 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4528 ASSERT(a >= lpgaddr);
4529 off = svd->offset +
4530 (uintptr_t)(a - seg->s_base);
4531 aindx = svd->anon_index + seg_page(seg, a);
4532 vpage = (svd->vpage != NULL) ?
4533 &svd->vpage[seg_page(seg, a)] : NULL;
4537 out:
4538 kmem_free(ppa, ppasize);
4539 if (!err && !vop_size_err) {
4540 SEGVN_VMSTAT_FLTVNPAGES(45);
4541 return (0);
4543 if (type == F_SOFTLOCK && a > lpgaddr) {
4544 SEGVN_VMSTAT_FLTVNPAGES(46);
4545 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4547 if (!vop_size_err) {
4548 SEGVN_VMSTAT_FLTVNPAGES(47);
4549 return (err);
4551 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4553 * Large page end is mapped beyond the end of the file and it's a cow
4554 * fault (can be a text replication induced cow) or softlock so we can't
4555 * reduce the map area. For now just demote the segment. This should
4556 * really only happen if the end of the file changed after the mapping
4557 * was established since when large page segments are created we make
4558 * sure they don't extend beyond the end of the file.
4560 SEGVN_VMSTAT_FLTVNPAGES(48);
4562 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4563 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4564 err = 0;
4565 if (seg->s_szc != 0) {
4566 segvn_fltvnpages_clrszc_cnt++;
4567 ASSERT(svd->softlockcnt == 0);
4568 err = segvn_clrszc(seg);
4569 if (err != 0) {
4570 segvn_fltvnpages_clrszc_err++;
4573 ASSERT(err || seg->s_szc == 0);
4574 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4575 /* segvn_fault will do its job as if szc had been zero to begin with */
4576 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4580 * This routine will attempt to fault in one large page.
4581 * It will use smaller pages if that fails.
4582 * It should only be called for pure anonymous segments.
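/*
 * Call-shape sketch (illustrative only; compare the real call site
 * in segvn_fault() below): the caller hands in the large-page
 * aligned region [lpgaddr, lpgeaddr) together with the original
 * fault range so this routine can shrink back toward it when large
 * pages can't be used:
 *
 *	err = segvn_fault_anonpages(hat, seg, lpgaddr, lpgeaddr,
 *	    type, rw, addr, addr + len, brkcow);
 */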
4584 static faultcode_t
4585 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4586 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4587 caddr_t eaddr, int brkcow)
4589 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4590 struct anon_map *amp = svd->amp;
4591 uchar_t segtype = svd->type;
4592 uint_t szc = seg->s_szc;
4593 size_t pgsz = page_get_pagesize(szc);
4594 size_t maxpgsz = pgsz;
4595 pgcnt_t pages = btop(pgsz);
4596 uint_t ppaszc = szc;
4597 caddr_t a = lpgaddr;
4598 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4599 struct vpage *vpage = (svd->vpage != NULL) ?
4600 &svd->vpage[seg_page(seg, a)] : NULL;
4601 page_t **ppa;
4602 uint_t ppa_szc;
4603 faultcode_t err;
4604 int ierr;
4605 uint_t protchk, prot, vpprot;
4606 ulong_t i;
4607 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4608 anon_sync_obj_t cookie;
4609 int adjszc_chk;
4610 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4612 ASSERT(szc != 0);
4613 ASSERT(amp != NULL);
4614 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4615 ASSERT(!(svd->flags & MAP_NORESERVE));
4616 ASSERT(type != F_SOFTUNLOCK);
4617 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4618 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4619 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4621 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4623 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4624 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4626 if (svd->flags & MAP_TEXT) {
4627 hat_flag |= HAT_LOAD_TEXT;
4630 if (svd->pageprot) {
4631 switch (rw) {
4632 case S_READ:
4633 protchk = PROT_READ;
4634 break;
4635 case S_WRITE:
4636 protchk = PROT_WRITE;
4637 break;
4638 case S_EXEC:
4639 protchk = PROT_EXEC;
4640 break;
4641 case S_OTHER:
4642 default:
4643 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4644 break;
4646 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4647 } else {
4648 prot = svd->prot;
4649 /* caller has already done segment level protection check. */
4652 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4653 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
4654 for (;;) {
4655 adjszc_chk = 0;
4656 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4657 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4658 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4659 ASSERT(vpage != NULL);
4660 prot = VPP_PROT(vpage);
4661 ASSERT(sameprot(seg, a, maxpgsz));
4662 if ((prot & protchk) == 0) {
4663 err = FC_PROT;
4664 goto error;
4667 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4668 pgsz < maxpgsz) {
4669 ASSERT(a > lpgaddr);
4670 szc = seg->s_szc;
4671 pgsz = maxpgsz;
4672 pages = btop(pgsz);
4673 ASSERT(IS_P2ALIGNED(aindx, pages));
4674 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4675 pgsz);
4677 if (type == F_SOFTLOCK) {
4678 atomic_add_long((ulong_t *)&svd->softlockcnt,
4679 pages);
4681 anon_array_enter(amp, aindx, &cookie);
4682 ppa_szc = (uint_t)-1;
4683 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4684 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4685 segvn_anypgsz, pgflags, svd->cred);
4686 if (ierr != 0) {
4687 anon_array_exit(&cookie);
4688 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4689 if (type == F_SOFTLOCK) {
4690 atomic_add_long(
4691 (ulong_t *)&svd->softlockcnt,
4692 -pages);
4694 if (ierr > 0) {
4695 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4696 err = FC_MAKE_ERR(ierr);
4697 goto error;
4699 break;
4702 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4704 ASSERT(segtype == MAP_SHARED ||
4705 ppa[0]->p_szc <= szc);
4706 ASSERT(segtype == MAP_PRIVATE ||
4707 ppa[0]->p_szc >= szc);
4710 * Handle pages that have been marked for migration
4712 if (lgrp_optimizations())
4713 page_migrate(seg, a, ppa, pages);
4715 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4717 if (segtype == MAP_SHARED) {
4718 vpprot |= PROT_WRITE;
4721 hat_memload_array(hat, a, pgsz, ppa,
4722 prot & vpprot, hat_flag);
4724 if (hat_flag & HAT_LOAD_LOCK) {
4725 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4726 } else {
4727 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4728 for (i = 0; i < pages; i++)
4729 page_unlock(ppa[i]);
4731 if (vpage != NULL)
4732 vpage += pages;
4734 anon_array_exit(&cookie);
4735 adjszc_chk = 1;
4737 if (a == lpgeaddr)
4738 break;
4739 ASSERT(a < lpgeaddr);
4741 * ierr == -1 means we failed to allocate a large page,
4742 * so do a size down operation.
4744 * ierr == -2 means some other process that privately shares
4745 * pages with this process has allocated a larger page and we
4746 * need to retry with larger pages. So do a size up
4747 * operation. This relies on the fact that large pages are
4748 * never partially shared, i.e. if we share any constituent
4749 * page of a large page with another process we must share the
4750 * entire large page. Note this cannot happen for SOFTLOCK
4751 * case, unless current address (a) is at the beginning of the
4752 * next page size boundary because the other process couldn't
4753 * have relocated locked pages.
4755 ASSERT(ierr == -1 || ierr == -2);
4757 if (segvn_anypgsz) {
4758 ASSERT(ierr == -2 || szc != 0);
4759 ASSERT(ierr == -1 || szc < seg->s_szc);
4760 szc = (ierr == -1) ? szc - 1 : szc + 1;
4761 } else {
4763 * For non-COW faults and segvn_anypgsz == 0
4764 * we need to be careful not to loop forever
4765 * if existing page is found with szc other
4766 * than 0 or seg->s_szc. This could be due
4767 * to page relocations on behalf of DR or
4768 * more likely large page creation. For this
4769 * case simply re-size to existing page's szc
4770 * if returned by anon_map_getpages().
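/*
 * e.g. (hypothetical sizes): with seg->s_szc == 2 and
 * segvn_anypgsz == 0, finding an existing szc 1 page makes
 * anon_map_getpages() report ppa_szc == 1 and we retry at exactly
 * that size instead of bouncing between 0 and seg->s_szc.
 */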
4772 if (ppa_szc == (uint_t)-1) {
4773 szc = (ierr == -1) ? 0 : seg->s_szc;
4774 } else {
4775 ASSERT(ppa_szc <= seg->s_szc);
4776 ASSERT(ierr == -2 || ppa_szc < szc);
4777 ASSERT(ierr == -1 || ppa_szc > szc);
4778 szc = ppa_szc;
4782 pgsz = page_get_pagesize(szc);
4783 pages = btop(pgsz);
4784 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4785 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4786 if (type == F_SOFTLOCK) {
4788 * For softlocks we cannot reduce the fault area
4789 * (calculated based on the largest page size for this
4790 * segment) for a size down, and a is already next
4791 * page size aligned, as asserted above, for size
4792 * ups. Therefore just continue in the softlock case.
4794 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4795 continue; /* keep lint happy */
4796 } else if (ierr == -2) {
4799 * Size up case. Note lpgaddr may only be needed for
4800 * softlock case so we don't adjust it here.
4802 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4803 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4804 ASSERT(a >= lpgaddr);
4805 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4806 aindx = svd->anon_index + seg_page(seg, a);
4807 vpage = (svd->vpage != NULL) ?
4808 &svd->vpage[seg_page(seg, a)] : NULL;
4809 } else {
4811 * Size down case. Note lpgaddr may only be needed for
4812 * softlock case so we don't adjust it here.
4814 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4815 ASSERT(IS_P2ALIGNED(a, pgsz));
4816 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4817 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4818 ASSERT(a < lpgeaddr);
4819 if (a < addr) {
4821 * The beginning of the large page region can
4822 * be pulled to the right to make a smaller
4823 * region. We haven't yet faulted a single
4824 * page.
4826 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4827 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4828 ASSERT(a >= lpgaddr);
4829 aindx = svd->anon_index + seg_page(seg, a);
4830 vpage = (svd->vpage != NULL) ?
4831 &svd->vpage[seg_page(seg, a)] : NULL;
4835 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4836 ANON_LOCK_EXIT(&amp->a_rwlock);
4837 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4838 return (0);
4839 error:
4840 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4841 ANON_LOCK_EXIT(&amp->a_rwlock);
4842 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4843 if (type == F_SOFTLOCK && a > lpgaddr) {
4844 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4845 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4847 return (err);
4850 int fltadvice = 1; /* set to free behind pages for sequential access */
4853 * This routine is called via a machine specific fault handling routine.
4854 * It is also called by software routines wishing to lock or unlock
4855 * a range of addresses.
4857 * Here is the basic algorithm:
4858 * If unlocking
4859 * Call segvn_softunlock
4860 * Return
4861 * endif
4862 * Checking and set up work
4863 * If we will need some non-anonymous pages
4864 * Call VOP_GETPAGE over the range of non-anonymous pages
4865 * endif
4866 * Loop over all addresses requested
4867 * Call segvn_faultpage passing in page list
4868 * to load up translations and handle anonymous pages
4869 * endloop
4870 * Load up translation to any additional pages in page list not
4871 * already handled that fit into this segment
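/*
 * Caller sketch (illustrative only): a hardware fault on addr
 * reaches this routine through the generic segment interface,
 * roughly
 *
 *	res = SEGOP_FAULT(as->a_hat, seg, addr, PAGESIZE,
 *	    F_INVAL, S_WRITE);
 *
 * while /proc and friends use F_SOFTLOCK/F_SOFTUNLOCK pairs over
 * larger ranges.
 */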
4873 static faultcode_t
4874 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4875 enum fault_type type, enum seg_rw rw)
4877 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4878 page_t **plp, **ppp, *pp;
4879 u_offset_t off;
4880 caddr_t a;
4881 struct vpage *vpage;
4882 uint_t vpprot, prot;
4883 int err;
4884 page_t *pl[PVN_GETPAGE_NUM + 1];
4885 size_t plsz, pl_alloc_sz;
4886 size_t page;
4887 ulong_t anon_index;
4888 struct anon_map *amp;
4889 int dogetpage = 0;
4890 caddr_t lpgaddr, lpgeaddr;
4891 size_t pgsz;
4892 anon_sync_obj_t cookie;
4893 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4895 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4896 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4899 * First handle the easy stuff
4901 if (type == F_SOFTUNLOCK) {
4902 if (rw == S_READ_NOCOW) {
4903 rw = S_READ;
4904 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4906 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4907 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4908 page_get_pagesize(seg->s_szc);
4909 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4910 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4911 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4912 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4913 return (0);
4916 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4917 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4918 if (brkcow == 0) {
4919 if (svd->tr_state == SEGVN_TR_INIT) {
4920 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4921 if (svd->tr_state == SEGVN_TR_INIT) {
4922 ASSERT(svd->vp != NULL && svd->amp == NULL);
4923 ASSERT(svd->flags & MAP_TEXT);
4924 ASSERT(svd->type == MAP_PRIVATE);
4925 segvn_textrepl(seg);
4926 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4927 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4928 svd->amp != NULL);
4930 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4932 } else if (svd->tr_state != SEGVN_TR_OFF) {
4933 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4935 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
4936 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
4937 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4938 return (FC_PROT);
4941 if (svd->tr_state == SEGVN_TR_ON) {
4942 ASSERT(svd->vp != NULL && svd->amp != NULL);
4943 segvn_textunrepl(seg, 0);
4944 ASSERT(svd->amp == NULL &&
4945 svd->tr_state == SEGVN_TR_OFF);
4946 } else if (svd->tr_state != SEGVN_TR_OFF) {
4947 svd->tr_state = SEGVN_TR_OFF;
4949 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
4950 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4953 top:
4954 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4957 * If we have the same protections for the entire segment,
4958 * ensure that the access being attempted is legitimate.
4961 if (svd->pageprot == 0) {
4962 uint_t protchk;
4964 switch (rw) {
4965 case S_READ:
4966 case S_READ_NOCOW:
4967 protchk = PROT_READ;
4968 break;
4969 case S_WRITE:
4970 protchk = PROT_WRITE;
4971 break;
4972 case S_EXEC:
4973 protchk = PROT_EXEC;
4974 break;
4975 case S_OTHER:
4976 default:
4977 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4978 break;
4981 if ((svd->prot & protchk) == 0) {
4982 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4983 return (FC_PROT); /* illegal access type */
4987 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
4988 /* this must be SOFTLOCK S_READ fault */
4989 ASSERT(svd->amp == NULL);
4990 ASSERT(svd->tr_state == SEGVN_TR_OFF);
4991 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4992 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4993 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
4995 * this must be the first ever non-S_READ_NOCOW
4996 * softlock for this segment.
4998 ASSERT(svd->softlockcnt == 0);
4999 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5000 HAT_REGION_TEXT);
5001 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5003 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5004 goto top;
5008 * We can't allow the long term use of softlocks for vmpss segments,
5009 * because in some file truncation cases we should be able to demote
5010 * the segment, which requires that there are no softlocks. The
5011 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5012 * segment is S_READ_NOCOW, where the caller holds the address space
5013 * locked as writer and calls softunlock before dropping the as lock.
5014 * S_READ_NOCOW is used by /proc to read memory from another user.
5016 * Another deadlock between SOFTLOCK and file truncation can happen
5017 * because segvn_fault_vnodepages() calls the FS one pagesize at
5018 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5019 * can cause a deadlock because the first set of page_t's remain
5020 * locked SE_SHARED. To avoid this, we demote segments on a first
5021 * SOFTLOCK if they have a length greater than the segment's
5022 * page size.
5024 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5025 * the access type is S_READ_NOCOW and the fault length is less than
5026 * or equal to the segment's page size. While this is quite restrictive,
5027 * it should be the most common case of SOFTLOCK against a vmpss
5028 * segment.
5030 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5031 * caller makes sure no COW will be caused by another thread for a
5032 * softlocked page.
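/*
 * Decision sketch for the check below (hypothetical 4M szc): a
 * PAGESIZE-long S_READ_NOCOW SOFTLOCK never demotes; the same
 * fault with rw == S_WRITE always does, as does an S_READ_NOCOW
 * fault whose CALC_LPG_REGION-rounded range spans more than one
 * 4M page.
 */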
5034 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5035 int demote = 0;
5037 if (rw != S_READ_NOCOW) {
5038 demote = 1;
5040 if (!demote && len > PAGESIZE) {
5041 pgsz = page_get_pagesize(seg->s_szc);
5042 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5043 lpgeaddr);
5044 if (lpgeaddr - lpgaddr > pgsz) {
5045 demote = 1;
5049 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5051 if (demote) {
5052 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5053 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5054 if (seg->s_szc != 0) {
5055 segvn_vmpss_clrszc_cnt++;
5056 ASSERT(svd->softlockcnt == 0);
5057 err = segvn_clrszc(seg);
5058 if (err) {
5059 segvn_vmpss_clrszc_err++;
5060 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5061 return (FC_MAKE_ERR(err));
5064 ASSERT(seg->s_szc == 0);
5065 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5066 goto top;
5071 * Check to see if we need to allocate an anon_map structure.
5073 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5074 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5076 * Drop the "read" lock on the segment and acquire
5077 * the "write" version since we have to allocate the
5078 * anon_map.
5080 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5081 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5083 if (svd->amp == NULL) {
5084 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5085 svd->amp->a_szc = seg->s_szc;
5087 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5090 * Start all over again since segment protections
5091 * may have changed after we dropped the "read" lock.
5093 goto top;
5097 * S_READ_NOCOW vs S_READ distinction was
5098 * only needed for the code above. After
5099 * that we treat it as S_READ.
5101 if (rw == S_READ_NOCOW) {
5102 ASSERT(type == F_SOFTLOCK);
5103 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5104 rw = S_READ;
5107 amp = svd->amp;
5110 * MADV_SEQUENTIAL work is ignored for large page segments.
5112 if (seg->s_szc != 0) {
5113 pgsz = page_get_pagesize(seg->s_szc);
5114 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5115 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5116 if (svd->vp == NULL) {
5117 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5118 lpgeaddr, type, rw, addr, addr + len, brkcow);
5119 } else {
5120 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5121 lpgeaddr, type, rw, addr, addr + len, brkcow);
5122 if (err == IE_RETRY) {
5123 ASSERT(seg->s_szc == 0);
5124 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5125 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5126 goto top;
5129 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5130 return (err);
5133 page = seg_page(seg, addr);
5134 if (amp != NULL) {
5135 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5136 anon_index = svd->anon_index + page;
5138 if (type == F_PROT && rw == S_READ &&
5139 svd->tr_state == SEGVN_TR_OFF &&
5140 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5141 size_t index = anon_index;
5142 struct anon *ap;
5144 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5146 * The fast path could apply to S_WRITE also, except
5147 * that the protection fault could be caused by lazy
5148 * tlb flush when ro->rw. In this case, the pte is
5149 * RW already. But RO in the other cpu's tlb causes
5150 * the fault. Since hat_chgprot won't do anything if
5151 * pte doesn't change, we may end up faulting
5152 * indefinitely until the RO tlb entry gets replaced.
5154 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5155 anon_array_enter(amp, index, &cookie);
5156 ap = anon_get_ptr(amp->ahp, index);
5157 anon_array_exit(&cookie);
5158 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5159 ANON_LOCK_EXIT(&amp->a_rwlock);
5160 goto slow;
5163 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5164 ANON_LOCK_EXIT(&amp->a_rwlock);
5165 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5166 return (0);
5169 slow:
5171 if (svd->vpage == NULL)
5172 vpage = NULL;
5173 else
5174 vpage = &svd->vpage[page];
5176 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5179 * If MADV_SEQUENTIAL has been set for the particular page we
5180 * are faulting on, free behind all pages in the segment and put
5181 * them on the free list.
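/*
 * For example (hypothetical user program): after
 * madvise(addr, len, MADV_SEQUENTIAL) on a mapped file, each new
 * fault pushes the already-consumed pages behind it back to the
 * file system via VOP_PUTPAGE(..., B_DONTNEED|B_FREE|B_ASYNC, ...)
 * in the loop below.
 */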
5184 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5185 struct vpage *vpp;
5186 ulong_t fanon_index;
5187 size_t fpage;
5188 u_offset_t pgoff, fpgoff;
5189 struct vnode *fvp;
5190 struct anon *fap = NULL;
5192 if (svd->advice == MADV_SEQUENTIAL ||
5193 (svd->pageadvice &&
5194 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5195 pgoff = off - PAGESIZE;
5196 fpage = page - 1;
5197 if (vpage != NULL)
5198 vpp = &svd->vpage[fpage];
5199 if (amp != NULL)
5200 fanon_index = svd->anon_index + fpage;
5202 while (pgoff > svd->offset) {
5203 if (svd->advice != MADV_SEQUENTIAL &&
5204 (!svd->pageadvice || (vpage &&
5205 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5206 break;
5209 * If this is an anon page, we must find the
5210 * correct <vp, offset> for it
5212 fap = NULL;
5213 if (amp != NULL) {
5214 ANON_LOCK_ENTER(&amp->a_rwlock,
5215 RW_READER);
5216 anon_array_enter(amp, fanon_index,
5217 &cookie);
5218 fap = anon_get_ptr(amp->ahp,
5219 fanon_index);
5220 if (fap != NULL) {
5221 swap_xlate(fap, &fvp, &fpgoff);
5222 } else {
5223 fpgoff = pgoff;
5224 fvp = svd->vp;
5226 anon_array_exit(&cookie);
5227 ANON_LOCK_EXIT(&amp->a_rwlock);
5228 } else {
5229 fpgoff = pgoff;
5230 fvp = svd->vp;
5232 if (fvp == NULL)
5233 break; /* XXX */
5235 * Skip pages that are free or have an
5236 * "exclusive" lock.
5238 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5239 if (pp == NULL)
5240 break;
5242 * We don't need the page_struct_lock to test
5243 * as this is only advisory; even if we
5244 * acquire it someone might race in and lock
5245 * the page after we unlock and before the
5246 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5248 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5250 * Hold the vnode before releasing
5251 * the page lock to prevent it from
5252 * being freed and re-used by some
5253 * other thread.
5255 VN_HOLD(fvp);
5256 page_unlock(pp);
5258 * We should build a page list
5259 * to kluster putpages XXX
5261 (void) VOP_PUTPAGE(fvp,
5262 (offset_t)fpgoff, PAGESIZE,
5263 (B_DONTNEED|B_FREE|B_ASYNC),
5264 svd->cred, NULL);
5265 VN_RELE(fvp);
5266 } else {
5268 * XXX - Should the loop terminate if
5269 * the page is `locked'?
5271 page_unlock(pp);
5273 --vpp;
5274 --fanon_index;
5275 pgoff -= PAGESIZE;
5280 plp = pl;
5281 *plp = NULL;
5282 pl_alloc_sz = 0;
5285 * See if we need to call VOP_GETPAGE for
5286 * *any* of the range being faulted on.
5287 * We can skip all of this work if there
5288 * was no original vnode.
5290 if (svd->vp != NULL) {
5291 u_offset_t vp_off;
5292 size_t vp_len;
5293 struct anon *ap;
5294 vnode_t *vp;
5296 vp_off = off;
5297 vp_len = len;
5299 if (amp == NULL)
5300 dogetpage = 1;
5301 else {
5303 * Only acquire reader lock to prevent amp->ahp
5304 * from being changed. It's ok to miss pages,
5305 * hence we don't do anon_array_enter
5307 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5308 ap = anon_get_ptr(amp->ahp, anon_index);
5310 if (len <= PAGESIZE)
5311 /* inline non_anon() */
5312 dogetpage = (ap == NULL);
5313 else
5314 dogetpage = non_anon(amp->ahp, anon_index,
5315 &vp_off, &vp_len);
5316 ANON_LOCK_EXIT(&amp->a_rwlock);
5319 if (dogetpage) {
5320 enum seg_rw arw;
5321 struct as *as = seg->s_as;
5323 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5325 * Page list won't fit in local array,
5326 * allocate one of the needed size.
5328 pl_alloc_sz =
5329 (btop(len) + 1) * sizeof (page_t *);
5330 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5331 plp[0] = NULL;
5332 plsz = len;
5333 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5334 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5335 (((size_t)(addr + PAGESIZE) <
5336 (size_t)(seg->s_base + seg->s_size)) &&
5337 hat_probe(as->a_hat, addr + PAGESIZE))) {
5339 * Ask VOP_GETPAGE to return the exact number
5340 * of pages if
5341 * (a) this is a COW fault, or
5342 * (b) this is a software fault, or
5343 * (c) next page is already mapped.
5345 plsz = len;
5346 } else {
5348 * Ask VOP_GETPAGE to return adjacent pages
5349 * within the segment.
5351 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5352 ((seg->s_base + seg->s_size) - addr));
5353 ASSERT((addr + plsz) <=
5354 (seg->s_base + seg->s_size));
5358 * Need to get some non-anonymous pages.
5359 * We need to make only one call to GETPAGE to do
5360 * this to prevent certain deadlocking conditions
5361 * when we are doing locking. In this case
5362 * non_anon() should have picked up the smallest
5363 * range which includes all the non-anonymous
5364 * pages in the requested range. We have to
5365 * be careful regarding which rw flag to pass in
5366 * because on a private mapping, the underlying
5367 * object is never allowed to be written.
5369 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5370 arw = S_READ;
5371 } else {
5372 arw = rw;
5374 vp = svd->vp;
5375 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5376 "segvn_getpage:seg %p addr %p vp %p",
5377 seg, addr, vp);
5378 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5379 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5380 svd->cred, NULL);
5381 if (err) {
5382 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5383 segvn_pagelist_rele(plp);
5384 if (pl_alloc_sz)
5385 kmem_free(plp, pl_alloc_sz);
5386 return (FC_MAKE_ERR(err));
5388 if (svd->type == MAP_PRIVATE)
5389 vpprot &= ~PROT_WRITE;
5394 * N.B. at this time the plp array has all the needed non-anon
5395 * pages in addition to (possibly) having some adjacent pages.
5399 * Always acquire the anon_array_lock to prevent
5400 * 2 threads from allocating separate anon slots for
5401 * the same "addr".
5403 * If this is a copy-on-write fault and we don't already
5404 * have the anon_array_lock, acquire it to prevent the
5405 * fault routine from handling multiple copy-on-write faults
5406 * on the same "addr" in the same address space.
5408 * Only one thread should deal with the fault since after
5409 * it is handled, the other threads can acquire a translation
5410 * to the newly created private page. This prevents two or
5411 * more threads from creating different private pages for the
5412 * same fault.
5414 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5415 * to prevent deadlock between this thread and another thread
5416 * which has soft-locked this page and wants to acquire serial_lock.
5417 * ( bug 4026339 )
5419 * The fix for bug 4026339 becomes unnecessary when using the
5420 * locking scheme with per amp rwlock and a global set of hash
5421 * locks, anon_array_lock. If we steal a vnode page when low
5422 * on memory and upgrade the page lock through page_rename,
5423 * then the page is PAGE_HANDLED, nothing needs to be done
5424 * for this page after returning from segvn_faultpage.
5426 * But really, the page lock should be downgraded after
5427 * the stolen page is page_rename'd.
5430 if (amp != NULL)
5431 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5434 * Ok, now loop over the address range and handle faults
5436 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5437 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5438 type, rw, brkcow);
5439 if (err) {
5440 if (amp != NULL)
5441 ANON_LOCK_EXIT(&amp->a_rwlock);
5442 if (type == F_SOFTLOCK && a > addr) {
5443 segvn_softunlock(seg, addr, (a - addr),
5444 S_OTHER);
5446 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5447 segvn_pagelist_rele(plp);
5448 if (pl_alloc_sz)
5449 kmem_free(plp, pl_alloc_sz);
5450 return (err);
5452 if (vpage) {
5453 vpage++;
5454 } else if (svd->vpage) {
5455 page = seg_page(seg, addr);
5456 vpage = &svd->vpage[++page];
5460 /* Didn't get pages from the underlying fs so we're done */
5461 if (!dogetpage)
5462 goto done;
5465 * Now handle any other pages in the list returned.
5466 * If the page can be used, load up the translations now.
5467 * Note that the for loop will only be entered if "plp"
5468 * is pointing to a non-NULL page pointer which means that
5469 * VOP_GETPAGE() was called and vpprot has been initialized.
5471 if (svd->pageprot == 0)
5472 prot = svd->prot & vpprot;
5476 * Large Files: diff should be an unsigned value because we started
5477 * supporting > 2GB segment sizes in 2.5.1, and when a
5478 * large file of size > 2GB gets mapped into the address space
5479 * the diff value can be > 2GB.
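/*
 * e.g. (hypothetical): a 6GB file mapped in full can legitimately
 * produce diff == pp->p_offset - svd->offset values above 2GB,
 * which a signed 32-bit type would truncate; hence the size_t
 * below.
 */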
5482 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5483 size_t diff;
5484 struct anon *ap;
5485 int anon_index;
5486 anon_sync_obj_t cookie;
5487 int hat_flag = HAT_LOAD_ADV;
5489 if (svd->flags & MAP_TEXT) {
5490 hat_flag |= HAT_LOAD_TEXT;
5493 if (pp == PAGE_HANDLED)
5494 continue;
5496 if (svd->tr_state != SEGVN_TR_ON &&
5497 pp->p_offset >= svd->offset &&
5498 pp->p_offset < svd->offset + seg->s_size) {
5500 diff = pp->p_offset - svd->offset;
5503 * Large Files: Following is the assertion
5504 * validating the above cast.
5506 ASSERT(svd->vp == pp->p_vnode);
5508 page = btop(diff);
5509 if (svd->pageprot)
5510 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5513 * Prevent other threads in the address space from
5514 * creating private pages (i.e., allocating anon slots)
5515 * while we are in the process of loading translations
5516 * to additional pages returned by the underlying
5517 * object.
5519 if (amp != NULL) {
5520 anon_index = svd->anon_index + page;
5521 anon_array_enter(amp, anon_index, &cookie);
5522 ap = anon_get_ptr(amp->ahp, anon_index);
5524 if ((amp == NULL) || (ap == NULL)) {
5525 if (IS_VMODSORT(pp->p_vnode) ||
5526 enable_mbit_wa) {
5527 if (rw == S_WRITE)
5528 hat_setmod(pp);
5529 else if (rw != S_OTHER &&
5530 !hat_ismod(pp))
5531 prot &= ~PROT_WRITE;
5534 * Skip mapping read ahead pages marked
5535 * for migration, so they will get migrated
5536 * properly on fault
5538 ASSERT(amp == NULL ||
5539 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5540 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5541 hat_memload_region(hat,
5542 seg->s_base + diff,
5543 pp, prot, hat_flag,
5544 svd->rcookie);
5547 if (amp != NULL)
5548 anon_array_exit(&cookie);
5550 page_unlock(pp);
5552 done:
5553 if (amp != NULL)
5554 ANON_LOCK_EXIT(&amp->a_rwlock);
5555 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5556 if (pl_alloc_sz)
5557 kmem_free(plp, pl_alloc_sz);
5558 return (0);
5562 * This routine is used to start I/O on pages asynchronously. XXX it will
5563 * only create PAGESIZE pages. At fault time they will be relocated into
5564 * larger pages.
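/*
 * Usage sketch (illustrative): as_faulta() drives this one page at
 * a time, e.g.
 *
 *	(void) SEGOP_FAULTA(seg, addr);
 *
 * kicking off the read without waiting for the page to arrive.
 */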
5566 static faultcode_t
5567 segvn_faulta(struct seg *seg, caddr_t addr)
5569 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5570 int err;
5571 struct anon_map *amp;
5572 vnode_t *vp;
5574 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5576 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5577 if ((amp = svd->amp) != NULL) {
5578 struct anon *ap;
5581 * Reader lock to prevent amp->ahp from being changed.
5582 * This is advisory; it's ok to miss a page, so
5583 * we don't bother with the anon_array_enter lock.
5585 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5586 if ((ap = anon_get_ptr(amp->ahp,
5587 svd->anon_index + seg_page(seg, addr))) != NULL) {
5589 err = anon_getpage(&ap, NULL, NULL,
5590 0, seg, addr, S_READ, svd->cred);
5592 ANON_LOCK_EXIT(&amp->a_rwlock);
5593 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5594 if (err)
5595 return (FC_MAKE_ERR(err));
5596 return (0);
5598 ANON_LOCK_EXIT(&amp->a_rwlock);
5601 if (svd->vp == NULL) {
5602 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5603 return (0); /* zfod page - do nothing now */
5606 vp = svd->vp;
5607 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5608 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5609 err = VOP_GETPAGE(vp,
5610 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5611 PAGESIZE, NULL, NULL, 0, seg, addr,
5612 S_OTHER, svd->cred, NULL);
5614 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5615 if (err)
5616 return (FC_MAKE_ERR(err));
5617 return (0);
5620 static int
5621 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5623 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5624 struct vpage *cvp, *svp, *evp;
5625 struct vnode *vp;
5626 size_t pgsz;
5627 pgcnt_t pgcnt;
5628 anon_sync_obj_t cookie;
5629 int unload_done = 0;
5631 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5633 if ((svd->maxprot & prot) != prot)
5634 return (EACCES); /* violated maxprot */
5636 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5638 /* return if prot is the same */
5639 if (!svd->pageprot && svd->prot == prot) {
5640 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5641 return (0);
5645 * Since we change protections we first have to flush the cache.
5646 * This makes sure all the pagelock calls have to recheck
5647 * protections.
5649 if (svd->softlockcnt > 0) {
5650 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5653 * If this is a shared segment, a non-zero softlockcnt
5654 * means locked pages are still in use.
5656 if (svd->type == MAP_SHARED) {
5657 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5658 return (EAGAIN);
5662 * Since we do have the segvn writers lock nobody can fill
5663 * the cache with entries belonging to this seg during
5664 * the purge. The flush either succeeds or we still have
5665 * pending I/Os.
5667 segvn_purge(seg);
5668 if (svd->softlockcnt > 0) {
5669 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5670 return (EAGAIN);
5674 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5675 ASSERT(svd->amp == NULL);
5676 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5677 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5678 HAT_REGION_TEXT);
5679 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5680 unload_done = 1;
5681 } else if (svd->tr_state == SEGVN_TR_INIT) {
5682 svd->tr_state = SEGVN_TR_OFF;
5683 } else if (svd->tr_state == SEGVN_TR_ON) {
5684 ASSERT(svd->amp != NULL);
5685 segvn_textunrepl(seg, 0);
5686 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5687 unload_done = 1;
5690 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5691 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5692 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5693 segvn_inval_trcache(svd->vp);
5695 if (seg->s_szc != 0) {
5696 int err;
5697 pgsz = page_get_pagesize(seg->s_szc);
5698 pgcnt = pgsz >> PAGESHIFT;
5699 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5700 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5701 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5702 ASSERT(seg->s_base != addr || seg->s_size != len);
5704 * If we are holding the as lock as a reader then
5705 * we need to return IE_RETRY and let the as
5706 * layer drop and re-acquire the lock as a writer.
5708 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5709 return (IE_RETRY);
5710 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5711 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5712 err = segvn_demote_range(seg, addr, len,
5713 SDR_END, 0);
5714 } else {
5715 uint_t szcvec = map_pgszcvec(seg->s_base,
5716 pgsz, (uintptr_t)seg->s_base,
5717 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5718 err = segvn_demote_range(seg, addr, len,
5719 SDR_END, szcvec);
5721 if (err == 0)
5722 return (IE_RETRY);
5723 if (err == ENOMEM)
5724 return (IE_NOMEM);
5725 return (err);
5731 * If it's a private mapping and we're making it writable then we
5732 * may have to reserve the additional swap space now. If we are
5733 * making writable only a part of the segment then we use its vpage
5734 * array to keep a record of the pages for which we have reserved
5735 * swap. In this case we set the pageswap field in the segment's
5736 * segvn structure to record this.
5738 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5739 * removing write permission on the entire segment and we haven't
5740 * modified any pages, we can release the swap space.
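/*
 * Worked example (hypothetical numbers): write-enabling 16 pages
 * of which 6 already have VPP_ISSWAPRES set reserves only the
 * remaining 10:
 *
 *	sz = 10 << PAGESHIFT;
 *	if (anon_resv_zone(sz, seg->s_as->a_proc->p_zone) == 0)
 *		return (IE_NOMEM);
 */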
5742 if (svd->type == MAP_PRIVATE) {
5743 if (prot & PROT_WRITE) {
5744 if (!(svd->flags & MAP_NORESERVE) &&
5745 !(svd->swresv && svd->pageswap == 0)) {
5746 size_t sz = 0;
5749 * Start by determining how much swap
5750 * space is required.
5752 if (addr == seg->s_base &&
5753 len == seg->s_size &&
5754 svd->pageswap == 0) {
5755 /* The whole segment */
5756 sz = seg->s_size;
5757 } else {
5759 * Make sure that the vpage array
5760 * exists, and make a note of the
5761 * range of elements corresponding
5762 * to len.
5764 segvn_vpage(seg);
5765 svp = &svd->vpage[seg_page(seg, addr)];
5766 evp = &svd->vpage[seg_page(seg,
5767 addr + len)];
5769 if (svd->pageswap == 0) {
5771 * This is the first time we've
5772 * asked for a part of this
5773 * segment, so we need to
5774 * reserve everything we've
5775 * been asked for.
5777 sz = len;
5778 } else {
5780 * We have to count the number
5781 * of pages required.
5783 for (cvp = svp; cvp < evp;
5784 cvp++) {
5785 if (!VPP_ISSWAPRES(cvp))
5786 sz++;
5788 sz <<= PAGESHIFT;
5792 /* Try to reserve the necessary swap. */
5793 if (anon_resv_zone(sz,
5794 seg->s_as->a_proc->p_zone) == 0) {
5795 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5796 return (IE_NOMEM);
5800 * Make a note of how much swap space
5801 * we've reserved.
5803 if (svd->pageswap == 0 && sz == seg->s_size) {
5804 svd->swresv = sz;
5805 } else {
5806 ASSERT(svd->vpage != NULL);
5807 svd->swresv += sz;
5808 svd->pageswap = 1;
5809 for (cvp = svp; cvp < evp; cvp++) {
5810 if (!VPP_ISSWAPRES(cvp))
5811 VPP_SETSWAPRES(cvp);
5815 } else {
5817 * Swap space is released only if this segment
5818 * does not map anonymous memory, since read faults
5819 * on such segments still need an anon slot to read
5820 * in the data.
5822 if (svd->swresv != 0 && svd->vp != NULL &&
5823 svd->amp == NULL && addr == seg->s_base &&
5824 len == seg->s_size && svd->pageprot == 0) {
5825 ASSERT(svd->pageswap == 0);
5826 anon_unresv_zone(svd->swresv,
5827 seg->s_as->a_proc->p_zone);
5828 svd->swresv = 0;
5829 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5830 "anon proc:%p %lu %u", seg, 0, 0);
5835 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5836 if (svd->prot == prot) {
5837 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5838 return (0); /* all done */
5840 svd->prot = (uchar_t)prot;
5841 } else if (svd->type == MAP_PRIVATE) {
5842 struct anon *ap = NULL;
5843 page_t *pp;
5844 u_offset_t offset, off;
5845 struct anon_map *amp;
5846 ulong_t anon_idx = 0;
5849 * A vpage structure exists or else the change does not
5850 * involve the entire segment. Establish a vpage structure
5851 * if none is there. Then, for each page in the range,
5852 * adjust its individual permissions. Note that write-
5853 * enabling a MAP_PRIVATE page can affect the claims for
5854 * locked down memory. Overcommitting memory terminates
5855 * the operation.
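/*
 * Sketch of the per-page claim handling below (illustrative): for
 * a locked page whose PROT_WRITE bit flips on, page_addclaim()
 * moves the page's locked-memory claim toward cow (p_cowcnt)
 * accounting; the reverse transition uses page_subclaim(). If
 * either fails, the system would overcommit and the loop bails out
 * early, ending in IE_NOMEM.
 */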
5857 segvn_vpage(seg);
5858 svd->pageprot = 1;
5859 if ((amp = svd->amp) != NULL) {
5860 anon_idx = svd->anon_index + seg_page(seg, addr);
5861 ASSERT(seg->s_szc == 0 ||
5862 IS_P2ALIGNED(anon_idx, pgcnt));
5863 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5866 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5867 evp = &svd->vpage[seg_page(seg, addr + len)];
5870 * See Statement at the beginning of segvn_lockop regarding
5871 * the way cowcnts and lckcnts are handled.
5873 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5875 if (seg->s_szc != 0) {
5876 if (amp != NULL) {
5877 anon_array_enter(amp, anon_idx,
5878 &cookie);
5880 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5881 !segvn_claim_pages(seg, svp, offset,
5882 anon_idx, prot)) {
5883 if (amp != NULL) {
5884 anon_array_exit(&cookie);
5886 break;
5888 if (amp != NULL) {
5889 anon_array_exit(&cookie);
5891 anon_idx++;
5892 } else {
5893 if (amp != NULL) {
5894 anon_array_enter(amp, anon_idx,
5895 &cookie);
5896 ap = anon_get_ptr(amp->ahp, anon_idx++);
5899 if (VPP_ISPPLOCK(svp) &&
5900 VPP_PROT(svp) != prot) {
5902 if (amp == NULL || ap == NULL) {
5903 vp = svd->vp;
5904 off = offset;
5905 } else
5906 swap_xlate(ap, &vp, &off);
5907 if (amp != NULL)
5908 anon_array_exit(&cookie);
5910 if ((pp = page_lookup(vp, off,
5911 SE_SHARED)) == NULL) {
5912 panic("segvn_setprot: no page");
5913 /*NOTREACHED*/
5915 ASSERT(seg->s_szc == 0);
5916 if ((VPP_PROT(svp) ^ prot) &
5917 PROT_WRITE) {
5918 if (prot & PROT_WRITE) {
5919 if (!page_addclaim(
5920 pp)) {
5921 page_unlock(pp);
5922 break;
5924 } else {
5925 if (!page_subclaim(
5926 pp)) {
5927 page_unlock(pp);
5928 break;
5932 page_unlock(pp);
5933 } else if (amp != NULL)
5934 anon_array_exit(&cookie);
5936 VPP_SETPROT(svp, prot);
5937 offset += PAGESIZE;
5939 if (amp != NULL)
5940 ANON_LOCK_EXIT(&amp->a_rwlock);
5943 * Did we terminate prematurely? If so, simply unload
5944 * the translations to the things we've updated so far.
5946 if (svp != evp) {
5947 if (unload_done) {
5948 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5949 return (IE_NOMEM);
5951 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
5952 PAGESIZE;
5953 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
5954 if (len != 0)
5955 hat_unload(seg->s_as->a_hat, addr,
5956 len, HAT_UNLOAD);
5957 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5958 return (IE_NOMEM);
5960 } else {
5961 segvn_vpage(seg);
5962 svd->pageprot = 1;
5963 evp = &svd->vpage[seg_page(seg, addr + len)];
5964 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5965 VPP_SETPROT(svp, prot);
5969 if (unload_done) {
5970 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5971 return (0);
5974 if (((prot & PROT_WRITE) != 0 &&
5975 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
5976 (prot & ~PROT_USER) == PROT_NONE) {
5978 * Either private or shared data with write access (in
5979 * which case we need to throw out all former translations
5980 * so that we get the right translations set up on fault
5981 * and we don't allow write access to any copy-on-write pages
5982 * that might be around or to prevent write access to pages
5983 * representing holes in a file), or we don't have permission
5984 * to access the memory at all (in which case we have to
5985 * unload any current translations that might exist).
5987 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
5988 } else {
5990 * A shared mapping or a private mapping in which write
5991 * protection is going to be denied - just change all the
5992 * protections over the range of addresses in question.
5993 * segvn does not support any other attributes other
5994 * than prot so we can use hat_chgattr.
5996 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
5999 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6001 return (0);
6005 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize,
6006 * to determine if the seg is capable of mapping the requested szc.
6008 static int
6009 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6011 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6012 struct segvn_data *nsvd;
6013 struct anon_map *amp = svd->amp;
6014 struct seg *nseg;
6015 caddr_t eaddr = addr + len, a;
6016 size_t pgsz = page_get_pagesize(szc);
6017 pgcnt_t pgcnt = page_get_pagecnt(szc);
6018 int err;
6019 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6020 extern struct vnode kvp;
6022 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6023 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6025 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6026 return (0);
6030 * addr should always be pgsz aligned but eaddr may be misaligned if
6031 * it's at the end of the segment.
6033 * XXX we should assert this condition since as_setpagesize() logic
6034 * guarantees it.
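/*
 * e.g. (hypothetical): with pgsz == 4M, addr must be 4M aligned,
 * while a misaligned eaddr is tolerated only when it equals
 * seg->s_base + seg->s_size, i.e. the misalignment sits at the
 * very end of the segment, as checked below.
 */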
6036 if (!IS_P2ALIGNED(addr, pgsz) ||
6037 (!IS_P2ALIGNED(eaddr, pgsz) &&
6038 eaddr != seg->s_base + seg->s_size)) {
6040 segvn_setpgsz_align_err++;
6041 return (EINVAL);
6044 if (amp != NULL && svd->type == MAP_SHARED) {
6045 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6046 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6048 segvn_setpgsz_anon_align_err++;
6049 return (EINVAL);
6053 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6054 szc > segvn_maxpgszc) {
6055 return (EINVAL);
6058 /* paranoid check */
6059 if (svd->vp != NULL &&
6060 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6061 return (EINVAL);
6064 if (seg->s_szc == 0 && svd->vp != NULL &&
6065 map_addr_vacalign_check(addr, off)) {
6066 return (EINVAL);
6070 * Check that protections are the same within new page
6071 * size boundaries.
6073 if (svd->pageprot) {
6074 for (a = addr; a < eaddr; a += pgsz) {
6075 if ((a + pgsz) > eaddr) {
6076 if (!sameprot(seg, a, eaddr - a)) {
6077 return (EINVAL);
6079 } else {
6080 if (!sameprot(seg, a, pgsz)) {
6081 return (EINVAL);
6088 * Since we are changing the page size we first have to
6089 * flush the cache. This ensures that subsequent pagelock
6090 * calls recheck protections.
6092 if (svd->softlockcnt > 0) {
6093 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6096 * If this is a shared segment, a non-zero softlockcnt
6097 * means locked pages are still in use.
6099 if (svd->type == MAP_SHARED) {
6100 return (EAGAIN);
6104 * Since we hold the segvn writers lock, nobody can fill
6105 * the cache with entries belonging to this seg during
6106 * the purge. The flush either succeeds or we still have
6107 * pending I/Os.
6109 segvn_purge(seg);
6110 if (svd->softlockcnt > 0) {
6111 return (EAGAIN);
6115 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6116 ASSERT(svd->amp == NULL);
6117 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6118 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6119 HAT_REGION_TEXT);
6120 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6121 } else if (svd->tr_state == SEGVN_TR_INIT) {
6122 svd->tr_state = SEGVN_TR_OFF;
6123 } else if (svd->tr_state == SEGVN_TR_ON) {
6124 ASSERT(svd->amp != NULL);
6125 segvn_textunrepl(seg, 1);
6126 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6127 amp = NULL;
6131 * Operation for sub range of existing segment.
6133 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6134 if (szc < seg->s_szc) {
6135 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6136 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6137 if (err == 0) {
6138 return (IE_RETRY);
6140 if (err == ENOMEM) {
6141 return (IE_NOMEM);
6143 return (err);
6145 if (addr != seg->s_base) {
6146 nseg = segvn_split_seg(seg, addr);
6147 if (eaddr != (nseg->s_base + nseg->s_size)) {
6148 /* eaddr is szc aligned */
6149 (void) segvn_split_seg(nseg, eaddr);
6151 return (IE_RETRY);
6153 if (eaddr != (seg->s_base + seg->s_size)) {
6154 /* eaddr is szc aligned */
6155 (void) segvn_split_seg(seg, eaddr);
6157 return (IE_RETRY);
6161 * Break any low level sharing and reset seg->s_szc to 0.
6163 if ((err = segvn_clrszc(seg)) != 0) {
6164 if (err == ENOMEM) {
6165 err = IE_NOMEM;
6167 return (err);
6169 ASSERT(seg->s_szc == 0);
6172 * If the end of the current segment is not pgsz aligned
6173 * then attempt to concatenate with the next segment.
6175 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6176 nseg = AS_SEGNEXT(seg->s_as, seg);
6177 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6178 return (ENOMEM);
6180 if (nseg->s_ops != &segvn_ops) {
6181 return (EINVAL);
6183 nsvd = (struct segvn_data *)nseg->s_data;
6184 if (nsvd->softlockcnt > 0) {
6186 * If this is a shared segment, a non-zero softlockcnt
6187 * means locked pages are still in use.
6189 if (nsvd->type == MAP_SHARED) {
6190 return (EAGAIN);
6192 segvn_purge(nseg);
6193 if (nsvd->softlockcnt > 0) {
6194 return (EAGAIN);
6197 err = segvn_clrszc(nseg);
6198 if (err == ENOMEM) {
6199 err = IE_NOMEM;
6201 if (err != 0) {
6202 return (err);
6204 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6205 err = segvn_concat(seg, nseg, 1);
6206 if (err == -1) {
6207 return (EINVAL);
6209 if (err == -2) {
6210 return (IE_NOMEM);
6212 return (IE_RETRY);
6216 * May need to re-align anon array to
6217 * new szc.
6219 if (amp != NULL) {
6220 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6221 struct anon_hdr *nahp;
6223 ASSERT(svd->type == MAP_PRIVATE);
6225 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6226 ASSERT(amp->refcnt == 1);
6227 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6228 if (nahp == NULL) {
6229 ANON_LOCK_EXIT(&amp->a_rwlock);
6230 return (IE_NOMEM);
6232 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6233 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6234 anon_release(nahp, btop(amp->size));
6235 ANON_LOCK_EXIT(&amp->a_rwlock);
6236 return (IE_NOMEM);
6238 anon_release(amp->ahp, btop(amp->size));
6239 amp->ahp = nahp;
6240 svd->anon_index = 0;
6241 ANON_LOCK_EXIT(&amp->a_rwlock);
6244 if (svd->vp != NULL && szc != 0) {
6245 struct vattr va;
6246 u_offset_t eoffpage = svd->offset;
6247 va.va_mask = AT_SIZE;
6248 eoffpage += seg->s_size;
6249 eoffpage = btopr(eoffpage);
6250 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6251 segvn_setpgsz_getattr_err++;
6252 return (EINVAL);
6254 if (btopr(va.va_size) < eoffpage) {
6255 segvn_setpgsz_eof_err++;
6256 return (EINVAL);
6258 if (amp != NULL) {
6260 * anon_fill_cow_holes() may call VOP_GETPAGE().
6261 * Don't take the anon map lock here to avoid holding it
6262 * across VOP_GETPAGE() calls that may call back into
6263 * segvn for klustering checks. We don't really need the
6264 * anon map lock here since it's a private segment and
6265 * we hold the as-level lock as writers.
6267 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6268 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6269 seg->s_size, szc, svd->prot, svd->vpage,
6270 svd->cred)) != 0) {
6271 return (EINVAL);
6274 segvn_setvnode_mpss(svd->vp);
6277 if (amp != NULL) {
6278 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6279 if (svd->type == MAP_PRIVATE) {
6280 amp->a_szc = szc;
6281 } else if (szc > amp->a_szc) {
6282 amp->a_szc = szc;
6284 ANON_LOCK_EXIT(&amp->a_rwlock);
6287 seg->s_szc = szc;
6289 return (0);
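/*
 * Illustrative userland sketch (not part of this file): the request
 * that reaches segvn_setpagesize() is memcntl(2) with MC_HAT_ADVISE,
 * as issued by mpss.so.1 or ppgsz(1). The 2MB size below is only an
 * example value; real code should pick a size reported by
 * getpagesizes(3C). as_setpagesize() absorbs the IE_RETRY returned
 * above after segment splits and concatenations; userland just sees
 * success or an errno.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	size_t lpgsz = 2UL * 1024 * 1024;	/* assumed large page size */
	size_t len = 2 * lpgsz;
	struct memcntl_mha mha;
	/* MAP_ALIGN: addr is interpreted as an alignment request. */
	caddr_t p = mmap((caddr_t)lpgsz, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON | MAP_ALIGN, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	mha.mha_cmd = MHA_MAPSIZE_VA;
	mha.mha_flags = 0;
	mha.mha_pagesize = lpgsz;
	if (memcntl(p, len, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0) != 0)
		perror("memcntl");
	return (0);
}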
6292 static int
6293 segvn_clrszc(struct seg *seg)
6295 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6296 struct anon_map *amp = svd->amp;
6297 size_t pgsz;
6298 pgcnt_t pages;
6299 int err = 0;
6300 caddr_t a = seg->s_base;
6301 caddr_t ea = a + seg->s_size;
6302 ulong_t an_idx = svd->anon_index;
6303 vnode_t *vp = svd->vp;
6304 struct vpage *vpage = svd->vpage;
6305 page_t *anon_pl[1 + 1], *pp;
6306 struct anon *ap, *oldap;
6307 uint_t prot = svd->prot, vpprot;
6308 int pageflag = 0;
6310 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6311 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6312 ASSERT(svd->softlockcnt == 0);
6314 if (vp == NULL && amp == NULL) {
6315 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6316 seg->s_szc = 0;
6317 return (0);
6320 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6321 ASSERT(svd->amp == NULL);
6322 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6323 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6324 HAT_REGION_TEXT);
6325 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6326 } else if (svd->tr_state == SEGVN_TR_ON) {
6327 ASSERT(svd->amp != NULL);
6328 segvn_textunrepl(seg, 1);
6329 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6330 amp = NULL;
6331 } else {
6332 if (svd->tr_state != SEGVN_TR_OFF) {
6333 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6334 svd->tr_state = SEGVN_TR_OFF;
6338 * Do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6339 * The unload argument is 0 when we are freeing the segment
6340 * and unload was already done.
6342 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6343 HAT_UNLOAD_UNMAP);
6346 if (amp == NULL || svd->type == MAP_SHARED) {
6347 seg->s_szc = 0;
6348 return (0);
6351 pgsz = page_get_pagesize(seg->s_szc);
6352 pages = btop(pgsz);
6355 * XXX anon rwlock is not really needed because this is a
6356 * private segment and we are writers.
6358 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6360 for (; a < ea; a += pgsz, an_idx += pages) {
6361 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6362 ASSERT(vpage != NULL || svd->pageprot == 0);
6363 if (vpage != NULL) {
6364 ASSERT(sameprot(seg, a, pgsz));
6365 prot = VPP_PROT(vpage);
6366 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6368 if (seg->s_szc != 0) {
6369 ASSERT(vp == NULL || anon_pages(amp->ahp,
6370 an_idx, pages) == pages);
6371 if ((err = anon_map_demotepages(amp, an_idx,
6372 seg, a, prot, vpage, svd->cred)) != 0) {
6373 goto out;
6375 } else {
6376 if (oldap->an_refcnt == 1) {
6377 continue;
6379 if ((err = anon_getpage(&oldap, &vpprot,
6380 anon_pl, PAGESIZE, seg, a, S_READ,
6381 svd->cred))) {
6382 goto out;
6384 if ((pp = anon_private(&ap, seg, a, prot,
6385 anon_pl[0], pageflag, svd->cred)) == NULL) {
6386 err = ENOMEM;
6387 goto out;
6389 anon_decref(oldap);
6390 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6391 ANON_SLEEP);
6392 page_unlock(pp);
6395 vpage = (vpage == NULL) ? NULL : vpage + pages;
6398 amp->a_szc = 0;
6399 seg->s_szc = 0;
6400 out:
6401 ANON_LOCK_EXIT(&amp->a_rwlock);
6402 return (err);
6405 static int
6406 segvn_claim_pages(
6407 struct seg *seg,
6408 struct vpage *svp,
6409 u_offset_t off,
6410 ulong_t anon_idx,
6411 uint_t prot)
6413 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6414 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6415 page_t **ppa;
6416 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6417 struct anon_map *amp = svd->amp;
6418 struct vpage *evp = svp + pgcnt;
6419 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6420 + seg->s_base;
6421 struct anon *ap;
6422 struct vnode *vp = svd->vp;
6423 page_t *pp;
6424 pgcnt_t pg_idx, i;
6425 int err = 0;
6426 anoff_t aoff;
6427 int anon = (amp != NULL) ? 1 : 0;
6429 ASSERT(svd->type == MAP_PRIVATE);
6430 ASSERT(svd->vpage != NULL);
6431 ASSERT(seg->s_szc != 0);
6432 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6433 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6434 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6436 if (VPP_PROT(svp) == prot)
6437 return (1);
6438 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6439 return (1);
6441 ppa = kmem_alloc(ppasize, KM_SLEEP);
6442 if (anon && vp != NULL) {
6443 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6444 anon = 0;
6445 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6447 ASSERT(!anon ||
6448 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6451 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6452 if (!VPP_ISPPLOCK(svp))
6453 continue;
6454 if (anon) {
6455 ap = anon_get_ptr(amp->ahp, anon_idx);
6456 if (ap == NULL) {
6457 panic("segvn_claim_pages: no anon slot");
6459 swap_xlate(ap, &vp, &aoff);
6460 off = (u_offset_t)aoff;
6462 ASSERT(vp != NULL);
6463 if ((pp = page_lookup(vp,
6464 (u_offset_t)off, SE_SHARED)) == NULL) {
6465 panic("segvn_claim_pages: no page");
6467 ppa[pg_idx++] = pp;
6468 off += PAGESIZE;
6471 if (ppa[0] == NULL) {
6472 kmem_free(ppa, ppasize);
6473 return (1);
6476 ASSERT(pg_idx <= pgcnt);
6477 ppa[pg_idx] = NULL;
6479 if (prot & PROT_WRITE)
6480 err = page_addclaim_pages(ppa);
6481 else
6482 err = page_subclaim_pages(ppa);
6484 for (i = 0; i < pg_idx; i++) {
6485 ASSERT(ppa[i] != NULL);
6486 page_unlock(ppa[i]);
6489 kmem_free(ppa, ppasize);
6490 return (err);
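/*
 * Illustrative sketch (not part of this file; claims_move is a
 * hypothetical helper): the early returns in segvn_claim_pages() key
 * off a single bit. XOR-ing the old and new protections isolates the
 * changed bits, and only a change in PROT_WRITE moves p_cowcnt and
 * p_lckcnt claims between the two counters.
 */
#include <sys/mman.h>
#include <stdio.h>

static const char *
claims_move(unsigned int oldprot, unsigned int newprot)
{
	if (!((oldprot ^ newprot) & PROT_WRITE))
		return ("write bit unchanged: claims stay put");
	return ((newprot & PROT_WRITE) ?
	    "gains write: page_addclaim_pages()" :
	    "loses write: page_subclaim_pages()");
}

int
main(void)
{
	printf("%s\n", claims_move(PROT_READ, PROT_READ | PROT_WRITE));
	printf("%s\n", claims_move(PROT_READ | PROT_WRITE, PROT_READ));
	printf("%s\n", claims_move(PROT_READ, PROT_READ | PROT_EXEC));
	return (0);
}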
6494 * Returns right (upper address) segment if split occurred.
6495 * If the address is equal to the beginning or end of its segment, it returns
6496 * the current segment.
6498 static struct seg *
6499 segvn_split_seg(struct seg *seg, caddr_t addr)
6501 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6502 struct seg *nseg;
6503 size_t nsize;
6504 struct segvn_data *nsvd;
6506 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6507 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6509 ASSERT(addr >= seg->s_base);
6510 ASSERT(addr <= seg->s_base + seg->s_size);
6511 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6513 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6514 return (seg);
6516 nsize = seg->s_base + seg->s_size - addr;
6517 seg->s_size = addr - seg->s_base;
6518 nseg = seg_alloc(seg->s_as, addr, nsize);
6519 ASSERT(nseg != NULL);
6520 nseg->s_ops = seg->s_ops;
6521 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6522 nseg->s_data = (void *)nsvd;
6523 nseg->s_szc = seg->s_szc;
6524 *nsvd = *svd;
6525 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6526 nsvd->seg = nseg;
6527 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6529 if (nsvd->vp != NULL) {
6530 VN_HOLD(nsvd->vp);
6531 nsvd->offset = svd->offset +
6532 (uintptr_t)(nseg->s_base - seg->s_base);
6533 if (nsvd->type == MAP_SHARED)
6534 lgrp_shm_policy_init(NULL, nsvd->vp);
6535 } else {
6537 * The offset for an anonymous segment has no significance in
6538 * terms of an offset into a file. If we were to use the above
6539 * calculation instead, the structures read out of
6540 * /proc/<pid>/xmap would be more difficult to decipher since
6541 * it would be unclear whether two seemingly contiguous
6542 * prxmap_t structures represented different segments or a
6543 * single segment that had been split up into multiple prxmap_t
6544 * structures (e.g. if some part of the segment had not yet
6545 * been faulted in).
6547 nsvd->offset = 0;
6550 ASSERT(svd->softlockcnt == 0);
6551 ASSERT(svd->softlockcnt_sbase == 0);
6552 ASSERT(svd->softlockcnt_send == 0);
6553 crhold(svd->cred);
6555 if (svd->vpage != NULL) {
6556 size_t bytes = vpgtob(seg_pages(seg));
6557 size_t nbytes = vpgtob(seg_pages(nseg));
6558 struct vpage *ovpage = svd->vpage;
6560 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6561 bcopy(ovpage, svd->vpage, bytes);
6562 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6563 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6564 kmem_free(ovpage, bytes + nbytes);
6566 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6567 struct anon_map *oamp = svd->amp, *namp;
6568 struct anon_hdr *nahp;
6570 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6571 ASSERT(oamp->refcnt == 1);
6572 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6573 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6574 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6576 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6577 namp->a_szc = nseg->s_szc;
6578 (void) anon_copy_ptr(oamp->ahp,
6579 svd->anon_index + btop(seg->s_size),
6580 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6581 anon_release(oamp->ahp, btop(oamp->size));
6582 oamp->ahp = nahp;
6583 oamp->size = seg->s_size;
6584 svd->anon_index = 0;
6585 nsvd->amp = namp;
6586 nsvd->anon_index = 0;
6587 ANON_LOCK_EXIT(&oamp->a_rwlock);
6588 } else if (svd->amp != NULL) {
6589 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6590 ASSERT(svd->amp == nsvd->amp);
6591 ASSERT(seg->s_szc <= svd->amp->a_szc);
6592 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6593 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6594 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6595 svd->amp->refcnt++;
6596 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6600 * Split the amount of swap reserved.
6602 if (svd->swresv) {
6604 * For MAP_NORESERVE, only allocate swap reserve for pages
6605 * being used. Other segments get enough to cover whole
6606 * segment.
6608 if (svd->flags & MAP_NORESERVE) {
6609 size_t oswresv;
6611 ASSERT(svd->amp);
6612 oswresv = svd->swresv;
6613 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6614 svd->anon_index, btop(seg->s_size)));
6615 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6616 nsvd->anon_index, btop(nseg->s_size)));
6617 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6618 } else {
6619 if (svd->pageswap) {
6620 svd->swresv = segvn_count_swap_by_vpages(seg);
6621 ASSERT(nsvd->swresv >= svd->swresv);
6622 nsvd->swresv -= svd->swresv;
6623 } else {
6624 ASSERT(svd->swresv == seg->s_size +
6625 nseg->s_size);
6626 svd->swresv = seg->s_size;
6627 nsvd->swresv = nseg->s_size;
6632 return (nseg);
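/*
 * Illustrative sketch (not part of this file; TOY_PAGESIZE and
 * toy_anon_pages are hypothetical stand-ins): a userland model of the
 * MAP_NORESERVE swap-reserve split above. Each side's reserve is
 * recomputed from its populated anon slots, as anon_pages() does, so
 * the two halves can never sum to more than the original reservation
 * (the ASSERT above).
 */
#include <stdio.h>
#include <stddef.h>

#define	TOY_PAGESIZE	4096

/* Count populated slots, as anon_pages() does over an anon array. */
static size_t
toy_anon_pages(void **ahp, size_t idx, size_t npgs)
{
	size_t n = 0, i;

	for (i = 0; i < npgs; i++)
		if (ahp[idx + i] != NULL)
			n++;
	return (n);
}

int
main(void)
{
	int x;
	void *slots[8] = { &x, NULL, &x, &x, NULL, NULL, &x, NULL };
	size_t split = 5;	/* pages that stay in the left segment */
	size_t lres = toy_anon_pages(slots, 0, split) * TOY_PAGESIZE;
	size_t rres = toy_anon_pages(slots, split, 8 - split) * TOY_PAGESIZE;

	/* Prints "left 12288 bytes, right 4096 bytes" for this input. */
	printf("left %zu bytes, right %zu bytes\n", lres, rres);
	return (0);
}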
6636 * Called on memory operations (unmap, setprot, setpagesize) for a subset
6637 * of a large page segment to either demote the memory range (SDR_RANGE)
6638 * or the ends (SDR_END) by addr/len.
6640 * Returns 0 on success; returns errno, including ENOMEM, on failure.
6642 static int
6643 segvn_demote_range(
6644 struct seg *seg,
6645 caddr_t addr,
6646 size_t len,
6647 int flag,
6648 uint_t szcvec)
6650 caddr_t eaddr = addr + len;
6651 caddr_t lpgaddr, lpgeaddr;
6652 struct seg *nseg;
6653 struct seg *badseg1 = NULL;
6654 struct seg *badseg2 = NULL;
6655 size_t pgsz;
6656 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6657 int err;
6658 uint_t szc = seg->s_szc;
6659 uint_t tszcvec;
6661 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6662 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6663 ASSERT(szc != 0);
6664 pgsz = page_get_pagesize(szc);
6665 ASSERT(seg->s_base != addr || seg->s_size != len);
6666 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6667 ASSERT(svd->softlockcnt == 0);
6668 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6669 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6671 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6672 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6673 if (flag == SDR_RANGE) {
6674 /* demote entire range */
6675 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6676 (void) segvn_split_seg(nseg, lpgeaddr);
6677 ASSERT(badseg1->s_base == lpgaddr);
6678 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6679 } else if (addr != lpgaddr) {
6680 ASSERT(flag == SDR_END);
6681 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6682 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6683 eaddr < lpgaddr + 2 * pgsz) {
6684 (void) segvn_split_seg(nseg, lpgeaddr);
6685 ASSERT(badseg1->s_base == lpgaddr);
6686 ASSERT(badseg1->s_size == 2 * pgsz);
6687 } else {
6688 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6689 ASSERT(badseg1->s_base == lpgaddr);
6690 ASSERT(badseg1->s_size == pgsz);
6691 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6692 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6693 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6694 badseg2 = nseg;
6695 (void) segvn_split_seg(nseg, lpgeaddr);
6696 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6697 ASSERT(badseg2->s_size == pgsz);
6700 } else {
6701 ASSERT(flag == SDR_END);
6702 ASSERT(eaddr < lpgeaddr);
6703 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6704 (void) segvn_split_seg(nseg, lpgeaddr);
6705 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6706 ASSERT(badseg1->s_size == pgsz);
6709 ASSERT(badseg1 != NULL);
6710 ASSERT(badseg1->s_szc == szc);
6711 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6712 badseg1->s_size == 2 * pgsz);
6713 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6714 ASSERT(badseg1->s_size == pgsz ||
6715 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6716 if (err = segvn_clrszc(badseg1)) {
6717 return (err);
6719 ASSERT(badseg1->s_szc == 0);
6721 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6722 uint_t tszc = highbit(tszcvec) - 1;
6723 caddr_t ta = MAX(addr, badseg1->s_base);
6724 caddr_t te;
6725 size_t tpgsz = page_get_pagesize(tszc);
6727 ASSERT(svd->type == MAP_SHARED);
6728 ASSERT(flag == SDR_END);
6729 ASSERT(tszc < szc && tszc > 0);
6731 if (eaddr > badseg1->s_base + badseg1->s_size) {
6732 te = badseg1->s_base + badseg1->s_size;
6733 } else {
6734 te = eaddr;
6737 ASSERT(ta <= te);
6738 badseg1->s_szc = tszc;
6739 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6740 if (badseg2 != NULL) {
6741 err = segvn_demote_range(badseg1, ta, te - ta,
6742 SDR_END, tszcvec);
6743 if (err != 0) {
6744 return (err);
6746 } else {
6747 return (segvn_demote_range(badseg1, ta,
6748 te - ta, SDR_END, tszcvec));
6753 if (badseg2 == NULL)
6754 return (0);
6755 ASSERT(badseg2->s_szc == szc);
6756 ASSERT(badseg2->s_size == pgsz);
6757 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6758 if (err = segvn_clrszc(badseg2)) {
6759 return (err);
6761 ASSERT(badseg2->s_szc == 0);
6763 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6764 uint_t tszc = highbit(tszcvec) - 1;
6765 size_t tpgsz = page_get_pagesize(tszc);
6767 ASSERT(svd->type == MAP_SHARED);
6768 ASSERT(flag == SDR_END);
6769 ASSERT(tszc < szc && tszc > 0);
6770 ASSERT(badseg2->s_base > addr);
6771 ASSERT(eaddr > badseg2->s_base);
6772 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6774 badseg2->s_szc = tszc;
6775 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6776 return (segvn_demote_range(badseg2, badseg2->s_base,
6777 eaddr - badseg2->s_base, SDR_END, tszcvec));
6781 return (0);
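/*
 * Illustrative sketch (not part of this file; the TOY_* macros are
 * hypothetical names mirroring the <sys/sysmacros.h> power-of-2
 * helpers): CALC_LPG_REGION, used above, widens [addr, addr + len) to
 * the enclosing large-page boundaries that segvn_demote_range() then
 * carves off and demotes.
 */
#include <stdio.h>
#include <stdint.h>

#define	TOY_P2ALIGN(x, a)	((x) & -(a))
#define	TOY_P2ROUNDUP(x, a)	(-(-(x) & -(a)))

int
main(void)
{
	uintptr_t addr = 0x10003000, len = 0x2000;
	uintptr_t pgsz = 0x400000;	/* e.g. a 4MB large page */
	uintptr_t lpgaddr = TOY_P2ALIGN(addr, pgsz);
	uintptr_t lpgeaddr = TOY_P2ROUNDUP(addr + len, pgsz);

	/* Prints "[0x10000000, 0x10400000)" for this input. */
	printf("[0x%lx, 0x%lx)\n",
	    (unsigned long)lpgaddr, (unsigned long)lpgeaddr);
	return (0);
}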
6784 static int
6785 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6787 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6788 struct vpage *vp, *evp;
6790 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6792 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6794 * If segment protections can be used, simply check against them.
6796 if (svd->pageprot == 0) {
6797 int err;
6799 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6800 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6801 return (err);
6805 * Have to check down to the vpage level.
6807 evp = &svd->vpage[seg_page(seg, addr + len)];
6808 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6809 if ((VPP_PROT(vp) & prot) != prot) {
6810 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6811 return (EACCES);
6814 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6815 return (0);
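/*
 * Illustrative sketch (not part of this file; prot_ok is a
 * hypothetical helper): the check above is a subset test on
 * protection bits. (have & want) == want succeeds iff every requested
 * bit is present, which is why a PROT_READ probe passes on a
 * PROT_READ|PROT_WRITE page but not the other way around.
 */
#include <sys/mman.h>
#include <stdio.h>

static int
prot_ok(unsigned int have, unsigned int want)
{
	return ((have & want) == want);	/* 0 maps to EACCES above */
}

int
main(void)
{
	printf("%d\n", prot_ok(PROT_READ | PROT_WRITE, PROT_READ)); /* 1 */
	printf("%d\n", prot_ok(PROT_READ, PROT_READ | PROT_WRITE)); /* 0 */
	return (0);
}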
6818 static int
6819 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6821 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6822 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6824 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6826 if (pgno != 0) {
6827 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6828 if (svd->pageprot == 0) {
6829 do {
6830 protv[--pgno] = svd->prot;
6831 } while (pgno != 0);
6832 } else {
6833 size_t pgoff = seg_page(seg, addr);
6835 do {
6836 pgno--;
6837 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6838 } while (pgno != 0);
6840 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6842 return (0);
6845 static u_offset_t
6846 segvn_getoffset(struct seg *seg, caddr_t addr)
6848 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6850 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6852 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6855 /*ARGSUSED*/
6856 static int
6857 segvn_gettype(struct seg *seg, caddr_t addr)
6859 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6861 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6863 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6864 MAP_INITDATA)));
6867 /*ARGSUSED*/
6868 static int
6869 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6871 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6873 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6875 *vpp = svd->vp;
6876 return (0);
6880 * Check to see if it makes sense to do kluster/read ahead to
6881 * addr + delta relative to the mapping at addr. We assume here
6882 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6884 * For segvn, we currently "approve" of the action if we are
6885 * still in the segment and it maps from the same vp/off,
6886 * or if the advice stored in segvn_data or vpages allows it.
6887 * Currently, klustering is disallowed if MADV_RANDOM is set, or if MADV_SEQUENTIAL is set and delta is negative.
6889 static int
6890 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6892 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6893 struct anon *oap, *ap;
6894 ssize_t pd;
6895 size_t page;
6896 struct vnode *vp1, *vp2;
6897 u_offset_t off1, off2;
6898 struct anon_map *amp;
6900 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6901 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6902 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
6904 if (addr + delta < seg->s_base ||
6905 addr + delta >= (seg->s_base + seg->s_size))
6906 return (-1); /* exceeded segment bounds */
6908 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
6909 page = seg_page(seg, addr);
6912 * Check to see if either of the pages addr or addr + delta
6913 * has advice set that prevents klustering (if MADV_RANDOM advice
6914 * is set for the entire segment, or MADV_SEQUENTIAL is set and
6915 * delta is negative).
6917 if (svd->advice == MADV_RANDOM ||
6918 svd->advice == MADV_SEQUENTIAL && delta < 0)
6919 return (-1);
6920 else if (svd->pageadvice && svd->vpage) {
6921 struct vpage *bvpp, *evpp;
6923 bvpp = &svd->vpage[page];
6924 evpp = &svd->vpage[page + pd];
6925 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
6926 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
6927 return (-1);
6928 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
6929 VPP_ADVICE(evpp) == MADV_RANDOM)
6930 return (-1);
6933 if (svd->type == MAP_SHARED)
6934 return (0); /* shared mapping - all ok */
6936 if ((amp = svd->amp) == NULL)
6937 return (0); /* off original vnode */
6939 page += svd->anon_index;
6941 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
6943 oap = anon_get_ptr(amp->ahp, page);
6944 ap = anon_get_ptr(amp->ahp, page + pd);
6946 ANON_LOCK_EXIT(&amp->a_rwlock);
6948 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
6949 return (-1); /* one with and one without an anon */
6952 if (oap == NULL) { /* implies that ap == NULL */
6953 return (0); /* off original vnode */
6957 * Now we know we have two anon pointers - check to
6958 * see if they happen to be properly allocated.
6962 * XXX We cheat here and don't lock the anon slots. We can't because
6963 * we may have been called from the anon layer which might already
6964 * have locked them. We are holding a refcnt on the slots so they
6965 * can't disappear. The worst that will happen is we'll get the wrong
6966 * names (vp, off) for the slots and make a poor klustering decision.
6968 swap_xlate(ap, &vp1, &off1);
6969 swap_xlate(oap, &vp2, &off2);
6972 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
6973 return (-1);
6974 return (0);
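/*
 * Illustrative sketch (not part of this file): the "divide to preserve
 * sign bit" comment above is about the (ssize_t) cast on the divisor.
 * Dividing a negative delta by an unsigned page size would first
 * convert delta to a huge unsigned value; casting the divisor to
 * ssize_t keeps the division signed.
 */
#include <sys/types.h>
#include <stdio.h>

int
main(void)
{
	size_t pagesize = 8192;		/* assumed page size */
	ssize_t delta = -3 * 8192;	/* a negative PAGESIZE'd multiple */

	printf("signed:   %ld\n", (long)(delta / (ssize_t)pagesize));
	printf("unsigned: %lu\n", (unsigned long)(delta / pagesize));
	return (0);
}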
6978 * Swap the pages of seg out to secondary storage, returning the
6979 * number of bytes of storage freed.
6981 * The basic idea is first to unload all translations and then to call
6982 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
6983 * swap device. Pages to which other segments have mappings will remain
6984 * mapped and won't be swapped. Our caller (as_swapout) has already
6985 * performed the unloading step.
6987 * The value returned is intended to correlate well with the process's
6988 * memory requirements. However, there are some caveats:
6989 * 1) When given a shared segment as argument, this routine will
6990 * only succeed in swapping out pages for the last sharer of the
6991 * segment. (Previous callers will only have decremented mapping
6992 * reference counts.)
6993 * 2) We assume that the hat layer maintains a large enough translation
6994 * cache to capture process reference patterns.
6996 static size_t
6997 segvn_swapout(struct seg *seg)
6999 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7000 struct anon_map *amp;
7001 pgcnt_t pgcnt = 0;
7002 pgcnt_t npages;
7003 pgcnt_t page;
7004 ulong_t anon_index;
7006 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7008 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7010 * Find pages unmapped by our caller and force them
7011 * out to the virtual swap device.
7013 if ((amp = svd->amp) != NULL)
7014 anon_index = svd->anon_index;
7015 npages = seg->s_size >> PAGESHIFT;
7016 for (page = 0; page < npages; page++) {
7017 page_t *pp;
7018 struct anon *ap;
7019 struct vnode *vp;
7020 u_offset_t off;
7021 anon_sync_obj_t cookie;
7024 * Obtain <vp, off> pair for the page, then look it up.
7026 * Note that this code is willing to consider regular
7027 * pages as well as anon pages. Is this appropriate here?
7029 ap = NULL;
7030 if (amp != NULL) {
7031 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7032 if (anon_array_try_enter(amp, anon_index + page,
7033 &cookie)) {
7034 ANON_LOCK_EXIT(&amp->a_rwlock);
7035 continue;
7037 ap = anon_get_ptr(amp->ahp, anon_index + page);
7038 if (ap != NULL) {
7039 swap_xlate(ap, &vp, &off);
7040 } else {
7041 vp = svd->vp;
7042 off = svd->offset + ptob(page);
7044 anon_array_exit(&cookie);
7045 ANON_LOCK_EXIT(&amp->a_rwlock);
7046 } else {
7047 vp = svd->vp;
7048 off = svd->offset + ptob(page);
7050 if (vp == NULL) { /* untouched zfod page */
7051 ASSERT(ap == NULL);
7052 continue;
7055 pp = page_lookup_nowait(vp, off, SE_SHARED);
7056 if (pp == NULL)
7057 continue;
7061 * Examine the page to see whether it can be tossed out,
7062 * keeping track of how many we've found.
7064 if (!page_tryupgrade(pp)) {
7066 * If the page has an i/o lock and no mappings,
7067 * it's very likely that the page is being
7068 * written out as a result of klustering.
7069 * Assume this is so and take credit for it here.
7071 if (!page_io_trylock(pp)) {
7072 if (!hat_page_is_mapped(pp))
7073 pgcnt++;
7074 } else {
7075 page_io_unlock(pp);
7077 page_unlock(pp);
7078 continue;
7080 ASSERT(!page_iolock_assert(pp));
7084 * Skip if page is locked or has mappings.
7085 * We don't need the page_struct_lock to look at lckcnt
7086 * and cowcnt because the page is exclusive locked.
7088 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
7089 hat_page_is_mapped(pp)) {
7090 page_unlock(pp);
7091 continue;
7095 * dispose skips large pages so try to demote first.
7097 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
7098 page_unlock(pp);
7100 * XXX should skip the remaining page_t's of this
7101 * large page.
7103 continue;
7106 ASSERT(pp->p_szc == 0);
7109 * No longer mapped -- we can toss it out. How
7110 * we do so depends on whether or not it's dirty.
7112 if (hat_ismod(pp) && pp->p_vnode) {
7114 * We must clean the page before it can be
7115 * freed. Setting B_FREE will cause pvn_done
7116 * to free the page when the i/o completes.
7117 * XXX: This also causes it to be accounted
7118 * as a pageout instead of a swap: need
7119 * B_SWAPOUT bit to use instead of B_FREE.
7121 * Hold the vnode before releasing the page lock
7122 * to prevent it from being freed and re-used by
7123 * some other thread.
7125 VN_HOLD(vp);
7126 page_unlock(pp);
7129 * Queue all i/o requests for the pageout thread
7130 * to avoid saturating the pageout devices.
7132 if (!queue_io_request(vp, off))
7133 VN_RELE(vp);
7134 } else {
7136 * The page was clean, free it.
7138 * XXX: Can we ever encounter modified pages
7139 * with no associated vnode here?
7141 ASSERT(pp->p_vnode != NULL);
7142 /*LINTED: constant in conditional context*/
7143 VN_DISPOSE(pp, B_FREE, 0, kcred);
7147 * Credit now even if i/o is in progress.
7149 pgcnt++;
7151 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7154 * Wakeup pageout to initiate i/o on all queued requests.
7156 cv_signal_pageout();
7157 return (ptob(pgcnt));
7161 * Synchronize primary storage cache with real object in virtual memory.
7163 * XXX - Anonymous pages should not be sync'ed out at all.
7165 static int
7166 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7168 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7169 struct vpage *vpp;
7170 page_t *pp;
7171 u_offset_t offset;
7172 struct vnode *vp;
7173 u_offset_t off;
7174 caddr_t eaddr;
7175 int bflags;
7176 int err = 0;
7177 int segtype;
7178 int pageprot;
7179 int prot;
7180 ulong_t anon_index;
7181 struct anon_map *amp;
7182 struct anon *ap;
7183 anon_sync_obj_t cookie;
7185 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7187 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7189 if (svd->softlockcnt > 0) {
7191 * If this is a shared segment, a non-zero softlockcnt
7192 * means locked pages are still in use.
7194 if (svd->type == MAP_SHARED) {
7195 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7196 return (EAGAIN);
7200 * Flush all pages from the seg cache;
7201 * otherwise we may deadlock in swap_putpage
7202 * for a B_INVAL page (4175402).
7204 * Even if we grab the segvn WRITER's lock
7205 * here, there might be another thread which could've
7206 * successfully performed lookup/insert just before
7207 * we acquired the lock here. So, grabbing either
7208 * lock here is not of much use. Until we devise
7209 * a strategy at upper layers to solve the
7210 * synchronization issues completely, we expect
7211 * applications to handle this appropriately.
7213 segvn_purge(seg);
7214 if (svd->softlockcnt > 0) {
7215 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7216 return (EAGAIN);
7218 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7219 svd->amp->a_softlockcnt > 0) {
7221 * Try to purge this amp's entries from pcache. It will
7222 * succeed only if other segments that share the amp have no
7223 * outstanding softlock's.
7225 segvn_purge(seg);
7226 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7227 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7228 return (EAGAIN);
7232 vpp = svd->vpage;
7233 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7234 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7235 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7237 if (attr) {
7238 pageprot = attr & ~(SHARED|PRIVATE);
7239 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7242 * We are done if the segment types don't match
7243 * or if we have segment level protections and
7244 * they don't match.
7246 if (svd->type != segtype) {
7247 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7248 return (0);
7250 if (vpp == NULL) {
7251 if (svd->prot != pageprot) {
7252 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7253 return (0);
7255 prot = svd->prot;
7256 } else
7257 vpp = &svd->vpage[seg_page(seg, addr)];
7259 } else if (svd->vp && svd->amp == NULL &&
7260 (flags & MS_INVALIDATE) == 0) {
7263 * No attributes, no anonymous pages, and the MS_INVALIDATE
7264 * flag is not set: just use one big request.
7266 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7267 bflags, svd->cred, NULL);
7268 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7269 return (err);
7272 if ((amp = svd->amp) != NULL)
7273 anon_index = svd->anon_index + seg_page(seg, addr);
7275 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7276 ap = NULL;
7277 if (amp != NULL) {
7278 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7279 anon_array_enter(amp, anon_index, &cookie);
7280 ap = anon_get_ptr(amp->ahp, anon_index++);
7281 if (ap != NULL) {
7282 swap_xlate(ap, &vp, &off);
7283 } else {
7284 vp = svd->vp;
7285 off = offset;
7287 anon_array_exit(&cookie);
7288 ANON_LOCK_EXIT(&amp->a_rwlock);
7289 } else {
7290 vp = svd->vp;
7291 off = offset;
7293 offset += PAGESIZE;
7295 if (vp == NULL) /* untouched zfod page */
7296 continue;
7298 if (attr) {
7299 if (vpp) {
7300 prot = VPP_PROT(vpp);
7301 vpp++;
7303 if (prot != pageprot) {
7304 continue;
7309 * See if any of these pages are locked -- if so, then we
7310 * will have to truncate an invalidate request at the first
7311 * locked one. We don't need the page_struct_lock to test
7312 * as this is only advisory; even if we acquire it someone
7313 * might race in and lock the page after we unlock and before
7314 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7316 if (flags & MS_INVALIDATE) {
7317 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7318 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7319 page_unlock(pp);
7320 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7321 return (EBUSY);
7323 if (ap != NULL && pp->p_szc != 0 &&
7324 page_tryupgrade(pp)) {
7325 if (pp->p_lckcnt == 0 &&
7326 pp->p_cowcnt == 0) {
7328 * swapfs VN_DISPOSE() won't
7329 * invalidate large pages.
7330 * Attempt to demote.
7331 * XXX can't help it if it
7332 * fails. But for swapfs
7333 * pages it is no big deal.
7335 (void) page_try_demote_pages(
7336 pp);
7339 page_unlock(pp);
7341 } else if (svd->type == MAP_SHARED && amp != NULL) {
7343 * Avoid writing ISM's large pages out to disk
7344 * because segspt_free_pages() relies on the an_pvp
7345 * of such pages' anon slots being NULL.
7348 ASSERT(svd->vp == NULL);
7350 * swapfs uses page_lookup_nowait if not freeing or
7351 * invalidating and skips a page if
7352 * page_lookup_nowait returns NULL.
7354 pp = page_lookup_nowait(vp, off, SE_SHARED);
7355 if (pp == NULL) {
7356 continue;
7358 if (pp->p_szc != 0) {
7359 page_unlock(pp);
7360 continue;
7364 * Note ISM pages are created large so (vp, off)'s
7365 * page cannot suddenly become large after we unlock
7366 * pp.
7368 page_unlock(pp);
7371 * XXX - Should ultimately try to kluster
7372 * calls to VOP_PUTPAGE() for performance.
7374 VN_HOLD(vp);
7375 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7376 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7377 svd->cred, NULL);
7379 VN_RELE(vp);
7380 if (err)
7381 break;
7383 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7384 return (err);
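/*
 * Illustrative userland sketch (not part of this file; /tmp/sync_demo
 * is an example path): segvn_sync() implements msync(3C), reached via
 * memcntl(2)/MC_SYNC. MS_ASYNC maps to B_ASYNC and MS_INVALIDATE to
 * B_INVAL above, and EBUSY comes back when MS_INVALIDATE hits a locked
 * page.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/sync_demo", O_RDWR | O_CREAT, 0644);
	char *p;

	if (fd < 0 || ftruncate(fd, (off_t)pgsz) != 0)
		return (1);
	p = mmap(NULL, pgsz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	p[0] = 'x';				/* dirty the page */
	if (msync(p, pgsz, MS_SYNC) != 0)	/* one big VOP_PUTPAGE */
		perror("msync");
	if (msync(p, pgsz, MS_ASYNC | MS_INVALIDATE) != 0)
		perror("msync invalidate");	/* EBUSY if page locked */
	return (0);
}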
7388 * Determine if we have data corresponding to pages in the
7389 * primary storage virtual memory cache (i.e., "in core").
7391 static size_t
7392 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7394 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7395 struct vnode *vp, *avp;
7396 u_offset_t offset, aoffset;
7397 size_t p, ep;
7398 int ret;
7399 struct vpage *vpp;
7400 page_t *pp;
7401 uint_t start;
7402 struct anon_map *amp; /* XXX - for locknest */
7403 struct anon *ap;
7404 uint_t attr;
7405 anon_sync_obj_t cookie;
7407 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7409 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7410 if (svd->amp == NULL && svd->vp == NULL) {
7411 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7412 bzero(vec, btopr(len));
7413 return (len); /* no anonymous pages created yet */
7416 p = seg_page(seg, addr);
7417 ep = seg_page(seg, addr + len);
7418 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7420 amp = svd->amp;
7421 for (; p < ep; p++, addr += PAGESIZE) {
7422 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7423 ret = start;
7424 ap = NULL;
7425 avp = NULL;
7426 /* Grab the vnode/offset for the anon slot */
7427 if (amp != NULL) {
7428 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7429 anon_array_enter(amp, svd->anon_index + p, &cookie);
7430 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7431 if (ap != NULL) {
7432 swap_xlate(ap, &avp, &aoffset);
7434 anon_array_exit(&cookie);
7435 ANON_LOCK_EXIT(&amp->a_rwlock);
7437 if ((avp != NULL) && page_exists(avp, aoffset)) {
7438 /* A page exists for the anon slot */
7439 ret |= SEG_PAGE_INCORE;
7442 * If page is mapped and writable
7444 attr = (uint_t)0;
7445 if ((hat_getattr(seg->s_as->a_hat, addr,
7446 &attr) != -1) && (attr & PROT_WRITE)) {
7447 ret |= SEG_PAGE_ANON;
7450 * Don't get page_struct lock for lckcnt and cowcnt,
7451 * since this is purely advisory.
7453 if ((pp = page_lookup_nowait(avp, aoffset,
7454 SE_SHARED)) != NULL) {
7455 if (pp->p_lckcnt)
7456 ret |= SEG_PAGE_SOFTLOCK;
7457 if (pp->p_cowcnt)
7458 ret |= SEG_PAGE_HASCOW;
7459 page_unlock(pp);
7463 /* Gather vnode statistics */
7464 vp = svd->vp;
7465 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7467 if (vp != NULL) {
7469 * Try to obtain a "shared" lock on the page
7470 * without blocking. If this fails, determine
7471 * if the page is in memory.
7473 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7474 if ((pp == NULL) && (page_exists(vp, offset))) {
7475 /* Page is incore, and is named */
7476 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7479 * Don't get page_struct lock for lckcnt and cowcnt,
7480 * since this is purely advisory.
7482 if (pp != NULL) {
7483 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7484 if (pp->p_lckcnt)
7485 ret |= SEG_PAGE_SOFTLOCK;
7486 if (pp->p_cowcnt)
7487 ret |= SEG_PAGE_HASCOW;
7488 page_unlock(pp);
7492 /* Gather virtual page information */
7493 if (vpp) {
7494 if (VPP_ISPPLOCK(vpp))
7495 ret |= SEG_PAGE_LOCKED;
7496 vpp++;
7499 *vec++ = (char)ret;
7501 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7502 return (len);
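/*
 * Illustrative userland sketch (not part of this file): the per-page
 * vector built above is what mincore(3C) reports, with residency
 * (SEG_PAGE_INCORE) in the low-order bit of each entry. Touching only
 * the first page of an anonymous mapping leaves the rest without anon
 * slots, so they report as not in core.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
	size_t npgs = 4, i;
	char *vec = malloc(npgs);
	char *p = mmap(NULL, npgs * pgsz, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED || vec == NULL)
		return (1);
	p[0] = 1;			/* fault in only the first page */
	if (mincore(p, npgs * pgsz, vec) != 0) {
		perror("mincore");
		return (1);
	}
	for (i = 0; i < npgs; i++)	/* expect "1 0 0 0" */
		printf("%d ", vec[i] & 1);
	printf("\n");
	return (0);
}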
7506 * Statement for p_cowcnts/p_lckcnts.
7508 * p_cowcnt is updated while mlocking/munlocking a MAP_PRIVATE, PROT_WRITE
7509 * region, irrespective of the following factors or anything else:
7511 * (1) whether anon slots are populated
7512 * (2) whether cow has been broken
7513 * (3) whether refcnt on ap is 1 or greater than 1
7515 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7516 * and munlock.
7519 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7521 * if vpage has PROT_WRITE
7522 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7523 * else
7524 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7526 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7527 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7529 * We may also break COW if softlocking on read access in the physio case.
7530 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7531 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7532 * vpage doesn't have PROT_WRITE.
7535 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7537 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7538 * increment p_lckcnt by calling page_subclaim() which takes care of
7539 * availrmem accounting and p_lckcnt overflow.
7541 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7542 * increment p_cowcnt by calling page_addclaim() which takes care of
7543 * availrmem availability and p_cowcnt overflow.
7547 * Lock down (or unlock) pages mapped by this segment.
7549 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7550 * At fault time they will be relocated into larger pages.
7552 static int
7553 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7554 int attr, int op, ulong_t *lockmap, size_t pos)
7556 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7557 struct vpage *vpp;
7558 struct vpage *evp;
7559 page_t *pp;
7560 u_offset_t offset;
7561 u_offset_t off;
7562 int segtype;
7563 int pageprot;
7564 int claim;
7565 struct vnode *vp;
7566 ulong_t anon_index;
7567 struct anon_map *amp;
7568 struct anon *ap;
7569 struct vattr va;
7570 anon_sync_obj_t cookie;
7571 struct kshmid *sp = NULL;
7572 struct proc *p = curproc;
7573 kproject_t *proj = NULL;
7574 int chargeproc = 1;
7575 size_t locked_bytes = 0;
7576 size_t unlocked_bytes = 0;
7577 int err = 0;
7580 * Hold write lock on address space because may split or concatenate
7581 * segments
7583 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7586 * If this is a shm, use the shm's project and zone; else use
7587 * the project and zone of the calling process.
7590 /* Determine if this segment backs a sysV shm */
7591 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7592 ASSERT(svd->type == MAP_SHARED);
7593 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7594 sp = svd->amp->a_sp;
7595 proj = sp->shm_perm.ipc_proj;
7596 chargeproc = 0;
7599 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7600 if (attr) {
7601 pageprot = attr & ~(SHARED|PRIVATE);
7602 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7605 * We are done if the segment types don't match
7606 * or if we have segment level protections and
7607 * they don't match.
7609 if (svd->type != segtype) {
7610 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7611 return (0);
7613 if (svd->pageprot == 0 && svd->prot != pageprot) {
7614 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7615 return (0);
7619 if (op == MC_LOCK) {
7620 if (svd->tr_state == SEGVN_TR_INIT) {
7621 svd->tr_state = SEGVN_TR_OFF;
7622 } else if (svd->tr_state == SEGVN_TR_ON) {
7623 ASSERT(svd->amp != NULL);
7624 segvn_textunrepl(seg, 0);
7625 ASSERT(svd->amp == NULL &&
7626 svd->tr_state == SEGVN_TR_OFF);
7631 * If we're locking, then we must create a vpage structure if
7632 * none exists. If we're unlocking, then check to see if there
7633 * is a vpage -- if not, then we could not have locked anything.
7636 if ((vpp = svd->vpage) == NULL) {
7637 if (op == MC_LOCK)
7638 segvn_vpage(seg);
7639 else {
7640 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7641 return (0);
7646 * The anonymous data vector (i.e., previously
7647 * unreferenced mapping to swap space) can be allocated
7648 * by lazily testing for its existence.
7650 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7651 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7652 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7653 svd->amp->a_szc = seg->s_szc;
7656 if ((amp = svd->amp) != NULL) {
7657 anon_index = svd->anon_index + seg_page(seg, addr);
7660 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7661 evp = &svd->vpage[seg_page(seg, addr + len)];
7663 if (sp != NULL)
7664 mutex_enter(&sp->shm_mlock);
7666 /* determine number of unlocked bytes in range for lock operation */
7667 if (op == MC_LOCK) {
7669 if (sp == NULL) {
7670 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7671 vpp++) {
7672 if (!VPP_ISPPLOCK(vpp))
7673 unlocked_bytes += PAGESIZE;
7675 } else {
7676 ulong_t i_idx, i_edx;
7677 anon_sync_obj_t i_cookie;
7678 struct anon *i_ap;
7679 struct vnode *i_vp;
7680 u_offset_t i_off;
7682 /* Only count sysV pages once for locked memory */
7683 i_edx = svd->anon_index + seg_page(seg, addr + len);
7684 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7685 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7686 anon_array_enter(amp, i_idx, &i_cookie);
7687 i_ap = anon_get_ptr(amp->ahp, i_idx);
7688 if (i_ap == NULL) {
7689 unlocked_bytes += PAGESIZE;
7690 anon_array_exit(&i_cookie);
7691 continue;
7693 swap_xlate(i_ap, &i_vp, &i_off);
7694 anon_array_exit(&i_cookie);
7695 pp = page_lookup(i_vp, i_off, SE_SHARED);
7696 if (pp == NULL) {
7697 unlocked_bytes += PAGESIZE;
7698 continue;
7699 } else if (pp->p_lckcnt == 0)
7700 unlocked_bytes += PAGESIZE;
7701 page_unlock(pp);
7703 ANON_LOCK_EXIT(&amp->a_rwlock);
7706 mutex_enter(&p->p_lock);
7707 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7708 chargeproc);
7709 mutex_exit(&p->p_lock);
7711 if (err) {
7712 if (sp != NULL)
7713 mutex_exit(&sp->shm_mlock);
7714 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7715 return (err);
7719 * Loop over all pages in the range. Process if we're locking and
7720 * page has not already been locked in this mapping; or if we're
7721 * unlocking and the page has been locked.
7723 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7724 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7725 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7726 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7727 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7729 if (amp != NULL)
7730 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7732 * If this isn't a MAP_NORESERVE segment and
7733 * we're locking, allocate anon slots if they
7734 * don't exist. The page is brought in later on.
7736 if (op == MC_LOCK && svd->vp == NULL &&
7737 ((svd->flags & MAP_NORESERVE) == 0) &&
7738 amp != NULL &&
7739 ((ap = anon_get_ptr(amp->ahp, anon_index))
7740 == NULL)) {
7741 anon_array_enter(amp, anon_index, &cookie);
7743 if ((ap = anon_get_ptr(amp->ahp,
7744 anon_index)) == NULL) {
7745 pp = anon_zero(seg, addr, &ap,
7746 svd->cred);
7747 if (pp == NULL) {
7748 anon_array_exit(&cookie);
7749 ANON_LOCK_EXIT(&amp->a_rwlock);
7750 err = ENOMEM;
7751 goto out;
7753 ASSERT(anon_get_ptr(amp->ahp,
7754 anon_index) == NULL);
7755 (void) anon_set_ptr(amp->ahp,
7756 anon_index, ap, ANON_SLEEP);
7757 page_unlock(pp);
7759 anon_array_exit(&cookie);
7763 * Get name for page, accounting for
7764 * existence of private copy.
7766 ap = NULL;
7767 if (amp != NULL) {
7768 anon_array_enter(amp, anon_index, &cookie);
7769 ap = anon_get_ptr(amp->ahp, anon_index);
7770 if (ap != NULL) {
7771 swap_xlate(ap, &vp, &off);
7772 } else {
7773 if (svd->vp == NULL &&
7774 (svd->flags & MAP_NORESERVE)) {
7775 anon_array_exit(&cookie);
7776 ANON_LOCK_EXIT(&amp->a_rwlock);
7777 continue;
7779 vp = svd->vp;
7780 off = offset;
7782 if (op != MC_LOCK || ap == NULL) {
7783 anon_array_exit(&cookie);
7784 ANON_LOCK_EXIT(&amp->a_rwlock);
7786 } else {
7787 vp = svd->vp;
7788 off = offset;
7792 * Get page frame. It's ok if the page is
7793 * not available when we're unlocking, as this
7794 * may simply mean that a page we locked got
7795 * truncated out of existence after we locked it.
7797 * Invoke VOP_GETPAGE() to obtain the page struct
7798 * since we may need to read it from disk if it's
7799 * been paged out.
7801 if (op != MC_LOCK)
7802 pp = page_lookup(vp, off, SE_SHARED);
7803 else {
7804 page_t *pl[1 + 1];
7805 int error;
7807 ASSERT(vp != NULL);
7809 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7810 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7811 S_OTHER, svd->cred, NULL);
7813 if (error && ap != NULL) {
7814 anon_array_exit(&cookie);
7815 ANON_LOCK_EXIT(&amp->a_rwlock);
7819 * If the error is EDEADLK then we must bounce
7820 * up and drop all vm subsystem locks and then
7821 * retry the operation later.
7822 * This behavior is a temporary measure because
7823 * ufs/sds logging is badly designed and will
7824 * deadlock if we don't allow this bounce to
7825 * happen. The real solution is to re-design
7826 * the logging code to work properly. See bug
7827 * 4125102 for details of the problem.
7829 if (error == EDEADLK) {
7830 err = error;
7831 goto out;
7834 * Quit if we fail to fault in the page. Treat
7835 * the failure as an error, unless the addr
7836 * is mapped beyond the end of a file.
7838 if (error && svd->vp) {
7839 va.va_mask = AT_SIZE;
7840 if (VOP_GETATTR(svd->vp, &va, 0,
7841 svd->cred, NULL) != 0) {
7842 err = EIO;
7843 goto out;
7845 if (btopr(va.va_size) >=
7846 btopr(off + 1)) {
7847 err = EIO;
7848 goto out;
7850 goto out;
7852 } else if (error) {
7853 err = EIO;
7854 goto out;
7856 pp = pl[0];
7857 ASSERT(pp != NULL);
7861 * See Statement at the beginning of this routine.
7863 * claim is always set if MAP_PRIVATE and PROT_WRITE
7864 * irrespective of following factors:
7866 * (1) anon slots are populated or not
7867 * (2) cow is broken or not
7868 * (3) refcnt on ap is 1 or greater than 1
7870 * See 4140683 for details
7872 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7873 (svd->type == MAP_PRIVATE));
7876 * Perform the page-level operation appropriate to
7877 * the request. If locking, undo the SOFTLOCK
7878 * performed to bring the page into memory
7879 * after setting the lock. If unlocking,
7880 * and no page was found, account for the claim
7881 * separately.
7883 if (op == MC_LOCK) {
7884 int ret = 1; /* Assume success */
7886 ASSERT(!VPP_ISPPLOCK(vpp));
7888 ret = page_pp_lock(pp, claim, 0);
7889 if (ap != NULL) {
7890 if (ap->an_pvp != NULL) {
7891 anon_swap_free(ap, pp);
7893 anon_array_exit(&cookie);
7894 ANON_LOCK_EXIT(&amp->a_rwlock);
7896 if (ret == 0) {
7897 /* locking page failed */
7898 page_unlock(pp);
7899 err = EAGAIN;
7900 goto out;
7902 VPP_SETPPLOCK(vpp);
7903 if (sp != NULL) {
7904 if (pp->p_lckcnt == 1)
7905 locked_bytes += PAGESIZE;
7906 } else
7907 locked_bytes += PAGESIZE;
7909 if (lockmap != (ulong_t *)NULL)
7910 BT_SET(lockmap, pos);
7912 page_unlock(pp);
7913 } else {
7914 ASSERT(VPP_ISPPLOCK(vpp));
7915 if (pp != NULL) {
7916 /* sysV pages should be locked */
7917 ASSERT(sp == NULL || pp->p_lckcnt > 0);
7918 page_pp_unlock(pp, claim, 0);
7919 if (sp != NULL) {
7920 if (pp->p_lckcnt == 0)
7921 unlocked_bytes
7922 += PAGESIZE;
7923 } else
7924 unlocked_bytes += PAGESIZE;
7925 page_unlock(pp);
7926 } else {
7927 ASSERT(sp == NULL);
7928 unlocked_bytes += PAGESIZE;
7930 VPP_CLRPPLOCK(vpp);
7934 out:
7935 if (op == MC_LOCK) {
7936 /* Credit back bytes that did not get locked */
7937 if ((unlocked_bytes - locked_bytes) > 0) {
7938 if (proj == NULL)
7939 mutex_enter(&p->p_lock);
7940 rctl_decr_locked_mem(p, proj,
7941 (unlocked_bytes - locked_bytes), chargeproc);
7942 if (proj == NULL)
7943 mutex_exit(&p->p_lock);
7946 } else {
7947 /* Account bytes that were unlocked */
7948 if (unlocked_bytes > 0) {
7949 if (proj == NULL)
7950 mutex_enter(&p->p_lock);
7951 rctl_decr_locked_mem(p, proj, unlocked_bytes,
7952 chargeproc);
7953 if (proj == NULL)
7954 mutex_exit(&p->p_lock);
7957 if (sp != NULL)
7958 mutex_exit(&sp->shm_mlock);
7959 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7961 return (err);
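/*
 * Illustrative userland sketch (not part of this file): segvn_lockop()
 * is the backend of mlock(3C) and memcntl(2)/MC_LOCK. The
 * rctl_incr_locked_mem() call above is where the locked-memory
 * resource control is charged, so mlock can fail even when memory is
 * plentiful; on Solaris the caller also needs the proc_lock_memory
 * privilege.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	if (mlock(p, pgsz) != 0) {	/* drives the MC_LOCK path */
		perror("mlock");	/* e.g. EAGAIN on rctl failure */
		return (1);
	}
	p[0] = 1;			/* guaranteed resident now */
	if (munlock(p, pgsz) != 0)	/* drives the MC_UNLOCK path */
		perror("munlock");
	return (0);
}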
7965 * Set advice from user for specified pages
7966 * There are 9 types of advice:
7967 * MADV_NORMAL - Normal (default) behavior (whatever that is)
7968 * MADV_RANDOM - Random page references
7969 * do not allow readahead or 'klustering'
7970 * MADV_SEQUENTIAL - Sequential page references
7971 * Pages previous to the one currently being
7972 * accessed (determined by fault) are 'not needed'
7973 * and are freed immediately
7974 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
7975 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
7976 * MADV_FREE - Contents can be discarded
7977 * MADV_ACCESS_DEFAULT - Default access
7978 * MADV_ACCESS_LWP - Next LWP will access heavily
7979 * MADV_ACCESS_MANY - Many LWPs or processes will access heavily
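/*
 * Illustrative userland sketch (not part of this file): these advice
 * values arrive here from madvise(3C) (or memcntl(2) with MC_ADVISE).
 * Whole-segment advice is kept in svd->advice; advice on a sub-range
 * forces allocation of the per-page vpage array, as the code below
 * shows.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 16 * pgsz;
	caddr_t p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	if (madvise(p, len, MADV_RANDOM) != 0)	/* whole segment */
		perror("madvise");
	if (madvise(p + pgsz, pgsz, MADV_SEQUENTIAL) != 0)
		perror("madvise");		/* sub-range: vpage array */
	return (0);
}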
7981 static int
7982 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
7984 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7985 size_t page;
7986 int err = 0;
7987 int already_set;
7988 struct anon_map *amp;
7989 ulong_t anon_index;
7990 struct seg *next;
7991 lgrp_mem_policy_t policy;
7992 struct seg *prev;
7993 struct vnode *vp;
7995 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7998 * In case of MADV_FREE, we won't be modifying any segment private
7999 * data structures; so, we only need to grab READER's lock
8001 if (behav != MADV_FREE) {
8002 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8003 if (svd->tr_state != SEGVN_TR_OFF) {
8004 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8005 return (0);
8007 } else {
8008 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8012 * Large pages are assumed to be only turned on when accesses to the
8013 * segment's address range have spatial and temporal locality. That
8014 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8015 * Also, ignore advice affecting lgroup memory allocation
8016 * if we don't need to do lgroup optimizations on this system.
8019 if ((behav == MADV_SEQUENTIAL &&
8020 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
8021 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
8022 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
8023 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8024 return (0);
8027 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
8028 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
8030 * Since we are going to unload hat mappings
8031 * we first have to flush the cache. Otherwise
8032 * this might lead to a system panic if another
8033 * thread is doing physio on the range whose
8034 * mappings are unloaded by madvise(3C).
8036 if (svd->softlockcnt > 0) {
8038 * If this is a shared segment, a non-zero softlockcnt
8039 * means locked pages are still in use.
8041 if (svd->type == MAP_SHARED) {
8042 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8043 return (EAGAIN);
8046 * Since we hold the segvn writers lock,
8047 * nobody can fill the cache with entries
8048 * belonging to this seg during the purge.
8049 * The flush either succeeds or we still
8050 * have pending I/Os. In the latter case,
8051 * madvise(3C) fails.
8053 segvn_purge(seg);
8054 if (svd->softlockcnt > 0) {
8056 * Since madvise(3C) is advisory and
8057 * it's not part of UNIX98, madvise(3C)
8058 * failure here doesn't cause any hardship.
8059 * Note that we don't block in the "as" layer.
8061 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8062 return (EAGAIN);
8064 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
8065 svd->amp->a_softlockcnt > 0) {
8067 * Try to purge this amp's entries from pcache. It
8068 * will succeed only if other segments that share the
8069 * amp have no outstanding softlock's.
8071 segvn_purge(seg);
8075 amp = svd->amp;
8076 vp = svd->vp;
8077 if (behav == MADV_FREE) {
8079 * MADV_FREE is not supported for segments with an
8080 * underlying object; if the anonmap is NULL, anon slots
8081 * are not yet populated and there is nothing for
8082 * us to do. As MADV_FREE is advisory, we don't
8083 * return an error in either case.
8085 if (vp != NULL || amp == NULL) {
8086 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8087 return (0);
8090 segvn_purge(seg);
8092 page = seg_page(seg, addr);
8093 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8094 anon_disclaim(amp, svd->anon_index + page, len);
8095 ANON_LOCK_EXIT(&amp->a_rwlock);
8096 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8097 return (0);
8101 * If the advice is to be applied to the entire segment,
8102 * use the advice field in the seg_data structure;
8103 * otherwise use the appropriate vpage entry.
8105 if ((addr == seg->s_base) && (len == seg->s_size)) {
8106 switch (behav) {
8107 case MADV_ACCESS_LWP:
8108 case MADV_ACCESS_MANY:
8109 case MADV_ACCESS_DEFAULT:
8111 * Set memory allocation policy for this segment
8113 policy = lgrp_madv_to_policy(behav, len, svd->type);
8114 if (svd->type == MAP_SHARED)
8115 already_set = lgrp_shm_policy_set(policy, amp,
8116 svd->anon_index, vp, svd->offset, len);
8117 else {
8119 * For private memory, need writers lock on
8120 * address space because the segment may be
8121 * split or concatenated when changing policy
8123 if (AS_READ_HELD(seg->s_as,
8124 &seg->s_as->a_lock)) {
8125 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8126 return (IE_RETRY);
8129 already_set = lgrp_privm_policy_set(policy,
8130 &svd->policy_info, len);
8134 * If policy set already and it shouldn't be reapplied,
8135 * don't do anything.
8137 if (already_set &&
8138 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8139 break;
8142 * Mark any existing pages in given range for
8143 * migration
8145 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8146 vp, svd->offset, 1);
8149 * If same policy set already or this is a shared
8150 * memory segment, don't need to try to concatenate
8151 * segment with adjacent ones.
8153 if (already_set || svd->type == MAP_SHARED)
8154 break;
8157 * Try to concatenate this segment with previous
8158 * one and next one, since we changed policy for
8159 * this one and it may be compatible with adjacent
8160 * ones now.
8162 prev = AS_SEGPREV(seg->s_as, seg);
8163 next = AS_SEGNEXT(seg->s_as, seg);
8165 if (next && next->s_ops == &segvn_ops &&
8166 addr + len == next->s_base)
8167 (void) segvn_concat(seg, next, 1);
8169 if (prev && prev->s_ops == &segvn_ops &&
8170 addr == prev->s_base + prev->s_size) {
8172 * Drop lock for private data of current
8173 * segment before concatenating (deleting) it
8174 * and return IE_REATTACH to tell as_ctl() that
8175 * current segment has changed
8177 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8178 if (!segvn_concat(prev, seg, 1))
8179 err = IE_REATTACH;
8181 return (err);
8183 break;
8185 case MADV_SEQUENTIAL:
8187 * unloading the mapping guarantees
8188 * detection in segvn_fault()
8190 ASSERT(seg->s_szc == 0);
8191 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8192 hat_unload(seg->s_as->a_hat, addr, len,
8193 HAT_UNLOAD);
8194 /* FALLTHROUGH */
8195 case MADV_NORMAL:
8196 case MADV_RANDOM:
8197 svd->advice = (uchar_t)behav;
8198 svd->pageadvice = 0;
8199 break;
8200 case MADV_WILLNEED: /* handled in memcntl */
8201 case MADV_DONTNEED: /* handled in memcntl */
8202 case MADV_FREE: /* handled above */
8203 break;
8204 default:
8205 err = EINVAL;
8207 } else {
8208 caddr_t eaddr;
8209 struct seg *new_seg;
8210 struct segvn_data *new_svd;
8211 u_offset_t off;
8212 caddr_t oldeaddr;
8214 page = seg_page(seg, addr);
8216 segvn_vpage(seg);
8218 switch (behav) {
8219 struct vpage *bvpp, *evpp;
8221 case MADV_ACCESS_LWP:
8222 case MADV_ACCESS_MANY:
8223 case MADV_ACCESS_DEFAULT:
8225 * Set memory allocation policy for portion of this
8226 * segment
8230 * Align address and length of advice to page
8231 * boundaries for large pages
8233 if (seg->s_szc != 0) {
8234 size_t pgsz;
8236 pgsz = page_get_pagesize(seg->s_szc);
8237 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8238 len = P2ROUNDUP(len, pgsz);
8242 * Check to see whether policy is set already
8244 policy = lgrp_madv_to_policy(behav, len, svd->type);
8246 anon_index = svd->anon_index + page;
8247 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8249 if (svd->type == MAP_SHARED)
8250 already_set = lgrp_shm_policy_set(policy, amp,
8251 anon_index, vp, off, len);
8252 else
8253 already_set =
8254 (policy == svd->policy_info.mem_policy);
8257 * If policy set already and it shouldn't be reapplied,
8258 * don't do anything.
8260 if (already_set &&
8261 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8262 break;
8265 * For private memory, need writers lock on
8266 * address space because the segment may be
8267 * split or concatenated when changing policy
8269 if (svd->type == MAP_PRIVATE &&
8270 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8271 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8272 return (IE_RETRY);
8276 * Mark any existing pages in given range for
8277 * migration
8279 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8280 vp, svd->offset, 1);
8283 * Don't need to try to split or concatenate
8284 * segments, since policy is same or this is a shared
8285 * memory segment
8287 if (already_set || svd->type == MAP_SHARED)
8288 break;
8290 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8291 ASSERT(svd->amp == NULL);
8292 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8293 ASSERT(svd->softlockcnt == 0);
8294 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8295 HAT_REGION_TEXT);
8296 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8300 * Split off a new segment if the advice only applies to a
8301 * portion of the existing segment starting in the middle
8303 new_seg = NULL;
8304 eaddr = addr + len;
8305 oldeaddr = seg->s_base + seg->s_size;
8306 if (addr > seg->s_base) {
8308 * Must flush I/O page cache
8309 * before splitting segment
8311 if (svd->softlockcnt > 0)
8312 segvn_purge(seg);
8315 * Split segment and return IE_REATTACH to tell
8316 * as_ctl() that current segment changed
8318 new_seg = segvn_split_seg(seg, addr);
8319 new_svd = (struct segvn_data *)new_seg->s_data;
8320 err = IE_REATTACH;
8323 * If new segment ends where old one
8324 * did, try to concatenate the new
8325 * segment with next one.
8327 if (eaddr == oldeaddr) {
8329 * Set policy for new segment
8331 (void) lgrp_privm_policy_set(policy,
8332 &new_svd->policy_info,
8333 new_seg->s_size);
8335 next = AS_SEGNEXT(new_seg->s_as,
8336 new_seg);
8338 if (next &&
8339 next->s_ops == &segvn_ops &&
8340 eaddr == next->s_base)
8341 (void) segvn_concat(new_seg,
8342 next, 1);
8347 * Split off the end of the existing segment if the advice only
8348 * applies to a portion of the segment ending before the
8349 * end of the existing segment
8351 if (eaddr < oldeaddr) {
8353 * Must flush I/O page cache
8354 * before splitting segment
8356 if (svd->softlockcnt > 0)
8357 segvn_purge(seg);
8360 * If beginning of old segment was already
8361 * split off, use new segment to split end off
8362 * from.
8364 if (new_seg != NULL && new_seg != seg) {
8366 * Split segment
8368 (void) segvn_split_seg(new_seg, eaddr);
8371 * Set policy for new segment
8373 (void) lgrp_privm_policy_set(policy,
8374 &new_svd->policy_info,
8375 new_seg->s_size);
8376 } else {
8378 * Split segment and return IE_REATTACH
8379 * to tell as_ctl() that current
8380 * segment changed
8382 (void) segvn_split_seg(seg, eaddr);
8383 err = IE_REATTACH;
8385 (void) lgrp_privm_policy_set(policy,
8386 &svd->policy_info, seg->s_size);
8389 * If new segment starts where old one
8390 * did, try to concatenate it with
8391 * previous segment.
8393 if (addr == seg->s_base) {
8394 prev = AS_SEGPREV(seg->s_as,
8395 seg);
8398 * Drop lock for private data
8399 * of current segment before
8400 * concatenating (deleting) it
8402 if (prev &&
8403 prev->s_ops ==
8404 &segvn_ops &&
8405 addr == prev->s_base +
8406 prev->s_size) {
8407 SEGVN_LOCK_EXIT(
8408 seg->s_as,
8409 &svd->lock);
8410 (void) segvn_concat(
8411 prev, seg, 1);
8412 return (err);
8417 break;
8418 case MADV_SEQUENTIAL:
8419 ASSERT(seg->s_szc == 0);
8420 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8421 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8422 /* FALLTHROUGH */
8423 case MADV_NORMAL:
8424 case MADV_RANDOM:
8425 bvpp = &svd->vpage[page];
8426 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8427 for (; bvpp < evpp; bvpp++)
8428 VPP_SETADVICE(bvpp, behav);
8429 svd->advice = MADV_NORMAL;
8430 break;
8431 case MADV_WILLNEED: /* handled in memcntl */
8432 case MADV_DONTNEED: /* handled in memcntl */
8433 case MADV_FREE: /* handled above */
8434 break;
8435 default:
8436 err = EINVAL;
8439 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8440 return (err);
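* Userland sketch (illustrative, not part of this file; fd is assumed
* to be an open file descriptor): the paths above are driven by
* madvise(3C) on a mapped range, e.g.
*
*	#include <sys/types.h>
*	#include <sys/mman.h>
*
*	caddr_t p = mmap(NULL, 8 * 1024 * 1024, PROT_READ,
*	    MAP_PRIVATE, fd, 0);
*	(void) madvise(p, 8 * 1024 * 1024, MADV_SEQUENTIAL);
*	(void) madvise(p + 4096, 8192, MADV_RANDOM);
*
* The first madvise() covers the whole segment and takes the fast path
* that records advice in svd->advice; the second covers a sub-range,
* which forces segvn_vpage() allocation and per-page advice as above.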
8444 * Create a vpage structure for this seg.
8446 static void
8447 segvn_vpage(struct seg *seg)
8449 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8450 struct vpage *vp, *evp;
8452 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8455 * If no vpage structure exists, allocate one. Copy the protections
8456 * and the advice from the segment itself to the individual pages.
8458 if (svd->vpage == NULL) {
8459 svd->pageadvice = 1;
8460 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage),
8461 KM_SLEEP);
8462 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8463 for (vp = svd->vpage; vp < evp; vp++) {
8464 VPP_SETPROT(vp, svd->prot);
8465 VPP_SETADVICE(vp, svd->advice);
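* Sizing sketch (illustrative arithmetic): for an 8M segment on a
* machine with 4K base pages, seg_pages(seg) == 2048, so the
* kmem_zalloc() above allocates 2048 struct vpage entries, each
* seeded with the segment-wide protections and advice.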
8471 * Dump the pages belonging to this segvn segment.
8473 static void
8474 segvn_dump(struct seg *seg)
8476 struct segvn_data *svd;
8477 page_t *pp;
8478 struct anon_map *amp;
8479 ulong_t anon_index;
8480 struct vnode *vp;
8481 u_offset_t off, offset;
8482 pfn_t pfn;
8483 pgcnt_t page, npages;
8484 caddr_t addr;
8486 npages = seg_pages(seg);
8487 svd = (struct segvn_data *)seg->s_data;
8488 vp = svd->vp;
8489 off = offset = svd->offset;
8490 addr = seg->s_base;
8492 if ((amp = svd->amp) != NULL) {
8493 anon_index = svd->anon_index;
8494 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8497 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8498 struct anon *ap;
8499 int we_own_it = 0;
8501 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8502 swap_xlate_nopanic(ap, &vp, &off);
8503 } else {
8504 vp = svd->vp;
8505 off = offset;
8509 * If pp == NULL, the page either does not exist
8510 * or is exclusively locked. So determine if it
8511 * exists before searching for it.
8514 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8515 we_own_it = 1;
8516 else
8517 pp = page_exists(vp, off);
8519 if (pp) {
8520 pfn = page_pptonum(pp);
8521 dump_addpage(seg->s_as, addr, pfn);
8522 if (we_own_it)
8523 page_unlock(pp);
8525 addr += PAGESIZE;
8526 dump_timeleft = dump_timeout;
8529 if (amp != NULL)
8530 ANON_LOCK_EXIT(&amp->a_rwlock);
8533 #ifdef DEBUG
8534 static uint32_t segvn_pglock_mtbf = 0;
8535 #endif
8537 #define PCACHE_SHWLIST ((page_t *)-2)
8538 #define NOPCACHE_SHWLIST ((page_t *)-1)
8541 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8542 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8543 * to avoid the overhead of per-page locking and unlocking for subsequent IOs to
8544 * the same parts of the segment. Currently shadow list creation is only
8545 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8546 * tagged with segment pointer, starting virtual address and length. This
8547 * approach for MAP_SHARED segments may add many pcache entries for the same
8548 * set of pages and lead to long hash chains that decrease pcache lookup
8549 * performance. To avoid this issue for shared segments, the shared anon map
8550 * and starting anon index are used for pcache entry tagging. This allows all
8551 * segments to share pcache entries for the same anon range and reduces pcache
8552 * chain length as well as memory overhead from duplicate shadow lists and
8553 * pcache entries.
8555 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8556 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8557 * part of softlockcnt accounting is done differently for private and shared
8558 * segments. In the private segment case softlockcnt is only incremented when a new
8559 * shadow list is created but not when an existing one is found via
8560 * seg_plookup(). pcache entries have reference count incremented/decremented
8561 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8562 * reference count can be purged (and purging is needed before segment can be
8563 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8564 * decrement softlockcnt. Since in private segment case each of its pcache
8565 * entries only belongs to this segment we can expect that when
8566 * segvn_pagelock(L_PAGEUNLOCK) has been called for all outstanding IOs in this
8567 * segment, purge will succeed and softlockcnt will drop to 0. In the shared
8568 * segment case the reference count in a pcache entry counts active locks from many
8569 * different segments so we can't expect segment purging to succeed even when
8570 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8571 * segment. To be able to determine when there are no pending pagelocks in the
8572 * shared segment case, we don't rely on purging to make softlockcnt drop to 0
8573 * but instead softlockcnt is incremented and decremented for every
8574 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
8575 * list was created or an existing one was found. When softlockcnt drops to 0
8576 * this segment no longer has any claims for pcached shadow lists and the
8577 * segment can be freed even if there are still active pcache entries
8578 * shared by this segment anon map. Shared segment pcache entries belong to
8579 * anon map and are typically removed when anon map is freed after all
8580 * processes destroy the segments that use this anon map.
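*
* Caller-level sketch (illustrative; a physio-style consumer goes
* through the as layer rather than calling this entry point directly):
*
*	struct page **pplist;
*
*	if (as_pagelock(as, &pplist, addr, len, S_WRITE) == 0) {
*		... do the IO against the locked pages ...
*		as_pageunlock(as, pplist, addr, len, S_WRITE);
*	}
*
* The first as_pagelock() over a range builds a shadow list and may
* insert it into pcache; later IOs over the same range find it via
* seg_plookup() and skip the per-page work.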
8582 static int
8583 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8584 enum lock_type type, enum seg_rw rw)
8586 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8587 size_t np;
8588 pgcnt_t adjustpages;
8589 pgcnt_t npages;
8590 ulong_t anon_index;
8591 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8592 uint_t error;
8593 struct anon_map *amp;
8594 pgcnt_t anpgcnt;
8595 struct page **pplist, **pl, *pp;
8596 caddr_t a;
8597 size_t page;
8598 caddr_t lpgaddr, lpgeaddr;
8599 anon_sync_obj_t cookie;
8600 int anlock;
8601 struct anon_map *pamp;
8602 caddr_t paddr;
8603 seg_preclaim_cbfunc_t preclaim_callback;
8604 size_t pgsz;
8605 int use_pcache;
8606 size_t wlen;
8607 uint_t pflags = 0;
8608 int sftlck_sbase = 0;
8609 int sftlck_send = 0;
8611 #ifdef DEBUG
8612 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8613 hrtime_t ts = gethrtime();
8614 if ((ts % segvn_pglock_mtbf) == 0) {
8615 return (ENOTSUP);
8617 if ((ts % segvn_pglock_mtbf) == 1) {
8618 return (EFAULT);
8621 #endif
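* Fault-injection sketch (DEBUG kernels only, illustrative numbers):
* with segvn_pglock_mtbf patched to 1000, gethrtime() % 1000 == 0 on
* roughly 1 in 1000 L_PAGELOCK calls returns ENOTSUP, and == 1 on
* another 1 in 1000 returns EFAULT, exercising the callers' slow
* paths.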
8623 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8624 "segvn_pagelock: start seg %p addr %p", seg, addr);
8626 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8627 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8629 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8632 * for now we only support pagelock to anon memory. We would have to
8633 * check protections for vnode objects and call into the vnode driver.
8634 * That's too much for a fast path. Let the fault entry point handle
8635 * it.
8637 if (svd->vp != NULL) {
8638 if (type == L_PAGELOCK) {
8639 error = ENOTSUP;
8640 goto out;
8642 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8644 if ((amp = svd->amp) == NULL) {
8645 if (type == L_PAGELOCK) {
8646 error = EFAULT;
8647 goto out;
8649 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8651 if (rw != S_READ && rw != S_WRITE) {
8652 if (type == L_PAGELOCK) {
8653 error = ENOTSUP;
8654 goto out;
8656 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8659 if (seg->s_szc != 0) {
8661 * We are adjusting the pagelock region to the large page size
8662 * boundary because the unlocked part of a large page cannot
8663 * be freed anyway unless all constituent pages of a large
8664 * page are locked. Bigger regions reduce pcache chain length
8665 * and improve lookup performance. The tradeoff is that the
8666 * very first segvn_pagelock() call for a given page is more
8667 * expensive if only 1 page_t is needed for IO. This is only
8668 * an issue if pcache entry doesn't get reused by several
8669 * subsequent calls. We optimize here for the case when pcache
8670 * is heavily used by repeated IOs to the same address range.
8672 * Note the segment's page size cannot change while we are holding
8673 * the as lock, nor can it change while softlockcnt is
8674 * not 0. This allows us to correctly recalculate the large
8675 * page size region for the matching pageunlock/reclaim call
8676 * since as_pageunlock() caller must always match
8677 * as_pagelock() call's addr and len.
8679 * For pageunlock, *ppp points to the pointer of the page_t that
8680 * corresponds to the real unadjusted start address. Similarly,
8681 * for pagelock, *ppp must point to the pointer of the page_t that
8682 * corresponds to the real unadjusted start address.
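*
* Worked example (illustrative numbers): with a 4M large page size,
* addr == 0x40a000 and len == 0x2000 give lpgaddr == 0x400000 and
* lpgeaddr == 0x800000; adjustpages == btop(0xa000) == 10 base pages
* on a 4K-page system, so the returned *ppp points 10 entries into
* the adjusted shadow list.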
8684 pgsz = page_get_pagesize(seg->s_szc);
8685 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8686 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8687 } else if (len < segvn_pglock_comb_thrshld) {
8688 lpgaddr = addr;
8689 lpgeaddr = addr + len;
8690 adjustpages = 0;
8691 pgsz = PAGESIZE;
8692 } else {
8694 * Align the address range of large enough requests to allow
8695 * combining of different shadow lists into 1 to reduce memory
8696 * overhead from potentially overlapping large shadow lists
8697 * (worst case is we have a 1MB IO into buffers with start
8698 * addresses separated by 4K). Alignment is only possible if
8699 * padded chunks have sufficient access permissions. Note
8700 * permissions won't change between L_PAGELOCK and
8701 * L_PAGEUNLOCK calls since a non-zero softlockcnt will force
8702 * segvn_setprot() to wait until softlockcnt drops to 0. This
8703 * allows us to determine in L_PAGEUNLOCK the same range we
8704 * computed in L_PAGELOCK.
8706 * If alignment is limited by the segment ends, set
8707 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8708 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8709 * per segment counters. In L_PAGEUNLOCK case decrease
8710 * softlockcnt_sbase/softlockcnt_send counters if
8711 * sftlck_sbase/sftlck_send flags are set. When
8712 * softlockcnt_sbase/softlockcnt_send are non-zero,
8713 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8714 * won't merge the segments. This restriction, combined with the
8715 * restriction on segment unmapping and splitting for segments
8716 * that have a non-zero softlockcnt, allows L_PAGEUNLOCK to
8717 * correctly determine the same range that was previously
8718 * locked by matching L_PAGELOCK.
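*
* Worked example (illustrative numbers): with
* segvn_pglock_comb_balign == 1M, two 1M IOs into private buffers
* starting at 0x401000 and 0x4ff000 both align to the range
* [0x400000, 0x600000), so the second request can reuse the shadow
* list the first one inserted instead of building an overlapping one.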
8720 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8721 pgsz = PAGESIZE;
8722 if (svd->type == MAP_PRIVATE) {
8723 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8724 segvn_pglock_comb_balign);
8725 if (lpgaddr < seg->s_base) {
8726 lpgaddr = seg->s_base;
8727 sftlck_sbase = 1;
8729 } else {
8730 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8731 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8732 if (aaix < svd->anon_index) {
8733 lpgaddr = seg->s_base;
8734 sftlck_sbase = 1;
8735 } else {
8736 lpgaddr = addr - ptob(aix - aaix);
8737 ASSERT(lpgaddr >= seg->s_base);
8740 if (svd->pageprot && lpgaddr != addr) {
8741 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8742 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8743 while (vp < evp) {
8744 if ((VPP_PROT(vp) & protchk) == 0) {
8745 break;
8747 vp++;
8749 if (vp < evp) {
8750 lpgaddr = addr;
8751 pflags = 0;
8754 lpgeaddr = addr + len;
8755 if (pflags) {
8756 if (svd->type == MAP_PRIVATE) {
8757 lpgeaddr = (caddr_t)P2ROUNDUP(
8758 (uintptr_t)lpgeaddr,
8759 segvn_pglock_comb_balign);
8760 } else {
8761 ulong_t aix = svd->anon_index +
8762 seg_page(seg, lpgeaddr);
8763 ulong_t aaix = P2ROUNDUP(aix,
8764 segvn_pglock_comb_palign);
8765 if (aaix < aix) {
8766 lpgeaddr = 0;
8767 } else {
8768 lpgeaddr += ptob(aaix - aix);
8771 if (lpgeaddr == 0 ||
8772 lpgeaddr > seg->s_base + seg->s_size) {
8773 lpgeaddr = seg->s_base + seg->s_size;
8774 sftlck_send = 1;
8777 if (svd->pageprot && lpgeaddr != addr + len) {
8778 struct vpage *vp;
8779 struct vpage *evp;
8781 vp = &svd->vpage[seg_page(seg, addr + len)];
8782 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8784 while (vp < evp) {
8785 if ((VPP_PROT(vp) & protchk) == 0) {
8786 break;
8788 vp++;
8790 if (vp < evp) {
8791 lpgeaddr = addr + len;
8794 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8798 * For MAP_SHARED segments we create pcache entries tagged by amp and
8799 * anon index so that we can share pcache entries with other segments
8800 * that map this amp. For private segments pcache entries are tagged
8801 * with segment and virtual address.
8803 if (svd->type == MAP_SHARED) {
8804 pamp = amp;
8805 paddr = (caddr_t)((lpgaddr - seg->s_base) +
8806 ptob(svd->anon_index));
8807 preclaim_callback = shamp_reclaim;
8808 } else {
8809 pamp = NULL;
8810 paddr = lpgaddr;
8811 preclaim_callback = segvn_reclaim;
8814 if (type == L_PAGEUNLOCK) {
8815 VM_STAT_ADD(segvnvmstats.pagelock[0]);
8818 * update hat ref bits for /proc. We need to make sure
8819 * that threads tracing the ref and mod bits of the
8820 * address space get the right data.
8821 * Note: page ref and mod bits are updated at reclaim time
8823 if (seg->s_as->a_vbits) {
8824 for (a = addr; a < addr + len; a += PAGESIZE) {
8825 if (rw == S_WRITE) {
8826 hat_setstat(seg->s_as, a,
8827 PAGESIZE, P_REF | P_MOD);
8828 } else {
8829 hat_setstat(seg->s_as, a,
8830 PAGESIZE, P_REF);
8836 * Check the shadow list entry after the last page used in
8837 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
8838 * was not inserted into pcache and is not large page
8839 * adjusted. In this case call reclaim callback directly and
8840 * don't adjust the shadow list start and size for large
8841 * pages.
8843 npages = btop(len);
8844 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
8845 void *ptag;
8846 if (pamp != NULL) {
8847 ASSERT(svd->type == MAP_SHARED);
8848 ptag = (void *)pamp;
8849 paddr = (caddr_t)((addr - seg->s_base) +
8850 ptob(svd->anon_index));
8851 } else {
8852 ptag = (void *)seg;
8853 paddr = addr;
8855 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
8856 } else {
8857 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
8858 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
8859 len = lpgeaddr - lpgaddr;
8860 npages = btop(len);
8861 seg_pinactive(seg, pamp, paddr, len,
8862 *ppp - adjustpages, rw, pflags, preclaim_callback);
8865 if (pamp != NULL) {
8866 ASSERT(svd->type == MAP_SHARED);
8867 ASSERT(svd->softlockcnt >= npages);
8868 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
8871 if (sftlck_sbase) {
8872 ASSERT(svd->softlockcnt_sbase > 0);
8873 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, -1);
8875 if (sftlck_send) {
8876 ASSERT(svd->softlockcnt_send > 0);
8877 atomic_add_long((ulong_t *)&svd->softlockcnt_send, -1);
8881 * If someone is blocked while unmapping, we purge
8882 * segment page cache and thus reclaim pplist synchronously
8883 * without waiting for seg_pasync_thread. This speeds up
8884 * unmapping in cases where munmap(2) is called while
8885 * raw async i/o is still in progress, or where a thread
8886 * exits on data fault in a multithreaded application.
8888 if (AS_ISUNMAPWAIT(seg->s_as)) {
8889 if (svd->softlockcnt == 0) {
8890 mutex_enter(&seg->s_as->a_contents);
8891 if (AS_ISUNMAPWAIT(seg->s_as)) {
8892 AS_CLRUNMAPWAIT(seg->s_as);
8893 cv_broadcast(&seg->s_as->a_cv);
8895 mutex_exit(&seg->s_as->a_contents);
8896 } else if (pamp == NULL) {
8898 * softlockcnt is not 0 and this is a
8899 * MAP_PRIVATE segment. Try to purge its
8900 * pcache entries to reduce softlockcnt.
8901 * If it drops to 0 segvn_reclaim()
8902 * will wake up a thread waiting on
8903 * unmapwait flag.
8905 * We don't purge MAP_SHARED segments with a non-zero
8906 * softlockcnt since IO is still in progress
8907 * for such segments.
8909 ASSERT(svd->type == MAP_PRIVATE);
8910 segvn_purge(seg);
8913 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8914 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
8915 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
8916 return (0);
8919 /* The L_PAGELOCK case ... */
8921 VM_STAT_ADD(segvnvmstats.pagelock[1]);
8924 * For MAP_SHARED segments we have to check protections before
8925 * seg_plookup() since pcache entries may be shared by many segments
8926 * with potentially different page protections.
8928 if (pamp != NULL) {
8929 ASSERT(svd->type == MAP_SHARED);
8930 if (svd->pageprot == 0) {
8931 if ((svd->prot & protchk) == 0) {
8932 error = EACCES;
8933 goto out;
8935 } else {
8937 * check page protections
8939 caddr_t ea;
8941 if (seg->s_szc) {
8942 a = lpgaddr;
8943 ea = lpgeaddr;
8944 } else {
8945 a = addr;
8946 ea = addr + len;
8948 for (; a < ea; a += pgsz) {
8949 struct vpage *vp;
8951 ASSERT(seg->s_szc == 0 ||
8952 sameprot(seg, a, pgsz));
8953 vp = &svd->vpage[seg_page(seg, a)];
8954 if ((VPP_PROT(vp) & protchk) == 0) {
8955 error = EACCES;
8956 goto out;
8963 * try to find pages in segment page cache
8965 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
8966 if (pplist != NULL) {
8967 if (pamp != NULL) {
8968 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
8969 ASSERT(svd->type == MAP_SHARED);
8970 atomic_add_long((ulong_t *)&svd->softlockcnt,
8971 npages);
8973 if (sftlck_sbase) {
8974 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
8976 if (sftlck_send) {
8977 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
8979 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8980 *ppp = pplist + adjustpages;
8981 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
8982 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
8983 return (0);
8987 * For MAP_SHARED segments we already verified above that segment
8988 * protections allow this pagelock operation.
8990 if (pamp == NULL) {
8991 ASSERT(svd->type == MAP_PRIVATE);
8992 if (svd->pageprot == 0) {
8993 if ((svd->prot & protchk) == 0) {
8994 error = EACCES;
8995 goto out;
8997 if (svd->prot & PROT_WRITE) {
8998 wlen = lpgeaddr - lpgaddr;
8999 } else {
9000 wlen = 0;
9001 ASSERT(rw == S_READ);
9003 } else {
9004 int wcont = 1;
9006 * check page protections
9008 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9009 struct vpage *vp;
9011 ASSERT(seg->s_szc == 0 ||
9012 sameprot(seg, a, pgsz));
9013 vp = &svd->vpage[seg_page(seg, a)];
9014 if ((VPP_PROT(vp) & protchk) == 0) {
9015 error = EACCES;
9016 goto out;
9018 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9019 wlen += pgsz;
9020 } else {
9021 wcont = 0;
9022 ASSERT(rw == S_READ);
9026 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9027 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9031 * Only build large page adjusted shadow list if we expect to insert
9032 * it into pcache. For large enough pages it's a big overhead to
9033 * create a shadow list of the entire large page. But this overhead
9034 * should be amortized over repeated pcache hits on subsequent reuse
9035 * of this shadow list (IO into any range within this shadow list will
9036 * find it in pcache since we large page align the request for pcache
9037 * lookups). pcache performance is improved with bigger shadow lists
9038 * as it reduces the time to pcache the entire big segment and reduces
9039 * pcache chain length.
9041 if (seg_pinsert_check(seg, pamp, paddr,
9042 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9043 addr = lpgaddr;
9044 len = lpgeaddr - lpgaddr;
9045 use_pcache = 1;
9046 } else {
9047 use_pcache = 0;
9049 * Since this entry will not be inserted into the pcache, we
9050 * will not do any adjustments to the starting address or
9051 * size of the memory to be locked.
9053 adjustpages = 0;
9055 npages = btop(len);
9057 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9058 pl = pplist;
9059 *ppp = pplist + adjustpages;
9061 * If use_pcache is 0 this shadow list is not large page adjusted.
9062 * Record this info in the last entry of shadow array so that
9063 * L_PAGEUNLOCK can determine if it should large page adjust the
9064 * address range to find the real range that was locked.
9066 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9068 page = seg_page(seg, addr);
9069 anon_index = svd->anon_index + page;
9071 anlock = 0;
9072 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9073 ASSERT(amp->a_szc >= seg->s_szc);
9074 anpgcnt = page_get_pagecnt(amp->a_szc);
9075 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9076 struct anon *ap;
9077 struct vnode *vp;
9078 u_offset_t off;
9081 * Lock and unlock anon array only once per large page.
9082 * anon_array_enter() locks the root anon slot according to
9083 * a_szc which can't change while anon map is locked. We lock
9084 * anon the first time through this loop and each time we
9085 * reach anon index that corresponds to a root of a large
9086 * page.
9088 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9089 ASSERT(anlock == 0);
9090 anon_array_enter(amp, anon_index, &cookie);
9091 anlock = 1;
9093 ap = anon_get_ptr(amp->ahp, anon_index);
9096 * We must never use seg_pcache for COW pages
9097 * because we might end up with the original page still
9098 * lying in seg_pcache even after the private page is
9099 * created. This leads to data corruption as
9100 * aio_write refers to the page still in the cache
9101 * while all other accesses refer to the private
9102 * page.
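*
* Illustrative interleaving of the bug being avoided: an
* aio_write() caches a shadow list holding anon page A while
* A is still COW-shared (an_refcnt > 1); a later store from
* the process COW-faults and installs private page B, after
* which the cached IO would still target A while every other
* access sees B.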
9104 if (ap == NULL || ap->an_refcnt != 1) {
9105 struct vpage *vpage;
9107 if (seg->s_szc) {
9108 error = EFAULT;
9109 break;
9111 if (svd->vpage != NULL) {
9112 vpage = &svd->vpage[seg_page(seg, a)];
9113 } else {
9114 vpage = NULL;
9116 ASSERT(anlock);
9117 anon_array_exit(&cookie);
9118 anlock = 0;
9119 pp = NULL;
9120 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9121 vpage, &pp, 0, F_INVAL, rw, 1);
9122 if (error) {
9123 error = fc_decode(error);
9124 break;
9126 anon_array_enter(amp, anon_index, &cookie);
9127 anlock = 1;
9128 ap = anon_get_ptr(amp->ahp, anon_index);
9129 if (ap == NULL || ap->an_refcnt != 1) {
9130 error = EFAULT;
9131 break;
9134 swap_xlate(ap, &vp, &off);
9135 pp = page_lookup_nowait(vp, off, SE_SHARED);
9136 if (pp == NULL) {
9137 error = EFAULT;
9138 break;
9140 if (ap->an_pvp != NULL) {
9141 anon_swap_free(ap, pp);
9144 * Unlock anon if this is the last slot in a large page.
9146 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9147 ASSERT(anlock);
9148 anon_array_exit(&cookie);
9149 anlock = 0;
9151 *pplist++ = pp;
9153 if (anlock) { /* Ensure the lock is dropped */
9154 anon_array_exit(&cookie);
9156 ANON_LOCK_EXIT(&amp->a_rwlock);
9158 if (a >= addr + len) {
9159 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9160 if (pamp != NULL) {
9161 ASSERT(svd->type == MAP_SHARED);
9162 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9163 npages);
9164 wlen = len;
9166 if (sftlck_sbase) {
9167 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
9169 if (sftlck_send) {
9170 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
9172 if (use_pcache) {
9173 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9174 rw, pflags, preclaim_callback);
9176 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9177 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9178 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9179 return (0);
9182 pplist = pl;
9183 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9184 while (np > (uint_t)0) {
9185 ASSERT(PAGE_LOCKED(*pplist));
9186 page_unlock(*pplist);
9187 np--;
9188 pplist++;
9190 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9191 out:
9192 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9193 *ppp = NULL;
9194 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9195 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9196 return (error);
9200 * purge any cached pages in the I/O page cache
9202 static void
9203 segvn_purge(struct seg *seg)
9205 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9208 * pcache is only used by pure anon segments.
9210 if (svd->amp == NULL || svd->vp != NULL) {
9211 return;
9215 * For MAP_SHARED segments a non-zero softlockcnt means
9216 * active IO is still in progress via this segment. So we only
9217 * purge MAP_SHARED segments when their softlockcnt is 0.
9219 if (svd->type == MAP_PRIVATE) {
9220 if (svd->softlockcnt) {
9221 seg_ppurge(seg, NULL, 0);
9223 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9224 seg_ppurge(seg, svd->amp, 0);
9229 * If the async argument is not 0 we are called from the pcache async thread
9230 * and don't hold the AS lock.
9233 /*ARGSUSED*/
9234 static int
9235 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9236 enum seg_rw rw, int async)
9238 struct seg *seg = (struct seg *)ptag;
9239 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9240 pgcnt_t np, npages;
9241 struct page **pl;
9243 npages = np = btop(len);
9244 ASSERT(npages);
9246 ASSERT(svd->vp == NULL && svd->amp != NULL);
9247 ASSERT(svd->softlockcnt >= npages);
9248 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9250 pl = pplist;
9252 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9253 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9255 while (np > (uint_t)0) {
9256 if (rw == S_WRITE) {
9257 hat_setrefmod(*pplist);
9258 } else {
9259 hat_setref(*pplist);
9261 page_unlock(*pplist);
9262 np--;
9263 pplist++;
9266 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9269 * If we are the pcache async thread we don't hold the AS lock. This means
9270 * that if softlockcnt drops to 0 after the decrement below, the address
9271 * space may get freed. We can't allow that since after the softlockcnt
9272 * decrement to 0 we still need to access the as structure for a possible
9273 * wakeup of unmap waiters. To prevent the disappearance of the as we take
9274 * this segment's segfree_syncmtx. segvn_free() also takes this mutex as a
9275 * barrier to make sure this routine completes before the segment is freed.
9277 * The second complication we have to deal with in the async case is the
9278 * possibility of a missed wakeup of an unmap wait thread. Since we don't
9279 * hold the as lock here, we may take the a_contents lock before an unmap
9280 * wait thread that was first to see that softlockcnt was still not 0. As a
9281 * result we'd fail to wake up the unmap wait thread. To avoid this race we
9282 * set the nounmapwait flag in the as structure if we drop softlockcnt to 0
9283 * when called by the pcache async thread. An unmapwait thread will not
9284 * block if this flag is set.
9286 if (async) {
9287 mutex_enter(&svd->segfree_syncmtx);
9290 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9291 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9292 mutex_enter(&seg->s_as->a_contents);
9293 if (async) {
9294 AS_SETNOUNMAPWAIT(seg->s_as);
9296 if (AS_ISUNMAPWAIT(seg->s_as)) {
9297 AS_CLRUNMAPWAIT(seg->s_as);
9298 cv_broadcast(&seg->s_as->a_cv);
9300 mutex_exit(&seg->s_as->a_contents);
9304 if (async) {
9305 mutex_exit(&svd->segfree_syncmtx);
9307 return (0);
9310 /*ARGSUSED*/
9311 static int
9312 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9313 enum seg_rw rw, int async)
9315 amp_t *amp = (amp_t *)ptag;
9316 pgcnt_t np, npages;
9317 struct page **pl;
9319 npages = np = btop(len);
9320 ASSERT(npages);
9321 ASSERT(amp->a_softlockcnt >= npages);
9323 pl = pplist;
9325 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9326 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9328 while (np > (uint_t)0) {
9329 if (rw == S_WRITE) {
9330 hat_setrefmod(*pplist);
9331 } else {
9332 hat_setref(*pplist);
9334 page_unlock(*pplist);
9335 np--;
9336 pplist++;
9339 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9342 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9343 * drops to 0. The anon map can't be freed until a_softlockcnt drops to 0
9344 * and anonmap_purge() acquires a_purgemtx.
9346 mutex_enter(&amp->a_purgemtx);
9347 if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9348 amp->a_purgewait) {
9349 amp->a_purgewait = 0;
9350 cv_broadcast(&amp->a_purgecv);
9352 mutex_exit(&amp->a_purgemtx);
9353 return (0);
9357 * get a memory ID for an addr in a given segment
9359 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9360 * At fault time they will be relocated into larger pages.
9362 static int
9363 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9365 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9366 struct anon *ap = NULL;
9367 ulong_t anon_index;
9368 struct anon_map *amp;
9369 anon_sync_obj_t cookie;
9371 if (svd->type == MAP_PRIVATE) {
9372 memidp->val[0] = (uintptr_t)seg->s_as;
9373 memidp->val[1] = (uintptr_t)addr;
9374 return (0);
9377 if (svd->type == MAP_SHARED) {
9378 if (svd->vp) {
9379 memidp->val[0] = (uintptr_t)svd->vp;
9380 memidp->val[1] = (u_longlong_t)svd->offset +
9381 (uintptr_t)(addr - seg->s_base);
9382 return (0);
9383 } else {
9385 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9386 if ((amp = svd->amp) != NULL) {
9387 anon_index = svd->anon_index +
9388 seg_page(seg, addr);
9390 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9392 ASSERT(amp != NULL);
9394 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9395 anon_array_enter(amp, anon_index, &cookie);
9396 ap = anon_get_ptr(amp->ahp, anon_index);
9397 if (ap == NULL) {
9398 page_t *pp;
9400 pp = anon_zero(seg, addr, &ap, svd->cred);
9401 if (pp == NULL) {
9402 anon_array_exit(&cookie);
9403 ANON_LOCK_EXIT(&amp->a_rwlock);
9404 return (ENOMEM);
9406 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9407 == NULL);
9408 (void) anon_set_ptr(amp->ahp, anon_index,
9409 ap, ANON_SLEEP);
9410 page_unlock(pp);
9413 anon_array_exit(&cookie);
9414 ANON_LOCK_EXIT(&amp->a_rwlock);
9416 memidp->val[0] = (uintptr_t)ap;
9417 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9418 return (0);
9421 return (EINVAL);
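* Resulting memid layouts (sketch; the values are opaque cookies,
* meaningful only for equality comparison by callers):
*
*	MAP_PRIVATE:		val[0] = as,  val[1] = addr
*	MAP_SHARED over vnode:	val[0] = vp,  val[1] = offset + (addr - s_base)
*	MAP_SHARED pure anon:	val[0] = ap,  val[1] = addr & PAGEOFFSET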
9424 static int
9425 sameprot(struct seg *seg, caddr_t a, size_t len)
9427 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9428 struct vpage *vpage;
9429 spgcnt_t pages = btop(len);
9430 uint_t prot;
9432 if (svd->pageprot == 0)
9433 return (1);
9435 ASSERT(svd->vpage != NULL);
9437 vpage = &svd->vpage[seg_page(seg, a)];
9438 prot = VPP_PROT(vpage);
9439 vpage++;
9440 pages--;
9441 while (pages-- > 0) {
9442 if (prot != VPP_PROT(vpage))
9443 return (0);
9444 vpage++;
9446 return (1);
9450 * Get memory allocation policy info for specified address in given segment
9452 static lgrp_mem_policy_info_t *
9453 segvn_getpolicy(struct seg *seg, caddr_t addr)
9455 struct anon_map *amp;
9456 ulong_t anon_index;
9457 lgrp_mem_policy_info_t *policy_info;
9458 struct segvn_data *svn_data;
9459 u_offset_t vn_off;
9460 vnode_t *vp;
9462 ASSERT(seg != NULL);
9464 svn_data = (struct segvn_data *)seg->s_data;
9465 if (svn_data == NULL)
9466 return (NULL);
9469 * Get policy info for private or shared memory
9471 if (svn_data->type != MAP_SHARED) {
9472 if (svn_data->tr_state != SEGVN_TR_ON) {
9473 policy_info = &svn_data->policy_info;
9474 } else {
9475 policy_info = &svn_data->tr_policy_info;
9476 ASSERT(policy_info->mem_policy ==
9477 LGRP_MEM_POLICY_NEXT_SEG);
9479 } else {
9480 amp = svn_data->amp;
9481 anon_index = svn_data->anon_index + seg_page(seg, addr);
9482 vp = svn_data->vp;
9483 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9484 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9487 return (policy_info);
9490 /*ARGSUSED*/
9491 static int
9492 segvn_capable(struct seg *seg, segcapability_t capability)
9494 return (0);
9498 * Bind a text vnode segment to an amp. If we bind successfully, mappings will
9499 * be established to per-vnode, per-lgroup amp pages instead of to vnode
9500 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9501 * may share the same text replication amp. If a suitable amp doesn't already
9502 * exist in the svntr hash table, create a new one. We may fail to bind to an
9503 * amp if the segment is not eligible for text replication. The code below
9504 * first checks for these conditions. If binding is successful the segment's
9505 * tr_state is set to on and svd->amp points to the amp to use. Otherwise
9506 * tr_state is set to off and svd->amp remains NULL.
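*
* Resulting structure (sketch):
*
*	svntr_hashtab[SVNTR_HASH_FUNC(vp)]
*	    -> svntr_t { tr_vp, tr_off, tr_eoff, tr_szc, ... }
*		tr_amp[lgrp]	per-lgroup replica amp, or NULL
*		tr_svnhead	list of segvn_data's bound to this entry
*
* so many processes mapping the same text range on the same lgroup
* share one replica amp.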
9508 static void
9509 segvn_textrepl(struct seg *seg)
9511 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9512 vnode_t *vp = svd->vp;
9513 u_offset_t off = svd->offset;
9514 size_t size = seg->s_size;
9515 u_offset_t eoff = off + size;
9516 uint_t szc = seg->s_szc;
9517 ulong_t hash = SVNTR_HASH_FUNC(vp);
9518 svntr_t *svntrp;
9519 struct vattr va;
9520 proc_t *p = seg->s_as->a_proc;
9521 lgrp_id_t lgrp_id;
9522 lgrp_id_t olid;
9523 int first;
9524 struct anon_map *amp;
9526 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9527 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9528 ASSERT(p != NULL);
9529 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9530 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9531 ASSERT(svd->flags & MAP_TEXT);
9532 ASSERT(svd->type == MAP_PRIVATE);
9533 ASSERT(vp != NULL && svd->amp == NULL);
9534 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9535 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9536 ASSERT(seg->s_as != &kas);
9537 ASSERT(off < eoff);
9538 ASSERT(svntr_hashtab != NULL);
9541 * If NUMA optimizations are no longer desired, bail out.
9543 if (!lgrp_optimizations()) {
9544 svd->tr_state = SEGVN_TR_OFF;
9545 return;
9549 * Avoid creating anon maps with size bigger than the file size.
9550 * If the VOP_GETATTR() call fails, bail out.
9552 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9553 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9554 svd->tr_state = SEGVN_TR_OFF;
9555 SEGVN_TR_ADDSTAT(gaerr);
9556 return;
9558 if (btopr(va.va_size) < btopr(eoff)) {
9559 svd->tr_state = SEGVN_TR_OFF;
9560 SEGVN_TR_ADDSTAT(overmap);
9561 return;
9565 * VVMEXEC may not be set yet if exec() prefaults the text segment. Set
9566 * this flag now before vn_is_mapped(V_WRITE) so that a MAP_SHARED
9567 * mapping that checks whether the trcache for this vnode needs to be
9568 * invalidated can't miss us.
9570 if (!(vp->v_flag & VVMEXEC)) {
9571 mutex_enter(&vp->v_lock);
9572 vp->v_flag |= VVMEXEC;
9573 mutex_exit(&vp->v_lock);
9575 mutex_enter(&svntr_hashtab[hash].tr_lock);
9577 * Bail out if potentially MAP_SHARED writable mappings exist to this
9578 * vnode. We don't want to use old file contents from existing
9579 * replicas if this mapping was established after the original file
9580 * was changed.
9582 if (vn_is_mapped(vp, V_WRITE)) {
9583 mutex_exit(&svntr_hashtab[hash].tr_lock);
9584 svd->tr_state = SEGVN_TR_OFF;
9585 SEGVN_TR_ADDSTAT(wrcnt);
9586 return;
9588 svntrp = svntr_hashtab[hash].tr_head;
9589 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9590 ASSERT(svntrp->tr_refcnt != 0);
9591 if (svntrp->tr_vp != vp) {
9592 continue;
9596 * Bail out if the file or its attributes were changed after
9597 * this replication entry was created since we need to use the
9598 * latest file contents. Note that mtime test alone is not
9599 * sufficient because a user can explicitly change mtime via
9600 * utimes(2) interfaces back to the old value after modifying
9601 * the file contents. To detect this case we also have to test
9602 * ctime which among other things records the time of the last
9603 * mtime change by utimes(2). ctime is not changed when the file
9604 * is only read or executed so we expect that typically existing
9605 * replication amp's can be used most of the time.
9607 if (!svntrp->tr_valid ||
9608 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9609 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9610 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9611 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9612 mutex_exit(&svntr_hashtab[hash].tr_lock);
9613 svd->tr_state = SEGVN_TR_OFF;
9614 SEGVN_TR_ADDSTAT(stale);
9615 return;
9618 * If off, eoff and szc match the current segment, we found an
9619 * existing entry we can use.
9621 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9622 svntrp->tr_szc == szc) {
9623 break;
9626 * Don't create entries that differ but overlap in file offsets,
9627 * to avoid replicating the same file pages more
9628 * than once per lgroup.
9630 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9631 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9632 mutex_exit(&svntr_hashtab[hash].tr_lock);
9633 svd->tr_state = SEGVN_TR_OFF;
9634 SEGVN_TR_ADDSTAT(overlap);
9635 return;
9639 * If we didn't find an existing entry, create a new one.
9641 if (svntrp == NULL) {
9642 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9643 if (svntrp == NULL) {
9644 mutex_exit(&svntr_hashtab[hash].tr_lock);
9645 svd->tr_state = SEGVN_TR_OFF;
9646 SEGVN_TR_ADDSTAT(nokmem);
9647 return;
9649 #ifdef DEBUG
9651 lgrp_id_t i;
9652 for (i = 0; i < NLGRPS_MAX; i++) {
9653 ASSERT(svntrp->tr_amp[i] == NULL);
9656 #endif /* DEBUG */
9657 svntrp->tr_vp = vp;
9658 svntrp->tr_off = off;
9659 svntrp->tr_eoff = eoff;
9660 svntrp->tr_szc = szc;
9661 svntrp->tr_valid = 1;
9662 svntrp->tr_mtime = va.va_mtime;
9663 svntrp->tr_ctime = va.va_ctime;
9664 svntrp->tr_refcnt = 0;
9665 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9666 svntr_hashtab[hash].tr_head = svntrp;
9668 first = 1;
9669 again:
9671 * We want to pick a replica with pages on main thread's (t_tid = 1,
9672 * aka T1) lgrp. Currently text replication is only optimized for
9673 * workloads that either have all threads of a process on the same
9674 * lgrp or execute their large text primarily on main thread.
9676 lgrp_id = p->p_t1_lgrpid;
9677 if (lgrp_id == LGRP_NONE) {
9679 * In case exec() prefaults text on a non-main thread, use the
9680 * current thread's lgrpid. It will become the main thread soon
9681 * anyway.
9683 lgrp_id = lgrp_home_id(curthread);
9686 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
9687 * just set it to NLGRPS_MAX if it's different from current process T1
9688 * home lgrp. p_tr_lgrpid is used to detect if process uses text
9689 * replication and T1 new home is different from lgrp used for text
9690 * replication. When this happens the asynchronous segvn thread rechecks if
9691 * segments should change the lgrps used for text replication. If we fail
9692 * to set p_tr_lgrpid with cas32, we then set it to NLGRPS_MAX without cas,
9693 * provided it's not already NLGRPS_MAX and not equal to the lgrp_id we want
9694 * to use. We don't need to use cas in this case because another thread
9695 * that races in between our non-atomic check and set may only change
9696 * p_tr_lgrpid to NLGRPS_MAX at this point.
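*
* Transition sketch for p_tr_lgrpid (illustrative):
*
*	LGRP_NONE -> lgrp_id	first replicated segment binds (cas32)
*	lgrp_id   -> NLGRPS_MAX	T1 home no longer matches the replica lgrp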
9698 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9699 olid = p->p_tr_lgrpid;
9700 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9701 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9702 if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) {
9703 olid = p->p_tr_lgrpid;
9704 ASSERT(olid != LGRP_NONE);
9705 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9706 p->p_tr_lgrpid = NLGRPS_MAX;
9709 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9710 membar_producer();
9712 * lgrp_move_thread() won't schedule async recheck after
9713 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
9714 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
9715 * is not LGRP_NONE.
9717 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9718 p->p_t1_lgrpid != lgrp_id) {
9719 first = 0;
9720 goto again;
9724 * If no amp has been created yet for lgrp_id, create a new one as long
9725 * as we have enough memory to afford it.
9727 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9728 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9729 if (trmem > segvn_textrepl_max_bytes) {
9730 SEGVN_TR_ADDSTAT(normem);
9731 goto fail;
9733 if (anon_try_resv_zone(size, NULL) == 0) {
9734 SEGVN_TR_ADDSTAT(noanon);
9735 goto fail;
9737 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9738 if (amp == NULL) {
9739 anon_unresv_zone(size, NULL);
9740 SEGVN_TR_ADDSTAT(nokmem);
9741 goto fail;
9743 ASSERT(amp->refcnt == 1);
9744 amp->a_szc = szc;
9745 svntrp->tr_amp[lgrp_id] = amp;
9746 SEGVN_TR_ADDSTAT(newamp);
9748 svntrp->tr_refcnt++;
9749 ASSERT(svd->svn_trnext == NULL);
9750 ASSERT(svd->svn_trprev == NULL);
9751 svd->svn_trnext = svntrp->tr_svnhead;
9752 svd->svn_trprev = NULL;
9753 if (svntrp->tr_svnhead != NULL) {
9754 svntrp->tr_svnhead->svn_trprev = svd;
9756 svntrp->tr_svnhead = svd;
9757 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9758 ASSERT(amp->refcnt >= 1);
9759 svd->amp = amp;
9760 svd->anon_index = 0;
9761 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9762 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9763 svd->tr_state = SEGVN_TR_ON;
9764 mutex_exit(&svntr_hashtab[hash].tr_lock);
9765 SEGVN_TR_ADDSTAT(repl);
9766 return;
9767 fail:
9768 ASSERT(segvn_textrepl_bytes >= size);
9769 atomic_add_long(&segvn_textrepl_bytes, -size);
9770 ASSERT(svntrp != NULL);
9771 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9772 if (svntrp->tr_refcnt == 0) {
9773 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9774 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9775 mutex_exit(&svntr_hashtab[hash].tr_lock);
9776 kmem_cache_free(svntr_cache, svntrp);
9777 } else {
9778 mutex_exit(&svntr_hashtab[hash].tr_lock);
9780 svd->tr_state = SEGVN_TR_OFF;
9784 * Convert seg back to regular vnode mapping seg by unbinding it from its text
9785 * replication amp. This routine is most typically called when the segment is
9786 * unmapped but can also be called when the segment no longer qualifies for text
9787 * replication (e.g. due to protection changes). If unload_unmap is set, use
9788 * the HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user
9789 * of the svntr, free all its anon maps and remove it from the hash table.
9791 static void
9792 segvn_textunrepl(struct seg *seg, int unload_unmap)
9794 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9795 vnode_t *vp = svd->vp;
9796 u_offset_t off = svd->offset;
9797 size_t size = seg->s_size;
9798 u_offset_t eoff = off + size;
9799 uint_t szc = seg->s_szc;
9800 ulong_t hash = SVNTR_HASH_FUNC(vp);
9801 svntr_t *svntrp;
9802 svntr_t **prv_svntrp;
9803 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
9804 lgrp_id_t i;
9806 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9807 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
9808 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9809 ASSERT(svd->tr_state == SEGVN_TR_ON);
9810 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9811 ASSERT(svd->amp != NULL);
9812 ASSERT(svd->amp->refcnt >= 1);
9813 ASSERT(svd->anon_index == 0);
9814 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9815 ASSERT(svntr_hashtab != NULL);
9817 mutex_enter(&svntr_hashtab[hash].tr_lock);
9818 prv_svntrp = &svntr_hashtab[hash].tr_head;
9819 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
9820 ASSERT(svntrp->tr_refcnt != 0);
9821 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
9822 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
9823 break;
9826 if (svntrp == NULL) {
9827 panic("segvn_textunrepl: svntr record not found");
9829 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
9830 panic("segvn_textunrepl: amp mismatch");
9832 svd->tr_state = SEGVN_TR_OFF;
9833 svd->amp = NULL;
9834 if (svd->svn_trprev == NULL) {
9835 ASSERT(svntrp->tr_svnhead == svd);
9836 svntrp->tr_svnhead = svd->svn_trnext;
9837 if (svntrp->tr_svnhead != NULL) {
9838 svntrp->tr_svnhead->svn_trprev = NULL;
9840 svd->svn_trnext = NULL;
9841 } else {
9842 svd->svn_trprev->svn_trnext = svd->svn_trnext;
9843 if (svd->svn_trnext != NULL) {
9844 svd->svn_trnext->svn_trprev = svd->svn_trprev;
9845 svd->svn_trnext = NULL;
9847 svd->svn_trprev = NULL;
9849 if (--svntrp->tr_refcnt) {
9850 mutex_exit(&svntr_hashtab[hash].tr_lock);
9851 goto done;
9853 *prv_svntrp = svntrp->tr_next;
9854 mutex_exit(&svntr_hashtab[hash].tr_lock);
9855 for (i = 0; i < NLGRPS_MAX; i++) {
9856 struct anon_map *amp = svntrp->tr_amp[i];
9857 if (amp == NULL) {
9858 continue;
9860 ASSERT(amp->refcnt == 1);
9861 ASSERT(amp->swresv == size);
9862 ASSERT(amp->size == size);
9863 ASSERT(amp->a_szc == szc);
9864 if (amp->a_szc != 0) {
9865 anon_free_pages(amp->ahp, 0, size, szc);
9866 } else {
9867 anon_free(amp->ahp, 0, size);
9869 svntrp->tr_amp[i] = NULL;
9870 ASSERT(segvn_textrepl_bytes >= size);
9871 atomic_add_long(&segvn_textrepl_bytes, -size);
9872 anon_unresv_zone(amp->swresv, NULL);
9873 amp->refcnt = 0;
9874 anonmap_free(amp);
9876 kmem_cache_free(svntr_cache, svntrp);
9877 done:
9878 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
9879 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
9883 * This is called when a MAP_SHARED writable mapping is created to a vnode
9884 * that is currently used for execution (VVMEXEC flag is set). In this case we
9885 * need to prevent further use of existing replicas.
9887 static void
9888 segvn_inval_trcache(vnode_t *vp)
9890 ulong_t hash = SVNTR_HASH_FUNC(vp);
9891 svntr_t *svntrp;
9893 ASSERT(vp->v_flag & VVMEXEC);
9895 if (svntr_hashtab == NULL) {
9896 return;
9899 mutex_enter(&svntr_hashtab[hash].tr_lock);
9900 svntrp = svntr_hashtab[hash].tr_head;
9901 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9902 ASSERT(svntrp->tr_refcnt != 0);
9903 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
9904 svntrp->tr_valid = 0;
9907 mutex_exit(&svntr_hashtab[hash].tr_lock);
9910 static void
9911 segvn_trasync_thread(void)
9913 callb_cpr_t cpr_info;
9914 kmutex_t cpr_lock; /* just for CPR stuff */
9916 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
9918 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
9919 callb_generic_cpr, "segvn_async");
9921 if (segvn_update_textrepl_interval == 0) {
9922 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
9923 } else {
9924 segvn_update_textrepl_interval *= hz;
9926 (void) timeout(segvn_trupdate_wakeup, NULL,
9927 segvn_update_textrepl_interval);
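* Cadence sketch (illustrative, assuming a segvn_update_tr_time of
* 10 seconds and hz == 100): the timeout fires every 1000 ticks, but
* segvn_trupdate() below only runs when the wakeup routine observes
* new cross-lgroup thread migrations and posts the semaphore.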
9929 for (;;) {
9930 mutex_enter(&cpr_lock);
9931 CALLB_CPR_SAFE_BEGIN(&cpr_info);
9932 mutex_exit(&cpr_lock);
9933 sema_p(&segvn_trasync_sem);
9934 mutex_enter(&cpr_lock);
9935 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
9936 mutex_exit(&cpr_lock);
9937 segvn_trupdate();
9941 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
9943 static void
9944 segvn_trupdate_wakeup(void *dummy)
9946 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
9948 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
9949 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
9950 sema_v(&segvn_trasync_sem);
9953 if (!segvn_disable_textrepl_update &&
9954 segvn_update_textrepl_interval != 0) {
9955 (void) timeout(segvn_trupdate_wakeup, dummy,
9956 segvn_update_textrepl_interval);
9960 static void
9961 segvn_trupdate(void)
9963 ulong_t hash;
9964 svntr_t *svntrp;
9965 segvn_data_t *svd;
9967 ASSERT(svntr_hashtab != NULL);
9969 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
9970 mutex_enter(&svntr_hashtab[hash].tr_lock);
9971 svntrp = svntr_hashtab[hash].tr_head;
9972 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9973 ASSERT(svntrp->tr_refcnt != 0);
9974 svd = svntrp->tr_svnhead;
9975 for (; svd != NULL; svd = svd->svn_trnext) {
9976 segvn_trupdate_seg(svd->seg, svd, svntrp,
9977 hash);
9980 mutex_exit(&svntr_hashtab[hash].tr_lock);
9984 static void
9985 segvn_trupdate_seg(struct seg *seg,
9986 segvn_data_t *svd,
9987 svntr_t *svntrp,
9988 ulong_t hash)
9990 proc_t *p;
9991 lgrp_id_t lgrp_id;
9992 struct as *as;
9993 size_t size;
9994 struct anon_map *amp;
9996 ASSERT(svd->vp != NULL);
9997 ASSERT(svd->vp == svntrp->tr_vp);
9998 ASSERT(svd->offset == svntrp->tr_off);
9999 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
10000 ASSERT(seg != NULL);
10001 ASSERT(svd->seg == seg);
10002 ASSERT(seg->s_data == (void *)svd);
10003 ASSERT(seg->s_szc == svntrp->tr_szc);
10004 ASSERT(svd->tr_state == SEGVN_TR_ON);
10005 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10006 ASSERT(svd->amp != NULL);
10007 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10008 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10009 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10010 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10011 ASSERT(svntrp->tr_refcnt != 0);
10012 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10014 as = seg->s_as;
10015 ASSERT(as != NULL && as != &kas);
10016 p = as->a_proc;
10017 ASSERT(p != NULL);
10018 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
10019 lgrp_id = p->p_t1_lgrpid;
10020 if (lgrp_id == LGRP_NONE) {
10021 return;
10023 ASSERT(lgrp_id < NLGRPS_MAX);
10024 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10025 return;
10029 * Use tryenter locking since we are taking the as/seg locks and the
10030 * svntr hash lock in the reverse of the synchronous thread order.
10032 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10033 SEGVN_TR_ADDSTAT(nolock);
10034 if (segvn_lgrp_trthr_migrs_snpsht) {
10035 segvn_lgrp_trthr_migrs_snpsht = 0;
10037 return;
10039 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10040 AS_LOCK_EXIT(as, &as->a_lock);
10041 SEGVN_TR_ADDSTAT(nolock);
10042 if (segvn_lgrp_trthr_migrs_snpsht) {
10043 segvn_lgrp_trthr_migrs_snpsht = 0;
10045 return;
10047 size = seg->s_size;
10048 if (svntrp->tr_amp[lgrp_id] == NULL) {
10049 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10050 if (trmem > segvn_textrepl_max_bytes) {
10051 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10052 AS_LOCK_EXIT(as, &as->a_lock);
10053 atomic_add_long(&segvn_textrepl_bytes, -size);
10054 SEGVN_TR_ADDSTAT(normem);
10055 return;
10057 if (anon_try_resv_zone(size, NULL) == 0) {
10058 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10059 AS_LOCK_EXIT(as, &as->a_lock);
10060 atomic_add_long(&segvn_textrepl_bytes, -size);
10061 SEGVN_TR_ADDSTAT(noanon);
10062 return;
10064 amp = anonmap_alloc(size, size, KM_NOSLEEP);
10065 if (amp == NULL) {
10066 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10067 AS_LOCK_EXIT(as, &as->a_lock);
10068 atomic_add_long(&segvn_textrepl_bytes, -size);
10069 anon_unresv_zone(size, NULL);
10070 SEGVN_TR_ADDSTAT(nokmem);
10071 return;
10073 ASSERT(amp->refcnt == 1);
10074 amp->a_szc = seg->s_szc;
10075 svntrp->tr_amp[lgrp_id] = amp;
10078 * We don't need to drop the bucket lock, but here we give other
10079 * threads a chance. svntr and svd can't be unlinked as long as
10080 * the segment lock is held as a writer and the AS lock is held as well.
10081 * After we retake the bucket lock we'll continue from where we left off.
10082 * We'll be able to reach the end of either list since new entries are
10083 * always added to the beginning of the lists.
10085 mutex_exit(&svntr_hashtab[hash].tr_lock);
10086 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10087 mutex_enter(&svntr_hashtab[hash].tr_lock);
10089 ASSERT(svd->tr_state == SEGVN_TR_ON);
10090 ASSERT(svd->amp != NULL);
10091 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10092 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10093 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10095 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10096 svd->amp = svntrp->tr_amp[lgrp_id];
10097 p->p_tr_lgrpid = NLGRPS_MAX;
10098 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10099 AS_LOCK_EXIT(as, &as->a_lock);
10101 ASSERT(svntrp->tr_refcnt != 0);
10102 ASSERT(svd->vp == svntrp->tr_vp);
10103 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10104 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10105 ASSERT(svd->seg == seg);
10106 ASSERT(svd->tr_state == SEGVN_TR_ON);
10108 SEGVN_TR_ADDSTAT(asyncrepl);