/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - shared or copy-on-write from a vnode/anonymous memory.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/dumphdr.h>
#include <vm/seg_vn.h>
#include <sys/project.h>
#include <sys/shm_impl.h>
/*
 * segvn_fault needs a temporary page list array.  To avoid calling kmem all
 * the time, it creates a small (FAULT_TMP_PAGES_NUM entry) array and uses
 * it if it can.  In the rare case when this page list is not large enough,
 * it goes and gets a large enough array from kmem.
 */
#define	FAULT_TMP_PAGES_NUM	0x8
#define	FAULT_TMP_PAGES_SZ	ptob(FAULT_TMP_PAGES_NUM)
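
/*
 * Illustrative sketch (not part of this driver) of the "small fixed array
 * with a kmem fallback" pattern described above.  "npages", "plist" and
 * "plist_local" are hypothetical names used only for this example:
 *
 *	page_t *plist_local[FAULT_TMP_PAGES_NUM];
 *	page_t **plist = plist_local;
 *	size_t plist_sz = 0;
 *
 *	if (npages > FAULT_TMP_PAGES_NUM) {
 *		plist_sz = npages * sizeof (page_t *);
 *		plist = kmem_alloc(plist_sz, KM_SLEEP);
 *	}
 *	... fill and use plist[0 .. npages - 1] ...
 *	if (plist != plist_local)
 *		kmem_free(plist, plist_sz);
 */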
/*
 * Private seg op routines.
 */
static int	segvn_dup(struct seg *seg, struct seg *newseg);
static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segvn_free(struct seg *seg);
static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
		    caddr_t addr, size_t len, enum fault_type type,
		    enum seg_rw rw);
static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
static int	segvn_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segvn_sync(struct seg *seg, caddr_t addr, size_t len,
		    int attr, uint_t flags);
static size_t	segvn_incore(struct seg *seg, caddr_t addr, size_t len,
		    char *vec);
static int	segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
		    int attr, int op, ulong_t *lockmap, size_t pos);
static int	segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t *protv);
static uoff_t	segvn_getoffset(struct seg *seg, caddr_t addr);
static int	segvn_gettype(struct seg *seg, caddr_t addr);
static int	segvn_getvp(struct seg *seg, caddr_t addr,
		    struct vnode **vpp);
static int	segvn_advise(struct seg *seg, caddr_t addr, size_t len,
		    uint_t behav);
static void	segvn_dump(struct seg *seg);
static int	segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segvn_getmemid(struct seg *seg, caddr_t addr,
		    memid_t *memidp);
static lgrp_mem_policy_info_t	*segvn_getpolicy(struct seg *, caddr_t);
static int	segvn_inherit(struct seg *, caddr_t, size_t, uint_t);
const struct seg_ops segvn_ops = {
	.unmap		= segvn_unmap,
	.fault		= segvn_fault,
	.faulta		= segvn_faulta,
	.setprot	= segvn_setprot,
	.checkprot	= segvn_checkprot,
	.kluster	= segvn_kluster,
	.incore		= segvn_incore,
	.lockop		= segvn_lockop,
	.getprot	= segvn_getprot,
	.getoffset	= segvn_getoffset,
	.gettype	= segvn_gettype,
	.getvp		= segvn_getvp,
	.advise		= segvn_advise,
	.pagelock	= segvn_pagelock,
	.setpagesize	= segvn_setpagesize,
	.getmemid	= segvn_getmemid,
	.getpolicy	= segvn_getpolicy,
	.inherit	= segvn_inherit,
};
/*
 * Common zfod structures, provided as a shorthand for others to use.
 */
static segvn_crargs_t zfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
static segvn_crargs_t kzfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
	PROT_ALL & ~PROT_USER);
static segvn_crargs_t stack_noexec_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);

caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs;	/* user zfod argsp */
caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs;	/* kernel zfod argsp */
caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs;	/* executable stack */
caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
#define	vpgtob(n)	((n) * sizeof (struct vpage))	/* For brevity */

size_t	segvn_comb_thrshld = UINT_MAX;	/* patchable -- see 1196681 */

size_t	segvn_pglock_comb_thrshld = (1UL << 16);	/* 64K */
size_t	segvn_pglock_comb_balign = (1UL << 16);		/* 64K */
uint_t	segvn_pglock_comb_bshift;
size_t	segvn_pglock_comb_palign;
static int	segvn_concat(struct seg *, struct seg *, int);
static int	segvn_extend_prev(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static int	segvn_extend_next(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static void	segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
static void	segvn_pagelist_rele(page_t **);
static void	segvn_setvnode_mpss(vnode_t *);
static void	segvn_relocate_pages(page_t **, page_t *);
static int	segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
static int	segvn_fill_vp_pages(struct segvn_data *, vnode_t *, uoff_t,
    uint_t, page_t **, page_t **, uint_t *, int *);
static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
    uoff_t, struct vpage *, page_t **, uint_t,
    enum fault_type, enum seg_rw, int);
static void	segvn_vpage(struct seg *);
static size_t	segvn_count_swap_by_vpages(struct seg *);

static void segvn_purge(struct seg *seg);
static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);

static int sameprot(struct seg *, caddr_t, size_t);

static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
static int segvn_clrszc(struct seg *);
static struct seg *segvn_split_seg(struct seg *, caddr_t);
static int segvn_claim_pages(struct seg *, struct vpage *, uoff_t,
    ulong_t, uint_t);

static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
    size_t, void *, uoff_t);

static struct kmem_cache *segvn_cache;
static struct kmem_cache **segvn_szc_cache;
#ifdef VM_STATS
static struct segvnvmstats_str {
	ulong_t	fill_vp_pages[31];
	ulong_t	fltvnpages[49];
	ulong_t	fullszcpages[10];
	ulong_t	relocatepages[3];
	ulong_t	fltanpages[17];
	ulong_t	demoterange[3];
} segvnvmstats;
#endif /* VM_STATS */

#define	SDR_RANGE	1		/* demote entire range */
#define	SDR_END		2		/* demote non aligned ends only */
#define	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) {	\
	if ((len) != 0) {						\
		lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);	\
		ASSERT(lpgaddr >= (seg)->s_base);			\
		lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) +	\
		    (len)), pgsz);					\
		ASSERT(lpgeaddr > lpgaddr);				\
		ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size);	\
	} else {							\
		lpgeaddr = lpgaddr = (addr);				\
	}								\
}
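
/*
 * Usage sketch for CALC_LPG_REGION (illustrative only): expand an operation
 * range to large-page boundaries before working on it.  "seg", "addr" and
 * "len" are assumed to come from the caller.
 *
 *	size_t pgsz = page_get_pagesize(seg->s_szc);
 *	caddr_t lpgaddr, lpgeaddr;
 *
 *	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
 *
 * [lpgaddr, lpgeaddr) now covers [addr, addr + len) rounded out to pgsz
 * boundaries; the ASSERTs in the macro keep it within the segment.
 */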
static int
segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct segvn_data *svd = buf;

	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
	svd->svn_trnext = svd->svn_trprev = NULL;
	return (0);
}

static void
segvn_cache_destructor(void *buf, void *cdrarg)
{
	struct segvn_data *svd = buf;

	rw_destroy(&svd->lock);
	mutex_destroy(&svd->segfree_syncmtx);
}

static int
svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (svntr_t));
	return (0);
}
/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * most everything works.  The exceptions are generally some interpreters
 * and debuggers that create executable code on the stack and jump
 * into it (without explicitly mprotecting the address range to include
 * PROT_EXEC).
 *
 * One important class of applications that are disabled are those
 * that have been transformed into malicious agents using one of the
 * numerous "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;
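
/*
 * A user-level program that genuinely needs to execute code on its stack
 * while noexec_user_stack is enabled can regain that ability by explicitly
 * adding PROT_EXEC.  Hedged, illustrative user-space sketch (not kernel
 * code); "stack_addr" and "pagesize" are assumed to be supplied by the
 * application:
 *
 *	#include <sys/mman.h>
 *
 *	uintptr_t base = (uintptr_t)stack_addr & ~(pagesize - 1);
 *	if (mprotect((void *)base, pagesize,
 *	    PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
 *		perror("mprotect");
 */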
int	segvn_lpg_disable = 0;
uint_t	segvn_maxpgszc = 0;

ulong_t segvn_vmpss_clrszc_cnt;
ulong_t segvn_vmpss_clrszc_err;
ulong_t segvn_fltvnpages_clrszc_cnt;
ulong_t segvn_fltvnpages_clrszc_err;
ulong_t segvn_setpgsz_align_err;
ulong_t segvn_setpgsz_anon_align_err;
ulong_t segvn_setpgsz_getattr_err;
ulong_t segvn_setpgsz_eof_err;
ulong_t segvn_faultvnmpss_align_err1;
ulong_t segvn_faultvnmpss_align_err2;
ulong_t segvn_faultvnmpss_align_err3;
ulong_t segvn_faultvnmpss_align_err4;
ulong_t segvn_faultvnmpss_align_err5;
ulong_t	segvn_vmpss_pageio_deadlk_err;

int segvn_use_regions = 1;
/*
 * Segvn supports text replication optimization for NUMA platforms. Text
 * replica's are represented by anon maps (amp). There's one amp per text file
 * region per lgroup. A process chooses the amp for each of its text mappings
 * based on the lgroup assignment of its main thread (t_tid = 1). All
 * processes that want a replica on a particular lgroup for the same text file
 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table
 * with vp,off,size,szc used as a key. Text replication segments are read only
 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
 * pages. Replication amp is assigned to a segment when it gets its first
 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread
 * rechecks periodically if the process still maps an amp local to the main
 * thread. If not async thread forces process to remap to an amp in the new
 * home lgroup of the main thread. Current text replication implementation
 * only provides the benefit to workloads that do most of their work in the
 * main thread of a process or all the threads of a process run in the same
 * lgroup. To extend text replication benefit to different types of
 * multithreaded workloads further work would be needed in the hat layer to
 * allow the same virtual address in the same hat to simultaneously map
 * different physical addresses (i.e. page table replication would be needed
 * for this to work).
 *
 * amp pages are used instead of vnode pages as long as segment has a very
 * simple life cycle.  It's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped.  If anything more complicated
 * happens such as protection is changed, real COW fault happens, pagesize is
 * changed, MC_LOCK is requested or segment is partially unmapped we turn off
 * text replication by converting the segment back to vnode only segment
 * (unmap segment's address range and set svd->amp to NULL).
 *
 * The original file can be changed after amp is inserted into
 * svntr_hashtab. Processes that are launched after the file is already
 * changed can't use the replica's created prior to the file change. To
 * implement this functionality hash entries are timestamped. Replica's can
 * only be used if current file modification time is the same as the timestamp
 * saved when hash entry was created. However just timestamps alone are not
 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
 * deal with file changes via MAP_SHARED mappings differently. When writable
 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
 * existing replica's for this vnode as not usable for future text
 * mappings. And we don't create new replica's for files that currently have
 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
 * true).
 */
#define	SEGVN_TEXTREPL_MAXBYTES_FACTOR	(20)
size_t	segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;

static ulong_t			svntr_hashtab_sz = 512;
static svntr_bucket_t		*svntr_hashtab = NULL;
static struct kmem_cache	*svntr_cache;
static svntr_stats_t		*segvn_textrepl_stats;
static ksema_t			segvn_trasync_sem;

int	segvn_disable_textrepl = 1;
size_t	textrepl_size_thresh = (size_t)-1;
size_t	segvn_textrepl_bytes = 0;
size_t	segvn_textrepl_max_bytes = 0;
clock_t	segvn_update_textrepl_interval = 0;
int	segvn_update_tr_time = 10;
int	segvn_disable_textrepl_update = 0;
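
/*
 * A minimal sketch of the lookup keying described in the text replication
 * comment above: replicas are found by hashing (vp, off, size, szc) into
 * svntr_hashtab.  The hash below is illustrative only and is not the one
 * used by this driver; "nbuckets" would be svntr_hashtab_sz.
 *
 *	static ulong_t
 *	svntr_hash_example(vnode_t *vp, uoff_t off, size_t size,
 *	    uint_t szc, ulong_t nbuckets)
 *	{
 *		ulong_t h = (ulong_t)(uintptr_t)vp;
 *
 *		h ^= (ulong_t)off ^ (ulong_t)size ^ (ulong_t)szc;
 *		return (h % nbuckets);
 *	}
 */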
static void segvn_textrepl(struct seg *);
static void segvn_textunrepl(struct seg *, int);
static void segvn_inval_trcache(vnode_t *);
static void segvn_trasync_thread(void);
static void segvn_trupdate_wakeup(void *);
static void segvn_trupdate(void);
static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
    ulong_t);
/*
 * Initialize segvn data structures
 */
void
segvn_init(void)
{
	uint_t maxszc;
	uint_t szc;
	size_t pgsz;

	segvn_cache = kmem_cache_create("segvn_cache",
	    sizeof (struct segvn_data), 0,
	    segvn_cache_constructor, segvn_cache_destructor, NULL,
	    NULL, NULL, 0);

	if (segvn_lpg_disable == 0) {
		szc = maxszc = page_num_pagesizes() - 1;
		if (szc == 0) {
			segvn_lpg_disable = 1;
		}
		if (page_get_pagesize(0) != PAGESIZE) {
			panic("segvn_init: bad szc 0");
			/*NOTREACHED*/
		}
		while (szc != 0) {
			pgsz = page_get_pagesize(szc);
			if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
				panic("segvn_init: bad szc %d", szc);
				/*NOTREACHED*/
			}
			szc--;
		}
		if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
			segvn_maxpgszc = maxszc;
	}

	if (segvn_maxpgszc) {
		segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
		    (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
		    KM_SLEEP);

		for (szc = 1; szc <= segvn_maxpgszc; szc++) {
			char str[32];

			(void) sprintf(str, "segvn_szc_cache%d", szc);
			segvn_szc_cache[szc] = kmem_cache_create(str,
			    page_get_pagecnt(szc) * sizeof (page_t *), 0,
			    NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
		}
	}

	if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
		segvn_use_regions = 0;

	/*
	 * For now shared regions and text replication segvn support
	 * are mutually exclusive. This is acceptable because
	 * currently significant benefit from text replication was
	 * only observed on AMD64 NUMA platforms (due to relatively
	 * small L2$ size) and currently we don't support shared
	 * regions there.
	 */
	if (segvn_use_regions && !segvn_disable_textrepl) {
		segvn_disable_textrepl = 1;
	}

	if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
	    !segvn_disable_textrepl) {
		ulong_t i;
		size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);

		svntr_cache = kmem_cache_create("svntr_cache",
		    sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
		    NULL, NULL, NULL, 0);
		svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
		for (i = 0; i < svntr_hashtab_sz; i++) {
			mutex_init(&svntr_hashtab[i].tr_lock, NULL,
			    MUTEX_DEFAULT, NULL);
		}
		segvn_textrepl_max_bytes = ptob(physmem) /
		    segvn_textrepl_max_bytes_factor;
		segvn_textrepl_stats = kmem_zalloc(NCPU *
		    sizeof (svntr_stats_t), KM_SLEEP);
		sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
		(void) thread_create(NULL, 0, segvn_trasync_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}

	if (!ISP2(segvn_pglock_comb_balign) ||
	    segvn_pglock_comb_balign < PAGESIZE) {
		segvn_pglock_comb_balign = 1UL << 16; /* 64K */
	}
	segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
	segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
}
#define	SEGVN_PAGEIO	((void *)0x1)
#define	SEGVN_NOPAGEIO	((void *)0x2)

static void
segvn_setvnode_mpss(vnode_t *vp)
{
	int err;

	ASSERT(vp->v_mpssdata == NULL ||
	    vp->v_mpssdata == SEGVN_PAGEIO ||
	    vp->v_mpssdata == SEGVN_NOPAGEIO);

	if (vp->v_mpssdata == NULL) {
		if (vn_vmpss_usepageio(vp)) {
			err = fop_pageio(vp, NULL,
			    0, 0, 0, CRED(), NULL);
		} else {
			err = ENOSYS;
		}
		/*
		 * set v_mpssdata just once per vnode life
		 * so that it never changes.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_mpssdata == NULL) {
			if (err == EINVAL) {
				vp->v_mpssdata = SEGVN_PAGEIO;
			} else {
				vp->v_mpssdata = SEGVN_NOPAGEIO;
			}
		}
		mutex_exit(&vp->v_lock);
	}
}
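
/*
 * segvn_setvnode_mpss() above uses a "probe once, then cache the answer
 * under the vnode lock" shape.  Generic hedged sketch of that pattern, with
 * a hypothetical "classify" probe and "v_cache" field (not real vnode
 * members):
 *
 *	mutex_enter(&vp->v_lock);
 *	if (vp->v_cache == NULL) {
 *		vp->v_cache = classify(vp) ? TAG_A : TAG_B;
 *	}
 *	mutex_exit(&vp->v_lock);
 *
 * Once set, the cached value never changes for the life of the object, so
 * later readers may examine it without taking the lock.
 */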
int
segvn_create(struct seg *seg, void *argsp)
{
	extern lgrp_mem_policy_t lgrp_mem_default_policy;
	struct segvn_crargs *a = (struct segvn_crargs *)argsp;
	struct segvn_data *svd;
	size_t swresv = 0;
	struct cred *cred;
	struct anon_map *amp;
	int error = 0;
	size_t pgsz;
	lgrp_mem_policy_t mpolicy = lgrp_mem_default_policy;
	int use_rgn = 0;
	int trok = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
		panic("segvn_create type");
		/*NOTREACHED*/
	}

	/*
	 * Check arguments.  If a shared anon structure is given then
	 * it is illegal to also specify a vp.
	 */
	if (a->amp != NULL && a->vp != NULL) {
		panic("segvn_create anon_map");
		/*NOTREACHED*/
	}
->type
== MAP_PRIVATE
&& (a
->flags
& MAP_TEXT
) &&
557 a
->vp
!= NULL
&& a
->prot
== (PROT_USER
| PROT_READ
| PROT_EXEC
) &&
562 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
563 if (a
->type
== MAP_SHARED
)
564 a
->flags
&= ~MAP_NORESERVE
;
567 if (segvn_lpg_disable
!= 0 || (a
->szc
== AS_MAP_NO_LPOOB
) ||
568 (a
->amp
!= NULL
&& a
->type
== MAP_PRIVATE
) ||
569 (a
->flags
& MAP_NORESERVE
) || seg
->s_as
== &kas
) {
572 if (a
->szc
> segvn_maxpgszc
)
573 a
->szc
= segvn_maxpgszc
;
574 pgsz
= page_get_pagesize(a
->szc
);
575 if (!IS_P2ALIGNED(seg
->s_base
, pgsz
) ||
576 !IS_P2ALIGNED(seg
->s_size
, pgsz
)) {
578 } else if (a
->vp
!= NULL
) {
579 if (IS_SWAPFSVP(a
->vp
) || VN_ISKAS(a
->vp
)) {
582 * hat_page_demote() is not supported
586 } else if (map_addr_vacalign_check(seg
->s_base
,
587 a
->offset
& PAGEMASK
)) {
590 } else if (a
->amp
!= NULL
) {
591 pgcnt_t anum
= btopr(a
->offset
);
592 pgcnt_t pgcnt
= page_get_pagecnt(a
->szc
);
593 if (!IS_P2ALIGNED(anum
, pgcnt
)) {
	/*
	 * If segment may need private pages, reserve them now.
	 */
	if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
	    (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
		if (anon_resv_zone(seg->s_size,
		    seg->s_as->a_proc->p_zone) == 0)
			return (EAGAIN);
		swresv = seg->s_size;
	}

	/*
	 * Reserve any mapping structures that may be required.
	 *
	 * Don't do it for segments that may use regions. It's currently a
	 * noop in the hat implementations anyway.
	 */
	if (!use_rgn) {
		hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	}

	crhold(cred = CRED());
630 error
= fop_addmap(a
->vp
, a
->offset
& PAGEMASK
,
631 seg
->s_as
, seg
->s_base
, seg
->s_size
, a
->prot
,
632 a
->maxprot
, a
->type
, cred
, NULL
);
635 anon_unresv_zone(swresv
,
636 seg
->s_as
->a_proc
->p_zone
);
640 hat_unload(seg
->s_as
->a_hat
, seg
->s_base
,
641 seg
->s_size
, HAT_UNLOAD_UNMAP
);
646 * svntr_hashtab will be NULL if we support shared regions.
648 trok
= ((a
->flags
& MAP_TEXT
) &&
649 (seg
->s_size
> textrepl_size_thresh
||
650 (a
->flags
& _MAP_TEXTREPL
)) &&
651 lgrp_optimizations() && svntr_hashtab
!= NULL
&&
652 a
->type
== MAP_PRIVATE
&& swresv
== 0 &&
653 !(a
->flags
& MAP_NORESERVE
) &&
654 seg
->s_as
!= &kas
&& a
->vp
->v_type
== VREG
);
656 ASSERT(!trok
|| !use_rgn
);
	/*
	 * MAP_NORESERVE mappings don't count towards the VSZ of a process
	 * until we fault the pages in.
	 */
	if ((a->vp == NULL || a->vp->v_type != VREG) &&
	    a->flags & MAP_NORESERVE) {
		seg->s_as->a_resvsize -= seg->s_size;
	}
	/*
	 * If more than one segment in the address space, and they're adjacent
	 * virtually, try to concatenate them.  Don't concatenate if an
	 * explicit anon_map structure was supplied (e.g., SystemV shared
	 * memory) or if we'll use text replication for this segment.
	 */
	if (a->amp == NULL && !use_rgn && !trok) {
		struct seg *pseg, *nseg;
		struct segvn_data *psvd, *nsvd;
		lgrp_mem_policy_t ppolicy, npolicy;
		uint_t	lgrp_mem_policy_flags = 0;

		/*
		 * Memory policy flags (lgrp_mem_policy_flags) is valid when
		 * extending stack/heap segments.
		 */
		if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
		    !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
			lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
		} else {
			/*
			 * Get policy when not extending it from another segment
			 */
			mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
		}

		/*
		 * First, try to concatenate the previous and new segments
		 */
		pseg = AS_SEGPREV(seg->s_as, seg);
		if (pseg != NULL &&
		    pseg->s_base + pseg->s_size == seg->s_base &&
		    pseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from previous segment.
			 * When extension is specified (e.g. for heap) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			psvd = (struct segvn_data *)pseg->s_data;
			ppolicy = psvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_UP) {
				if (ppolicy != lgrp_mem_default_policy) {
					mpolicy = ppolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    pseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == ppolicy &&
			    (pseg->s_size + seg->s_size <=
			    segvn_comb_thrshld || psvd->amp == NULL) &&
			    segvn_extend_prev(pseg, seg, a, swresv) == 0) {
				/*
				 * success! now try to concatenate
				 * with following seg
				 */
				crfree(cred);
				nseg = AS_SEGNEXT(pseg->s_as, pseg);
				if (nseg != NULL && nseg != pseg &&
				    nseg->s_ops == &segvn_ops &&
				    pseg->s_base + pseg->s_size ==
				    nseg->s_base)
					(void) segvn_concat(pseg, nseg, 0);
				ASSERT(pseg->s_szc == 0 ||
				    (a->szc == pseg->s_szc &&
				    IS_P2ALIGNED(pseg->s_base, pgsz) &&
				    IS_P2ALIGNED(pseg->s_size, pgsz)));
				return (0);
			}
		}

		/*
		 * Failed, so try to concatenate with following seg
		 */
		nseg = AS_SEGNEXT(seg->s_as, seg);
		if (nseg != NULL &&
		    seg->s_base + seg->s_size == nseg->s_base &&
		    nseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from next segment.
			 * When extension is specified (e.g. for stack) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			nsvd = (struct segvn_data *)nseg->s_data;
			npolicy = nsvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_DOWN) {
				if (npolicy != lgrp_mem_default_policy) {
					mpolicy = npolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    nseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == npolicy &&
			    segvn_extend_next(seg, nseg, a, swresv) == 0) {
				crfree(cred);
				ASSERT(nseg->s_szc == 0 ||
				    (a->szc == nseg->s_szc &&
				    IS_P2ALIGNED(nseg->s_base, pgsz) &&
				    IS_P2ALIGNED(nseg->s_size, pgsz)));
				return (0);
			}
		}
	}
	if (a->type == MAP_SHARED)
		lgrp_shm_policy_init(NULL, a->vp);

	svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	seg->s_ops = &segvn_ops;
	seg->s_data = (void *)svd;
	svd->seg = seg;
	svd->vp = a->vp;

	/*
	 * Anonymous mappings have no backing file so the offset is meaningless.
	 */
	svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
	svd->prot = a->prot;
	svd->maxprot = a->maxprot;
	svd->type = a->type;
	svd->advice = MADV_NORMAL;
	svd->flags = (ushort_t)a->flags;
	svd->softlockcnt = 0;
	svd->softlockcnt_sbase = 0;
	svd->softlockcnt_send = 0;
	svd->rcookie = HAT_INVALID_REGION_COOKIE;
	if (a->szc != 0 && a->vp != NULL) {
		segvn_setvnode_mpss(a->vp);
	}
	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
		ASSERT(vn_is_mapped(svd->vp, V_WRITE));
		segvn_inval_trcache(svd->vp);
	}

	amp = a->amp;
	if ((svd->amp = amp) == NULL) {
		svd->anon_index = 0;
		if (svd->type == MAP_SHARED) {
			/*
			 * Shared mappings to a vp need no other setup.
			 * If we have a shared mapping to an anon_map object
			 * which hasn't been allocated yet, allocate the
			 * struct now so that it will be properly shared
			 * by remembering the swap reservation there.
			 */
			if (a->vp == NULL) {
				svd->amp = anonmap_alloc(seg->s_size, swresv,
				    ANON_SLEEP);
				svd->amp->a_szc = seg->s_szc;
			}
		} else {
			/*
			 * Private mapping (with or without a vp).
			 * Allocate anon_map when needed.
			 */
			svd->swresv = swresv;
		}
	} else {
		ulong_t anon_num;

		/*
		 * Mapping to an existing anon_map structure without a vp.
		 * For now we will insure that the segment size isn't larger
		 * than the size - offset gives us.  Later on we may wish to
		 * have the anon array dynamically allocated itself so that
		 * we don't always have to allocate all the anon pointer slots.
		 * This of course involves adding extra code to check that we
		 * aren't trying to use an anon pointer slot beyond the end
		 * of the currently allocated anon array.
		 */
		if ((amp->size - a->offset) < seg->s_size) {
			panic("segvn_create anon_map size");
			/*NOTREACHED*/
		}

		anon_num = btopr(a->offset);

		if (a->type == MAP_SHARED) {
			/*
			 * SHARED mapping to a given anon_map.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			if (a->szc > amp->a_szc) {
				amp->a_szc = a->szc;
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index = anon_num;
		} else {
			/*
			 * PRIVATE mapping to a given anon_map.
			 * Make sure that all the needed anon
			 * structures are created (so that we will
			 * share the underlying pages if nothing
			 * is written by this mapping) and then
			 * duplicate the anon array as is done
			 * when a privately mapped segment is dup'ed.
			 */
			struct anon *ap;
			caddr_t addr;
			caddr_t eaddr;
			ulong_t anon_idx;
			int hat_flag = HAT_LOAD;

			if (svd->flags & MAP_TEXT) {
				hat_flag |= HAT_LOAD_TEXT;
			}

			svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
			svd->amp->a_szc = seg->s_szc;
			svd->anon_index = 0;
			svd->swresv = swresv;

			/*
			 * Prevent 2 threads from allocating anon
			 * slots simultaneously.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			eaddr = seg->s_base + seg->s_size;

			for (anon_idx = anon_num, addr = seg->s_base;
			    addr < eaddr; addr += PAGESIZE, anon_idx++) {
				page_t *pp;

				if ((ap = anon_get_ptr(amp->ahp,
				    anon_idx)) != NULL)
					continue;

				/*
				 * Allocate the anon struct now.
				 * Might as well load up translation
				 * to the page while we're at it...
				 */
				pp = anon_zero(seg, addr, &ap, cred);
				if (ap == NULL || pp == NULL) {
					panic("segvn_create anon_zero");
					/*NOTREACHED*/
				}

				/*
				 * Re-acquire the anon_map lock and
				 * initialize the anon array entry.
				 */
				ASSERT(anon_get_ptr(amp->ahp,
				    anon_idx) == NULL);
				(void) anon_set_ptr(amp->ahp, anon_idx, ap,
				    ANON_SLEEP);

				ASSERT(seg->s_szc == 0);
				ASSERT(!IS_VMODSORT(pp->p_vnode));

				ASSERT(use_rgn == 0);
				hat_memload(seg->s_as->a_hat, addr, pp,
				    svd->prot & ~PROT_WRITE, hat_flag);

				page_unlock(pp);
			}
			ASSERT(seg->s_szc == 0);
			anon_dup(amp->ahp, anon_num, svd->amp->ahp,
			    0, seg->s_size);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}
	/*
	 * Set default memory allocation policy for segment
	 *
	 * Always set policy for private memory at least for initialization
	 * even if this is a shared memory segment
	 */
	(void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);

	if (svd->type == MAP_SHARED)
		(void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
		    svd->vp, svd->offset, seg->s_size);

	if (use_rgn) {
		ASSERT(svd->amp == NULL);
		svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
		    seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
		    (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
		    HAT_REGION_TEXT);
	}

	ASSERT(!trok || !(svd->prot & PROT_WRITE));
	svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;

	return (0);
}
/*
 * Concatenate two existing segments, if possible.
 * Return 0 on success, -1 if two segments are not compatible
 * or -2 on memory allocation failure.
 * If amp_cat == 1 then try and concat segments with anon maps
 */
static int
segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
{
	struct segvn_data *svd1 = seg1->s_data;
	struct segvn_data *svd2 = seg2->s_data;
	struct anon_map *amp1 = svd1->amp;
	struct anon_map *amp2 = svd2->amp;
	struct vpage *vpage1 = svd1->vpage;
	struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
	size_t size, nvpsize;
	pgcnt_t npages1, npages2;

	ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
	ASSERT(AS_WRITE_HELD(seg1->s_as));
	ASSERT(seg1->s_ops == seg2->s_ops);

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
	    HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* both segments exist, try to merge them */
#define	incompat(x)	(svd1->x != svd2->x)
	if (incompat(vp) || incompat(maxprot) ||
	    (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
	    (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
	    incompat(type) || incompat(cred) || incompat(flags) ||
	    seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
	    (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
		return (-1);
#undef incompat

	/*
	 * vp == NULL implies zfod, offset doesn't matter
	 */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != svd2->offset) {
		return (-1);
	}

	/*
	 * Don't concatenate if either segment uses text replication.
	 */
	if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	/*
	 * Fail early if we're not supposed to concatenate
	 * segments with non NULL amp.
	 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}
	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp, *evp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			evp = nvpage + npages1 + npages2;
			for (vp = nvpage + npages1; vp < evp; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}

		if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
			ASSERT(svd1->swresv == seg1->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}

		if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
			ASSERT(svd2->swresv == seg2->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			vp = nvpage + npages1;
			evp = vp + npages2;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT((vpage1 != NULL || vpage2 != NULL) ||
	    (svd1->pageswap == 0 && svd2->pageswap == 0));
	/*
	 * If either segment has private pages, create a new merged anon
	 * array.  If mergeing shared anon segments just decrement anon map's
	 * refcnt.
	 */
	if (amp1 != NULL && svd1->type == MAP_SHARED) {
		ASSERT(amp1 == amp2 && svd1->vp == NULL);
		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		ASSERT(amp1->refcnt >= 2);
		amp1->refcnt--;
		ANON_LOCK_EXIT(&amp1->a_rwlock);
		svd2->amp = NULL;
	} else if (amp1 != NULL || amp2 != NULL) {
		struct anon_hdr *nahp;
		struct anon_map *namp = NULL;
		size_t asize;

		ASSERT(svd1->type == MAP_PRIVATE);

		asize = seg1->s_size + seg2->s_size;
		if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
			if (nvpage != NULL) {
				kmem_free(nvpage, nvpsize);
			}
			return (-2);
		}
		if (amp1 != NULL) {
			/*
			 * XXX anon rwlock is not really needed because
			 * this is a private segment and we are writers.
			 */
			ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
			ASSERT(amp1->refcnt == 1);
			if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
			    nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp1->a_rwlock);
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp2 != NULL) {
			ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
			ASSERT(amp2->refcnt == 1);
			if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
			    nahp, btop(seg1->s_size), btop(seg2->s_size),
			    ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				if (amp1 != NULL) {
					ANON_LOCK_EXIT(&amp1->a_rwlock);
				}
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp1 != NULL) {
			namp = amp1;
			anon_release(amp1->ahp, btop(amp1->size));
		}
		if (amp2 != NULL) {
			if (namp == NULL) {
				ASSERT(amp1 == NULL);
				namp = amp2;
				anon_release(amp2->ahp, btop(amp2->size));
			} else {
				amp2->refcnt--;
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				svd2->amp = NULL; /* needed for seg_free */
			}
		}
		namp->ahp = nahp;
		namp->size = asize;
		svd1->amp = namp;
		svd1->anon_index = 0;
		ANON_LOCK_EXIT(&namp->a_rwlock);
	}

	/*
	 * Now free the old vpage structures.
	 */
	if (nvpage != NULL) {
		if (vpage1 != NULL) {
			kmem_free(vpage1, vpgtob(npages1));
		}
		if (vpage2 != NULL) {
			kmem_free(vpage2, vpgtob(npages2));
		}
		if (svd2->pageprot) {
			svd1->pageprot = 1;
		}
		if (svd2->pageadvice) {
			svd1->pageadvice = 1;
		}
		if (svd2->pageswap) {
			svd1->pageswap = 1;
		}
		svd1->vpage = nvpage;
	}

	/* all looks ok, merge segments */
	svd1->swresv += svd2->swresv;
	svd2->swresv = 0;	/* so seg_free doesn't release swap space */
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;

	return (0);
}
/*
 * Extend the previous segment (seg1) to include the
 * new segment (seg2 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_prev(seg1, seg2, a, swresv)
	struct seg *seg1, *seg2;
	struct segvn_crargs *a;
	size_t swresv;
{
	struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
	size_t size;
	struct anon_map *amp1;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as));

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
		return (-1);
	}

	/* second segment is new, try to extend first */
	/* XXX - should also check cred */
	if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
	    (!svd1->pageprot && (svd1->prot != a->prot)) ||
	    svd1->type != a->type || svd1->flags != a->flags ||
	    seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
		return (-1);

	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
		return (-1);

	if (svd1->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp1 = svd1->amp;
	if (amp1) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 *
		 * Can't grow a MAP_SHARED segment with an anonmap because
		 * there may be existing anon slots where we want to extend
		 * the segment and we wouldn't know what to do with them
		 * (e.g., for tmpfs right thing is to just leave them there,
		 * for /dev/zero they should be cleared out).
		 */
		if (svd1->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		if (amp1->refcnt > 1) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
		    btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		amp1->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp1->a_rwlock);
	}
	if (svd1->vpage != NULL) {
		struct vpage *vp, *evp;

		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL)
			return (-1);

		bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
		kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
		svd1->vpage = new_vpage;

		vp = new_vpage + seg_pages(seg1);
		evp = vp + seg_pages(seg2);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);

		if (svd1->pageswap && swresv) {
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(swresv == seg2->s_size);
			vp = new_vpage + seg_pages(seg1);
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	svd1->swresv += swresv;
	if (svd1->pageprot && (a->prot & PROT_WRITE) &&
	    svd1->type == MAP_SHARED && svd1->vp != NULL &&
	    (svd1->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
		segvn_inval_trcache(svd1->vp);
	}
	return (0);
}
/*
 * Extend the next segment (seg2) to include the
 * new segment (seg1 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_next(
	struct seg *seg1,
	struct seg *seg2,
	struct segvn_crargs *a,
	size_t swresv)
{
	struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
	size_t size;
	struct anon_map *amp2;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as));

	if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* first segment is new, try to extend second */
	/* XXX - should also check cred */
	if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
	    (!svd2->pageprot && (svd2->prot != a->prot)) ||
	    svd2->type != a->type || svd2->flags != a->flags ||
	    seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
		return (-1);
	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd2->vp != NULL &&
	    (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
		return (-1);

	if (svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp2 = svd2->amp;
	if (amp2) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 *
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 */
		if (svd2->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
		if (amp2->refcnt > 1) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
		    btop(seg2->s_size), btop(seg1->s_size),
		    ANON_NOSLEEP | ANON_GROWDOWN);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		amp2->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp2->a_rwlock);
	}
	if (svd2->vpage != NULL) {
		struct vpage *vp, *evp;

		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL) {
			/* Not merging segments so adjust anon_index back */
			if (amp2)
				svd2->anon_index += seg_pages(seg1);
			return (-1);
		}
		bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
		    vpgtob(seg_pages(seg2)));
		kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
		svd2->vpage = new_vpage;

		vp = new_vpage;
		evp = vp + seg_pages(seg1);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);

		if (svd2->pageswap && swresv) {
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			ASSERT(swresv == seg1->s_size);
			vp = new_vpage;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
	size = seg1->s_size;
	seg_free(seg1);
	seg2->s_size += size;
	seg2->s_base -= size;
	svd2->offset -= size;
	svd2->swresv += swresv;
	if (svd2->pageprot && (a->prot & PROT_WRITE) &&
	    svd2->type == MAP_SHARED && svd2->vp != NULL &&
	    (svd2->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
		segvn_inval_trcache(svd2->vp);
	}
	return (0);
}
/*
 * Duplicate all the pages in the segment. This may break COW sharing for a
 * given page. If the page is marked with inherit zero set, then instead of
 * duplicating the page, we zero the page.
 */
static int
segvn_dup_pages(struct seg *seg, struct seg *newseg)
{
	int error;
	uint_t prot;
	pgcnt_t i;
	caddr_t addr;
	page_t *pp;
	struct anon *ap, *newap;
	struct vpage *vpp;
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
	ulong_t old_idx = svd->anon_index;
	ulong_t new_idx = 0;

	i = btopr(seg->s_size);
	addr = seg->s_base;

	/*
	 * XXX break cow sharing using PAGESIZE
	 * pages. They will be relocated into larger
	 * pages at fault time.
	 */
	while (i-- > 0) {
		if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
			uint_t vpprot;

			vpp = &svd->vpage[seg_page(seg, addr)];

			/*
			 * prot need not be computed below 'cause anon_private
			 * is going to ignore it anyway as child doesn't inherit
			 * pagelock from parent.
			 */
			prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;

			/*
			 * Check whether we should zero this or dup it.
			 */
			if (svd->svn_inz == SEGVN_INZ_ALL ||
			    (svd->svn_inz == SEGVN_INZ_VPP &&
			    VPP_ISINHZERO(vpp))) {
				pp = anon_zero(newseg, addr, &newap,
				    newsvd->cred);
			} else {
				page_t *anon_pl[1+1];

				error = anon_getpage(&ap, &vpprot, anon_pl,
				    PAGESIZE, seg, addr, S_READ, svd->cred);
				if (error != 0)
					return (error);

				pp = anon_private(&newap, newseg, addr, prot,
				    anon_pl[0], 0, newsvd->cred);
			}
			if (pp == NULL) {
				return (ENOMEM);
			}
			(void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
			    ANON_SLEEP);
			page_unlock(pp);
		}
		addr += PAGESIZE;
		old_idx++;
		new_idx++;
	}

	return (0);
}
static int
segvn_dup(struct seg *seg, struct seg *newseg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd;
	pgcnt_t npages = seg_pages(seg);
	int error = 0;
	size_t len;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
	ASSERT(newseg->s_as->a_proc->p_parent == curproc);

	/*
	 * If segment has anon reserved, reserve more for the new seg.
	 * For a MAP_NORESERVE segment swresv will be a count of all the
	 * allocated anon slots; thus we reserve for the child as many slots
	 * as the parent has allocated. This semantic prevents the child or
	 * parent from dieing during a copy-on-write fault caused by trying
	 * to write a shared pre-existing anon page.
	 */
	if ((len = svd->swresv) != 0) {
		if (anon_resv(svd->swresv) == 0)
			return (ENOMEM);
	}

	newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	newseg->s_ops = &segvn_ops;
	newseg->s_data = (void *)newsvd;
	newseg->s_szc = seg->s_szc;

	newsvd->seg = newseg;
	if ((newsvd->vp = svd->vp) != NULL) {
		VN_HOLD(svd->vp);
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, svd->vp);
	}
	newsvd->offset = svd->offset;
	newsvd->prot = svd->prot;
	newsvd->maxprot = svd->maxprot;
	newsvd->pageprot = svd->pageprot;
	newsvd->type = svd->type;
	newsvd->cred = svd->cred;
	crhold(newsvd->cred);
	newsvd->advice = svd->advice;
	newsvd->pageadvice = svd->pageadvice;
	newsvd->svn_inz = svd->svn_inz;
	newsvd->swresv = svd->swresv;
	newsvd->pageswap = svd->pageswap;
	newsvd->flags = svd->flags;
	newsvd->softlockcnt = 0;
	newsvd->softlockcnt_sbase = 0;
	newsvd->softlockcnt_send = 0;
	newsvd->policy_info = svd->policy_info;
	newsvd->rcookie = HAT_INVALID_REGION_COOKIE;

	if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
		/*
		 * Not attaching to a shared anon object.
		 */
		ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
		    svd->tr_state == SEGVN_TR_OFF);
		if (svd->tr_state == SEGVN_TR_ON) {
			ASSERT(newsvd->vp != NULL && amp != NULL);
			newsvd->tr_state = SEGVN_TR_INIT;
		} else {
			newsvd->tr_state = svd->tr_state;
		}
		newsvd->amp = NULL;
		newsvd->anon_index = 0;
	} else {
		/* regions for now are only used on pure vnode segments */
		ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		newsvd->tr_state = SEGVN_TR_OFF;
		if (svd->type == MAP_SHARED) {
			ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
			newsvd->amp = amp;
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			newsvd->anon_index = svd->anon_index;
		} else {
			int reclaim = 1;

			/*
			 * Allocate and initialize new anon_map structure.
			 */
			newsvd->amp = anonmap_alloc(newseg->s_size, 0,
			    ANON_SLEEP);
			newsvd->amp->a_szc = newseg->s_szc;
			newsvd->anon_index = 0;
			ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
			    svd->svn_inz == SEGVN_INZ_ALL ||
			    svd->svn_inz == SEGVN_INZ_VPP);

			/*
			 * We don't have to acquire the anon_map lock
			 * for the new segment (since it belongs to an
			 * address space that is still not associated
			 * with any process), or the segment in the old
			 * address space (since all threads in it
			 * are stopped while duplicating the address space).
			 *
			 * The goal of the following code is to make sure that
			 * softlocked pages do not end up as copy on write
			 * pages.  This would cause problems where one
			 * thread writes to a page that is COW and a different
			 * thread in the same process has softlocked it.  The
			 * softlock lock would move away from this process
			 * because the write would cause this process to get
			 * a copy (without the softlock).
			 *
			 * The strategy here is to just break the
			 * sharing on pages that could possibly be
			 * softlocked.
			 *
			 * In addition, if any pages have been marked that they
			 * should be inherited as zero, then we immediately go
			 * ahead and break COW and zero them. In the case of a
			 * softlocked page that should be inherited zero, we
			 * break COW and just get a zero page.
			 */
			if (svd->softlockcnt ||
			    svd->svn_inz != SEGVN_INZ_NONE) {
				/*
				 * The softlock count might be non zero
				 * because some pages are still stuck in the
				 * cache for lazy reclaim. Flush the cache
				 * now. This should drop the count to zero.
				 * [or there is really I/O going on to these
				 * pages]. Note, we have the writers lock so
				 * nothing gets inserted during the flush.
				 */
				if (svd->softlockcnt && reclaim == 1) {
					segvn_purge(seg);
					reclaim = 0;
				}

				error = segvn_dup_pages(seg, newseg);
				if (error != 0) {
					newsvd->vpage = NULL;
					goto out;
				}
			} else {	/* common case */
				if (seg->s_szc != 0) {
					/*
					 * If at least one of anon slots of a
					 * large page exists then make sure
					 * all anon slots of a large page
					 * exist to avoid partial cow sharing
					 * of a large page in the future.
					 */
					anon_dup_fill_holes(amp->ahp,
					    svd->anon_index, newsvd->amp->ahp,
					    0, seg->s_size, seg->s_szc,
					    svd->vp != NULL);
				} else {
					anon_dup(amp->ahp, svd->anon_index,
					    newsvd->amp->ahp, 0, seg->s_size);
				}

				hat_clrattr(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, PROT_WRITE);
			}
		}
	}
	/*
	 * If necessary, create a vpage structure for the new segment.
	 * Do not copy any page lock indications.
	 */
	if (svd->vpage != NULL) {
		uint_t i;
		struct vpage *ovp = svd->vpage;
		struct vpage *nvp;

		nvp = newsvd->vpage =
		    kmem_alloc(vpgtob(npages), KM_SLEEP);
		for (i = 0; i < npages; i++) {
			*nvp = *ovp++;
			VPP_CLRPPLOCK(nvp++);
		}
	} else
		newsvd->vpage = NULL;

	/* Inform the vnode of the new mapping */
	if (newsvd->vp != NULL) {
		error = fop_addmap(newsvd->vp, (offset_t)newsvd->offset,
		    newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
		    newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
	}
out:
	if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(newsvd->amp == NULL);
		ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
		newsvd->rcookie = svd->rcookie;
		hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
	}
	return (error);
}
/*
 * callback function to invoke free_vp_pages() for only those pages actually
 * processed by the HAT when a shared region is destroyed.
 */
extern int free_pages;

static void
segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
    size_t r_size, void *r_obj, uoff_t r_objoff)
{
	uoff_t off;
	size_t len;
	vnode_t *vp = (vnode_t *)r_obj;

	ASSERT(eaddr > saddr);
	ASSERT(saddr >= r_saddr);
	ASSERT(saddr < r_saddr + r_size);
	ASSERT(eaddr > r_saddr);
	ASSERT(eaddr <= r_saddr + r_size);

	if (!free_pages) {
		return;
	}

	len = eaddr - saddr;
	off = (saddr - r_saddr) + r_objoff;
	free_vp_pages(vp, off, len);
}
/*
 * callback function used by segvn_unmap to invoke free_vp_pages() for only
 * those pages actually processed by the HAT
 */
static void
segvn_hat_unload_callback(hat_callback_t *cb)
{
	struct seg *seg = cb->hcb_data;
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	size_t len;
	uoff_t off;

	ASSERT(svd->vp != NULL);
	ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
	ASSERT(cb->hcb_start_addr >= seg->s_base);

	len = cb->hcb_end_addr - cb->hcb_start_addr;
	off = cb->hcb_start_addr - seg->s_base;
	free_vp_pages(svd->vp, svd->offset + off, len);
}
/*
 * This function determines the number of bytes of swap reserved by
 * a segment for which per-page accounting is present. It is used to
 * calculate the correct value of a segvn_data's swresv.
 */
static size_t
segvn_count_swap_by_vpages(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	size_t nswappages = 0;

	ASSERT(svd->pageswap);
	ASSERT(svd->vpage != NULL);

	evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];

	for (vp = svd->vpage; vp < evp; vp++) {
		if (VPP_ISSWAPRES(vp))
			nswappages++;
	}

	return (nswappages << PAGESHIFT);
}
static int
segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *nsvd;
	struct seg *nseg;
	struct anon_map *amp;
	pgcnt_t	opages;		/* old segment size in pages */
	pgcnt_t	npages;		/* new segment size in pages */
	pgcnt_t	dpages;		/* pages being deleted (unmapped) */
	hat_callback_t callback;	/* used for free_vp_pages() */
	hat_callback_t *cbp = NULL;
	caddr_t nbase;
	size_t nsize;
	size_t oswresv;
	int reclaim = 1;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	/*
	 * Fail the unmap if pages are SOFTLOCKed through this mapping.
	 * softlockcnt is protected from change by the as write lock.
	 */
retry:
	if (svd->softlockcnt > 0) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);

		/*
		 * If this is shared segment non 0 softlockcnt
		 * means locked pages are still in use.
		 */
		if (svd->type == MAP_SHARED) {
			return (EAGAIN);
		}

		/*
		 * since we do have the writers lock nobody can fill
		 * the cache during the purge. The flush either succeeds
		 * or we still have pending I/Os.
		 */
		if (reclaim == 1) {
			segvn_purge(seg);
			reclaim = 0;
			goto retry;
		}
		return (EAGAIN);
	}

	/*
	 * Check for bad sizes
	 */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
		panic("segvn_unmap");
		/*NOTREACHED*/
	}

	if (seg->s_szc != 0) {
		size_t pgsz = page_get_pagesize(seg->s_szc);
		int err;

		if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
			ASSERT(seg->s_base != addr || seg->s_size != len);
			if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
				hat_leave_region(seg->s_as->a_hat,
				    svd->rcookie, HAT_REGION_TEXT);
				svd->rcookie = HAT_INVALID_REGION_COOKIE;
				/*
				 * could pass a flag to segvn_demote_range()
				 * below to tell it not to do any unloads but
				 * this case is rare enough to not bother for
				 * now.
				 */
			} else if (svd->tr_state == SEGVN_TR_INIT) {
				svd->tr_state = SEGVN_TR_OFF;
			} else if (svd->tr_state == SEGVN_TR_ON) {
				ASSERT(svd->amp != NULL);
				segvn_textunrepl(seg, 1);
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
			}
			VM_STAT_ADD(segvnvmstats.demoterange[0]);
			err = segvn_demote_range(seg, addr, len, SDR_END, 0);
			if (err == 0) {
				return (IE_RETRY);
			}
			return (err);
		}
	}
	/* Inform the vnode of the unmapping. */
	if (svd->vp) {
		int error;

		error = fop_delmap(svd->vp,
		    (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
		    seg->s_as, addr, len, svd->prot, svd->maxprot,
		    svd->type, svd->cred, NULL);

		if (error == EAGAIN)
			return (error);
	}

	/*
	 * Remove any page locks set through this mapping.
	 * If text replication is not off no page locks could have been
	 * established via this mapping.
	 */
	if (svd->tr_state == SEGVN_TR_OFF) {
		(void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
	}

	if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(svd->amp == NULL);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		ASSERT(svd->type == MAP_PRIVATE);
		hat_leave_region(seg->s_as->a_hat, svd->rcookie,
		    HAT_REGION_TEXT);
		svd->rcookie = HAT_INVALID_REGION_COOKIE;
	} else if (svd->tr_state == SEGVN_TR_ON) {
		ASSERT(svd->amp != NULL);
		ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
		segvn_textunrepl(seg, 1);
		ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
	} else {
		if (svd->tr_state != SEGVN_TR_OFF) {
			ASSERT(svd->tr_state == SEGVN_TR_INIT);
			svd->tr_state = SEGVN_TR_OFF;
		}
	}

	/*
	 * Unload any hardware translations in the range to be taken
	 * out.  Use a callback to invoke free_vp_pages() effectively.
	 */
	if (svd->vp != NULL && free_pages != 0) {
		callback.hcb_data = seg;
		callback.hcb_function = segvn_hat_unload_callback;
		cbp = &callback;
	}
	hat_unload_callback(seg->s_as->a_hat, addr, len,
	    HAT_UNLOAD_UNMAP, cbp);

	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) &&
	    ((svd->prot & PROT_WRITE) || svd->pageprot)) {
		segvn_inval_trcache(svd->vp);
	}

	/*
	 * Check for entire segment
	 */
	if (addr == seg->s_base && len == seg->s_size) {
		seg_free(seg);
		return (0);
	}
	opages = seg_pages(seg);
	dpages = btop(len);
	npages = opages - dpages;
	amp = svd->amp;
	ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);

	/*
	 * Check for beginning of segment
	 */
	if (addr == seg->s_base) {
		if (svd->vpage != NULL) {
			size_t nbytes;
			struct vpage *ovpage;

			ovpage = svd->vpage;	/* keep pointer to vpage */

			nbytes = vpgtob(npages);
			svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
			bcopy(&ovpage[dpages], svd->vpage, nbytes);

			/* free up old vpage */
			kmem_free(ovpage, vpgtob(opages));
		}
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				if (svd->type == MAP_SHARED) {
					ASSERT(amp->refcnt == 1);
					ASSERT(svd->softlockcnt == 0);
				}
				/*
				 * Free up now unused parts of anon_map array.
				 */
				if (amp->a_szc == seg->s_szc) {
					if (seg->s_szc != 0) {
						anon_free_pages(amp->ahp,
						    svd->anon_index, len,
						    seg->s_szc);
					} else {
						anon_free(amp->ahp,
						    svd->anon_index, len);
					}
				} else {
					ASSERT(svd->type == MAP_SHARED);
					ASSERT(amp->a_szc > seg->s_szc);
					anon_shmap_free_pages(amp,
					    svd->anon_index, len);
				}

				/*
				 * Unreserve swap space for the
				 * unmapped chunk of this segment in
				 * case it's MAP_SHARED
				 */
				if (svd->type == MAP_SHARED) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
					amp->swresv -= len;
				}
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index += dpages;
		}
		if (svd->vp != NULL)
			svd->offset += len;

		seg->s_base += len;
		seg->s_size -= len;

		if (svd->swresv) {
			if (svd->flags & MAP_NORESERVE) {
				ASSERT(amp);
				oswresv = svd->swresv;
				svd->swresv = ptob(anon_pages(amp->ahp,
				    svd->anon_index, npages));
				anon_unresv_zone(oswresv - svd->swresv,
				    seg->s_as->a_proc->p_zone);
				if (SEG_IS_PARTIAL_RESV(seg))
					seg->s_as->a_resvsize -= oswresv -
					    svd->swresv;
			} else {
				size_t unlen;

				if (svd->pageswap) {
					oswresv = svd->swresv;
					svd->swresv =
					    segvn_count_swap_by_vpages(seg);
					ASSERT(oswresv >= svd->swresv);
					unlen = oswresv - svd->swresv;
				} else {
					svd->swresv -= len;
					ASSERT(svd->swresv == seg->s_size);
					unlen = len;
				}
				anon_unresv_zone(unlen,
				    seg->s_as->a_proc->p_zone);
			}
		}

		return (0);
	}
	/*
	 * Check for end of segment
	 */
	if (addr + len == seg->s_base + seg->s_size) {
		if (svd->vpage != NULL) {
			size_t nbytes;
			struct vpage *ovpage;

			ovpage = svd->vpage;	/* keep pointer to vpage */

			nbytes = vpgtob(npages);
			svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
			bcopy(ovpage, svd->vpage, nbytes);

			/* free up old vpage */
			kmem_free(ovpage, vpgtob(opages));
		}
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
				/*
				 * Free up now unused parts of anon_map array.
				 */
				ulong_t an_idx = svd->anon_index + npages;

				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				if (svd->type == MAP_SHARED) {
					ASSERT(amp->refcnt == 1);
					ASSERT(svd->softlockcnt == 0);
				}

				if (amp->a_szc == seg->s_szc) {
					if (seg->s_szc != 0) {
						anon_free_pages(amp->ahp,
						    an_idx, len,
						    seg->s_szc);
					} else {
						anon_free(amp->ahp, an_idx,
						    len);
					}
				} else {
					ASSERT(svd->type == MAP_SHARED);
					ASSERT(amp->a_szc > seg->s_szc);
					anon_shmap_free_pages(amp,
					    an_idx, len);
				}

				/*
				 * Unreserve swap space for the
				 * unmapped chunk of this segment in
				 * case it's MAP_SHARED
				 */
				if (svd->type == MAP_SHARED) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
					amp->swresv -= len;
				}
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}

		seg->s_size -= len;

		if (svd->swresv) {
			if (svd->flags & MAP_NORESERVE) {
				ASSERT(amp);
				oswresv = svd->swresv;
				svd->swresv = ptob(anon_pages(amp->ahp,
				    svd->anon_index, npages));
				anon_unresv_zone(oswresv - svd->swresv,
				    seg->s_as->a_proc->p_zone);
				if (SEG_IS_PARTIAL_RESV(seg))
					seg->s_as->a_resvsize -= oswresv -
					    svd->swresv;
			} else {
				size_t unlen;

				if (svd->pageswap) {
					oswresv = svd->swresv;
					svd->swresv =
					    segvn_count_swap_by_vpages(seg);
					ASSERT(oswresv >= svd->swresv);
					unlen = oswresv - svd->swresv;
				} else {
					svd->swresv -= len;
					ASSERT(svd->swresv == seg->s_size);
					unlen = len;
				}
				anon_unresv_zone(unlen,
				    seg->s_as->a_proc->p_zone);
			}
		}

		return (0);
	}
2206 * The section to go is in the middle of the segment,
2207 * have to make it into two segments. nseg is made for
2208 * the high end while seg is cut down at the low end.
2210 nbase
= addr
+ len
; /* new seg base */
2211 nsize
= (seg
->s_base
+ seg
->s_size
) - nbase
; /* new seg size */
2212 seg
->s_size
= addr
- seg
->s_base
; /* shrink old seg */
2213 nseg
= seg_alloc(seg
->s_as
, nbase
, nsize
);
2215 panic("segvn_unmap seg_alloc");
2218 nseg
->s_ops
= seg
->s_ops
;
2219 nsvd
= kmem_cache_alloc(segvn_cache
, KM_SLEEP
);
2220 nseg
->s_data
= (void *)nsvd
;
2221 nseg
->s_szc
= seg
->s_szc
;
2224 nsvd
->offset
= svd
->offset
+ (uintptr_t)(nseg
->s_base
- seg
->s_base
);
2226 nsvd
->softlockcnt
= 0;
2227 nsvd
->softlockcnt_sbase
= 0;
2228 nsvd
->softlockcnt_send
= 0;
2229 nsvd
->svn_inz
= svd
->svn_inz
;
2230 ASSERT(nsvd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2232 if (svd
->vp
!= NULL
) {
2234 if (nsvd
->type
== MAP_SHARED
)
2235 lgrp_shm_policy_init(NULL
, nsvd
->vp
);
2239 if (svd
->vpage
== NULL
) {
2242 /* need to split vpage into two arrays */
2244 struct vpage
*ovpage
;
2246 ovpage
= svd
->vpage
; /* keep pointer to vpage */
2248 npages
= seg_pages(seg
); /* seg has shrunk */
2249 nbytes
= vpgtob(npages
);
2250 svd
->vpage
= kmem_alloc(nbytes
, KM_SLEEP
);
2252 bcopy(ovpage
, svd
->vpage
, nbytes
);
2254 npages
= seg_pages(nseg
);
2255 nbytes
= vpgtob(npages
);
2256 nsvd
->vpage
= kmem_alloc(nbytes
, KM_SLEEP
);
2258 bcopy(&ovpage
[opages
- npages
], nsvd
->vpage
, nbytes
);
2260 /* free up old vpage */
2261 kmem_free(ovpage
, vpgtob(opages
));
2266 nsvd
->anon_index
= 0;
2269 * Need to create a new anon map for the new segment.
2270 * We'll also allocate a new smaller array for the old
2271 * smaller segment to save space.
2273 opages
= btop((uintptr_t)(addr
- seg
->s_base
));
2274 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
2275 if (amp
->refcnt
== 1 || svd
->type
== MAP_PRIVATE
) {
2277 * Free up now unused parts of anon_map array.
2279 ulong_t an_idx
= svd
->anon_index
+ opages
;
2282 * Shared anon map is no longer in use. Before
2283 * freeing its pages purge all entries from
2284 * pcache that belong to this amp.
2286 if (svd
->type
== MAP_SHARED
) {
2287 ASSERT(amp
->refcnt
== 1);
2288 ASSERT(svd
->softlockcnt
== 0);
2292 if (amp
->a_szc
== seg
->s_szc
) {
2293 if (seg
->s_szc
!= 0) {
2294 anon_free_pages(amp
->ahp
, an_idx
, len
,
2297 anon_free(amp
->ahp
, an_idx
,
2301 ASSERT(svd
->type
== MAP_SHARED
);
2302 ASSERT(amp
->a_szc
> seg
->s_szc
);
2303 anon_shmap_free_pages(amp
, an_idx
, len
);
2307 * Unreserve swap space for the
2308 * unmapped chunk of this segment in
2309 * case it's MAP_SHARED
2311 if (svd
->type
== MAP_SHARED
) {
2312 anon_unresv_zone(len
,
2313 seg
->s_as
->a_proc
->p_zone
);
2317 nsvd
->anon_index
= svd
->anon_index
+
2318 btop((uintptr_t)(nseg
->s_base
- seg
->s_base
));
2319 if (svd
->type
== MAP_SHARED
) {
2323 struct anon_map
*namp
;
2324 struct anon_hdr
*nahp
;
2326 ASSERT(svd
->type
== MAP_PRIVATE
);
2327 nahp
= anon_create(btop(seg
->s_size
), ANON_SLEEP
);
2328 namp
= anonmap_alloc(nseg
->s_size
, 0, ANON_SLEEP
);
2329 namp
->a_szc
= seg
->s_szc
;
2330 (void) anon_copy_ptr(amp
->ahp
, svd
->anon_index
, nahp
,
2331 0, btop(seg
->s_size
), ANON_SLEEP
);
2332 (void) anon_copy_ptr(amp
->ahp
, nsvd
->anon_index
,
2333 namp
->ahp
, 0, btop(nseg
->s_size
), ANON_SLEEP
);
2334 anon_release(amp
->ahp
, btop(amp
->size
));
2335 svd
->anon_index
= 0;
2336 nsvd
->anon_index
= 0;
2338 amp
->size
= seg
->s_size
;
2341 ANON_LOCK_EXIT(&
->a_rwlock
);
2344 if (svd
->flags
& MAP_NORESERVE
) {
2346 oswresv
= svd
->swresv
;
2347 svd
->swresv
= ptob(anon_pages(amp
->ahp
,
2348 svd
->anon_index
, btop(seg
->s_size
)));
2349 nsvd
->swresv
= ptob(anon_pages(nsvd
->amp
->ahp
,
2350 nsvd
->anon_index
, btop(nseg
->s_size
)));
2351 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
2352 anon_unresv_zone(oswresv
- (svd
->swresv
+ nsvd
->swresv
),
2353 seg
->s_as
->a_proc
->p_zone
);
2354 if (SEG_IS_PARTIAL_RESV(seg
))
2355 seg
->s_as
->a_resvsize
-= oswresv
-
2356 (svd
->swresv
+ nsvd
->swresv
);
2360 if (svd
->pageswap
) {
2361 oswresv
= svd
->swresv
;
2362 svd
->swresv
= segvn_count_swap_by_vpages(seg
);
2363 nsvd
->swresv
= segvn_count_swap_by_vpages(nseg
);
2364 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
2365 unlen
= oswresv
- (svd
->swresv
+ nsvd
->swresv
);
2367 if (seg
->s_size
+ nseg
->s_size
+ len
!=
2369 panic("segvn_unmap: cannot split "
2370 "swap reservation");
2373 svd
->swresv
= seg
->s_size
;
2374 nsvd
->swresv
= nseg
->s_size
;
2377 anon_unresv_zone(unlen
,
2378 seg
->s_as
->a_proc
->p_zone
);
2382 return (0); /* I'm glad that's all over with! */
}

static void
segvn_free(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	pgcnt_t npages = seg_pages(seg);
	struct anon_map *amp;
	size_t len;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
	ASSERT(svd->tr_state == SEGVN_TR_OFF);

	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);

	/*
	 * Be sure to unlock pages. XXX Why do things get free'ed instead
	 * of cleaned up?
	 */
	(void) segvn_lockop(seg, seg->s_base, seg->s_size,
	    0, MC_UNLOCK, NULL, 0);
	/*
	 * Deallocate the vpage and anon pointers if necessary and possible.
	 */
	if (svd->vpage != NULL) {
		kmem_free(svd->vpage, vpgtob(npages));
		svd->vpage = NULL;
	}

	if ((amp = svd->amp) != NULL) {
		/*
		 * If there are no more references to this anon_map
		 * structure, then deallocate the structure after freeing
		 * up all the anon slot pointers that we can.
		 */
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		ASSERT(amp->a_szc >= seg->s_szc);
		if (--amp->refcnt == 0) {
			if (svd->type == MAP_PRIVATE) {
				/*
				 * Private - we only need to anon_free
				 * the part that this segment refers to.
				 */
				if (seg->s_szc != 0) {
					anon_free_pages(amp->ahp,
					    svd->anon_index, seg->s_size,
					    seg->s_szc);
				} else {
					anon_free(amp->ahp, svd->anon_index,
					    seg->s_size);
				}
			} else {
				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				ASSERT(svd->softlockcnt == 0);
				anonmap_purge(amp);

				/*
				 * Shared - anon_free the entire
				 * anon_map's worth of stuff and
				 * release any swap reservation.
				 */
				if (amp->a_szc != 0) {
					anon_shmap_free_pages(amp, 0,
					    amp->size);
				} else {
					anon_free(amp->ahp, 0, amp->size);
				}
				if ((len = amp->swresv) != 0) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
				}
			}
			svd->amp = NULL;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			anonmap_free(amp);
		} else if (svd->type == MAP_PRIVATE) {
			/*
			 * We had a private mapping which still has
			 * a held anon_map so just free up all the
			 * anon slot pointers that we were using.
			 */
			if (seg->s_szc != 0) {
				anon_free_pages(amp->ahp, svd->anon_index,
				    seg->s_size, seg->s_szc);
			} else {
				anon_free(amp->ahp, svd->anon_index,
				    seg->s_size);
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Release swap reservation.
	 */
	if ((len = svd->swresv) != 0) {
		anon_unresv_zone(svd->swresv,
		    seg->s_as->a_proc->p_zone);
		if (SEG_IS_PARTIAL_RESV(seg))
			seg->s_as->a_resvsize -= svd->swresv;
		svd->swresv = 0;
	}

	/*
	 * Release claim on vnode, credentials, and finally free the
	 * private data.
	 */
	if (svd->vp != NULL) {
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_fini(NULL, svd->vp);
		VN_RELE(svd->vp);
		svd->vp = NULL;
	}
	crfree(svd->cred);
	svd->pageadvice = 0;
	svd->cred = NULL;

	/*
	 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
	 * still working with this segment without holding as lock (in case
	 * it's called by pcache async thread).
	 */
	ASSERT(svd->softlockcnt == 0);
	mutex_enter(&svd->segfree_syncmtx);
	mutex_exit(&svd->segfree_syncmtx);

	seg->s_data = NULL;
	kmem_cache_free(segvn_cache, svd);
}
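/*
 * The empty mutex_enter()/mutex_exit() pair on segfree_syncmtx above is a
 * barrier idiom: it only waits for any segvn_reclaim() still holding the
 * mutex to drop it before svd is freed.  A minimal sketch of the pattern
 * (illustrative only; the names below are hypothetical, not part of this
 * file):
 *
 *	mutex_enter(&obj->sync_lock);	// blocks until the async user is done
 *	mutex_exit(&obj->sync_lock);	// nothing to do, we only synchronized
 *	kmem_free(obj, sizeof (*obj));	// now safe to free the object
 */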
/*
 * Do a F_SOFTUNLOCK call over the range requested.  The range must have
 * already been F_SOFTLOCK'ed.
 * Caller must always match addr and len of a softunlock with a previous
 * softlock with exactly the same addr and len.
 */
static void
segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	page_t *pp;
	caddr_t adr;
	struct vnode *vp;
	uoff_t offset;
	ulong_t anon_index = 0;
	struct anon_map *amp;
	struct anon *ap = NULL;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));

	if ((amp = svd->amp) != NULL)
		anon_index = svd->anon_index + seg_page(seg, addr);

	if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
	} else {
		hat_unlock(seg->s_as->a_hat, addr, len);
	}
	for (adr = addr; adr < addr + len; adr += PAGESIZE) {
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			if ((ap = anon_get_ptr(amp->ahp, anon_index++))
			    != NULL) {
				swap_xlate(ap, &vp, &offset);
			} else {
				vp = svd->vp;
				offset = svd->offset +
				    (uintptr_t)(adr - seg->s_base);
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			vp = svd->vp;
			offset = svd->offset +
			    (uintptr_t)(adr - seg->s_base);
		}

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it is locked.
		 */
		pp = page_find(vp, offset);
		if (pp == NULL) {
			panic(
			    "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
			    (void *)adr, (void *)ap, (void *)vp, offset);
		}

		if (rw == S_WRITE) {
			hat_setrefmod(pp);
			if (seg->s_as->a_vbits)
				hat_setstat(seg->s_as, adr, PAGESIZE,
				    P_REF | P_MOD);
		} else if (rw != S_OTHER) {
			hat_setref(pp);
			if (seg->s_as->a_vbits)
				hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
		}
		page_unlock(pp);
	}
	ASSERT(svd->softlockcnt >= btop(len));
	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
		/*
		 * All SOFTLOCKS are gone. Wakeup any waiting
		 * unmappers so they can try again to unmap.
		 * Check for waiters first without the mutex
		 * held so we don't always grab the mutex on
		 * softunlocks.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}
}
#define	PAGE_HANDLED	((page_t *)-1)

/*
 * Release all the pages in the NULL terminated ppp list
 * which haven't already been converted to PAGE_HANDLED.
 */
static void
segvn_pagelist_rele(page_t **ppp)
{
	for (; *ppp != NULL; ppp++) {
		if (*ppp != PAGE_HANDLED)
			page_unlock(*ppp);
	}
}

static int stealcow = 1;

/*
 * Workaround for viking chip bug.  See bug id 1220902.
 * To fix this down in pagefault() would require importing so
 * much as and segvn code as to be unmaintainable.
 */
int enable_mbit_wa = 0;
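/*
 * When enable_mbit_wa is nonzero, the fault paths below simulate mod-bit
 * maintenance in software: translations for pages that are not already
 * modified are loaded without PROT_WRITE, so the first store faults again
 * and the mod bit is set at that point.  Illustrative sketch of the check
 * (it mirrors the logic used later in this file):
 *
 *	if (enable_mbit_wa) {
 *		if (rw == S_WRITE)
 *			hat_setmod(pp);
 *		else if (!hat_ismod(pp))
 *			prot &= ~PROT_WRITE;
 *	}
 */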
/*
 * Handles all the dirty work of getting the right
 * anonymous pages and loading up the translations.
 * This routine is called only from segvn_fault()
 * when looping over the range of addresses requested.
 *
 * The basic algorithm here is:
 *	If this is an anon_zero case
 *		Call anon_zero to allocate page
 *		Load up translation
 *		Return
 *	endif
 *	If this is an anon page
 *		Use anon_getpage to get the page
 *	else
 *		Find page in pl[] list passed in
 *	endif
 *	If not a copy-on-write case
 *		Load up the translation to the page
 *		return
 *	endif
 *	Call anon_private to handle cow
 *	Load up (writable) translation to new page
 */
static faultcode_t
segvn_faultpage(
	struct hat *hat,		/* the hat to use for mapping */
	struct seg *seg,		/* seg_vn of interest */
	caddr_t addr,			/* address in as */
	uoff_t off,			/* offset in vp */
	struct vpage *vpage,		/* pointer to vpage for vp, off */
	page_t *pl[],			/* object source page pointer */
	uint_t vpprot,			/* access allowed to object pages */
	enum fault_type type,		/* type of fault */
	enum seg_rw rw,			/* type of access at fault */
	int brkcow)			/* we may need to break cow */
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	page_t *pp, **ppp;
	uint_t pageflags = 0;
	page_t *anon_pl[1 + 1];
	page_t *opp = NULL;		/* original page */
	uint_t prot;
	int err;
	int cow;
	int claim;
	int steal = 0;
	ulong_t anon_index = 0;
	struct anon *ap, *oldap;
	struct anon_map *amp;
	int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
	int anon_lock = 0;
	anon_sync_obj_t cookie;

	if (svd->flags & MAP_TEXT) {
		hat_flag |= HAT_LOAD_TEXT;
	}

	ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
	ASSERT(seg->s_szc == 0);
	ASSERT(svd->tr_state != SEGVN_TR_INIT);
2704 * Initialize protection value for this page.
2705 * If we have per page protection values check it now.
2707 if (svd
->pageprot
) {
2712 protchk
= PROT_READ
;
2715 protchk
= PROT_WRITE
;
2718 protchk
= PROT_EXEC
;
2722 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
2726 prot
= VPP_PROT(vpage
);
2727 if ((prot
& protchk
) == 0)
2728 return (FC_PROT
); /* illegal access type */
2733 if (type
== F_SOFTLOCK
) {
2734 atomic_inc_ulong((ulong_t
*)&svd
->softlockcnt
);
2738 * Always acquire the anon array lock to prevent 2 threads from
2739 * allocating separate anon slots for the same "addr".
2742 if ((amp
= svd
->amp
) != NULL
) {
2743 ASSERT(RW_READ_HELD(&
->a_rwlock
));
2744 anon_index
= svd
->anon_index
+ seg_page(seg
, addr
);
2745 anon_array_enter(amp
, anon_index
, &cookie
);
2749 if (svd
->vp
== NULL
&& amp
!= NULL
) {
2750 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) == NULL
) {
2752 * Allocate a (normally) writable anonymous page of
2753 * zeroes. If no advance reservations, reserve now.
2755 if (svd
->flags
& MAP_NORESERVE
) {
2756 if (anon_resv_zone(ptob(1),
2757 seg
->s_as
->a_proc
->p_zone
)) {
2758 atomic_add_long(&svd
->swresv
, ptob(1));
2759 atomic_add_long(&seg
->s_as
->a_resvsize
,
2766 if ((pp
= anon_zero(seg
, addr
, &ap
,
2767 svd
->cred
)) == NULL
) {
2769 goto out
; /* out of swap space */
2772 * Re-acquire the anon_map lock and
2773 * initialize the anon array entry.
2775 (void) anon_set_ptr(amp
->ahp
, anon_index
, ap
,
2778 ASSERT(pp
->p_szc
== 0);
2781 * Handle pages that have been marked for migration
2783 if (lgrp_optimizations())
2784 page_migrate(seg
, addr
, &pp
, 1);
2786 if (enable_mbit_wa
) {
2789 else if (!hat_ismod(pp
))
2790 prot
&= ~PROT_WRITE
;
2793 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2794 * with MC_LOCKAS, MCL_FUTURE) and this is a
2795 * MAP_NORESERVE segment, we may need to
2796 * permanently lock the page as it is being faulted
2797 * for the first time. The following text applies
2798 * only to MAP_NORESERVE segments:
2800 * As per memcntl(2), if this segment was created
2801 * after MCL_FUTURE was applied (a "future"
2802 * segment), its pages must be locked. If this
2803 * segment existed at MCL_FUTURE application (a
2804 * "past" segment), the interface is unclear.
2806 * We decide to lock only if vpage is present:
2808 * - "future" segments will have a vpage array (see
2809 * as_map), and so will be locked as required
2811 * - "past" segments may not have a vpage array,
2812 * depending on whether events (such as
2813 * mprotect) have occurred. Locking if vpage
2814 * exists will preserve legacy behavior. Not
2815 * locking if vpage is absent, will not break
2816 * the interface or legacy behavior. Note that
2817 * allocating vpage here if it's absent requires
2818 * upgrading the segvn reader lock, the cost of
2819 * which does not seem worthwhile.
2821 * Usually testing and setting VPP_ISPPLOCK and
2822 * VPP_SETPPLOCK requires holding the segvn lock as
2823 * writer, but in this case all readers are
2824 * serializing on the anon array lock.
2826 if (AS_ISPGLCK(seg
->s_as
) && vpage
!= NULL
&&
2827 (svd
->flags
& MAP_NORESERVE
) &&
2828 !VPP_ISPPLOCK(vpage
)) {
2829 proc_t
*p
= seg
->s_as
->a_proc
;
2830 ASSERT(svd
->type
== MAP_PRIVATE
);
2831 mutex_enter(&p
->p_lock
);
2832 if (rctl_incr_locked_mem(p
, NULL
, PAGESIZE
,
2834 claim
= VPP_PROT(vpage
) & PROT_WRITE
;
2835 if (page_pp_lock(pp
, claim
, 0)) {
2836 VPP_SETPPLOCK(vpage
);
2838 rctl_decr_locked_mem(p
, NULL
,
2842 mutex_exit(&p
->p_lock
);
2845 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2846 hat_memload(hat
, addr
, pp
, prot
, hat_flag
);
2848 if (!(hat_flag
& HAT_LOAD_LOCK
))
2851 anon_array_exit(&cookie
);
2857 * Obtain the page structure via anon_getpage() if it is
2858 * a private copy of an object (the result of a previous
2862 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) != NULL
) {
2863 err
= anon_getpage(&ap
, &vpprot
, anon_pl
, PAGESIZE
,
2864 seg
, addr
, rw
, svd
->cred
);
2868 if (svd
->type
== MAP_SHARED
) {
2870 * If this is a shared mapping to an
2871 * anon_map, then ignore the write
2872 * permissions returned by anon_getpage().
2873 * They apply to the private mappings
2876 vpprot
|= PROT_WRITE
;
2883 * Search the pl[] list passed in if it is from the
2884 * original object (i.e., not a private copy).
2888 * Find original page. We must be bringing it in
2889 * from the list in pl[].
2891 for (ppp
= pl
; (opp
= *ppp
) != NULL
; ppp
++) {
2892 if (opp
== PAGE_HANDLED
)
2894 ASSERT(opp
->p_vnode
== svd
->vp
); /* XXX */
2895 if (opp
->p_offset
== off
)
2899 panic("segvn_faultpage not found");
2902 *ppp
= PAGE_HANDLED
;
2906 ASSERT(PAGE_LOCKED(opp
));
2909 * The fault is treated as a copy-on-write fault if a
2910 * write occurs on a private segment and the object
2911 * page (i.e., mapping) is write protected. We assume
2912 * that fatal protection checks have already been made.
2916 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
2917 cow
= !(vpprot
& PROT_WRITE
);
2918 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
2920 * If we are doing text replication COW on first touch.
2922 ASSERT(amp
!= NULL
);
2923 ASSERT(svd
->vp
!= NULL
);
2924 ASSERT(rw
!= S_WRITE
);
2931 * If not a copy-on-write case load the translation
2937 * Handle pages that have been marked for migration
2939 if (lgrp_optimizations())
2940 page_migrate(seg
, addr
, &opp
, 1);
2942 if (IS_VMODSORT(opp
->p_vnode
) || enable_mbit_wa
) {
2945 else if (rw
!= S_OTHER
&& !hat_ismod(opp
))
2946 prot
&= ~PROT_WRITE
;
2949 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
||
2950 (!svd
->pageprot
&& svd
->prot
== (prot
& vpprot
)));
2951 ASSERT(amp
== NULL
||
2952 svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2953 hat_memload_region(hat
, addr
, opp
, prot
& vpprot
, hat_flag
,
2956 if (!(hat_flag
& HAT_LOAD_LOCK
))
2960 anon_array_exit(&cookie
);
2965 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2969 ASSERT(amp
!= NULL
&& anon_lock
);
2972 * Steal the page only if it isn't a private page
2973 * since stealing a private page is not worth the effort.
2975 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) == NULL
)
2979 * Steal the original page if the following conditions are true:
2981 * We are low on memory, the page is not private, page is not large,
2982 * not shared, not modified, not `locked' or if we have it `locked'
2983 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
2984 * that the page is not shared) and if it doesn't have any
2985 * translations. page_struct_lock isn't needed to look at p_cowcnt
2986 * and p_lckcnt because we first get exclusive lock on page.
2988 (void) hat_pagesync(opp
, HAT_SYNC_DONTZERO
| HAT_SYNC_STOPON_MOD
);
2990 if (stealcow
&& freemem
< minfree
&& steal
&& opp
->p_szc
== 0 &&
2991 page_tryupgrade(opp
) && !hat_ismod(opp
) &&
2992 ((opp
->p_lckcnt
== 0 && opp
->p_cowcnt
== 0) ||
2993 (opp
->p_lckcnt
== 0 && opp
->p_cowcnt
== 1 &&
2994 vpage
!= NULL
&& VPP_ISPPLOCK(vpage
)))) {
2996 * Check if this page has other translations
2997 * after unloading our translation.
2999 if (hat_page_is_mapped(opp
)) {
3000 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3001 hat_unload(seg
->s_as
->a_hat
, addr
, PAGESIZE
,
3006 * hat_unload() might sync back someone else's recent
3007 * modification, so check again.
3009 if (!hat_ismod(opp
) && !hat_page_is_mapped(opp
))
3010 pageflags
|= STEAL_PAGE
;
3014 * If we have a vpage pointer, see if it indicates that we have
3015 * ``locked'' the page we map -- if so, tell anon_private to
3016 * transfer the locking resource to the new page.
3018 * See Statement at the beginning of segvn_lockop regarding
3019 * the way lockcnts/cowcnts are handled during COW.
3022 if (vpage
!= NULL
&& VPP_ISPPLOCK(vpage
))
3023 pageflags
|= LOCK_PAGE
;
3026 * Allocate a private page and perform the copy.
3027 * For MAP_NORESERVE reserve swap space now, unless this
3028 * is a cow fault on an existing anon page in which case
3029 * MAP_NORESERVE will have made advance reservations.
3031 if ((svd
->flags
& MAP_NORESERVE
) && (ap
== NULL
)) {
3032 if (anon_resv_zone(ptob(1), seg
->s_as
->a_proc
->p_zone
)) {
3033 atomic_add_long(&svd
->swresv
, ptob(1));
3034 atomic_add_long(&seg
->s_as
->a_resvsize
, ptob(1));
3042 pp
= anon_private(&ap
, seg
, addr
, prot
, opp
, pageflags
, svd
->cred
);
3044 err
= ENOMEM
; /* out of swap space */
3049 * If we copied away from an anonymous page, then
3050 * we are one step closer to freeing up an anon slot.
3052 * NOTE: The original anon slot must be released while
3053 * holding the "anon_map" lock. This is necessary to prevent
3054 * other threads from obtaining a pointer to the anon slot
3055 * which may be freed if its "refcnt" is 1.
3060 (void) anon_set_ptr(amp
->ahp
, anon_index
, ap
, ANON_SLEEP
);
3063 * Handle pages that have been marked for migration
3065 if (lgrp_optimizations())
3066 page_migrate(seg
, addr
, &pp
, 1);
3068 ASSERT(pp
->p_szc
== 0);
3070 ASSERT(!IS_VMODSORT(pp
->p_vnode
));
3071 if (enable_mbit_wa
) {
3074 else if (!hat_ismod(pp
))
3075 prot
&= ~PROT_WRITE
;
3078 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3079 hat_memload(hat
, addr
, pp
, prot
, hat_flag
);
3081 if (!(hat_flag
& HAT_LOAD_LOCK
))
3085 anon_array_exit(&cookie
);
3089 anon_array_exit(&cookie
);
3091 if (type
== F_SOFTLOCK
) {
3092 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt
);
3094 return (FC_MAKE_ERR(err
));
3098 * relocate a bunch of smaller targ pages into one large repl page. all targ
3099 * pages must be complete pages smaller than replacement pages.
3100 * it's assumed that no page's szc can change since they are all PAGESIZE or
3101 * complete large pages locked SHARED.
3104 segvn_relocate_pages(page_t
**targ
, page_t
*replacement
)
3107 pgcnt_t repl_npgs
, curnpgs
;
3109 uint_t repl_szc
= replacement
->p_szc
;
3110 page_t
*first_repl
= replacement
;
3114 VM_STAT_ADD(segvnvmstats
.relocatepages
[0]);
3116 ASSERT(repl_szc
!= 0);
3117 npgs
= repl_npgs
= page_get_pagecnt(repl_szc
);
3123 ASSERT(replacement
!= NULL
);
3125 ASSERT(pp
->p_szc
< repl_szc
);
3126 ASSERT(PAGE_EXCL(pp
));
3127 ASSERT(!PP_ISFREE(pp
));
3128 curnpgs
= page_get_pagecnt(pp
->p_szc
);
3130 VM_STAT_ADD(segvnvmstats
.relocatepages
[1]);
3132 page_sub(&replacement
, repl
);
3133 ASSERT(PAGE_EXCL(repl
));
3134 ASSERT(!PP_ISFREE(repl
));
3135 ASSERT(repl
->p_szc
== repl_szc
);
3137 page_t
*repl_savepp
;
3139 VM_STAT_ADD(segvnvmstats
.relocatepages
[2]);
3140 repl_savepp
= replacement
;
3141 for (j
= 0; j
< curnpgs
; j
++) {
3143 page_sub(&replacement
, repl
);
3144 ASSERT(PAGE_EXCL(repl
));
3145 ASSERT(!PP_ISFREE(repl
));
3146 ASSERT(repl
->p_szc
== repl_szc
);
3147 ASSERT(page_pptonum(targ
[i
+ j
]) ==
3148 page_pptonum(targ
[i
]) + j
);
3151 ASSERT(IS_P2ALIGNED(page_pptonum(repl
), curnpgs
));
3153 err
= page_relocate(&pp
, &repl
, 0, 1, &nreloc
, NULL
);
3154 if (err
|| nreloc
!= curnpgs
) {
3155 panic("segvn_relocate_pages: "
3156 "page_relocate failed err=%d curnpgs=%ld "
3157 "nreloc=%ld", err
, curnpgs
, nreloc
);
3159 ASSERT(curnpgs
<= repl_npgs
);
3160 repl_npgs
-= curnpgs
;
3163 ASSERT(replacement
== NULL
);
3167 for (i
= 0; i
< repl_npgs
; i
++) {
3168 ASSERT(PAGE_EXCL(repl
));
3169 ASSERT(!PP_ISFREE(repl
));
3171 page_downgrade(targ
[i
]);
3177 * Check if all pages in ppa array are complete smaller than szc pages and
3178 * their roots will still be aligned relative to their current size if the
3179 * entire ppa array is relocated into one szc page. If these conditions are
3182 * If all pages are properly aligned attempt to upgrade their locks
3183 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3184 * upgrdfail was set to 0 by caller.
3186 * Return 1 if all pages are aligned and locked exclusively.
3188 * If all pages in ppa array happen to be physically contiguous to make one
3189 * szc page and all exclusive locks are successfully obtained promote the page
3190 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3193 segvn_full_szcpages(page_t
**ppa
, uint_t szc
, int *upgrdfail
, uint_t
*pszc
)
3197 pgcnt_t totnpgs
= page_get_pagecnt(szc
);
3208 VM_STAT_ADD(segvnvmstats
.fullszcpages
[0]);
3210 for (i
= 0; i
< totnpgs
; i
++) {
3212 ASSERT(PAGE_SHARED(pp
));
3213 ASSERT(!PP_ISFREE(pp
));
3214 pfn
= page_pptonum(pp
);
3216 if (!IS_P2ALIGNED(pfn
, totnpgs
)) {
3221 } else if (contig
&& pfn
!= first_pfn
+ i
) {
3224 if (pp
->p_szc
== 0) {
3226 VM_STAT_ADD(segvnvmstats
.fullszcpages
[1]);
3230 if ((curszc
= pp
->p_szc
) >= szc
) {
3231 VM_STAT_ADD(segvnvmstats
.fullszcpages
[2]);
3236 * p_szc changed means we don't have all pages
3237 * locked. return failure.
3239 VM_STAT_ADD(segvnvmstats
.fullszcpages
[3]);
3242 curnpgs
= page_get_pagecnt(curszc
);
3243 if (!IS_P2ALIGNED(pfn
, curnpgs
) ||
3244 !IS_P2ALIGNED(i
, curnpgs
)) {
3245 VM_STAT_ADD(segvnvmstats
.fullszcpages
[4]);
3251 VM_STAT_ADD(segvnvmstats
.fullszcpages
[5]);
3252 if (pp
->p_szc
!= curszc
) {
3253 VM_STAT_ADD(segvnvmstats
.fullszcpages
[6]);
3256 if (pfn
- 1 != page_pptonum(ppa
[i
- 1])) {
3257 panic("segvn_full_szcpages: "
3258 "large page not physically contiguous");
3260 if (P2PHASE(pfn
, curnpgs
) == curnpgs
- 1) {
3266 for (i
= 0; i
< totnpgs
; i
++) {
3267 ASSERT(ppa
[i
]->p_szc
< szc
);
3268 if (!page_tryupgrade(ppa
[i
])) {
3269 for (j
= 0; j
< i
; j
++) {
3270 page_downgrade(ppa
[j
]);
3272 *pszc
= ppa
[i
]->p_szc
;
3274 VM_STAT_ADD(segvnvmstats
.fullszcpages
[7]);
3280 * When a page is put a free cachelist its szc is set to 0. if file
3281 * system reclaimed pages from cachelist targ pages will be physically
3282 * contiguous with 0 p_szc. in this case just upgrade szc of targ
3283 * pages without any relocations.
3284 * To avoid any hat issues with previous small mappings
3285 * hat_pageunload() the target pages first.
3288 VM_STAT_ADD(segvnvmstats
.fullszcpages
[8]);
3289 for (i
= 0; i
< totnpgs
; i
++) {
3290 (void) hat_pageunload(ppa
[i
], HAT_FORCE_PGUNLOAD
);
3292 for (i
= 0; i
< totnpgs
; i
++) {
3293 ppa
[i
]->p_szc
= szc
;
3295 for (i
= 0; i
< totnpgs
; i
++) {
3296 ASSERT(PAGE_EXCL(ppa
[i
]));
3297 page_downgrade(ppa
[i
]);
3303 VM_STAT_ADD(segvnvmstats
.fullszcpages
[9]);
3308 * Create physically contiguous pages for [vp, off] - [vp, off +
3309 * page_size(szc)) range and for private segment return them in ppa array.
3310 * Pages are created either via IO or relocations.
3312 * Return 1 on success and 0 on failure.
3314 * If physically contiguous pages already exist for this range return 1 without
3315 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3316 * array wasn't filled. In this case caller fills ppa array via fop_getpage().
3320 segvn_fill_vp_pages(struct segvn_data
*svd
, vnode_t
*vp
, uoff_t off
,
3321 uint_t szc
, page_t
**ppa
, page_t
**ppplist
, uint_t
*ret_pszc
,
3325 page_t
*pplist
= *ppplist
;
3326 size_t pgsz
= page_get_pagesize(szc
);
3327 pgcnt_t pages
= btop(pgsz
);
3328 ulong_t start_off
= off
;
3329 uoff_t eoff
= off
+ pgsz
;
3331 uoff_t io_off
= off
;
3333 page_t
*io_pplist
= NULL
;
3334 page_t
*done_pplist
= NULL
;
3343 page_t
*targ_pplist
= NULL
;
3344 page_t
*repl_pplist
= NULL
;
3350 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[0]);
3353 ASSERT(pplist
->p_szc
== szc
);
3356 * downsize will be set to 1 only if we fail to lock pages. this will
3357 * allow subsequent faults to try to relocate the page again. If we
3358 * fail due to misalignment don't downsize and let the caller map the
3359 * whole region with small mappings to avoid more faults into the area
3360 * where we can't get large pages anyway.
3364 while (off
< eoff
) {
3366 ASSERT(newpp
!= NULL
);
3367 ASSERT(PAGE_EXCL(newpp
));
3368 ASSERT(!PP_ISFREE(newpp
));
3370 * we pass NULL for nrelocp to page_lookup_create()
3371 * so that it doesn't relocate. We relocate here
3372 * later only after we make sure we can lock all
3373 * pages in the range we handle and they are all
3376 pp
= page_lookup_create(vp
, off
, SE_SHARED
, newpp
, NULL
, 0);
3378 ASSERT(!PP_ISFREE(pp
));
3379 ASSERT(pp
->p_vnode
== vp
);
3380 ASSERT(pp
->p_offset
== off
);
3382 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[1]);
3383 page_sub(&pplist
, pp
);
3384 ASSERT(PAGE_EXCL(pp
));
3385 ASSERT(page_iolock_assert(pp
));
3386 page_list_concat(&io_pplist
, &pp
);
3390 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[2]);
3391 pfn
= page_pptonum(pp
);
3393 if (pszc
>= szc
&& targ_pplist
== NULL
&& io_pplist
== NULL
&&
3394 IS_P2ALIGNED(pfn
, pages
)) {
3395 ASSERT(repl_pplist
== NULL
);
3396 ASSERT(done_pplist
== NULL
);
3397 ASSERT(pplist
== *ppplist
);
3399 page_free_replacement_page(pplist
);
3400 page_create_putback(pages
);
3402 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[3]);
3407 segvn_faultvnmpss_align_err1
++;
3410 ppages
= page_get_pagecnt(pszc
);
3411 if (!IS_P2ALIGNED(pfn
, ppages
)) {
3414 * sizing down to pszc won't help.
3417 segvn_faultvnmpss_align_err2
++;
3420 pfn
= page_pptonum(newpp
);
3421 if (!IS_P2ALIGNED(pfn
, ppages
)) {
3424 * sizing down to pszc won't help.
3427 segvn_faultvnmpss_align_err3
++;
3430 if (!PAGE_EXCL(pp
)) {
3431 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[4]);
3434 *ret_pszc
= pp
->p_szc
;
3438 if (io_pplist
!= NULL
) {
3439 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[5]);
3440 io_len
= off
- io_off
;
3442 * Some file systems like NFS don't check EOF
3443 * conditions in fop_pageio(). Check it here
3444 * now that pages are locked SE_EXCL. Any file
3445 * truncation will wait until the pages are
3446 * unlocked so no need to worry that file will
3447 * be truncated after we check its size here.
3448 * XXX fix NFS to remove this check.
3450 va
.va_mask
= AT_SIZE
;
3451 if (fop_getattr(vp
, &va
, ATTR_HINT
, svd
->cred
, NULL
)) {
3452 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[6]);
3453 page_unlock(targpp
);
3456 if (btopr(va
.va_size
) < btopr(io_off
+ io_len
)) {
3457 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[7]);
3460 page_unlock(targpp
);
3463 io_err
= fop_pageio(vp
, io_pplist
, io_off
, io_len
,
3464 B_READ
, svd
->cred
, NULL
);
3466 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[8]);
3467 page_unlock(targpp
);
3468 if (io_err
== EDEADLK
) {
3469 segvn_vmpss_pageio_deadlk_err
++;
3474 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[9]);
3475 while (io_pplist
!= NULL
) {
3477 page_sub(&io_pplist
, pp
);
3478 ASSERT(page_iolock_assert(pp
));
3480 pgidx
= (pp
->p_offset
- start_off
) >>
3482 ASSERT(pgidx
< pages
);
3484 page_list_concat(&done_pplist
, &pp
);
3488 ASSERT(PAGE_EXCL(pp
));
3489 ASSERT(pp
->p_szc
<= pszc
);
3490 if (pszc
!= 0 && !group_page_trylock(pp
, SE_EXCL
)) {
3491 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[10]);
3494 *ret_pszc
= pp
->p_szc
;
3497 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[11]);
3499 * page szc chould have changed before the entire group was
3500 * locked. reread page szc.
3503 ppages
= page_get_pagecnt(pszc
);
3505 /* link just the roots */
3506 page_list_concat(&targ_pplist
, &pp
);
3507 page_sub(&pplist
, newpp
);
3508 page_list_concat(&repl_pplist
, &newpp
);
3510 while (--ppages
!= 0) {
3512 page_sub(&pplist
, newpp
);
3517 if (io_pplist
!= NULL
) {
3518 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[12]);
3519 io_len
= eoff
- io_off
;
3520 va
.va_mask
= AT_SIZE
;
3521 if (fop_getattr(vp
, &va
, ATTR_HINT
, svd
->cred
, NULL
) != 0) {
3522 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[13]);
3525 if (btopr(va
.va_size
) < btopr(io_off
+ io_len
)) {
3526 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[14]);
3531 io_err
= fop_pageio(vp
, io_pplist
, io_off
, io_len
,
3532 B_READ
, svd
->cred
, NULL
);
3534 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[15]);
3535 if (io_err
== EDEADLK
) {
3536 segvn_vmpss_pageio_deadlk_err
++;
3541 while (io_pplist
!= NULL
) {
3543 page_sub(&io_pplist
, pp
);
3544 ASSERT(page_iolock_assert(pp
));
3546 pgidx
= (pp
->p_offset
- start_off
) >> PAGESHIFT
;
3547 ASSERT(pgidx
< pages
);
3552 * we're now bound to succeed or panic.
3553 * remove pages from done_pplist. it's not needed anymore.
3555 while (done_pplist
!= NULL
) {
3557 page_sub(&done_pplist
, pp
);
3559 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[16]);
3560 ASSERT(pplist
== NULL
);
3562 while (targ_pplist
!= NULL
) {
3564 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[17]);
3565 ASSERT(repl_pplist
);
3567 page_sub(&targ_pplist
, pp
);
3568 pgidx
= (pp
->p_offset
- start_off
) >> PAGESHIFT
;
3569 newpp
= repl_pplist
;
3570 page_sub(&repl_pplist
, newpp
);
3572 pfn
= page_pptonum(pp
);
3574 ppages
= page_get_pagecnt(pszc
);
3575 ASSERT(IS_P2ALIGNED(pfn
, ppages
));
3576 pfn
= page_pptonum(newpp
);
3577 ASSERT(IS_P2ALIGNED(pfn
, ppages
));
3578 ASSERT(P2PHASE(pfn
, pages
) == pgidx
);
3581 ret
= page_relocate(&pp
, &newpp
, 0, 1, &nreloc
, NULL
);
3582 if (ret
!= 0 || nreloc
== 0) {
3583 panic("segvn_fill_vp_pages: "
3584 "page_relocate failed");
3587 while (nreloc
-- != 0) {
3588 ASSERT(PAGE_EXCL(pp
));
3589 ASSERT(pp
->p_vnode
== vp
);
3591 ((pp
->p_offset
- start_off
) >> PAGESHIFT
));
3597 if (svd
->type
== MAP_PRIVATE
) {
3598 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[18]);
3599 for (i
= 0; i
< pages
; i
++) {
3600 ASSERT(ppa
[i
] != NULL
);
3601 ASSERT(PAGE_EXCL(ppa
[i
]));
3602 ASSERT(ppa
[i
]->p_vnode
== vp
);
3603 ASSERT(ppa
[i
]->p_offset
==
3604 start_off
+ (i
<< PAGESHIFT
));
3605 page_downgrade(ppa
[i
]);
3609 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[19]);
3611 * the caller will still call fop_getpage() for shared segments
3612 * to check FS write permissions. For private segments we map
3613 * file read only anyway. so no fop_getpage is needed.
3615 for (i
= 0; i
< pages
; i
++) {
3616 ASSERT(ppa
[i
] != NULL
);
3617 ASSERT(PAGE_EXCL(ppa
[i
]));
3618 ASSERT(ppa
[i
]->p_vnode
== vp
);
3619 ASSERT(ppa
[i
]->p_offset
==
3620 start_off
+ (i
<< PAGESHIFT
));
3621 page_unlock(ppa
[i
]);
3629 * Do the cleanup. Unlock target pages we didn't relocate. They are
3630 * linked on targ_pplist by root pages. reassemble unused replacement
3631 * and io pages back to pplist.
3633 if (io_pplist
!= NULL
) {
3634 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[20]);
3637 ASSERT(pp
->p_vnode
== vp
);
3638 ASSERT(pp
->p_offset
== io_off
);
3639 ASSERT(page_iolock_assert(pp
));
3641 page_hashout(pp
, NULL
);
3643 } while ((pp
= pp
->p_next
) != io_pplist
);
3644 page_list_concat(&io_pplist
, &pplist
);
3648 while (targ_pplist
!= NULL
) {
3649 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[21]);
3651 ASSERT(PAGE_EXCL(pp
));
3652 page_sub(&targ_pplist
, pp
);
3655 ppages
= page_get_pagecnt(pszc
);
3656 ASSERT(IS_P2ALIGNED(page_pptonum(pp
), ppages
));
3659 group_page_unlock(pp
);
3665 ASSERT(PAGE_EXCL(pp
));
3666 ASSERT(pp
->p_szc
== szc
);
3667 page_sub(&repl_pplist
, pp
);
3669 ASSERT(IS_P2ALIGNED(page_pptonum(pp
), ppages
));
3671 /* relink replacement page */
3672 page_list_concat(&tmp_pplist
, &pp
);
3673 while (--ppages
!= 0) {
3674 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[22]);
3676 ASSERT(PAGE_EXCL(pp
));
3677 ASSERT(pp
->p_szc
== szc
);
3678 page_list_concat(&tmp_pplist
, &pp
);
3681 if (tmp_pplist
!= NULL
) {
3682 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[23]);
3683 page_list_concat(&tmp_pplist
, &pplist
);
3684 pplist
= tmp_pplist
;
3687 * at this point all pages are either on done_pplist or
3688 * pplist. They can't be all on done_pplist otherwise
3689 * we'd've been done.
3691 ASSERT(pplist
!= NULL
);
3693 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[24]);
3696 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[25]);
3697 ASSERT(pp
->p_szc
== szc
);
3698 ASSERT(PAGE_EXCL(pp
));
3699 ASSERT(pp
->p_vnode
!= vp
);
3701 } while ((pp
= pp
->p_next
) != pplist
);
3705 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[26]);
3706 ASSERT(pp
->p_szc
== szc
);
3707 ASSERT(PAGE_EXCL(pp
));
3708 ASSERT(pp
->p_vnode
== vp
);
3710 } while ((pp
= pp
->p_next
) != done_pplist
);
3712 while (pplist
!= NULL
) {
3713 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[27]);
3715 page_sub(&pplist
, pp
);
3719 while (done_pplist
!= NULL
) {
3720 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[28]);
3722 page_sub(&done_pplist
, pp
);
3728 ASSERT(pplist
== *ppplist
);
3730 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[29]);
3732 * don't downsize on io error.
3733 * see if vop_getpage succeeds.
3734 * pplist may still be used in this case
3739 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[30]);
3740 page_free_replacement_page(pplist
);
3741 page_create_putback(pages
);
int segvn_anypgsz = 0;

#define	SEGVN_RESTORE_SOFTLOCK_VP(type, pages)				\
		if ((type) == F_SOFTLOCK) {				\
			atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
			    -(pages));					\
		}

#define	SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot)		\
		if (IS_VMODSORT((ppa)[0]->p_vnode)) {			\
			if ((rw) == S_WRITE) {				\
				for (i = 0; i < (pages); i++) {		\
					ASSERT((ppa)[i]->p_vnode ==	\
					    (ppa)[0]->p_vnode);		\
					hat_setmod((ppa)[i]);		\
				}					\
			} else if ((rw) != S_OTHER &&			\
			    ((prot) & (vpprot) & PROT_WRITE)) {		\
				for (i = 0; i < (pages); i++) {		\
					ASSERT((ppa)[i]->p_vnode ==	\
					    (ppa)[0]->p_vnode);		\
					if (!hat_ismod((ppa)[i])) {	\
						prot &= ~PROT_WRITE;	\
						break;			\
					}				\
				}					\
			}						\
		}

#define	SEGVN_VMSTAT_FLTVNPAGES(idx)					\
		VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
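/*
 * Usage note for the helpers above: SEGVN_RESTORE_SOFTLOCK_VP appears to
 * back out the softlockcnt bump taken earlier in an F_SOFTLOCK fault when
 * the large-page path has to bail out, and SEGVN_UPDATE_MODBITS applies the
 * same VMODSORT mod-bit logic as segvn_faultpage() to a whole ppa[] array:
 * S_WRITE marks every constituent page modified, while other access types
 * drop PROT_WRITE from the mapping if any constituent page is still clean,
 * so the first store will fault and set the mod bit then.
 */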
3779 segvn_fault_vnodepages(struct hat
*hat
, struct seg
*seg
, caddr_t lpgaddr
,
3780 caddr_t lpgeaddr
, enum fault_type type
, enum seg_rw rw
, caddr_t addr
,
3781 caddr_t eaddr
, int brkcow
)
3783 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
3784 struct anon_map
*amp
= svd
->amp
;
3785 uchar_t segtype
= svd
->type
;
3786 uint_t szc
= seg
->s_szc
;
3787 size_t pgsz
= page_get_pagesize(szc
);
3788 size_t maxpgsz
= pgsz
;
3789 pgcnt_t pages
= btop(pgsz
);
3790 pgcnt_t maxpages
= pages
;
3791 size_t ppasize
= (pages
+ 1) * sizeof (page_t
*);
3792 caddr_t a
= lpgaddr
;
3793 caddr_t maxlpgeaddr
= lpgeaddr
;
3794 uoff_t off
= svd
->offset
+ (uintptr_t)(a
- seg
->s_base
);
3795 ulong_t aindx
= svd
->anon_index
+ seg_page(seg
, a
);
3796 struct vpage
*vpage
= (svd
->vpage
!= NULL
) ?
3797 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
3798 vnode_t
*vp
= svd
->vp
;
3803 faultcode_t err
= 0;
3805 int vop_size_err
= 0;
3806 uint_t protchk
, prot
, vpprot
;
3808 int hat_flag
= (type
== F_SOFTLOCK
) ? HAT_LOAD_LOCK
: HAT_LOAD
;
3809 anon_sync_obj_t an_cookie
;
3811 int alloc_failed
= 0;
3818 int segvn_anypgsz_vnode
= 0; /* for now map vnode with 2 page sizes */
3819 int tron
= (svd
->tr_state
== SEGVN_TR_ON
);
3823 ASSERT(brkcow
== 0 || amp
!= NULL
);
3824 ASSERT(tron
== 0 || amp
!= NULL
);
3825 ASSERT(enable_mbit_wa
== 0); /* no mbit simulations with large pages */
3826 ASSERT(!(svd
->flags
& MAP_NORESERVE
));
3827 ASSERT(type
!= F_SOFTUNLOCK
);
3828 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
3829 ASSERT(amp
== NULL
|| IS_P2ALIGNED(aindx
, maxpages
));
3830 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
3831 ASSERT(seg
->s_szc
< NBBY
* sizeof (int));
3832 ASSERT(type
!= F_SOFTLOCK
|| lpgeaddr
- a
== maxpgsz
);
3833 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
3835 VM_STAT_COND_ADD(type
== F_SOFTLOCK
, segvnvmstats
.fltvnpages
[0]);
3836 VM_STAT_COND_ADD(type
!= F_SOFTLOCK
, segvnvmstats
.fltvnpages
[1]);
3838 if (svd
->flags
& MAP_TEXT
) {
3839 hat_flag
|= HAT_LOAD_TEXT
;
3842 if (svd
->pageprot
) {
3845 protchk
= PROT_READ
;
3848 protchk
= PROT_WRITE
;
3851 protchk
= PROT_EXEC
;
3855 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
3860 /* caller has already done segment level protection check. */
3863 if (rw
== S_WRITE
&& segtype
== MAP_PRIVATE
) {
3864 SEGVN_VMSTAT_FLTVNPAGES(2);
3870 ppa
= kmem_alloc(ppasize
, KM_SLEEP
);
3872 VM_STAT_COND_ADD(amp
!= NULL
, segvnvmstats
.fltvnpages
[3]);
3876 for (; a
< lpgeaddr
; a
+= pgsz
, off
+= pgsz
, aindx
+= pages
) {
3878 while (szc
< seg
->s_szc
) {
3881 tszc
= segvn_anypgsz_vnode
? szc
+ 1 :
3883 ppgsz
= page_get_pagesize(tszc
);
3884 if (!IS_P2ALIGNED(a
, ppgsz
) ||
3885 ((alloc_failed
>> tszc
) & 0x1)) {
3888 SEGVN_VMSTAT_FLTVNPAGES(4);
3892 e
= P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
3893 lpgeaddr
= (caddr_t
)e
;
3898 if (IS_P2ALIGNED(a
, maxpgsz
) && amp
!= NULL
) {
3899 ASSERT(IS_P2ALIGNED(aindx
, maxpages
));
3900 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
3901 anon_array_enter(amp
, aindx
, &an_cookie
);
3902 if (anon_get_ptr(amp
->ahp
, aindx
) != NULL
) {
3903 SEGVN_VMSTAT_FLTVNPAGES(5);
3904 ASSERT(anon_pages(amp
->ahp
, aindx
,
3905 maxpages
) == maxpages
);
3906 anon_array_exit(&an_cookie
);
3907 ANON_LOCK_EXIT(&
->a_rwlock
);
3908 err
= segvn_fault_anonpages(hat
, seg
,
3909 a
, a
+ maxpgsz
, type
, rw
,
3911 MIN(a
+ maxpgsz
, eaddr
), brkcow
);
3913 SEGVN_VMSTAT_FLTVNPAGES(6);
3916 if (szc
< seg
->s_szc
) {
3920 lpgeaddr
= maxlpgeaddr
;
3924 ASSERT(anon_pages(amp
->ahp
, aindx
,
3926 SEGVN_VMSTAT_FLTVNPAGES(7);
3927 anon_array_exit(&an_cookie
);
3928 ANON_LOCK_EXIT(&
->a_rwlock
);
3931 ASSERT(!brkcow
|| IS_P2ALIGNED(a
, maxpgsz
));
3932 ASSERT(!tron
|| IS_P2ALIGNED(a
, maxpgsz
));
3934 if (svd
->pageprot
!= 0 && IS_P2ALIGNED(a
, maxpgsz
)) {
3935 ASSERT(vpage
!= NULL
);
3936 prot
= VPP_PROT(vpage
);
3937 ASSERT(sameprot(seg
, a
, maxpgsz
));
3938 if ((prot
& protchk
) == 0) {
3939 SEGVN_VMSTAT_FLTVNPAGES(8);
3944 if (type
== F_SOFTLOCK
) {
3945 atomic_add_long((ulong_t
*)&svd
->softlockcnt
,
3952 if (!brkcow
&& !tron
&& szc
&&
3953 !page_exists_physcontig(vp
, off
, szc
,
3954 segtype
== MAP_PRIVATE
? ppa
: NULL
)) {
3955 SEGVN_VMSTAT_FLTVNPAGES(9);
3956 if (page_alloc_pages(vp
, seg
, a
, &pplist
, NULL
,
3957 szc
, 0, 0) && type
!= F_SOFTLOCK
) {
3958 SEGVN_VMSTAT_FLTVNPAGES(10);
3961 alloc_failed
|= (1 << szc
);
3964 if (pplist
!= NULL
&&
3965 vp
->v_mpssdata
== SEGVN_PAGEIO
) {
3967 SEGVN_VMSTAT_FLTVNPAGES(11);
3968 physcontig
= segvn_fill_vp_pages(svd
,
3969 vp
, off
, szc
, ppa
, &pplist
,
3971 ASSERT(!physcontig
|| pplist
== NULL
);
3972 if (!physcontig
&& downsize
&&
3973 type
!= F_SOFTLOCK
) {
3974 ASSERT(pplist
== NULL
);
3975 SEGVN_VMSTAT_FLTVNPAGES(12);
3979 ASSERT(!physcontig
||
3980 segtype
== MAP_PRIVATE
||
3982 if (physcontig
&& ppa
[0] == NULL
) {
3986 } else if (!brkcow
&& !tron
&& szc
&& ppa
[0] != NULL
) {
3987 SEGVN_VMSTAT_FLTVNPAGES(13);
3988 ASSERT(segtype
== MAP_PRIVATE
);
3993 SEGVN_VMSTAT_FLTVNPAGES(14);
3995 ierr
= fop_getpage(vp
, (offset_t
)off
, pgsz
,
3996 &vpprot
, ppa
, pgsz
, seg
, a
, arw
,
4000 for (i
= 0; i
< pages
; i
++) {
4001 ASSERT(PAGE_LOCKED(ppa
[i
]));
4002 ASSERT(!PP_ISFREE(ppa
[i
]));
4003 ASSERT(ppa
[i
]->p_vnode
== vp
);
4004 ASSERT(ppa
[i
]->p_offset
==
4005 off
+ (i
<< PAGESHIFT
));
4009 if (segtype
== MAP_PRIVATE
) {
4010 SEGVN_VMSTAT_FLTVNPAGES(15);
4011 vpprot
&= ~PROT_WRITE
;
4014 ASSERT(segtype
== MAP_PRIVATE
);
4015 SEGVN_VMSTAT_FLTVNPAGES(16);
4016 vpprot
= PROT_ALL
& ~PROT_WRITE
;
4021 SEGVN_VMSTAT_FLTVNPAGES(17);
4022 if (pplist
!= NULL
) {
4023 SEGVN_VMSTAT_FLTVNPAGES(18);
4024 page_free_replacement_page(pplist
);
4025 page_create_putback(pages
);
4027 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4028 if (a
+ pgsz
<= eaddr
) {
4029 SEGVN_VMSTAT_FLTVNPAGES(19);
4030 err
= FC_MAKE_ERR(ierr
);
4033 va
.va_mask
= AT_SIZE
;
4034 if (fop_getattr(vp
, &va
, 0, svd
->cred
, NULL
)) {
4035 SEGVN_VMSTAT_FLTVNPAGES(20);
4036 err
= FC_MAKE_ERR(EIO
);
4039 if (btopr(va
.va_size
) >= btopr(off
+ pgsz
)) {
4040 SEGVN_VMSTAT_FLTVNPAGES(21);
4041 err
= FC_MAKE_ERR(ierr
);
4044 if (btopr(va
.va_size
) <
4045 btopr(off
+ (eaddr
- a
))) {
4046 SEGVN_VMSTAT_FLTVNPAGES(22);
4047 err
= FC_MAKE_ERR(ierr
);
4050 if (brkcow
|| tron
|| type
== F_SOFTLOCK
) {
4051 /* can't reduce map area */
4052 SEGVN_VMSTAT_FLTVNPAGES(23);
4056 SEGVN_VMSTAT_FLTVNPAGES(24);
4064 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
4065 anon_array_enter(amp
, aindx
, &an_cookie
);
4068 anon_get_ptr(amp
->ahp
, aindx
) != NULL
) {
4069 ulong_t taindx
= P2ALIGN(aindx
, maxpages
);
4071 SEGVN_VMSTAT_FLTVNPAGES(25);
4072 ASSERT(anon_pages(amp
->ahp
, taindx
,
4073 maxpages
) == maxpages
);
4074 for (i
= 0; i
< pages
; i
++) {
4075 page_unlock(ppa
[i
]);
4077 anon_array_exit(&an_cookie
);
4078 ANON_LOCK_EXIT(&
->a_rwlock
);
4079 if (pplist
!= NULL
) {
4080 page_free_replacement_page(pplist
);
4081 page_create_putback(pages
);
4083 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4084 if (szc
< seg
->s_szc
) {
4085 SEGVN_VMSTAT_FLTVNPAGES(26);
4087 * For private segments SOFTLOCK
4088 * either always breaks cow (any rw
4089 * type except S_READ_NOCOW) or
4090 * address space is locked as writer
4091 * (S_READ_NOCOW case) and anon slots
4092 * can't show up on second check.
4093 * Therefore if we are here for
4094 * SOFTLOCK case it must be a cow
4095 * break but cow break never reduces
4096 * szc. text replication (tron) in
4097 * this case works as cow break.
4098 * Thus the assert below.
4100 ASSERT(!brkcow
&& !tron
&&
4101 type
!= F_SOFTLOCK
);
4106 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4111 ulong_t taindx
= P2ALIGN(aindx
, maxpages
);
4112 ASSERT(!anon_pages(amp
->ahp
, taindx
, maxpages
));
4116 if (brkcow
|| tron
) {
4117 ASSERT(amp
!= NULL
);
4118 ASSERT(pplist
== NULL
);
4119 ASSERT(szc
== seg
->s_szc
);
4120 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4121 ASSERT(IS_P2ALIGNED(aindx
, maxpages
));
4122 SEGVN_VMSTAT_FLTVNPAGES(27);
4123 ierr
= anon_map_privatepages(amp
, aindx
, szc
,
4124 seg
, a
, prot
, ppa
, vpage
, segvn_anypgsz
,
4125 tron
? PG_LOCAL
: 0, svd
->cred
);
4127 SEGVN_VMSTAT_FLTVNPAGES(28);
4128 anon_array_exit(&an_cookie
);
4129 ANON_LOCK_EXIT(&
->a_rwlock
);
4130 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4131 err
= FC_MAKE_ERR(ierr
);
4135 ASSERT(!IS_VMODSORT(ppa
[0]->p_vnode
));
4137 * p_szc can't be changed for locked
4140 ASSERT(svd
->rcookie
==
4141 HAT_INVALID_REGION_COOKIE
);
4142 hat_memload_array(hat
, a
, pgsz
, ppa
, prot
,
4145 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4146 SEGVN_VMSTAT_FLTVNPAGES(29);
4147 for (i
= 0; i
< pages
; i
++) {
4148 page_unlock(ppa
[i
]);
4151 anon_array_exit(&an_cookie
);
4152 ANON_LOCK_EXIT(&
->a_rwlock
);
4156 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
||
4157 (!svd
->pageprot
&& svd
->prot
== (prot
& vpprot
)));
4159 pfn
= page_pptonum(ppa
[0]);
4161 * hat_page_demote() needs an SE_EXCL lock on one of
4162 * constituent page_t's and it decreases root's p_szc
4163 * last. This means if root's p_szc is equal szc and
4164 * all its constituent pages are locked
4165 * hat_page_demote() that could have changed p_szc to
4166 * szc is already done and no new have page_demote()
4167 * can start for this large page.
4171 * we need to make sure same mapping size is used for
4172 * the same address range if there's a possibility the
4173 * adddress is already mapped because hat layer panics
4174 * when translation is loaded for the range already
4175 * mapped with a different page size. We achieve it
4176 * by always using largest page size possible subject
4177 * to the constraints of page size, segment page size
4178 * and page alignment. Since mappings are invalidated
4179 * when those constraints change and make it
4180 * impossible to use previously used mapping size no
4181 * mapping size conflicts should happen.
4185 if ((pszc
= ppa
[0]->p_szc
) == szc
&&
4186 IS_P2ALIGNED(pfn
, pages
)) {
4188 SEGVN_VMSTAT_FLTVNPAGES(30);
4190 for (i
= 0; i
< pages
; i
++) {
4191 ASSERT(PAGE_LOCKED(ppa
[i
]));
4192 ASSERT(!PP_ISFREE(ppa
[i
]));
4193 ASSERT(page_pptonum(ppa
[i
]) ==
4195 ASSERT(ppa
[i
]->p_szc
== szc
);
4196 ASSERT(ppa
[i
]->p_vnode
== vp
);
4197 ASSERT(ppa
[i
]->p_offset
==
4198 off
+ (i
<< PAGESHIFT
));
4202 * All pages are of szc we need and they are
4203 * all locked so they can't change szc. load
4206 * if page got promoted since last check
4207 * we don't need pplist.
4209 if (pplist
!= NULL
) {
4210 page_free_replacement_page(pplist
);
4211 page_create_putback(pages
);
4213 if (PP_ISMIGRATE(ppa
[0])) {
4214 page_migrate(seg
, a
, ppa
, pages
);
4216 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4218 hat_memload_array_region(hat
, a
, pgsz
,
4219 ppa
, prot
& vpprot
, hat_flag
,
4222 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4223 for (i
= 0; i
< pages
; i
++) {
4224 page_unlock(ppa
[i
]);
4228 anon_array_exit(&an_cookie
);
4229 ANON_LOCK_EXIT(&
->a_rwlock
);
4235 * See if upsize is possible.
4237 if (pszc
> szc
&& szc
< seg
->s_szc
&&
4238 (segvn_anypgsz_vnode
|| pszc
>= seg
->s_szc
)) {
4240 uint_t pszc1
= MIN(pszc
, seg
->s_szc
);
4241 ppgsz
= page_get_pagesize(pszc1
);
4242 ppages
= btop(ppgsz
);
4243 aphase
= btop(P2PHASE((uintptr_t)a
, ppgsz
));
4245 ASSERT(type
!= F_SOFTLOCK
);
4247 SEGVN_VMSTAT_FLTVNPAGES(31);
4248 if (aphase
!= P2PHASE(pfn
, ppages
)) {
4249 segvn_faultvnmpss_align_err4
++;
4251 SEGVN_VMSTAT_FLTVNPAGES(32);
4252 if (pplist
!= NULL
) {
4253 page_t
*pl
= pplist
;
4254 page_free_replacement_page(pl
);
4255 page_create_putback(pages
);
4257 for (i
= 0; i
< pages
; i
++) {
4258 page_unlock(ppa
[i
]);
4261 anon_array_exit(&an_cookie
);
4262 ANON_LOCK_EXIT(&
->a_rwlock
);
4271 * check if we should use smallest mapping size.
4276 !IS_P2ALIGNED(pfn
, pages
)) ||
4278 !segvn_full_szcpages(ppa
, szc
, &upgrdfail
,
4281 if (upgrdfail
&& type
!= F_SOFTLOCK
) {
4283 * segvn_full_szcpages failed to lock
4284 * all pages EXCL. Size down.
4288 SEGVN_VMSTAT_FLTVNPAGES(33);
4290 if (pplist
!= NULL
) {
4291 page_t
*pl
= pplist
;
4292 page_free_replacement_page(pl
);
4293 page_create_putback(pages
);
4296 for (i
= 0; i
< pages
; i
++) {
4297 page_unlock(ppa
[i
]);
4300 anon_array_exit(&an_cookie
);
4301 ANON_LOCK_EXIT(&
->a_rwlock
);
4306 if (szc
!= 0 && !upgrdfail
) {
4307 segvn_faultvnmpss_align_err5
++;
4309 SEGVN_VMSTAT_FLTVNPAGES(34);
4310 if (pplist
!= NULL
) {
4311 page_free_replacement_page(pplist
);
4312 page_create_putback(pages
);
4314 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4316 if (upgrdfail
&& segvn_anypgsz_vnode
) {
4318 hat_memload_array_region(hat
, a
, pgsz
,
4319 ppa
, prot
& vpprot
, hat_flag
,
4322 for (i
= 0; i
< pages
; i
++) {
4323 hat_memload_region(hat
,
4324 a
+ (i
<< PAGESHIFT
),
4325 ppa
[i
], prot
& vpprot
,
4326 hat_flag
, svd
->rcookie
);
4329 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4330 for (i
= 0; i
< pages
; i
++) {
4331 page_unlock(ppa
[i
]);
4335 anon_array_exit(&an_cookie
);
4336 ANON_LOCK_EXIT(&
->a_rwlock
);
4343 * segvn_full_szcpages() upgraded pages szc.
4345 ASSERT(pszc
== ppa
[0]->p_szc
);
4346 ASSERT(IS_P2ALIGNED(pfn
, pages
));
4352 SEGVN_VMSTAT_FLTVNPAGES(35);
4354 * p_szc of ppa[0] can change since we haven't
4355 * locked all constituent pages. Call
4356 * page_lock_szc() to prevent szc changes.
4357 * This should be a rare case that happens when
4358 * multiple segments use a different page size
4359 * to map the same file offsets.
4361 szcmtx
= page_szc_lock(ppa
[0]);
4362 pszc
= ppa
[0]->p_szc
;
4363 ASSERT(szcmtx
!= NULL
|| pszc
== 0);
4364 ASSERT(ppa
[0]->p_szc
<= pszc
);
4366 SEGVN_VMSTAT_FLTVNPAGES(36);
4367 if (szcmtx
!= NULL
) {
4372 if (pplist
!= NULL
) {
4374 * page got promoted since last check.
4375 * we don't need preaalocated large
4378 SEGVN_VMSTAT_FLTVNPAGES(37);
4379 page_free_replacement_page(pplist
);
4380 page_create_putback(pages
);
4382 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4384 hat_memload_array_region(hat
, a
, pgsz
, ppa
,
4385 prot
& vpprot
, hat_flag
, svd
->rcookie
);
4387 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4388 for (i
= 0; i
< pages
; i
++) {
4389 page_unlock(ppa
[i
]);
4393 anon_array_exit(&an_cookie
);
4394 ANON_LOCK_EXIT(&
->a_rwlock
);
4400 * if page got demoted since last check
4401 * we could have not allocated larger page.
4404 if (pplist
== NULL
&&
4405 page_alloc_pages(vp
, seg
, a
, &pplist
, NULL
,
4406 szc
, 0, 0) && type
!= F_SOFTLOCK
) {
4407 SEGVN_VMSTAT_FLTVNPAGES(38);
4408 for (i
= 0; i
< pages
; i
++) {
4409 page_unlock(ppa
[i
]);
4412 anon_array_exit(&an_cookie
);
4413 ANON_LOCK_EXIT(&
->a_rwlock
);
4416 alloc_failed
|= (1 << szc
);
4420 SEGVN_VMSTAT_FLTVNPAGES(39);
4422 if (pplist
!= NULL
) {
4423 segvn_relocate_pages(ppa
, pplist
);
4426 ASSERT(type
== F_SOFTLOCK
);
4427 SEGVN_VMSTAT_FLTVNPAGES(40);
4431 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
, prot
, vpprot
);
4433 if (pplist
== NULL
&& segvn_anypgsz_vnode
== 0) {
4434 ASSERT(type
== F_SOFTLOCK
);
4435 for (i
= 0; i
< pages
; i
++) {
4436 ASSERT(ppa
[i
]->p_szc
< szc
);
4437 hat_memload_region(hat
,
4438 a
+ (i
<< PAGESHIFT
),
4439 ppa
[i
], prot
& vpprot
, hat_flag
,
4443 ASSERT(pplist
!= NULL
|| type
== F_SOFTLOCK
);
4444 hat_memload_array_region(hat
, a
, pgsz
, ppa
,
4445 prot
& vpprot
, hat_flag
, svd
->rcookie
);
4447 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4448 for (i
= 0; i
< pages
; i
++) {
4449 ASSERT(PAGE_SHARED(ppa
[i
]));
4450 page_unlock(ppa
[i
]);
4454 anon_array_exit(&an_cookie
);
4455 ANON_LOCK_EXIT(&
->a_rwlock
);
4459 if (vpage
!= NULL
) {
4466 ASSERT(a
< lpgeaddr
);
4468 ASSERT(!brkcow
&& !tron
&& type
!= F_SOFTLOCK
);
4471 * ierr == -1 means we failed to map with a large page.
4472 * (either due to allocation/relocation failures or
4473 * misalignment with other mappings to this file.
4475 * ierr == -2 means some other thread allocated a large page
4476 * after we gave up tp map with a large page. retry with
4479 ASSERT(ierr
== -1 || ierr
== -2);
4480 ASSERT(ierr
== -2 || szc
!= 0);
4481 ASSERT(ierr
== -1 || szc
< seg
->s_szc
);
4483 SEGVN_VMSTAT_FLTVNPAGES(41);
4484 ASSERT(pszc
> szc
&& pszc
<= seg
->s_szc
);
4486 } else if (segvn_anypgsz_vnode
) {
4487 SEGVN_VMSTAT_FLTVNPAGES(42);
4490 SEGVN_VMSTAT_FLTVNPAGES(43);
4493 * other process created pszc large page.
4494 * but we still have to drop to 0 szc.
4499 pgsz
= page_get_pagesize(szc
);
4503 * Size up case. Note lpgaddr may only be needed for
4504 * softlock case so we don't adjust it here.
4506 a
= (caddr_t
)P2ALIGN((uintptr_t)a
, pgsz
);
4507 ASSERT(a
>= lpgaddr
);
4508 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4509 off
= svd
->offset
+ (uintptr_t)(a
- seg
->s_base
);
4510 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4511 vpage
= (svd
->vpage
!= NULL
) ?
4512 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4515 * Size down case. Note lpgaddr may only be needed for
4516 * softlock case so we don't adjust it here.
4518 ASSERT(IS_P2ALIGNED(a
, pgsz
));
4519 ASSERT(IS_P2ALIGNED(lpgeaddr
, pgsz
));
4520 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4521 ASSERT(a
< lpgeaddr
);
4523 SEGVN_VMSTAT_FLTVNPAGES(44);
4525 * The beginning of the large page region can
4526 * be pulled to the right to make a smaller
4527 * region. We haven't yet faulted a single
4530 a
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
4531 ASSERT(a
>= lpgaddr
);
4533 (uintptr_t)(a
- seg
->s_base
);
4534 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4535 vpage
= (svd
->vpage
!= NULL
) ?
4536 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4541 kmem_free(ppa
, ppasize
);
4542 if (!err
&& !vop_size_err
) {
4543 SEGVN_VMSTAT_FLTVNPAGES(45);
4546 if (type
== F_SOFTLOCK
&& a
> lpgaddr
) {
4547 SEGVN_VMSTAT_FLTVNPAGES(46);
4548 segvn_softunlock(seg
, lpgaddr
, a
- lpgaddr
, S_OTHER
);
4550 if (!vop_size_err
) {
4551 SEGVN_VMSTAT_FLTVNPAGES(47);
4554 ASSERT(brkcow
|| tron
|| type
== F_SOFTLOCK
);
4556 * Large page end is mapped beyond the end of file and it's a cow
4557 * fault (can be a text replication induced cow) or softlock so we can't
4558 * reduce the map area. For now just demote the segment. This should
4559 * really only happen if the end of the file changed after the mapping
4560 * was established since when large page segments are created we make
4561 * sure they don't extend beyond the end of the file.
4563 SEGVN_VMSTAT_FLTVNPAGES(48);
4565 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4566 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4568 if (seg
->s_szc
!= 0) {
4569 segvn_fltvnpages_clrszc_cnt
++;
4570 ASSERT(svd
->softlockcnt
== 0);
4571 err
= segvn_clrszc(seg
);
4573 segvn_fltvnpages_clrszc_err
++;
4576 ASSERT(err
|| seg
->s_szc
== 0);
4577 SEGVN_LOCK_DOWNGRADE(seg
->s_as
, &svd
->lock
);
4578 /* segvn_fault will do its job as if szc had been zero to begin with */
4579 return (err
== 0 ? IE_RETRY
: FC_MAKE_ERR(err
));
 * This routine will attempt to fault in one large page.
 * If that fails it will use smaller pages.
 * It should only be called for pure anonymous segments.
static faultcode_t
segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
    caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
    caddr_t eaddr, int brkcow)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp = svd->amp;
	uchar_t segtype = svd->type;
	uint_t szc = seg->s_szc;
	size_t pgsz = page_get_pagesize(szc);
	size_t maxpgsz = pgsz;
	pgcnt_t pages = btop(pgsz);
	uint_t ppaszc = szc;
	caddr_t a = lpgaddr;
	ulong_t aindx = svd->anon_index + seg_page(seg, a);
	struct vpage *vpage = (svd->vpage != NULL) ?
	    &svd->vpage[seg_page(seg, a)] : NULL;
	uint_t protchk, prot, vpprot;
	int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
	anon_sync_obj_t cookie;
	int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;

	ASSERT(amp != NULL);
	ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
	ASSERT(!(svd->flags & MAP_NORESERVE));
	ASSERT(type != F_SOFTUNLOCK);
	ASSERT(IS_P2ALIGNED(a, maxpgsz));
	ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
	ASSERT(svd->tr_state != SEGVN_TR_INIT);

	ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4626 VM_STAT_COND_ADD(type
== F_SOFTLOCK
, segvnvmstats
.fltanpages
[0]);
4627 VM_STAT_COND_ADD(type
!= F_SOFTLOCK
, segvnvmstats
.fltanpages
[1]);
4629 if (svd
->flags
& MAP_TEXT
) {
4630 hat_flag
|= HAT_LOAD_TEXT
;
4633 if (svd
->pageprot
) {
4636 protchk
= PROT_READ
;
4639 protchk
= PROT_WRITE
;
4642 protchk
= PROT_EXEC
;
4646 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
4649 VM_STAT_ADD(segvnvmstats
.fltanpages
[2]);
4652 /* caller has already done segment level protection check. */
4655 ppa
= kmem_cache_alloc(segvn_szc_cache
[ppaszc
], KM_SLEEP
);
4656 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
4659 for (; a
< lpgeaddr
; a
+= pgsz
, aindx
+= pages
) {
4660 if (svd
->pageprot
!= 0 && IS_P2ALIGNED(a
, maxpgsz
)) {
4661 VM_STAT_ADD(segvnvmstats
.fltanpages
[3]);
4662 ASSERT(vpage
!= NULL
);
4663 prot
= VPP_PROT(vpage
);
4664 ASSERT(sameprot(seg
, a
, maxpgsz
));
4665 if ((prot
& protchk
) == 0) {
4670 if (adjszc_chk
&& IS_P2ALIGNED(a
, maxpgsz
) &&
4672 ASSERT(a
> lpgaddr
);
4676 ASSERT(IS_P2ALIGNED(aindx
, pages
));
4677 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
,
4680 if (type
== F_SOFTLOCK
) {
4681 atomic_add_long((ulong_t
*)&svd
->softlockcnt
,
4684 anon_array_enter(amp
, aindx
, &cookie
);
4685 ppa_szc
= (uint_t
)-1;
4686 ierr
= anon_map_getpages(amp
, aindx
, szc
, seg
, a
,
4687 prot
, &vpprot
, ppa
, &ppa_szc
, vpage
, rw
, brkcow
,
4688 segvn_anypgsz
, pgflags
, svd
->cred
);
4690 anon_array_exit(&cookie
);
4691 VM_STAT_ADD(segvnvmstats
.fltanpages
[4]);
4692 if (type
== F_SOFTLOCK
) {
4694 (ulong_t
*)&svd
->softlockcnt
,
4698 VM_STAT_ADD(segvnvmstats
.fltanpages
[6]);
4699 err
= FC_MAKE_ERR(ierr
);
4705 ASSERT(!IS_VMODSORT(ppa
[0]->p_vnode
));
4707 ASSERT(segtype
== MAP_SHARED
||
4708 ppa
[0]->p_szc
<= szc
);
4709 ASSERT(segtype
== MAP_PRIVATE
||
4710 ppa
[0]->p_szc
>= szc
);
4713 * Handle pages that have been marked for migration
4715 if (lgrp_optimizations())
4716 page_migrate(seg
, a
, ppa
, pages
);
4718 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
4720 if (segtype
== MAP_SHARED
) {
4721 vpprot
|= PROT_WRITE
;
4724 hat_memload_array(hat
, a
, pgsz
, ppa
,
4725 prot
& vpprot
, hat_flag
);
4727 if (hat_flag
& HAT_LOAD_LOCK
) {
4728 VM_STAT_ADD(segvnvmstats
.fltanpages
[7]);
4730 VM_STAT_ADD(segvnvmstats
.fltanpages
[8]);
4731 for (i
= 0; i
< pages
; i
++)
4732 page_unlock(ppa
[i
]);
4737 anon_array_exit(&cookie
);
4742 ASSERT(a
< lpgeaddr
);
4744 * ierr == -1 means we failed to allocate a large page.
4745 * so do a size down operation.
4747 * ierr == -2 means some other process that privately shares
4748 * pages with this process has allocated a larger page and we
4749 * need to retry with larger pages. So do a size up
4750 * operation. This relies on the fact that large pages are
4751 * never partially shared i.e. if we share any constituent
4752 * page of a large page with another process we must share the
4753 * entire large page. Note this cannot happen for SOFTLOCK
4754 * case, unless current address (a) is at the beginning of the
4755 * next page size boundary because the other process couldn't
4756 * have relocated locked pages.
4758 ASSERT(ierr
== -1 || ierr
== -2);
4760 if (segvn_anypgsz
) {
4761 ASSERT(ierr
== -2 || szc
!= 0);
4762 ASSERT(ierr
== -1 || szc
< seg
->s_szc
);
4763 szc
= (ierr
== -1) ? szc
- 1 : szc
+ 1;
4766 * For non COW faults and segvn_anypgsz == 0
4767 * we need to be careful not to loop forever
4768 * if existing page is found with szc other
4769 * than 0 or seg->s_szc. This could be due
4770 * to page relocations on behalf of DR or
4771 * more likely large page creation. For this
4772 * case simply re-size to existing page's szc
4773 * if returned by anon_map_getpages().
4775 if (ppa_szc
== (uint_t
)-1) {
4776 szc
= (ierr
== -1) ? 0 : seg
->s_szc
;
4778 ASSERT(ppa_szc
<= seg
->s_szc
);
4779 ASSERT(ierr
== -2 || ppa_szc
< szc
);
4780 ASSERT(ierr
== -1 || ppa_szc
> szc
);
4785 pgsz
= page_get_pagesize(szc
);
4787 ASSERT(type
!= F_SOFTLOCK
|| ierr
== -1 ||
4788 (IS_P2ALIGNED(a
, pgsz
) && IS_P2ALIGNED(lpgeaddr
, pgsz
)));
4789 if (type
== F_SOFTLOCK
) {
 * For softlocks we cannot reduce the fault area
 * (calculated based on the largest page size for this
 * segment) for size down and a is already next
 * page size aligned as asserted above for size
 * ups. Therefore just continue in case of softlock.
4797 VM_STAT_ADD(segvnvmstats
.fltanpages
[9]);
4798 continue; /* keep lint happy */
4799 } else if (ierr
== -2) {
4802 * Size up case. Note lpgaddr may only be needed for
4803 * softlock case so we don't adjust it here.
4805 VM_STAT_ADD(segvnvmstats
.fltanpages
[10]);
4806 a
= (caddr_t
)P2ALIGN((uintptr_t)a
, pgsz
);
4807 ASSERT(a
>= lpgaddr
);
4808 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4809 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4810 vpage
= (svd
->vpage
!= NULL
) ?
4811 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4814 * Size down case. Note lpgaddr may only be needed for
4815 * softlock case so we don't adjust it here.
4817 VM_STAT_ADD(segvnvmstats
.fltanpages
[11]);
4818 ASSERT(IS_P2ALIGNED(a
, pgsz
));
4819 ASSERT(IS_P2ALIGNED(lpgeaddr
, pgsz
));
4820 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4821 ASSERT(a
< lpgeaddr
);
4824 * The beginning of the large page region can
4825 * be pulled to the right to make a smaller
4826 * region. We haven't yet faulted a single
4829 VM_STAT_ADD(segvnvmstats
.fltanpages
[12]);
4830 a
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
4831 ASSERT(a
>= lpgaddr
);
4832 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4833 vpage
= (svd
->vpage
!= NULL
) ?
4834 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4838 VM_STAT_ADD(segvnvmstats
.fltanpages
[13]);
4839 ANON_LOCK_EXIT(&
->a_rwlock
);
4840 kmem_cache_free(segvn_szc_cache
[ppaszc
], ppa
);
4843 VM_STAT_ADD(segvnvmstats
.fltanpages
[14]);
4844 ANON_LOCK_EXIT(&
->a_rwlock
);
4845 kmem_cache_free(segvn_szc_cache
[ppaszc
], ppa
);
4846 if (type
== F_SOFTLOCK
&& a
> lpgaddr
) {
4847 VM_STAT_ADD(segvnvmstats
.fltanpages
[15]);
4848 segvn_softunlock(seg
, lpgaddr
, a
- lpgaddr
, S_OTHER
);
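/*
 * Illustrative sketch (example only): the SOFTLOCK bookkeeping pattern used
 * above -- bump a soft-lock count as each large-page chunk is locked, and on
 * failure unlock only the prefix [lpgaddr, a) that was already handled.  The
 * helpers lock_chunk()/unlock_range() and the counter are hypothetical
 * stand-ins, not the driver's segvn_softunlock()/softlockcnt machinery.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

static size_t softlockcnt;

static int
lock_chunk(size_t chunk)
{
	if (chunk >= 3)			/* pretend the 4th chunk fails */
		return (-1);
	softlockcnt++;
	return (0);
}

static void
unlock_range(size_t nchunks)
{
	softlockcnt -= nchunks;
}

int
main(void)
{
	size_t a, nchunks = 5;

	for (a = 0; a < nchunks; a++) {
		if (lock_chunk(a) != 0) {
			unlock_range(a);	/* undo the locked prefix */
			break;
		}
	}
	(void) printf("softlockcnt now %zu\n", softlockcnt);
	return (0);
}
#endif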
4853 int fltadvice
= 1; /* set to free behind pages for sequential access */
4856 * This routine is called via a machine specific fault handling routine.
4857 * It is also called by software routines wishing to lock or unlock
4858 * a range of addresses.
4860 * Here is the basic algorithm:
4862 * Call segvn_softunlock
4865 * Checking and set up work
4866 * If we will need some non-anonymous pages
4867 * Call fop_getpage over the range of non-anonymous pages
4869 * Loop over all addresses requested
4870 * Call segvn_faultpage passing in page list
4871 * to load up translations and handle anonymous pages
4873 * Load up translation to any additional pages in page list not
4874 * already handled that fit into this segment
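/*
 * Illustrative sketch (example only): the overall shape described in the
 * comment above -- a softunlock request short-circuits, otherwise one bulk
 * page lookup is followed by a per-page loop that loads translations.  Every
 * helper here is a hypothetical stub, not a driver function.
 */
#if 0
#include <stddef.h>

enum sketch_fault { SK_SOFTUNLOCK, SK_INVAL };

static void softunlock_range(char *a, size_t l) { (void)a; (void)l; }
static int  need_nonanon(char *a, size_t l) { (void)a; (void)l; return (1); }
static void getpage_range(char *a, size_t l) { (void)a; (void)l; }
static void fault_one(char *a) { (void)a; }

static int
fault_sketch(enum sketch_fault type, char *addr, size_t len, size_t pgsz)
{
	char *a;

	if (type == SK_SOFTUNLOCK) {		/* the easy case */
		softunlock_range(addr, len);
		return (0);
	}
	if (need_nonanon(addr, len))		/* one bulk getpage call */
		getpage_range(addr, len);
	for (a = addr; a < addr + len; a += pgsz)
		fault_one(a);			/* per-page translation setup */
	return (0);
}

int
main(void)
{
	char buf[4 * 4096];

	return (fault_sketch(SK_INVAL, buf, sizeof (buf), 4096));
}
#endif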
static faultcode_t
segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	page_t **plp, **ppp, *pp;
	struct vpage *vpage;
	uint_t vpprot, prot;
	page_t *pl[FAULT_TMP_PAGES_NUM + 1];
	size_t plsz, pl_alloc_sz;
	struct anon_map *amp;
	caddr_t lpgaddr, lpgeaddr;
	anon_sync_obj_t cookie;
	int brkcow = BREAK_COW_SHARE(rw, type, svd->type);

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4902 * First handle the easy stuff
4904 if (type
== F_SOFTUNLOCK
) {
4905 if (rw
== S_READ_NOCOW
) {
4907 ASSERT(AS_WRITE_HELD(seg
->s_as
));
4909 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
4910 pgsz
= (seg
->s_szc
== 0) ? PAGESIZE
:
4911 page_get_pagesize(seg
->s_szc
);
4912 VM_STAT_COND_ADD(pgsz
> PAGESIZE
, segvnvmstats
.fltanpages
[16]);
4913 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
4914 segvn_softunlock(seg
, lpgaddr
, lpgeaddr
- lpgaddr
, rw
);
4915 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4919 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
||
4920 !HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
));
4922 if (svd
->tr_state
== SEGVN_TR_INIT
) {
4923 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4924 if (svd
->tr_state
== SEGVN_TR_INIT
) {
4925 ASSERT(svd
->vp
!= NULL
&& svd
->amp
== NULL
);
4926 ASSERT(svd
->flags
& MAP_TEXT
);
4927 ASSERT(svd
->type
== MAP_PRIVATE
);
4928 segvn_textrepl(seg
);
4929 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
4930 ASSERT(svd
->tr_state
!= SEGVN_TR_ON
||
4933 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4935 } else if (svd
->tr_state
!= SEGVN_TR_OFF
) {
4936 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4938 if (rw
== S_WRITE
&& svd
->tr_state
!= SEGVN_TR_OFF
) {
4939 ASSERT(!svd
->pageprot
&& !(svd
->prot
& PROT_WRITE
));
4940 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4944 if (svd
->tr_state
== SEGVN_TR_ON
) {
4945 ASSERT(svd
->vp
!= NULL
&& svd
->amp
!= NULL
);
4946 segvn_textunrepl(seg
, 0);
4947 ASSERT(svd
->amp
== NULL
&&
4948 svd
->tr_state
== SEGVN_TR_OFF
);
4949 } else if (svd
->tr_state
!= SEGVN_TR_OFF
) {
4950 svd
->tr_state
= SEGVN_TR_OFF
;
4952 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
4953 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4957 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
 * If we have the same protections for the entire segment,
 * ensure that the access being attempted is legitimate.
4964 if (svd
->pageprot
== 0) {
4970 protchk
= PROT_READ
;
4973 protchk
= PROT_WRITE
;
4976 protchk
= PROT_EXEC
;
4980 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
4984 if ((svd
->prot
& protchk
) == 0) {
4985 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4986 return (FC_PROT
); /* illegal access type */
4990 if (brkcow
&& HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
4991 /* this must be SOFTLOCK S_READ fault */
4992 ASSERT(svd
->amp
== NULL
);
4993 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
4994 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4995 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4996 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
4998 * this must be the first ever non S_READ_NOCOW
4999 * softlock for this segment.
5001 ASSERT(svd
->softlockcnt
== 0);
5002 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
5004 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
5006 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5011 * We can't allow the long term use of softlocks for vmpss segments,
5012 * because in some file truncation cases we should be able to demote
5013 * the segment, which requires that there are no softlocks. The
5014 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5015 * segment is S_READ_NOCOW, where the caller holds the address space
5016 * locked as writer and calls softunlock before dropping the as lock.
5017 * S_READ_NOCOW is used by /proc to read memory from another user.
5019 * Another deadlock between SOFTLOCK and file truncation can happen
5020 * because segvn_fault_vnodepages() calls the FS one pagesize at
5021 * a time. A second fop_getpage() call by segvn_fault_vnodepages()
5022 * can cause a deadlock because the first set of page_t's remain
5023 * locked SE_SHARED. To avoid this, we demote segments on a first
5024 * SOFTLOCK if they have a length greater than the segment's
5027 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5028 * the access type is S_READ_NOCOW and the fault length is less than
5029 * or equal to the segment's page size. While this is quite restrictive,
5030 * it should be the most common case of SOFTLOCK against a vmpss
5033 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5034 * caller makes sure no COW will be caused by another thread for a
5037 if (type
== F_SOFTLOCK
&& svd
->vp
!= NULL
&& seg
->s_szc
!= 0) {
5040 if (rw
!= S_READ_NOCOW
) {
5043 if (!demote
&& len
> PAGESIZE
) {
5044 pgsz
= page_get_pagesize(seg
->s_szc
);
5045 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
,
5047 if (lpgeaddr
- lpgaddr
> pgsz
) {
5052 ASSERT(demote
|| AS_WRITE_HELD(seg
->s_as
));
5055 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5056 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5057 if (seg
->s_szc
!= 0) {
5058 segvn_vmpss_clrszc_cnt
++;
5059 ASSERT(svd
->softlockcnt
== 0);
5060 err
= segvn_clrszc(seg
);
5062 segvn_vmpss_clrszc_err
++;
5063 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5064 return (FC_MAKE_ERR(err
));
5067 ASSERT(seg
->s_szc
== 0);
5068 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5074 * Check to see if we need to allocate an anon_map structure.
5076 if (svd
->amp
== NULL
&& (svd
->vp
== NULL
|| brkcow
)) {
5077 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5079 * Drop the "read" lock on the segment and acquire
5080 * the "write" version since we have to allocate the
5083 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5084 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5086 if (svd
->amp
== NULL
) {
5087 svd
->amp
= anonmap_alloc(seg
->s_size
, 0, ANON_SLEEP
);
5088 svd
->amp
->a_szc
= seg
->s_szc
;
5090 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5093 * Start all over again since segment protections
5094 * may have changed after we dropped the "read" lock.
5100 * S_READ_NOCOW vs S_READ distinction was
5101 * only needed for the code above. After
5102 * that we treat it as S_READ.
5104 if (rw
== S_READ_NOCOW
) {
5105 ASSERT(type
== F_SOFTLOCK
);
5106 ASSERT(AS_WRITE_HELD(seg
->s_as
));
5113 * MADV_SEQUENTIAL work is ignored for large page segments.
5115 if (seg
->s_szc
!= 0) {
5116 pgsz
= page_get_pagesize(seg
->s_szc
);
5117 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
5118 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
5119 if (svd
->vp
== NULL
) {
5120 err
= segvn_fault_anonpages(hat
, seg
, lpgaddr
,
5121 lpgeaddr
, type
, rw
, addr
, addr
+ len
, brkcow
);
5123 err
= segvn_fault_vnodepages(hat
, seg
, lpgaddr
,
5124 lpgeaddr
, type
, rw
, addr
, addr
+ len
, brkcow
);
5125 if (err
== IE_RETRY
) {
5126 ASSERT(seg
->s_szc
== 0);
5127 ASSERT(SEGVN_READ_HELD(seg
->s_as
, &svd
->lock
));
5128 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5132 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5136 page
= seg_page(seg
, addr
);
5138 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5139 anon_index
= svd
->anon_index
+ page
;
5141 if (type
== F_PROT
&& rw
== S_READ
&&
5142 svd
->tr_state
== SEGVN_TR_OFF
&&
5143 svd
->type
== MAP_PRIVATE
&& svd
->pageprot
== 0) {
5144 size_t index
= anon_index
;
5147 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5149 * The fast path could apply to S_WRITE also, except
5150 * that the protection fault could be caused by lazy
5151 * tlb flush when ro->rw. In this case, the pte is
5152 * RW already. But RO in the other cpu's tlb causes
5153 * the fault. Since hat_chgprot won't do anything if
5154 * pte doesn't change, we may end up faulting
5155 * indefinitely until the RO tlb entry gets replaced.
5157 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
, index
++) {
5158 anon_array_enter(amp
, index
, &cookie
);
5159 ap
= anon_get_ptr(amp
->ahp
, index
);
5160 anon_array_exit(&cookie
);
5161 if ((ap
== NULL
) || (ap
->an_refcnt
!= 1)) {
5162 ANON_LOCK_EXIT(&
->a_rwlock
);
5166 hat_chgprot(seg
->s_as
->a_hat
, addr
, len
, svd
->prot
);
5167 ANON_LOCK_EXIT(&
->a_rwlock
);
5168 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5174 if (svd
->vpage
== NULL
)
5177 vpage
= &svd
->vpage
[page
];
5179 off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
5182 * If MADV_SEQUENTIAL has been set for the particular page we
5183 * are faulting on, free behind all pages in the segment and put
5184 * them on the free list.
5187 if ((page
!= 0) && fltadvice
&& svd
->tr_state
!= SEGVN_TR_ON
) {
5189 ulong_t fanon_index
;
5191 uoff_t pgoff
, fpgoff
;
5193 struct anon
*fap
= NULL
;
5195 if (svd
->advice
== MADV_SEQUENTIAL
||
5197 VPP_ADVICE(vpage
) == MADV_SEQUENTIAL
)) {
5198 pgoff
= off
- PAGESIZE
;
5201 vpp
= &svd
->vpage
[fpage
];
5203 fanon_index
= svd
->anon_index
+ fpage
;
5205 while (pgoff
> svd
->offset
) {
5206 if (svd
->advice
!= MADV_SEQUENTIAL
&&
5207 (!svd
->pageadvice
|| (vpage
&&
5208 VPP_ADVICE(vpp
) != MADV_SEQUENTIAL
)))
5212 * If this is an anon page, we must find the
5213 * correct <vp, offset> for it
5217 ANON_LOCK_ENTER(&
->a_rwlock
,
5219 anon_array_enter(amp
, fanon_index
,
5221 fap
= anon_get_ptr(amp
->ahp
,
5224 swap_xlate(fap
, &fvp
, &fpgoff
);
5229 anon_array_exit(&cookie
);
5230 ANON_LOCK_EXIT(&
->a_rwlock
);
5238 * Skip pages that are free or have an
5241 pp
= page_lookup_nowait(fvp
, fpgoff
, SE_SHARED
);
5245 * We don't need the page_struct_lock to test
5246 * as this is only advisory; even if we
5247 * acquire it someone might race in and lock
5248 * the page after we unlock and before the
5249 * PUTPAGE, then fop_putpage will do nothing.
5251 if (pp
->p_lckcnt
== 0 && pp
->p_cowcnt
== 0) {
5253 * Hold the vnode before releasing
5254 * the page lock to prevent it from
5255 * being freed and re-used by some
5261 * We should build a page list
5262 * to kluster putpages XXX
5264 (void) fop_putpage(fvp
,
5265 (offset_t
)fpgoff
, PAGESIZE
,
5266 (B_DONTNEED
|B_FREE
|B_ASYNC
),
5271 * XXX - Should the loop terminate if
5272 * the page is `locked'?
5288 * See if we need to call fop_getpage for
5289 * *any* of the range being faulted on.
5290 * We can skip all of this work if there
5291 * was no original vnode.
5293 if (svd
->vp
!= NULL
) {
5306 * Only acquire reader lock to prevent amp->ahp
5307 * from being changed. It's ok to miss pages,
5308 * hence we don't do anon_array_enter
5310 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5311 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
5313 if (len
<= PAGESIZE
)
5314 /* inline non_anon() */
5315 dogetpage
= (ap
== NULL
);
5317 dogetpage
= non_anon(amp
->ahp
, anon_index
,
5319 ANON_LOCK_EXIT(&
->a_rwlock
);
5324 struct as
*as
= seg
->s_as
;
5326 if (len
> FAULT_TMP_PAGES_SZ
) {
5328 * Page list won't fit in local array,
5329 * allocate one of the needed size.
5332 (btop(len
) + 1) * sizeof (page_t
*);
5333 plp
= kmem_alloc(pl_alloc_sz
, KM_SLEEP
);
5336 } else if (rw
== S_WRITE
&& svd
->type
== MAP_PRIVATE
||
5337 svd
->tr_state
== SEGVN_TR_ON
|| rw
== S_OTHER
||
5338 (((size_t)(addr
+ PAGESIZE
) <
5339 (size_t)(seg
->s_base
+ seg
->s_size
)) &&
5340 hat_probe(as
->a_hat
, addr
+ PAGESIZE
))) {
5342 * Ask fop_getpage to return the exact number
5344 * (a) this is a COW fault, or
5345 * (b) this is a software fault, or
5346 * (c) next page is already mapped.
5351 * Ask fop_getpage to return adjacent pages
5352 * within the segment.
5354 plsz
= MIN((size_t)FAULT_TMP_PAGES_SZ
, (size_t)
5355 ((seg
->s_base
+ seg
->s_size
) - addr
));
5356 ASSERT((addr
+ plsz
) <=
5357 (seg
->s_base
+ seg
->s_size
));
5361 * Need to get some non-anonymous pages.
5362 * We need to make only one call to GETPAGE to do
5363 * this to prevent certain deadlocking conditions
5364 * when we are doing locking. In this case
5365 * non_anon() should have picked up the smallest
5366 * range which includes all the non-anonymous
5367 * pages in the requested range. We have to
5368 * be careful regarding which rw flag to pass in
5369 * because on a private mapping, the underlying
5370 * object is never allowed to be written.
5372 if (rw
== S_WRITE
&& svd
->type
== MAP_PRIVATE
) {
5378 err
= fop_getpage(vp
, (offset_t
)vp_off
, vp_len
,
5379 &vpprot
, plp
, plsz
, seg
, addr
+ (vp_off
- off
), arw
,
5382 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5383 segvn_pagelist_rele(plp
);
5385 kmem_free(plp
, pl_alloc_sz
);
5386 return (FC_MAKE_ERR(err
));
5388 if (svd
->type
== MAP_PRIVATE
)
5389 vpprot
&= ~PROT_WRITE
;
5394 * N.B. at this time the plp array has all the needed non-anon
5395 * pages in addition to (possibly) having some adjacent pages.
5399 * Always acquire the anon_array_lock to prevent
5400 * 2 threads from allocating separate anon slots for
5403 * If this is a copy-on-write fault and we don't already
5404 * have the anon_array_lock, acquire it to prevent the
5405 * fault routine from handling multiple copy-on-write faults
5406 * on the same "addr" in the same address space.
5408 * Only one thread should deal with the fault since after
5409 * it is handled, the other threads can acquire a translation
5410 * to the newly created private page. This prevents two or
5411 * more threads from creating different private pages for the
5414 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5415 * to prevent deadlock between this thread and another thread
5416 * which has soft-locked this page and wants to acquire serial_lock.
 * The fix for bug 4026339 becomes unnecessary when using the
 * locking scheme with per amp rwlock and a global set of hash
 * locks, anon_array_lock. If we steal a vnode page when low
 * on memory and upgrade the page lock through page_rename,
 * then the page is PAGE_HANDLED, and nothing needs to be done
 * for this page after returning from segvn_faultpage.
 * But really, the page lock should be downgraded after
 * the stolen page is page_rename'd.
5431 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5434 * Ok, now loop over the address range and handle faults
5436 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
, off
+= PAGESIZE
) {
5437 err
= segvn_faultpage(hat
, seg
, a
, off
, vpage
, plp
, vpprot
,
5441 ANON_LOCK_EXIT(&
->a_rwlock
);
5442 if (type
== F_SOFTLOCK
&& a
> addr
) {
5443 segvn_softunlock(seg
, addr
, (a
- addr
),
5446 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5447 segvn_pagelist_rele(plp
);
5449 kmem_free(plp
, pl_alloc_sz
);
5454 } else if (svd
->vpage
) {
5455 page
= seg_page(seg
, addr
);
5456 vpage
= &svd
->vpage
[++page
];
5460 /* Didn't get pages from the underlying fs so we're done */
5465 * Now handle any other pages in the list returned.
5466 * If the page can be used, load up the translations now.
5467 * Note that the for loop will only be entered if "plp"
5468 * is pointing to a non-NULL page pointer which means that
5469 * fop_getpage() was called and vpprot has been initialized.
5471 if (svd
->pageprot
== 0)
5472 prot
= svd
->prot
& vpprot
;
 * Large Files: diff should be an unsigned value because we started
 * supporting > 2GB segment sizes from 2.5.1, and when a
 * large file of size > 2GB gets mapped to the address space
 * the diff value can be > 2GB.
5482 for (ppp
= plp
; (pp
= *ppp
) != NULL
; ppp
++) {
5486 anon_sync_obj_t cookie
;
5487 int hat_flag
= HAT_LOAD_ADV
;
5489 if (svd
->flags
& MAP_TEXT
) {
5490 hat_flag
|= HAT_LOAD_TEXT
;
5493 if (pp
== PAGE_HANDLED
)
5496 if (svd
->tr_state
!= SEGVN_TR_ON
&&
5497 pp
->p_offset
>= svd
->offset
&&
5498 pp
->p_offset
< svd
->offset
+ seg
->s_size
) {
5500 diff
= pp
->p_offset
- svd
->offset
;
5503 * Large Files: Following is the assertion
5504 * validating the above cast.
5506 ASSERT(svd
->vp
== pp
->p_vnode
);
5510 prot
= VPP_PROT(&svd
->vpage
[page
]) & vpprot
;
5513 * Prevent other threads in the address space from
5514 * creating private pages (i.e., allocating anon slots)
5515 * while we are in the process of loading translations
5516 * to additional pages returned by the underlying
5520 anon_index
= svd
->anon_index
+ page
;
5521 anon_array_enter(amp
, anon_index
, &cookie
);
5522 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
5524 if ((amp
== NULL
) || (ap
== NULL
)) {
5525 if (IS_VMODSORT(pp
->p_vnode
) ||
5529 else if (rw
!= S_OTHER
&&
5531 prot
&= ~PROT_WRITE
;
5534 * Skip mapping read ahead pages marked
5535 * for migration, so they will get migrated
5538 ASSERT(amp
== NULL
||
5539 svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5540 if ((prot
& PROT_READ
) && !PP_ISMIGRATE(pp
)) {
5541 hat_memload_region(hat
,
5548 anon_array_exit(&cookie
);
5554 ANON_LOCK_EXIT(&
->a_rwlock
);
5555 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5557 kmem_free(plp
, pl_alloc_sz
);
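/*
 * Illustrative sketch (example only): the temporary page-list strategy used
 * by the routine above -- a small on-stack array for the common case, and a
 * heap allocation only when the fault range needs more slots.  The 8-entry
 * size mirrors FAULT_TMP_PAGES_NUM; everything else here is a stand-in.
 */
#if 0
#include <stdlib.h>
#include <stdio.h>

#define	SKETCH_TMP_PAGES	8	/* mirrors FAULT_TMP_PAGES_NUM */

int
main(void)
{
	void *local_pl[SKETCH_TMP_PAGES + 1];	/* +1 for NULL terminator */
	void **plp = local_pl;
	size_t npages = 32;			/* pretend fault length */
	size_t alloc_sz = 0;

	if (npages > SKETCH_TMP_PAGES) {
		alloc_sz = (npages + 1) * sizeof (void *);
		plp = malloc(alloc_sz);
		if (plp == NULL)
			return (1);
	}
	(void) printf("using %s list, %zu slots\n",
	    plp == local_pl ? "stack" : "heap", npages);
	if (alloc_sz != 0)
		free(plp);
	return (0);
}
#endif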
/*
 * This routine is used to start I/O on pages asynchronously.  XXX it will
 * only create PAGESIZE pages. At fault time they will be relocated into
 * larger pages.
 */
static faultcode_t
segvn_faulta(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	int err = 0;
	struct anon_map *amp;
	vnode_t *vp;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
	if ((amp = svd->amp) != NULL) {
		struct anon *ap;

		/*
		 * Reader lock to prevent amp->ahp from being changed.
		 * This is advisory, it's ok to miss a page, so
		 * we don't do anon_array_enter lock.
		 */
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		if ((ap = anon_get_ptr(amp->ahp,
		    svd->anon_index + seg_page(seg, addr))) != NULL) {

			err = anon_getpage(&ap, NULL, NULL,
			    0, seg, addr, S_READ, svd->cred);

			ANON_LOCK_EXIT(&amp->a_rwlock);
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			if (err)
				return (FC_MAKE_ERR(err));
			return (0);
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
	}

	if (svd->vp == NULL) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (0);			/* zfod page - do nothing now */
	}

	vp = svd->vp;
	err = fop_getpage(vp,
	    (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
	    PAGESIZE, NULL, NULL, 0, seg, addr,
	    S_OTHER, svd->cred, NULL);

	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	if (err)
		return (FC_MAKE_ERR(err));
	return (0);
}
static int
segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *cvp, *svp, *evp;
	anon_sync_obj_t cookie;
	int unload_done = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if ((svd->maxprot & prot) != prot)
		return (EACCES);		/* violated maxprot */
5634 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5636 /* return if prot is the same */
5637 if (!svd
->pageprot
&& svd
->prot
== prot
) {
5638 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5643 * Since we change protections we first have to flush the cache.
5644 * This makes sure all the pagelock calls have to recheck
5647 if (svd
->softlockcnt
> 0) {
5648 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5651 * If this is shared segment non 0 softlockcnt
5652 * means locked pages are still in use.
5654 if (svd
->type
== MAP_SHARED
) {
5655 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5660 * Since we do have the segvn writers lock nobody can fill
5661 * the cache with entries belonging to this seg during
5662 * the purge. The flush either succeeds or we still have
5666 if (svd
->softlockcnt
> 0) {
5667 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5672 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5673 ASSERT(svd
->amp
== NULL
);
5674 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5675 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
5677 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
5679 } else if (svd
->tr_state
== SEGVN_TR_INIT
) {
5680 svd
->tr_state
= SEGVN_TR_OFF
;
5681 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
5682 ASSERT(svd
->amp
!= NULL
);
5683 segvn_textunrepl(seg
, 0);
5684 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
5688 if ((prot
& PROT_WRITE
) && svd
->type
== MAP_SHARED
&&
5689 svd
->vp
!= NULL
&& (svd
->vp
->v_flag
& VVMEXEC
)) {
5690 ASSERT(vn_is_mapped(svd
->vp
, V_WRITE
));
5691 segvn_inval_trcache(svd
->vp
);
5693 if (seg
->s_szc
!= 0) {
5695 pgsz
= page_get_pagesize(seg
->s_szc
);
5696 pgcnt
= pgsz
>> PAGESHIFT
;
5697 ASSERT(IS_P2ALIGNED(pgcnt
, pgcnt
));
5698 if (!IS_P2ALIGNED(addr
, pgsz
) || !IS_P2ALIGNED(len
, pgsz
)) {
5699 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5700 ASSERT(seg
->s_base
!= addr
|| seg
->s_size
!= len
);
5702 * If we are holding the as lock as a reader then
5703 * we need to return IE_RETRY and let the as
5704 * layer drop and re-acquire the lock as a writer.
5706 if (AS_READ_HELD(seg
->s_as
))
5708 VM_STAT_ADD(segvnvmstats
.demoterange
[1]);
5709 if (svd
->type
== MAP_PRIVATE
|| svd
->vp
!= NULL
) {
5710 err
= segvn_demote_range(seg
, addr
, len
,
5713 uint_t szcvec
= map_pgszcvec(seg
->s_base
,
5714 pgsz
, (uintptr_t)seg
->s_base
,
5715 (svd
->flags
& MAP_TEXT
), MAPPGSZC_SHM
, 0);
5716 err
= segvn_demote_range(seg
, addr
, len
,
5729 * If it's a private mapping and we're making it writable then we
5730 * may have to reserve the additional swap space now. If we are
5731 * making writable only a part of the segment then we use its vpage
5732 * array to keep a record of the pages for which we have reserved
5733 * swap. In this case we set the pageswap field in the segment's
5734 * segvn structure to record this.
5736 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5737 * removing write permission on the entire segment and we haven't
5738 * modified any pages, we can release the swap space.
5740 if (svd
->type
== MAP_PRIVATE
) {
5741 if (prot
& PROT_WRITE
) {
5742 if (!(svd
->flags
& MAP_NORESERVE
) &&
5743 !(svd
->swresv
&& svd
->pageswap
== 0)) {
5747 * Start by determining how much swap
5748 * space is required.
5750 if (addr
== seg
->s_base
&&
5751 len
== seg
->s_size
&&
5752 svd
->pageswap
== 0) {
5753 /* The whole segment */
5757 * Make sure that the vpage array
5758 * exists, and make a note of the
5759 * range of elements corresponding
5763 if (svd
->vpage
== NULL
) {
5764 SEGVN_LOCK_EXIT(seg
->s_as
,
5768 svp
= &svd
->vpage
[seg_page(seg
, addr
)];
5769 evp
= &svd
->vpage
[seg_page(seg
,
5772 if (svd
->pageswap
== 0) {
5774 * This is the first time we've
5775 * asked for a part of this
5776 * segment, so we need to
5777 * reserve everything we've
5783 * We have to count the number
5784 * of pages required.
5786 for (cvp
= svp
; cvp
< evp
;
5788 if (!VPP_ISSWAPRES(cvp
))
5795 /* Try to reserve the necessary swap. */
5796 if (anon_resv_zone(sz
,
5797 seg
->s_as
->a_proc
->p_zone
) == 0) {
5798 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5803 * Make a note of how much swap space
5806 if (svd
->pageswap
== 0 && sz
== seg
->s_size
) {
5809 ASSERT(svd
->vpage
!= NULL
);
5812 for (cvp
= svp
; cvp
< evp
; cvp
++) {
5813 if (!VPP_ISSWAPRES(cvp
))
5814 VPP_SETSWAPRES(cvp
);
5820 * Swap space is released only if this segment
5821 * does not map anonymous memory, since read faults
5822 * on such segments still need an anon slot to read
5825 if (svd
->swresv
!= 0 && svd
->vp
!= NULL
&&
5826 svd
->amp
== NULL
&& addr
== seg
->s_base
&&
5827 len
== seg
->s_size
&& svd
->pageprot
== 0) {
5828 ASSERT(svd
->pageswap
== 0);
5829 anon_unresv_zone(svd
->swresv
,
5830 seg
->s_as
->a_proc
->p_zone
);
5836 if (addr
== seg
->s_base
&& len
== seg
->s_size
&& svd
->vpage
== NULL
) {
5837 if (svd
->prot
== prot
) {
5838 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5839 return (0); /* all done */
5841 svd
->prot
= (uchar_t
)prot
;
5842 } else if (svd
->type
== MAP_PRIVATE
) {
5843 struct anon
*ap
= NULL
;
5846 struct anon_map
*amp
;
5847 ulong_t anon_idx
= 0;
5850 * A vpage structure exists or else the change does not
5851 * involve the entire segment. Establish a vpage structure
5852 * if none is there. Then, for each page in the range,
5853 * adjust its individual permissions. Note that write-
5854 * enabling a MAP_PRIVATE page can affect the claims for
5855 * locked down memory. Overcommitting memory terminates
5859 if (svd
->vpage
== NULL
) {
5860 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5864 if ((amp
= svd
->amp
) != NULL
) {
5865 anon_idx
= svd
->anon_index
+ seg_page(seg
, addr
);
5866 ASSERT(seg
->s_szc
== 0 ||
5867 IS_P2ALIGNED(anon_idx
, pgcnt
));
5868 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5871 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
5872 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
5875 * See Statement at the beginning of segvn_lockop regarding
5876 * the way cowcnts and lckcnts are handled.
5878 for (svp
= &svd
->vpage
[seg_page(seg
, addr
)]; svp
< evp
; svp
++) {
5880 if (seg
->s_szc
!= 0) {
5882 anon_array_enter(amp
, anon_idx
,
5885 if (IS_P2ALIGNED(anon_idx
, pgcnt
) &&
5886 !segvn_claim_pages(seg
, svp
, offset
,
5889 anon_array_exit(&cookie
);
5894 anon_array_exit(&cookie
);
5899 anon_array_enter(amp
, anon_idx
,
5901 ap
= anon_get_ptr(amp
->ahp
, anon_idx
++);
5904 if (VPP_ISPPLOCK(svp
) &&
5905 VPP_PROT(svp
) != prot
) {
5907 if (amp
== NULL
|| ap
== NULL
) {
5911 swap_xlate(ap
, &vp
, &off
);
5913 anon_array_exit(&cookie
);
5915 if ((pp
= page_lookup(vp
, off
,
5916 SE_SHARED
)) == NULL
) {
5917 panic("segvn_setprot: no page");
5920 ASSERT(seg
->s_szc
== 0);
5921 if ((VPP_PROT(svp
) ^ prot
) &
5923 if (prot
& PROT_WRITE
) {
5938 } else if (amp
!= NULL
)
5939 anon_array_exit(&cookie
);
5941 VPP_SETPROT(svp
, prot
);
5945 ANON_LOCK_EXIT(&
->a_rwlock
);
5948 * Did we terminate prematurely? If so, simply unload
5949 * the translations to the things we've updated so far.
5953 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5956 len
= (svp
- &svd
->vpage
[seg_page(seg
, addr
)]) *
5958 ASSERT(seg
->s_szc
== 0 || IS_P2ALIGNED(len
, pgsz
));
5960 hat_unload(seg
->s_as
->a_hat
, addr
,
5962 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5967 if (svd
->vpage
== NULL
) {
5968 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5972 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
5973 for (svp
= &svd
->vpage
[seg_page(seg
, addr
)]; svp
< evp
; svp
++) {
5974 VPP_SETPROT(svp
, prot
);
5979 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5983 if (((prot
& PROT_WRITE
) != 0 &&
5984 (svd
->vp
!= NULL
|| svd
->type
== MAP_PRIVATE
)) ||
5985 (prot
& ~PROT_USER
) == PROT_NONE
) {
5987 * Either private or shared data with write access (in
5988 * which case we need to throw out all former translations
5989 * so that we get the right translations set up on fault
5990 * and we don't allow write access to any copy-on-write pages
5991 * that might be around or to prevent write access to pages
5992 * representing holes in a file), or we don't have permission
5993 * to access the memory at all (in which case we have to
5994 * unload any current translations that might exist).
5996 hat_unload(seg
->s_as
->a_hat
, addr
, len
, HAT_UNLOAD
);
5999 * A shared mapping or a private mapping in which write
6000 * protection is going to be denied - just change all the
6001 * protections over the range of addresses in question.
6002 * segvn does not support any other attributes other
6003 * than prot so we can use hat_chgattr.
6005 hat_chgattr(seg
->s_as
->a_hat
, addr
, len
, prot
);
6008 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
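/*
 * Illustrative sketch (example only): the partial-reservation count performed
 * above when only part of a private segment is being made writable -- walk
 * the per-page vpage records and reserve swap only for pages not already
 * marked.  The struct and flag below are simplified stand-ins for the
 * driver's vpage/VPP_ISSWAPRES machinery.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define	SK_SWAPRES	0x1		/* stand-in for the swap-reserved bit */

struct sk_vpage {
	unsigned int vp_flags;
};

static size_t
swap_needed(const struct sk_vpage *svp, const struct sk_vpage *evp,
    size_t pagesize)
{
	const struct sk_vpage *cvp;
	size_t sz = 0;

	for (cvp = svp; cvp < evp; cvp++) {
		if (!(cvp->vp_flags & SK_SWAPRES))
			sz += pagesize;
	}
	return (sz);
}

int
main(void)
{
	struct sk_vpage vpage[4] = {
		{ SK_SWAPRES }, { 0 }, { 0 }, { SK_SWAPRES }
	};

	(void) printf("need %zu bytes of swap\n",
	    swap_needed(&vpage[0], &vpage[4], 4096));
	return (0);
}
#endif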
6014 * segvn_setpagesize is called via segop_setpagesize from as_setpagesize,
6015 * to determine if the seg is capable of mapping the requested szc.
static int
segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *nsvd;
	struct anon_map *amp = svd->amp;
	caddr_t eaddr = addr + len, a;
	size_t pgsz = page_get_pagesize(szc);
	pgcnt_t pgcnt = page_get_pagecnt(szc);
	uoff_t off = svd->offset + (uintptr_t)(addr - seg->s_base);

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
	ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6033 if (seg
->s_szc
== szc
|| segvn_lpg_disable
!= 0) {
6038 * addr should always be pgsz aligned but eaddr may be misaligned if
6039 * it's at the end of the segment.
6041 * XXX we should assert this condition since as_setpagesize() logic
6044 if (!IS_P2ALIGNED(addr
, pgsz
) ||
6045 (!IS_P2ALIGNED(eaddr
, pgsz
) &&
6046 eaddr
!= seg
->s_base
+ seg
->s_size
)) {
6048 segvn_setpgsz_align_err
++;
6052 if (amp
!= NULL
&& svd
->type
== MAP_SHARED
) {
6053 ulong_t an_idx
= svd
->anon_index
+ seg_page(seg
, addr
);
6054 if (!IS_P2ALIGNED(an_idx
, pgcnt
)) {
6056 segvn_setpgsz_anon_align_err
++;
6061 if ((svd
->flags
& MAP_NORESERVE
) || seg
->s_as
== &kas
||
6062 szc
> segvn_maxpgszc
) {
6066 /* paranoid check */
6067 if (svd
->vp
!= NULL
&&
6068 (IS_SWAPFSVP(svd
->vp
) || VN_ISKAS(svd
->vp
))) {
6072 if (seg
->s_szc
== 0 && svd
->vp
!= NULL
&&
6073 map_addr_vacalign_check(addr
, off
)) {
6078 * Check that protections are the same within new page
6081 if (svd
->pageprot
) {
6082 for (a
= addr
; a
< eaddr
; a
+= pgsz
) {
6083 if ((a
+ pgsz
) > eaddr
) {
6084 if (!sameprot(seg
, a
, eaddr
- a
)) {
6088 if (!sameprot(seg
, a
, pgsz
)) {
6096 * Since we are changing page size we first have to flush
6097 * the cache. This makes sure all the pagelock calls have
6098 * to recheck protections.
6100 if (svd
->softlockcnt
> 0) {
6101 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6104 * If this is shared segment non 0 softlockcnt
6105 * means locked pages are still in use.
6107 if (svd
->type
== MAP_SHARED
) {
6112 * Since we do have the segvn writers lock nobody can fill
6113 * the cache with entries belonging to this seg during
6114 * the purge. The flush either succeeds or we still have
6118 if (svd
->softlockcnt
> 0) {
6123 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
6124 ASSERT(svd
->amp
== NULL
);
6125 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6126 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
6128 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
6129 } else if (svd
->tr_state
== SEGVN_TR_INIT
) {
6130 svd
->tr_state
= SEGVN_TR_OFF
;
6131 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
6132 ASSERT(svd
->amp
!= NULL
);
6133 segvn_textunrepl(seg
, 1);
6134 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
6139 * Operation for sub range of existing segment.
6141 if (addr
!= seg
->s_base
|| eaddr
!= (seg
->s_base
+ seg
->s_size
)) {
6142 if (szc
< seg
->s_szc
) {
6143 VM_STAT_ADD(segvnvmstats
.demoterange
[2]);
6144 err
= segvn_demote_range(seg
, addr
, len
, SDR_RANGE
, 0);
6148 if (err
== ENOMEM
) {
6153 if (addr
!= seg
->s_base
) {
6154 nseg
= segvn_split_seg(seg
, addr
);
6155 if (eaddr
!= (nseg
->s_base
+ nseg
->s_size
)) {
6156 /* eaddr is szc aligned */
6157 (void) segvn_split_seg(nseg
, eaddr
);
6161 if (eaddr
!= (seg
->s_base
+ seg
->s_size
)) {
6162 /* eaddr is szc aligned */
6163 (void) segvn_split_seg(seg
, eaddr
);
6169 * Break any low level sharing and reset seg->s_szc to 0.
6171 if ((err
= segvn_clrszc(seg
)) != 0) {
6172 if (err
== ENOMEM
) {
6177 ASSERT(seg
->s_szc
== 0);
6180 * If the end of the current segment is not pgsz aligned
6181 * then attempt to concatenate with the next segment.
6183 if (!IS_P2ALIGNED(eaddr
, pgsz
)) {
6184 nseg
= AS_SEGNEXT(seg
->s_as
, seg
);
6185 if (nseg
== NULL
|| nseg
== seg
|| eaddr
!= nseg
->s_base
) {
6188 if (nseg
->s_ops
!= &segvn_ops
) {
6191 nsvd
= (struct segvn_data
*)nseg
->s_data
;
6192 if (nsvd
->softlockcnt
> 0) {
6194 * If this is shared segment non 0 softlockcnt
6195 * means locked pages are still in use.
6197 if (nsvd
->type
== MAP_SHARED
) {
6201 if (nsvd
->softlockcnt
> 0) {
6205 err
= segvn_clrszc(nseg
);
6206 if (err
== ENOMEM
) {
6212 ASSERT(nsvd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6213 err
= segvn_concat(seg
, nseg
, 1);
6224 * May need to re-align anon array to
6228 if (!IS_P2ALIGNED(svd
->anon_index
, pgcnt
)) {
6229 struct anon_hdr
*nahp
;
6231 ASSERT(svd
->type
== MAP_PRIVATE
);
6233 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6234 ASSERT(amp
->refcnt
== 1);
6235 nahp
= anon_create(btop(amp
->size
), ANON_NOSLEEP
);
6237 ANON_LOCK_EXIT(&
->a_rwlock
);
6240 if (anon_copy_ptr(amp
->ahp
, svd
->anon_index
,
6241 nahp
, 0, btop(seg
->s_size
), ANON_NOSLEEP
)) {
6242 anon_release(nahp
, btop(amp
->size
));
6243 ANON_LOCK_EXIT(&
->a_rwlock
);
6246 anon_release(amp
->ahp
, btop(amp
->size
));
6248 svd
->anon_index
= 0;
6249 ANON_LOCK_EXIT(&
->a_rwlock
);
6252 if (svd
->vp
!= NULL
&& szc
!= 0) {
6254 uoff_t eoffpage
= svd
->offset
;
6255 va
.va_mask
= AT_SIZE
;
6256 eoffpage
+= seg
->s_size
;
6257 eoffpage
= btopr(eoffpage
);
6258 if (fop_getattr(svd
->vp
, &va
, 0, svd
->cred
, NULL
) != 0) {
6259 segvn_setpgsz_getattr_err
++;
6262 if (btopr(va
.va_size
) < eoffpage
) {
6263 segvn_setpgsz_eof_err
++;
 * anon_fill_cow_holes() may call fop_getpage().
 * Don't take the anon map lock here to avoid holding it
 * across fop_getpage() calls that may call back into
 * segvn for klustering checks. We don't really need
 * the anon map lock here since it's a private segment and
 * we hold the as level lock as writers.
6275 if ((err
= anon_fill_cow_holes(seg
, seg
->s_base
,
6276 amp
->ahp
, svd
->anon_index
, svd
->vp
, svd
->offset
,
6277 seg
->s_size
, szc
, svd
->prot
, svd
->vpage
,
6282 segvn_setvnode_mpss(svd
->vp
);
6286 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6287 if (svd
->type
== MAP_PRIVATE
) {
6289 } else if (szc
> amp
->a_szc
) {
6292 ANON_LOCK_EXIT(&
->a_rwlock
);
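/*
 * Illustrative sketch (example only): the alignment screen applied at the top
 * of segvn_setpagesize() -- the start must be large-page aligned, and the end
 * may be unaligned only when it coincides with the end of the segment.  The
 * macro and names below are local stand-ins, not the kernel's definitions.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define	SK_ALIGNED(x, a)	(((uintptr_t)(x) & ((a) - 1)) == 0)

static int
pagesize_ok(uintptr_t addr, uintptr_t eaddr, uintptr_t seg_end, size_t pgsz)
{
	if (!SK_ALIGNED(addr, pgsz))
		return (0);
	if (!SK_ALIGNED(eaddr, pgsz) && eaddr != seg_end)
		return (0);
	return (1);
}

int
main(void)
{
	size_t pgsz = 0x200000;			/* 2M large page */

	(void) printf("%d %d\n",
	    pagesize_ok(0x400000, 0x800000, 0x900000, pgsz),
	    pagesize_ok(0x400000, 0x7f0000, 0x900000, pgsz));
	return (0);
}
#endif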
static int
segvn_clrszc(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp = svd->amp;
	caddr_t a = seg->s_base;
	caddr_t ea = a + seg->s_size;
	ulong_t an_idx = svd->anon_index;
	vnode_t *vp = svd->vp;
	struct vpage *vpage = svd->vpage;
	page_t *anon_pl[1 + 1], *pp;
	struct anon *ap, *oldap;
	uint_t prot = svd->prot, vpprot;

	ASSERT(AS_WRITE_HELD(seg->s_as) ||
	    SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->softlockcnt == 0);
6322 if (vp
== NULL
&& amp
== NULL
) {
6323 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6328 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
6329 ASSERT(svd
->amp
== NULL
);
6330 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6331 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
6333 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
6334 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
6335 ASSERT(svd
->amp
!= NULL
);
6336 segvn_textunrepl(seg
, 1);
6337 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
6340 if (svd
->tr_state
!= SEGVN_TR_OFF
) {
6341 ASSERT(svd
->tr_state
== SEGVN_TR_INIT
);
6342 svd
->tr_state
= SEGVN_TR_OFF
;
6346 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6347 * unload argument is 0 when we are freeing the segment
6348 * and unload was already done.
6350 hat_unload(seg
->s_as
->a_hat
, seg
->s_base
, seg
->s_size
,
6354 if (amp
== NULL
|| svd
->type
== MAP_SHARED
) {
6359 pgsz
= page_get_pagesize(seg
->s_szc
);
6363 * XXX anon rwlock is not really needed because this is a
6364 * private segment and we are writers.
6366 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6368 for (; a
< ea
; a
+= pgsz
, an_idx
+= pages
) {
6369 if ((oldap
= anon_get_ptr(amp
->ahp
, an_idx
)) != NULL
) {
6370 ASSERT(vpage
!= NULL
|| svd
->pageprot
== 0);
6371 if (vpage
!= NULL
) {
6372 ASSERT(sameprot(seg
, a
, pgsz
));
6373 prot
= VPP_PROT(vpage
);
6374 pageflag
= VPP_ISPPLOCK(vpage
) ? LOCK_PAGE
: 0;
6376 if (seg
->s_szc
!= 0) {
6377 ASSERT(vp
== NULL
|| anon_pages(amp
->ahp
,
6378 an_idx
, pages
) == pages
);
6379 if ((err
= anon_map_demotepages(amp
, an_idx
,
6380 seg
, a
, prot
, vpage
, svd
->cred
)) != 0) {
6384 if (oldap
->an_refcnt
== 1) {
6387 if ((err
= anon_getpage(&oldap
, &vpprot
,
6388 anon_pl
, PAGESIZE
, seg
, a
, S_READ
,
6392 if ((pp
= anon_private(&ap
, seg
, a
, prot
,
6393 anon_pl
[0], pageflag
, svd
->cred
)) == NULL
) {
6398 (void) anon_set_ptr(amp
->ahp
, an_idx
, ap
,
6403 vpage
= (vpage
== NULL
) ? NULL
: vpage
+ pages
;
6409 ANON_LOCK_EXIT(&
->a_rwlock
);
6421 pgcnt_t pgcnt
= page_get_pagecnt(seg
->s_szc
);
6422 size_t ppasize
= (pgcnt
+ 1) * sizeof (page_t
*);
6424 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6425 struct anon_map
*amp
= svd
->amp
;
6426 struct vpage
*evp
= svp
+ pgcnt
;
6427 caddr_t addr
= ((uintptr_t)(svp
- svd
->vpage
) << PAGESHIFT
)
6430 struct vnode
*vp
= svd
->vp
;
6435 int anon
= (amp
!= NULL
) ? 1 : 0;
6437 ASSERT(svd
->type
== MAP_PRIVATE
);
6438 ASSERT(svd
->vpage
!= NULL
);
6439 ASSERT(seg
->s_szc
!= 0);
6440 ASSERT(IS_P2ALIGNED(pgcnt
, pgcnt
));
6441 ASSERT(amp
== NULL
|| IS_P2ALIGNED(anon_idx
, pgcnt
));
6442 ASSERT(sameprot(seg
, addr
, pgcnt
<< PAGESHIFT
));
6444 if (VPP_PROT(svp
) == prot
)
6446 if (!((VPP_PROT(svp
) ^ prot
) & PROT_WRITE
))
6449 ppa
= kmem_alloc(ppasize
, KM_SLEEP
);
6450 if (anon
&& vp
!= NULL
) {
6451 if (anon_get_ptr(amp
->ahp
, anon_idx
) == NULL
) {
6453 ASSERT(!anon_pages(amp
->ahp
, anon_idx
, pgcnt
));
6456 anon_pages(amp
->ahp
, anon_idx
, pgcnt
) == pgcnt
);
6459 for (*ppa
= NULL
, pg_idx
= 0; svp
< evp
; svp
++, anon_idx
++) {
6460 if (!VPP_ISPPLOCK(svp
))
6463 ap
= anon_get_ptr(amp
->ahp
, anon_idx
);
6465 panic("segvn_claim_pages: no anon slot");
6467 swap_xlate(ap
, &vp
, &aoff
);
6471 if ((pp
= page_lookup(vp
,
6472 (uoff_t
)off
, SE_SHARED
)) == NULL
) {
6473 panic("segvn_claim_pages: no page");
6479 if (ppa
[0] == NULL
) {
6480 kmem_free(ppa
, ppasize
);
6484 ASSERT(pg_idx
<= pgcnt
);
6488 /* Find each large page within ppa, and adjust its claim */
6490 /* Does ppa cover a single large page? */
6491 if (ppa
[0]->p_szc
== seg
->s_szc
) {
6492 if (prot
& PROT_WRITE
)
6493 err
= page_addclaim_pages(ppa
);
6495 err
= page_subclaim_pages(ppa
);
6497 for (i
= 0; ppa
[i
]; i
+= pgcnt
) {
6498 ASSERT(IS_P2ALIGNED(page_pptonum(ppa
[i
]), pgcnt
));
6499 if (prot
& PROT_WRITE
)
6500 err
= page_addclaim_pages(&ppa
[i
]);
6502 err
= page_subclaim_pages(&ppa
[i
]);
6508 for (i
= 0; i
< pg_idx
; i
++) {
6509 ASSERT(ppa
[i
] != NULL
);
6510 page_unlock(ppa
[i
]);
6513 kmem_free(ppa
, ppasize
);
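/*
 * Illustrative sketch (example only): the claim-adjustment walk done above --
 * ppa[] is a NULL-terminated array of constituent pages, processed either as
 * one large page or in pgcnt-sized groups.  The "claim" here is just an
 * integer; the real code calls page_addclaim_pages()/page_subclaim_pages().
 */
#if 0
#include <stdio.h>
#include <stddef.h>

static void
adjust_group(int **group, size_t pgcnt, int delta)
{
	size_t j;

	for (j = 0; j < pgcnt; j++)
		*group[j] += delta;
}

int
main(void)
{
	int claims[8] = { 0 };
	int *ppa[9];
	size_t i, pgcnt = 4;

	for (i = 0; i < 8; i++)			/* NULL-terminated "page" array */
		ppa[i] = &claims[i];
	ppa[8] = NULL;

	for (i = 0; ppa[i] != NULL; i += pgcnt)	/* one group per large page */
		adjust_group(&ppa[i], pgcnt, 1);

	(void) printf("claims: %d %d %d %d ...\n",
	    claims[0], claims[1], claims[2], claims[3]);
	return (0);
}
#endif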
/*
 * Returns right (upper address) segment if split occurred.
 * If the address is equal to the beginning or end of its segment it returns
 * the current segment.
 */
static struct seg *
segvn_split_seg(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct seg *nseg;
	size_t nsize;
	struct segvn_data *nsvd;

	ASSERT(AS_WRITE_HELD(seg->s_as));
	ASSERT(svd->tr_state == SEGVN_TR_OFF);

	ASSERT(addr >= seg->s_base);
	ASSERT(addr <= seg->s_base + seg->s_size);
	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6537 if (addr
== seg
->s_base
|| addr
== seg
->s_base
+ seg
->s_size
)
6540 nsize
= seg
->s_base
+ seg
->s_size
- addr
;
6541 seg
->s_size
= addr
- seg
->s_base
;
6542 nseg
= seg_alloc(seg
->s_as
, addr
, nsize
);
6543 ASSERT(nseg
!= NULL
);
6544 nseg
->s_ops
= seg
->s_ops
;
6545 nsvd
= kmem_cache_alloc(segvn_cache
, KM_SLEEP
);
6546 nseg
->s_data
= (void *)nsvd
;
6547 nseg
->s_szc
= seg
->s_szc
;
6549 ASSERT(nsvd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6551 rw_init(&nsvd
->lock
, NULL
, RW_DEFAULT
, NULL
);
6553 if (nsvd
->vp
!= NULL
) {
6555 nsvd
->offset
= svd
->offset
+
6556 (uintptr_t)(nseg
->s_base
- seg
->s_base
);
6557 if (nsvd
->type
== MAP_SHARED
)
6558 lgrp_shm_policy_init(NULL
, nsvd
->vp
);
 * The offset for an anonymous segment has no significance in
 * terms of an offset into a file. If we were to use the above
 * calculation instead, the structures read out of
 * /proc/<pid>/xmap would be more difficult to decipher since
 * it would be unclear whether two seemingly contiguous
 * prxmap_t structures represented different segments or a
 * single segment that had been split up into multiple prxmap_t
 * structures (e.g. if some part of the segment had not yet
6574 ASSERT(svd
->softlockcnt
== 0);
6575 ASSERT(svd
->softlockcnt_sbase
== 0);
6576 ASSERT(svd
->softlockcnt_send
== 0);
6579 if (svd
->vpage
!= NULL
) {
6580 size_t bytes
= vpgtob(seg_pages(seg
));
6581 size_t nbytes
= vpgtob(seg_pages(nseg
));
6582 struct vpage
*ovpage
= svd
->vpage
;
6584 svd
->vpage
= kmem_alloc(bytes
, KM_SLEEP
);
6585 bcopy(ovpage
, svd
->vpage
, bytes
);
6586 nsvd
->vpage
= kmem_alloc(nbytes
, KM_SLEEP
);
6587 bcopy(ovpage
+ seg_pages(seg
), nsvd
->vpage
, nbytes
);
6588 kmem_free(ovpage
, bytes
+ nbytes
);
6590 if (svd
->amp
!= NULL
&& svd
->type
== MAP_PRIVATE
) {
6591 struct anon_map
*oamp
= svd
->amp
, *namp
;
6592 struct anon_hdr
*nahp
;
6594 ANON_LOCK_ENTER(&oamp
->a_rwlock
, RW_WRITER
);
6595 ASSERT(oamp
->refcnt
== 1);
6596 nahp
= anon_create(btop(seg
->s_size
), ANON_SLEEP
);
6597 (void) anon_copy_ptr(oamp
->ahp
, svd
->anon_index
,
6598 nahp
, 0, btop(seg
->s_size
), ANON_SLEEP
);
6600 namp
= anonmap_alloc(nseg
->s_size
, 0, ANON_SLEEP
);
6601 namp
->a_szc
= nseg
->s_szc
;
6602 (void) anon_copy_ptr(oamp
->ahp
,
6603 svd
->anon_index
+ btop(seg
->s_size
),
6604 namp
->ahp
, 0, btop(nseg
->s_size
), ANON_SLEEP
);
6605 anon_release(oamp
->ahp
, btop(oamp
->size
));
6607 oamp
->size
= seg
->s_size
;
6608 svd
->anon_index
= 0;
6610 nsvd
->anon_index
= 0;
6611 ANON_LOCK_EXIT(&oamp
->a_rwlock
);
6612 } else if (svd
->amp
!= NULL
) {
6613 pgcnt_t pgcnt
= page_get_pagecnt(seg
->s_szc
);
6614 ASSERT(svd
->amp
== nsvd
->amp
);
6615 ASSERT(seg
->s_szc
<= svd
->amp
->a_szc
);
6616 nsvd
->anon_index
= svd
->anon_index
+ seg_pages(seg
);
6617 ASSERT(IS_P2ALIGNED(nsvd
->anon_index
, pgcnt
));
6618 ANON_LOCK_ENTER(&svd
->amp
->a_rwlock
, RW_WRITER
);
6620 ANON_LOCK_EXIT(&svd
->amp
->a_rwlock
);
6624 * Split the amount of swap reserved.
6628 * For MAP_NORESERVE, only allocate swap reserve for pages
6629 * being used. Other segments get enough to cover whole
6632 if (svd
->flags
& MAP_NORESERVE
) {
6636 oswresv
= svd
->swresv
;
6637 svd
->swresv
= ptob(anon_pages(svd
->amp
->ahp
,
6638 svd
->anon_index
, btop(seg
->s_size
)));
6639 nsvd
->swresv
= ptob(anon_pages(nsvd
->amp
->ahp
,
6640 nsvd
->anon_index
, btop(nseg
->s_size
)));
6641 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
6643 if (svd
->pageswap
) {
6644 svd
->swresv
= segvn_count_swap_by_vpages(seg
);
6645 ASSERT(nsvd
->swresv
>= svd
->swresv
);
6646 nsvd
->swresv
-= svd
->swresv
;
6648 ASSERT(svd
->swresv
== seg
->s_size
+
6650 svd
->swresv
= seg
->s_size
;
6651 nsvd
->swresv
= nseg
->s_size
;
6660 * called on memory operations (unmap, setprot, setpagesize) for a subset
6661 * of a large page segment to either demote the memory range (SDR_RANGE)
6662 * or the ends (SDR_END) by addr/len.
6664 * returns 0 on success. returns errno, including ENOMEM, on failure.
6674 caddr_t eaddr
= addr
+ len
;
6675 caddr_t lpgaddr
, lpgeaddr
;
6677 struct seg
*badseg1
= NULL
;
6678 struct seg
*badseg2
= NULL
;
6680 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6682 uint_t szc
= seg
->s_szc
;
6685 ASSERT(AS_WRITE_HELD(seg
->s_as
));
6686 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6688 pgsz
= page_get_pagesize(szc
);
6689 ASSERT(seg
->s_base
!= addr
|| seg
->s_size
!= len
);
6690 ASSERT(addr
>= seg
->s_base
&& eaddr
<= seg
->s_base
+ seg
->s_size
);
6691 ASSERT(svd
->softlockcnt
== 0);
6692 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6693 ASSERT(szcvec
== 0 || (flag
== SDR_END
&& svd
->type
== MAP_SHARED
));
6695 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
6696 ASSERT(flag
== SDR_RANGE
|| eaddr
< lpgeaddr
|| addr
> lpgaddr
);
6697 if (flag
== SDR_RANGE
) {
6698 /* demote entire range */
6699 badseg1
= nseg
= segvn_split_seg(seg
, lpgaddr
);
6700 (void) segvn_split_seg(nseg
, lpgeaddr
);
6701 ASSERT(badseg1
->s_base
== lpgaddr
);
6702 ASSERT(badseg1
->s_size
== lpgeaddr
- lpgaddr
);
6703 } else if (addr
!= lpgaddr
) {
6704 ASSERT(flag
== SDR_END
);
6705 badseg1
= nseg
= segvn_split_seg(seg
, lpgaddr
);
6706 if (eaddr
!= lpgeaddr
&& eaddr
> lpgaddr
+ pgsz
&&
6707 eaddr
< lpgaddr
+ 2 * pgsz
) {
6708 (void) segvn_split_seg(nseg
, lpgeaddr
);
6709 ASSERT(badseg1
->s_base
== lpgaddr
);
6710 ASSERT(badseg1
->s_size
== 2 * pgsz
);
6712 nseg
= segvn_split_seg(nseg
, lpgaddr
+ pgsz
);
6713 ASSERT(badseg1
->s_base
== lpgaddr
);
6714 ASSERT(badseg1
->s_size
== pgsz
);
6715 if (eaddr
!= lpgeaddr
&& eaddr
> lpgaddr
+ pgsz
) {
6716 ASSERT(lpgeaddr
- lpgaddr
> 2 * pgsz
);
6717 nseg
= segvn_split_seg(nseg
, lpgeaddr
- pgsz
);
6719 (void) segvn_split_seg(nseg
, lpgeaddr
);
6720 ASSERT(badseg2
->s_base
== lpgeaddr
- pgsz
);
6721 ASSERT(badseg2
->s_size
== pgsz
);
6725 ASSERT(flag
== SDR_END
);
6726 ASSERT(eaddr
< lpgeaddr
);
6727 badseg1
= nseg
= segvn_split_seg(seg
, lpgeaddr
- pgsz
);
6728 (void) segvn_split_seg(nseg
, lpgeaddr
);
6729 ASSERT(badseg1
->s_base
== lpgeaddr
- pgsz
);
6730 ASSERT(badseg1
->s_size
== pgsz
);
6733 ASSERT(badseg1
!= NULL
);
6734 ASSERT(badseg1
->s_szc
== szc
);
6735 ASSERT(flag
== SDR_RANGE
|| badseg1
->s_size
== pgsz
||
6736 badseg1
->s_size
== 2 * pgsz
);
6737 ASSERT(sameprot(badseg1
, badseg1
->s_base
, pgsz
));
6738 ASSERT(badseg1
->s_size
== pgsz
||
6739 sameprot(badseg1
, badseg1
->s_base
+ pgsz
, pgsz
));
6740 if (err
= segvn_clrszc(badseg1
)) {
6743 ASSERT(badseg1
->s_szc
== 0);
6745 if (szc
> 1 && (tszcvec
= P2PHASE(szcvec
, 1 << szc
)) > 1) {
6746 uint_t tszc
= highbit(tszcvec
) - 1;
6747 caddr_t ta
= MAX(addr
, badseg1
->s_base
);
6749 size_t tpgsz
= page_get_pagesize(tszc
);
6751 ASSERT(svd
->type
== MAP_SHARED
);
6752 ASSERT(flag
== SDR_END
);
6753 ASSERT(tszc
< szc
&& tszc
> 0);
6755 if (eaddr
> badseg1
->s_base
+ badseg1
->s_size
) {
6756 te
= badseg1
->s_base
+ badseg1
->s_size
;
6762 badseg1
->s_szc
= tszc
;
6763 if (!IS_P2ALIGNED(ta
, tpgsz
) || !IS_P2ALIGNED(te
, tpgsz
)) {
6764 if (badseg2
!= NULL
) {
6765 err
= segvn_demote_range(badseg1
, ta
, te
- ta
,
6771 return (segvn_demote_range(badseg1
, ta
,
6772 te
- ta
, SDR_END
, tszcvec
));
6777 if (badseg2
== NULL
)
6779 ASSERT(badseg2
->s_szc
== szc
);
6780 ASSERT(badseg2
->s_size
== pgsz
);
6781 ASSERT(sameprot(badseg2
, badseg2
->s_base
, badseg2
->s_size
));
6782 if (err
= segvn_clrszc(badseg2
)) {
6785 ASSERT(badseg2
->s_szc
== 0);
6787 if (szc
> 1 && (tszcvec
= P2PHASE(szcvec
, 1 << szc
)) > 1) {
6788 uint_t tszc
= highbit(tszcvec
) - 1;
6789 size_t tpgsz
= page_get_pagesize(tszc
);
6791 ASSERT(svd
->type
== MAP_SHARED
);
6792 ASSERT(flag
== SDR_END
);
6793 ASSERT(tszc
< szc
&& tszc
> 0);
6794 ASSERT(badseg2
->s_base
> addr
);
6795 ASSERT(eaddr
> badseg2
->s_base
);
6796 ASSERT(eaddr
< badseg2
->s_base
+ badseg2
->s_size
);
6798 badseg2
->s_szc
= tszc
;
6799 if (!IS_P2ALIGNED(eaddr
, tpgsz
)) {
6800 return (segvn_demote_range(badseg2
, badseg2
->s_base
,
6801 eaddr
- badseg2
->s_base
, SDR_END
, tszcvec
));
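/*
 * Illustrative sketch (example only): the large-page region computation that
 * the routine above starts from -- round the start down and the end up to the
 * large page size, which is what the CALC_LPG_REGION() step produces as
 * lpgaddr/lpgeaddr.  The macros below are local stand-ins.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define	SK_P2ALIGN(x, a)	((uintptr_t)(x) & ~((uintptr_t)(a) - 1))
#define	SK_P2ROUNDUP(x, a)	(-(-(uintptr_t)(x) & -(uintptr_t)(a)))

int
main(void)
{
	uintptr_t addr = 0x412345, eaddr = 0x61f000;
	uintptr_t pgsz = 0x200000;		/* 2M large page */
	uintptr_t lpgaddr = SK_P2ALIGN(addr, pgsz);
	uintptr_t lpgeaddr = SK_P2ROUNDUP(eaddr, pgsz);

	(void) printf("lpgaddr 0x%lx lpgeaddr 0x%lx\n",
	    (unsigned long)lpgaddr, (unsigned long)lpgeaddr);
	return (0);
}
#endif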
static int
segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	int err;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
	/*
	 * If segment protection can be used, simply check against them.
	 */
	if (svd->pageprot == 0) {
		err = ((svd->prot & prot) != prot) ? EACCES : 0;
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (err);
	}

	/*
	 * Have to check down to the vpage level.
	 */
	evp = &svd->vpage[seg_page(seg, addr + len)];
	for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
		if ((VPP_PROT(vp) & prot) != prot) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			return (EACCES);
		}
	}
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (0);
}
static int
segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if (pgno != 0) {
		SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
		if (svd->pageprot == 0) {
			do {
				protv[--pgno] = svd->prot;
			} while (pgno != 0);
		} else {
			size_t pgoff = seg_page(seg, addr);

			do {
				pgno--;
				protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
			} while (pgno != 0);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	}
	return (0);
}
segvn_getoffset(struct seg *seg, caddr_t addr)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (svd->offset + (uintptr_t)(addr - seg->s_base));
segvn_gettype(struct seg *seg, caddr_t addr)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For segvn, we currently "approve" of the action if we are
 * still in the segment and it maps from the same vp/off,
 * or if the advice stored in segvn_data or vpages allows it.
 * Currently, klustering is not allowed only if MADV_RANDOM is set.
 */
segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon *oap, *ap;
	struct vnode *vp1, *vp2;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(AS_WRITE_HELD(seg->s_as) ||
	    SEGVN_LOCK_HELD(seg->s_as, &svd->lock));

	if (addr + delta < seg->s_base ||
	    addr + delta >= (seg->s_base + seg->s_size))
		return (-1);		/* exceeded segment bounds */

	pd = delta / (ssize_t)PAGESIZE;	/* divide to preserve sign bit */
	page = seg_page(seg, addr);

	/*
	 * Check to see if either of the pages addr or addr + delta
	 * have advice set that prevents klustering (if MADV_RANDOM advice
	 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
	 */
	if (svd->advice == MADV_RANDOM ||
	    svd->advice == MADV_SEQUENTIAL && delta < 0)
	else if (svd->pageadvice && svd->vpage) {
		struct vpage *bvpp, *evpp;

		bvpp = &svd->vpage[page];
		evpp = &svd->vpage[page + pd];
		if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
		    VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
		if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
		    VPP_ADVICE(evpp) == MADV_RANDOM)

	if (svd->type == MAP_SHARED)
		return (0);		/* shared mapping - all ok */

	if ((amp = svd->amp) == NULL)
		return (0);		/* off original vnode */

	page += svd->anon_index;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	oap = anon_get_ptr(amp->ahp, page);
	ap = anon_get_ptr(amp->ahp, page + pd);

	ANON_LOCK_EXIT(&amp->a_rwlock);

	if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
		return (-1);		/* one with and one without an anon */

	if (oap == NULL) {		/* implies that ap == NULL */
		return (0);		/* off original vnode */

	/*
	 * Now we know we have two anon pointers - check to
	 * see if they happen to be properly allocated.
	 */

	/*
	 * XXX We cheat here and don't lock the anon slots. We can't because
	 * we may have been called from the anon layer which might already
	 * have locked them. We are holding a refcnt on the slots so they
	 * can't disappear. The worst that will happen is we'll get the wrong
	 * names (vp, off) for the slots and make a poor klustering decision.
	 */
	swap_xlate(ap, &vp1, &off1);
	swap_xlate(oap, &vp2, &off2);

	if (!fop_cmp(vp1, vp2, NULL) || off1 - off2 != delta)
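/*
 * Illustrative sketch (not part of this driver): the advice consulted above
 * is set from user level with madvise(3C).  The names buf and maplen are
 * hypothetical.  Marking a mapping MADV_RANDOM makes segvn_kluster() reject
 * read-ahead, while MADV_NORMAL restores the default klustering behavior.
 *
 *	#include <sys/mman.h>
 *
 *	(void) madvise(buf, maplen, MADV_RANDOM);	disable klustering
 *	...scattered accesses...
 *	(void) madvise(buf, maplen, MADV_NORMAL);	allow klustering again
 */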
/*
 * Synchronize primary storage cache with real object in virtual memory.
 *
 * XXX - Anonymous pages should not be sync'ed out at all.
 */
segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp;
	anon_sync_obj_t cookie;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);

	if (svd->softlockcnt > 0) {
		/*
		 * If this is shared segment non 0 softlockcnt
		 * means locked pages are still in use.
		 */
		if (svd->type == MAP_SHARED) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

		/*
		 * flush all pages from seg cache
		 * otherwise we may deadlock in swap_putpage
		 * for B_INVAL page (4175402).
		 *
		 * Even if we grab segvn WRITER's lock
		 * here, there might be another thread which could've
		 * successfully performed lookup/insert just before
		 * we acquired the lock here.  So, grabbing either
		 * lock here is of not much use.  Until we devise
		 * a strategy at upper layers to solve the
		 * synchronization issues completely, we expect
		 * applications to handle this appropriately.
		 */
		if (svd->softlockcnt > 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	} else if (svd->type == MAP_SHARED && svd->amp != NULL &&
	    svd->amp->a_softlockcnt > 0) {
		/*
		 * Try to purge this amp's entries from pcache. It will
		 * succeed only if other segments that share the amp have no
		 * outstanding softlock's.
		 */
		if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	offset = svd->offset + (uintptr_t)(addr - seg->s_base);
	bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
	    ((flags & MS_INVALIDATE) ? B_INVAL : 0);

	pageprot = attr & ~(SHARED|PRIVATE);
	segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;

	/*
	 * We are done if the segment types don't match
	 * or if we have segment level protections and
	 */
	if (svd->type != segtype) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	if (svd->prot != pageprot) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	vpp = &svd->vpage[seg_page(seg, addr)];
	} else if (svd->vp && svd->amp == NULL &&
	    (flags & MS_INVALIDATE) == 0) {
		/*
		 * No attributes, no anonymous pages and MS_INVALIDATE flag
		 * is not on, just use one big request.
		 */
		err = fop_putpage(svd->vp, (offset_t)offset, len,
		    bflags, svd->cred, NULL);
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	if ((amp = svd->amp) != NULL)
		anon_index = svd->anon_index + seg_page(seg, addr);

	for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index++);
				swap_xlate(ap, &vp, &off);
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);

		if (vp == NULL)		/* untouched zfod page */

			prot = VPP_PROT(vpp);
			if (prot != pageprot) {

		/*
		 * See if any of these pages are locked --  if so, then we
		 * will have to truncate an invalidate request at the first
		 * locked one. We don't need the page_struct_lock to test
		 * as this is only advisory; even if we acquire it someone
		 * might race in and lock the page after we unlock and before
		 * we do the PUTPAGE, then PUTPAGE simply does nothing.
		 */
		if (flags & MS_INVALIDATE) {
			if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
				if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
					SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
				if (ap != NULL && pp->p_szc != 0 &&
				    page_tryupgrade(pp)) {
					if (pp->p_lckcnt == 0 &&
					    pp->p_cowcnt == 0) {
						/*
						 * swapfs VN_DISPOSE() won't
						 * invalidate large pages.
						 * Attempt to demote.
						 * XXX can't help it if it
						 * fails. But for swapfs
						 * pages it is no big deal.
						 */
						(void) page_try_demote_pages(
		} else if (svd->type == MAP_SHARED && amp != NULL) {
			/*
			 * Avoid writing out to disk ISM's large pages
			 * because segspt_free_pages() relies on NULL an_pvp
			 * of anon slots of such pages.
			 */
			ASSERT(svd->vp == NULL);
			/*
			 * swapfs uses page_lookup_nowait if not freeing or
			 * invalidating and skips a page if
			 * page_lookup_nowait returns NULL.
			 */
			pp = page_lookup_nowait(vp, off, SE_SHARED);
			if (pp->p_szc != 0) {

		/*
		 * Note ISM pages are created large so (vp, off)'s
		 * page cannot suddenly become large after we unlock
		 */

		/*
		 * XXX - Should ultimately try to kluster
		 * calls to fop_putpage() for performance.
		 */
		err = fop_putpage(vp, (offset_t)off, PAGESIZE,
		    (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
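/*
 * Illustrative sketch (not part of this driver): the bflags computed above
 * map msync(3C) flags onto putpage flags (MS_ASYNC -> B_ASYNC,
 * MS_INVALIDATE -> B_INVAL).  The names buf and maplen are hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	if (msync(buf, maplen, MS_ASYNC) != 0)
 *		perror("msync");
 *	MS_INVALIDATE may fail (e.g. EBUSY) if pages in the range are locked
 *	if (msync(buf, maplen, MS_INVALIDATE) != 0)
 *		perror("msync MS_INVALIDATE");
 */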
/*
 * Determine if we have data corresponding to pages in the
 * primary storage virtual memory cache (i.e., "in core").
 */
segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vnode *vp, *avp;
	uoff_t offset, aoffset;
	struct anon_map *amp;		/* XXX - for locknest */
	anon_sync_obj_t cookie;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
	if (svd->amp == NULL && svd->vp == NULL) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		bzero(vec, btopr(len));
		return (len);	/* no anonymous pages created yet */

	p = seg_page(seg, addr);
	ep = seg_page(seg, addr + len);
	start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;

	for (; p < ep; p++, addr += PAGESIZE) {
		vpp = (svd->vpage) ? &svd->vpage[p]: NULL;

			/* Grab the vnode/offset for the anon slot */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_array_enter(amp, svd->anon_index + p, &cookie);
			ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
				swap_xlate(ap, &avp, &aoffset);
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);

			if ((avp != NULL) && page_exists(avp, aoffset)) {
				/* A page exists for the anon slot */
				ret |= SEG_PAGE_INCORE;

				/*
				 * If page is mapped and writable
				 */
				if ((hat_getattr(seg->s_as->a_hat, addr,
				    &attr) != -1) && (attr & PROT_WRITE)) {
					ret |= SEG_PAGE_ANON;
				/*
				 * Don't get page_struct lock for lckcnt
				 * and cowcnt, since this is purely advisory.
				 */
				if ((pp = page_lookup_nowait(avp, aoffset,
				    SE_SHARED)) != NULL) {
					ret |= SEG_PAGE_SOFTLOCK;
					ret |= SEG_PAGE_HASCOW;

			/* Gather vnode statistics */
			offset = svd->offset + (uintptr_t)(addr - seg->s_base);

			/*
			 * Try to obtain a "shared" lock on the page
			 * without blocking.  If this fails, determine
			 * if the page is in memory.
			 */
			pp = page_lookup_nowait(vp, offset, SE_SHARED);
			if ((pp == NULL) && (page_exists(vp, offset))) {
				/* Page is incore, and is named */
				ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
			/*
			 * Don't get page_struct lock for lckcnt and cowcnt,
			 * since this is purely advisory.
			 */
				ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
				ret |= SEG_PAGE_SOFTLOCK;
				ret |= SEG_PAGE_HASCOW;

		/* Gather virtual page information */
			if (VPP_ISPPLOCK(vpp))
				ret |= SEG_PAGE_LOCKED;

	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
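/*
 * Illustrative sketch (not part of this driver): the per-page residency
 * information gathered here backs mincore(2).  The names buf, maplen and
 * pgsz are hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	size_t pgsz = sysconf(_SC_PAGESIZE);
 *	char *vec = malloc((maplen + pgsz - 1) / pgsz);
 *
 *	if (mincore(buf, maplen, vec) == 0) {
 *		low-order bit of vec[i] set => page i is resident
 *	}
 */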
/*
 * Statement for p_cowcnts/p_lckcnts.
 *
 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
 * irrespective of the following factors or anything else:
 *
 *	(1) anon slots are populated or not
 *	(2) cow is broken or not
 *	(3) refcnt on ap is 1 or greater than 1
 *
 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
 *
 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
 *
 *	if vpage has PROT_WRITE
 *		transfer cowcnt on the oldpage -> cowcnt on the newpage
 *	else
 *		transfer lckcnt on the oldpage -> lckcnt on the newpage
 *
 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
 *
 * We may also break COW if softlocking on read access in the physio case.
 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
 * vpage doesn't have PROT_WRITE.
 *
 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
 *
 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
 * increment p_lckcnt by calling page_subclaim() which takes care of
 * availrmem accounting and p_lckcnt overflow.
 *
 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
 * increment p_cowcnt by calling page_addclaim() which takes care of
 * availrmem availability and p_cowcnt overflow.
 */
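/*
 * Illustrative sketch (not part of this driver): these accounting rules are
 * driven from user level by mlock(3C)/munlock(3C), or by memcntl(2) with
 * MC_LOCK/MC_UNLOCK.  The names buf and maplen are hypothetical.  Locking a
 * MAP_PRIVATE, PROT_WRITE mapping bumps p_cowcnt on its pages; other
 * lockable mappings bump p_lckcnt instead.
 *
 *	#include <sys/mman.h>
 *
 *	if (mlock(buf, maplen) != 0)
 *		perror("mlock");
 *	...do I/O that must not page fault...
 *	(void) munlock(buf, maplen);
 */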
/*
 * Lock down (or unlock) pages mapped by this segment.
 *
 * XXX only creates PAGESIZE pages if anon slots are not initialized.
 * At fault time they will be relocated into larger pages.
 */
segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp;
	anon_sync_obj_t cookie;
	struct kshmid *sp = NULL;
	struct proc *p = curproc;
	kproject_t *proj = NULL;
	size_t locked_bytes = 0;
	size_t unlocked_bytes = 0;

	/*
	 * Hold write lock on address space because may split or concatenate
	 */
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * If this is a shm, use shm's project and zone, else use
	 * project and zone of calling process
	 */

	/* Determine if this segment backs a sysV shm */
	if (svd->amp != NULL && svd->amp->a_sp != NULL) {
		ASSERT(svd->type == MAP_SHARED);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		sp = svd->amp->a_sp;
		proj = sp->shm_perm.ipc_proj;

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);

	pageprot = attr & ~(SHARED|PRIVATE);
	segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;

	/*
	 * We are done if the segment types don't match
	 * or if we have segment level protections and
	 */
	if (svd->type != segtype) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	if (svd->pageprot == 0 && svd->prot != pageprot) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	if (op == MC_LOCK) {
		if (svd->tr_state == SEGVN_TR_INIT) {
			svd->tr_state = SEGVN_TR_OFF;
		} else if (svd->tr_state == SEGVN_TR_ON) {
			ASSERT(svd->amp != NULL);
			segvn_textunrepl(seg, 0);
			ASSERT(svd->amp == NULL &&
			    svd->tr_state == SEGVN_TR_OFF);

	/*
	 * If we're locking, then we must create a vpage structure if
	 * none exists. If we're unlocking, then check to see if there
	 * is a vpage -- if not, then we could not have locked anything.
	 */
	if ((vpp = svd->vpage) == NULL) {
		if (op == MC_LOCK) {
			if (svd->vpage == NULL) {
				SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	/*
	 * The anonymous data vector (i.e., previously
	 * unreferenced mapping to swap space) can be allocated
	 * by lazily testing for its existence.
	 */
	if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
		ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
		svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
		svd->amp->a_szc = seg->s_szc;

	if ((amp = svd->amp) != NULL) {
		anon_index = svd->anon_index + seg_page(seg, addr);

	offset = svd->offset + (uintptr_t)(addr - seg->s_base);
	evp = &svd->vpage[seg_page(seg, addr + len)];

		mutex_enter(&sp->shm_mlock);

	/* determine number of unlocked bytes in range for lock operation */
	if (op == MC_LOCK) {
			for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
				if (!VPP_ISPPLOCK(vpp))
					unlocked_bytes += PAGESIZE;
			ulong_t i_idx, i_edx;
			anon_sync_obj_t i_cookie;

			/* Only count sysV pages once for locked memory */
			i_edx = svd->anon_index + seg_page(seg, addr + len);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
				anon_array_enter(amp, i_idx, &i_cookie);
				i_ap = anon_get_ptr(amp->ahp, i_idx);
					unlocked_bytes += PAGESIZE;
					anon_array_exit(&i_cookie);
				swap_xlate(i_ap, &i_vp, &i_off);
				anon_array_exit(&i_cookie);
				pp = page_lookup(i_vp, i_off, SE_SHARED);
					unlocked_bytes += PAGESIZE;
				} else if (pp->p_lckcnt == 0)
					unlocked_bytes += PAGESIZE;
			ANON_LOCK_EXIT(&amp->a_rwlock);

		mutex_enter(&p->p_lock);
		err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
		mutex_exit(&p->p_lock);
				mutex_exit(&sp->shm_mlock);
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	/*
	 * Loop over all pages in the range.  Process if we're locking and
	 * page has not already been locked in this mapping; or if we're
	 * unlocking and the page has been locked.
	 */
	for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
	    vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
		if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
		    ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
		    (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {

				ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			/*
			 * If this isn't a MAP_NORESERVE segment and
			 * we're locking, allocate anon slots if they
			 * don't exist.  The page is brought in later on.
			 */
			if (op == MC_LOCK && svd->vp == NULL &&
			    ((svd->flags & MAP_NORESERVE) == 0) &&
			    ((ap = anon_get_ptr(amp->ahp, anon_index))
				anon_array_enter(amp, anon_index, &cookie);
				if ((ap = anon_get_ptr(amp->ahp,
				    anon_index)) == NULL) {
					pp = anon_zero(seg, addr, &ap,
						anon_array_exit(&cookie);
						ANON_LOCK_EXIT(&amp->a_rwlock);
					ASSERT(anon_get_ptr(amp->ahp,
					    anon_index) == NULL);
					(void) anon_set_ptr(amp->ahp,
					    anon_index, ap, ANON_SLEEP);
				anon_array_exit(&cookie);

			/*
			 * Get name for page, accounting for
			 * existence of private copy.
			 */
				anon_array_enter(amp, anon_index, &cookie);
				ap = anon_get_ptr(amp->ahp, anon_index);
					swap_xlate(ap, &vp, &off);
					if (svd->vp == NULL &&
					    (svd->flags & MAP_NORESERVE)) {
						anon_array_exit(&cookie);
						ANON_LOCK_EXIT(&amp->a_rwlock);
				if (op != MC_LOCK || ap == NULL) {
					anon_array_exit(&cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);

			/*
			 * Get page frame.  It's ok if the page is
			 * not available when we're unlocking, as this
			 * may simply mean that a page we locked got
			 * truncated out of existence after we locked it.
			 *
			 * Invoke fop_getpage() to obtain the page struct
			 * since we may need to read it from disk if its
			 */
				pp = page_lookup(vp, off, SE_SHARED);
				error = fop_getpage(vp, (offset_t)off, PAGESIZE,
				    (uint_t *)NULL, pl, PAGESIZE, seg, addr,
				    S_OTHER, svd->cred, NULL);

				if (error && ap != NULL) {
					anon_array_exit(&cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);

				/*
				 * If the error is EDEADLK then we must bounce
				 * up and drop all vm subsystem locks and then
				 * retry the operation later
				 * This behavior is a temporary measure because
				 * ufs/sds logging is badly designed and will
				 * deadlock if we don't allow this bounce to
				 * happen.  The real solution is to re-design
				 * the logging code to work properly.  See bug
				 * 4125102 for details of the problem.
				 */
				if (error == EDEADLK) {

				/*
				 * Quit if we fail to fault in the page.  Treat
				 * the failure as an error, unless the addr
				 * is mapped beyond the end of a file.
				 */
				if (error && svd->vp) {
					va.va_mask = AT_SIZE;
					if (fop_getattr(svd->vp, &va, 0,
					    svd->cred, NULL) != 0) {
					if (btopr(va.va_size) >=

			/*
			 * See Statement at the beginning of this routine.
			 *
			 * claim is always set if MAP_PRIVATE and PROT_WRITE
			 * irrespective of following factors:
			 *
			 *	(1) anon slots are populated or not
			 *	(2) cow is broken or not
			 *	(3) refcnt on ap is 1 or greater than 1
			 *
			 * See 4140683 for details
			 */
			claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
			    (svd->type == MAP_PRIVATE));

			/*
			 * Perform page-level operation appropriate to
			 * operation.  If locking, undo the SOFTLOCK
			 * performed to bring the page into memory
			 * after setting the lock.  If unlocking,
			 * and no page was found, account for the claim
			 */
			if (op == MC_LOCK) {
				int ret = 1;	/* Assume success */

				ASSERT(!VPP_ISPPLOCK(vpp));

				ret = page_pp_lock(pp, claim, 0);
					if (ap->an_pvp != NULL) {
						anon_swap_free(ap, pp);
					anon_array_exit(&cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
					/* locking page failed */
					if (pp->p_lckcnt == 1)
						locked_bytes += PAGESIZE;
					locked_bytes += PAGESIZE;
					if (lockmap != NULL)
						BT_SET(lockmap, pos);
				ASSERT(VPP_ISPPLOCK(vpp));
					/* sysV pages should be locked */
					ASSERT(sp == NULL || pp->p_lckcnt > 0);
					page_pp_unlock(pp, claim, 0);
						if (pp->p_lckcnt == 0)
					unlocked_bytes += PAGESIZE;
					unlocked_bytes += PAGESIZE;

	if (op == MC_LOCK) {
		/* Credit back bytes that did not get locked */
		if ((unlocked_bytes - locked_bytes) > 0) {
				mutex_enter(&p->p_lock);
			rctl_decr_locked_mem(p, proj,
			    (unlocked_bytes - locked_bytes), chargeproc);
				mutex_exit(&p->p_lock);

		/* Account bytes that were unlocked */
		if (unlocked_bytes > 0) {
				mutex_enter(&p->p_lock);
			rctl_decr_locked_mem(p, proj, unlocked_bytes,
				mutex_exit(&p->p_lock);
		mutex_exit(&sp->shm_mlock);
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
/*
 * Set advice from user for specified pages
 * There are 10 types of advice:
 *	MADV_NORMAL	- Normal (default) behavior (whatever that is)
 *	MADV_RANDOM	- Random page references
 *				do not allow readahead or 'klustering'
 *	MADV_SEQUENTIAL	- Sequential page references
 *				Pages previous to the one currently being
 *				accessed (determined by fault) are 'not needed'
 *				and are freed immediately
 *	MADV_WILLNEED	- Pages are likely to be used (fault ahead in mctl)
 *	MADV_DONTNEED	- Pages are not needed (synced out in mctl)
 *	MADV_FREE	- Contents can be discarded
 *	MADV_ACCESS_DEFAULT- Default access
 *	MADV_ACCESS_LWP	- Next LWP will access heavily
 *	MADV_ACCESS_MANY- Many LWPs or processes will access heavily
 *	MADV_PURGE	- Contents will be immediately discarded
 */
segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp;
	lgrp_mem_policy_t policy;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * In case of MADV_FREE/MADV_PURGE, we won't be modifying any segment
	 * private data structures; so, we only need to grab READER's lock
	 */
	if (behav != MADV_FREE && behav != MADV_PURGE) {
		SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
		if (svd->tr_state != SEGVN_TR_OFF) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);

	/*
	 * Large pages are assumed to be only turned on when accesses to the
	 * segment's address range have spatial and temporal locality. That
	 * justifies ignoring MADV_SEQUENTIAL for large page segments.
	 * Also, ignore advice affecting lgroup memory allocation
	 * if don't need to do lgroup optimizations on this system
	 */
	if ((behav == MADV_SEQUENTIAL &&
	    (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
	    (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
	    behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
	    behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
		/*
		 * Since we are going to unload hat mappings
		 * we first have to flush the cache. Otherwise
		 * this might lead to system panic if another
		 * thread is doing physio on the range whose
		 * mappings are unloaded by madvise(3C).
		 */
		if (svd->softlockcnt > 0) {
			/*
			 * If this is shared segment non 0 softlockcnt
			 * means locked pages are still in use.
			 */
			if (svd->type == MAP_SHARED) {
				SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			/*
			 * Since we do have the segvn writers lock
			 * nobody can fill the cache with entries
			 * belonging to this seg during the purge.
			 * The flush either succeeds or we still
			 * have pending I/Os. In the later case,
			 * madvise(3C) fails.
			 */
			if (svd->softlockcnt > 0) {
				/*
				 * Since madvise(3C) is advisory and
				 * it's not part of UNIX98, madvise(3C)
				 * failure here doesn't cause any hardship.
				 * Note that we don't block in "as" layer.
				 */
				SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		} else if (svd->type == MAP_SHARED && svd->amp != NULL &&
		    svd->amp->a_softlockcnt > 0) {
			/*
			 * Try to purge this amp's entries from pcache. It
			 * will succeed only if other segments that share the
			 * amp have no outstanding softlock's.
			 */

	if (behav == MADV_FREE || behav == MADV_PURGE) {
		if (behav == MADV_FREE && (vp != NULL || amp == NULL)) {
			/*
			 * MADV_FREE is not supported for segments with an
			 * underlying object; if anonmap is NULL, anon slots
			 * are not yet populated and there is nothing for us
			 * to do. As MADV_FREE is advisory, we don't return an
			 * error in either case.
			 */
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

			/*
			 * If we're here with a NULL anonmap, it's because we
			 * are doing a MADV_PURGE.  We have nothing to do, but
			 * because MADV_PURGE isn't merely advisory, we return
			 * an error in this case.
			 */
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

		page = seg_page(seg, addr);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		err = anon_disclaim(amp,
		    svd->anon_index + page, len, behav, &purged);

		if (purged != 0 && (svd->flags & MAP_NORESERVE)) {
			/*
			 * If we purged pages on a MAP_NORESERVE mapping, we
			 * need to be sure to now unreserve our reserved swap.
			 * (We use the atomic operations to manipulate our
			 * segment and address space counters because we only
			 * have the corresponding locks held as reader, not
			 */
			ssize_t bytes = ptob(purged);

			anon_unresv_zone(bytes, seg->s_as->a_proc->p_zone);
			atomic_add_long(&svd->swresv, -bytes);
			atomic_add_long(&seg->s_as->a_resvsize, -bytes);

		ANON_LOCK_EXIT(&amp->a_rwlock);
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

		/*
		 * MADV_PURGE and MADV_FREE differ in their return semantics:
		 * because MADV_PURGE is designed to be bug-for-bug compatible
		 * with its clumsy Linux forebear, it will fail where MADV_FREE
		 */
		return (behav == MADV_PURGE ? err : 0);

	/*
	 * If advice is to be applied to entire segment,
	 * use advice field in seg_data structure
	 * otherwise use appropriate vpage entry.
	 */
	if ((addr == seg->s_base) && (len == seg->s_size)) {
		case MADV_ACCESS_LWP:
		case MADV_ACCESS_MANY:
		case MADV_ACCESS_DEFAULT:
			/*
			 * Set memory allocation policy for this segment
			 */
			policy = lgrp_madv_to_policy(behav, len, svd->type);
			if (svd->type == MAP_SHARED)
				already_set = lgrp_shm_policy_set(policy, amp,
				    svd->anon_index, vp, svd->offset, len);
				/*
				 * For private memory, need writers lock on
				 * address space because the segment may be
				 * split or concatenated when changing policy
				 */
				if (AS_READ_HELD(seg->s_as)) {
					SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
				already_set = lgrp_privm_policy_set(policy,
				    &svd->policy_info, len);

			/*
			 * If policy set already and it shouldn't be reapplied,
			 * don't do anything.
			 */
			    !LGRP_MEM_POLICY_REAPPLICABLE(policy))

			/*
			 * Mark any existing pages in given range for
			 */
			page_mark_migrate(seg, addr, len, amp, svd->anon_index,
			    vp, svd->offset, 1);

			/*
			 * If same policy set already or this is a shared
			 * memory segment, don't need to try to concatenate
			 * segment with adjacent ones.
			 */
			if (already_set || svd->type == MAP_SHARED)

			/*
			 * Try to concatenate this segment with previous
			 * one and next one, since we changed policy for
			 * this one and it may be compatible with adjacent
			 */
			prev = AS_SEGPREV(seg->s_as, seg);
			next = AS_SEGNEXT(seg->s_as, seg);

			if (next && next->s_ops == &segvn_ops &&
			    addr + len == next->s_base)
				(void) segvn_concat(seg, next, 1);

			if (prev && prev->s_ops == &segvn_ops &&
			    addr == prev->s_base + prev->s_size) {
				/*
				 * Drop lock for private data of current
				 * segment before concatenating (deleting) it
				 * and return IE_REATTACH to tell as_ctl() that
				 * current segment has changed
				 */
				SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
				if (!segvn_concat(prev, seg, 1))

		case MADV_SEQUENTIAL:
			/*
			 * unloading mapping guarantees
			 * detection in segvn_fault
			 */
			ASSERT(seg->s_szc == 0);
			ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
			hat_unload(seg->s_as->a_hat, addr, len,
			svd->advice = (uchar_t)behav;
			svd->pageadvice = 0;
		case MADV_WILLNEED:	/* handled in memcntl */
		case MADV_DONTNEED:	/* handled in memcntl */
		case MADV_FREE:		/* handled above */
		case MADV_PURGE:	/* handled above */

		struct seg *new_seg;
		struct segvn_data *new_svd;

		page = seg_page(seg, addr);

		if (svd->vpage == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		struct vpage *bvpp, *evpp;

		case MADV_ACCESS_LWP:
		case MADV_ACCESS_MANY:
		case MADV_ACCESS_DEFAULT:
			/*
			 * Set memory allocation policy for portion of this
			 */

			/*
			 * Align address and length of advice to page
			 * boundaries for large pages
			 */
			if (seg->s_szc != 0) {
				pgsz = page_get_pagesize(seg->s_szc);
				addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
				len = P2ROUNDUP(len, pgsz);

			/*
			 * Check to see whether policy is set already
			 */
			policy = lgrp_madv_to_policy(behav, len, svd->type);

			anon_index = svd->anon_index + page;
			off = svd->offset + (uintptr_t)(addr - seg->s_base);

			if (svd->type == MAP_SHARED)
				already_set = lgrp_shm_policy_set(policy, amp,
				    anon_index, vp, off, len);
				    (policy == svd->policy_info.mem_policy);

			/*
			 * If policy set already and it shouldn't be reapplied,
			 * don't do anything.
			 */
			    !LGRP_MEM_POLICY_REAPPLICABLE(policy))

			/*
			 * For private memory, need writers lock on
			 * address space because the segment may be
			 * split or concatenated when changing policy
			 */
			if (svd->type == MAP_PRIVATE &&
			    AS_READ_HELD(seg->s_as)) {
				SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

			/*
			 * Mark any existing pages in given range for
			 */
			page_mark_migrate(seg, addr, len, amp, svd->anon_index,
			    vp, svd->offset, 1);

			/*
			 * Don't need to try to split or concatenate
			 * segments, since policy is same or this is a shared
			 */
			if (already_set || svd->type == MAP_SHARED)

			if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
				ASSERT(svd->softlockcnt == 0);
				hat_leave_region(seg->s_as->a_hat, svd->rcookie,
				svd->rcookie = HAT_INVALID_REGION_COOKIE;

			/*
			 * Split off new segment if advice only applies to a
			 * portion of existing segment starting in middle
			 */
			oldeaddr = seg->s_base + seg->s_size;
			if (addr > seg->s_base) {
				/*
				 * Must flush I/O page cache
				 * before splitting segment
				 */
				if (svd->softlockcnt > 0)

				/*
				 * Split segment and return IE_REATTACH to tell
				 * as_ctl() that current segment changed
				 */
				new_seg = segvn_split_seg(seg, addr);
				new_svd = (struct segvn_data *)new_seg->s_data;

				/*
				 * If new segment ends where old one
				 * did, try to concatenate the new
				 * segment with next one.
				 */
				if (eaddr == oldeaddr) {
					/*
					 * Set policy for new segment
					 */
					(void) lgrp_privm_policy_set(policy,
					    &new_svd->policy_info,
					next = AS_SEGNEXT(new_seg->s_as,
					    next->s_ops == &segvn_ops &&
					    eaddr == next->s_base)
						(void) segvn_concat(new_seg,

			/*
			 * Split off end of existing segment if advice only
			 * applies to a portion of segment ending before
			 * end of the existing segment
			 */
			if (eaddr < oldeaddr) {
				/*
				 * Must flush I/O page cache
				 * before splitting segment
				 */
				if (svd->softlockcnt > 0)

				/*
				 * If beginning of old segment was already
				 * split off, use new segment to split end off
				 */
				if (new_seg != NULL && new_seg != seg) {
					(void) segvn_split_seg(new_seg, eaddr);

					/*
					 * Set policy for new segment
					 */
					(void) lgrp_privm_policy_set(policy,
					    &new_svd->policy_info,
					/*
					 * Split segment and return IE_REATTACH
					 * to tell as_ctl() that current
					 */
					(void) segvn_split_seg(seg, eaddr);

					(void) lgrp_privm_policy_set(policy,
					    &svd->policy_info, seg->s_size);

					/*
					 * If new segment starts where old one
					 * did, try to concatenate it with
					 */
					if (addr == seg->s_base) {
						prev = AS_SEGPREV(seg->s_as,
						/*
						 * Drop lock for private data
						 * of current segment before
						 * concatenating (deleting) it
						 */
						    addr == prev->s_base +
							(void) segvn_concat(

		case MADV_SEQUENTIAL:
			ASSERT(seg->s_szc == 0);
			ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
			hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
			bvpp = &svd->vpage[page];
			evpp = &svd->vpage[page + (len >> PAGESHIFT)];
			for (; bvpp < evpp; bvpp++)
				VPP_SETADVICE(bvpp, behav);
			svd->advice = MADV_NORMAL;
		case MADV_WILLNEED:	/* handled in memcntl */
		case MADV_DONTNEED:	/* handled in memcntl */
		case MADV_FREE:		/* handled above */
		case MADV_PURGE:	/* handled above */
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
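/*
 * Illustrative sketch (not part of this driver): the return-semantics
 * difference between MADV_FREE and MADV_PURGE discussed above is visible
 * through madvise(3C).  The names buf and maplen are hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	advisory: errors are swallowed and 0 is returned
 *	(void) madvise(buf, maplen, MADV_FREE);
 *
 *	MADV_PURGE reports a real error (e.g. pending I/O) to the caller
 *	if (madvise(buf, maplen, MADV_PURGE) != 0)
 *		perror("madvise MADV_PURGE");
 */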
/*
 * There is one kind of inheritance that can be specified for pages:
 *
 *	SEGP_INH_ZERO	- Pages should be zeroed in the child
 */
segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *bvpp, *evpp;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Can't support something we don't know about */
	if (behav != SEGP_INH_ZERO)

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);

	/*
	 * This must be a straightforward anonymous segment that is mapped
	 * privately and is not backed by a vnode.
	 */
	if (svd->tr_state != SEGVN_TR_OFF ||
	    svd->type != MAP_PRIVATE ||

	/*
	 * If the entire segment has been marked as inherit zero, then there is
	 * no reason to do anything else.
	 */
	if (svd->svn_inz == SEGVN_INZ_ALL) {

	/*
	 * If this applies to the entire segment, simply mark it and we're done.
	 */
	if ((addr == seg->s_base) && (len == seg->s_size)) {
		svd->svn_inz = SEGVN_INZ_ALL;

	/*
	 * We've been asked to mark a subset of this segment as inherit zero,
	 * therefore we need to manipulate its vpages.
	 */
	if (svd->vpage == NULL) {
		if (svd->vpage == NULL) {

	svd->svn_inz = SEGVN_INZ_VPP;
	page = seg_page(seg, addr);
	bvpp = &svd->vpage[page];
	evpp = &svd->vpage[page + (len >> PAGESHIFT)];
	for (; bvpp < evpp; bvpp++)
		VPP_SETINHZERO(bvpp);

	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
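/*
 * Illustrative sketch (not part of this driver), assuming the usual
 * user-level entry point: inherit-zero is requested with memcntl(2) and the
 * MC_INHERIT_ZERO command on a private anonymous mapping.  The names buf and
 * maplen are hypothetical.
 *
 *	#include <sys/types.h>
 *	#include <sys/mman.h>
 *
 *	if (memcntl(buf, maplen, MC_INHERIT_ZERO, 0, 0, 0) != 0)
 *		perror("memcntl");
 *	after fork(2), the child sees this range as zero-filled
 */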
/*
 * Create a vpage structure for this seg.
 */
segvn_vpage(struct seg *seg)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	static pgcnt_t page_limit = 0;

	ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));

	/*
	 * If no vpage structure exists, allocate one.  Copy the protections
	 * and the advice from the segment itself to the individual pages.
	 */
	if (svd->vpage == NULL) {
		/*
		 * Start by calculating the number of pages we must allocate to
		 * track the per-page vpage structs needed for this entire
		 * segment. If we know now that it will require more than our
		 * heuristic for the maximum amount of kmem we can consume then
		 * fail. We do this here, instead of trying to detect this deep
		 * in page_resv and propagating the error up, since the entire
		 * memory allocation stack is not amenable to passing this
		 * back. Instead, it wants to keep trying.
		 *
		 * As a heuristic we set a page limit of 5/8s of total_pages
		 * for this allocation. We use shifts so that no floating
		 * point conversion takes place and only need to do the
		 */
		ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
		pgcnt_t npages = mem_needed >> PAGESHIFT;

		if (page_limit == 0)
			page_limit = (total_pages >> 1) + (total_pages >> 3);

		if (npages > page_limit)

		svd->pageadvice = 1;
		svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
		evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
		for (vp = svd->vpage; vp < evp; vp++) {
			VPP_SETPROT(vp, svd->prot);
			VPP_SETADVICE(vp, svd->advice);
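/*
 * Illustrative note on the 5/8 heuristic above: the two shifts compute it
 * without floating point, since x/2 + x/8 == 5x/8.  For example, with
 * total_pages = 1048576:
 *
 *	(1048576 >> 1) + (1048576 >> 3) == 524288 + 131072 == 655360
 *	655360 == 5 * 1048576 / 8
 */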
/*
 * Dump the pages belonging to this segvn segment.
 */
segvn_dump(struct seg *seg)
	struct segvn_data *svd;
	struct anon_map *amp;
	pgcnt_t page, npages;

	npages = seg_pages(seg);
	svd = (struct segvn_data *)seg->s_data;
	off = offset = svd->offset;

	if ((amp = svd->amp) != NULL) {
		anon_index = svd->anon_index;
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	for (page = 0; page < npages; page++, offset += PAGESIZE) {

		if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
			swap_xlate_nopanic(ap, &vp, &off);

		/*
		 * If pp == NULL, the page either does not exist
		 * or is exclusively locked.  So determine if it
		 * exists before searching for it.
		 */
		if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
		pp = page_exists(vp, off);

			pfn = page_pptonum(pp);
			dump_addpage(seg->s_as, addr, pfn);

		dump_timeleft = dump_timeout;

	ANON_LOCK_EXIT(&amp->a_rwlock);
static uint32_t segvn_pglock_mtbf = 0;

#define	PCACHE_SHWLIST		((page_t *)-2)
#define	NOPCACHE_SHWLIST	((page_t *)-1)

/*
 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
 * the same parts of the segment. Currently shadow list creation is only
 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
 * tagged with segment pointer, starting virtual address and length. This
 * approach for MAP_SHARED segments may add many pcache entries for the same
 * set of pages and lead to long hash chains that decrease pcache lookup
 * performance. To avoid this issue for shared segments shared anon map and
 * starting anon index are used for pcache entry tagging. This allows all
 * segments to share pcache entries for the same anon range and reduces pcache
 * chain's length as well as memory overhead from duplicate shadow lists and
 *
 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
 * part of softlockcnt accounting is done differently for private and shared
 * segments. In private segment case softlock is only incremented when a new
 * shadow list is created but not when an existing one is found via
 * seg_plookup(). pcache entries have reference count incremented/decremented
 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
 * reference count can be purged (and purging is needed before segment can be
 * freed). When a private segment pcache entry is purged segvn_reclaim() will
 * decrement softlockcnt. Since in private segment case each of its pcache
 * entries only belongs to this segment we can expect that when
 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
 * segment purge will succeed and softlockcnt will drop to 0. In shared
 * segment case reference count in pcache entry counts active locks from many
 * different segments so we can't expect segment purging to succeed even when
 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
 * segment. To be able to determine when there're no pending pagelocks in
 * shared segment case we don't rely on purging to make softlockcnt drop to 0
 * but instead softlockcnt is incremented and decremented for every
 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
 * list was created or an existing one was found. When softlockcnt drops to 0
 * this segment no longer has any claims for pcached shadow lists and the
 * segment can be freed even if there're still active pcache entries
 * shared by this segment anon map. Shared segment pcache entries belong to
 * anon map and are typically removed when anon map is freed after all
 * processes destroy the segments that use this anon map.
 */
segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
    enum lock_type type, enum seg_rw rw)
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	pgcnt_t adjustpages;
	uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
	struct anon_map *amp;
	struct page **pplist, **pl, *pp;
	caddr_t lpgaddr, lpgeaddr;
	anon_sync_obj_t cookie;
	struct anon_map *pamp;
	seg_preclaim_cbfunc_t preclaim_callback;
	int sftlck_sbase = 0;
	int sftlck_send = 0;

	if (type == L_PAGELOCK && segvn_pglock_mtbf) {
		hrtime_t ts = gethrtime();
		if ((ts % segvn_pglock_mtbf) == 0) {
		if ((ts % segvn_pglock_mtbf) == 1) {

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);

	/*
	 * for now we only support pagelock to anon memory. We would have to
	 * check protections for vnode objects and call into the vnode driver.
	 * That's too much for a fast path. Let the fault entry point handle
	 */
	if (svd->vp != NULL) {
		if (type == L_PAGELOCK) {
		panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
	if ((amp = svd->amp) == NULL) {
		if (type == L_PAGELOCK) {
		panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
	if (rw != S_READ && rw != S_WRITE) {
		if (type == L_PAGELOCK) {
		panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");

	if (seg->s_szc != 0) {
		/*
		 * We are adjusting the pagelock region to the large page size
		 * boundary because the unlocked part of a large page cannot
		 * be freed anyway unless all constituent pages of a large
		 * page are locked. Bigger regions reduce pcache chain length
		 * and improve lookup performance. The tradeoff is that the
		 * very first segvn_pagelock() call for a given page is more
		 * expensive if only 1 page_t is needed for IO. This is only
		 * an issue if pcache entry doesn't get reused by several
		 * subsequent calls. We optimize here for the case when pcache
		 * is heavily used by repeated IOs to the same address range.
		 *
		 * Note segment's page size cannot change while we are holding
		 * as lock.  And then it cannot change while softlockcnt is
		 * not 0. This will allow us to correctly recalculate large
		 * page size region for the matching pageunlock/reclaim call
		 * since as_pageunlock() caller must always match
		 * as_pagelock() call's addr and len.
		 *
		 * For pageunlock *ppp points to the pointer of page_t that
		 * corresponds to the real unadjusted start address. Similar
		 * for pagelock *ppp must point to the pointer of page_t that
		 * corresponds to the real unadjusted start address.
		 */
		pgsz = page_get_pagesize(seg->s_szc);
		CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
		adjustpages = btop((uintptr_t)(addr - lpgaddr));
	} else if (len < segvn_pglock_comb_thrshld) {
		lpgeaddr = addr + len;
		/*
		 * Align the address range of large enough requests to allow
		 * combining of different shadow lists into 1 to reduce memory
		 * overhead from potentially overlapping large shadow lists
		 * (worst case is we have a 1MB IO into buffers with start
		 * addresses separated by 4K). Alignment is only possible if
		 * padded chunks have sufficient access permissions. Note
		 * permissions won't change between L_PAGELOCK and
		 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
		 * segvn_setprot() to wait until softlockcnt drops to 0. This
		 * allows us to determine in L_PAGEUNLOCK the same range we
		 * computed in L_PAGELOCK.
		 *
		 * If alignment is limited by segment ends set
		 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
		 * these flags are set bump softlockcnt_sbase/softlockcnt_send
		 * per segment counters. In L_PAGEUNLOCK case decrease
		 * softlockcnt_sbase/softlockcnt_send counters if
		 * sftlck_sbase/sftlck_send flags are set. When
		 * softlockcnt_sbase/softlockcnt_send are non 0
		 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
		 * won't merge the segments. This restriction combined with
		 * restriction on segment unmapping and splitting for segments
		 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
		 * correctly determine the same range that was previously
		 * locked by matching L_PAGELOCK.
		 */
		pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);

		if (svd->type == MAP_PRIVATE) {
			lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
			    segvn_pglock_comb_balign);
			if (lpgaddr < seg->s_base) {
				lpgaddr = seg->s_base;
			ulong_t aix = svd->anon_index + seg_page(seg, addr);
			ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
			if (aaix < svd->anon_index) {
				lpgaddr = seg->s_base;
				lpgaddr = addr - ptob(aix - aaix);
				ASSERT(lpgaddr >= seg->s_base);

		if (svd->pageprot && lpgaddr != addr) {
			struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
			struct vpage *evp = &svd->vpage[seg_page(seg, addr)];

				if ((VPP_PROT(vp) & protchk) == 0) {

		lpgeaddr = addr + len;
			if (svd->type == MAP_PRIVATE) {
				lpgeaddr = (caddr_t)P2ROUNDUP(
				    (uintptr_t)lpgeaddr,
				    segvn_pglock_comb_balign);
				ulong_t aix = svd->anon_index +
				    seg_page(seg, lpgeaddr);
				ulong_t aaix = P2ROUNDUP(aix,
				    segvn_pglock_comb_palign);
					lpgeaddr += ptob(aaix - aix);
			if (lpgeaddr == 0 ||
			    lpgeaddr > seg->s_base + seg->s_size) {
				lpgeaddr = seg->s_base + seg->s_size;

		if (svd->pageprot && lpgeaddr != addr + len) {
			vp = &svd->vpage[seg_page(seg, addr + len)];
			evp = &svd->vpage[seg_page(seg, lpgeaddr)];

				if ((VPP_PROT(vp) & protchk) == 0) {
					lpgeaddr = addr + len;

		adjustpages = btop((uintptr_t)(addr - lpgaddr));

	/*
	 * For MAP_SHARED segments we create pcache entries tagged by amp and
	 * anon index so that we can share pcache entries with other segments
	 * that map this amp.  For private segments pcache entries are tagged
	 * with segment and virtual address.
	 */
	if (svd->type == MAP_SHARED) {
		paddr = (caddr_t)((lpgaddr - seg->s_base) +
		    ptob(svd->anon_index));
		preclaim_callback = shamp_reclaim;
		preclaim_callback = segvn_reclaim;

	if (type == L_PAGEUNLOCK) {
		VM_STAT_ADD(segvnvmstats.pagelock[0]);

		/*
		 * update hat ref bits for /proc. We need to make sure
		 * that threads tracing the ref and mod bits of the
		 * address space get the right data.
		 * Note: page ref and mod bits are updated at reclaim time
		 */
		if (seg->s_as->a_vbits) {
			for (a = addr; a < addr + len; a += PAGESIZE) {
				if (rw == S_WRITE) {
					hat_setstat(seg->s_as, a,
					    PAGESIZE, P_REF | P_MOD);
					hat_setstat(seg->s_as, a,

		/*
		 * Check the shadow list entry after the last page used in
		 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
		 * was not inserted into pcache and is not large page
		 * adjusted.  In this case call reclaim callback directly and
		 * don't adjust the shadow list start and size for large
		 */
		if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
				ASSERT(svd->type == MAP_SHARED);
				ptag = (void *)pamp;
				paddr = (caddr_t)((addr - seg->s_base) +
				    ptob(svd->anon_index));
			(*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
			ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
			    IS_SWAPFSVP((*ppp)[npages]->p_vnode));
			len = lpgeaddr - lpgaddr;
			seg_pinactive(seg, pamp, paddr, len,
			    *ppp - adjustpages, rw, pflags, preclaim_callback);

			ASSERT(svd->type == MAP_SHARED);
			ASSERT(svd->softlockcnt >= npages);
			atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);

			ASSERT(svd->softlockcnt_sbase > 0);
			atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
			ASSERT(svd->softlockcnt_send > 0);
			atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called, while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			if (svd->softlockcnt == 0) {
				mutex_enter(&seg->s_as->a_contents);
				if (AS_ISUNMAPWAIT(seg->s_as)) {
					AS_CLRUNMAPWAIT(seg->s_as);
					cv_broadcast(&seg->s_as->a_cv);
				mutex_exit(&seg->s_as->a_contents);
			} else if (pamp == NULL) {
				/*
				 * softlockcnt is not 0 and this is a
				 * MAP_PRIVATE segment. Try to purge its
				 * pcache entries to reduce softlockcnt.
				 * If it drops to 0 segvn_reclaim()
				 * will wake up a thread waiting on
				 *
				 * We don't purge MAP_SHARED segments with non
				 * 0 softlockcnt since IO is still in progress
				 * for such segments.
				 */
				ASSERT(svd->type == MAP_PRIVATE);

		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	/* The L_PAGELOCK case ... */

	VM_STAT_ADD(segvnvmstats.pagelock[1]);

	/*
	 * For MAP_SHARED segments we have to check protections before
	 * seg_plookup() since pcache entries may be shared by many segments
	 * with potentially different page protections.
	 */
		ASSERT(svd->type == MAP_SHARED);
		if (svd->pageprot == 0) {
			if ((svd->prot & protchk) == 0) {
			/*
			 * check page protections
			 */
			for (; a < ea; a += pgsz) {
				ASSERT(seg->s_szc == 0 ||
				    sameprot(seg, a, pgsz));
				vp = &svd->vpage[seg_page(seg, a)];
				if ((VPP_PROT(vp) & protchk) == 0) {

	/*
	 * try to find pages in segment page cache
	 */
	pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
	if (pplist != NULL) {
			npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
			ASSERT(svd->type == MAP_SHARED);
			atomic_add_long((ulong_t *)&svd->softlockcnt,
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		*ppp = pplist + adjustpages;

	/*
	 * For MAP_SHARED segments we already verified above that segment
	 * protections allow this pagelock operation.
	 */
		ASSERT(svd->type == MAP_PRIVATE);
		if (svd->pageprot == 0) {
			if ((svd->prot & protchk) == 0) {
			if (svd->prot & PROT_WRITE) {
				wlen = lpgeaddr - lpgaddr;
				ASSERT(rw == S_READ);
			/*
			 * check page protections
			 */
			for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
				ASSERT(seg->s_szc == 0 ||
				    sameprot(seg, a, pgsz));
				vp = &svd->vpage[seg_page(seg, a)];
				if ((VPP_PROT(vp) & protchk) == 0) {
				if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
					ASSERT(rw == S_READ);
		ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
		ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);

	/*
	 * Only build large page adjusted shadow list if we expect to insert
	 * it into pcache. For large enough pages it's a big overhead to
	 * create a shadow list of the entire large page. But this overhead
	 * should be amortized over repeated pcache hits on subsequent reuse
	 * of this shadow list (IO into any range within this shadow list will
	 * find it in pcache since we large page align the request for pcache
	 * lookups). pcache performance is improved with bigger shadow lists
	 * as it reduces the time to pcache the entire big segment and reduces
	 * pcache chain length.
	 */
	if (seg_pinsert_check(seg, pamp, paddr,
	    lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
		len = lpgeaddr - lpgaddr;
		/*
		 * Since this entry will not be inserted into the pcache, we
		 * will not do any adjustments to the starting address or
		 * size of the memory to be locked.
		 */

	pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
	*ppp = pplist + adjustpages;
	/*
	 * If use_pcache is 0 this shadow list is not large page adjusted.
	 * Record this info in the last entry of shadow array so that
	 * L_PAGEUNLOCK can determine if it should large page adjust the
	 * address range to find the real range that was locked.
	 */
	pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;

	page = seg_page(seg, addr);
	anon_index = svd->anon_index + page;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	ASSERT(amp->a_szc >= seg->s_szc);
	anpgcnt = page_get_pagecnt(amp->a_szc);
	for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
		/*
		 * Lock and unlock anon array only once per large page.
		 * anon_array_enter() locks the root anon slot according to
		 * a_szc which can't change while anon map is locked.  We lock
		 * anon the first time through this loop and each time we
		 * reach anon index that corresponds to a root of a large
		 */
		if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
			ASSERT(anlock == 0);
			anon_array_enter(amp, anon_index, &cookie);
		ap = anon_get_ptr(amp->ahp, anon_index);

		/*
		 * We must never use seg_pcache for COW pages
		 * because we might end up with original page still
		 * lying in seg_pcache even after private page is
		 * created. This leads to data corruption as
		 * aio_write refers to the page still in cache
		 * while all other accesses refer to the private
		 */
		if (ap == NULL || ap->an_refcnt != 1) {
			struct vpage *vpage;

			if (svd->vpage != NULL) {
				vpage = &svd->vpage[seg_page(seg, a)];
				anon_array_exit(&cookie);
			error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
			    vpage, &pp, 0, F_INVAL, rw, 1);
				error = fc_decode(error);
				anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap == NULL || ap->an_refcnt != 1) {
		swap_xlate(ap, &vp, &off);
		pp = page_lookup_nowait(vp, off, SE_SHARED);
		if (ap->an_pvp != NULL) {
			anon_swap_free(ap, pp);
		/*
		 * Unlock anon if this is the last slot in a large page.
		 */
		if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
			anon_array_exit(&cookie);

	if (anlock) {		/* Ensure the lock is dropped */
		anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (a >= addr + len) {
		atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
			ASSERT(svd->type == MAP_SHARED);
			atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
			(void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
			    rw, pflags, preclaim_callback);
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
	while (np > (uint_t)0) {
		ASSERT(PAGE_LOCKED(*pplist));
		page_unlock(*pplist);

	kmem_free(pl, sizeof (page_t *) * (npages + 1));
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
/*
 * purge any cached pages in the I/O page cache
 */
static void
segvn_purge(struct seg *seg)
{
        struct segvn_data *svd = (struct segvn_data *)seg->s_data;

        /*
         * pcache is only used by pure anon segments.
         */
        if (svd->amp == NULL || svd->vp != NULL) {
                return;
        }

        /*
         * For MAP_SHARED segments non 0 segment's softlockcnt means
         * active IO is still in progress via this segment. So we only
         * purge MAP_SHARED segments when their softlockcnt is 0.
         */
        if (svd->type == MAP_PRIVATE) {
                if (svd->softlockcnt) {
                        seg_ppurge(seg, NULL, 0);
                }
        } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
                seg_ppurge(seg, svd->amp, 0);
        }
}
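/*
 * seg_pcache reclaim callback for shadow lists inserted with the segment as
 * the pcache tag: clear ref/mod state on each page, unlock it, free the
 * shadow list and drop svd->softlockcnt, waking up unmap waiters when the
 * count reaches zero.
 */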
/*
 * If async argument is not 0 we are called from pcache async thread and don't
 * hold AS lock.
 */
static int
segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
        struct seg *seg = (struct seg *)ptag;
        struct segvn_data *svd = (struct segvn_data *)seg->s_data;
        pgcnt_t np, npages;
        struct page **pl;

        npages = np = btop(len);
        ASSERT(npages);

        ASSERT(svd->vp == NULL && svd->amp != NULL);
        ASSERT(svd->softlockcnt >= npages);
        ASSERT(async || AS_LOCK_HELD(seg->s_as));

        pl = pplist;

        ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
        ASSERT(!async || pl[np] == PCACHE_SHWLIST);

        while (np > (uint_t)0) {
                if (rw == S_WRITE) {
                        hat_setrefmod(*pplist);
                } else {
                        hat_setref(*pplist);
                }
                page_unlock(*pplist);
                np--;
                pplist++;
        }

        kmem_free(pl, sizeof (page_t *) * (npages + 1));

        /*
         * If we are pcache async thread we don't hold AS lock. This means if
         * softlockcnt drops to 0 after the decrement below address space may
         * get freed. We can't allow it since after softlock decrement to 0 we
         * still need to access as structure for possible wakeup of unmap
         * waiters. To prevent the disappearance of as we take this segment
         * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
         * make sure this routine completes before segment is freed.
         *
         * The second complication we have to deal with in async case is a
         * possibility of missed wake up of unmap wait thread. When we don't
         * hold as lock here we may take a_contents lock before unmap wait
         * thread that was first to see softlockcnt was still not 0. As a
         * result we'll fail to wake up an unmap wait thread. To avoid this
         * race we set nounmapwait flag in as structure if we drop softlockcnt
         * to 0 when we were called by pcache async thread. unmapwait thread
         * will not block if this flag is set.
         */
        if (async) {
                mutex_enter(&svd->segfree_syncmtx);
        }

        if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
                if (async || AS_ISUNMAPWAIT(seg->s_as)) {
                        mutex_enter(&seg->s_as->a_contents);
                        if (async) {
                                AS_SETNOUNMAPWAIT(seg->s_as);
                        }
                        if (AS_ISUNMAPWAIT(seg->s_as)) {
                                AS_CLRUNMAPWAIT(seg->s_as);
                                cv_broadcast(&seg->s_as->a_cv);
                        }
                        mutex_exit(&seg->s_as->a_contents);
                }
        }

        if (async) {
                mutex_exit(&svd->segfree_syncmtx);
        }
        return (0);
}
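/*
 * seg_pcache reclaim callback for shadow lists inserted with an anon map
 * (rather than a segment) as the pcache tag. Like segvn_reclaim() above but
 * drops amp->a_softlockcnt and wakes up anonmap_purge() waiters when the
 * count reaches zero.
 */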
static int
shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
        amp_t *amp = (amp_t *)ptag;
        pgcnt_t np, npages;
        struct page **pl;

        npages = np = btop(len);
        ASSERT(npages);
        ASSERT(amp->a_softlockcnt >= npages);

        pl = pplist;

        ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
        ASSERT(!async || pl[np] == PCACHE_SHWLIST);

        while (np > (uint_t)0) {
                if (rw == S_WRITE) {
                        hat_setrefmod(*pplist);
                } else {
                        hat_setref(*pplist);
                }
                page_unlock(*pplist);
                np--;
                pplist++;
        }

        kmem_free(pl, sizeof (page_t *) * (npages + 1));

        /*
         * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
         * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
         * and anonmap_purge() acquires a_purgemtx.
         */
        mutex_enter(&amp->a_purgemtx);
        if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
            amp->a_purgewait) {
                amp->a_purgewait = 0;
                cv_broadcast(&amp->a_purgecv);
        }
        mutex_exit(&amp->a_purgemtx);
        return (0);
}
/*
 * get a memory ID for an addr in a given segment
 *
 * XXX only creates PAGESIZE pages if anon slots are not initialized.
 * At fault time they will be relocated into larger pages.
 */
static int
segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
        struct segvn_data *svd = (struct segvn_data *)seg->s_data;
        struct anon *ap = NULL;
        ulong_t anon_index;
        struct anon_map *amp;
        anon_sync_obj_t cookie;

        if (svd->type == MAP_PRIVATE) {
                memidp->val[0] = (uintptr_t)seg->s_as;
                memidp->val[1] = (uintptr_t)addr;
                return (0);
        }

        if (svd->type == MAP_SHARED) {
                if (svd->vp != NULL) {
                        memidp->val[0] = (uintptr_t)svd->vp;
                        memidp->val[1] = (u_longlong_t)svd->offset +
                            (uintptr_t)(addr - seg->s_base);
                        return (0);
                } else {
                        SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
                        if ((amp = svd->amp) != NULL) {
                                anon_index = svd->anon_index +
                                    seg_page(seg, addr);
                        }
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

                        ASSERT(amp != NULL);

                        ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
                        anon_array_enter(amp, anon_index, &cookie);
                        ap = anon_get_ptr(amp->ahp, anon_index);
                        if (ap == NULL) {
                                page_t *pp;

                                pp = anon_zero(seg, addr, &ap, svd->cred);
                                if (pp == NULL) {
                                        anon_array_exit(&cookie);
                                        ANON_LOCK_EXIT(&amp->a_rwlock);
                                        return (ENOMEM);
                                }
                                ASSERT(anon_get_ptr(amp->ahp, anon_index)
                                    == NULL);
                                (void) anon_set_ptr(amp->ahp, anon_index,
                                    ap, ANON_SLEEP);
                                page_unlock(pp);
                        }

                        anon_array_exit(&cookie);
                        ANON_LOCK_EXIT(&amp->a_rwlock);

                        memidp->val[0] = (uintptr_t)ap;
                        memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
                        return (0);
                }
        }
        return (EINVAL);
}
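/*
 * Return 1 if every page in the range [a, a + len) has the same per-page
 * protections, 0 otherwise. Segments without per-page protections
 * (svd->pageprot == 0) trivially qualify.
 */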
static int
sameprot(struct seg *seg, caddr_t a, size_t len)
{
        struct segvn_data *svd = (struct segvn_data *)seg->s_data;
        struct vpage *vpage;
        spgcnt_t pages = btop(len);
        uint_t prot;

        if (svd->pageprot == 0)
                return (1);

        ASSERT(svd->vpage != NULL);

        vpage = &svd->vpage[seg_page(seg, a)];
        prot = VPP_PROT(vpage);
        vpage++;
        pages--;
        while (pages-- > 0) {
                if (prot != VPP_PROT(vpage))
                        return (0);
                vpage++;
        }
        return (1);
}
/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segvn_getpolicy(struct seg *seg, caddr_t addr)
{
        struct anon_map *amp;
        ulong_t anon_index;
        lgrp_mem_policy_info_t *policy_info;
        struct segvn_data *svn_data;
        uoff_t vn_off;
        vnode_t *vp;

        ASSERT(seg != NULL);

        svn_data = (struct segvn_data *)seg->s_data;
        if (svn_data == NULL)
                return (NULL);

        /*
         * Get policy info for private or shared memory
         */
        if (svn_data->type != MAP_SHARED) {
                if (svn_data->tr_state != SEGVN_TR_ON) {
                        policy_info = &svn_data->policy_info;
                } else {
                        policy_info = &svn_data->tr_policy_info;
                        ASSERT(policy_info->mem_policy ==
                            LGRP_MEM_POLICY_NEXT_SEG);
                }
        } else {
                amp = svn_data->amp;
                anon_index = svn_data->anon_index + seg_page(seg, addr);
                vp = svn_data->vp;
                vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
                policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
        }

        return (policy_info);
}
/*
 * Bind text vnode segment to an amp. If we bind successfully mappings will be
 * established to per vnode mapping per lgroup amp pages instead of to vnode
 * pages. There's one amp per vnode text mapping per lgroup. Many processes
 * may share the same text replication amp. If a suitable amp doesn't already
 * exist in svntr hash table create a new one. We may fail to bind to amp if
 * segment is not eligible for text replication. Code below first checks for
 * these conditions. If binding is successful segment tr_state is set to on
 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
 * svd->amp remains as NULL.
 */
static void
segvn_textrepl(struct seg *seg)
{
        struct segvn_data *svd = (struct segvn_data *)seg->s_data;
        vnode_t *vp = svd->vp;
        uoff_t off = svd->offset;
        size_t size = seg->s_size;
        uoff_t eoff = off + size;
        uint_t szc = seg->s_szc;
        ulong_t hash = SVNTR_HASH_FUNC(vp);
        svntr_t *svntrp;
        vattr_t va;
        proc_t *p = seg->s_as->a_proc;
        lgrp_id_t lgrp_id;
        lgrp_id_t olid;
        int first;
        struct anon_map *amp;

        ASSERT(AS_LOCK_HELD(seg->s_as));
        ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
        ASSERT(p != NULL);
        ASSERT(svd->tr_state == SEGVN_TR_INIT);
        ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
        ASSERT(svd->flags & MAP_TEXT);
        ASSERT(svd->type == MAP_PRIVATE);
        ASSERT(vp != NULL && svd->amp == NULL);
        ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
        ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
        ASSERT(seg->s_as != &kas);
        ASSERT(svntr_hashtab != NULL);

        /*
         * If numa optimizations are no longer desired bail out.
         */
        if (!lgrp_optimizations()) {
                svd->tr_state = SEGVN_TR_OFF;
                return;
        }

        /*
         * Avoid creating anon maps with size bigger than the file size.
         * If fop_getattr() call fails bail out.
         */
        va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
        if (fop_getattr(vp, &va, 0, svd->cred, NULL) != 0) {
                svd->tr_state = SEGVN_TR_OFF;
                SEGVN_TR_ADDSTAT(gaerr);
                return;
        }
        if (btopr(va.va_size) < btopr(eoff)) {
                svd->tr_state = SEGVN_TR_OFF;
                SEGVN_TR_ADDSTAT(overmap);
                return;
        }

        /*
         * VVMEXEC may not be set yet if exec() prefaults text segment. Set
         * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
         * mapping that checks if trcache for this vnode needs to be
         * invalidated can't miss us.
         */
        if (!(vp->v_flag & VVMEXEC)) {
                mutex_enter(&vp->v_lock);
                vp->v_flag |= VVMEXEC;
                mutex_exit(&vp->v_lock);
        }
        mutex_enter(&svntr_hashtab[hash].tr_lock);
        /*
         * Bail out if potentially MAP_SHARED writable mappings exist to this
         * vnode. We don't want to use old file contents from existing
         * replicas if this mapping was established after the original file
         * was modified.
         */
        if (vn_is_mapped(vp, V_WRITE)) {
                mutex_exit(&svntr_hashtab[hash].tr_lock);
                svd->tr_state = SEGVN_TR_OFF;
                SEGVN_TR_ADDSTAT(wrcnt);
                return;
        }
        svntrp = svntr_hashtab[hash].tr_head;
        for (; svntrp != NULL; svntrp = svntrp->tr_next) {
                ASSERT(svntrp->tr_refcnt != 0);
                if (svntrp->tr_vp != vp) {
                        continue;
                }

                /*
                 * Bail out if the file or its attributes were changed after
                 * this replication entry was created since we need to use the
                 * latest file contents. Note that mtime test alone is not
                 * sufficient because a user can explicitly change mtime via
                 * utimes(2) interfaces back to the old value after modifying
                 * the file contents. To detect this case we also have to test
                 * ctime which among other things records the time of the last
                 * mtime change by utimes(2). ctime is not changed when the file
                 * is only read or executed so we expect that typically existing
                 * replication amp's can be used most of the time.
                 */
                if (!svntrp->tr_valid ||
                    svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
                    svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
                    svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
                    svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
                        mutex_exit(&svntr_hashtab[hash].tr_lock);
                        svd->tr_state = SEGVN_TR_OFF;
                        SEGVN_TR_ADDSTAT(stale);
                        return;
                }
                /*
                 * if off, eoff and szc match current segment we found the
                 * existing entry we can use.
                 */
                if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
                    svntrp->tr_szc == szc) {
                        break;
                }
                /*
                 * Don't create different but overlapping in file offsets
                 * entries to avoid replication of the same file pages more
                 * than once per lgroup.
                 */
                if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
                    (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
                        mutex_exit(&svntr_hashtab[hash].tr_lock);
                        svd->tr_state = SEGVN_TR_OFF;
                        SEGVN_TR_ADDSTAT(overlap);
                        return;
                }
        }
        /*
         * If we didn't find existing entry create a new one.
         */
        if (svntrp == NULL) {
                svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
                if (svntrp == NULL) {
                        mutex_exit(&svntr_hashtab[hash].tr_lock);
                        svd->tr_state = SEGVN_TR_OFF;
                        SEGVN_TR_ADDSTAT(nokmem);
                        return;
                }
#ifdef DEBUG
                {
                        lgrp_id_t i;
                        for (i = 0; i < NLGRPS_MAX; i++) {
                                ASSERT(svntrp->tr_amp[i] == NULL);
                        }
                }
#endif /* DEBUG */
                svntrp->tr_vp = vp;
                svntrp->tr_off = off;
                svntrp->tr_eoff = eoff;
                svntrp->tr_szc = szc;
                svntrp->tr_valid = 1;
                svntrp->tr_mtime = va.va_mtime;
                svntrp->tr_ctime = va.va_ctime;
                svntrp->tr_refcnt = 0;
                svntrp->tr_next = svntr_hashtab[hash].tr_head;
                svntr_hashtab[hash].tr_head = svntrp;
        }
        first = 1;
again:
        /*
         * We want to pick a replica with pages on main thread's (t_tid = 1,
         * aka T1) lgrp. Currently text replication is only optimized for
         * workloads that either have all threads of a process on the same
         * lgrp or execute their large text primarily on main thread.
         */
        lgrp_id = p->p_t1_lgrpid;
        if (lgrp_id == LGRP_NONE) {
                /*
                 * In case exec() prefaults text on non main thread use
                 * current thread lgrpid. It will become main thread anyway
                 * soon.
                 */
                lgrp_id = lgrp_home_id(curthread);
        }
        /*
         * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
         * just set it to NLGRPS_MAX if it's different from current process T1
         * home lgrp. p_tr_lgrpid is used to detect if process uses text
         * replication and T1 new home is different from lgrp used for text
         * replication. When this happens asynchronous segvn thread rechecks if
         * segments should change lgrps used for text replication. If we fail
         * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX
         * without cas if it's not already NLGRPS_MAX and not equal lgrp_id
         * we want to use. We don't need to use cas in this case because
         * another thread that races in between our non atomic check and set
         * may only change p_tr_lgrpid to NLGRPS_MAX at this point.
         */
        ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
        olid = p->p_tr_lgrpid;
        if (lgrp_id != olid && olid != NLGRPS_MAX) {
                lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
                if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
                    olid) {
                        olid = p->p_tr_lgrpid;
                        ASSERT(olid != LGRP_NONE);
                        if (olid != lgrp_id && olid != NLGRPS_MAX) {
                                p->p_tr_lgrpid = NLGRPS_MAX;
                        }
                }
                ASSERT(p->p_tr_lgrpid != LGRP_NONE);
                membar_producer();
                /*
                 * lgrp_move_thread() won't schedule async recheck after
                 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
                 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
                 * is not LGRP_NONE.
                 */
                if (first && p->p_t1_lgrpid != LGRP_NONE &&
                    p->p_t1_lgrpid != lgrp_id) {
                        first = 0;
                        goto again;
                }
        }
        /*
         * If no amp was created yet for lgrp_id create a new one as long as
         * we have enough memory to afford it.
         */
        if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
                size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
                if (trmem > segvn_textrepl_max_bytes) {
                        SEGVN_TR_ADDSTAT(normem);
                        goto fail;
                }
                if (anon_try_resv_zone(size, NULL) == 0) {
                        SEGVN_TR_ADDSTAT(noanon);
                        goto fail;
                }
                amp = anonmap_alloc(size, size, ANON_NOSLEEP);
                if (amp == NULL) {
                        anon_unresv_zone(size, NULL);
                        SEGVN_TR_ADDSTAT(nokmem);
                        goto fail;
                }
                ASSERT(amp->refcnt == 1);
                amp->a_szc = szc;
                svntrp->tr_amp[lgrp_id] = amp;
                SEGVN_TR_ADDSTAT(newamp);
        }
        svntrp->tr_refcnt++;
        ASSERT(svd->svn_trnext == NULL);
        ASSERT(svd->svn_trprev == NULL);
        svd->svn_trnext = svntrp->tr_svnhead;
        svd->svn_trprev = NULL;
        if (svntrp->tr_svnhead != NULL) {
                svntrp->tr_svnhead->svn_trprev = svd;
        }
        svntrp->tr_svnhead = svd;
        ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
        ASSERT(amp->refcnt >= 1);
        svd->amp = amp;
        svd->anon_index = 0;
        svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
        svd->tr_policy_info.mem_lgrpid = lgrp_id;
        svd->tr_state = SEGVN_TR_ON;
        mutex_exit(&svntr_hashtab[hash].tr_lock);
        SEGVN_TR_ADDSTAT(repl);
        return;
fail:
        ASSERT(segvn_textrepl_bytes >= size);
        atomic_add_long(&segvn_textrepl_bytes, -size);
        ASSERT(svntrp != NULL);
        ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
        if (svntrp->tr_refcnt == 0) {
                ASSERT(svntrp == svntr_hashtab[hash].tr_head);
                svntr_hashtab[hash].tr_head = svntrp->tr_next;
                mutex_exit(&svntr_hashtab[hash].tr_lock);
                kmem_cache_free(svntr_cache, svntrp);
        } else {
                mutex_exit(&svntr_hashtab[hash].tr_lock);
        }
        svd->tr_state = SEGVN_TR_OFF;
}
/*
 * Convert seg back to regular vnode mapping seg by unbinding it from its text
 * replication amp. This routine is most typically called when segment is
 * unmapped but can also be called when segment no longer qualifies for text
 * replication (e.g. due to protection changes). If unload_unmap is set use
 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of
 * svntr free all its anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
        struct segvn_data *svd = (struct segvn_data *)seg->s_data;
        vnode_t *vp = svd->vp;
        uoff_t off = svd->offset;
        size_t size = seg->s_size;
        uoff_t eoff = off + size;
        uint_t szc = seg->s_szc;
        ulong_t hash = SVNTR_HASH_FUNC(vp);
        svntr_t *svntrp;
        svntr_t **prv_svntrp;
        lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
        lgrp_id_t i;

        ASSERT(AS_LOCK_HELD(seg->s_as));
        ASSERT(AS_WRITE_HELD(seg->s_as) ||
            SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
        ASSERT(svd->tr_state == SEGVN_TR_ON);
        ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
        ASSERT(svd->amp != NULL);
        ASSERT(svd->amp->refcnt >= 1);
        ASSERT(svd->anon_index == 0);
        ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
        ASSERT(svntr_hashtab != NULL);

        mutex_enter(&svntr_hashtab[hash].tr_lock);
        prv_svntrp = &svntr_hashtab[hash].tr_head;
        for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
                ASSERT(svntrp->tr_refcnt != 0);
                if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
                    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
                        break;
                }
        }
        if (svntrp == NULL) {
                panic("segvn_textunrepl: svntr record not found");
        }
        if (svntrp->tr_amp[lgrp_id] != svd->amp) {
                panic("segvn_textunrepl: amp mismatch");
        }
        svd->tr_state = SEGVN_TR_OFF;
        svd->amp = NULL;
        if (svd->svn_trprev == NULL) {
                ASSERT(svntrp->tr_svnhead == svd);
                svntrp->tr_svnhead = svd->svn_trnext;
                if (svntrp->tr_svnhead != NULL) {
                        svntrp->tr_svnhead->svn_trprev = NULL;
                }
                svd->svn_trnext = NULL;
        } else {
                svd->svn_trprev->svn_trnext = svd->svn_trnext;
                if (svd->svn_trnext != NULL) {
                        svd->svn_trnext->svn_trprev = svd->svn_trprev;
                        svd->svn_trnext = NULL;
                }
                svd->svn_trprev = NULL;
        }
        if (--svntrp->tr_refcnt) {
                mutex_exit(&svntr_hashtab[hash].tr_lock);
                goto done;
        }
        *prv_svntrp = svntrp->tr_next;
        mutex_exit(&svntr_hashtab[hash].tr_lock);
        for (i = 0; i < NLGRPS_MAX; i++) {
                struct anon_map *amp = svntrp->tr_amp[i];
                if (amp == NULL) {
                        continue;
                }
                ASSERT(amp->refcnt == 1);
                ASSERT(amp->swresv == size);
                ASSERT(amp->size == size);
                ASSERT(amp->a_szc == szc);
                if (amp->a_szc != 0) {
                        anon_free_pages(amp->ahp, 0, size, szc);
                } else {
                        anon_free(amp->ahp, 0, size);
                }
                svntrp->tr_amp[i] = NULL;
                ASSERT(segvn_textrepl_bytes >= size);
                atomic_add_long(&segvn_textrepl_bytes, -size);
                anon_unresv_zone(amp->swresv, NULL);
                amp->refcnt = 0;
                anonmap_free(amp);
        }
        kmem_cache_free(svntr_cache, svntrp);
done:
        hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
            unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}
/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (VVMEXEC flag is set). In this case we
 * need to prevent further use of existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
        ulong_t hash = SVNTR_HASH_FUNC(vp);
        svntr_t *svntrp;

        ASSERT(vp->v_flag & VVMEXEC);

        if (svntr_hashtab == NULL) {
                return;
        }

        mutex_enter(&svntr_hashtab[hash].tr_lock);
        svntrp = svntr_hashtab[hash].tr_head;
        for (; svntrp != NULL; svntrp = svntrp->tr_next) {
                ASSERT(svntrp->tr_refcnt != 0);
                if (svntrp->tr_vp == vp && svntrp->tr_valid) {
                        svntrp->tr_valid = 0;
                }
        }
        mutex_exit(&svntr_hashtab[hash].tr_lock);
}
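/*
 * Asynchronous text replication thread: waits on segvn_trasync_sem, which is
 * posted by segvn_trupdate_wakeup() when text replication threads migrate
 * between lgroups, and then calls segvn_trupdate() to recheck which
 * replication amps the affected segments should use.
 */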
static void
segvn_trasync_thread(void)
{
        callb_cpr_t cpr_info;
        kmutex_t cpr_lock;      /* just for CPR stuff */

        mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

        CALLB_CPR_INIT(&cpr_info, &cpr_lock,
            callb_generic_cpr, "segvn_async");

        if (segvn_update_textrepl_interval == 0) {
                segvn_update_textrepl_interval = segvn_update_tr_time * hz;
        } else {
                segvn_update_textrepl_interval *= hz;
        }
        (void) timeout(segvn_trupdate_wakeup, NULL,
            segvn_update_textrepl_interval);

        for (;;) {
                mutex_enter(&cpr_lock);
                CALLB_CPR_SAFE_BEGIN(&cpr_info);
                mutex_exit(&cpr_lock);
                sema_p(&segvn_trasync_sem);
                mutex_enter(&cpr_lock);
                CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
                mutex_exit(&cpr_lock);
                segvn_trupdate();
        }
}
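/*
 * Timeout handler driving the async thread above: if the number of text
 * replication thread migrations changed since the snapshot kept below, post
 * segvn_trasync_sem, then rearm the timeout unless updates are disabled or
 * the update interval is zero.
 */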
static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;

static void
segvn_trupdate_wakeup(void *dummy)
{
        uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

        if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
                segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
                sema_v(&segvn_trasync_sem);
        }

        if (!segvn_disable_textrepl_update &&
            segvn_update_textrepl_interval != 0) {
                (void) timeout(segvn_trupdate_wakeup, dummy,
                    segvn_update_textrepl_interval);
        }
}
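/*
 * Walk the whole svntr hash table and recheck every segment currently bound
 * to a text replication amp via segvn_trupdate_seg().
 */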
static void
segvn_trupdate(void)
{
        ulong_t hash;
        svntr_t *svntrp;
        segvn_data_t *svd;

        ASSERT(svntr_hashtab != NULL);

        for (hash = 0; hash < svntr_hashtab_sz; hash++) {
                mutex_enter(&svntr_hashtab[hash].tr_lock);
                svntrp = svntr_hashtab[hash].tr_head;
                for (; svntrp != NULL; svntrp = svntrp->tr_next) {
                        ASSERT(svntrp->tr_refcnt != 0);
                        svd = svntrp->tr_svnhead;
                        for (; svd != NULL; svd = svd->svn_trnext) {
                                segvn_trupdate_seg(svd->seg, svd, svntrp,
                                    hash);
                        }
                }
                mutex_exit(&svntr_hashtab[hash].tr_lock);
        }
}
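/*
 * Recheck a single text replication segment: if the process T1 home lgroup
 * has changed since the segment was bound, switch svd->amp to the replica
 * for the new lgroup (allocating one if needed) and unload the existing
 * translations so they are re-established from the new amp on the next
 * fault.
 */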
static void
segvn_trupdate_seg(struct seg *seg, segvn_data_t *svd, svntr_t *svntrp,
    ulong_t hash)
{
        proc_t *p;
        lgrp_id_t lgrp_id;
        struct as *as;
        size_t size;
        struct anon_map *amp;

        ASSERT(svd->vp != NULL);
        ASSERT(svd->vp == svntrp->tr_vp);
        ASSERT(svd->offset == svntrp->tr_off);
        ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
        ASSERT(seg != NULL);
        ASSERT(svd->seg == seg);
        ASSERT(seg->s_data == (void *)svd);
        ASSERT(seg->s_szc == svntrp->tr_szc);
        ASSERT(svd->tr_state == SEGVN_TR_ON);
        ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
        ASSERT(svd->amp != NULL);
        ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
        ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
        ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
        ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
        ASSERT(svntrp->tr_refcnt != 0);
        ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

        as = seg->s_as;
        ASSERT(as != NULL && as != &kas);
        p = as->a_proc;
        ASSERT(p != NULL);
        ASSERT(p->p_tr_lgrpid != LGRP_NONE);
        lgrp_id = p->p_t1_lgrpid;
        if (lgrp_id == LGRP_NONE) {
                return;
        }
        ASSERT(lgrp_id < NLGRPS_MAX);
        if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
                return;
        }

        /*
         * Use tryenter locking since we are locking as/seg and svntr hash
         * lock in reverse from synchronous thread order.
         */
        if (!AS_LOCK_TRYENTER(as, RW_READER)) {
                SEGVN_TR_ADDSTAT(nolock);
                if (segvn_lgrp_trthr_migrs_snpsht) {
                        segvn_lgrp_trthr_migrs_snpsht = 0;
                }
                return;
        }
        if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
                AS_LOCK_EXIT(as);
                SEGVN_TR_ADDSTAT(nolock);
                if (segvn_lgrp_trthr_migrs_snpsht) {
                        segvn_lgrp_trthr_migrs_snpsht = 0;
                }
                return;
        }
        size = seg->s_size;
        if (svntrp->tr_amp[lgrp_id] == NULL) {
                size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
                if (trmem > segvn_textrepl_max_bytes) {
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
                        AS_LOCK_EXIT(as);
                        atomic_add_long(&segvn_textrepl_bytes, -size);
                        SEGVN_TR_ADDSTAT(normem);
                        return;
                }
                if (anon_try_resv_zone(size, NULL) == 0) {
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
                        AS_LOCK_EXIT(as);
                        atomic_add_long(&segvn_textrepl_bytes, -size);
                        SEGVN_TR_ADDSTAT(noanon);
                        return;
                }
                amp = anonmap_alloc(size, size, KM_NOSLEEP);
                if (amp == NULL) {
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
                        AS_LOCK_EXIT(as);
                        atomic_add_long(&segvn_textrepl_bytes, -size);
                        anon_unresv_zone(size, NULL);
                        SEGVN_TR_ADDSTAT(nokmem);
                        return;
                }
                ASSERT(amp->refcnt == 1);
                amp->a_szc = seg->s_szc;
                svntrp->tr_amp[lgrp_id] = amp;
        }
        /*
         * We don't need to drop the bucket lock but here we give other
         * threads a chance. svntr and svd can't be unlinked as long as
         * segment lock is held as a writer and AS held as well. After we
         * retake bucket lock we'll continue from where we left. We'll be able
         * to reach the end of either list since new entries are always added
         * to the beginning of the lists.
         */
        mutex_exit(&svntr_hashtab[hash].tr_lock);
        hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
        mutex_enter(&svntr_hashtab[hash].tr_lock);

        ASSERT(svd->tr_state == SEGVN_TR_ON);
        ASSERT(svd->amp != NULL);
        ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
        ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
        ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

        svd->tr_policy_info.mem_lgrpid = lgrp_id;
        svd->amp = svntrp->tr_amp[lgrp_id];
        p->p_tr_lgrpid = NLGRPS_MAX;
        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
        AS_LOCK_EXIT(as);

        ASSERT(svntrp->tr_refcnt != 0);
        ASSERT(svd->vp == svntrp->tr_vp);
        ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
        ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
        ASSERT(svd->seg == seg);
        ASSERT(svd->tr_state == SEGVN_TR_ON);

        SEGVN_TR_ADDSTAT(asyncrepl);