/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - shared or copy-on-write from a vnode/anonymous memory.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/dumphdr.h>
#include <vm/seg_vn.h>
#include <sys/project.h>
#include <sys/shm_impl.h>
/*
 * segvn_fault needs a temporary page list array.  To avoid calling kmem all
 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
 * it can.  In the rare case when this page list is not large enough, it
 * goes and gets a large enough array from kmem.
 *
 * This small page list array covers either 8 pages or 64kB worth of pages -
 * whichever is smaller.
 */
#define	PVN_MAX_GETPAGE_SZ	0x10000
#define	PVN_MAX_GETPAGE_NUM	0x8

#if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
#define	PVN_GETPAGE_SZ	ptob(PVN_MAX_GETPAGE_NUM)
#define	PVN_GETPAGE_NUM	PVN_MAX_GETPAGE_NUM
#else
#define	PVN_GETPAGE_SZ	PVN_MAX_GETPAGE_SZ
#define	PVN_GETPAGE_NUM	btop(PVN_MAX_GETPAGE_SZ)
#endif
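
/*
 * For example, with 4K pages 8 pages is 32K, which is smaller than 64K, so
 * PVN_GETPAGE_SZ resolves to ptob(8) = 32K and PVN_GETPAGE_NUM to 8.  With
 * 8K pages 8 pages is exactly 64K, so the #else branch is taken and the
 * limits again describe 64K / 8 pages - the smaller of the two either way.
 */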
/*
 * Private seg op routines.
 */
static int	segvn_dup(struct seg *seg, struct seg *newseg);
static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segvn_free(struct seg *seg);
static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
		    caddr_t addr, size_t len, enum fault_type type,
		    enum seg_rw rw);
static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
static int	segvn_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t	segvn_swapout(struct seg *seg);
static int	segvn_sync(struct seg *seg, caddr_t addr, size_t len,
		    int attr, uint_t flags);
static size_t	segvn_incore(struct seg *seg, caddr_t addr, size_t len,
		    char *vec);
static int	segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
		    int attr, int op, ulong_t *lockmap, size_t pos);
static int	segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t *protv);
static u_offset_t	segvn_getoffset(struct seg *seg, caddr_t addr);
static int	segvn_gettype(struct seg *seg, caddr_t addr);
static int	segvn_getvp(struct seg *seg, caddr_t addr,
		    struct vnode **vpp);
static int	segvn_advise(struct seg *seg, caddr_t addr, size_t len,
		    uint_t behav);
static void	segvn_dump(struct seg *seg);
static int	segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segvn_getmemid(struct seg *seg, caddr_t addr,
		    memid_t *memidp);
static lgrp_mem_policy_info_t	*segvn_getpolicy(struct seg *, caddr_t);
static int	segvn_capable(struct seg *seg, segcapability_t capable);
static int	segvn_inherit(struct seg *, caddr_t, size_t, uint_t);

struct seg_ops segvn_ops = {
/*
 * Common zfod structures, provided as a shorthand for others to use.
 */
static segvn_crargs_t zfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
static segvn_crargs_t kzfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
	PROT_ALL & ~PROT_USER);
static segvn_crargs_t stack_noexec_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);

caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs;	/* user zfod argsp */
caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs;	/* kernel zfod argsp */
caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs;	/* executable stack */
caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */

#define	vpgtob(n)	((n) * sizeof (struct vpage))	/* For brevity */

size_t	segvn_comb_thrshld = UINT_MAX;	/* patchable -- see 1196681 */

size_t	segvn_pglock_comb_thrshld = (1UL << 16);	/* 64K */
size_t	segvn_pglock_comb_balign = (1UL << 16);		/* 64K */
uint_t	segvn_pglock_comb_bshift;
size_t	segvn_pglock_comb_palign;
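
/*
 * segvn_pglock_comb_bshift and segvn_pglock_comb_palign are derived from
 * segvn_pglock_comb_balign in segvn_init(); with the default 64K basis and
 * assuming 4K pages they work out to 16 and 16 pages respectively.
 */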
static int	segvn_concat(struct seg *, struct seg *, int);
static int	segvn_extend_prev(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static int	segvn_extend_next(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static void	segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
static void	segvn_pagelist_rele(page_t **);
static void	segvn_setvnode_mpss(vnode_t *);
static void	segvn_relocate_pages(page_t **, page_t *);
static int	segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
static int	segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
    uint_t, page_t **, page_t **, uint_t *, int *);
static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
    u_offset_t, struct vpage *, page_t **, uint_t,
    enum fault_type, enum seg_rw, int);
static void	segvn_vpage(struct seg *);
static size_t	segvn_count_swap_by_vpages(struct seg *);

static void segvn_purge(struct seg *seg);
static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);

static int sameprot(struct seg *, caddr_t, size_t);

static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
static int segvn_clrszc(struct seg *);
static struct seg *segvn_split_seg(struct seg *, caddr_t);
static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
    ulong_t, uint_t);

static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

static struct kmem_cache *segvn_cache;
static struct kmem_cache **segvn_szc_cache;

#ifdef VM_STATS
static struct segvnvmstats_str {
	ulong_t	fill_vp_pages[31];
	ulong_t	fltvnpages[49];
	ulong_t	fullszcpages[10];
	ulong_t	relocatepages[3];
	ulong_t	fltanpages[17];
	ulong_t	demoterange[3];
} segvnvmstats;
#endif	/* VM_STATS */
#define	SDR_RANGE	1		/* demote entire range */
#define	SDR_END		2		/* demote non aligned ends only */

#define	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) {	\
	if ((len) != 0) {						\
		lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);	\
		ASSERT(lpgaddr >= (seg)->s_base);			\
		lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) +	\
		    (len)), pgsz);					\
		ASSERT(lpgeaddr > lpgaddr);				\
		ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size);	\
	} else {							\
		lpgeaddr = lpgaddr = (addr);				\
	}								\
}
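
/*
 * For example, with a 4M large page size a request at addr 0x40a000 of
 * length 0x2000 yields lpgaddr 0x400000 and lpgeaddr 0x800000: the computed
 * region is always expanded to whole large pages within the segment.
 */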
static int
segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct segvn_data *svd = buf;

	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
	svd->svn_trnext = svd->svn_trprev = NULL;
	return (0);
}

static void
segvn_cache_destructor(void *buf, void *cdrarg)
{
	struct segvn_data *svd = buf;

	rw_destroy(&svd->lock);
	mutex_destroy(&svd->segfree_syncmtx);
}

static int
svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (svntr_t));
	return (0);
}
/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * most everything works.  The exceptions are generally some interpreters
 * and debuggers that create executable code on the stack and jump
 * into it (without explicitly mprotecting the address range to include
 * PROT_EXEC).
 *
 * One important class of applications that are disabled are those
 * that have been transformed into malicious agents using one of the
 * numerous "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;
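
/*
 * For example, an administrator who wants non-executable user stacks by
 * default can set this tunable in /etc/system:
 *
 *	set noexec_user_stack = 1
 */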
int	segvn_lpg_disable = 0;
uint_t	segvn_maxpgszc = 0;

ulong_t	segvn_vmpss_clrszc_cnt;
ulong_t	segvn_vmpss_clrszc_err;
ulong_t	segvn_fltvnpages_clrszc_cnt;
ulong_t	segvn_fltvnpages_clrszc_err;
ulong_t	segvn_setpgsz_align_err;
ulong_t	segvn_setpgsz_anon_align_err;
ulong_t	segvn_setpgsz_getattr_err;
ulong_t	segvn_setpgsz_eof_err;
ulong_t	segvn_faultvnmpss_align_err1;
ulong_t	segvn_faultvnmpss_align_err2;
ulong_t	segvn_faultvnmpss_align_err3;
ulong_t	segvn_faultvnmpss_align_err4;
ulong_t	segvn_faultvnmpss_align_err5;
ulong_t	segvn_vmpss_pageio_deadlk_err;

int	segvn_use_regions = 1;
/*
 * Segvn supports text replication optimization for NUMA platforms.  Text
 * replicas are represented by anon maps (amp).  There's one amp per text file
 * region per lgroup.  A process chooses the amp for each of its text mappings
 * based on the lgroup assignment of its main thread (t_tid = 1).  All
 * processes that want a replica on a particular lgroup for the same text file
 * mapping share the same amp.  amp's are looked up in the svntr_hashtab hash
 * table with vp,off,size,szc used as a key.  Text replication segments are
 * read only MAP_PRIVATE|MAP_TEXT segments that map a vnode.  Replication is
 * achieved by forcing COW faults from vnode to amp and mapping amp pages
 * instead of vnode pages.  A replication amp is assigned to a segment when it
 * gets its first pagefault.  To handle main thread lgroup rehoming,
 * segvn_trasync_thread periodically rechecks whether the process still maps
 * an amp local to the main thread.  If not, the async thread forces the
 * process to remap to an amp in the new home lgroup of the main thread.  The
 * current text replication implementation only benefits workloads that do
 * most of their work in the main thread of a process, or in which all the
 * threads of a process run in the same lgroup.  To extend the text
 * replication benefit to other kinds of multithreaded workloads, further work
 * would be needed in the hat layer to allow the same virtual address in the
 * same hat to simultaneously map different physical addresses (i.e. page
 * table replication would be needed).
 *
 * amp pages are used instead of vnode pages as long as the segment has a very
 * simple life cycle.  It's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped.  If anything more complicated
 * happens, such as the protection being changed, a real COW fault occurring,
 * the pagesize being changed, MC_LOCK being requested or the segment being
 * partially unmapped, we turn off text replication by converting the segment
 * back to a vnode-only segment (unmap the segment's address range and set
 * svd->amp to NULL).
 *
 * The original file can be changed after the amp is inserted into
 * svntr_hashtab.  Processes that are launched after the file is already
 * changed can't use the replicas created prior to the file change.  To
 * implement this functionality hash entries are timestamped.  Replicas can
 * only be used if the current file modification time is the same as the
 * timestamp saved when the hash entry was created.  However, timestamps alone
 * are not sufficient to detect file modification via mmap(MAP_SHARED)
 * mappings, so we deal with file changes via MAP_SHARED mappings differently.
 * When writable MAP_SHARED mappings are created to vnodes marked as
 * executable we mark all existing replicas for this vnode as not usable for
 * future text mappings.  And we don't create new replicas for files that
 * currently have potentially writable MAP_SHARED mappings (i.e.
 * vn_is_mapped(V_WRITE) is true).
 */
#define	SEGVN_TEXTREPL_MAXBYTES_FACTOR	(20)

size_t	segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;

static ulong_t			svntr_hashtab_sz = 512;
static svntr_bucket_t		*svntr_hashtab = NULL;
static struct kmem_cache	*svntr_cache;
static svntr_stats_t		*segvn_textrepl_stats;
static ksema_t			segvn_trasync_sem;

int	segvn_disable_textrepl = 1;
size_t	textrepl_size_thresh = (size_t)-1;
size_t	segvn_textrepl_bytes = 0;
size_t	segvn_textrepl_max_bytes = 0;
clock_t	segvn_update_textrepl_interval = 0;
int	segvn_update_tr_time = 10;
int	segvn_disable_textrepl_update = 0;
static void segvn_textrepl(struct seg *);
static void segvn_textunrepl(struct seg *, int);
static void segvn_inval_trcache(vnode_t *);
static void segvn_trasync_thread(void);
static void segvn_trupdate_wakeup(void *);
static void segvn_trupdate(void);
static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
    ulong_t);
/*
 * Initialize segvn data structures
 */
void
segvn_init(void)
{
	uint_t maxszc;
	uint_t szc;
	size_t pgsz;

	segvn_cache = kmem_cache_create("segvn_cache",
	    sizeof (struct segvn_data), 0,
	    segvn_cache_constructor, segvn_cache_destructor, NULL,
	    NULL, NULL, 0);

	if (segvn_lpg_disable == 0) {
		szc = maxszc = page_num_pagesizes() - 1;
		if (szc == 0) {
			segvn_lpg_disable = 1;
		}
		if (page_get_pagesize(0) != PAGESIZE) {
			panic("segvn_init: bad szc 0");
		}
		while (szc != 0) {
			pgsz = page_get_pagesize(szc);
			if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
				panic("segvn_init: bad szc %d", szc);
			}
			szc--;
		}
		if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
			segvn_maxpgszc = maxszc;
	}

	if (segvn_maxpgszc) {
		segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
		    (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
		    KM_SLEEP);
	}

	for (szc = 1; szc <= segvn_maxpgszc; szc++) {
		char str[32];

		(void) sprintf(str, "segvn_szc_cache%d", szc);
		segvn_szc_cache[szc] = kmem_cache_create(str,
		    page_get_pagecnt(szc) * sizeof (page_t *), 0,
		    NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
	}

	if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
		segvn_use_regions = 0;

	/*
	 * For now shared regions and text replication segvn support
	 * are mutually exclusive. This is acceptable because
	 * currently significant benefit from text replication was
	 * only observed on AMD64 NUMA platforms (due to relatively
	 * small L2$ size) and currently we don't support shared
	 * regions on x86.
	 */
	if (segvn_use_regions && !segvn_disable_textrepl) {
		segvn_disable_textrepl = 1;
	}

	if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
	    !segvn_disable_textrepl) {
		ulong_t i;
		size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);

		svntr_cache = kmem_cache_create("svntr_cache",
		    sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
		    NULL, NULL, NULL, 0);
		svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
		for (i = 0; i < svntr_hashtab_sz; i++) {
			mutex_init(&svntr_hashtab[i].tr_lock, NULL,
			    MUTEX_DEFAULT, NULL);
		}
		segvn_textrepl_max_bytes = ptob(physmem) /
		    segvn_textrepl_max_bytes_factor;
		segvn_textrepl_stats = kmem_zalloc(NCPU *
		    sizeof (svntr_stats_t), KM_SLEEP);
		sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
		(void) thread_create(NULL, 0, segvn_trasync_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}

	if (!ISP2(segvn_pglock_comb_balign) ||
	    segvn_pglock_comb_balign < PAGESIZE) {
		segvn_pglock_comb_balign = 1UL << 16; /* 64K */
	}
	segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
	segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
}
#define	SEGVN_PAGEIO	((void *)0x1)
#define	SEGVN_NOPAGEIO	((void *)0x2)

static void
segvn_setvnode_mpss(vnode_t *vp)
{
	int err;

	ASSERT(vp->v_mpssdata == NULL ||
	    vp->v_mpssdata == SEGVN_PAGEIO ||
	    vp->v_mpssdata == SEGVN_NOPAGEIO);

	if (vp->v_mpssdata == NULL) {
		if (vn_vmpss_usepageio(vp)) {
			err = VOP_PAGEIO(vp, (page_t *)NULL,
			    (u_offset_t)0, 0, 0, CRED(), NULL);
		} else {
			err = ENOSYS;
		}
		/*
		 * set v_mpssdata just once per vnode life
		 * so that it never changes.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_mpssdata == NULL) {
			if (err == EINVAL) {
				vp->v_mpssdata = SEGVN_PAGEIO;
			} else {
				vp->v_mpssdata = SEGVN_NOPAGEIO;
			}
		}
		mutex_exit(&vp->v_lock);
	}
}
541 segvn_create(struct seg
**segpp
, void *argsp
)
543 struct seg
*seg
= *segpp
;
544 extern lgrp_mem_policy_t lgrp_mem_default_policy
;
545 struct segvn_crargs
*a
= (struct segvn_crargs
*)argsp
;
546 struct segvn_data
*svd
;
549 struct anon_map
*amp
;
552 lgrp_mem_policy_t mpolicy
= lgrp_mem_default_policy
;
556 ASSERT(seg
->s_as
&& AS_WRITE_HELD(seg
->s_as
));
558 if (a
->type
!= MAP_PRIVATE
&& a
->type
!= MAP_SHARED
) {
559 panic("segvn_create type");
564 * Check arguments. If a shared anon structure is given then
565 * it is illegal to also specify a vp.
567 if (a
->amp
!= NULL
&& a
->vp
!= NULL
) {
568 panic("segvn_create anon_map");
572 if (a
->type
== MAP_PRIVATE
&& (a
->flags
& MAP_TEXT
) &&
573 a
->vp
!= NULL
&& a
->prot
== (PROT_USER
| PROT_READ
| PROT_EXEC
) &&
578 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
579 if (a
->type
== MAP_SHARED
)
580 a
->flags
&= ~MAP_NORESERVE
;
583 if (segvn_lpg_disable
!= 0 || (a
->szc
== AS_MAP_NO_LPOOB
) ||
584 (a
->amp
!= NULL
&& a
->type
== MAP_PRIVATE
) ||
585 (a
->flags
& MAP_NORESERVE
) || seg
->s_as
== &kas
) {
588 if (a
->szc
> segvn_maxpgszc
)
589 a
->szc
= segvn_maxpgszc
;
590 pgsz
= page_get_pagesize(a
->szc
);
591 if (!IS_P2ALIGNED(seg
->s_base
, pgsz
) ||
592 !IS_P2ALIGNED(seg
->s_size
, pgsz
)) {
594 } else if (a
->vp
!= NULL
) {
595 if (IS_SWAPFSVP(a
->vp
) || VN_ISKAS(a
->vp
)) {
598 * hat_page_demote() is not supported
602 } else if (map_addr_vacalign_check(seg
->s_base
,
603 a
->offset
& PAGEMASK
)) {
606 } else if (a
->amp
!= NULL
) {
607 pgcnt_t anum
= btopr(a
->offset
);
608 pgcnt_t pgcnt
= page_get_pagecnt(a
->szc
);
609 if (!IS_P2ALIGNED(anum
, pgcnt
)) {
617 * If segment may need private pages, reserve them now.
619 if (!(a
->flags
& MAP_NORESERVE
) && ((a
->vp
== NULL
&& a
->amp
== NULL
) ||
620 (a
->type
== MAP_PRIVATE
&& (a
->prot
& PROT_WRITE
)))) {
621 if (anon_resv_zone(seg
->s_size
,
622 seg
->s_as
->a_proc
->p_zone
) == 0)
624 swresv
= seg
->s_size
;
625 TRACE_3(TR_FAC_VM
, TR_ANON_PROC
, "anon proc:%p %lu %u",
630 * Reserve any mapping structures that may be required.
632 * Don't do it for segments that may use regions. It's currently a
633 * noop in the hat implementations anyway.
636 hat_map(seg
->s_as
->a_hat
, seg
->s_base
, seg
->s_size
, HAT_MAP
);
643 crhold(cred
= CRED());
646 /* Inform the vnode of the new mapping */
648 error
= VOP_ADDMAP(a
->vp
, a
->offset
& PAGEMASK
,
649 seg
->s_as
, seg
->s_base
, seg
->s_size
, a
->prot
,
650 a
->maxprot
, a
->type
, cred
, NULL
);
653 anon_unresv_zone(swresv
,
654 seg
->s_as
->a_proc
->p_zone
);
655 TRACE_3(TR_FAC_VM
, TR_ANON_PROC
,
656 "anon proc:%p %lu %u", seg
, swresv
, 0);
660 hat_unload(seg
->s_as
->a_hat
, seg
->s_base
,
661 seg
->s_size
, HAT_UNLOAD_UNMAP
);
666 * svntr_hashtab will be NULL if we support shared regions.
668 trok
= ((a
->flags
& MAP_TEXT
) &&
669 (seg
->s_size
> textrepl_size_thresh
||
670 (a
->flags
& _MAP_TEXTREPL
)) &&
671 lgrp_optimizations() && svntr_hashtab
!= NULL
&&
672 a
->type
== MAP_PRIVATE
&& swresv
== 0 &&
673 !(a
->flags
& MAP_NORESERVE
) &&
674 seg
->s_as
!= &kas
&& a
->vp
->v_type
== VREG
);
676 ASSERT(!trok
|| !use_rgn
);
680 * MAP_NORESERVE mappings don't count towards the VSZ of a process
681 * until we fault the pages in.
683 if ((a
->vp
== NULL
|| a
->vp
->v_type
!= VREG
) &&
684 a
->flags
& MAP_NORESERVE
) {
685 seg
->s_as
->a_resvsize
-= seg
->s_size
;
689 * If more than one segment in the address space, and they're adjacent
690 * virtually, try to concatenate them. Don't concatenate if an
691 * explicit anon_map structure was supplied (e.g., SystemV shared
692 * memory) or if we'll use text replication for this segment.
694 if (a
->amp
== NULL
&& !use_rgn
&& !trok
) {
695 struct seg
*pseg
, *nseg
;
696 struct segvn_data
*psvd
, *nsvd
;
697 lgrp_mem_policy_t ppolicy
, npolicy
;
698 uint_t lgrp_mem_policy_flags
= 0;
701 * Memory policy flags (lgrp_mem_policy_flags) is valid when
702 * extending stack/heap segments.
704 if ((a
->vp
== NULL
) && (a
->type
== MAP_PRIVATE
) &&
705 !(a
->flags
& MAP_NORESERVE
) && (seg
->s_as
!= &kas
)) {
706 lgrp_mem_policy_flags
= a
->lgrp_mem_policy_flags
;
709 * Get policy when not extending it from another segment
711 mpolicy
= lgrp_mem_policy_default(seg
->s_size
, a
->type
);
715 * First, try to concatenate the previous and new segments
717 pseg
= AS_SEGPREV(seg
->s_as
, seg
);
719 pseg
->s_base
+ pseg
->s_size
== seg
->s_base
&&
720 pseg
->s_ops
== &segvn_ops
) {
722 * Get memory allocation policy from previous segment.
723 * When extension is specified (e.g. for heap) apply
724 * this policy to the new segment regardless of the
725 * outcome of segment concatenation. Extension occurs
726 * for non-default policy otherwise default policy is
727 * used and is based on extended segment size.
729 psvd
= (struct segvn_data
*)pseg
->s_data
;
730 ppolicy
= psvd
->policy_info
.mem_policy
;
731 if (lgrp_mem_policy_flags
==
732 LGRP_MP_FLAG_EXTEND_UP
) {
733 if (ppolicy
!= lgrp_mem_default_policy
) {
736 mpolicy
= lgrp_mem_policy_default(
737 pseg
->s_size
+ seg
->s_size
,
742 if (mpolicy
== ppolicy
&&
743 (pseg
->s_size
+ seg
->s_size
<=
744 segvn_comb_thrshld
|| psvd
->amp
== NULL
) &&
745 segvn_extend_prev(pseg
, seg
, a
, swresv
) == 0) {
747 * success! now try to concatenate
751 nseg
= AS_SEGNEXT(pseg
->s_as
, pseg
);
754 nseg
->s_ops
== &segvn_ops
&&
755 pseg
->s_base
+ pseg
->s_size
==
757 (void) segvn_concat(pseg
, nseg
, 0);
758 ASSERT(pseg
->s_szc
== 0 ||
759 (a
->szc
== pseg
->s_szc
&&
760 IS_P2ALIGNED(pseg
->s_base
, pgsz
) &&
761 IS_P2ALIGNED(pseg
->s_size
, pgsz
)));
763 * Communicate out the newly concatenated
764 * segment as part of the result.
772 * Failed, so try to concatenate with following seg
774 nseg
= AS_SEGNEXT(seg
->s_as
, seg
);
776 seg
->s_base
+ seg
->s_size
== nseg
->s_base
&&
777 nseg
->s_ops
== &segvn_ops
) {
779 * Get memory allocation policy from next segment.
780 * When extension is specified (e.g. for stack) apply
781 * this policy to the new segment regardless of the
782 * outcome of segment concatenation. Extension occurs
783 * for non-default policy otherwise default policy is
784 * used and is based on extended segment size.
786 nsvd
= (struct segvn_data
*)nseg
->s_data
;
787 npolicy
= nsvd
->policy_info
.mem_policy
;
788 if (lgrp_mem_policy_flags
==
789 LGRP_MP_FLAG_EXTEND_DOWN
) {
790 if (npolicy
!= lgrp_mem_default_policy
) {
793 mpolicy
= lgrp_mem_policy_default(
794 nseg
->s_size
+ seg
->s_size
,
799 if (mpolicy
== npolicy
&&
800 segvn_extend_next(seg
, nseg
, a
, swresv
) == 0) {
802 ASSERT(nseg
->s_szc
== 0 ||
803 (a
->szc
== nseg
->s_szc
&&
804 IS_P2ALIGNED(nseg
->s_base
, pgsz
) &&
805 IS_P2ALIGNED(nseg
->s_size
, pgsz
)));
807 * Communicate out the newly concatenated
808 * segment as part of the result.
818 if (a
->type
== MAP_SHARED
)
819 lgrp_shm_policy_init(NULL
, a
->vp
);
821 svd
= kmem_cache_alloc(segvn_cache
, KM_SLEEP
);
823 seg
->s_ops
= &segvn_ops
;
824 seg
->s_data
= (void *)svd
;
830 * Anonymous mappings have no backing file so the offset is meaningless.
832 svd
->offset
= a
->vp
? (a
->offset
& PAGEMASK
) : 0;
834 svd
->maxprot
= a
->maxprot
;
839 svd
->advice
= MADV_NORMAL
;
841 svd
->flags
= (ushort_t
)a
->flags
;
842 svd
->softlockcnt
= 0;
843 svd
->softlockcnt_sbase
= 0;
844 svd
->softlockcnt_send
= 0;
846 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
849 if (a
->szc
!= 0 && a
->vp
!= NULL
) {
850 segvn_setvnode_mpss(a
->vp
);
852 if (svd
->type
== MAP_SHARED
&& svd
->vp
!= NULL
&&
853 (svd
->vp
->v_flag
& VVMEXEC
) && (svd
->prot
& PROT_WRITE
)) {
854 ASSERT(vn_is_mapped(svd
->vp
, V_WRITE
));
855 segvn_inval_trcache(svd
->vp
);
859 if ((svd
->amp
= amp
) == NULL
) {
861 if (svd
->type
== MAP_SHARED
) {
864 * Shared mappings to a vp need no other setup.
865 * If we have a shared mapping to an anon_map object
866 * which hasn't been allocated yet, allocate the
867 * struct now so that it will be properly shared
868 * by remembering the swap reservation there.
871 svd
->amp
= anonmap_alloc(seg
->s_size
, swresv
,
873 svd
->amp
->a_szc
= seg
->s_szc
;
877 * Private mapping (with or without a vp).
878 * Allocate anon_map when needed.
880 svd
->swresv
= swresv
;
886 * Mapping to an existing anon_map structure without a vp.
887 * For now we will insure that the segment size isn't larger
888 * than the size - offset gives us. Later on we may wish to
889 * have the anon array dynamically allocated itself so that
890 * we don't always have to allocate all the anon pointer slots.
891 * This of course involves adding extra code to check that we
892 * aren't trying to use an anon pointer slot beyond the end
893 * of the currently allocated anon array.
895 if ((amp
->size
- a
->offset
) < seg
->s_size
) {
896 panic("segvn_create anon_map size");
900 anon_num
= btopr(a
->offset
);
902 if (a
->type
== MAP_SHARED
) {
904 * SHARED mapping to a given anon_map.
906 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
908 if (a
->szc
> amp
->a_szc
) {
911 ANON_LOCK_EXIT(&
->a_rwlock
);
912 svd
->anon_index
= anon_num
;
916 * PRIVATE mapping to a given anon_map.
917 * Make sure that all the needed anon
918 * structures are created (so that we will
919 * share the underlying pages if nothing
920 * is written by this mapping) and then
921 * duplicate the anon array as is done
922 * when a privately mapped segment is dup'ed.
928 int hat_flag
= HAT_LOAD
;
930 if (svd
->flags
& MAP_TEXT
) {
931 hat_flag
|= HAT_LOAD_TEXT
;
934 svd
->amp
= anonmap_alloc(seg
->s_size
, 0, ANON_SLEEP
);
935 svd
->amp
->a_szc
= seg
->s_szc
;
937 svd
->swresv
= swresv
;
940 * Prevent 2 threads from allocating anon
941 * slots simultaneously.
943 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
944 eaddr
= seg
->s_base
+ seg
->s_size
;
946 for (anon_idx
= anon_num
, addr
= seg
->s_base
;
947 addr
< eaddr
; addr
+= PAGESIZE
, anon_idx
++) {
950 if ((ap
= anon_get_ptr(amp
->ahp
,
955 * Allocate the anon struct now.
956 * Might as well load up translation
957 * to the page while we're at it...
959 pp
= anon_zero(seg
, addr
, &ap
, cred
);
960 if (ap
== NULL
|| pp
== NULL
) {
961 panic("segvn_create anon_zero");
966 * Re-acquire the anon_map lock and
967 * initialize the anon array entry.
969 ASSERT(anon_get_ptr(amp
->ahp
,
971 (void) anon_set_ptr(amp
->ahp
, anon_idx
, ap
,
974 ASSERT(seg
->s_szc
== 0);
975 ASSERT(!IS_VMODSORT(pp
->p_vnode
));
977 ASSERT(use_rgn
== 0);
978 hat_memload(seg
->s_as
->a_hat
, addr
, pp
,
979 svd
->prot
& ~PROT_WRITE
, hat_flag
);
983 ASSERT(seg
->s_szc
== 0);
984 anon_dup(amp
->ahp
, anon_num
, svd
->amp
->ahp
,
986 ANON_LOCK_EXIT(&
->a_rwlock
);
991 * Set default memory allocation policy for segment
993 * Always set policy for private memory at least for initialization
994 * even if this is a shared memory segment
996 (void) lgrp_privm_policy_set(mpolicy
, &svd
->policy_info
, seg
->s_size
);
998 if (svd
->type
== MAP_SHARED
)
999 (void) lgrp_shm_policy_set(mpolicy
, svd
->amp
, svd
->anon_index
,
1000 svd
->vp
, svd
->offset
, seg
->s_size
);
1004 ASSERT(svd
->amp
== NULL
);
1005 svd
->rcookie
= hat_join_region(seg
->s_as
->a_hat
, seg
->s_base
,
1006 seg
->s_size
, (void *)svd
->vp
, svd
->offset
, svd
->prot
,
1007 (uchar_t
)seg
->s_szc
, segvn_hat_rgn_unload_callback
,
1011 ASSERT(!trok
|| !(svd
->prot
& PROT_WRITE
));
1012 svd
->tr_state
= trok
? SEGVN_TR_INIT
: SEGVN_TR_OFF
;
1018 * Concatenate two existing segments, if possible.
1019 * Return 0 on success, -1 if two segments are not compatible
1020 * or -2 on memory allocation failure.
1021 * If amp_cat == 1 then try and concat segments with anon maps
1024 segvn_concat(struct seg
*seg1
, struct seg
*seg2
, int amp_cat
)
1026 struct segvn_data
*svd1
= seg1
->s_data
;
1027 struct segvn_data
*svd2
= seg2
->s_data
;
1028 struct anon_map
*amp1
= svd1
->amp
;
1029 struct anon_map
*amp2
= svd2
->amp
;
1030 struct vpage
*vpage1
= svd1
->vpage
;
1031 struct vpage
*vpage2
= svd2
->vpage
, *nvpage
= NULL
;
1032 size_t size
, nvpsize
;
1033 pgcnt_t npages1
, npages2
;
1035 ASSERT(seg1
->s_as
&& seg2
->s_as
&& seg1
->s_as
== seg2
->s_as
);
1036 ASSERT(AS_WRITE_HELD(seg1
->s_as
));
1037 ASSERT(seg1
->s_ops
== seg2
->s_ops
);
1039 if (HAT_IS_REGION_COOKIE_VALID(svd1
->rcookie
) ||
1040 HAT_IS_REGION_COOKIE_VALID(svd2
->rcookie
)) {
1044 /* both segments exist, try to merge them */
1045 #define incompat(x) (svd1->x != svd2->x)
1046 if (incompat(vp
) || incompat(maxprot
) ||
1047 (!svd1
->pageadvice
&& !svd2
->pageadvice
&& incompat(advice
)) ||
1048 (!svd1
->pageprot
&& !svd2
->pageprot
&& incompat(prot
)) ||
1049 incompat(type
) || incompat(cred
) || incompat(flags
) ||
1050 seg1
->s_szc
!= seg2
->s_szc
|| incompat(policy_info
.mem_policy
) ||
1051 (svd2
->softlockcnt
> 0) || svd1
->softlockcnt_send
> 0)
1056 * vp == NULL implies zfod, offset doesn't matter
1058 if (svd1
->vp
!= NULL
&&
1059 svd1
->offset
+ seg1
->s_size
!= svd2
->offset
) {
1064 * Don't concatenate if either segment uses text replication.
1066 if (svd1
->tr_state
!= SEGVN_TR_OFF
|| svd2
->tr_state
!= SEGVN_TR_OFF
) {
1071 * Fail early if we're not supposed to concatenate
1072 * segments with non NULL amp.
1074 if (amp_cat
== 0 && (amp1
!= NULL
|| amp2
!= NULL
)) {
1078 if (svd1
->vp
== NULL
&& svd1
->type
== MAP_SHARED
) {
1082 if (amp1
!= NULL
&& svd1
->anon_index
+ btop(seg1
->s_size
) !=
1086 ASSERT(amp1
== NULL
|| amp1
->refcnt
>= 2);
1090 * If either seg has vpages, create a new merged vpage array.
1092 if (vpage1
!= NULL
|| vpage2
!= NULL
) {
1093 struct vpage
*vp
, *evp
;
1095 npages1
= seg_pages(seg1
);
1096 npages2
= seg_pages(seg2
);
1097 nvpsize
= vpgtob(npages1
+ npages2
);
1099 if ((nvpage
= kmem_zalloc(nvpsize
, KM_NOSLEEP
)) == NULL
) {
1103 if (vpage1
!= NULL
) {
1104 bcopy(vpage1
, nvpage
, vpgtob(npages1
));
1106 evp
= nvpage
+ npages1
;
1107 for (vp
= nvpage
; vp
< evp
; vp
++) {
1108 VPP_SETPROT(vp
, svd1
->prot
);
1109 VPP_SETADVICE(vp
, svd1
->advice
);
1113 if (vpage2
!= NULL
) {
1114 bcopy(vpage2
, nvpage
+ npages1
, vpgtob(npages2
));
1116 evp
= nvpage
+ npages1
+ npages2
;
1117 for (vp
= nvpage
+ npages1
; vp
< evp
; vp
++) {
1118 VPP_SETPROT(vp
, svd2
->prot
);
1119 VPP_SETADVICE(vp
, svd2
->advice
);
1123 if (svd2
->pageswap
&& (!svd1
->pageswap
&& svd1
->swresv
)) {
1124 ASSERT(svd1
->swresv
== seg1
->s_size
);
1125 ASSERT(!(svd1
->flags
& MAP_NORESERVE
));
1126 ASSERT(!(svd2
->flags
& MAP_NORESERVE
));
1127 evp
= nvpage
+ npages1
;
1128 for (vp
= nvpage
; vp
< evp
; vp
++) {
1133 if (svd1
->pageswap
&& (!svd2
->pageswap
&& svd2
->swresv
)) {
1134 ASSERT(svd2
->swresv
== seg2
->s_size
);
1135 ASSERT(!(svd1
->flags
& MAP_NORESERVE
));
1136 ASSERT(!(svd2
->flags
& MAP_NORESERVE
));
1137 vp
= nvpage
+ npages1
;
1139 for (; vp
< evp
; vp
++) {
1144 ASSERT((vpage1
!= NULL
|| vpage2
!= NULL
) ||
1145 (svd1
->pageswap
== 0 && svd2
->pageswap
== 0));
1148 * If either segment has private pages, create a new merged anon
1149 * array. If mergeing shared anon segments just decrement anon map's
1152 if (amp1
!= NULL
&& svd1
->type
== MAP_SHARED
) {
1153 ASSERT(amp1
== amp2
&& svd1
->vp
== NULL
);
1154 ANON_LOCK_ENTER(&1
->a_rwlock
, RW_WRITER
);
1155 ASSERT(amp1
->refcnt
>= 2);
1157 ANON_LOCK_EXIT(&1
->a_rwlock
);
1159 } else if (amp1
!= NULL
|| amp2
!= NULL
) {
1160 struct anon_hdr
*nahp
;
1161 struct anon_map
*namp
= NULL
;
1164 ASSERT(svd1
->type
== MAP_PRIVATE
);
1166 asize
= seg1
->s_size
+ seg2
->s_size
;
1167 if ((nahp
= anon_create(btop(asize
), ANON_NOSLEEP
)) == NULL
) {
1168 if (nvpage
!= NULL
) {
1169 kmem_free(nvpage
, nvpsize
);
1175 * XXX anon rwlock is not really needed because
1176 * this is a private segment and we are writers.
1178 ANON_LOCK_ENTER(&1
->a_rwlock
, RW_WRITER
);
1179 ASSERT(amp1
->refcnt
== 1);
1180 if (anon_copy_ptr(amp1
->ahp
, svd1
->anon_index
,
1181 nahp
, 0, btop(seg1
->s_size
), ANON_NOSLEEP
)) {
1182 anon_release(nahp
, btop(asize
));
1183 ANON_LOCK_EXIT(&1
->a_rwlock
);
1184 if (nvpage
!= NULL
) {
1185 kmem_free(nvpage
, nvpsize
);
1191 ANON_LOCK_ENTER(&2
->a_rwlock
, RW_WRITER
);
1192 ASSERT(amp2
->refcnt
== 1);
1193 if (anon_copy_ptr(amp2
->ahp
, svd2
->anon_index
,
1194 nahp
, btop(seg1
->s_size
), btop(seg2
->s_size
),
1196 anon_release(nahp
, btop(asize
));
1197 ANON_LOCK_EXIT(&2
->a_rwlock
);
1199 ANON_LOCK_EXIT(&1
->a_rwlock
);
1201 if (nvpage
!= NULL
) {
1202 kmem_free(nvpage
, nvpsize
);
1209 anon_release(amp1
->ahp
, btop(amp1
->size
));
1213 ASSERT(amp1
== NULL
);
1215 anon_release(amp2
->ahp
, btop(amp2
->size
));
1218 ANON_LOCK_EXIT(&2
->a_rwlock
);
1221 svd2
->amp
= NULL
; /* needed for seg_free */
1226 svd1
->anon_index
= 0;
1227 ANON_LOCK_EXIT(&namp
->a_rwlock
);
1230 * Now free the old vpage structures.
1232 if (nvpage
!= NULL
) {
1233 if (vpage1
!= NULL
) {
1234 kmem_free(vpage1
, vpgtob(npages1
));
1236 if (vpage2
!= NULL
) {
1238 kmem_free(vpage2
, vpgtob(npages2
));
1240 if (svd2
->pageprot
) {
1243 if (svd2
->pageadvice
) {
1244 svd1
->pageadvice
= 1;
1246 if (svd2
->pageswap
) {
1249 svd1
->vpage
= nvpage
;
1252 /* all looks ok, merge segments */
1253 svd1
->swresv
+= svd2
->swresv
;
1254 svd2
->swresv
= 0; /* so seg_free doesn't release swap space */
1255 size
= seg2
->s_size
;
1257 seg1
->s_size
+= size
;
1262 * Extend the previous segment (seg1) to include the
1263 * new segment (seg2 + a), if possible.
1264 * Return 0 on success.
1267 segvn_extend_prev(struct seg
*seg1
, struct seg
*seg2
, struct segvn_crargs
*a
,
1270 struct segvn_data
*svd1
= (struct segvn_data
*)seg1
->s_data
;
1272 struct anon_map
*amp1
;
1273 struct vpage
*new_vpage
;
1276 * We don't need any segment level locks for "segvn" data
1277 * since the address space is "write" locked.
1279 ASSERT(seg1
->s_as
&& AS_WRITE_HELD(seg1
->s_as
));
1281 if (HAT_IS_REGION_COOKIE_VALID(svd1
->rcookie
)) {
1285 /* second segment is new, try to extend first */
1286 /* XXX - should also check cred */
1287 if (svd1
->vp
!= a
->vp
|| svd1
->maxprot
!= a
->maxprot
||
1288 (!svd1
->pageprot
&& (svd1
->prot
!= a
->prot
)) ||
1289 svd1
->type
!= a
->type
|| svd1
->flags
!= a
->flags
||
1290 seg1
->s_szc
!= a
->szc
|| svd1
->softlockcnt_send
> 0)
1293 /* vp == NULL implies zfod, offset doesn't matter */
1294 if (svd1
->vp
!= NULL
&&
1295 svd1
->offset
+ seg1
->s_size
!= (a
->offset
& PAGEMASK
))
1298 if (svd1
->tr_state
!= SEGVN_TR_OFF
) {
1307 * Segment has private pages, can data structures
1310 * Acquire the anon_map lock to prevent it from changing,
1311 * if it is shared. This ensures that the anon_map
1312 * will not change while a thread which has a read/write
1313 * lock on an address space references it.
1314 * XXX - Don't need the anon_map lock at all if "refcnt"
1317 * Can't grow a MAP_SHARED segment with an anonmap because
1318 * there may be existing anon slots where we want to extend
1319 * the segment and we wouldn't know what to do with them
1320 * (e.g., for tmpfs right thing is to just leave them there,
1321 * for /dev/zero they should be cleared out).
1323 if (svd1
->type
== MAP_SHARED
)
1326 ANON_LOCK_ENTER(&1
->a_rwlock
, RW_WRITER
);
1327 if (amp1
->refcnt
> 1) {
1328 ANON_LOCK_EXIT(&1
->a_rwlock
);
1331 newpgs
= anon_grow(amp1
->ahp
, &svd1
->anon_index
,
1332 btop(seg1
->s_size
), btop(seg2
->s_size
), ANON_NOSLEEP
);
1335 ANON_LOCK_EXIT(&1
->a_rwlock
);
1338 amp1
->size
= ptob(newpgs
);
1339 ANON_LOCK_EXIT(&1
->a_rwlock
);
1341 if (svd1
->vpage
!= NULL
) {
1342 struct vpage
*vp
, *evp
;
1344 kmem_zalloc(vpgtob(seg_pages(seg1
) + seg_pages(seg2
)),
1346 if (new_vpage
== NULL
)
1348 bcopy(svd1
->vpage
, new_vpage
, vpgtob(seg_pages(seg1
)));
1349 kmem_free(svd1
->vpage
, vpgtob(seg_pages(seg1
)));
1350 svd1
->vpage
= new_vpage
;
1352 vp
= new_vpage
+ seg_pages(seg1
);
1353 evp
= vp
+ seg_pages(seg2
);
1354 for (; vp
< evp
; vp
++)
1355 VPP_SETPROT(vp
, a
->prot
);
1356 if (svd1
->pageswap
&& swresv
) {
1357 ASSERT(!(svd1
->flags
& MAP_NORESERVE
));
1358 ASSERT(swresv
== seg2
->s_size
);
1359 vp
= new_vpage
+ seg_pages(seg1
);
1360 for (; vp
< evp
; vp
++) {
1365 ASSERT(svd1
->vpage
!= NULL
|| svd1
->pageswap
== 0);
1366 size
= seg2
->s_size
;
1368 seg1
->s_size
+= size
;
1369 svd1
->swresv
+= swresv
;
1370 if (svd1
->pageprot
&& (a
->prot
& PROT_WRITE
) &&
1371 svd1
->type
== MAP_SHARED
&& svd1
->vp
!= NULL
&&
1372 (svd1
->vp
->v_flag
& VVMEXEC
)) {
1373 ASSERT(vn_is_mapped(svd1
->vp
, V_WRITE
));
1374 segvn_inval_trcache(svd1
->vp
);
1380 * Extend the next segment (seg2) to include the
1381 * new segment (seg1 + a), if possible.
1382 * Return 0 on success.
1385 segvn_extend_next(struct seg
*seg1
, struct seg
*seg2
, struct segvn_crargs
*a
,
1388 struct segvn_data
*svd2
= (struct segvn_data
*)seg2
->s_data
;
1390 struct anon_map
*amp2
;
1391 struct vpage
*new_vpage
;
1394 * We don't need any segment level locks for "segvn" data
1395 * since the address space is "write" locked.
1397 ASSERT(seg2
->s_as
&& AS_WRITE_HELD(seg2
->s_as
));
1399 if (HAT_IS_REGION_COOKIE_VALID(svd2
->rcookie
)) {
1403 /* first segment is new, try to extend second */
1404 /* XXX - should also check cred */
1405 if (svd2
->vp
!= a
->vp
|| svd2
->maxprot
!= a
->maxprot
||
1406 (!svd2
->pageprot
&& (svd2
->prot
!= a
->prot
)) ||
1407 svd2
->type
!= a
->type
|| svd2
->flags
!= a
->flags
||
1408 seg2
->s_szc
!= a
->szc
|| svd2
->softlockcnt_sbase
> 0)
1410 /* vp == NULL implies zfod, offset doesn't matter */
1411 if (svd2
->vp
!= NULL
&&
1412 (a
->offset
& PAGEMASK
) + seg1
->s_size
!= svd2
->offset
)
1415 if (svd2
->tr_state
!= SEGVN_TR_OFF
) {
1424 * Segment has private pages, can data structures
1427 * Acquire the anon_map lock to prevent it from changing,
1428 * if it is shared. This ensures that the anon_map
1429 * will not change while a thread which has a read/write
1430 * lock on an address space references it.
1432 * XXX - Don't need the anon_map lock at all if "refcnt"
1435 if (svd2
->type
== MAP_SHARED
)
1438 ANON_LOCK_ENTER(&2
->a_rwlock
, RW_WRITER
);
1439 if (amp2
->refcnt
> 1) {
1440 ANON_LOCK_EXIT(&2
->a_rwlock
);
1443 newpgs
= anon_grow(amp2
->ahp
, &svd2
->anon_index
,
1444 btop(seg2
->s_size
), btop(seg1
->s_size
),
1445 ANON_NOSLEEP
| ANON_GROWDOWN
);
1448 ANON_LOCK_EXIT(&2
->a_rwlock
);
1451 amp2
->size
= ptob(newpgs
);
1452 ANON_LOCK_EXIT(&2
->a_rwlock
);
1454 if (svd2
->vpage
!= NULL
) {
1455 struct vpage
*vp
, *evp
;
1457 kmem_zalloc(vpgtob(seg_pages(seg1
) + seg_pages(seg2
)),
1459 if (new_vpage
== NULL
) {
1460 /* Not merging segments so adjust anon_index back */
1462 svd2
->anon_index
+= seg_pages(seg1
);
1465 bcopy(svd2
->vpage
, new_vpage
+ seg_pages(seg1
),
1466 vpgtob(seg_pages(seg2
)));
1467 kmem_free(svd2
->vpage
, vpgtob(seg_pages(seg2
)));
1468 svd2
->vpage
= new_vpage
;
1471 evp
= vp
+ seg_pages(seg1
);
1472 for (; vp
< evp
; vp
++)
1473 VPP_SETPROT(vp
, a
->prot
);
1474 if (svd2
->pageswap
&& swresv
) {
1475 ASSERT(!(svd2
->flags
& MAP_NORESERVE
));
1476 ASSERT(swresv
== seg1
->s_size
);
1478 for (; vp
< evp
; vp
++) {
1483 ASSERT(svd2
->vpage
!= NULL
|| svd2
->pageswap
== 0);
1484 size
= seg1
->s_size
;
1486 seg2
->s_size
+= size
;
1487 seg2
->s_base
-= size
;
1488 svd2
->offset
-= size
;
1489 svd2
->swresv
+= swresv
;
1490 if (svd2
->pageprot
&& (a
->prot
& PROT_WRITE
) &&
1491 svd2
->type
== MAP_SHARED
&& svd2
->vp
!= NULL
&&
1492 (svd2
->vp
->v_flag
& VVMEXEC
)) {
1493 ASSERT(vn_is_mapped(svd2
->vp
, V_WRITE
));
1494 segvn_inval_trcache(svd2
->vp
);
1500 * Duplicate all the pages in the segment. This may break COW sharing for a
1501 * given page. If the page is marked with inherit zero set, then instead of
1502 * duplicating the page, we zero the page.
1505 segvn_dup_pages(struct seg
*seg
, struct seg
*newseg
)
1510 struct anon
*ap
, *newap
;
1514 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
1515 struct segvn_data
*newsvd
= (struct segvn_data
*)newseg
->s_data
;
1516 ulong_t old_idx
= svd
->anon_index
;
1517 ulong_t new_idx
= 0;
1519 i
= btopr(seg
->s_size
);
1523 * XXX break cow sharing using PAGESIZE
1524 * pages. They will be relocated into larger
1525 * pages at fault time.
1528 if ((ap
= anon_get_ptr(svd
->amp
->ahp
, old_idx
)) != NULL
) {
1531 vpp
= &svd
->vpage
[seg_page(seg
, addr
)];
1534 * prot need not be computed below 'cause anon_private
1535 * is going to ignore it anyway as child doesn't inherit
1536 * pagelock from parent.
1538 prot
= svd
->pageprot
? VPP_PROT(vpp
) : svd
->prot
;
1541 * Check whether we should zero this or dup it.
1543 if (svd
->svn_inz
== SEGVN_INZ_ALL
||
1544 (svd
->svn_inz
== SEGVN_INZ_VPP
&&
1545 VPP_ISINHZERO(vpp
))) {
1546 pp
= anon_zero(newseg
, addr
, &newap
,
1549 page_t
*anon_pl
[1+1];
1551 error
= anon_getpage(&ap
, &vpprot
, anon_pl
,
1552 PAGESIZE
, seg
, addr
, S_READ
, svd
->cred
);
1556 pp
= anon_private(&newap
, newseg
, addr
, prot
,
1557 anon_pl
[0], 0, newsvd
->cred
);
1562 (void) anon_set_ptr(newsvd
->amp
->ahp
, new_idx
, newap
,
1575 segvn_dup(struct seg
*seg
, struct seg
*newseg
)
1577 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
1578 struct segvn_data
*newsvd
;
1579 pgcnt_t npages
= seg_pages(seg
);
1582 struct anon_map
*amp
;
1584 ASSERT(seg
->s_as
&& AS_WRITE_HELD(seg
->s_as
));
1585 ASSERT(newseg
->s_as
->a_proc
->p_parent
== curproc
);
1588 * If segment has anon reserved, reserve more for the new seg.
1589 * For a MAP_NORESERVE segment swresv will be a count of all the
1590 * allocated anon slots; thus we reserve for the child as many slots
1591 * as the parent has allocated. This semantic prevents the child or
1592 * parent from dieing during a copy-on-write fault caused by trying
1593 * to write a shared pre-existing anon page.
1595 if ((len
= svd
->swresv
) != 0) {
1596 if (anon_resv(svd
->swresv
) == 0)
1599 TRACE_3(TR_FAC_VM
, TR_ANON_PROC
, "anon proc:%p %lu %u",
1603 newsvd
= kmem_cache_alloc(segvn_cache
, KM_SLEEP
);
1605 newseg
->s_ops
= &segvn_ops
;
1606 newseg
->s_data
= (void *)newsvd
;
1607 newseg
->s_szc
= seg
->s_szc
;
1609 newsvd
->seg
= newseg
;
1610 if ((newsvd
->vp
= svd
->vp
) != NULL
) {
1612 if (svd
->type
== MAP_SHARED
)
1613 lgrp_shm_policy_init(NULL
, svd
->vp
);
1615 newsvd
->offset
= svd
->offset
;
1616 newsvd
->prot
= svd
->prot
;
1617 newsvd
->maxprot
= svd
->maxprot
;
1618 newsvd
->pageprot
= svd
->pageprot
;
1619 newsvd
->type
= svd
->type
;
1620 newsvd
->cred
= svd
->cred
;
1621 crhold(newsvd
->cred
);
1622 newsvd
->advice
= svd
->advice
;
1623 newsvd
->pageadvice
= svd
->pageadvice
;
1624 newsvd
->svn_inz
= svd
->svn_inz
;
1625 newsvd
->swresv
= svd
->swresv
;
1626 newsvd
->pageswap
= svd
->pageswap
;
1627 newsvd
->flags
= svd
->flags
;
1628 newsvd
->softlockcnt
= 0;
1629 newsvd
->softlockcnt_sbase
= 0;
1630 newsvd
->softlockcnt_send
= 0;
1631 newsvd
->policy_info
= svd
->policy_info
;
1632 newsvd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
1634 if ((amp
= svd
->amp
) == NULL
|| svd
->tr_state
== SEGVN_TR_ON
) {
1636 * Not attaching to a shared anon object.
1638 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
) ||
1639 svd
->tr_state
== SEGVN_TR_OFF
);
1640 if (svd
->tr_state
== SEGVN_TR_ON
) {
1641 ASSERT(newsvd
->vp
!= NULL
&& amp
!= NULL
);
1642 newsvd
->tr_state
= SEGVN_TR_INIT
;
1644 newsvd
->tr_state
= svd
->tr_state
;
1647 newsvd
->anon_index
= 0;
1649 /* regions for now are only used on pure vnode segments */
1650 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
1651 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
1652 newsvd
->tr_state
= SEGVN_TR_OFF
;
1653 if (svd
->type
== MAP_SHARED
) {
1654 ASSERT(svd
->svn_inz
== SEGVN_INZ_NONE
);
1656 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
1658 ANON_LOCK_EXIT(&
->a_rwlock
);
1659 newsvd
->anon_index
= svd
->anon_index
;
1664 * Allocate and initialize new anon_map structure.
1666 newsvd
->amp
= anonmap_alloc(newseg
->s_size
, 0,
1668 newsvd
->amp
->a_szc
= newseg
->s_szc
;
1669 newsvd
->anon_index
= 0;
1670 ASSERT(svd
->svn_inz
== SEGVN_INZ_NONE
||
1671 svd
->svn_inz
== SEGVN_INZ_ALL
||
1672 svd
->svn_inz
== SEGVN_INZ_VPP
);
1675 * We don't have to acquire the anon_map lock
1676 * for the new segment (since it belongs to an
1677 * address space that is still not associated
1678 * with any process), or the segment in the old
1679 * address space (since all threads in it
1680 * are stopped while duplicating the address space).
1684 * The goal of the following code is to make sure that
1685 * softlocked pages do not end up as copy on write
1686 * pages. This would cause problems where one
1687 * thread writes to a page that is COW and a different
1688 * thread in the same process has softlocked it. The
1689 * softlock lock would move away from this process
1690 * because the write would cause this process to get
1691 * a copy (without the softlock).
1693 * The strategy here is to just break the
1694 * sharing on pages that could possibly be
1697 * In addition, if any pages have been marked that they
1698 * should be inherited as zero, then we immediately go
1699 * ahead and break COW and zero them. In the case of a
1700 * softlocked page that should be inherited zero, we
1701 * break COW and just get a zero page.
1704 if (svd
->softlockcnt
||
1705 svd
->svn_inz
!= SEGVN_INZ_NONE
) {
1707 * The softlock count might be non zero
1708 * because some pages are still stuck in the
1709 * cache for lazy reclaim. Flush the cache
1710 * now. This should drop the count to zero.
1711 * [or there is really I/O going on to these
1712 * pages]. Note, we have the writers lock so
1713 * nothing gets inserted during the flush.
1715 if (svd
->softlockcnt
&& reclaim
== 1) {
1721 error
= segvn_dup_pages(seg
, newseg
);
1723 newsvd
->vpage
= NULL
;
1726 } else { /* common case */
1727 if (seg
->s_szc
!= 0) {
1729 * If at least one of anon slots of a
1730 * large page exists then make sure
1731 * all anon slots of a large page
1732 * exist to avoid partial cow sharing
1733 * of a large page in the future.
1735 anon_dup_fill_holes(amp
->ahp
,
1736 svd
->anon_index
, newsvd
->amp
->ahp
,
1737 0, seg
->s_size
, seg
->s_szc
,
1740 anon_dup(amp
->ahp
, svd
->anon_index
,
1741 newsvd
->amp
->ahp
, 0, seg
->s_size
);
1744 hat_clrattr(seg
->s_as
->a_hat
, seg
->s_base
,
1745 seg
->s_size
, PROT_WRITE
);
1750 * If necessary, create a vpage structure for the new segment.
1751 * Do not copy any page lock indications.
1753 if (svd
->vpage
!= NULL
) {
1755 struct vpage
*ovp
= svd
->vpage
;
1758 nvp
= newsvd
->vpage
=
1759 kmem_alloc(vpgtob(npages
), KM_SLEEP
);
1760 for (i
= 0; i
< npages
; i
++) {
1762 VPP_CLRPPLOCK(nvp
++);
1765 newsvd
->vpage
= NULL
;
1767 /* Inform the vnode of the new mapping */
1768 if (newsvd
->vp
!= NULL
) {
1769 error
= VOP_ADDMAP(newsvd
->vp
, (offset_t
)newsvd
->offset
,
1770 newseg
->s_as
, newseg
->s_base
, newseg
->s_size
, newsvd
->prot
,
1771 newsvd
->maxprot
, newsvd
->type
, newsvd
->cred
, NULL
);
1774 if (error
== 0 && HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
1775 ASSERT(newsvd
->amp
== NULL
);
1776 ASSERT(newsvd
->tr_state
== SEGVN_TR_OFF
);
1777 newsvd
->rcookie
= svd
->rcookie
;
1778 hat_dup_region(newseg
->s_as
->a_hat
, newsvd
->rcookie
);
1785 * callback function to invoke free_vp_pages() for only those pages actually
1786 * processed by the HAT when a shared region is destroyed.
1788 extern int free_pages
;
1791 segvn_hat_rgn_unload_callback(caddr_t saddr
, caddr_t eaddr
, caddr_t r_saddr
,
1792 size_t r_size
, void *r_obj
, u_offset_t r_objoff
)
1796 vnode_t
*vp
= (vnode_t
*)r_obj
;
1798 ASSERT(eaddr
> saddr
);
1799 ASSERT(saddr
>= r_saddr
);
1800 ASSERT(saddr
< r_saddr
+ r_size
);
1801 ASSERT(eaddr
> r_saddr
);
1802 ASSERT(eaddr
<= r_saddr
+ r_size
);
1809 len
= eaddr
- saddr
;
1810 off
= (saddr
- r_saddr
) + r_objoff
;
1811 free_vp_pages(vp
, off
, len
);
1815 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1816 * those pages actually processed by the HAT
1819 segvn_hat_unload_callback(hat_callback_t
*cb
)
1821 struct seg
*seg
= cb
->hcb_data
;
1822 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
1826 ASSERT(svd
->vp
!= NULL
);
1827 ASSERT(cb
->hcb_end_addr
> cb
->hcb_start_addr
);
1828 ASSERT(cb
->hcb_start_addr
>= seg
->s_base
);
1830 len
= cb
->hcb_end_addr
- cb
->hcb_start_addr
;
1831 off
= cb
->hcb_start_addr
- seg
->s_base
;
1832 free_vp_pages(svd
->vp
, svd
->offset
+ off
, len
);
1836 * This function determines the number of bytes of swap reserved by
1837 * a segment for which per-page accounting is present. It is used to
1838 * calculate the correct value of a segvn_data's swresv.
1841 segvn_count_swap_by_vpages(struct seg
*seg
)
1843 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
1844 struct vpage
*vp
, *evp
;
1845 size_t nswappages
= 0;
1847 ASSERT(svd
->pageswap
);
1848 ASSERT(svd
->vpage
!= NULL
);
1850 evp
= &svd
->vpage
[seg_page(seg
, seg
->s_base
+ seg
->s_size
)];
1852 for (vp
= svd
->vpage
; vp
< evp
; vp
++) {
1853 if (VPP_ISSWAPRES(vp
))
1857 return (nswappages
<< PAGESHIFT
);
1861 segvn_unmap(struct seg
*seg
, caddr_t addr
, size_t len
)
1863 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
1864 struct segvn_data
*nsvd
;
1866 struct anon_map
*amp
;
1867 pgcnt_t opages
; /* old segment size in pages */
1868 pgcnt_t npages
; /* new segment size in pages */
1869 pgcnt_t dpages
; /* pages being deleted (unmapped) */
1870 hat_callback_t callback
; /* used for free_vp_pages() */
1871 hat_callback_t
*cbp
= NULL
;
1878 * We don't need any segment level locks for "segvn" data
1879 * since the address space is "write" locked.
1881 ASSERT(seg
->s_as
&& AS_WRITE_HELD(seg
->s_as
));
1884 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1885 * softlockcnt is protected from change by the as write lock.
1888 if (svd
->softlockcnt
> 0) {
1889 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
1892 * If this is shared segment non 0 softlockcnt
1893 * means locked pages are still in use.
1895 if (svd
->type
== MAP_SHARED
) {
1900 * since we do have the writers lock nobody can fill
1901 * the cache during the purge. The flush either succeeds
1902 * or we still have pending I/Os.
1913 * Check for bad sizes
1915 if (addr
< seg
->s_base
|| addr
+ len
> seg
->s_base
+ seg
->s_size
||
1916 (len
& PAGEOFFSET
) || ((uintptr_t)addr
& PAGEOFFSET
)) {
1917 panic("segvn_unmap");
1921 if (seg
->s_szc
!= 0) {
1922 size_t pgsz
= page_get_pagesize(seg
->s_szc
);
1924 if (!IS_P2ALIGNED(addr
, pgsz
) || !IS_P2ALIGNED(len
, pgsz
)) {
1925 ASSERT(seg
->s_base
!= addr
|| seg
->s_size
!= len
);
1926 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
1927 ASSERT(svd
->amp
== NULL
);
1928 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
1929 hat_leave_region(seg
->s_as
->a_hat
,
1930 svd
->rcookie
, HAT_REGION_TEXT
);
1931 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
1933 * could pass a flag to segvn_demote_range()
1934 * below to tell it not to do any unloads but
1935 * this case is rare enough to not bother for
1938 } else if (svd
->tr_state
== SEGVN_TR_INIT
) {
1939 svd
->tr_state
= SEGVN_TR_OFF
;
1940 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
1941 ASSERT(svd
->amp
!= NULL
);
1942 segvn_textunrepl(seg
, 1);
1943 ASSERT(svd
->amp
== NULL
);
1944 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
1946 VM_STAT_ADD(segvnvmstats
.demoterange
[0]);
1947 err
= segvn_demote_range(seg
, addr
, len
, SDR_END
, 0);
1955 /* Inform the vnode of the unmapping. */
1959 error
= VOP_DELMAP(svd
->vp
,
1960 (offset_t
)svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
),
1961 seg
->s_as
, addr
, len
, svd
->prot
, svd
->maxprot
,
1962 svd
->type
, svd
->cred
, NULL
);
1964 if (error
== EAGAIN
)
1969 * Remove any page locks set through this mapping.
1970 * If text replication is not off no page locks could have been
1971 * established via this mapping.
1973 if (svd
->tr_state
== SEGVN_TR_OFF
) {
1974 (void) segvn_lockop(seg
, addr
, len
, 0, MC_UNLOCK
, NULL
, 0);
1977 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
1978 ASSERT(svd
->amp
== NULL
);
1979 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
1980 ASSERT(svd
->type
== MAP_PRIVATE
);
1981 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
1983 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
1984 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
1985 ASSERT(svd
->amp
!= NULL
);
1986 ASSERT(svd
->pageprot
== 0 && !(svd
->prot
& PROT_WRITE
));
1987 segvn_textunrepl(seg
, 1);
1988 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
1990 if (svd
->tr_state
!= SEGVN_TR_OFF
) {
1991 ASSERT(svd
->tr_state
== SEGVN_TR_INIT
);
1992 svd
->tr_state
= SEGVN_TR_OFF
;
1995 * Unload any hardware translations in the range to be taken
1996 * out. Use a callback to invoke free_vp_pages() effectively.
1998 if (svd
->vp
!= NULL
&& free_pages
!= 0) {
1999 callback
.hcb_data
= seg
;
2000 callback
.hcb_function
= segvn_hat_unload_callback
;
2003 hat_unload_callback(seg
->s_as
->a_hat
, addr
, len
,
2004 HAT_UNLOAD_UNMAP
, cbp
);
2006 if (svd
->type
== MAP_SHARED
&& svd
->vp
!= NULL
&&
2007 (svd
->vp
->v_flag
& VVMEXEC
) &&
2008 ((svd
->prot
& PROT_WRITE
) || svd
->pageprot
)) {
2009 segvn_inval_trcache(svd
->vp
);
2014 * Check for entire segment
2016 if (addr
== seg
->s_base
&& len
== seg
->s_size
) {
2021 opages
= seg_pages(seg
);
2023 npages
= opages
- dpages
;
2025 ASSERT(amp
== NULL
|| amp
->a_szc
>= seg
->s_szc
);
2028 * Check for beginning of segment
2030 if (addr
== seg
->s_base
) {
2031 if (svd
->vpage
!= NULL
) {
2033 struct vpage
*ovpage
;
2035 ovpage
= svd
->vpage
; /* keep pointer to vpage */
2037 nbytes
= vpgtob(npages
);
2038 svd
->vpage
= kmem_alloc(nbytes
, KM_SLEEP
);
2039 bcopy(&ovpage
[dpages
], svd
->vpage
, nbytes
);
2041 /* free up old vpage */
2042 kmem_free(ovpage
, vpgtob(opages
));
2045 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
2046 if (amp
->refcnt
== 1 || svd
->type
== MAP_PRIVATE
) {
2048 * Shared anon map is no longer in use. Before
2049 * freeing its pages purge all entries from
2050 * pcache that belong to this amp.
2052 if (svd
->type
== MAP_SHARED
) {
2053 ASSERT(amp
->refcnt
== 1);
2054 ASSERT(svd
->softlockcnt
== 0);
2058 * Free up now unused parts of anon_map array.
2060 if (amp
->a_szc
== seg
->s_szc
) {
2061 if (seg
->s_szc
!= 0) {
2062 anon_free_pages(amp
->ahp
,
2063 svd
->anon_index
, len
,
2071 ASSERT(svd
->type
== MAP_SHARED
);
2072 ASSERT(amp
->a_szc
> seg
->s_szc
);
2073 anon_shmap_free_pages(amp
,
2074 svd
->anon_index
, len
);
2078 * Unreserve swap space for the
2079 * unmapped chunk of this segment in
2080 * case it's MAP_SHARED
2082 if (svd
->type
== MAP_SHARED
) {
2083 anon_unresv_zone(len
,
2084 seg
->s_as
->a_proc
->p_zone
);
2088 ANON_LOCK_EXIT(&
->a_rwlock
);
2089 svd
->anon_index
+= dpages
;
2091 if (svd
->vp
!= NULL
)
2098 if (svd
->flags
& MAP_NORESERVE
) {
2100 oswresv
= svd
->swresv
;
2102 svd
->swresv
= ptob(anon_pages(amp
->ahp
,
2103 svd
->anon_index
, npages
));
2104 anon_unresv_zone(oswresv
- svd
->swresv
,
2105 seg
->s_as
->a_proc
->p_zone
);
2106 if (SEG_IS_PARTIAL_RESV(seg
))
2107 seg
->s_as
->a_resvsize
-= oswresv
-
2112 if (svd
->pageswap
) {
2113 oswresv
= svd
->swresv
;
2115 segvn_count_swap_by_vpages(seg
);
2116 ASSERT(oswresv
>= svd
->swresv
);
2117 unlen
= oswresv
- svd
->swresv
;
2120 ASSERT(svd
->swresv
== seg
->s_size
);
2123 anon_unresv_zone(unlen
,
2124 seg
->s_as
->a_proc
->p_zone
);
2126 TRACE_3(TR_FAC_VM
, TR_ANON_PROC
, "anon proc:%p %lu %u",
	/*
	 * Check for end of segment
	 */
	if (addr + len == seg->s_base + seg->s_size) {
		if (svd->vpage != NULL) {
			size_t nbytes;
			struct vpage *ovpage;

			ovpage = svd->vpage;	/* keep pointer to vpage */

			nbytes = vpgtob(npages);
			svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
			bcopy(ovpage, svd->vpage, nbytes);

			/* free up old vpage */
			kmem_free(ovpage, vpgtob(opages));
		}
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
				/*
				 * Free up now unused parts of anon_map array.
				 */
				ulong_t an_idx = svd->anon_index + npages;

				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				if (svd->type == MAP_SHARED) {
					ASSERT(amp->refcnt == 1);
					ASSERT(svd->softlockcnt == 0);
					anonmap_purge(amp);
				}

				if (amp->a_szc == seg->s_szc) {
					if (seg->s_szc != 0) {
						anon_free_pages(amp->ahp,
						    an_idx, len,
						    seg->s_szc);
					} else {
						anon_free(amp->ahp, an_idx,
						    len);
					}
				} else {
					ASSERT(svd->type == MAP_SHARED);
					ASSERT(amp->a_szc > seg->s_szc);
					anon_shmap_free_pages(amp,
					    an_idx, len);
				}

				/*
				 * Unreserve swap space for the
				 * unmapped chunk of this segment in
				 * case it's MAP_SHARED
				 */
				if (svd->type == MAP_SHARED) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
				}
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
		seg->s_size -= len;

		if (svd->swresv) {
			if (svd->flags & MAP_NORESERVE) {
				oswresv = svd->swresv;
				svd->swresv = ptob(anon_pages(amp->ahp,
				    svd->anon_index, npages));
				anon_unresv_zone(oswresv - svd->swresv,
				    seg->s_as->a_proc->p_zone);
				if (SEG_IS_PARTIAL_RESV(seg))
					seg->s_as->a_resvsize -= oswresv -
					    svd->swresv;
			} else {
				size_t unlen;

				if (svd->pageswap) {
					oswresv = svd->swresv;
					svd->swresv =
					    segvn_count_swap_by_vpages(seg);
					ASSERT(oswresv >= svd->swresv);
					unlen = oswresv - svd->swresv;
				} else {
					svd->swresv -= len;
					ASSERT(svd->swresv == seg->s_size);
					unlen = len;
				}
				anon_unresv_zone(unlen,
				    seg->s_as->a_proc->p_zone);
			}
			TRACE_3(TR_FAC_VM, TR_ANON_PROC,
			    "anon proc:%p %lu %u", seg, len, 0);
		}

		return (0);
	}
	/*
	 * The section to go is in the middle of the segment,
	 * have to make it into two segments.  nseg is made for
	 * the high end while seg is cut down at the low end.
	 */
	nbase = addr + len;				/* new seg base */
	nsize = (seg->s_base + seg->s_size) - nbase;	/* new seg size */
	seg->s_size = addr - seg->s_base;	/* shrink old seg */
	nseg = seg_alloc(seg->s_as, nbase, nsize);
	if (nseg == NULL) {
		panic("segvn_unmap seg_alloc");
	}
	nseg->s_ops = seg->s_ops;
	nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
	nseg->s_data = (void *)nsvd;
	nseg->s_szc = seg->s_szc;
	*nsvd = *svd;
	nsvd->seg = nseg;
	nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
	nsvd->swresv = 0;
	nsvd->softlockcnt = 0;
	nsvd->softlockcnt_sbase = 0;
	nsvd->softlockcnt_send = 0;
	nsvd->svn_inz = svd->svn_inz;
	ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);

	if (svd->vp != NULL) {
		VN_HOLD(nsvd->vp);
		if (nsvd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, nsvd->vp);
	}
	crhold(svd->cred);

	if (svd->vpage == NULL) {
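		/*
		 * No per-page data to split; the new segment simply starts
		 * without a vpage array of its own.
		 */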
		nsvd->vpage = NULL;
	} else {
		/* need to split vpage into two arrays */
		size_t nbytes;
		struct vpage *ovpage;

		ovpage = svd->vpage;		/* keep pointer to vpage */

		npages = seg_pages(seg);	/* seg has shrunk */
		nbytes = vpgtob(npages);
		svd->vpage = kmem_alloc(nbytes, KM_SLEEP);

		bcopy(ovpage, svd->vpage, nbytes);

		npages = seg_pages(nseg);
		nbytes = vpgtob(npages);
		nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);

		bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);

		/* free up old vpage */
		kmem_free(ovpage, vpgtob(opages));
	}

	if (amp == NULL) {
		nsvd->amp = NULL;
		nsvd->anon_index = 0;
	} else {
2301 * Need to create a new anon map for the new segment.
2302 * We'll also allocate a new smaller array for the old
2303 * smaller segment to save space.
2305 opages
= btop((uintptr_t)(addr
- seg
->s_base
));
2306 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
2307 if (amp
->refcnt
== 1 || svd
->type
== MAP_PRIVATE
) {
2309 * Free up now unused parts of anon_map array.
2311 ulong_t an_idx
= svd
->anon_index
+ opages
;
2314 * Shared anon map is no longer in use. Before
2315 * freeing its pages purge all entries from
2316 * pcache that belong to this amp.
2318 if (svd
->type
== MAP_SHARED
) {
2319 ASSERT(amp
->refcnt
== 1);
2320 ASSERT(svd
->softlockcnt
== 0);
2324 if (amp
->a_szc
== seg
->s_szc
) {
2325 if (seg
->s_szc
!= 0) {
2326 anon_free_pages(amp
->ahp
, an_idx
, len
,
2329 anon_free(amp
->ahp
, an_idx
,
2333 ASSERT(svd
->type
== MAP_SHARED
);
2334 ASSERT(amp
->a_szc
> seg
->s_szc
);
2335 anon_shmap_free_pages(amp
, an_idx
, len
);
2339 * Unreserve swap space for the
2340 * unmapped chunk of this segment in
2341 * case it's MAP_SHARED
2343 if (svd
->type
== MAP_SHARED
) {
2344 anon_unresv_zone(len
,
2345 seg
->s_as
->a_proc
->p_zone
);
2349 nsvd
->anon_index
= svd
->anon_index
+
2350 btop((uintptr_t)(nseg
->s_base
- seg
->s_base
));
2351 if (svd
->type
== MAP_SHARED
) {
2355 struct anon_map
*namp
;
2356 struct anon_hdr
*nahp
;
2358 ASSERT(svd
->type
== MAP_PRIVATE
);
2359 nahp
= anon_create(btop(seg
->s_size
), ANON_SLEEP
);
2360 namp
= anonmap_alloc(nseg
->s_size
, 0, ANON_SLEEP
);
2361 namp
->a_szc
= seg
->s_szc
;
2362 (void) anon_copy_ptr(amp
->ahp
, svd
->anon_index
, nahp
,
2363 0, btop(seg
->s_size
), ANON_SLEEP
);
2364 (void) anon_copy_ptr(amp
->ahp
, nsvd
->anon_index
,
2365 namp
->ahp
, 0, btop(nseg
->s_size
), ANON_SLEEP
);
2366 anon_release(amp
->ahp
, btop(amp
->size
));
2367 svd
->anon_index
= 0;
2368 nsvd
->anon_index
= 0;
2370 amp
->size
= seg
->s_size
;
2373 ANON_LOCK_EXIT(&
->a_rwlock
);
2376 if (svd
->flags
& MAP_NORESERVE
) {
2378 oswresv
= svd
->swresv
;
2379 svd
->swresv
= ptob(anon_pages(amp
->ahp
,
2380 svd
->anon_index
, btop(seg
->s_size
)));
2381 nsvd
->swresv
= ptob(anon_pages(nsvd
->amp
->ahp
,
2382 nsvd
->anon_index
, btop(nseg
->s_size
)));
2383 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
2384 anon_unresv_zone(oswresv
- (svd
->swresv
+ nsvd
->swresv
),
2385 seg
->s_as
->a_proc
->p_zone
);
2386 if (SEG_IS_PARTIAL_RESV(seg
))
2387 seg
->s_as
->a_resvsize
-= oswresv
-
2388 (svd
->swresv
+ nsvd
->swresv
);
2392 if (svd
->pageswap
) {
2393 oswresv
= svd
->swresv
;
2394 svd
->swresv
= segvn_count_swap_by_vpages(seg
);
2395 nsvd
->swresv
= segvn_count_swap_by_vpages(nseg
);
2396 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
2397 unlen
= oswresv
- (svd
->swresv
+ nsvd
->swresv
);
2399 if (seg
->s_size
+ nseg
->s_size
+ len
!=
2401 panic("segvn_unmap: cannot split "
2402 "swap reservation");
2405 svd
->swresv
= seg
->s_size
;
2406 nsvd
->swresv
= nseg
->s_size
;
2409 anon_unresv_zone(unlen
,
2410 seg
->s_as
->a_proc
->p_zone
);
2412 TRACE_3(TR_FAC_VM
, TR_ANON_PROC
, "anon proc:%p %lu %u",
2416 return (0); /* I'm glad that's all over with! */
static void
segvn_free(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	pgcnt_t npages = seg_pages(seg);
	struct anon_map *amp;
	size_t len;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
	ASSERT(svd->tr_state == SEGVN_TR_OFF);

	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);

	/*
	 * Be sure to unlock pages. XXX Why do things get free'ed instead
	 * of unmapped?
	 */
	(void) segvn_lockop(seg, seg->s_base, seg->s_size,
	    0, MC_UNLOCK, NULL, 0);
	/*
	 * Deallocate the vpage and anon pointers if necessary and possible.
	 */
	if (svd->vpage != NULL) {
		kmem_free(svd->vpage, vpgtob(npages));
		svd->vpage = NULL;
	}

	if ((amp = svd->amp) != NULL) {
		/*
		 * If there are no more references to this anon_map
		 * structure, then deallocate the structure after freeing
		 * up all the anon slot pointers that we can.
		 */
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		ASSERT(amp->a_szc >= seg->s_szc);
		if (--amp->refcnt == 0) {
			if (svd->type == MAP_PRIVATE) {
				/*
				 * Private - we only need to anon_free
				 * the part that this segment refers to.
				 */
				if (seg->s_szc != 0) {
					anon_free_pages(amp->ahp,
					    svd->anon_index, seg->s_size,
					    seg->s_szc);
				} else {
					anon_free(amp->ahp, svd->anon_index,
					    seg->s_size);
				}
			} else {
				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				ASSERT(svd->softlockcnt == 0);
				anonmap_purge(amp);

				/*
				 * Shared - anon_free the entire
				 * anon_map's worth of stuff and
				 * release any swap reservation.
				 */
				if (amp->a_szc != 0) {
					anon_shmap_free_pages(amp, 0,
					    amp->size);
				} else {
					anon_free(amp->ahp, 0, amp->size);
				}
				if ((len = amp->swresv) != 0) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
					TRACE_3(TR_FAC_VM, TR_ANON_PROC,
					    "anon proc:%p %lu %u", seg, len, 0);
				}
			}
			svd->amp = NULL;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			anonmap_free(amp);
		} else if (svd->type == MAP_PRIVATE) {
			/*
			 * We had a private mapping which still has
			 * a held anon_map so just free up all the
			 * anon slot pointers that we were using.
			 */
			if (seg->s_szc != 0) {
				anon_free_pages(amp->ahp, svd->anon_index,
				    seg->s_size, seg->s_szc);
			} else {
				anon_free(amp->ahp, svd->anon_index,
				    seg->s_size);
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Release swap reservation.
	 */
	if ((len = svd->swresv) != 0) {
		anon_unresv_zone(svd->swresv,
		    seg->s_as->a_proc->p_zone);
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, len, 0);
		if (SEG_IS_PARTIAL_RESV(seg))
			seg->s_as->a_resvsize -= svd->swresv;
		svd->swresv = 0;
	}
	/*
	 * Release claim on vnode, credentials, and finally free the
	 * private data.
	 */
	if (svd->vp != NULL) {
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_fini(NULL, svd->vp);
		VN_RELE(svd->vp);
		svd->vp = NULL;
	}
	crfree(svd->cred);
	svd->cred = NULL;
	svd->pageadvice = 0;

	/*
	 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
	 * still working with this segment without holding as lock (in case
	 * it's called by pcache async thread).
	 */
	ASSERT(svd->softlockcnt == 0);
	mutex_enter(&svd->segfree_syncmtx);
	mutex_exit(&svd->segfree_syncmtx);

	seg->s_data = NULL;
	kmem_cache_free(segvn_cache, svd);
}
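/*
 * softlockcnt counts pages that are F_SOFTLOCKed through this segment;
 * segvn_softunlock() below drops the count and, when it reaches zero,
 * wakes up any threads waiting to unmap the address space.
 */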
/*
 * Do a F_SOFTUNLOCK call over the range requested.  The range must have
 * already been F_SOFTLOCK'ed.
 * Caller must always match addr and len of a softunlock with a previous
 * softlock with exactly the same addr and len.
 */
static void
segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	page_t *pp;
	caddr_t adr;
	struct vnode *vp;
	u_offset_t offset;
	ulong_t anon_index = 0;
	struct anon_map *amp;
	struct anon *ap = NULL;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));

	if ((amp = svd->amp) != NULL)
		anon_index = svd->anon_index + seg_page(seg, addr);

	if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
	} else {
		hat_unlock(seg->s_as->a_hat, addr, len);
	}
	for (adr = addr; adr < addr + len; adr += PAGESIZE) {
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			if ((ap = anon_get_ptr(amp->ahp, anon_index++))
			    != NULL) {
				swap_xlate(ap, &vp, &offset);
			} else {
				vp = svd->vp;
				offset = svd->offset +
				    (uintptr_t)(adr - seg->s_base);
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			vp = svd->vp;
			offset = svd->offset +
			    (uintptr_t)(adr - seg->s_base);
		}

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it is locked.
		 */
		pp = page_find(vp, offset);
		if (pp == NULL) {
			panic(
			    "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
			    (void *)adr, (void *)ap, (void *)vp, offset);
			/*NOTREACHED*/
		}

		if (rw == S_WRITE) {
			hat_setrefmod(pp);
			if (seg->s_as->a_vbits)
				hat_setstat(seg->s_as, adr, PAGESIZE,
				    P_REF | P_MOD);
		} else if (rw != S_OTHER) {
			hat_setref(pp);
			if (seg->s_as->a_vbits)
				hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
		}
		TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
		    "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
		page_unlock(pp);
	}
	ASSERT(svd->softlockcnt >= btop(len));
	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
		/*
		 * All SOFTLOCKS are gone. Wakeup any waiting
		 * unmappers so they can try again to unmap.
		 * Check for waiters first without the mutex
		 * held so we don't always grab the mutex on
		 * softunlocks.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}
}
#define	PAGE_HANDLED	((page_t *)-1)

/*
 * Release all the pages in the NULL terminated ppp list
 * which haven't already been converted to PAGE_HANDLED.
 */
static void
segvn_pagelist_rele(page_t **ppp)
{
	for (; *ppp != NULL; ppp++) {
		if (*ppp != PAGE_HANDLED)
			page_unlock(*ppp);
	}
}

static int stealcow = 1;

/*
 * Workaround for viking chip bug.  See bug id 1220902.
 * To fix this down in pagefault() would require importing so
 * much as and segvn code as to be unmaintainable.
 */
int enable_mbit_wa = 0;
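/*
 * stealcow can be set to 0 to keep segvn_faultpage() from stealing the
 * original page on a copy-on-write fault when free memory is low (see
 * the freemem < minfree test further down).
 */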
/*
 * Handles all the dirty work of getting the right
 * anonymous pages and loading up the translations.
 * This routine is called only from segvn_fault()
 * when looping over the range of addresses requested.
 *
 * The basic algorithm here is:
 *	If this is an anon_zero case
 *		Call anon_zero to allocate page
 *		Load up translation
 *		Return
 *	endif
 *	If this is an anon page
 *		Use anon_getpage to get the page
 *	else
 *		Find page in pl[] list passed in
 *	endif
 *	If not a copy-on-write case
 *		Load up the translation to the page
 *		return
 *	endif
 *	Call anon_private to handle cow
 *	Load up (writable) translation to new page
 */
static faultcode_t
segvn_faultpage(
2707 struct hat
*hat
, /* the hat to use for mapping */
2708 struct seg
*seg
, /* seg_vn of interest */
2709 caddr_t addr
, /* address in as */
2710 u_offset_t off
, /* offset in vp */
2711 struct vpage
*vpage
, /* pointer to vpage for vp, off */
2712 page_t
*pl
[], /* object source page pointer */
2713 uint_t vpprot
, /* access allowed to object pages */
2714 enum fault_type type
, /* type of fault */
2715 enum seg_rw rw
, /* type of access at fault */
2716 int brkcow
) /* we may need to break cow */
2718 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
2720 uint_t pageflags
= 0;
2721 page_t
*anon_pl
[1 + 1];
2722 page_t
*opp
= NULL
; /* original page */
2728 ulong_t anon_index
= 0;
2729 struct anon
*ap
, *oldap
;
2730 struct anon_map
*amp
;
2731 int hat_flag
= (type
== F_SOFTLOCK
) ? HAT_LOAD_LOCK
: HAT_LOAD
;
2733 anon_sync_obj_t cookie
;
2735 if (svd
->flags
& MAP_TEXT
) {
2736 hat_flag
|= HAT_LOAD_TEXT
;
2739 ASSERT(SEGVN_READ_HELD(seg
->s_as
, &svd
->lock
));
2740 ASSERT(seg
->s_szc
== 0);
2741 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
2744 * Initialize protection value for this page.
2745 * If we have per page protection values check it now.
2747 if (svd
->pageprot
) {
2752 protchk
= PROT_READ
;
2755 protchk
= PROT_WRITE
;
2758 protchk
= PROT_EXEC
;
2762 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
2766 prot
= VPP_PROT(vpage
);
2767 if ((prot
& protchk
) == 0)
2768 return (FC_PROT
); /* illegal access type */
2773 if (type
== F_SOFTLOCK
) {
2774 atomic_inc_ulong((ulong_t
*)&svd
->softlockcnt
);
2778 * Always acquire the anon array lock to prevent 2 threads from
2779 * allocating separate anon slots for the same "addr".
2782 if ((amp
= svd
->amp
) != NULL
) {
2783 ASSERT(RW_READ_HELD(&
->a_rwlock
));
2784 anon_index
= svd
->anon_index
+ seg_page(seg
, addr
);
2785 anon_array_enter(amp
, anon_index
, &cookie
);
2789 if (svd
->vp
== NULL
&& amp
!= NULL
) {
2790 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) == NULL
) {
2792 * Allocate a (normally) writable anonymous page of
2793 * zeroes. If no advance reservations, reserve now.
2795 if (svd
->flags
& MAP_NORESERVE
) {
2796 if (anon_resv_zone(ptob(1),
2797 seg
->s_as
->a_proc
->p_zone
)) {
2798 atomic_add_long(&svd
->swresv
, ptob(1));
2799 atomic_add_long(&seg
->s_as
->a_resvsize
,
2806 if ((pp
= anon_zero(seg
, addr
, &ap
,
2807 svd
->cred
)) == NULL
) {
2809 goto out
; /* out of swap space */
2812 * Re-acquire the anon_map lock and
2813 * initialize the anon array entry.
2815 (void) anon_set_ptr(amp
->ahp
, anon_index
, ap
,
2818 ASSERT(pp
->p_szc
== 0);
2821 * Handle pages that have been marked for migration
2823 if (lgrp_optimizations())
2824 page_migrate(seg
, addr
, &pp
, 1);
2826 if (enable_mbit_wa
) {
2829 else if (!hat_ismod(pp
))
2830 prot
&= ~PROT_WRITE
;
2833 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2834 * with MC_LOCKAS, MCL_FUTURE) and this is a
2835 * MAP_NORESERVE segment, we may need to
2836 * permanently lock the page as it is being faulted
2837 * for the first time. The following text applies
2838 * only to MAP_NORESERVE segments:
2840 * As per memcntl(2), if this segment was created
2841 * after MCL_FUTURE was applied (a "future"
2842 * segment), its pages must be locked. If this
2843 * segment existed at MCL_FUTURE application (a
2844 * "past" segment), the interface is unclear.
2846 * We decide to lock only if vpage is present:
2848 * - "future" segments will have a vpage array (see
2849 * as_map), and so will be locked as required
2851 * - "past" segments may not have a vpage array,
2852 * depending on whether events (such as
2853 * mprotect) have occurred. Locking if vpage
2854 * exists will preserve legacy behavior. Not
2855 * locking if vpage is absent, will not break
2856 * the interface or legacy behavior. Note that
2857 * allocating vpage here if it's absent requires
2858 * upgrading the segvn reader lock, the cost of
2859 * which does not seem worthwhile.
2861 * Usually testing and setting VPP_ISPPLOCK and
2862 * VPP_SETPPLOCK requires holding the segvn lock as
2863 * writer, but in this case all readers are
2864 * serializing on the anon array lock.
2866 if (AS_ISPGLCK(seg
->s_as
) && vpage
!= NULL
&&
2867 (svd
->flags
& MAP_NORESERVE
) &&
2868 !VPP_ISPPLOCK(vpage
)) {
2869 proc_t
*p
= seg
->s_as
->a_proc
;
2870 ASSERT(svd
->type
== MAP_PRIVATE
);
2871 mutex_enter(&p
->p_lock
);
2872 if (rctl_incr_locked_mem(p
, NULL
, PAGESIZE
,
2874 claim
= VPP_PROT(vpage
) & PROT_WRITE
;
2875 if (page_pp_lock(pp
, claim
, 0)) {
2876 VPP_SETPPLOCK(vpage
);
2878 rctl_decr_locked_mem(p
, NULL
,
2882 mutex_exit(&p
->p_lock
);
2885 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2886 hat_memload(hat
, addr
, pp
, prot
, hat_flag
);
2888 if (!(hat_flag
& HAT_LOAD_LOCK
))
2891 anon_array_exit(&cookie
);
2897 * Obtain the page structure via anon_getpage() if it is
2898 * a private copy of an object (the result of a previous
2902 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) != NULL
) {
2903 err
= anon_getpage(&ap
, &vpprot
, anon_pl
, PAGESIZE
,
2904 seg
, addr
, rw
, svd
->cred
);
2908 if (svd
->type
== MAP_SHARED
) {
2910 * If this is a shared mapping to an
2911 * anon_map, then ignore the write
2912 * permissions returned by anon_getpage().
2913 * They apply to the private mappings
2916 vpprot
|= PROT_WRITE
;
2923 * Search the pl[] list passed in if it is from the
2924 * original object (i.e., not a private copy).
2928 * Find original page. We must be bringing it in
2929 * from the list in pl[].
2931 for (ppp
= pl
; (opp
= *ppp
) != NULL
; ppp
++) {
2932 if (opp
== PAGE_HANDLED
)
2934 ASSERT(opp
->p_vnode
== svd
->vp
); /* XXX */
2935 if (opp
->p_offset
== off
)
2939 panic("segvn_faultpage not found");
2942 *ppp
= PAGE_HANDLED
;
2946 ASSERT(PAGE_LOCKED(opp
));
2948 TRACE_3(TR_FAC_VM
, TR_SEGVN_FAULT
,
2949 "segvn_fault:pp %p vp %p offset %llx", opp
, NULL
, 0);
2952 * The fault is treated as a copy-on-write fault if a
2953 * write occurs on a private segment and the object
2954 * page (i.e., mapping) is write protected. We assume
2955 * that fatal protection checks have already been made.
2959 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
2960 cow
= !(vpprot
& PROT_WRITE
);
2961 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
2963 * If we are doing text replication COW on first touch.
2965 ASSERT(amp
!= NULL
);
2966 ASSERT(svd
->vp
!= NULL
);
2967 ASSERT(rw
!= S_WRITE
);
2974 * If not a copy-on-write case load the translation
2980 * Handle pages that have been marked for migration
2982 if (lgrp_optimizations())
2983 page_migrate(seg
, addr
, &opp
, 1);
2985 if (IS_VMODSORT(opp
->p_vnode
) || enable_mbit_wa
) {
2988 else if (rw
!= S_OTHER
&& !hat_ismod(opp
))
2989 prot
&= ~PROT_WRITE
;
2992 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
||
2993 (!svd
->pageprot
&& svd
->prot
== (prot
& vpprot
)));
2994 ASSERT(amp
== NULL
||
2995 svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2996 hat_memload_region(hat
, addr
, opp
, prot
& vpprot
, hat_flag
,
2999 if (!(hat_flag
& HAT_LOAD_LOCK
))
3003 anon_array_exit(&cookie
);
3008 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3012 ASSERT(amp
!= NULL
&& anon_lock
);
3015 * Steal the page only if it isn't a private page
3016 * since stealing a private page is not worth the effort.
3018 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) == NULL
)
3022 * Steal the original page if the following conditions are true:
3024 * We are low on memory, the page is not private, page is not large,
3025 * not shared, not modified, not `locked' or if we have it `locked'
3026 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
3027 * that the page is not shared) and if it doesn't have any
3028 * translations. page_struct_lock isn't needed to look at p_cowcnt
3029 * and p_lckcnt because we first get exclusive lock on page.
3031 (void) hat_pagesync(opp
, HAT_SYNC_DONTZERO
| HAT_SYNC_STOPON_MOD
);
3033 if (stealcow
&& freemem
< minfree
&& steal
&& opp
->p_szc
== 0 &&
3034 page_tryupgrade(opp
) && !hat_ismod(opp
) &&
3035 ((opp
->p_lckcnt
== 0 && opp
->p_cowcnt
== 0) ||
3036 (opp
->p_lckcnt
== 0 && opp
->p_cowcnt
== 1 &&
3037 vpage
!= NULL
&& VPP_ISPPLOCK(vpage
)))) {
3039 * Check if this page has other translations
3040 * after unloading our translation.
3042 if (hat_page_is_mapped(opp
)) {
3043 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3044 hat_unload(seg
->s_as
->a_hat
, addr
, PAGESIZE
,
3049 * hat_unload() might sync back someone else's recent
3050 * modification, so check again.
3052 if (!hat_ismod(opp
) && !hat_page_is_mapped(opp
))
3053 pageflags
|= STEAL_PAGE
;
3057 * If we have a vpage pointer, see if it indicates that we have
3058 * ``locked'' the page we map -- if so, tell anon_private to
3059 * transfer the locking resource to the new page.
3061 * See Statement at the beginning of segvn_lockop regarding
3062 * the way lockcnts/cowcnts are handled during COW.
3065 if (vpage
!= NULL
&& VPP_ISPPLOCK(vpage
))
3066 pageflags
|= LOCK_PAGE
;
3069 * Allocate a private page and perform the copy.
3070 * For MAP_NORESERVE reserve swap space now, unless this
3071 * is a cow fault on an existing anon page in which case
3072 * MAP_NORESERVE will have made advance reservations.
3074 if ((svd
->flags
& MAP_NORESERVE
) && (ap
== NULL
)) {
3075 if (anon_resv_zone(ptob(1), seg
->s_as
->a_proc
->p_zone
)) {
3076 atomic_add_long(&svd
->swresv
, ptob(1));
3077 atomic_add_long(&seg
->s_as
->a_resvsize
, ptob(1));
3085 pp
= anon_private(&ap
, seg
, addr
, prot
, opp
, pageflags
, svd
->cred
);
3087 err
= ENOMEM
; /* out of swap space */
3092 * If we copied away from an anonymous page, then
3093 * we are one step closer to freeing up an anon slot.
3095 * NOTE: The original anon slot must be released while
3096 * holding the "anon_map" lock. This is necessary to prevent
3097 * other threads from obtaining a pointer to the anon slot
3098 * which may be freed if its "refcnt" is 1.
3103 (void) anon_set_ptr(amp
->ahp
, anon_index
, ap
, ANON_SLEEP
);
3106 * Handle pages that have been marked for migration
3108 if (lgrp_optimizations())
3109 page_migrate(seg
, addr
, &pp
, 1);
3111 ASSERT(pp
->p_szc
== 0);
3113 ASSERT(!IS_VMODSORT(pp
->p_vnode
));
3114 if (enable_mbit_wa
) {
3117 else if (!hat_ismod(pp
))
3118 prot
&= ~PROT_WRITE
;
3121 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3122 hat_memload(hat
, addr
, pp
, prot
, hat_flag
);
3124 if (!(hat_flag
& HAT_LOAD_LOCK
))
3128 anon_array_exit(&cookie
);
3132 anon_array_exit(&cookie
);
3134 if (type
== F_SOFTLOCK
) {
3135 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt
);
3137 return (FC_MAKE_ERR(err
));
3141 * relocate a bunch of smaller targ pages into one large repl page. all targ
3142 * pages must be complete pages smaller than replacement pages.
3143 * it's assumed that no page's szc can change since they are all PAGESIZE or
3144 * complete large pages locked SHARED.
3147 segvn_relocate_pages(page_t
**targ
, page_t
*replacement
)
3150 pgcnt_t repl_npgs
, curnpgs
;
3152 uint_t repl_szc
= replacement
->p_szc
;
3153 page_t
*first_repl
= replacement
;
3157 VM_STAT_ADD(segvnvmstats
.relocatepages
[0]);
3159 ASSERT(repl_szc
!= 0);
3160 npgs
= repl_npgs
= page_get_pagecnt(repl_szc
);
3166 ASSERT(replacement
!= NULL
);
3168 ASSERT(pp
->p_szc
< repl_szc
);
3169 ASSERT(PAGE_EXCL(pp
));
3170 ASSERT(!PP_ISFREE(pp
));
3171 curnpgs
= page_get_pagecnt(pp
->p_szc
);
3173 VM_STAT_ADD(segvnvmstats
.relocatepages
[1]);
3175 page_sub(&replacement
, repl
);
3176 ASSERT(PAGE_EXCL(repl
));
3177 ASSERT(!PP_ISFREE(repl
));
3178 ASSERT(repl
->p_szc
== repl_szc
);
3180 page_t
*repl_savepp
;
3182 VM_STAT_ADD(segvnvmstats
.relocatepages
[2]);
3183 repl_savepp
= replacement
;
3184 for (j
= 0; j
< curnpgs
; j
++) {
3186 page_sub(&replacement
, repl
);
3187 ASSERT(PAGE_EXCL(repl
));
3188 ASSERT(!PP_ISFREE(repl
));
3189 ASSERT(repl
->p_szc
== repl_szc
);
3190 ASSERT(page_pptonum(targ
[i
+ j
]) ==
3191 page_pptonum(targ
[i
]) + j
);
3194 ASSERT(IS_P2ALIGNED(page_pptonum(repl
), curnpgs
));
3196 err
= page_relocate(&pp
, &repl
, 0, 1, &nreloc
, NULL
);
3197 if (err
|| nreloc
!= curnpgs
) {
3198 panic("segvn_relocate_pages: "
3199 "page_relocate failed err=%d curnpgs=%ld "
3200 "nreloc=%ld", err
, curnpgs
, nreloc
);
3202 ASSERT(curnpgs
<= repl_npgs
);
3203 repl_npgs
-= curnpgs
;
3206 ASSERT(replacement
== NULL
);
3210 for (i
= 0; i
< repl_npgs
; i
++) {
3211 ASSERT(PAGE_EXCL(repl
));
3212 ASSERT(!PP_ISFREE(repl
));
3214 page_downgrade(targ
[i
]);
3220 * Check if all pages in ppa array are complete smaller than szc pages and
3221 * their roots will still be aligned relative to their current size if the
3222 * entire ppa array is relocated into one szc page. If these conditions are
3225 * If all pages are properly aligned attempt to upgrade their locks
3226 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3227 * upgrdfail was set to 0 by caller.
3229 * Return 1 if all pages are aligned and locked exclusively.
3231 * If all pages in ppa array happen to be physically contiguous to make one
3232 * szc page and all exclusive locks are successfully obtained promote the page
3233 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3236 segvn_full_szcpages(page_t
**ppa
, uint_t szc
, int *upgrdfail
, uint_t
*pszc
)
3240 pgcnt_t totnpgs
= page_get_pagecnt(szc
);
3251 VM_STAT_ADD(segvnvmstats
.fullszcpages
[0]);
3253 for (i
= 0; i
< totnpgs
; i
++) {
3255 ASSERT(PAGE_SHARED(pp
));
3256 ASSERT(!PP_ISFREE(pp
));
3257 pfn
= page_pptonum(pp
);
3259 if (!IS_P2ALIGNED(pfn
, totnpgs
)) {
3264 } else if (contig
&& pfn
!= first_pfn
+ i
) {
3267 if (pp
->p_szc
== 0) {
3269 VM_STAT_ADD(segvnvmstats
.fullszcpages
[1]);
3273 if ((curszc
= pp
->p_szc
) >= szc
) {
3274 VM_STAT_ADD(segvnvmstats
.fullszcpages
[2]);
3279 * p_szc changed means we don't have all pages
3280 * locked. return failure.
3282 VM_STAT_ADD(segvnvmstats
.fullszcpages
[3]);
3285 curnpgs
= page_get_pagecnt(curszc
);
3286 if (!IS_P2ALIGNED(pfn
, curnpgs
) ||
3287 !IS_P2ALIGNED(i
, curnpgs
)) {
3288 VM_STAT_ADD(segvnvmstats
.fullszcpages
[4]);
3294 VM_STAT_ADD(segvnvmstats
.fullszcpages
[5]);
3295 if (pp
->p_szc
!= curszc
) {
3296 VM_STAT_ADD(segvnvmstats
.fullszcpages
[6]);
3299 if (pfn
- 1 != page_pptonum(ppa
[i
- 1])) {
3300 panic("segvn_full_szcpages: "
3301 "large page not physically contiguous");
3303 if (P2PHASE(pfn
, curnpgs
) == curnpgs
- 1) {
3309 for (i
= 0; i
< totnpgs
; i
++) {
3310 ASSERT(ppa
[i
]->p_szc
< szc
);
3311 if (!page_tryupgrade(ppa
[i
])) {
3312 for (j
= 0; j
< i
; j
++) {
3313 page_downgrade(ppa
[j
]);
3315 *pszc
= ppa
[i
]->p_szc
;
3317 VM_STAT_ADD(segvnvmstats
.fullszcpages
[7]);
3323 * When a page is put a free cachelist its szc is set to 0. if file
3324 * system reclaimed pages from cachelist targ pages will be physically
3325 * contiguous with 0 p_szc. in this case just upgrade szc of targ
3326 * pages without any relocations.
3327 * To avoid any hat issues with previous small mappings
3328 * hat_pageunload() the target pages first.
3331 VM_STAT_ADD(segvnvmstats
.fullszcpages
[8]);
3332 for (i
= 0; i
< totnpgs
; i
++) {
3333 (void) hat_pageunload(ppa
[i
], HAT_FORCE_PGUNLOAD
);
3335 for (i
= 0; i
< totnpgs
; i
++) {
3336 ppa
[i
]->p_szc
= szc
;
3338 for (i
= 0; i
< totnpgs
; i
++) {
3339 ASSERT(PAGE_EXCL(ppa
[i
]));
3340 page_downgrade(ppa
[i
]);
3346 VM_STAT_ADD(segvnvmstats
.fullszcpages
[9]);
3351 * Create physically contiguous pages for [vp, off] - [vp, off +
3352 * page_size(szc)) range and for private segment return them in ppa array.
3353 * Pages are created either via IO or relocations.
3355 * Return 1 on success and 0 on failure.
3357 * If physically contiguous pages already exist for this range return 1 without
3358 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3359 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
3363 segvn_fill_vp_pages(struct segvn_data
*svd
, vnode_t
*vp
, u_offset_t off
,
3364 uint_t szc
, page_t
**ppa
, page_t
**ppplist
, uint_t
*ret_pszc
,
3367 page_t
*pplist
= *ppplist
;
3368 size_t pgsz
= page_get_pagesize(szc
);
3369 pgcnt_t pages
= btop(pgsz
);
3370 ulong_t start_off
= off
;
3371 u_offset_t eoff
= off
+ pgsz
;
3373 u_offset_t io_off
= off
;
3375 page_t
*io_pplist
= NULL
;
3376 page_t
*done_pplist
= NULL
;
3385 page_t
*targ_pplist
= NULL
;
3386 page_t
*repl_pplist
= NULL
;
3392 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[0]);
3395 ASSERT(pplist
->p_szc
== szc
);
3398 * downsize will be set to 1 only if we fail to lock pages. this will
3399 * allow subsequent faults to try to relocate the page again. If we
3400 * fail due to misalignment don't downsize and let the caller map the
3401 * whole region with small mappings to avoid more faults into the area
3402 * where we can't get large pages anyway.
3406 while (off
< eoff
) {
3408 ASSERT(newpp
!= NULL
);
3409 ASSERT(PAGE_EXCL(newpp
));
3410 ASSERT(!PP_ISFREE(newpp
));
3412 * we pass NULL for nrelocp to page_lookup_create()
3413 * so that it doesn't relocate. We relocate here
3414 * later only after we make sure we can lock all
3415 * pages in the range we handle and they are all
3418 pp
= page_lookup_create(vp
, off
, SE_SHARED
, newpp
, NULL
, 0);
3420 ASSERT(!PP_ISFREE(pp
));
3421 ASSERT(pp
->p_vnode
== vp
);
3422 ASSERT(pp
->p_offset
== off
);
3424 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[1]);
3425 page_sub(&pplist
, pp
);
3426 ASSERT(PAGE_EXCL(pp
));
3427 ASSERT(page_iolock_assert(pp
));
3428 page_list_concat(&io_pplist
, &pp
);
3432 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[2]);
3433 pfn
= page_pptonum(pp
);
3435 if (pszc
>= szc
&& targ_pplist
== NULL
&& io_pplist
== NULL
&&
3436 IS_P2ALIGNED(pfn
, pages
)) {
3437 ASSERT(repl_pplist
== NULL
);
3438 ASSERT(done_pplist
== NULL
);
3439 ASSERT(pplist
== *ppplist
);
3441 page_free_replacement_page(pplist
);
3442 page_create_putback(pages
);
3444 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[3]);
3449 segvn_faultvnmpss_align_err1
++;
3452 ppages
= page_get_pagecnt(pszc
);
3453 if (!IS_P2ALIGNED(pfn
, ppages
)) {
3456 * sizing down to pszc won't help.
3459 segvn_faultvnmpss_align_err2
++;
3462 pfn
= page_pptonum(newpp
);
3463 if (!IS_P2ALIGNED(pfn
, ppages
)) {
3466 * sizing down to pszc won't help.
3469 segvn_faultvnmpss_align_err3
++;
3472 if (!PAGE_EXCL(pp
)) {
3473 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[4]);
3476 *ret_pszc
= pp
->p_szc
;
3480 if (io_pplist
!= NULL
) {
3481 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[5]);
3482 io_len
= off
- io_off
;
3484 * Some file systems like NFS don't check EOF
3485 * conditions in VOP_PAGEIO(). Check it here
3486 * now that pages are locked SE_EXCL. Any file
3487 * truncation will wait until the pages are
3488 * unlocked so no need to worry that file will
3489 * be truncated after we check its size here.
3490 * XXX fix NFS to remove this check.
3492 va
.va_mask
= AT_SIZE
;
3493 if (VOP_GETATTR(vp
, &va
, ATTR_HINT
, svd
->cred
, NULL
)) {
3494 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[6]);
3495 page_unlock(targpp
);
3498 if (btopr(va
.va_size
) < btopr(io_off
+ io_len
)) {
3499 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[7]);
3502 page_unlock(targpp
);
3505 io_err
= VOP_PAGEIO(vp
, io_pplist
, io_off
, io_len
,
3506 B_READ
, svd
->cred
, NULL
);
3508 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[8]);
3509 page_unlock(targpp
);
3510 if (io_err
== EDEADLK
) {
3511 segvn_vmpss_pageio_deadlk_err
++;
3516 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[9]);
3517 while (io_pplist
!= NULL
) {
3519 page_sub(&io_pplist
, pp
);
3520 ASSERT(page_iolock_assert(pp
));
3522 pgidx
= (pp
->p_offset
- start_off
) >>
3524 ASSERT(pgidx
< pages
);
3526 page_list_concat(&done_pplist
, &pp
);
3530 ASSERT(PAGE_EXCL(pp
));
3531 ASSERT(pp
->p_szc
<= pszc
);
3532 if (pszc
!= 0 && !group_page_trylock(pp
, SE_EXCL
)) {
3533 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[10]);
3536 *ret_pszc
= pp
->p_szc
;
3539 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[11]);
3541 * page szc chould have changed before the entire group was
3542 * locked. reread page szc.
3545 ppages
= page_get_pagecnt(pszc
);
3547 /* link just the roots */
3548 page_list_concat(&targ_pplist
, &pp
);
3549 page_sub(&pplist
, newpp
);
3550 page_list_concat(&repl_pplist
, &newpp
);
3552 while (--ppages
!= 0) {
3554 page_sub(&pplist
, newpp
);
3559 if (io_pplist
!= NULL
) {
3560 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[12]);
3561 io_len
= eoff
- io_off
;
3562 va
.va_mask
= AT_SIZE
;
3563 if (VOP_GETATTR(vp
, &va
, ATTR_HINT
, svd
->cred
, NULL
) != 0) {
3564 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[13]);
3567 if (btopr(va
.va_size
) < btopr(io_off
+ io_len
)) {
3568 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[14]);
3573 io_err
= VOP_PAGEIO(vp
, io_pplist
, io_off
, io_len
,
3574 B_READ
, svd
->cred
, NULL
);
3576 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[15]);
3577 if (io_err
== EDEADLK
) {
3578 segvn_vmpss_pageio_deadlk_err
++;
3583 while (io_pplist
!= NULL
) {
3585 page_sub(&io_pplist
, pp
);
3586 ASSERT(page_iolock_assert(pp
));
3588 pgidx
= (pp
->p_offset
- start_off
) >> PAGESHIFT
;
3589 ASSERT(pgidx
< pages
);
3594 * we're now bound to succeed or panic.
3595 * remove pages from done_pplist. it's not needed anymore.
3597 while (done_pplist
!= NULL
) {
3599 page_sub(&done_pplist
, pp
);
3601 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[16]);
3602 ASSERT(pplist
== NULL
);
3604 while (targ_pplist
!= NULL
) {
3606 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[17]);
3607 ASSERT(repl_pplist
);
3609 page_sub(&targ_pplist
, pp
);
3610 pgidx
= (pp
->p_offset
- start_off
) >> PAGESHIFT
;
3611 newpp
= repl_pplist
;
3612 page_sub(&repl_pplist
, newpp
);
3614 pfn
= page_pptonum(pp
);
3616 ppages
= page_get_pagecnt(pszc
);
3617 ASSERT(IS_P2ALIGNED(pfn
, ppages
));
3618 pfn
= page_pptonum(newpp
);
3619 ASSERT(IS_P2ALIGNED(pfn
, ppages
));
3620 ASSERT(P2PHASE(pfn
, pages
) == pgidx
);
3623 ret
= page_relocate(&pp
, &newpp
, 0, 1, &nreloc
, NULL
);
3624 if (ret
!= 0 || nreloc
== 0) {
3625 panic("segvn_fill_vp_pages: "
3626 "page_relocate failed");
3629 while (nreloc
-- != 0) {
3630 ASSERT(PAGE_EXCL(pp
));
3631 ASSERT(pp
->p_vnode
== vp
);
3633 ((pp
->p_offset
- start_off
) >> PAGESHIFT
));
3639 if (svd
->type
== MAP_PRIVATE
) {
3640 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[18]);
3641 for (i
= 0; i
< pages
; i
++) {
3642 ASSERT(ppa
[i
] != NULL
);
3643 ASSERT(PAGE_EXCL(ppa
[i
]));
3644 ASSERT(ppa
[i
]->p_vnode
== vp
);
3645 ASSERT(ppa
[i
]->p_offset
==
3646 start_off
+ (i
<< PAGESHIFT
));
3647 page_downgrade(ppa
[i
]);
3651 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[19]);
3653 * the caller will still call VOP_GETPAGE() for shared segments
3654 * to check FS write permissions. For private segments we map
3655 * file read only anyway. so no VOP_GETPAGE is needed.
3657 for (i
= 0; i
< pages
; i
++) {
3658 ASSERT(ppa
[i
] != NULL
);
3659 ASSERT(PAGE_EXCL(ppa
[i
]));
3660 ASSERT(ppa
[i
]->p_vnode
== vp
);
3661 ASSERT(ppa
[i
]->p_offset
==
3662 start_off
+ (i
<< PAGESHIFT
));
3663 page_unlock(ppa
[i
]);
3671 * Do the cleanup. Unlock target pages we didn't relocate. They are
3672 * linked on targ_pplist by root pages. reassemble unused replacement
3673 * and io pages back to pplist.
3675 if (io_pplist
!= NULL
) {
3676 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[20]);
3679 ASSERT(pp
->p_vnode
== vp
);
3680 ASSERT(pp
->p_offset
== io_off
);
3681 ASSERT(page_iolock_assert(pp
));
3683 page_hashout(pp
, NULL
);
3685 } while ((pp
= pp
->p_next
) != io_pplist
);
3686 page_list_concat(&io_pplist
, &pplist
);
3690 while (targ_pplist
!= NULL
) {
3691 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[21]);
3693 ASSERT(PAGE_EXCL(pp
));
3694 page_sub(&targ_pplist
, pp
);
3697 ppages
= page_get_pagecnt(pszc
);
3698 ASSERT(IS_P2ALIGNED(page_pptonum(pp
), ppages
));
3701 group_page_unlock(pp
);
3707 ASSERT(PAGE_EXCL(pp
));
3708 ASSERT(pp
->p_szc
== szc
);
3709 page_sub(&repl_pplist
, pp
);
3711 ASSERT(IS_P2ALIGNED(page_pptonum(pp
), ppages
));
3713 /* relink replacement page */
3714 page_list_concat(&tmp_pplist
, &pp
);
3715 while (--ppages
!= 0) {
3716 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[22]);
3718 ASSERT(PAGE_EXCL(pp
));
3719 ASSERT(pp
->p_szc
== szc
);
3720 page_list_concat(&tmp_pplist
, &pp
);
3723 if (tmp_pplist
!= NULL
) {
3724 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[23]);
3725 page_list_concat(&tmp_pplist
, &pplist
);
3726 pplist
= tmp_pplist
;
3729 * at this point all pages are either on done_pplist or
3730 * pplist. They can't be all on done_pplist otherwise
3731 * we'd've been done.
3733 ASSERT(pplist
!= NULL
);
3735 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[24]);
3738 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[25]);
3739 ASSERT(pp
->p_szc
== szc
);
3740 ASSERT(PAGE_EXCL(pp
));
3741 ASSERT(pp
->p_vnode
!= vp
);
3743 } while ((pp
= pp
->p_next
) != pplist
);
3747 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[26]);
3748 ASSERT(pp
->p_szc
== szc
);
3749 ASSERT(PAGE_EXCL(pp
));
3750 ASSERT(pp
->p_vnode
== vp
);
3752 } while ((pp
= pp
->p_next
) != done_pplist
);
3754 while (pplist
!= NULL
) {
3755 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[27]);
3757 page_sub(&pplist
, pp
);
3761 while (done_pplist
!= NULL
) {
3762 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[28]);
3764 page_sub(&done_pplist
, pp
);
3770 ASSERT(pplist
== *ppplist
);
3772 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[29]);
3774 * don't downsize on io error.
3775 * see if vop_getpage succeeds.
3776 * pplist may still be used in this case
3781 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[30]);
3782 page_free_replacement_page(pplist
);
3783 page_create_putback(pages
);
int segvn_anypgsz = 0;

#define	SEGVN_RESTORE_SOFTLOCK_VP(type, pages)				\
		if ((type) == F_SOFTLOCK) {				\
			atomic_add_long((ulong_t *)&(svd)->softlockcnt,	\
			    -(pages));					\
		}

#define	SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot)		\
		if (IS_VMODSORT((ppa)[0]->p_vnode)) {			\
			if ((rw) == S_WRITE) {				\
				for (i = 0; i < (pages); i++) {		\
					ASSERT((ppa)[i]->p_vnode ==	\
					    (ppa)[0]->p_vnode);		\
					hat_setmod((ppa)[i]);		\
				}					\
			} else if ((rw) != S_OTHER &&			\
			    ((prot) & (vpprot) & PROT_WRITE)) {		\
				for (i = 0; i < (pages); i++) {		\
					ASSERT((ppa)[i]->p_vnode ==	\
					    (ppa)[0]->p_vnode);		\
					if (!hat_ismod((ppa)[i])) {	\
						prot &= ~PROT_WRITE;	\
						break;			\
					}				\
				}					\
			}						\
		}

#ifdef	VM_STATS

#define	SEGVN_VMSTAT_FLTVNPAGES(idx)					\
		VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);

#else /* VM_STATS */

#define	SEGVN_VMSTAT_FLTVNPAGES(idx)

#endif
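/*
 * When the kernel is built without VM_STATS, SEGVN_VMSTAT_FLTVNPAGES()
 * expands to nothing, so the per-path counters sprinkled through
 * segvn_fault_vnodepages() cost nothing in that configuration.
 */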
3829 segvn_fault_vnodepages(struct hat
*hat
, struct seg
*seg
, caddr_t lpgaddr
,
3830 caddr_t lpgeaddr
, enum fault_type type
, enum seg_rw rw
, caddr_t addr
,
3831 caddr_t eaddr
, int brkcow
)
3833 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
3834 struct anon_map
*amp
= svd
->amp
;
3835 uchar_t segtype
= svd
->type
;
3836 uint_t szc
= seg
->s_szc
;
3837 size_t pgsz
= page_get_pagesize(szc
);
3838 size_t maxpgsz
= pgsz
;
3839 pgcnt_t pages
= btop(pgsz
);
3840 pgcnt_t maxpages
= pages
;
3841 size_t ppasize
= (pages
+ 1) * sizeof (page_t
*);
3842 caddr_t a
= lpgaddr
;
3843 caddr_t maxlpgeaddr
= lpgeaddr
;
3844 u_offset_t off
= svd
->offset
+ (uintptr_t)(a
- seg
->s_base
);
3845 ulong_t aindx
= svd
->anon_index
+ seg_page(seg
, a
);
3846 struct vpage
*vpage
= (svd
->vpage
!= NULL
) ?
3847 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
3848 vnode_t
*vp
= svd
->vp
;
3853 faultcode_t err
= 0;
3855 int vop_size_err
= 0;
3856 uint_t protchk
, prot
, vpprot
;
3858 int hat_flag
= (type
== F_SOFTLOCK
) ? HAT_LOAD_LOCK
: HAT_LOAD
;
3859 anon_sync_obj_t an_cookie
;
3861 int alloc_failed
= 0;
3868 int segvn_anypgsz_vnode
= 0; /* for now map vnode with 2 page sizes */
3869 int tron
= (svd
->tr_state
== SEGVN_TR_ON
);
3873 ASSERT(brkcow
== 0 || amp
!= NULL
);
3874 ASSERT(tron
== 0 || amp
!= NULL
);
3875 ASSERT(enable_mbit_wa
== 0); /* no mbit simulations with large pages */
3876 ASSERT(!(svd
->flags
& MAP_NORESERVE
));
3877 ASSERT(type
!= F_SOFTUNLOCK
);
3878 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
3879 ASSERT(amp
== NULL
|| IS_P2ALIGNED(aindx
, maxpages
));
3880 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
3881 ASSERT(seg
->s_szc
< NBBY
* sizeof (int));
3882 ASSERT(type
!= F_SOFTLOCK
|| lpgeaddr
- a
== maxpgsz
);
3883 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
3885 VM_STAT_COND_ADD(type
== F_SOFTLOCK
, segvnvmstats
.fltvnpages
[0]);
3886 VM_STAT_COND_ADD(type
!= F_SOFTLOCK
, segvnvmstats
.fltvnpages
[1]);
3888 if (svd
->flags
& MAP_TEXT
) {
3889 hat_flag
|= HAT_LOAD_TEXT
;
3892 if (svd
->pageprot
) {
3896 protchk
= PROT_READ
;
3899 protchk
= PROT_WRITE
;
3902 protchk
= PROT_EXEC
;
3906 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
3910 protchk
= PROT_NONE
;
3912 /* caller has already done segment level protection check. */
3915 if (rw
== S_WRITE
&& segtype
== MAP_PRIVATE
) {
3916 SEGVN_VMSTAT_FLTVNPAGES(2);
3922 ppa
= kmem_alloc(ppasize
, KM_SLEEP
);
3924 VM_STAT_COND_ADD(amp
!= NULL
, segvnvmstats
.fltvnpages
[3]);
3929 for (; a
< lpgeaddr
; a
+= pgsz
, off
+= pgsz
, aindx
+= pages
) {
3931 while (szc
< seg
->s_szc
) {
3934 tszc
= segvn_anypgsz_vnode
? szc
+ 1 :
3936 ppgsz
= page_get_pagesize(tszc
);
3937 if (!IS_P2ALIGNED(a
, ppgsz
) ||
3938 ((alloc_failed
>> tszc
) & 0x1)) {
3941 SEGVN_VMSTAT_FLTVNPAGES(4);
3945 e
= P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
3946 lpgeaddr
= (caddr_t
)e
;
3951 if (IS_P2ALIGNED(a
, maxpgsz
) && amp
!= NULL
) {
3952 ASSERT(IS_P2ALIGNED(aindx
, maxpages
));
3953 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
3954 anon_array_enter(amp
, aindx
, &an_cookie
);
3955 if (anon_get_ptr(amp
->ahp
, aindx
) != NULL
) {
3956 SEGVN_VMSTAT_FLTVNPAGES(5);
3957 ASSERT(anon_pages(amp
->ahp
, aindx
,
3958 maxpages
) == maxpages
);
3959 anon_array_exit(&an_cookie
);
3960 ANON_LOCK_EXIT(&
->a_rwlock
);
3961 err
= segvn_fault_anonpages(hat
, seg
,
3962 a
, a
+ maxpgsz
, type
, rw
,
3964 MIN(a
+ maxpgsz
, eaddr
), brkcow
);
3966 SEGVN_VMSTAT_FLTVNPAGES(6);
3969 if (szc
< seg
->s_szc
) {
3973 lpgeaddr
= maxlpgeaddr
;
3977 ASSERT(anon_pages(amp
->ahp
, aindx
,
3979 SEGVN_VMSTAT_FLTVNPAGES(7);
3980 anon_array_exit(&an_cookie
);
3981 ANON_LOCK_EXIT(&
->a_rwlock
);
3984 ASSERT(!brkcow
|| IS_P2ALIGNED(a
, maxpgsz
));
3985 ASSERT(!tron
|| IS_P2ALIGNED(a
, maxpgsz
));
3987 if (svd
->pageprot
!= 0 && IS_P2ALIGNED(a
, maxpgsz
)) {
3988 ASSERT(vpage
!= NULL
);
3989 prot
= VPP_PROT(vpage
);
3990 ASSERT(sameprot(seg
, a
, maxpgsz
));
3991 if ((prot
& protchk
) == 0) {
3992 SEGVN_VMSTAT_FLTVNPAGES(8);
3997 if (type
== F_SOFTLOCK
) {
3998 atomic_add_long((ulong_t
*)&svd
->softlockcnt
,
4005 if (!brkcow
&& !tron
&& szc
&&
4006 !page_exists_physcontig(vp
, off
, szc
,
4007 segtype
== MAP_PRIVATE
? ppa
: NULL
)) {
4008 SEGVN_VMSTAT_FLTVNPAGES(9);
4009 if (page_alloc_pages(vp
, seg
, a
, &pplist
, NULL
,
4010 szc
, 0, 0) && type
!= F_SOFTLOCK
) {
4011 SEGVN_VMSTAT_FLTVNPAGES(10);
4014 alloc_failed
|= (1 << szc
);
4017 if (pplist
!= NULL
&&
4018 vp
->v_mpssdata
== SEGVN_PAGEIO
) {
4020 SEGVN_VMSTAT_FLTVNPAGES(11);
4021 physcontig
= segvn_fill_vp_pages(svd
,
4022 vp
, off
, szc
, ppa
, &pplist
,
4024 ASSERT(!physcontig
|| pplist
== NULL
);
4025 if (!physcontig
&& downsize
&&
4026 type
!= F_SOFTLOCK
) {
4027 ASSERT(pplist
== NULL
);
4028 SEGVN_VMSTAT_FLTVNPAGES(12);
4032 ASSERT(!physcontig
||
4033 segtype
== MAP_PRIVATE
||
4035 if (physcontig
&& ppa
[0] == NULL
) {
4039 } else if (!brkcow
&& !tron
&& szc
&& ppa
[0] != NULL
) {
4040 SEGVN_VMSTAT_FLTVNPAGES(13);
4041 ASSERT(segtype
== MAP_PRIVATE
);
4046 SEGVN_VMSTAT_FLTVNPAGES(14);
4048 ierr
= VOP_GETPAGE(vp
, (offset_t
)off
, pgsz
,
4049 &vpprot
, ppa
, pgsz
, seg
, a
, arw
,
4053 for (i
= 0; i
< pages
; i
++) {
4054 ASSERT(PAGE_LOCKED(ppa
[i
]));
4055 ASSERT(!PP_ISFREE(ppa
[i
]));
4056 ASSERT(ppa
[i
]->p_vnode
== vp
);
4057 ASSERT(ppa
[i
]->p_offset
==
4058 off
+ (i
<< PAGESHIFT
));
4062 if (segtype
== MAP_PRIVATE
) {
4063 SEGVN_VMSTAT_FLTVNPAGES(15);
4064 vpprot
&= ~PROT_WRITE
;
4067 ASSERT(segtype
== MAP_PRIVATE
);
4068 SEGVN_VMSTAT_FLTVNPAGES(16);
4069 vpprot
= PROT_ALL
& ~PROT_WRITE
;
4074 SEGVN_VMSTAT_FLTVNPAGES(17);
4075 if (pplist
!= NULL
) {
4076 SEGVN_VMSTAT_FLTVNPAGES(18);
4077 page_free_replacement_page(pplist
);
4078 page_create_putback(pages
);
4080 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4081 if (a
+ pgsz
<= eaddr
) {
4082 SEGVN_VMSTAT_FLTVNPAGES(19);
4083 err
= FC_MAKE_ERR(ierr
);
4086 va
.va_mask
= AT_SIZE
;
4087 if (VOP_GETATTR(vp
, &va
, 0, svd
->cred
, NULL
)) {
4088 SEGVN_VMSTAT_FLTVNPAGES(20);
4089 err
= FC_MAKE_ERR(EIO
);
4092 if (btopr(va
.va_size
) >= btopr(off
+ pgsz
)) {
4093 SEGVN_VMSTAT_FLTVNPAGES(21);
4094 err
= FC_MAKE_ERR(ierr
);
4097 if (btopr(va
.va_size
) <
4098 btopr(off
+ (eaddr
- a
))) {
4099 SEGVN_VMSTAT_FLTVNPAGES(22);
4100 err
= FC_MAKE_ERR(ierr
);
4103 if (brkcow
|| tron
|| type
== F_SOFTLOCK
) {
4104 /* can't reduce map area */
4105 SEGVN_VMSTAT_FLTVNPAGES(23);
4109 SEGVN_VMSTAT_FLTVNPAGES(24);
4117 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
4118 anon_array_enter(amp
, aindx
, &an_cookie
);
4121 anon_get_ptr(amp
->ahp
, aindx
) != NULL
) {
4122 ulong_t taindx
= P2ALIGN(aindx
, maxpages
);
4124 SEGVN_VMSTAT_FLTVNPAGES(25);
4125 ASSERT(anon_pages(amp
->ahp
, taindx
,
4126 maxpages
) == maxpages
);
4127 for (i
= 0; i
< pages
; i
++) {
4128 page_unlock(ppa
[i
]);
4130 anon_array_exit(&an_cookie
);
4131 ANON_LOCK_EXIT(&
->a_rwlock
);
4132 if (pplist
!= NULL
) {
4133 page_free_replacement_page(pplist
);
4134 page_create_putback(pages
);
4136 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4137 if (szc
< seg
->s_szc
) {
4138 SEGVN_VMSTAT_FLTVNPAGES(26);
4140 * For private segments SOFTLOCK
4141 * either always breaks cow (any rw
4142 * type except S_READ_NOCOW) or
4143 * address space is locked as writer
4144 * (S_READ_NOCOW case) and anon slots
4145 * can't show up on second check.
4146 * Therefore if we are here for
4147 * SOFTLOCK case it must be a cow
4148 * break but cow break never reduces
4149 * szc. text replication (tron) in
4150 * this case works as cow break.
4151 * Thus the assert below.
4153 ASSERT(!brkcow
&& !tron
&&
4154 type
!= F_SOFTLOCK
);
4159 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4164 ulong_t taindx
= P2ALIGN(aindx
, maxpages
);
4165 ASSERT(!anon_pages(amp
->ahp
, taindx
, maxpages
));
4169 if (brkcow
|| tron
) {
4170 ASSERT(amp
!= NULL
);
4171 ASSERT(pplist
== NULL
);
4172 ASSERT(szc
== seg
->s_szc
);
4173 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4174 ASSERT(IS_P2ALIGNED(aindx
, maxpages
));
4175 SEGVN_VMSTAT_FLTVNPAGES(27);
4176 ierr
= anon_map_privatepages(amp
, aindx
, szc
,
4177 seg
, a
, prot
, ppa
, vpage
, segvn_anypgsz
,
4178 tron
? PG_LOCAL
: 0, svd
->cred
);
4180 SEGVN_VMSTAT_FLTVNPAGES(28);
4181 anon_array_exit(&an_cookie
);
4182 ANON_LOCK_EXIT(&
->a_rwlock
);
4183 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4184 err
= FC_MAKE_ERR(ierr
);
4188 ASSERT(!IS_VMODSORT(ppa
[0]->p_vnode
));
4190 * p_szc can't be changed for locked
4193 ASSERT(svd
->rcookie
==
4194 HAT_INVALID_REGION_COOKIE
);
4195 hat_memload_array(hat
, a
, pgsz
, ppa
, prot
,
4198 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4199 SEGVN_VMSTAT_FLTVNPAGES(29);
4200 for (i
= 0; i
< pages
; i
++) {
4201 page_unlock(ppa
[i
]);
4204 anon_array_exit(&an_cookie
);
4205 ANON_LOCK_EXIT(&
->a_rwlock
);
4209 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
||
4210 (!svd
->pageprot
&& svd
->prot
== (prot
& vpprot
)));
4212 pfn
= page_pptonum(ppa
[0]);
4214 * hat_page_demote() needs an SE_EXCL lock on one of
4215 * constituent page_t's and it decreases root's p_szc
4216 * last. This means if root's p_szc is equal szc and
4217 * all its constituent pages are locked
4218 * hat_page_demote() that could have changed p_szc to
4219 * szc is already done and no new have page_demote()
4220 * can start for this large page.
4224 * we need to make sure same mapping size is used for
4225 * the same address range if there's a possibility the
4226 * adddress is already mapped because hat layer panics
4227 * when translation is loaded for the range already
4228 * mapped with a different page size. We achieve it
4229 * by always using largest page size possible subject
4230 * to the constraints of page size, segment page size
4231 * and page alignment. Since mappings are invalidated
4232 * when those constraints change and make it
4233 * impossible to use previously used mapping size no
4234 * mapping size conflicts should happen.
4238 if ((pszc
= ppa
[0]->p_szc
) == szc
&&
4239 IS_P2ALIGNED(pfn
, pages
)) {
4241 SEGVN_VMSTAT_FLTVNPAGES(30);
4243 for (i
= 0; i
< pages
; i
++) {
4244 ASSERT(PAGE_LOCKED(ppa
[i
]));
4245 ASSERT(!PP_ISFREE(ppa
[i
]));
4246 ASSERT(page_pptonum(ppa
[i
]) ==
4248 ASSERT(ppa
[i
]->p_szc
== szc
);
4249 ASSERT(ppa
[i
]->p_vnode
== vp
);
4250 ASSERT(ppa
[i
]->p_offset
==
4251 off
+ (i
<< PAGESHIFT
));
4255 * All pages are of szc we need and they are
4256 * all locked so they can't change szc. load
4259 * if page got promoted since last check
4260 * we don't need pplist.
4262 if (pplist
!= NULL
) {
4263 page_free_replacement_page(pplist
);
4264 page_create_putback(pages
);
4266 if (PP_ISMIGRATE(ppa
[0])) {
4267 page_migrate(seg
, a
, ppa
, pages
);
4269 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4271 hat_memload_array_region(hat
, a
, pgsz
,
4272 ppa
, prot
& vpprot
, hat_flag
,
4275 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4276 for (i
= 0; i
< pages
; i
++) {
4277 page_unlock(ppa
[i
]);
4281 anon_array_exit(&an_cookie
);
4282 ANON_LOCK_EXIT(&
->a_rwlock
);
4288 * See if upsize is possible.
4290 if (pszc
> szc
&& szc
< seg
->s_szc
&&
4291 (segvn_anypgsz_vnode
|| pszc
>= seg
->s_szc
)) {
4293 uint_t pszc1
= MIN(pszc
, seg
->s_szc
);
4294 ppgsz
= page_get_pagesize(pszc1
);
4295 ppages
= btop(ppgsz
);
4296 aphase
= btop(P2PHASE((uintptr_t)a
, ppgsz
));
4298 ASSERT(type
!= F_SOFTLOCK
);
4300 SEGVN_VMSTAT_FLTVNPAGES(31);
4301 if (aphase
!= P2PHASE(pfn
, ppages
)) {
4302 segvn_faultvnmpss_align_err4
++;
4304 SEGVN_VMSTAT_FLTVNPAGES(32);
4305 if (pplist
!= NULL
) {
4306 page_t
*pl
= pplist
;
4307 page_free_replacement_page(pl
);
4308 page_create_putback(pages
);
4310 for (i
= 0; i
< pages
; i
++) {
4311 page_unlock(ppa
[i
]);
4314 anon_array_exit(&an_cookie
);
4315 ANON_LOCK_EXIT(&
->a_rwlock
);
4324 * check if we should use smallest mapping size.
4329 !IS_P2ALIGNED(pfn
, pages
)) ||
4331 !segvn_full_szcpages(ppa
, szc
, &upgrdfail
,
4334 if (upgrdfail
&& type
!= F_SOFTLOCK
) {
4336 * segvn_full_szcpages failed to lock
4337 * all pages EXCL. Size down.
4341 SEGVN_VMSTAT_FLTVNPAGES(33);
4343 if (pplist
!= NULL
) {
4344 page_t
*pl
= pplist
;
4345 page_free_replacement_page(pl
);
4346 page_create_putback(pages
);
4349 for (i
= 0; i
< pages
; i
++) {
4350 page_unlock(ppa
[i
]);
4353 anon_array_exit(&an_cookie
);
4354 ANON_LOCK_EXIT(&
->a_rwlock
);
4359 if (szc
!= 0 && !upgrdfail
) {
4360 segvn_faultvnmpss_align_err5
++;
4362 SEGVN_VMSTAT_FLTVNPAGES(34);
4363 if (pplist
!= NULL
) {
4364 page_free_replacement_page(pplist
);
4365 page_create_putback(pages
);
4367 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4369 if (upgrdfail
&& segvn_anypgsz_vnode
) {
4371 hat_memload_array_region(hat
, a
, pgsz
,
4372 ppa
, prot
& vpprot
, hat_flag
,
4375 for (i
= 0; i
< pages
; i
++) {
4376 hat_memload_region(hat
,
4377 a
+ (i
<< PAGESHIFT
),
4378 ppa
[i
], prot
& vpprot
,
4379 hat_flag
, svd
->rcookie
);
4382 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4383 for (i
= 0; i
< pages
; i
++) {
4384 page_unlock(ppa
[i
]);
4388 anon_array_exit(&an_cookie
);
4389 ANON_LOCK_EXIT(&
->a_rwlock
);
4396 * segvn_full_szcpages() upgraded pages szc.
4398 ASSERT(pszc
== ppa
[0]->p_szc
);
4399 ASSERT(IS_P2ALIGNED(pfn
, pages
));
4405 SEGVN_VMSTAT_FLTVNPAGES(35);
4407 * p_szc of ppa[0] can change since we haven't
4408 * locked all constituent pages. Call
4409 * page_lock_szc() to prevent szc changes.
4410 * This should be a rare case that happens when
4411 * multiple segments use a different page size
4412 * to map the same file offsets.
4414 szcmtx
= page_szc_lock(ppa
[0]);
4415 pszc
= ppa
[0]->p_szc
;
4416 ASSERT(szcmtx
!= NULL
|| pszc
== 0);
4417 ASSERT(ppa
[0]->p_szc
<= pszc
);
4419 SEGVN_VMSTAT_FLTVNPAGES(36);
4420 if (szcmtx
!= NULL
) {
4425 if (pplist
!= NULL
) {
4427 * page got promoted since last check.
4428 * we don't need preaalocated large
4431 SEGVN_VMSTAT_FLTVNPAGES(37);
4432 page_free_replacement_page(pplist
);
4433 page_create_putback(pages
);
4435 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4437 hat_memload_array_region(hat
, a
, pgsz
, ppa
,
4438 prot
& vpprot
, hat_flag
, svd
->rcookie
);
4440 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4441 for (i
= 0; i
< pages
; i
++) {
4442 page_unlock(ppa
[i
]);
4446 anon_array_exit(&an_cookie
);
4447 ANON_LOCK_EXIT(&
->a_rwlock
);
4453 * if page got demoted since last check
4454 * we could have not allocated larger page.
4457 if (pplist
== NULL
&&
4458 page_alloc_pages(vp
, seg
, a
, &pplist
, NULL
,
4459 szc
, 0, 0) && type
!= F_SOFTLOCK
) {
4460 SEGVN_VMSTAT_FLTVNPAGES(38);
4461 for (i
= 0; i
< pages
; i
++) {
4462 page_unlock(ppa
[i
]);
4465 anon_array_exit(&an_cookie
);
4466 ANON_LOCK_EXIT(&
->a_rwlock
);
4469 alloc_failed
|= (1 << szc
);
4473 SEGVN_VMSTAT_FLTVNPAGES(39);
4475 if (pplist
!= NULL
) {
4476 segvn_relocate_pages(ppa
, pplist
);
4479 ASSERT(type
== F_SOFTLOCK
);
4480 SEGVN_VMSTAT_FLTVNPAGES(40);
4484 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
, prot
, vpprot
);
4486 if (pplist
== NULL
&& segvn_anypgsz_vnode
== 0) {
4487 ASSERT(type
== F_SOFTLOCK
);
4488 for (i
= 0; i
< pages
; i
++) {
4489 ASSERT(ppa
[i
]->p_szc
< szc
);
4490 hat_memload_region(hat
,
4491 a
+ (i
<< PAGESHIFT
),
4492 ppa
[i
], prot
& vpprot
, hat_flag
,
4496 ASSERT(pplist
!= NULL
|| type
== F_SOFTLOCK
);
4497 hat_memload_array_region(hat
, a
, pgsz
, ppa
,
4498 prot
& vpprot
, hat_flag
, svd
->rcookie
);
4500 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4501 for (i
= 0; i
< pages
; i
++) {
4502 ASSERT(PAGE_SHARED(ppa
[i
]));
4503 page_unlock(ppa
[i
]);
4507 anon_array_exit(&an_cookie
);
4508 ANON_LOCK_EXIT(&
->a_rwlock
);
4512 if (vpage
!= NULL
) {
4519 ASSERT(a
< lpgeaddr
);
4521 ASSERT(!brkcow
&& !tron
&& type
!= F_SOFTLOCK
);
4524 * ierr == -1 means we failed to map with a large page.
4525 * (either due to allocation/relocation failures or
4526 * misalignment with other mappings to this file.
4528 * ierr == -2 means some other thread allocated a large page
4529 * after we gave up tp map with a large page. retry with
4532 ASSERT(ierr
== -1 || ierr
== -2);
4533 ASSERT(ierr
== -2 || szc
!= 0);
4534 ASSERT(ierr
== -1 || szc
< seg
->s_szc
);
4536 SEGVN_VMSTAT_FLTVNPAGES(41);
4537 ASSERT(pszc
> szc
&& pszc
<= seg
->s_szc
);
4539 } else if (segvn_anypgsz_vnode
) {
4540 SEGVN_VMSTAT_FLTVNPAGES(42);
4543 SEGVN_VMSTAT_FLTVNPAGES(43);
4546 * other process created pszc large page.
4547 * but we still have to drop to 0 szc.
4552 pgsz
= page_get_pagesize(szc
);
4556 * Size up case. Note lpgaddr may only be needed for
4557 * softlock case so we don't adjust it here.
4559 a
= (caddr_t
)P2ALIGN((uintptr_t)a
, pgsz
);
4560 ASSERT(a
>= lpgaddr
);
4561 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4562 off
= svd
->offset
+ (uintptr_t)(a
- seg
->s_base
);
4563 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4564 vpage
= (svd
->vpage
!= NULL
) ?
4565 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4568 * Size down case. Note lpgaddr may only be needed for
4569 * softlock case so we don't adjust it here.
4571 ASSERT(IS_P2ALIGNED(a
, pgsz
));
4572 ASSERT(IS_P2ALIGNED(lpgeaddr
, pgsz
));
4573 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4574 ASSERT(a
< lpgeaddr
);
4576 SEGVN_VMSTAT_FLTVNPAGES(44);
4578 * The beginning of the large page region can
4579 * be pulled to the right to make a smaller
4580 * region. We haven't yet faulted a single
4583 a
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
4584 ASSERT(a
>= lpgaddr
);
4586 (uintptr_t)(a
- seg
->s_base
);
4587 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4588 vpage
= (svd
->vpage
!= NULL
) ?
4589 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4594 kmem_free(ppa
, ppasize
);
4595 if (!err
&& !vop_size_err
) {
4596 SEGVN_VMSTAT_FLTVNPAGES(45);
4599 if (type
== F_SOFTLOCK
&& a
> lpgaddr
) {
4600 SEGVN_VMSTAT_FLTVNPAGES(46);
4601 segvn_softunlock(seg
, lpgaddr
, a
- lpgaddr
, S_OTHER
);
4603 if (!vop_size_err
) {
4604 SEGVN_VMSTAT_FLTVNPAGES(47);
4607 ASSERT(brkcow
|| tron
|| type
== F_SOFTLOCK
);
4609 * Large page end is mapped beyond the end of file and it's a cow
4610 * fault (can be a text replication induced cow) or softlock so we can't
4611 * reduce the map area. For now just demote the segment. This should
4612 * really only happen if the end of the file changed after the mapping
4613 * was established since when large page segments are created we make
4614 * sure they don't extend beyond the end of the file.
4616 SEGVN_VMSTAT_FLTVNPAGES(48);
4618 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4619 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4621 if (seg
->s_szc
!= 0) {
4622 segvn_fltvnpages_clrszc_cnt
++;
4623 ASSERT(svd
->softlockcnt
== 0);
4624 err
= segvn_clrszc(seg
);
4626 segvn_fltvnpages_clrszc_err
++;
4629 ASSERT(err
|| seg
->s_szc
== 0);
4630 SEGVN_LOCK_DOWNGRADE(seg
->s_as
, &svd
->lock
);
4631 /* segvn_fault will do its job as if szc had been zero to begin with */
4632 return (err
== 0 ? IE_RETRY
: FC_MAKE_ERR(err
));
4636 * This routine will attempt to fault in one large page.
4637 * it will use smaller pages if that fails.
4638 * It should only be called for pure anonymous segments.
4641 segvn_fault_anonpages(struct hat
*hat
, struct seg
*seg
, caddr_t lpgaddr
,
4642 caddr_t lpgeaddr
, enum fault_type type
, enum seg_rw rw
, caddr_t addr
,
4643 caddr_t eaddr
, int brkcow
)
4645 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
4646 struct anon_map
*amp
= svd
->amp
;
4647 uchar_t segtype
= svd
->type
;
4648 uint_t szc
= seg
->s_szc
;
4649 size_t pgsz
= page_get_pagesize(szc
);
4650 size_t maxpgsz
= pgsz
;
4651 pgcnt_t pages
= btop(pgsz
);
4652 uint_t ppaszc
= szc
;
4653 caddr_t a
= lpgaddr
;
4654 ulong_t aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4655 struct vpage
*vpage
= (svd
->vpage
!= NULL
) ?
4656 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4661 uint_t protchk
, prot
, vpprot
;
4663 int hat_flag
= (type
== F_SOFTLOCK
) ? HAT_LOAD_LOCK
: HAT_LOAD
;
4664 anon_sync_obj_t cookie
;
4666 int pgflags
= (svd
->tr_state
== SEGVN_TR_ON
) ? PG_LOCAL
: 0;
4669 ASSERT(amp
!= NULL
);
4670 ASSERT(enable_mbit_wa
== 0); /* no mbit simulations with large pages */
4671 ASSERT(!(svd
->flags
& MAP_NORESERVE
));
4672 ASSERT(type
!= F_SOFTUNLOCK
);
4673 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4674 ASSERT(!brkcow
|| svd
->tr_state
== SEGVN_TR_OFF
);
4675 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
4677 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
4679 VM_STAT_COND_ADD(type
== F_SOFTLOCK
, segvnvmstats
.fltanpages
[0]);
4680 VM_STAT_COND_ADD(type
!= F_SOFTLOCK
, segvnvmstats
.fltanpages
[1]);
4682 if (svd
->flags
& MAP_TEXT
) {
4683 hat_flag
|= HAT_LOAD_TEXT
;
4686 if (svd
->pageprot
) {
4690 protchk
= PROT_READ
;
4693 protchk
= PROT_WRITE
;
4696 protchk
= PROT_EXEC
;
4700 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
4703 VM_STAT_ADD(segvnvmstats
.fltanpages
[2]);
4705 protchk
= PROT_NONE
;
4707 /* caller has already done segment level protection check. */
ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
4715 for (; a
< lpgeaddr
; a
+= pgsz
, aindx
+= pages
) {
4716 if (svd
->pageprot
!= 0 && IS_P2ALIGNED(a
, maxpgsz
)) {
4717 VM_STAT_ADD(segvnvmstats
.fltanpages
[3]);
4718 ASSERT(vpage
!= NULL
);
4719 prot
= VPP_PROT(vpage
);
4720 ASSERT(sameprot(seg
, a
, maxpgsz
));
4721 if ((prot
& protchk
) == 0) {
4726 if (adjszc_chk
&& IS_P2ALIGNED(a
, maxpgsz
) &&
4728 ASSERT(a
> lpgaddr
);
4732 ASSERT(IS_P2ALIGNED(aindx
, pages
));
4733 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
,
4736 if (type
== F_SOFTLOCK
) {
4737 atomic_add_long((ulong_t
*)&svd
->softlockcnt
,
4740 anon_array_enter(amp
, aindx
, &cookie
);
4741 ppa_szc
= (uint_t
)-1;
4742 ierr
= anon_map_getpages(amp
, aindx
, szc
, seg
, a
,
4743 prot
, &vpprot
, ppa
, &ppa_szc
, vpage
, rw
, brkcow
,
4744 segvn_anypgsz
, pgflags
, svd
->cred
);
4746 anon_array_exit(&cookie
);
4747 VM_STAT_ADD(segvnvmstats
.fltanpages
[4]);
4748 if (type
== F_SOFTLOCK
) {
4750 (ulong_t
*)&svd
->softlockcnt
,
4754 VM_STAT_ADD(segvnvmstats
.fltanpages
[6]);
4755 err
= FC_MAKE_ERR(ierr
);
4761 ASSERT(!IS_VMODSORT(ppa
[0]->p_vnode
));
4763 ASSERT(segtype
== MAP_SHARED
||
4764 ppa
[0]->p_szc
<= szc
);
4765 ASSERT(segtype
== MAP_PRIVATE
||
4766 ppa
[0]->p_szc
>= szc
);
4769 * Handle pages that have been marked for migration
4771 if (lgrp_optimizations())
4772 page_migrate(seg
, a
, ppa
, pages
);
4774 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
4776 if (segtype
== MAP_SHARED
) {
4777 vpprot
|= PROT_WRITE
;
4780 hat_memload_array(hat
, a
, pgsz
, ppa
,
4781 prot
& vpprot
, hat_flag
);
4783 if (hat_flag
& HAT_LOAD_LOCK
) {
4784 VM_STAT_ADD(segvnvmstats
.fltanpages
[7]);
4786 VM_STAT_ADD(segvnvmstats
.fltanpages
[8]);
4787 for (i
= 0; i
< pages
; i
++)
4788 page_unlock(ppa
[i
]);
4793 anon_array_exit(&cookie
);
4798 ASSERT(a
< lpgeaddr
);
4800 * ierr == -1 means we failed to allocate a large page.
4801 * so do a size down operation.
4803 * ierr == -2 means some other process that privately shares
4804 * pages with this process has allocated a larger page and we
4805 * need to retry with larger pages. So do a size up
4806 * operation. This relies on the fact that large pages are
4807 * never partially shared i.e. if we share any constituent
4808 * page of a large page with another process we must share the
4809 * entire large page. Note this cannot happen for SOFTLOCK
4810 * case, unless current address (a) is at the beginning of the
4811 * next page size boundary because the other process couldn't
4812 * have relocated locked pages.
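/*
 * Sketch of the retry sequence below (illustrative szc values,
 * assuming segvn_anypgsz is nonzero): if the current szc is 2 and
 * anon_map_getpages() returns -1, szc drops to 1 and pgsz is
 * recomputed; if it returns -2 because another process already shares
 * a larger page, szc is bumped up instead so the region is re-walked
 * at the size that actually exists.
 */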
4814 ASSERT(ierr
== -1 || ierr
== -2);
4816 if (segvn_anypgsz
) {
4817 ASSERT(ierr
== -2 || szc
!= 0);
4818 ASSERT(ierr
== -1 || szc
< seg
->s_szc
);
4819 szc
= (ierr
== -1) ? szc
- 1 : szc
+ 1;
4822 * For non COW faults and segvn_anypgsz == 0
4823 * we need to be careful not to loop forever
4824 * if existing page is found with szc other
4825 * than 0 or seg->s_szc. This could be due
4826 * to page relocations on behalf of DR or
4827 * more likely large page creation. For this
4828 * case simply re-size to existing page's szc
4829 * if returned by anon_map_getpages().
4831 if (ppa_szc
== (uint_t
)-1) {
4832 szc
= (ierr
== -1) ? 0 : seg
->s_szc
;
4834 ASSERT(ppa_szc
<= seg
->s_szc
);
4835 ASSERT(ierr
== -2 || ppa_szc
< szc
);
4836 ASSERT(ierr
== -1 || ppa_szc
> szc
);
4841 pgsz
= page_get_pagesize(szc
);
4843 ASSERT(type
!= F_SOFTLOCK
|| ierr
== -1 ||
4844 (IS_P2ALIGNED(a
, pgsz
) && IS_P2ALIGNED(lpgeaddr
, pgsz
)));
4845 if (type
== F_SOFTLOCK
) {
 * For softlocks we cannot reduce the fault area
 * (calculated based on the largest page size for this
 * segment) for size down and a is already next
 * page size aligned as asserted above for size
 * ups. Therefore just continue in case of softlock.
4853 VM_STAT_ADD(segvnvmstats
.fltanpages
[9]);
4854 continue; /* keep lint happy */
4855 } else if (ierr
== -2) {
4858 * Size up case. Note lpgaddr may only be needed for
4859 * softlock case so we don't adjust it here.
4861 VM_STAT_ADD(segvnvmstats
.fltanpages
[10]);
4862 a
= (caddr_t
)P2ALIGN((uintptr_t)a
, pgsz
);
4863 ASSERT(a
>= lpgaddr
);
4864 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4865 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4866 vpage
= (svd
->vpage
!= NULL
) ?
4867 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4870 * Size down case. Note lpgaddr may only be needed for
4871 * softlock case so we don't adjust it here.
4873 VM_STAT_ADD(segvnvmstats
.fltanpages
[11]);
4874 ASSERT(IS_P2ALIGNED(a
, pgsz
));
4875 ASSERT(IS_P2ALIGNED(lpgeaddr
, pgsz
));
4876 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4877 ASSERT(a
< lpgeaddr
);
4880 * The beginning of the large page region can
4881 * be pulled to the right to make a smaller
4882 * region. We haven't yet faulted a single
4885 VM_STAT_ADD(segvnvmstats
.fltanpages
[12]);
4886 a
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
4887 ASSERT(a
>= lpgaddr
);
4888 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4889 vpage
= (svd
->vpage
!= NULL
) ?
4890 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
VM_STAT_ADD(segvnvmstats.fltanpages[13]);
ANON_LOCK_EXIT(&amp->a_rwlock);
kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
VM_STAT_ADD(segvnvmstats.fltanpages[14]);
ANON_LOCK_EXIT(&amp->a_rwlock);
kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4902 if (type
== F_SOFTLOCK
&& a
> lpgaddr
) {
4903 VM_STAT_ADD(segvnvmstats
.fltanpages
[15]);
4904 segvn_softunlock(seg
, lpgaddr
, a
- lpgaddr
, S_OTHER
);
4909 int fltadvice
= 1; /* set to free behind pages for sequential access */
4912 * This routine is called via a machine specific fault handling routine.
4913 * It is also called by software routines wishing to lock or unlock
4914 * a range of addresses.
4916 * Here is the basic algorithm:
4918 * Call segvn_softunlock
4921 * Checking and set up work
4922 * If we will need some non-anonymous pages
4923 * Call VOP_GETPAGE over the range of non-anonymous pages
4925 * Loop over all addresses requested
4926 * Call segvn_faultpage passing in page list
4927 * to load up translations and handle anonymous pages
4929 * Load up translation to any additional pages in page list not
4930 * already handled that fit into this segment
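/*
 * The fault types seen here are presumably F_INVAL/F_PROT for
 * hardware faults and F_SOFTLOCK/F_SOFTUNLOCK for the software
 * lock/unlock requests mentioned above.
 */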
4933 segvn_fault(struct hat
*hat
, struct seg
*seg
, caddr_t addr
, size_t len
,
4934 enum fault_type type
, enum seg_rw rw
)
4936 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
4937 page_t
**plp
, **ppp
, *pp
;
4940 struct vpage
*vpage
;
4941 uint_t vpprot
, prot
= 0;
4943 page_t
*pl
[PVN_GETPAGE_NUM
+ 1];
4944 size_t plsz
, pl_alloc_sz
;
4946 ulong_t anon_index
= 0;
4947 struct anon_map
*amp
;
4949 caddr_t lpgaddr
, lpgeaddr
;
4951 anon_sync_obj_t cookie
;
4952 int brkcow
= BREAK_COW_SHARE(rw
, type
, svd
->type
);
4954 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
4955 ASSERT(svd
->amp
== NULL
|| svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
4958 * First handle the easy stuff
4960 if (type
== F_SOFTUNLOCK
) {
4961 if (rw
== S_READ_NOCOW
) {
4963 ASSERT(AS_WRITE_HELD(seg
->s_as
));
4965 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
4966 pgsz
= (seg
->s_szc
== 0) ? PAGESIZE
:
4967 page_get_pagesize(seg
->s_szc
);
4968 VM_STAT_COND_ADD(pgsz
> PAGESIZE
, segvnvmstats
.fltanpages
[16]);
4969 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
4970 segvn_softunlock(seg
, lpgaddr
, lpgeaddr
- lpgaddr
, rw
);
4971 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4975 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
||
4976 !HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
));
4978 if (svd
->tr_state
== SEGVN_TR_INIT
) {
4979 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4980 if (svd
->tr_state
== SEGVN_TR_INIT
) {
4981 ASSERT(svd
->vp
!= NULL
&& svd
->amp
== NULL
);
4982 ASSERT(svd
->flags
& MAP_TEXT
);
4983 ASSERT(svd
->type
== MAP_PRIVATE
);
4984 segvn_textrepl(seg
);
4985 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
4986 ASSERT(svd
->tr_state
!= SEGVN_TR_ON
||
4989 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4991 } else if (svd
->tr_state
!= SEGVN_TR_OFF
) {
4992 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4994 if (rw
== S_WRITE
&& svd
->tr_state
!= SEGVN_TR_OFF
) {
4995 ASSERT(!svd
->pageprot
&& !(svd
->prot
& PROT_WRITE
));
4996 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5000 if (svd
->tr_state
== SEGVN_TR_ON
) {
5001 ASSERT(svd
->vp
!= NULL
&& svd
->amp
!= NULL
);
5002 segvn_textunrepl(seg
, 0);
5003 ASSERT(svd
->amp
== NULL
&&
5004 svd
->tr_state
== SEGVN_TR_OFF
);
5005 } else if (svd
->tr_state
!= SEGVN_TR_OFF
) {
5006 svd
->tr_state
= SEGVN_TR_OFF
;
5008 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
5009 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5013 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
5016 * If we have the same protections for the entire segment,
5017 * insure that the access being attempted is legitimate.
5020 if (svd
->pageprot
== 0) {
5026 protchk
= PROT_READ
;
5029 protchk
= PROT_WRITE
;
5032 protchk
= PROT_EXEC
;
5036 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
5040 if ((svd
->prot
& protchk
) == 0) {
5041 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5042 return (FC_PROT
); /* illegal access type */
5046 if (brkcow
&& HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5047 /* this must be SOFTLOCK S_READ fault */
5048 ASSERT(svd
->amp
== NULL
);
5049 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5050 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5051 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5052 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5054 * this must be the first ever non S_READ_NOCOW
5055 * softlock for this segment.
5057 ASSERT(svd
->softlockcnt
== 0);
5058 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
5060 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
5062 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5067 * We can't allow the long term use of softlocks for vmpss segments,
5068 * because in some file truncation cases we should be able to demote
5069 * the segment, which requires that there are no softlocks. The
5070 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5071 * segment is S_READ_NOCOW, where the caller holds the address space
5072 * locked as writer and calls softunlock before dropping the as lock.
5073 * S_READ_NOCOW is used by /proc to read memory from another user.
5075 * Another deadlock between SOFTLOCK and file truncation can happen
5076 * because segvn_fault_vnodepages() calls the FS one pagesize at
5077 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5078 * can cause a deadlock because the first set of page_t's remain
5079 * locked SE_SHARED. To avoid this, we demote segments on a first
5080 * SOFTLOCK if they have a length greater than the segment's
5083 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5084 * the access type is S_READ_NOCOW and the fault length is less than
5085 * or equal to the segment's page size. While this is quite restrictive,
5086 * it should be the most common case of SOFTLOCK against a vmpss
5089 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5090 * caller makes sure no COW will be caused by another thread for a
5093 if (type
== F_SOFTLOCK
&& svd
->vp
!= NULL
&& seg
->s_szc
!= 0) {
5096 if (rw
!= S_READ_NOCOW
) {
5099 if (!demote
&& len
> PAGESIZE
) {
5100 pgsz
= page_get_pagesize(seg
->s_szc
);
5101 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
,
5103 if (lpgeaddr
- lpgaddr
> pgsz
) {
5108 ASSERT(demote
|| AS_WRITE_HELD(seg
->s_as
));
5111 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5112 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5113 if (seg
->s_szc
!= 0) {
5114 segvn_vmpss_clrszc_cnt
++;
5115 ASSERT(svd
->softlockcnt
== 0);
5116 err
= segvn_clrszc(seg
);
5118 segvn_vmpss_clrszc_err
++;
5119 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5120 return (FC_MAKE_ERR(err
));
5123 ASSERT(seg
->s_szc
== 0);
5124 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5130 * Check to see if we need to allocate an anon_map structure.
5132 if (svd
->amp
== NULL
&& (svd
->vp
== NULL
|| brkcow
)) {
5133 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5135 * Drop the "read" lock on the segment and acquire
5136 * the "write" version since we have to allocate the
5139 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5140 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5142 if (svd
->amp
== NULL
) {
5143 svd
->amp
= anonmap_alloc(seg
->s_size
, 0, ANON_SLEEP
);
5144 svd
->amp
->a_szc
= seg
->s_szc
;
5146 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5149 * Start all over again since segment protections
5150 * may have changed after we dropped the "read" lock.
5156 * S_READ_NOCOW vs S_READ distinction was
5157 * only needed for the code above. After
5158 * that we treat it as S_READ.
5160 if (rw
== S_READ_NOCOW
) {
5161 ASSERT(type
== F_SOFTLOCK
);
5162 ASSERT(AS_WRITE_HELD(seg
->s_as
));
5169 * MADV_SEQUENTIAL work is ignored for large page segments.
5171 if (seg
->s_szc
!= 0) {
5172 pgsz
= page_get_pagesize(seg
->s_szc
);
5173 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
5174 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
5175 if (svd
->vp
== NULL
) {
5176 err
= segvn_fault_anonpages(hat
, seg
, lpgaddr
,
5177 lpgeaddr
, type
, rw
, addr
, addr
+ len
, brkcow
);
5179 err
= segvn_fault_vnodepages(hat
, seg
, lpgaddr
,
5180 lpgeaddr
, type
, rw
, addr
, addr
+ len
, brkcow
);
5181 if (err
== IE_RETRY
) {
5182 ASSERT(seg
->s_szc
== 0);
5183 ASSERT(SEGVN_READ_HELD(seg
->s_as
, &svd
->lock
));
5184 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5188 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5192 page
= seg_page(seg
, addr
);
5194 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5195 anon_index
= svd
->anon_index
+ page
;
5197 if (type
== F_PROT
&& rw
== S_READ
&&
5198 svd
->tr_state
== SEGVN_TR_OFF
&&
5199 svd
->type
== MAP_PRIVATE
&& svd
->pageprot
== 0) {
5200 size_t index
= anon_index
;
ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5205 * The fast path could apply to S_WRITE also, except
5206 * that the protection fault could be caused by lazy
5207 * tlb flush when ro->rw. In this case, the pte is
5208 * RW already. But RO in the other cpu's tlb causes
5209 * the fault. Since hat_chgprot won't do anything if
5210 * pte doesn't change, we may end up faulting
5211 * indefinitely until the RO tlb entry gets replaced.
5213 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
, index
++) {
5214 anon_array_enter(amp
, index
, &cookie
);
5215 ap
= anon_get_ptr(amp
->ahp
, index
);
5216 anon_array_exit(&cookie
);
if ((ap == NULL) || (ap->an_refcnt != 1)) {
	ANON_LOCK_EXIT(&amp->a_rwlock);
hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
ANON_LOCK_EXIT(&amp->a_rwlock);
5224 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5230 if (svd
->vpage
== NULL
)
5233 vpage
= &svd
->vpage
[page
];
5235 off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
5238 * If MADV_SEQUENTIAL has been set for the particular page we
5239 * are faulting on, free behind all pages in the segment and put
5240 * them on the free list.
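/*
 * Rough sketch of the free-behind loop that follows: starting one
 * page before the faulting offset and walking backwards toward the
 * start of the mapping, each page found in core with no lock or cow
 * claims is pushed out with VOP_PUTPAGE(B_DONTNEED | B_FREE | B_ASYNC)
 * so a sequential reader does not grow the page cache behind itself.
 */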
5243 if ((page
!= 0) && fltadvice
&& svd
->tr_state
!= SEGVN_TR_ON
) {
5244 struct vpage
*vpp
= NULL
;
5245 ulong_t fanon_index
= 0;
5247 u_offset_t pgoff
, fpgoff
;
5249 struct anon
*fap
= NULL
;
5251 if (svd
->advice
== MADV_SEQUENTIAL
||
5253 VPP_ADVICE(vpage
) == MADV_SEQUENTIAL
)) {
5254 pgoff
= off
- PAGESIZE
;
5257 vpp
= &svd
->vpage
[fpage
];
5259 fanon_index
= svd
->anon_index
+ fpage
;
5261 while (pgoff
> svd
->offset
) {
5262 if (svd
->advice
!= MADV_SEQUENTIAL
&&
5263 (!svd
->pageadvice
|| (vpage
&&
5264 VPP_ADVICE(vpp
) != MADV_SEQUENTIAL
)))
5268 * If this is an anon page, we must find the
5269 * correct <vp, offset> for it
5273 ANON_LOCK_ENTER(&
->a_rwlock
,
5275 anon_array_enter(amp
, fanon_index
,
5277 fap
= anon_get_ptr(amp
->ahp
,
5280 swap_xlate(fap
, &fvp
, &fpgoff
);
5285 anon_array_exit(&cookie
);
5286 ANON_LOCK_EXIT(&
->a_rwlock
);
5294 * Skip pages that are free or have an
5297 pp
= page_lookup_nowait(fvp
, fpgoff
, SE_SHARED
);
5301 * We don't need the page_struct_lock to test
5302 * as this is only advisory; even if we
5303 * acquire it someone might race in and lock
5304 * the page after we unlock and before the
5305 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5307 if (pp
->p_lckcnt
== 0 && pp
->p_cowcnt
== 0) {
5309 * Hold the vnode before releasing
5310 * the page lock to prevent it from
5311 * being freed and re-used by some
5317 * We should build a page list
5318 * to kluster putpages XXX
5320 (void) VOP_PUTPAGE(fvp
,
5321 (offset_t
)fpgoff
, PAGESIZE
,
5322 (B_DONTNEED
|B_FREE
|B_ASYNC
),
5327 * XXX - Should the loop terminate if
5328 * the page is `locked'?
5344 * See if we need to call VOP_GETPAGE for
5345 * *any* of the range being faulted on.
5346 * We can skip all of this work if there
5347 * was no original vnode.
5349 if (svd
->vp
!= NULL
) {
5362 * Only acquire reader lock to prevent amp->ahp
5363 * from being changed. It's ok to miss pages,
5364 * hence we don't do anon_array_enter
5366 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5367 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
5369 if (len
<= PAGESIZE
)
5370 /* inline non_anon() */
5371 dogetpage
= (ap
== NULL
);
5373 dogetpage
= non_anon(amp
->ahp
, anon_index
,
5375 ANON_LOCK_EXIT(&
->a_rwlock
);
5380 struct as
*as
= seg
->s_as
;
5382 if (len
> ptob((sizeof (pl
) / sizeof (pl
[0])) - 1)) {
5384 * Page list won't fit in local array,
5385 * allocate one of the needed size.
5388 (btop(len
) + 1) * sizeof (page_t
*);
5389 plp
= kmem_alloc(pl_alloc_sz
, KM_SLEEP
);
5392 } else if (rw
== S_WRITE
&& svd
->type
== MAP_PRIVATE
||
5393 svd
->tr_state
== SEGVN_TR_ON
|| rw
== S_OTHER
||
5394 (((size_t)(addr
+ PAGESIZE
) <
5395 (size_t)(seg
->s_base
+ seg
->s_size
)) &&
5396 hat_probe(as
->a_hat
, addr
+ PAGESIZE
))) {
5398 * Ask VOP_GETPAGE to return the exact number
5400 * (a) this is a COW fault, or
5401 * (b) this is a software fault, or
5402 * (c) next page is already mapped.
5407 * Ask VOP_GETPAGE to return adjacent pages
5408 * within the segment.
5410 plsz
= MIN((size_t)PVN_GETPAGE_SZ
, (size_t)
5411 ((seg
->s_base
+ seg
->s_size
) - addr
));
5412 ASSERT((addr
+ plsz
) <=
5413 (seg
->s_base
+ seg
->s_size
));
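/*
 * plsz therefore never lets the kluster window extend past the end of
 * the segment; any extra pages VOP_GETPAGE returns beyond the faulting
 * range are picked up by the "additional pages" loop further below.
 */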
5417 * Need to get some non-anonymous pages.
5418 * We need to make only one call to GETPAGE to do
5419 * this to prevent certain deadlocking conditions
5420 * when we are doing locking. In this case
5421 * non_anon() should have picked up the smallest
5422 * range which includes all the non-anonymous
5423 * pages in the requested range. We have to
5424 * be careful regarding which rw flag to pass in
5425 * because on a private mapping, the underlying
5426 * object is never allowed to be written.
5428 if (rw
== S_WRITE
&& svd
->type
== MAP_PRIVATE
) {
5434 TRACE_3(TR_FAC_VM
, TR_SEGVN_GETPAGE
,
5435 "segvn_getpage:seg %p addr %p vp %p",
5437 err
= VOP_GETPAGE(vp
, (offset_t
)vp_off
, vp_len
,
5438 &vpprot
, plp
, plsz
, seg
, addr
+ (vp_off
- off
), arw
,
5441 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5442 segvn_pagelist_rele(plp
);
5444 kmem_free(plp
, pl_alloc_sz
);
5445 return (FC_MAKE_ERR(err
));
5447 if (svd
->type
== MAP_PRIVATE
)
5448 vpprot
&= ~PROT_WRITE
;
5453 * N.B. at this time the plp array has all the needed non-anon
5454 * pages in addition to (possibly) having some adjacent pages.
5458 * Always acquire the anon_array_lock to prevent
5459 * 2 threads from allocating separate anon slots for
5462 * If this is a copy-on-write fault and we don't already
5463 * have the anon_array_lock, acquire it to prevent the
5464 * fault routine from handling multiple copy-on-write faults
5465 * on the same "addr" in the same address space.
5467 * Only one thread should deal with the fault since after
5468 * it is handled, the other threads can acquire a translation
5469 * to the newly created private page. This prevents two or
5470 * more threads from creating different private pages for the
5473 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5474 * to prevent deadlock between this thread and another thread
5475 * which has soft-locked this page and wants to acquire serial_lock.
5478 * The fix for bug 4026339 becomes unnecessary when using the
5479 * locking scheme with per amp rwlock and a global set of hash
5480 * lock, anon_array_lock. If we steal a vnode page when low
 * on memory and upgrade the page lock through page_rename,
5482 * then the page is PAGE_HANDLED, nothing needs to be done
5483 * for this page after returning from segvn_faultpage.
5485 * But really, the page lock should be downgraded after
5486 * the stolen page is page_rename'd.
5490 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5493 * Ok, now loop over the address range and handle faults
5495 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
, off
+= PAGESIZE
) {
5496 err
= segvn_faultpage(hat
, seg
, a
, off
, vpage
, plp
, vpprot
,
5500 ANON_LOCK_EXIT(&
->a_rwlock
);
5501 if (type
== F_SOFTLOCK
&& a
> addr
) {
5502 segvn_softunlock(seg
, addr
, (a
- addr
),
5505 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5506 segvn_pagelist_rele(plp
);
5508 kmem_free(plp
, pl_alloc_sz
);
5513 } else if (svd
->vpage
) {
5514 page
= seg_page(seg
, addr
);
5515 vpage
= &svd
->vpage
[++page
];
5519 /* Didn't get pages from the underlying fs so we're done */
5524 * Now handle any other pages in the list returned.
5525 * If the page can be used, load up the translations now.
5526 * Note that the for loop will only be entered if "plp"
5527 * is pointing to a non-NULL page pointer which means that
5528 * VOP_GETPAGE() was called and vpprot has been initialized.
5530 if (svd
->pageprot
== 0)
5531 prot
= svd
->prot
& vpprot
;
5535 * Large Files: diff should be unsigned value because we started
5536 * supporting > 2GB segment sizes from 2.5.1 and when a
5537 * large file of size > 2GB gets mapped to address space
5538 * the diff value can be > 2GB.
5541 for (ppp
= plp
; (pp
= *ppp
) != NULL
; ppp
++) {
5545 anon_sync_obj_t cookie
;
5546 int hat_flag
= HAT_LOAD_ADV
;
5548 if (svd
->flags
& MAP_TEXT
) {
5549 hat_flag
|= HAT_LOAD_TEXT
;
5552 if (pp
== PAGE_HANDLED
)
5555 if (svd
->tr_state
!= SEGVN_TR_ON
&&
5556 pp
->p_offset
>= svd
->offset
&&
5557 pp
->p_offset
< svd
->offset
+ seg
->s_size
) {
5559 diff
= pp
->p_offset
- svd
->offset
;
5562 * Large Files: Following is the assertion
5563 * validating the above cast.
5565 ASSERT(svd
->vp
== pp
->p_vnode
);
5569 prot
= VPP_PROT(&svd
->vpage
[page
]) & vpprot
;
5572 * Prevent other threads in the address space from
5573 * creating private pages (i.e., allocating anon slots)
5574 * while we are in the process of loading translations
5575 * to additional pages returned by the underlying
5579 anon_index
= svd
->anon_index
+ page
;
5580 anon_array_enter(amp
, anon_index
, &cookie
);
5581 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
5583 if ((amp
== NULL
) || (ap
== NULL
)) {
5584 if (IS_VMODSORT(pp
->p_vnode
) ||
5588 else if (rw
!= S_OTHER
&&
5590 prot
&= ~PROT_WRITE
;
5593 * Skip mapping read ahead pages marked
5594 * for migration, so they will get migrated
5597 ASSERT(amp
== NULL
||
5598 svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5599 if ((prot
& PROT_READ
) && !PP_ISMIGRATE(pp
)) {
5600 hat_memload_region(hat
,
5607 anon_array_exit(&cookie
);
5613 ANON_LOCK_EXIT(&
->a_rwlock
);
5614 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5616 kmem_free(plp
, pl_alloc_sz
);
5621 * This routine is used to start I/O on pages asynchronously. XXX it will
5622 * only create PAGESIZE pages. At fault time they will be relocated into
5626 segvn_faulta(struct seg
*seg
, caddr_t addr
)
5628 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
5630 struct anon_map
*amp
;
5633 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
5635 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
5636 if ((amp
= svd
->amp
) != NULL
) {
5640 * Reader lock to prevent amp->ahp from being changed.
5641 * This is advisory, it's ok to miss a page, so
5642 * we don't do anon_array_enter lock.
5644 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5645 if ((ap
= anon_get_ptr(amp
->ahp
,
5646 svd
->anon_index
+ seg_page(seg
, addr
))) != NULL
) {
5648 err
= anon_getpage(&ap
, NULL
, NULL
,
5649 0, seg
, addr
, S_READ
, svd
->cred
);
5651 ANON_LOCK_EXIT(&
->a_rwlock
);
5652 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5654 return (FC_MAKE_ERR(err
));
5657 ANON_LOCK_EXIT(&
->a_rwlock
);
5660 if (svd
->vp
== NULL
) {
5661 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5662 return (0); /* zfod page - do nothing now */
5666 TRACE_3(TR_FAC_VM
, TR_SEGVN_GETPAGE
,
5667 "segvn_getpage:seg %p addr %p vp %p", seg
, addr
, vp
);
5668 err
= VOP_GETPAGE(vp
,
5669 (offset_t
)(svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
)),
5670 PAGESIZE
, NULL
, NULL
, 0, seg
, addr
,
5671 S_OTHER
, svd
->cred
, NULL
);
5673 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5675 return (FC_MAKE_ERR(err
));
5680 segvn_setprot(struct seg
*seg
, caddr_t addr
, size_t len
, uint_t prot
)
5682 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
5683 struct vpage
*cvp
, *svp
= NULL
, *evp
= NULL
;
5687 anon_sync_obj_t cookie
;
5688 int unload_done
= 0;
5690 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
5692 if ((svd
->maxprot
& prot
) != prot
)
5693 return (EACCES
); /* violated maxprot */
5695 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5697 /* return if prot is the same */
5698 if (!svd
->pageprot
&& svd
->prot
== prot
) {
5699 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5704 * Since we change protections we first have to flush the cache.
5705 * This makes sure all the pagelock calls have to recheck
5708 if (svd
->softlockcnt
> 0) {
5709 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5712 * If this is shared segment non 0 softlockcnt
5713 * means locked pages are still in use.
5715 if (svd
->type
== MAP_SHARED
) {
5716 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5721 * Since we do have the segvn writers lock nobody can fill
5722 * the cache with entries belonging to this seg during
5723 * the purge. The flush either succeeds or we still have
5727 if (svd
->softlockcnt
> 0) {
5728 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5733 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5734 ASSERT(svd
->amp
== NULL
);
5735 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5736 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
5738 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
5740 } else if (svd
->tr_state
== SEGVN_TR_INIT
) {
5741 svd
->tr_state
= SEGVN_TR_OFF
;
5742 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
5743 ASSERT(svd
->amp
!= NULL
);
5744 segvn_textunrepl(seg
, 0);
5745 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
5749 if ((prot
& PROT_WRITE
) && svd
->type
== MAP_SHARED
&&
5750 svd
->vp
!= NULL
&& (svd
->vp
->v_flag
& VVMEXEC
)) {
5751 ASSERT(vn_is_mapped(svd
->vp
, V_WRITE
));
5752 segvn_inval_trcache(svd
->vp
);
5754 if (seg
->s_szc
!= 0) {
5756 pgsz
= page_get_pagesize(seg
->s_szc
);
5757 pgcnt
= pgsz
>> PAGESHIFT
;
5758 ASSERT(IS_P2ALIGNED(pgcnt
, pgcnt
));
5759 if (!IS_P2ALIGNED(addr
, pgsz
) || !IS_P2ALIGNED(len
, pgsz
)) {
5760 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5761 ASSERT(seg
->s_base
!= addr
|| seg
->s_size
!= len
);
5763 * If we are holding the as lock as a reader then
5764 * we need to return IE_RETRY and let the as
5765 * layer drop and re-acquire the lock as a writer.
5767 if (AS_READ_HELD(seg
->s_as
))
5769 VM_STAT_ADD(segvnvmstats
.demoterange
[1]);
5770 if (svd
->type
== MAP_PRIVATE
|| svd
->vp
!= NULL
) {
5771 err
= segvn_demote_range(seg
, addr
, len
,
5774 uint_t szcvec
= map_pgszcvec(seg
->s_base
,
5775 pgsz
, (uintptr_t)seg
->s_base
,
5776 (svd
->flags
& MAP_TEXT
), MAPPGSZC_SHM
, 0);
5777 err
= segvn_demote_range(seg
, addr
, len
,
5790 * If it's a private mapping and we're making it writable then we
5791 * may have to reserve the additional swap space now. If we are
5792 * making writable only a part of the segment then we use its vpage
5793 * array to keep a record of the pages for which we have reserved
5794 * swap. In this case we set the pageswap field in the segment's
5795 * segvn structure to record this.
5797 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5798 * removing write permission on the entire segment and we haven't
5799 * modified any pages, we can release the swap space.
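/*
 * Rough example of the accounting below (hypothetical numbers): when a
 * 16-page subrange of a private mapping is write-enabled for the first
 * time, the loop counts the vpages that do not yet have VPP_ISSWAPRES
 * set, reserves ptob(16) bytes through anon_resv_zone(), and marks
 * those vpages with VPP_SETSWAPRES so a later call will not reserve
 * them a second time.
 */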
5801 if (svd
->type
== MAP_PRIVATE
) {
5802 if (prot
& PROT_WRITE
) {
5803 if (!(svd
->flags
& MAP_NORESERVE
) &&
5804 !(svd
->swresv
&& svd
->pageswap
== 0)) {
5808 * Start by determining how much swap
5809 * space is required.
5811 if (addr
== seg
->s_base
&&
5812 len
== seg
->s_size
&&
5813 svd
->pageswap
== 0) {
5814 /* The whole segment */
5818 * Make sure that the vpage array
5819 * exists, and make a note of the
5820 * range of elements corresponding
5824 if (svd
->vpage
== NULL
) {
5825 SEGVN_LOCK_EXIT(seg
->s_as
,
5829 svp
= &svd
->vpage
[seg_page(seg
, addr
)];
5830 evp
= &svd
->vpage
[seg_page(seg
,
5833 if (svd
->pageswap
== 0) {
5835 * This is the first time we've
5836 * asked for a part of this
5837 * segment, so we need to
5838 * reserve everything we've
5844 * We have to count the number
5845 * of pages required.
5847 for (cvp
= svp
; cvp
< evp
;
5849 if (!VPP_ISSWAPRES(cvp
))
5856 /* Try to reserve the necessary swap. */
5857 if (anon_resv_zone(sz
,
5858 seg
->s_as
->a_proc
->p_zone
) == 0) {
5859 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5864 * Make a note of how much swap space
5867 if (svd
->pageswap
== 0 && sz
== seg
->s_size
) {
5870 ASSERT(svd
->vpage
!= NULL
);
5873 for (cvp
= svp
; cvp
< evp
; cvp
++) {
5874 if (!VPP_ISSWAPRES(cvp
))
5875 VPP_SETSWAPRES(cvp
);
5881 * Swap space is released only if this segment
5882 * does not map anonymous memory, since read faults
5883 * on such segments still need an anon slot to read
5886 if (svd
->swresv
!= 0 && svd
->vp
!= NULL
&&
5887 svd
->amp
== NULL
&& addr
== seg
->s_base
&&
5888 len
== seg
->s_size
&& svd
->pageprot
== 0) {
5889 ASSERT(svd
->pageswap
== 0);
5890 anon_unresv_zone(svd
->swresv
,
5891 seg
->s_as
->a_proc
->p_zone
);
5893 TRACE_3(TR_FAC_VM
, TR_ANON_PROC
,
5894 "anon proc:%p %lu %u", seg
, 0, 0);
5899 if (addr
== seg
->s_base
&& len
== seg
->s_size
&& svd
->vpage
== NULL
) {
5900 if (svd
->prot
== prot
) {
5901 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5902 return (0); /* all done */
5904 svd
->prot
= (uchar_t
)prot
;
5905 } else if (svd
->type
== MAP_PRIVATE
) {
5906 struct anon
*ap
= NULL
;
5908 u_offset_t offset
, off
;
5909 struct anon_map
*amp
;
5910 ulong_t anon_idx
= 0;
5913 * A vpage structure exists or else the change does not
5914 * involve the entire segment. Establish a vpage structure
5915 * if none is there. Then, for each page in the range,
5916 * adjust its individual permissions. Note that write-
5917 * enabling a MAP_PRIVATE page can affect the claims for
5918 * locked down memory. Overcommitting memory terminates
5922 if (svd
->vpage
== NULL
) {
5923 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5927 if ((amp
= svd
->amp
) != NULL
) {
5928 anon_idx
= svd
->anon_index
+ seg_page(seg
, addr
);
5929 ASSERT(seg
->s_szc
== 0 ||
5930 IS_P2ALIGNED(anon_idx
, pgcnt
));
5931 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5934 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
5935 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
5938 * See Statement at the beginning of segvn_lockop regarding
5939 * the way cowcnts and lckcnts are handled.
5941 for (svp
= &svd
->vpage
[seg_page(seg
, addr
)]; svp
< evp
; svp
++) {
5943 if (seg
->s_szc
!= 0) {
5945 anon_array_enter(amp
, anon_idx
,
5948 if (IS_P2ALIGNED(anon_idx
, pgcnt
) &&
5949 !segvn_claim_pages(seg
, svp
, offset
,
5952 anon_array_exit(&cookie
);
5957 anon_array_exit(&cookie
);
5962 anon_array_enter(amp
, anon_idx
,
5964 ap
= anon_get_ptr(amp
->ahp
, anon_idx
++);
5967 if (VPP_ISPPLOCK(svp
) &&
5968 VPP_PROT(svp
) != prot
) {
5970 if (amp
== NULL
|| ap
== NULL
) {
5974 swap_xlate(ap
, &vp
, &off
);
5976 anon_array_exit(&cookie
);
5978 if ((pp
= page_lookup(vp
, off
,
5979 SE_SHARED
)) == NULL
) {
5980 panic("segvn_setprot: no page");
5983 ASSERT(seg
->s_szc
== 0);
5984 if ((VPP_PROT(svp
) ^ prot
) &
5986 if (prot
& PROT_WRITE
) {
6001 } else if (amp
!= NULL
)
6002 anon_array_exit(&cookie
);
6004 VPP_SETPROT(svp
, prot
);
6008 ANON_LOCK_EXIT(&
->a_rwlock
);
6011 * Did we terminate prematurely? If so, simply unload
6012 * the translations to the things we've updated so far.
6016 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6019 len
= (svp
- &svd
->vpage
[seg_page(seg
, addr
)]) *
6021 ASSERT(seg
->s_szc
== 0 || IS_P2ALIGNED(len
, pgsz
));
6023 hat_unload(seg
->s_as
->a_hat
, addr
,
6025 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6030 if (svd
->vpage
== NULL
) {
6031 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6035 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
6036 for (svp
= &svd
->vpage
[seg_page(seg
, addr
)]; svp
< evp
; svp
++) {
6037 VPP_SETPROT(svp
, prot
);
6042 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6046 if (((prot
& PROT_WRITE
) != 0 &&
6047 (svd
->vp
!= NULL
|| svd
->type
== MAP_PRIVATE
)) ||
6048 (prot
& ~PROT_USER
) == PROT_NONE
) {
6050 * Either private or shared data with write access (in
6051 * which case we need to throw out all former translations
6052 * so that we get the right translations set up on fault
6053 * and we don't allow write access to any copy-on-write pages
6054 * that might be around or to prevent write access to pages
6055 * representing holes in a file), or we don't have permission
6056 * to access the memory at all (in which case we have to
6057 * unload any current translations that might exist).
6059 hat_unload(seg
->s_as
->a_hat
, addr
, len
, HAT_UNLOAD
);
6062 * A shared mapping or a private mapping in which write
6063 * protection is going to be denied - just change all the
6064 * protections over the range of addresses in question.
6065 * segvn does not support any other attributes other
6066 * than prot so we can use hat_chgattr.
6068 hat_chgattr(seg
->s_as
->a_hat
, addr
, len
, prot
);
6071 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6077 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize,
6078 * to determine if the seg is capable of mapping the requested szc.
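/*
 * Roughly, the checks below reject the request when addr/eaddr are not
 * suitably pgsz aligned, when a shared amp's anon index is misaligned,
 * for MAP_NORESERVE or kas mappings, for szc above segvn_maxpgszc, for
 * swapfs/kernel vnodes, and when protections differ within a would-be
 * large page; only after all of that is the range demoted, split or
 * concatenated as needed.
 */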
6081 segvn_setpagesize(struct seg
*seg
, caddr_t addr
, size_t len
, uint_t szc
)
6083 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6084 struct segvn_data
*nsvd
;
6085 struct anon_map
*amp
= svd
->amp
;
6087 caddr_t eaddr
= addr
+ len
, a
;
6088 size_t pgsz
= page_get_pagesize(szc
);
6089 pgcnt_t pgcnt
= page_get_pagecnt(szc
);
6091 u_offset_t off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
6093 ASSERT(seg
->s_as
&& AS_WRITE_HELD(seg
->s_as
));
6094 ASSERT(addr
>= seg
->s_base
&& eaddr
<= seg
->s_base
+ seg
->s_size
);
6096 if (seg
->s_szc
== szc
|| segvn_lpg_disable
!= 0) {
6101 * addr should always be pgsz aligned but eaddr may be misaligned if
6102 * it's at the end of the segment.
6104 * XXX we should assert this condition since as_setpagesize() logic
6107 if (!IS_P2ALIGNED(addr
, pgsz
) ||
6108 (!IS_P2ALIGNED(eaddr
, pgsz
) &&
6109 eaddr
!= seg
->s_base
+ seg
->s_size
)) {
6111 segvn_setpgsz_align_err
++;
6115 if (amp
!= NULL
&& svd
->type
== MAP_SHARED
) {
6116 ulong_t an_idx
= svd
->anon_index
+ seg_page(seg
, addr
);
6117 if (!IS_P2ALIGNED(an_idx
, pgcnt
)) {
6119 segvn_setpgsz_anon_align_err
++;
6124 if ((svd
->flags
& MAP_NORESERVE
) || seg
->s_as
== &kas
||
6125 szc
> segvn_maxpgszc
) {
6129 /* paranoid check */
6130 if (svd
->vp
!= NULL
&&
6131 (IS_SWAPFSVP(svd
->vp
) || VN_ISKAS(svd
->vp
))) {
6135 if (seg
->s_szc
== 0 && svd
->vp
!= NULL
&&
6136 map_addr_vacalign_check(addr
, off
)) {
6141 * Check that protections are the same within new page
6144 if (svd
->pageprot
) {
6145 for (a
= addr
; a
< eaddr
; a
+= pgsz
) {
6146 if ((a
+ pgsz
) > eaddr
) {
6147 if (!sameprot(seg
, a
, eaddr
- a
)) {
6151 if (!sameprot(seg
, a
, pgsz
)) {
6159 * Since we are changing page size we first have to flush
6160 * the cache. This makes sure all the pagelock calls have
6161 * to recheck protections.
6163 if (svd
->softlockcnt
> 0) {
6164 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6167 * If this is shared segment non 0 softlockcnt
6168 * means locked pages are still in use.
6170 if (svd
->type
== MAP_SHARED
) {
6175 * Since we do have the segvn writers lock nobody can fill
6176 * the cache with entries belonging to this seg during
6177 * the purge. The flush either succeeds or we still have
6181 if (svd
->softlockcnt
> 0) {
6186 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
6187 ASSERT(svd
->amp
== NULL
);
6188 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6189 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
6191 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
6192 } else if (svd
->tr_state
== SEGVN_TR_INIT
) {
6193 svd
->tr_state
= SEGVN_TR_OFF
;
6194 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
6195 ASSERT(svd
->amp
!= NULL
);
6196 segvn_textunrepl(seg
, 1);
6197 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
6202 * Operation for sub range of existing segment.
6204 if (addr
!= seg
->s_base
|| eaddr
!= (seg
->s_base
+ seg
->s_size
)) {
6205 if (szc
< seg
->s_szc
) {
6206 VM_STAT_ADD(segvnvmstats
.demoterange
[2]);
6207 err
= segvn_demote_range(seg
, addr
, len
, SDR_RANGE
, 0);
6211 if (err
== ENOMEM
) {
6216 if (addr
!= seg
->s_base
) {
6217 nseg
= segvn_split_seg(seg
, addr
);
6218 if (eaddr
!= (nseg
->s_base
+ nseg
->s_size
)) {
6219 /* eaddr is szc aligned */
6220 (void) segvn_split_seg(nseg
, eaddr
);
6224 if (eaddr
!= (seg
->s_base
+ seg
->s_size
)) {
6225 /* eaddr is szc aligned */
6226 (void) segvn_split_seg(seg
, eaddr
);
6232 * Break any low level sharing and reset seg->s_szc to 0.
6234 if ((err
= segvn_clrszc(seg
)) != 0) {
6235 if (err
== ENOMEM
) {
6240 ASSERT(seg
->s_szc
== 0);
6243 * If the end of the current segment is not pgsz aligned
6244 * then attempt to concatenate with the next segment.
6246 if (!IS_P2ALIGNED(eaddr
, pgsz
)) {
6247 nseg
= AS_SEGNEXT(seg
->s_as
, seg
);
6248 if (nseg
== NULL
|| nseg
== seg
|| eaddr
!= nseg
->s_base
) {
6251 if (nseg
->s_ops
!= &segvn_ops
) {
6254 nsvd
= (struct segvn_data
*)nseg
->s_data
;
6255 if (nsvd
->softlockcnt
> 0) {
6257 * If this is shared segment non 0 softlockcnt
6258 * means locked pages are still in use.
6260 if (nsvd
->type
== MAP_SHARED
) {
6264 if (nsvd
->softlockcnt
> 0) {
6268 err
= segvn_clrszc(nseg
);
6269 if (err
== ENOMEM
) {
6275 ASSERT(nsvd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6276 err
= segvn_concat(seg
, nseg
, 1);
6287 * May need to re-align anon array to
6291 if (!IS_P2ALIGNED(svd
->anon_index
, pgcnt
)) {
6292 struct anon_hdr
*nahp
;
6294 ASSERT(svd
->type
== MAP_PRIVATE
);
6296 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6297 ASSERT(amp
->refcnt
== 1);
6298 nahp
= anon_create(btop(amp
->size
), ANON_NOSLEEP
);
6300 ANON_LOCK_EXIT(&
->a_rwlock
);
6303 if (anon_copy_ptr(amp
->ahp
, svd
->anon_index
,
6304 nahp
, 0, btop(seg
->s_size
), ANON_NOSLEEP
)) {
6305 anon_release(nahp
, btop(amp
->size
));
6306 ANON_LOCK_EXIT(&
->a_rwlock
);
6309 anon_release(amp
->ahp
, btop(amp
->size
));
6311 svd
->anon_index
= 0;
6312 ANON_LOCK_EXIT(&
->a_rwlock
);
6315 if (svd
->vp
!= NULL
&& szc
!= 0) {
6317 u_offset_t eoffpage
= svd
->offset
;
6318 va
.va_mask
= AT_SIZE
;
6319 eoffpage
+= seg
->s_size
;
6320 eoffpage
= btopr(eoffpage
);
6321 if (VOP_GETATTR(svd
->vp
, &va
, 0, svd
->cred
, NULL
) != 0) {
6322 segvn_setpgsz_getattr_err
++;
6325 if (btopr(va
.va_size
) < eoffpage
) {
6326 segvn_setpgsz_eof_err
++;
6331 * anon_fill_cow_holes() may call VOP_GETPAGE().
6332 * don't take anon map lock here to avoid holding it
6333 * across VOP_GETPAGE() calls that may call back into
 * segvn for klustering checks. We don't really need
6335 * anon map lock here since it's a private segment and
6336 * we hold as level lock as writers.
6338 if ((err
= anon_fill_cow_holes(seg
, seg
->s_base
,
6339 amp
->ahp
, svd
->anon_index
, svd
->vp
, svd
->offset
,
6340 seg
->s_size
, szc
, svd
->prot
, svd
->vpage
,
6345 segvn_setvnode_mpss(svd
->vp
);
6349 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6350 if (svd
->type
== MAP_PRIVATE
) {
6352 } else if (szc
> amp
->a_szc
) {
6355 ANON_LOCK_EXIT(&
->a_rwlock
);
6364 segvn_clrszc(struct seg
*seg
)
6366 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6367 struct anon_map
*amp
= svd
->amp
;
6371 caddr_t a
= seg
->s_base
;
6372 caddr_t ea
= a
+ seg
->s_size
;
6373 ulong_t an_idx
= svd
->anon_index
;
6374 vnode_t
*vp
= svd
->vp
;
6375 struct vpage
*vpage
= svd
->vpage
;
6376 page_t
*anon_pl
[1 + 1], *pp
;
6377 struct anon
*ap
, *oldap
;
6378 uint_t prot
= svd
->prot
, vpprot
;
6381 ASSERT(AS_WRITE_HELD(seg
->s_as
) ||
6382 SEGVN_WRITE_HELD(seg
->s_as
, &svd
->lock
));
6383 ASSERT(svd
->softlockcnt
== 0);
6385 if (vp
== NULL
&& amp
== NULL
) {
6386 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6391 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
6392 ASSERT(svd
->amp
== NULL
);
6393 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6394 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
6396 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
6397 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
6398 ASSERT(svd
->amp
!= NULL
);
6399 segvn_textunrepl(seg
, 1);
6400 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
6403 if (svd
->tr_state
!= SEGVN_TR_OFF
) {
6404 ASSERT(svd
->tr_state
== SEGVN_TR_INIT
);
6405 svd
->tr_state
= SEGVN_TR_OFF
;
6409 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6410 * unload argument is 0 when we are freeing the segment
6411 * and unload was already done.
6413 hat_unload(seg
->s_as
->a_hat
, seg
->s_base
, seg
->s_size
,
6417 if (amp
== NULL
|| svd
->type
== MAP_SHARED
) {
6422 pgsz
= page_get_pagesize(seg
->s_szc
);
6426 * XXX anon rwlock is not really needed because this is a
6427 * private segment and we are writers.
6429 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6431 for (; a
< ea
; a
+= pgsz
, an_idx
+= pages
) {
6432 if ((oldap
= anon_get_ptr(amp
->ahp
, an_idx
)) != NULL
) {
6433 ASSERT(vpage
!= NULL
|| svd
->pageprot
== 0);
6434 if (vpage
!= NULL
) {
6435 ASSERT(sameprot(seg
, a
, pgsz
));
6436 prot
= VPP_PROT(vpage
);
6437 pageflag
= VPP_ISPPLOCK(vpage
) ? LOCK_PAGE
: 0;
6439 if (seg
->s_szc
!= 0) {
6440 ASSERT(vp
== NULL
|| anon_pages(amp
->ahp
,
6441 an_idx
, pages
) == pages
);
6442 if ((err
= anon_map_demotepages(amp
, an_idx
,
6443 seg
, a
, prot
, vpage
, svd
->cred
)) != 0) {
6447 if (oldap
->an_refcnt
== 1) {
6450 if ((err
= anon_getpage(&oldap
, &vpprot
,
6451 anon_pl
, PAGESIZE
, seg
, a
, S_READ
,
6455 if ((pp
= anon_private(&ap
, seg
, a
, prot
,
6456 anon_pl
[0], pageflag
, svd
->cred
)) == NULL
) {
6461 (void) anon_set_ptr(amp
->ahp
, an_idx
, ap
,
6466 vpage
= (vpage
== NULL
) ? NULL
: vpage
+ pages
;
6472 ANON_LOCK_EXIT(&
->a_rwlock
);
6484 pgcnt_t pgcnt
= page_get_pagecnt(seg
->s_szc
);
6485 size_t ppasize
= (pgcnt
+ 1) * sizeof (page_t
*);
6487 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6488 struct anon_map
*amp
= svd
->amp
;
6489 struct vpage
*evp
= svp
+ pgcnt
;
6490 caddr_t addr
= ((uintptr_t)(svp
- svd
->vpage
) << PAGESHIFT
)
6493 struct vnode
*vp
= svd
->vp
;
6498 int anon
= (amp
!= NULL
) ? 1 : 0;
6500 ASSERT(svd
->type
== MAP_PRIVATE
);
6501 ASSERT(svd
->vpage
!= NULL
);
6502 ASSERT(seg
->s_szc
!= 0);
6503 ASSERT(IS_P2ALIGNED(pgcnt
, pgcnt
));
6504 ASSERT(amp
== NULL
|| IS_P2ALIGNED(anon_idx
, pgcnt
));
6505 ASSERT(sameprot(seg
, addr
, pgcnt
<< PAGESHIFT
));
6507 if (VPP_PROT(svp
) == prot
)
6509 if (!((VPP_PROT(svp
) ^ prot
) & PROT_WRITE
))
6512 ppa
= kmem_alloc(ppasize
, KM_SLEEP
);
6513 if (anon
&& vp
!= NULL
) {
6514 if (anon_get_ptr(amp
->ahp
, anon_idx
) == NULL
) {
6516 ASSERT(!anon_pages(amp
->ahp
, anon_idx
, pgcnt
));
6519 anon_pages(amp
->ahp
, anon_idx
, pgcnt
) == pgcnt
);
6522 for (*ppa
= NULL
, pg_idx
= 0; svp
< evp
; svp
++, anon_idx
++) {
6523 if (!VPP_ISPPLOCK(svp
))
6526 ap
= anon_get_ptr(amp
->ahp
, anon_idx
);
6528 panic("segvn_claim_pages: no anon slot");
6530 swap_xlate(ap
, &vp
, &aoff
);
6531 off
= (u_offset_t
)aoff
;
6534 if ((pp
= page_lookup(vp
,
6535 (u_offset_t
)off
, SE_SHARED
)) == NULL
) {
6536 panic("segvn_claim_pages: no page");
6542 if (ppa
[0] == NULL
) {
6543 kmem_free(ppa
, ppasize
);
6547 ASSERT(pg_idx
<= pgcnt
);
6551 /* Find each large page within ppa, and adjust its claim */
6553 /* Does ppa cover a single large page? */
6554 if (ppa
[0]->p_szc
== seg
->s_szc
) {
6555 if (prot
& PROT_WRITE
)
6556 err
= page_addclaim_pages(ppa
);
6558 err
= page_subclaim_pages(ppa
);
6560 for (i
= 0; ppa
[i
]; i
+= pgcnt
) {
6561 ASSERT(IS_P2ALIGNED(page_pptonum(ppa
[i
]), pgcnt
));
6562 if (prot
& PROT_WRITE
)
6563 err
= page_addclaim_pages(&ppa
[i
]);
6565 err
= page_subclaim_pages(&ppa
[i
]);
6571 for (i
= 0; i
< pg_idx
; i
++) {
6572 ASSERT(ppa
[i
] != NULL
);
6573 page_unlock(ppa
[i
]);
6576 kmem_free(ppa
, ppasize
);
6581 * Returns right (upper address) segment if split occurred.
6582 * If the address is equal to the beginning or end of its segment it returns
6583 * the current segment.
6586 segvn_split_seg(struct seg
*seg
, caddr_t addr
)
6588 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6591 struct segvn_data
*nsvd
;
6593 ASSERT(AS_WRITE_HELD(seg
->s_as
));
6594 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6596 ASSERT(addr
>= seg
->s_base
);
6597 ASSERT(addr
<= seg
->s_base
+ seg
->s_size
);
6598 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6600 if (addr
== seg
->s_base
|| addr
== seg
->s_base
+ seg
->s_size
)
6603 nsize
= seg
->s_base
+ seg
->s_size
- addr
;
6604 seg
->s_size
= addr
- seg
->s_base
;
6605 nseg
= seg_alloc(seg
->s_as
, addr
, nsize
);
6606 ASSERT(nseg
!= NULL
);
6607 nseg
->s_ops
= seg
->s_ops
;
6608 nsvd
= kmem_cache_alloc(segvn_cache
, KM_SLEEP
);
6609 nseg
->s_data
= (void *)nsvd
;
6610 nseg
->s_szc
= seg
->s_szc
;
6612 ASSERT(nsvd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6614 rw_init(&nsvd
->lock
, NULL
, RW_DEFAULT
, NULL
);
6616 if (nsvd
->vp
!= NULL
) {
6618 nsvd
->offset
= svd
->offset
+
6619 (uintptr_t)(nseg
->s_base
- seg
->s_base
);
6620 if (nsvd
->type
== MAP_SHARED
)
6621 lgrp_shm_policy_init(NULL
, nsvd
->vp
);
 * The offset for an anonymous segment has no significance in
6625 * terms of an offset into a file. If we were to use the above
6626 * calculation instead, the structures read out of
6627 * /proc/<pid>/xmap would be more difficult to decipher since
6628 * it would be unclear whether two seemingly contiguous
6629 * prxmap_t structures represented different segments or a
6630 * single segment that had been split up into multiple prxmap_t
6631 * structures (e.g. if some part of the segment had not yet
6637 ASSERT(svd
->softlockcnt
== 0);
6638 ASSERT(svd
->softlockcnt_sbase
== 0);
6639 ASSERT(svd
->softlockcnt_send
== 0);
6642 if (svd
->vpage
!= NULL
) {
6643 size_t bytes
= vpgtob(seg_pages(seg
));
6644 size_t nbytes
= vpgtob(seg_pages(nseg
));
6645 struct vpage
*ovpage
= svd
->vpage
;
6647 svd
->vpage
= kmem_alloc(bytes
, KM_SLEEP
);
6648 bcopy(ovpage
, svd
->vpage
, bytes
);
6649 nsvd
->vpage
= kmem_alloc(nbytes
, KM_SLEEP
);
6650 bcopy(ovpage
+ seg_pages(seg
), nsvd
->vpage
, nbytes
);
6651 kmem_free(ovpage
, bytes
+ nbytes
);
6653 if (svd
->amp
!= NULL
&& svd
->type
== MAP_PRIVATE
) {
6654 struct anon_map
*oamp
= svd
->amp
, *namp
;
6655 struct anon_hdr
*nahp
;
6657 ANON_LOCK_ENTER(&oamp
->a_rwlock
, RW_WRITER
);
6658 ASSERT(oamp
->refcnt
== 1);
6659 nahp
= anon_create(btop(seg
->s_size
), ANON_SLEEP
);
6660 (void) anon_copy_ptr(oamp
->ahp
, svd
->anon_index
,
6661 nahp
, 0, btop(seg
->s_size
), ANON_SLEEP
);
6663 namp
= anonmap_alloc(nseg
->s_size
, 0, ANON_SLEEP
);
6664 namp
->a_szc
= nseg
->s_szc
;
6665 (void) anon_copy_ptr(oamp
->ahp
,
6666 svd
->anon_index
+ btop(seg
->s_size
),
6667 namp
->ahp
, 0, btop(nseg
->s_size
), ANON_SLEEP
);
6668 anon_release(oamp
->ahp
, btop(oamp
->size
));
6670 oamp
->size
= seg
->s_size
;
6671 svd
->anon_index
= 0;
6673 nsvd
->anon_index
= 0;
6674 ANON_LOCK_EXIT(&oamp
->a_rwlock
);
6675 } else if (svd
->amp
!= NULL
) {
6676 pgcnt_t pgcnt
= page_get_pagecnt(seg
->s_szc
);
6677 ASSERT(svd
->amp
== nsvd
->amp
);
6678 ASSERT(seg
->s_szc
<= svd
->amp
->a_szc
);
6679 nsvd
->anon_index
= svd
->anon_index
+ seg_pages(seg
);
6680 ASSERT(IS_P2ALIGNED(nsvd
->anon_index
, pgcnt
));
6681 ANON_LOCK_ENTER(&svd
->amp
->a_rwlock
, RW_WRITER
);
6683 ANON_LOCK_EXIT(&svd
->amp
->a_rwlock
);
6687 * Split the amount of swap reserved.
6691 * For MAP_NORESERVE, only allocate swap reserve for pages
 * being used. Other segments get enough to cover whole segment.
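/*
 * For example (sketch): a fully reserved private segment that is not
 * MAP_NORESERVE and has no per-page swap reservations ends up with
 * svd->swresv == seg->s_size and nsvd->swresv == nseg->s_size after
 * the split, while a MAP_NORESERVE segment keeps reserve only for the
 * anon pages each half actually holds.
 */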
6695 if (svd
->flags
& MAP_NORESERVE
) {
6699 oswresv
= svd
->swresv
;
6700 svd
->swresv
= ptob(anon_pages(svd
->amp
->ahp
,
6701 svd
->anon_index
, btop(seg
->s_size
)));
6702 nsvd
->swresv
= ptob(anon_pages(nsvd
->amp
->ahp
,
6703 nsvd
->anon_index
, btop(nseg
->s_size
)));
6704 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
6706 if (svd
->pageswap
) {
6707 svd
->swresv
= segvn_count_swap_by_vpages(seg
);
6708 ASSERT(nsvd
->swresv
>= svd
->swresv
);
6709 nsvd
->swresv
-= svd
->swresv
;
6711 ASSERT(svd
->swresv
== seg
->s_size
+
6713 svd
->swresv
= seg
->s_size
;
6714 nsvd
->swresv
= nseg
->s_size
;
6723 * called on memory operations (unmap, setprot, setpagesize) for a subset
6724 * of a large page segment to either demote the memory range (SDR_RANGE)
6725 * or the ends (SDR_END) by addr/len.
6727 * returns 0 on success. returns errno, including ENOMEM, on failure.
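/*
 * Example of the SDR_END case (illustrative): for an operation whose
 * range is page aligned but not large-page aligned, only the large
 * page containing addr (badseg1) and, if different, the one containing
 * eaddr (badseg2) are split off and demoted; the interior of the range
 * keeps its large pages.
 */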
6737 caddr_t eaddr
= addr
+ len
;
6738 caddr_t lpgaddr
, lpgeaddr
;
6740 struct seg
*badseg1
= NULL
;
	struct seg *badseg2 = NULL;
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	uint_t szc = seg->s_szc;

	ASSERT(AS_WRITE_HELD(seg->s_as));
	ASSERT(svd->tr_state == SEGVN_TR_OFF);

	pgsz = page_get_pagesize(szc);
	ASSERT(seg->s_base != addr || seg->s_size != len);
	ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
	ASSERT(svd->softlockcnt == 0);
	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
	ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));

	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
	ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
	if (flag == SDR_RANGE) {
		/* demote entire range */
		badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
		(void) segvn_split_seg(nseg, lpgeaddr);
		ASSERT(badseg1->s_base == lpgaddr);
		ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
	} else if (addr != lpgaddr) {
		ASSERT(flag == SDR_END);
		badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
		if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
		    eaddr < lpgaddr + 2 * pgsz) {
			(void) segvn_split_seg(nseg, lpgeaddr);
			ASSERT(badseg1->s_base == lpgaddr);
			ASSERT(badseg1->s_size == 2 * pgsz);
		} else {
			nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
			ASSERT(badseg1->s_base == lpgaddr);
			ASSERT(badseg1->s_size == pgsz);
			if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
				ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
				nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
				badseg2 = nseg;
				(void) segvn_split_seg(nseg, lpgeaddr);
				ASSERT(badseg2->s_base == lpgeaddr - pgsz);
				ASSERT(badseg2->s_size == pgsz);
			}
		}
	} else {
		ASSERT(flag == SDR_END);
		ASSERT(eaddr < lpgeaddr);
		badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
		(void) segvn_split_seg(nseg, lpgeaddr);
		ASSERT(badseg1->s_base == lpgeaddr - pgsz);
		ASSERT(badseg1->s_size == pgsz);
	}

	ASSERT(badseg1 != NULL);
	ASSERT(badseg1->s_szc == szc);
	ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
	    badseg1->s_size == 2 * pgsz);
	ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
	ASSERT(badseg1->s_size == pgsz ||
	    sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
	if (err = segvn_clrszc(badseg1)) {
		return (err);
	}
	ASSERT(badseg1->s_szc == 0);

	if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
		uint_t tszc = highbit(tszcvec) - 1;
		caddr_t ta = MAX(addr, badseg1->s_base);
		caddr_t te;
		size_t tpgsz = page_get_pagesize(tszc);

		ASSERT(svd->type == MAP_SHARED);
		ASSERT(flag == SDR_END);
		ASSERT(tszc < szc && tszc > 0);

		if (eaddr > badseg1->s_base + badseg1->s_size) {
			te = badseg1->s_base + badseg1->s_size;
		} else {
			te = eaddr;
		}

		badseg1->s_szc = tszc;
		if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
			if (badseg2 != NULL) {
				err = segvn_demote_range(badseg1, ta, te - ta,
				    SDR_END, tszcvec);
				if (err != 0) {
					return (err);
				}
			} else {
				return (segvn_demote_range(badseg1, ta,
				    te - ta, SDR_END, tszcvec));
			}
		}
	}

	if (badseg2 == NULL)
		return (0);
	ASSERT(badseg2->s_szc == szc);
	ASSERT(badseg2->s_size == pgsz);
	ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
	if (err = segvn_clrszc(badseg2)) {
		return (err);
	}
	ASSERT(badseg2->s_szc == 0);

	if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
		uint_t tszc = highbit(tszcvec) - 1;
		size_t tpgsz = page_get_pagesize(tszc);

		ASSERT(svd->type == MAP_SHARED);
		ASSERT(flag == SDR_END);
		ASSERT(tszc < szc && tszc > 0);
		ASSERT(badseg2->s_base > addr);
		ASSERT(eaddr > badseg2->s_base);
		ASSERT(eaddr < badseg2->s_base + badseg2->s_size);

		badseg2->s_szc = tszc;
		if (!IS_P2ALIGNED(eaddr, tpgsz)) {
			return (segvn_demote_range(badseg2, badseg2->s_base,
			    eaddr - badseg2->s_base, SDR_END, tszcvec));
		}
	}

	return (0);
}
static int
segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
	/*
	 * If segment protection can be used, simply check against them.
	 */
	if (svd->pageprot == 0) {
		int err;

		err = ((svd->prot & prot) != prot) ? EACCES : 0;
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (err);
	}

	/*
	 * Have to check down to the vpage level.
	 */
	evp = &svd->vpage[seg_page(seg, addr + len)];
	for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
		if ((VPP_PROT(vp) & prot) != prot) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			return (EACCES);
		}
	}
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (0);
}
static int
segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if (pgno != 0) {
		SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
		if (svd->pageprot == 0) {
			do {
				protv[--pgno] = svd->prot;
			} while (pgno != 0);
		} else {
			size_t pgoff = seg_page(seg, addr);

			do {
				pgno--;
				protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
			} while (pgno != 0);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	}
	return (0);
}
static u_offset_t
segvn_getoffset(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (svd->offset + (uintptr_t)(addr - seg->s_base));
}
static int
segvn_gettype(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
	    MAP_INITDATA)));
}
static int
segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	*vpp = svd->vp;
	return (0);
}
/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For segvn, we currently "approve" of the action if we are
 * still in the segment and it maps from the same vp/off,
 * or if the advice stored in segvn_data or vpages allows it.
 * Currently, klustering is not allowed only if MADV_RANDOM is set.
 */
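/*
 * Illustrative sketch (not part of the original code): a caller deciding
 * whether read-ahead is worthwhile can probe one page past the faulting
 * address, e.g.
 *
 *	if (SEGOP_KLUSTER(seg, addr, PAGESIZE) == 0)
 *		... extend the VOP_GETPAGE request by one page ...
 *
 * and a negative delta (e.g. -PAGESIZE) asks the same question about the
 * preceding page.  The macro name and call site are assumptions here; a
 * return of 0 means "ok to kluster", -1 means the probe leaves the segment,
 * names a different vp/off, or is forbidden by advice.
 */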
static int
segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon *oap, *ap;
	ssize_t pd;
	size_t page;
	struct vnode *vp1, *vp2;
	u_offset_t off1, off2;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(AS_WRITE_HELD(seg->s_as) ||
	    SEGVN_LOCK_HELD(seg->s_as, &svd->lock));

	if (addr + delta < seg->s_base ||
	    addr + delta >= (seg->s_base + seg->s_size))
		return (-1);		/* exceeded segment bounds */

	pd = delta / (ssize_t)PAGESIZE;	/* divide to preserve sign bit */
	page = seg_page(seg, addr);

	/*
	 * Check to see if either of the pages addr or addr + delta
	 * have advice set that prevents klustering (if MADV_RANDOM advice
	 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
	 * is negative).
	 */
	if (svd->advice == MADV_RANDOM ||
	    svd->advice == MADV_SEQUENTIAL && delta < 0)
		return (-1);
	else if (svd->pageadvice && svd->vpage) {
		struct vpage *bvpp, *evpp;

		bvpp = &svd->vpage[page];
		evpp = &svd->vpage[page + pd];
		if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
		    VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
			return (-1);
		if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
		    VPP_ADVICE(evpp) == MADV_RANDOM)
			return (-1);
	}

	if (svd->type == MAP_SHARED)
		return (0);		/* shared mapping - all ok */

	if ((amp = svd->amp) == NULL)
		return (0);		/* off original vnode */

	page += svd->anon_index;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	oap = anon_get_ptr(amp->ahp, page);
	ap = anon_get_ptr(amp->ahp, page + pd);

	ANON_LOCK_EXIT(&amp->a_rwlock);

	if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
		return (-1);		/* one with and one without an anon */
	}

	if (oap == NULL) {		/* implies that ap == NULL */
		return (0);		/* off original vnode */
	}

	/*
	 * Now we know we have two anon pointers - check to
	 * see if they happen to be properly allocated.
	 */

	/*
	 * XXX We cheat here and don't lock the anon slots. We can't because
	 * we may have been called from the anon layer which might already
	 * have locked them. We are holding a refcnt on the slots so they
	 * can't disappear. The worst that will happen is we'll get the wrong
	 * names (vp, off) for the slots and make a poor klustering decision.
	 */
	swap_xlate(ap, &vp1, &off1);
	swap_xlate(oap, &vp2, &off2);

	if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
		return (-1);
	return (0);
}
/*
 * Swap the pages of seg out to secondary storage, returning the
 * number of bytes of storage freed.
 *
 * The basic idea is first to unload all translations and then to call
 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
 * swap device.  Pages to which other segments have mappings will remain
 * mapped and won't be swapped.  Our caller (as_swapout) has already
 * performed the unloading step.
 *
 * The value returned is intended to correlate well with the process's
 * memory requirements.  However, there are some caveats:
 * 1)	When given a shared segment as argument, this routine will
 *	only succeed in swapping out pages for the last sharer of the
 *	segment.  (Previous callers will only have decremented mapping
 *	reference counts.)
 * 2)	We assume that the hat layer maintains a large enough translation
 *	cache to capture process reference patterns.
 */
static size_t
segvn_swapout(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp;
	pgcnt_t pgcnt = 0;
	pgcnt_t npages;
	pgcnt_t page;
	ulong_t anon_index = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
	/*
	 * Find pages unmapped by our caller and force them
	 * out to the virtual swap device.
	 */
	if ((amp = svd->amp) != NULL)
		anon_index = svd->anon_index;
	npages = seg->s_size >> PAGESHIFT;
	for (page = 0; page < npages; page++) {
		page_t *pp;
		struct anon *ap;
		struct vnode *vp;
		u_offset_t off;
		anon_sync_obj_t cookie;

		/*
		 * Obtain <vp, off> pair for the page, then look it up.
		 *
		 * Note that this code is willing to consider regular
		 * pages as well as anon pages.  Is this appropriate here?
		 */
		ap = NULL;
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			if (anon_array_try_enter(amp, anon_index + page,
			    &cookie)) {
				ANON_LOCK_EXIT(&amp->a_rwlock);
				continue;
			}
			ap = anon_get_ptr(amp->ahp, anon_index + page);
			if (ap != NULL) {
				swap_xlate(ap, &vp, &off);
			} else {
				vp = svd->vp;
				off = svd->offset + ptob(page);
			}
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			vp = svd->vp;
			off = svd->offset + ptob(page);
		}
		if (vp == NULL) {		/* untouched zfod page */
			ASSERT(ap == NULL);
			continue;
		}

		pp = page_lookup_nowait(vp, off, SE_SHARED);
		if (pp == NULL)
			continue;

		/*
		 * Examine the page to see whether it can be tossed out,
		 * keeping track of how many we've found.
		 */
		if (!page_tryupgrade(pp)) {
			/*
			 * If the page has an i/o lock and no mappings,
			 * it's very likely that the page is being
			 * written out as a result of klustering.
			 * Assume this is so and take credit for it here.
			 */
			if (!page_io_trylock(pp)) {
				if (!hat_page_is_mapped(pp))
					pgcnt++;
			} else {
				page_io_unlock(pp);
			}
			page_unlock(pp);
			continue;
		}
		ASSERT(!page_iolock_assert(pp));

		/*
		 * Skip if page is locked or has mappings.
		 * We don't need the page_struct_lock to look at lckcnt
		 * and cowcnt because the page is exclusive locked.
		 */
		if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
		    hat_page_is_mapped(pp)) {
			page_unlock(pp);
			continue;
		}

		/*
		 * dispose skips large pages so try to demote first.
		 */
		if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
			page_unlock(pp);
			/*
			 * XXX should skip the remaining page_t's of this
			 * large page.
			 */
			continue;
		}

		ASSERT(pp->p_szc == 0);

		/*
		 * No longer mapped -- we can toss it out.  How
		 * we do so depends on whether or not it's dirty.
		 */
		if (hat_ismod(pp) && pp->p_vnode) {
			/*
			 * We must clean the page before it can be
			 * freed.  Setting B_FREE will cause pvn_done
			 * to free the page when the i/o completes.
			 * XXX:	This also causes it to be accounted
			 *	as a pageout instead of a swap: need
			 *	B_SWAPOUT bit to use instead of B_FREE.
			 *
			 * Hold the vnode before releasing the page lock
			 * to prevent it from being freed and re-used by
			 * some other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Queue all i/o requests for the pageout thread
			 * to avoid saturating the pageout devices.
			 */
			if (!queue_io_request(vp, off))
				VN_RELE(vp);
		} else {
			/*
			 * The page was clean, free it.
			 *
			 * XXX:	Can we ever encounter modified pages
			 *	with no associated vnode here?
			 */
			ASSERT(pp->p_vnode != NULL);
			/*LINTED: constant in conditional context*/
			VN_DISPOSE(pp, B_FREE, 0, kcred);
		}

		/*
		 * Credit now even if i/o is in progress.
		 */
		pgcnt++;
	}
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	/*
	 * Wakeup pageout to initiate i/o on all queued requests.
	 */
	cv_signal_pageout();
	return (ptob(pgcnt));
}
/*
 * Synchronize primary storage cache with real object in virtual memory.
 *
 * XXX - Anonymous pages should not be sync'ed out at all.
 */
static int
segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vpp;
	page_t *pp;
	u_offset_t offset;
	struct vnode *vp;
	u_offset_t off;
	caddr_t eaddr;
	int bflags;
	int err = 0;
	int segtype;
	int pageprot;
	int prot;
	ulong_t anon_index = 0;
	struct anon_map *amp;
	struct anon *ap;
	anon_sync_obj_t cookie;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);

	if (svd->softlockcnt > 0) {
		/*
		 * If this is shared segment non 0 softlockcnt
		 * means locked pages are still in use.
		 */
		if (svd->type == MAP_SHARED) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			return (EAGAIN);
		}

		/*
		 * flush all pages from seg cache
		 * otherwise we may deadlock in swap_putpage
		 * for B_INVAL page (4175402).
		 *
		 * Even if we grab segvn WRITER's lock
		 * here, there might be another thread which could've
		 * successfully performed lookup/insert just before
		 * we acquired the lock here.  So, grabbing either
		 * lock here is of not much use.  Until we devise
		 * a strategy at upper layers to solve the
		 * synchronization issues completely, we expect
		 * applications to handle this appropriately.
		 */
		segvn_purge(seg);
		if (svd->softlockcnt > 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			return (EAGAIN);
		}
	} else if (svd->type == MAP_SHARED && svd->amp != NULL &&
	    svd->amp->a_softlockcnt > 0) {
		/*
		 * Try to purge this amp's entries from pcache. It will
		 * succeed only if other segments that share the amp have no
		 * outstanding softlock's.
		 */
		segvn_purge(seg);
		if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			return (EAGAIN);
		}
	}

	vpp = svd->vpage;
	offset = svd->offset + (uintptr_t)(addr - seg->s_base);
	bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
	    ((flags & MS_INVALIDATE) ? B_INVAL : 0);

	if (attr) {
		pageprot = attr & ~(SHARED|PRIVATE);
		segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;

		/*
		 * We are done if the segment types don't match
		 * or if we have segment level protections and
		 * they don't match.
		 */
		if (svd->type != segtype) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			return (0);
		}
		if (vpp == NULL) {
			if (svd->prot != pageprot) {
				SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
				return (0);
			}
			prot = svd->prot;
		} else
			vpp = &svd->vpage[seg_page(seg, addr)];

	} else if (svd->vp && svd->amp == NULL &&
	    (flags & MS_INVALIDATE) == 0) {

		/*
		 * No attributes, no anonymous pages and MS_INVALIDATE flag
		 * is not on, just use one big request.
		 */
		err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
		    bflags, svd->cred, NULL);
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (err);
	}

	if ((amp = svd->amp) != NULL)
		anon_index = svd->anon_index + seg_page(seg, addr);

	for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
		ap = NULL;
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index++);
			if (ap != NULL) {
				swap_xlate(ap, &vp, &off);
			} else {
				vp = svd->vp;
				off = offset;
			}
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			vp = svd->vp;
			off = offset;
		}
		offset += PAGESIZE;

		if (vp == NULL)		/* untouched zfod page */
			continue;

		if (attr) {
			if (vpp) {
				prot = VPP_PROT(vpp);
				vpp++;
			}
			if (prot != pageprot) {
				continue;
			}
		}

		/*
		 * See if any of these pages are locked --  if so, then we
		 * will have to truncate an invalidate request at the first
		 * locked one. We don't need the page_struct_lock to test
		 * as this is only advisory; even if we acquire it someone
		 * might race in and lock the page after we unlock and before
		 * we do the PUTPAGE, then PUTPAGE simply does nothing.
		 */
		if (flags & MS_INVALIDATE) {
			if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
				if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
					page_unlock(pp);
					SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
					return (EBUSY);
				}
				if (ap != NULL && pp->p_szc != 0 &&
				    page_tryupgrade(pp)) {
					if (pp->p_lckcnt == 0 &&
					    pp->p_cowcnt == 0) {
						/*
						 * swapfs VN_DISPOSE() won't
						 * invalidate large pages.
						 * Attempt to demote.
						 * XXX can't help it if it
						 * fails. But for swapfs
						 * pages it is no big deal.
						 */
						(void) page_try_demote_pages(
						    pp);
					}
				}
				page_unlock(pp);
			}
		} else if (svd->type == MAP_SHARED && amp != NULL) {
			/*
			 * Avoid writing out to disk ISM's large pages
			 * because segspt_free_pages() relies on NULL an_pvp
			 * of anon slots of such pages.
			 */

			ASSERT(svd->vp == NULL);
			/*
			 * swapfs uses page_lookup_nowait if not freeing or
			 * invalidating and skips a page if
			 * page_lookup_nowait returns NULL.
			 */
			pp = page_lookup_nowait(vp, off, SE_SHARED);
			if (pp == NULL) {
				continue;
			}
			if (pp->p_szc != 0) {
				page_unlock(pp);
				continue;
			}

			/*
			 * Note ISM pages are created large so (vp, off)'s
			 * page cannot suddenly become large after we unlock
			 * pp.
			 */
			page_unlock(pp);
		}
		/*
		 * XXX - Should ultimately try to kluster
		 * calls to VOP_PUTPAGE() for performance.
		 */
		VN_HOLD(vp);
		err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
		    (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
		    svd->cred, NULL);
		VN_RELE(vp);
		if (err)
			break;
	}
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (err);
}
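/*
 * Usage note (sketch, not from the original source): this routine is the
 * segment-level backend reached from msync(3C) via memcntl().  The MS_*
 * flags passed by the application map onto the bflags computed above
 * roughly as:
 *
 *	msync(addr, len, MS_SYNC)		-> bflags = 0 (synchronous)
 *	msync(addr, len, MS_ASYNC)		-> bflags = B_ASYNC
 *	msync(addr, len, MS_INVALIDATE)		-> bflags |= B_INVAL
 */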
/*
 * Determine if we have data corresponding to pages in the
 * primary storage virtual memory cache (i.e., "in core").
 */
static size_t
segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vnode *vp, *avp;
	u_offset_t offset, aoffset;
	size_t p, ep;
	int ret;
	struct vpage *vpp;
	page_t *pp;
	uint_t start;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap;
	uint_t attr;
	anon_sync_obj_t cookie;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
	if (svd->amp == NULL && svd->vp == NULL) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		bzero(vec, btopr(len));
		return (len);	/* no anonymous pages created yet */
	}

	p = seg_page(seg, addr);
	ep = seg_page(seg, addr + len);
	start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;

	amp = svd->amp;
	for (; p < ep; p++, addr += PAGESIZE) {
		vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
		ret = start;
		ap = NULL;
		avp = NULL;
		/* Grab the vnode/offset for the anon slot */
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_array_enter(amp, svd->anon_index + p, &cookie);
			ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
			if (ap != NULL) {
				swap_xlate(ap, &avp, &aoffset);
			}
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
		if ((avp != NULL) && page_exists(avp, aoffset)) {
			/* A page exists for the anon slot */
			ret |= SEG_PAGE_INCORE;

			/*
			 * If page is mapped and writable
			 */
			attr = (uint_t)0;
			if ((hat_getattr(seg->s_as->a_hat, addr,
			    &attr) != -1) && (attr & PROT_WRITE)) {
				ret |= SEG_PAGE_ANON;
			}
			/*
			 * Don't get page_struct lock for lckcnt and cowcnt,
			 * since this is purely advisory.
			 */
			if ((pp = page_lookup_nowait(avp, aoffset,
			    SE_SHARED)) != NULL) {
				if (pp->p_lckcnt)
					ret |= SEG_PAGE_SOFTLOCK;
				if (pp->p_cowcnt)
					ret |= SEG_PAGE_HASCOW;
				page_unlock(pp);
			}
		}

		/* Gather vnode statistics */
		vp = svd->vp;
		offset = svd->offset + (uintptr_t)(addr - seg->s_base);

		if (vp != NULL) {
			/*
			 * Try to obtain a "shared" lock on the page
			 * without blocking.  If this fails, determine
			 * if the page is in memory.
			 */
			pp = page_lookup_nowait(vp, offset, SE_SHARED);
			if ((pp == NULL) && (page_exists(vp, offset))) {
				/* Page is incore, and is named */
				ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
			}
			/*
			 * Don't get page_struct lock for lckcnt and cowcnt,
			 * since this is purely advisory.
			 */
			if (pp != NULL) {
				ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
				if (pp->p_lckcnt)
					ret |= SEG_PAGE_SOFTLOCK;
				if (pp->p_cowcnt)
					ret |= SEG_PAGE_HASCOW;
				page_unlock(pp);
			}
		}

		/* Gather virtual page information */
		if (vpp) {
			if (VPP_ISPPLOCK(vpp))
				ret |= SEG_PAGE_LOCKED;
			vpp++;
		}

		*vec++ = (char)ret;
	}
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (len);
}
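/*
 * Usage note (sketch, not from the original source): the per-page status
 * bytes built here back mincore(2)-style "in core" queries on the address
 * space.  Each byte written into 'vec' is a bitwise OR of SEG_PAGE_* flags;
 * for example, a resident, vnode-backed page that is also mlock'd in this
 * mapping would report
 * SEG_PAGE_INCORE | SEG_PAGE_VNODE | SEG_PAGE_LOCKED.
 */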
/*
 * Statement for p_cowcnts/p_lckcnts.
 *
 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
 * irrespective of the following factors or anything else:
 *
 *	(1) anon slots are populated or not
 *	(2) cow is broken or not
 *	(3) refcnt on ap is 1 or greater than 1
 *
 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
 * and munlock.
 *
 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
 *
 *	if vpage has PROT_WRITE
 *		transfer cowcnt on the oldpage -> cowcnt on the newpage
 *	else
 *		transfer lckcnt on the oldpage -> lckcnt on the newpage
 *
 *	During copy-on-write, decrement p_cowcnt on the oldpage and increment
 *	p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
 *
 *	We may also break COW if softlocking on read access in the physio case.
 *	In this case, vpage may not have PROT_WRITE. So, we need to decrement
 *	p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
 *	vpage doesn't have PROT_WRITE.
 *
 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
 *
 *	If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
 *	increment p_lckcnt by calling page_subclaim() which takes care of
 *	availrmem accounting and p_lckcnt overflow.
 *
 *	If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
 *	increment p_cowcnt by calling page_addclaim() which takes care of
 *	availrmem availability and p_cowcnt overflow.
 */
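/*
 * Worked example (illustrative only, restating the rules above): mlock(2)
 * of one resident page in a MAP_PRIVATE, PROT_READ|PROT_WRITE mapping bumps
 * that page's p_cowcnt, whether or not the COW has been broken yet and
 * regardless of the anon refcnt.  mlock(2) of the same page through a
 * MAP_SHARED mapping, or through a private mapping that lacks PROT_WRITE,
 * bumps p_lckcnt instead.  If mprotect(2) later removes PROT_WRITE from the
 * locked private page, page_subclaim() converts the cow claim into a lock
 * claim; adding PROT_WRITE back goes the other way via page_addclaim().
 */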
/*
 * Lock down (or unlock) pages mapped by this segment.
 *
 * XXX only creates PAGESIZE pages if anon slots are not initialized.
 * At fault time they will be relocated into larger pages.
 */
7640 segvn_lockop(struct seg
*seg
, caddr_t addr
, size_t len
,
7641 int attr
, int op
, ulong_t
*lockmap
, size_t pos
)
7643 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
7653 ulong_t anon_index
= 0;
7654 struct anon_map
*amp
;
7657 anon_sync_obj_t cookie
;
7658 struct kshmid
*sp
= NULL
;
7659 struct proc
*p
= curproc
;
7660 kproject_t
*proj
= NULL
;
7662 size_t locked_bytes
= 0;
7663 size_t unlocked_bytes
= 0;
7667 * Hold write lock on address space because may split or concatenate
7670 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
7673 * If this is a shm, use shm's project and zone, else use
7674 * project and zone of calling process
7677 /* Determine if this segment backs a sysV shm */
7678 if (svd
->amp
!= NULL
&& svd
->amp
->a_sp
!= NULL
) {
7679 ASSERT(svd
->type
== MAP_SHARED
);
7680 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
7681 sp
= svd
->amp
->a_sp
;
7682 proj
= sp
->shm_perm
.ipc_proj
;
7686 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
7688 pageprot
= attr
& ~(SHARED
|PRIVATE
);
7689 segtype
= attr
& SHARED
? MAP_SHARED
: MAP_PRIVATE
;
7692 * We are done if the segment types don't match
7693 * or if we have segment level protections and
7696 if (svd
->type
!= segtype
) {
7697 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7700 if (svd
->pageprot
== 0 && svd
->prot
!= pageprot
) {
7701 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7706 if (op
== MC_LOCK
) {
7707 if (svd
->tr_state
== SEGVN_TR_INIT
) {
7708 svd
->tr_state
= SEGVN_TR_OFF
;
7709 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
7710 ASSERT(svd
->amp
!= NULL
);
7711 segvn_textunrepl(seg
, 0);
7712 ASSERT(svd
->amp
== NULL
&&
7713 svd
->tr_state
== SEGVN_TR_OFF
);
7718 * If we're locking, then we must create a vpage structure if
7719 * none exists. If we're unlocking, then check to see if there
7720 * is a vpage -- if not, then we could not have locked anything.
7723 if ((vpp
= svd
->vpage
) == NULL
) {
7724 if (op
== MC_LOCK
) {
7726 if (svd
->vpage
== NULL
) {
7727 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7731 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7737 * The anonymous data vector (i.e., previously
7738 * unreferenced mapping to swap space) can be allocated
7739 * by lazily testing for its existence.
7741 if (op
== MC_LOCK
&& svd
->amp
== NULL
&& svd
->vp
== NULL
) {
7742 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
7743 svd
->amp
= anonmap_alloc(seg
->s_size
, 0, ANON_SLEEP
);
7744 svd
->amp
->a_szc
= seg
->s_szc
;
7747 if ((amp
= svd
->amp
) != NULL
) {
7748 anon_index
= svd
->anon_index
+ seg_page(seg
, addr
);
7751 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
7752 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
7755 mutex_enter(&sp
->shm_mlock
);
7757 /* determine number of unlocked bytes in range for lock operation */
7758 if (op
== MC_LOCK
) {
7761 for (vpp
= &svd
->vpage
[seg_page(seg
, addr
)]; vpp
< evp
;
7763 if (!VPP_ISPPLOCK(vpp
))
7764 unlocked_bytes
+= PAGESIZE
;
7767 ulong_t i_idx
, i_edx
;
7768 anon_sync_obj_t i_cookie
;
7773 /* Only count sysV pages once for locked memory */
7774 i_edx
= svd
->anon_index
+ seg_page(seg
, addr
+ len
);
7775 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7776 for (i_idx
= anon_index
; i_idx
< i_edx
; i_idx
++) {
7777 anon_array_enter(amp
, i_idx
, &i_cookie
);
7778 i_ap
= anon_get_ptr(amp
->ahp
, i_idx
);
7780 unlocked_bytes
+= PAGESIZE
;
7781 anon_array_exit(&i_cookie
);
7784 swap_xlate(i_ap
, &i_vp
, &i_off
);
7785 anon_array_exit(&i_cookie
);
7786 pp
= page_lookup(i_vp
, i_off
, SE_SHARED
);
7788 unlocked_bytes
+= PAGESIZE
;
7790 } else if (pp
->p_lckcnt
== 0)
7791 unlocked_bytes
+= PAGESIZE
;
7794 ANON_LOCK_EXIT(&
->a_rwlock
);
7797 mutex_enter(&p
->p_lock
);
7798 err
= rctl_incr_locked_mem(p
, proj
, unlocked_bytes
,
7800 mutex_exit(&p
->p_lock
);
7804 mutex_exit(&sp
->shm_mlock
);
7805 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7810 * Loop over all pages in the range. Process if we're locking and
7811 * page has not already been locked in this mapping; or if we're
7812 * unlocking and the page has been locked.
7814 for (vpp
= &svd
->vpage
[seg_page(seg
, addr
)]; vpp
< evp
;
7815 vpp
++, pos
++, addr
+= PAGESIZE
, offset
+= PAGESIZE
, anon_index
++) {
7816 if ((attr
== 0 || VPP_PROT(vpp
) == pageprot
) &&
7817 ((op
== MC_LOCK
&& !VPP_ISPPLOCK(vpp
)) ||
7818 (op
== MC_UNLOCK
&& VPP_ISPPLOCK(vpp
)))) {
7821 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7823 * If this isn't a MAP_NORESERVE segment and
7824 * we're locking, allocate anon slots if they
7825 * don't exist. The page is brought in later on.
7827 if (op
== MC_LOCK
&& svd
->vp
== NULL
&&
7828 ((svd
->flags
& MAP_NORESERVE
) == 0) &&
7830 ((ap
= anon_get_ptr(amp
->ahp
, anon_index
))
7832 anon_array_enter(amp
, anon_index
, &cookie
);
7834 if ((ap
= anon_get_ptr(amp
->ahp
,
7835 anon_index
)) == NULL
) {
7836 pp
= anon_zero(seg
, addr
, &ap
,
7839 anon_array_exit(&cookie
);
7840 ANON_LOCK_EXIT(&
->a_rwlock
);
7844 ASSERT(anon_get_ptr(amp
->ahp
,
7845 anon_index
) == NULL
);
7846 (void) anon_set_ptr(amp
->ahp
,
7847 anon_index
, ap
, ANON_SLEEP
);
7850 anon_array_exit(&cookie
);
7854 * Get name for page, accounting for
7855 * existence of private copy.
7859 anon_array_enter(amp
, anon_index
, &cookie
);
7860 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
7862 swap_xlate(ap
, &vp
, &off
);
7864 if (svd
->vp
== NULL
&&
7865 (svd
->flags
& MAP_NORESERVE
)) {
7866 anon_array_exit(&cookie
);
7867 ANON_LOCK_EXIT(&
->a_rwlock
);
7873 if (op
!= MC_LOCK
|| ap
== NULL
) {
7874 anon_array_exit(&cookie
);
7875 ANON_LOCK_EXIT(&
->a_rwlock
);
7883 * Get page frame. It's ok if the page is
7884 * not available when we're unlocking, as this
7885 * may simply mean that a page we locked got
7886 * truncated out of existence after we locked it.
7888 * Invoke VOP_GETPAGE() to obtain the page struct
7889 * since we may need to read it from disk if its
7893 pp
= page_lookup(vp
, off
, SE_SHARED
);
7900 error
= VOP_GETPAGE(vp
, (offset_t
)off
, PAGESIZE
,
7901 (uint_t
*)NULL
, pl
, PAGESIZE
, seg
, addr
,
7902 S_OTHER
, svd
->cred
, NULL
);
7904 if (error
&& ap
!= NULL
) {
7905 anon_array_exit(&cookie
);
7906 ANON_LOCK_EXIT(&
->a_rwlock
);
7910 * If the error is EDEADLK then we must bounce
7911 * up and drop all vm subsystem locks and then
7912 * retry the operation later
7913 * This behavior is a temporary measure because
7914 * ufs/sds logging is badly designed and will
7915 * deadlock if we don't allow this bounce to
7916 * happen. The real solution is to re-design
7917 * the logging code to work properly. See bug
7918 * 4125102 for details of the problem.
7920 if (error
== EDEADLK
) {
7925 * Quit if we fail to fault in the page. Treat
7926 * the failure as an error, unless the addr
7927 * is mapped beyond the end of a file.
7929 if (error
&& svd
->vp
) {
7930 va
.va_mask
= AT_SIZE
;
7931 if (VOP_GETATTR(svd
->vp
, &va
, 0,
7932 svd
->cred
, NULL
) != 0) {
7936 if (btopr(va
.va_size
) >=
7952 * See Statement at the beginning of this routine.
7954 * claim is always set if MAP_PRIVATE and PROT_WRITE
7955 * irrespective of following factors:
7957 * (1) anon slots are populated or not
7958 * (2) cow is broken or not
7959 * (3) refcnt on ap is 1 or greater than 1
7961 * See 4140683 for details
7963 claim
= ((VPP_PROT(vpp
) & PROT_WRITE
) &&
7964 (svd
->type
== MAP_PRIVATE
));
7967 * Perform page-level operation appropriate to
7968 * operation. If locking, undo the SOFTLOCK
7969 * performed to bring the page into memory
7970 * after setting the lock. If unlocking,
7971 * and no page was found, account for the claim
7974 if (op
== MC_LOCK
) {
7975 int ret
= 1; /* Assume success */
7977 ASSERT(!VPP_ISPPLOCK(vpp
));
7979 ret
= page_pp_lock(pp
, claim
, 0);
7981 if (ap
->an_pvp
!= NULL
) {
7982 anon_swap_free(ap
, pp
);
7984 anon_array_exit(&cookie
);
7985 ANON_LOCK_EXIT(&
->a_rwlock
);
7988 /* locking page failed */
7995 if (pp
->p_lckcnt
== 1)
7996 locked_bytes
+= PAGESIZE
;
7998 locked_bytes
+= PAGESIZE
;
8000 if (lockmap
!= (ulong_t
*)NULL
)
8001 BT_SET(lockmap
, pos
);
8005 ASSERT(VPP_ISPPLOCK(vpp
));
8007 /* sysV pages should be locked */
8008 ASSERT(sp
== NULL
|| pp
->p_lckcnt
> 0);
8009 page_pp_unlock(pp
, claim
, 0);
8011 if (pp
->p_lckcnt
== 0)
8015 unlocked_bytes
+= PAGESIZE
;
8019 unlocked_bytes
+= PAGESIZE
;
8026 if (op
== MC_LOCK
) {
8027 /* Credit back bytes that did not get locked */
8028 if ((unlocked_bytes
- locked_bytes
) > 0) {
8030 mutex_enter(&p
->p_lock
);
8031 rctl_decr_locked_mem(p
, proj
,
8032 (unlocked_bytes
- locked_bytes
), chargeproc
);
8034 mutex_exit(&p
->p_lock
);
8038 /* Account bytes that were unlocked */
8039 if (unlocked_bytes
> 0) {
8041 mutex_enter(&p
->p_lock
);
8042 rctl_decr_locked_mem(p
, proj
, unlocked_bytes
,
8045 mutex_exit(&p
->p_lock
);
8049 mutex_exit(&sp
->shm_mlock
);
8050 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
/*
 * Set advice from user for specified pages
 * There are 10 types of advice:
 *	MADV_NORMAL	- Normal (default) behavior (whatever that is)
 *	MADV_RANDOM	- Random page references
 *				do not allow readahead or 'klustering'
 *	MADV_SEQUENTIAL	- Sequential page references
 *				Pages previous to the one currently being
 *				accessed (determined by fault) are 'not needed'
 *				and are freed immediately
 *	MADV_WILLNEED	- Pages are likely to be used (fault ahead in mctl)
 *	MADV_DONTNEED	- Pages are not needed (synced out in mctl)
 *	MADV_FREE	- Contents can be discarded
 *	MADV_ACCESS_DEFAULT- Default access
 *	MADV_ACCESS_LWP	- Next LWP will access heavily
 *	MADV_ACCESS_MANY- Many LWPs or processes will access heavily
 *	MADV_PURGE	- Contents will be immediately discarded
 */
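/*
 * Usage sketch (not part of the original code): applications reach this
 * routine through madvise(3C), which is layered on memcntl(2) with the
 * MC_ADVISE command, e.g.
 *
 *	madvise(base, len, MADV_SEQUENTIAL);
 *	madvise(base, len, MADV_RANDOM);
 *
 * MADV_WILLNEED and MADV_DONTNEED are serviced in memcntl() itself
 * (fault-ahead / sync-out) and therefore fall through the switch below
 * without further work here.
 */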
8074 segvn_advise(struct seg
*seg
, caddr_t addr
, size_t len
, uint_t behav
)
8076 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
8080 struct anon_map
*amp
;
8083 lgrp_mem_policy_t policy
;
8087 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
8090 * In case of MADV_FREE/MADV_PURGE, we won't be modifying any segment
8091 * private data structures; so, we only need to grab READER's lock
8093 if (behav
!= MADV_FREE
&& behav
!= MADV_PURGE
) {
8094 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
8095 if (svd
->tr_state
!= SEGVN_TR_OFF
) {
8096 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8100 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
8104 * Large pages are assumed to be only turned on when accesses to the
8105 * segment's address range have spatial and temporal locality. That
8106 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8107 * Also, ignore advice affecting lgroup memory allocation
8108 * if don't need to do lgroup optimizations on this system
8111 if ((behav
== MADV_SEQUENTIAL
&&
8112 (seg
->s_szc
!= 0 || HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
))) ||
8113 (!lgrp_optimizations() && (behav
== MADV_ACCESS_DEFAULT
||
8114 behav
== MADV_ACCESS_LWP
|| behav
== MADV_ACCESS_MANY
))) {
8115 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8119 if (behav
== MADV_SEQUENTIAL
|| behav
== MADV_ACCESS_DEFAULT
||
8120 behav
== MADV_ACCESS_LWP
|| behav
== MADV_ACCESS_MANY
) {
8122 * Since we are going to unload hat mappings
8123 * we first have to flush the cache. Otherwise
8124 * this might lead to system panic if another
8125 * thread is doing physio on the range whose
8126 * mappings are unloaded by madvise(3C).
8128 if (svd
->softlockcnt
> 0) {
8130 * If this is shared segment non 0 softlockcnt
8131 * means locked pages are still in use.
8133 if (svd
->type
== MAP_SHARED
) {
8134 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8138 * Since we do have the segvn writers lock
8139 * nobody can fill the cache with entries
8140 * belonging to this seg during the purge.
8141 * The flush either succeeds or we still
8142 * have pending I/Os. In the later case,
8143 * madvise(3C) fails.
8146 if (svd
->softlockcnt
> 0) {
8148 * Since madvise(3C) is advisory and
8149 * it's not part of UNIX98, madvise(3C)
8150 * failure here doesn't cause any hardship.
8151 * Note that we don't block in "as" layer.
8153 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8156 } else if (svd
->type
== MAP_SHARED
&& svd
->amp
!= NULL
&&
8157 svd
->amp
->a_softlockcnt
> 0) {
8159 * Try to purge this amp's entries from pcache. It
8160 * will succeed only if other segments that share the
8161 * amp have no outstanding softlock's.
8169 if (behav
== MADV_FREE
|| behav
== MADV_PURGE
) {
8172 if (behav
== MADV_FREE
&& (vp
!= NULL
|| amp
== NULL
)) {
8174 * MADV_FREE is not supported for segments with an
8175 * underlying object; if anonmap is NULL, anon slots
8176 * are not yet populated and there is nothing for us
8177 * to do. As MADV_FREE is advisory, we don't return an
8178 * error in either case.
8180 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8186 * If we're here with a NULL anonmap, it's because we
8187 * are doing a MADV_PURGE. We have nothing to do, but
8188 * because MADV_PURGE isn't merely advisory, we return
8189 * an error in this case.
8191 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8197 page
= seg_page(seg
, addr
);
8198 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
8199 err
= anon_disclaim(amp
,
8200 svd
->anon_index
+ page
, len
, behav
, &purged
);
8202 if (purged
!= 0 && (svd
->flags
& MAP_NORESERVE
)) {
8204 * If we purged pages on a MAP_NORESERVE mapping, we
8205 * need to be sure to now unreserve our reserved swap.
8206 * (We use the atomic operations to manipulate our
8207 * segment and address space counters because we only
8208 * have the corresponding locks held as reader, not
8211 ssize_t bytes
= ptob(purged
);
8213 anon_unresv_zone(bytes
, seg
->s_as
->a_proc
->p_zone
);
8214 atomic_add_long(&svd
->swresv
, -bytes
);
8215 atomic_add_long(&seg
->s_as
->a_resvsize
, -bytes
);
8218 ANON_LOCK_EXIT(&
->a_rwlock
);
8219 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8222 * MADV_PURGE and MADV_FREE differ in their return semantics:
8223 * because MADV_PURGE is designed to be bug-for-bug compatible
8224 * with its clumsy Linux forebear, it will fail where MADV_FREE
8227 return (behav
== MADV_PURGE
? err
: 0);
8231 * If advice is to be applied to entire segment,
8232 * use advice field in seg_data structure
8233 * otherwise use appropriate vpage entry.
8235 if ((addr
== seg
->s_base
) && (len
== seg
->s_size
)) {
8237 case MADV_ACCESS_LWP
:
8238 case MADV_ACCESS_MANY
:
8239 case MADV_ACCESS_DEFAULT
:
8241 * Set memory allocation policy for this segment
8243 policy
= lgrp_madv_to_policy(behav
, len
, svd
->type
);
8244 if (svd
->type
== MAP_SHARED
)
8245 already_set
= lgrp_shm_policy_set(policy
, amp
,
8246 svd
->anon_index
, vp
, svd
->offset
, len
);
8249 * For private memory, need writers lock on
8250 * address space because the segment may be
8251 * split or concatenated when changing policy
8253 if (AS_READ_HELD(seg
->s_as
)) {
8254 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8258 already_set
= lgrp_privm_policy_set(policy
,
8259 &svd
->policy_info
, len
);
8263 * If policy set already and it shouldn't be reapplied,
8264 * don't do anything.
8267 !LGRP_MEM_POLICY_REAPPLICABLE(policy
))
8271 * Mark any existing pages in given range for
8274 page_mark_migrate(seg
, addr
, len
, amp
, svd
->anon_index
,
8275 vp
, svd
->offset
, 1);
8278 * If same policy set already or this is a shared
8279 * memory segment, don't need to try to concatenate
8280 * segment with adjacent ones.
8282 if (already_set
|| svd
->type
== MAP_SHARED
)
8286 * Try to concatenate this segment with previous
8287 * one and next one, since we changed policy for
8288 * this one and it may be compatible with adjacent
8291 prev
= AS_SEGPREV(seg
->s_as
, seg
);
8292 next
= AS_SEGNEXT(seg
->s_as
, seg
);
8294 if (next
&& next
->s_ops
== &segvn_ops
&&
8295 addr
+ len
== next
->s_base
)
8296 (void) segvn_concat(seg
, next
, 1);
8298 if (prev
&& prev
->s_ops
== &segvn_ops
&&
8299 addr
== prev
->s_base
+ prev
->s_size
) {
8301 * Drop lock for private data of current
8302 * segment before concatenating (deleting) it
8303 * and return IE_REATTACH to tell as_ctl() that
8304 * current segment has changed
8306 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8307 if (!segvn_concat(prev
, seg
, 1))
8314 case MADV_SEQUENTIAL
:
8316 * unloading mapping guarantees
8317 * detection in segvn_fault
8319 ASSERT(seg
->s_szc
== 0);
8320 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
8321 hat_unload(seg
->s_as
->a_hat
, addr
, len
,
8326 svd
->advice
= (uchar_t
)behav
;
8327 svd
->pageadvice
= 0;
8329 case MADV_WILLNEED
: /* handled in memcntl */
8330 case MADV_DONTNEED
: /* handled in memcntl */
8331 case MADV_FREE
: /* handled above */
8332 case MADV_PURGE
: /* handled above */
8339 struct seg
*new_seg
;
8340 struct segvn_data
*new_svd
= NULL
;
8344 page
= seg_page(seg
, addr
);
8347 if (svd
->vpage
== NULL
) {
8348 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8353 struct vpage
*bvpp
, *evpp
;
8355 case MADV_ACCESS_LWP
:
8356 case MADV_ACCESS_MANY
:
8357 case MADV_ACCESS_DEFAULT
:
8359 * Set memory allocation policy for portion of this
8364 * Align address and length of advice to page
8365 * boundaries for large pages
8367 if (seg
->s_szc
!= 0) {
8370 pgsz
= page_get_pagesize(seg
->s_szc
);
8371 addr
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
8372 len
= P2ROUNDUP(len
, pgsz
);
8376 * Check to see whether policy is set already
8378 policy
= lgrp_madv_to_policy(behav
, len
, svd
->type
);
8380 anon_index
= svd
->anon_index
+ page
;
8381 off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
8383 if (svd
->type
== MAP_SHARED
)
8384 already_set
= lgrp_shm_policy_set(policy
, amp
,
8385 anon_index
, vp
, off
, len
);
8388 (policy
== svd
->policy_info
.mem_policy
);
8391 * If policy set already and it shouldn't be reapplied,
8392 * don't do anything.
8395 !LGRP_MEM_POLICY_REAPPLICABLE(policy
))
8399 * For private memory, need writers lock on
8400 * address space because the segment may be
8401 * split or concatenated when changing policy
8403 if (svd
->type
== MAP_PRIVATE
&&
8404 AS_READ_HELD(seg
->s_as
)) {
8405 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8410 * Mark any existing pages in given range for
8413 page_mark_migrate(seg
, addr
, len
, amp
, svd
->anon_index
,
8414 vp
, svd
->offset
, 1);
8417 * Don't need to try to split or concatenate
8418 * segments, since policy is same or this is a shared
8421 if (already_set
|| svd
->type
== MAP_SHARED
)
8424 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
8425 ASSERT(svd
->amp
== NULL
);
8426 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
8427 ASSERT(svd
->softlockcnt
== 0);
8428 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
8430 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
8434 * Split off new segment if advice only applies to a
8435 * portion of existing segment starting in middle
8439 oldeaddr
= seg
->s_base
+ seg
->s_size
;
8440 if (addr
> seg
->s_base
) {
8442 * Must flush I/O page cache
8443 * before splitting segment
8445 if (svd
->softlockcnt
> 0)
8449 * Split segment and return IE_REATTACH to tell
8450 * as_ctl() that current segment changed
8452 new_seg
= segvn_split_seg(seg
, addr
);
8453 new_svd
= (struct segvn_data
*)new_seg
->s_data
;
8457 * If new segment ends where old one
8458 * did, try to concatenate the new
8459 * segment with next one.
8461 if (eaddr
== oldeaddr
) {
8463 * Set policy for new segment
8465 (void) lgrp_privm_policy_set(policy
,
8466 &new_svd
->policy_info
,
8469 next
= AS_SEGNEXT(new_seg
->s_as
,
8473 next
->s_ops
== &segvn_ops
&&
8474 eaddr
== next
->s_base
)
8475 (void) segvn_concat(new_seg
,
8481 * Split off end of existing segment if advice only
8482 * applies to a portion of segment ending before
8483 * end of the existing segment
8485 if (eaddr
< oldeaddr
) {
8487 * Must flush I/O page cache
8488 * before splitting segment
8490 if (svd
->softlockcnt
> 0)
8494 * If beginning of old segment was already
8495 * split off, use new segment to split end off
8498 if (new_seg
!= NULL
&& new_seg
!= seg
) {
8502 (void) segvn_split_seg(new_seg
, eaddr
);
8505 * Set policy for new segment
8507 (void) lgrp_privm_policy_set(policy
,
8508 &new_svd
->policy_info
,
8512 * Split segment and return IE_REATTACH
8513 * to tell as_ctl() that current
8516 (void) segvn_split_seg(seg
, eaddr
);
8519 (void) lgrp_privm_policy_set(policy
,
8520 &svd
->policy_info
, seg
->s_size
);
8523 * If new segment starts where old one
8524 * did, try to concatenate it with
8527 if (addr
== seg
->s_base
) {
8528 prev
= AS_SEGPREV(seg
->s_as
,
8532 * Drop lock for private data
8533 * of current segment before
8534 * concatenating (deleting) it
8539 addr
== prev
->s_base
+
8544 (void) segvn_concat(
8552 case MADV_SEQUENTIAL
:
8553 ASSERT(seg
->s_szc
== 0);
8554 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
8555 hat_unload(seg
->s_as
->a_hat
, addr
, len
, HAT_UNLOAD
);
8559 bvpp
= &svd
->vpage
[page
];
8560 evpp
= &svd
->vpage
[page
+ (len
>> PAGESHIFT
)];
8561 for (; bvpp
< evpp
; bvpp
++)
8562 VPP_SETADVICE(bvpp
, behav
);
8563 svd
->advice
= MADV_NORMAL
;
8565 case MADV_WILLNEED
: /* handled in memcntl */
8566 case MADV_DONTNEED
: /* handled in memcntl */
8567 case MADV_FREE
: /* handled above */
8568 case MADV_PURGE
: /* handled above */
8574 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
/*
 * There is one kind of inheritance that can be specified for pages:
 *
 *     SEGP_INH_ZERO - Pages should be zeroed in the child
 */
static int
segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *bvpp, *evpp;
	size_t page;
	int ret = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Can't support something we don't know about */
	if (behav != SEGP_INH_ZERO)
		return (ENOTSUP);

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);

	/*
	 * This must be a straightforward anonymous segment that is mapped
	 * privately and is not backed by a vnode.
	 */
	if (svd->tr_state != SEGVN_TR_OFF ||
	    svd->type != MAP_PRIVATE ||
	    svd->vp != NULL) {
		ret = EINVAL;
		goto out;
	}

	/*
	 * If the entire segment has been marked as inherit zero, then no reason
	 * to do anything else.
	 */
	if (svd->svn_inz == SEGVN_INZ_ALL) {
		ret = 0;
		goto out;
	}

	/*
	 * If this applies to the entire segment, simply mark it and we're done.
	 */
	if ((addr == seg->s_base) && (len == seg->s_size)) {
		svd->svn_inz = SEGVN_INZ_ALL;
		ret = 0;
		goto out;
	}

	/*
	 * We've been asked to mark a subset of this segment as inherit zero,
	 * therefore we need to manipulate its vpages.
	 */
	if (svd->vpage == NULL) {
		segvn_vpage(seg);
		if (svd->vpage == NULL) {
			ret = ENOMEM;
			goto out;
		}
	}

	svd->svn_inz = SEGVN_INZ_VPP;
	page = seg_page(seg, addr);
	bvpp = &svd->vpage[page];
	evpp = &svd->vpage[page + (len >> PAGESHIFT)];
	for (; bvpp < evpp; bvpp++)
		VPP_SETINHZERO(bvpp);
	ret = 0;

out:
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (ret);
}
/*
 * Create a vpage structure for this seg.
 */
static void
segvn_vpage(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	static pgcnt_t page_limit = 0;

	ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));

	/*
	 * If no vpage structure exists, allocate one.  Copy the protections
	 * and the advice from the segment itself to the individual pages.
	 */
	if (svd->vpage == NULL) {
		/*
		 * Start by calculating the number of pages we must allocate to
		 * track the per-page vpage structs needs for this entire
		 * segment. If we know now that it will require more than our
		 * heuristic for the maximum amount of kmem we can consume then
		 * fail. We do this here, instead of trying to detect this deep
		 * in page_resv and propagating the error up, since the entire
		 * memory allocation stack is not amenable to passing this
		 * back. Instead, it wants to keep trying.
		 *
		 * As a heuristic we set a page limit of 5/8s of total_pages
		 * for this allocation. We use shifts so that no floating
		 * point conversion takes place and only need to do the
		 * calculation once.
		 */
		ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
		pgcnt_t npages = mem_needed >> PAGESHIFT;

		if (page_limit == 0)
			page_limit = (total_pages >> 1) + (total_pages >> 3);

		if (npages > page_limit)
			return;

		svd->pageadvice = 1;
		svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
		evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
		for (vp = svd->vpage; vp < evp; vp++) {
			VPP_SETPROT(vp, svd->prot);
			VPP_SETADVICE(vp, svd->advice);
		}
	}
}
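/*
 * Arithmetic note (illustrative): the limit above is computed with shifts,
 * (total_pages >> 1) + (total_pages >> 3), i.e. 1/2 + 1/8 = 5/8 of
 * total_pages, so no floating point is needed.  For example, with
 * total_pages = 1048576 (4GB of 4K pages) the vpage allocation is refused
 * once it would itself require more than 655360 pages of kmem.
 */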
/*
 * Dump the pages belonging to this segvn segment.
 */
static void
segvn_dump(struct seg *seg)
{
	struct segvn_data *svd;
	page_t *pp;
	struct anon_map *amp;
	ulong_t	anon_index = 0;
	struct vnode *vp;
	u_offset_t off, offset;
	pfn_t pfn;
	pgcnt_t page, npages;
	caddr_t addr;

	npages = seg_pages(seg);
	svd = (struct segvn_data *)seg->s_data;
	vp = svd->vp;
	off = offset = svd->offset;
	addr = seg->s_base;

	if ((amp = svd->amp) != NULL) {
		anon_index = svd->anon_index;
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	}

	for (page = 0; page < npages; page++, offset += PAGESIZE) {
		struct anon *ap;
		int we_own_it = 0;

		if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
			swap_xlate_nopanic(ap, &vp, &off);
		} else {
			vp = svd->vp;
			off = offset;
		}

		/*
		 * If pp == NULL, the page either does not exist
		 * or is exclusively locked.  So determine if it
		 * exists before searching for it.
		 */

		if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
			we_own_it = 1;
		else
			pp = page_exists(vp, off);

		if (pp) {
			pfn = page_pptonum(pp);
			dump_addpage(seg->s_as, addr, pfn);
			if (we_own_it)
				page_unlock(pp);
		}
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}

	if (amp != NULL)
		ANON_LOCK_EXIT(&amp->a_rwlock);
}
static uint32_t segvn_pglock_mtbf = 0;

#define	PCACHE_SHWLIST		((page_t *)-2)
#define	NOPCACHE_SHWLIST	((page_t *)-1)

/*
 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
 * the same parts of the segment. Currently shadow list creation is only
 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
 * tagged with segment pointer, starting virtual address and length. This
 * approach for MAP_SHARED segments may add many pcache entries for the same
 * set of pages and lead to long hash chains that decrease pcache lookup
 * performance. To avoid this issue for shared segments shared anon map and
 * starting anon index are used for pcache entry tagging. This allows all
 * segments to share pcache entries for the same anon range and reduces pcache
 * chain's length as well as memory overhead from duplicate shadow lists and
 * pcache entries.
 *
 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
 * part of softlockcnt accounting is done differently for private and shared
 * segments. In private segment case softlock is only incremented when a new
 * shadow list is created but not when an existing one is found via
 * seg_plookup(). pcache entries have reference count incremented/decremented
 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
 * reference count can be purged (and purging is needed before segment can be
 * freed). When a private segment pcache entry is purged segvn_reclaim() will
 * decrement softlockcnt. Since in private segment case each of its pcache
 * entries only belongs to this segment we can expect that when
 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
 * segment purge will succeed and softlockcnt will drop to 0. In shared
 * segment case reference count in pcache entry counts active locks from many
 * different segments so we can't expect segment purging to succeed even when
 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
 * segment. To be able to determine when there're no pending pagelocks in
 * shared segment case we don't rely on purging to make softlockcnt drop to 0
 * but instead softlockcnt is incremented and decremented for every
 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
 * list was created or an existing one was found. When softlockcnt drops to 0
 * this segment no longer has any claims for pcached shadow lists and the
 * segment can be freed even if there're still active pcache entries
 * shared by this segment anon map. Shared segment pcache entries belong to
 * anon map and are typically removed when anon map is freed after all
 * processes destroy the segments that use this anon map.
 */
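/*
 * Usage sketch (not part of the original code): a driver performing physio
 * typically brackets the transfer with the address-space wrappers, which in
 * turn invoke this routine for each segment in the range, e.g.
 *
 *	error = as_pagelock(as, &pplist, addr, len, S_WRITE);
 *	... do the I/O against the returned shadow list ...
 *	as_pageunlock(as, pplist, addr, len, S_WRITE);
 *
 * The shadow list handed back through *ppp here is what as_pagelock()
 * returns to its caller; the matching unlock must pass the same addr/len.
 */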
8817 segvn_pagelock(struct seg
*seg
, caddr_t addr
, size_t len
, struct page
***ppp
,
8818 enum lock_type type
, enum seg_rw rw
)
8820 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
8822 pgcnt_t adjustpages
;
8825 uint_t protchk
= (rw
== S_READ
) ? PROT_READ
: PROT_WRITE
;
8827 struct anon_map
*amp
;
8829 struct page
**pplist
, **pl
, *pp
;
8832 caddr_t lpgaddr
, lpgeaddr
;
8833 anon_sync_obj_t cookie
;
8835 struct anon_map
*pamp
;
8837 seg_preclaim_cbfunc_t preclaim_callback
;
8842 int sftlck_sbase
= 0;
8843 int sftlck_send
= 0;
8846 if (type
== L_PAGELOCK
&& segvn_pglock_mtbf
) {
8847 hrtime_t ts
= gethrtime();
8848 if ((ts
% segvn_pglock_mtbf
) == 0) {
8851 if ((ts
% segvn_pglock_mtbf
) == 1) {
8857 TRACE_2(TR_FAC_PHYSIO
, TR_PHYSIO_SEGVN_START
,
8858 "segvn_pagelock: start seg %p addr %p", seg
, addr
);
8860 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
8861 ASSERT(type
== L_PAGELOCK
|| type
== L_PAGEUNLOCK
);
8863 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
8866 * for now we only support pagelock to anon memory. We would have to
8867 * check protections for vnode objects and call into the vnode driver.
8868 * That's too much for a fast path. Let the fault entry point handle
8871 if (svd
->vp
!= NULL
) {
8872 if (type
== L_PAGELOCK
) {
8876 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8878 if ((amp
= svd
->amp
) == NULL
) {
8879 if (type
== L_PAGELOCK
) {
8883 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8885 if (rw
!= S_READ
&& rw
!= S_WRITE
) {
8886 if (type
== L_PAGELOCK
) {
8890 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8893 if (seg
->s_szc
!= 0) {
8895 * We are adjusting the pagelock region to the large page size
8896 * boundary because the unlocked part of a large page cannot
8897 * be freed anyway unless all constituent pages of a large
8898 * page are locked. Bigger regions reduce pcache chain length
8899 * and improve lookup performance. The tradeoff is that the
8900 * very first segvn_pagelock() call for a given page is more
8901 * expensive if only 1 page_t is needed for IO. This is only
8902 * an issue if pcache entry doesn't get reused by several
8903 * subsequent calls. We optimize here for the case when pcache
8904 * is heavily used by repeated IOs to the same address range.
8906 * Note segment's page size cannot change while we are holding
8907 * as lock. And then it cannot change while softlockcnt is
8908 * not 0. This will allow us to correctly recalculate large
8909 * page size region for the matching pageunlock/reclaim call
8910 * since as_pageunlock() caller must always match
8911 * as_pagelock() call's addr and len.
8913 * For pageunlock *ppp points to the pointer of page_t that
8914 * corresponds to the real unadjusted start address. Similar
8915 * for pagelock *ppp must point to the pointer of page_t that
8916 * corresponds to the real unadjusted start address.
8918 pgsz
= page_get_pagesize(seg
->s_szc
);
8919 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
8920 adjustpages
= btop((uintptr_t)(addr
- lpgaddr
));
8921 } else if (len
< segvn_pglock_comb_thrshld
) {
8923 lpgeaddr
= addr
+ len
;
8928 * Align the address range of large enough requests to allow
8929 * combining of different shadow lists into 1 to reduce memory
8930 * overhead from potentially overlapping large shadow lists
8931 * (worst case is we have a 1MB IO into buffers with start
8932 * addresses separated by 4K). Alignment is only possible if
8933 * padded chunks have sufficient access permissions. Note
8934 * permissions won't change between L_PAGELOCK and
8935 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8936 * segvn_setprot() to wait until softlockcnt drops to 0. This
8937 * allows us to determine in L_PAGEUNLOCK the same range we
8938 * computed in L_PAGELOCK.
8940 * If alignment is limited by segment ends set
8941 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8942 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8943 * per segment counters. In L_PAGEUNLOCK case decrease
8944 * softlockcnt_sbase/softlockcnt_send counters if
8945 * sftlck_sbase/sftlck_send flags are set. When
8946 * softlockcnt_sbase/softlockcnt_send are non 0
8947 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8948 * won't merge the segments. This restriction combined with
8949 * restriction on segment unmapping and splitting for segments
8950 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8951 * correctly determine the same range that was previously
8952 * locked by matching L_PAGELOCK.
8954 pflags
= SEGP_PSHIFT
| (segvn_pglock_comb_bshift
<< 16);
8956 if (svd
->type
== MAP_PRIVATE
) {
8957 lpgaddr
= (caddr_t
)P2ALIGN((uintptr_t)addr
,
8958 segvn_pglock_comb_balign
);
8959 if (lpgaddr
< seg
->s_base
) {
8960 lpgaddr
= seg
->s_base
;
8964 ulong_t aix
= svd
->anon_index
+ seg_page(seg
, addr
);
8965 ulong_t aaix
= P2ALIGN(aix
, segvn_pglock_comb_palign
);
8966 if (aaix
< svd
->anon_index
) {
8967 lpgaddr
= seg
->s_base
;
8970 lpgaddr
= addr
- ptob(aix
- aaix
);
8971 ASSERT(lpgaddr
>= seg
->s_base
);
8974 if (svd
->pageprot
&& lpgaddr
!= addr
) {
8975 struct vpage
*vp
= &svd
->vpage
[seg_page(seg
, lpgaddr
)];
8976 struct vpage
*evp
= &svd
->vpage
[seg_page(seg
, addr
)];
8978 if ((VPP_PROT(vp
) & protchk
) == 0) {
8988 lpgeaddr
= addr
+ len
;
8990 if (svd
->type
== MAP_PRIVATE
) {
8991 lpgeaddr
= (caddr_t
)P2ROUNDUP(
8992 (uintptr_t)lpgeaddr
,
8993 segvn_pglock_comb_balign
);
8995 ulong_t aix
= svd
->anon_index
+
8996 seg_page(seg
, lpgeaddr
);
8997 ulong_t aaix
= P2ROUNDUP(aix
,
8998 segvn_pglock_comb_palign
);
9002 lpgeaddr
+= ptob(aaix
- aix
);
9005 if (lpgeaddr
== 0 ||
9006 lpgeaddr
> seg
->s_base
+ seg
->s_size
) {
9007 lpgeaddr
= seg
->s_base
+ seg
->s_size
;
9011 if (svd
->pageprot
&& lpgeaddr
!= addr
+ len
) {
9015 vp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
9016 evp
= &svd
->vpage
[seg_page(seg
, lpgeaddr
)];
9019 if ((VPP_PROT(vp
) & protchk
) == 0) {
9025 lpgeaddr
= addr
+ len
;
9028 adjustpages
= btop((uintptr_t)(addr
- lpgaddr
));
9032 * For MAP_SHARED segments we create pcache entries tagged by amp and
9033 * anon index so that we can share pcache entries with other segments
9034 * that map this amp. For private segments pcache entries are tagged
9035 * with segment and virtual address.
9037 if (svd
->type
== MAP_SHARED
) {
9039 paddr
= (caddr_t
)((lpgaddr
- seg
->s_base
) +
9040 ptob(svd
->anon_index
));
9041 preclaim_callback
= shamp_reclaim
;
9045 preclaim_callback
= segvn_reclaim
;
9048 if (type
== L_PAGEUNLOCK
) {
9049 VM_STAT_ADD(segvnvmstats
.pagelock
[0]);
9052 * update hat ref bits for /proc. We need to make sure
9053 * that threads tracing the ref and mod bits of the
9054 * address space get the right data.
9055 * Note: page ref and mod bits are updated at reclaim time
9057 if (seg
->s_as
->a_vbits
) {
9058 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
) {
9059 if (rw
== S_WRITE
) {
9060 hat_setstat(seg
->s_as
, a
,
9061 PAGESIZE
, P_REF
| P_MOD
);
9063 hat_setstat(seg
->s_as
, a
,
9070 * Check the shadow list entry after the last page used in
9071 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
9072 * was not inserted into pcache and is not large page
9073 * adjusted. In this case call reclaim callback directly and
9074 * don't adjust the shadow list start and size for large
9078 if ((*ppp
)[npages
] == NOPCACHE_SHWLIST
) {
9081 ASSERT(svd
->type
== MAP_SHARED
);
9082 ptag
= (void *)pamp
;
9083 paddr
= (caddr_t
)((addr
- seg
->s_base
) +
9084 ptob(svd
->anon_index
));
9089 (void) preclaim_callback(ptag
, paddr
, len
, *ppp
, rw
, 0);
9091 ASSERT((*ppp
)[npages
] == PCACHE_SHWLIST
||
9092 IS_SWAPFSVP((*ppp
)[npages
]->p_vnode
));
9093 len
= lpgeaddr
- lpgaddr
;
9095 seg_pinactive(seg
, pamp
, paddr
, len
,
9096 *ppp
- adjustpages
, rw
, pflags
, preclaim_callback
);
9100 ASSERT(svd
->type
== MAP_SHARED
);
9101 ASSERT(svd
->softlockcnt
>= npages
);
9102 atomic_add_long((ulong_t
*)&svd
->softlockcnt
, -npages
);
9106 ASSERT(svd
->softlockcnt_sbase
> 0);
9107 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt_sbase
);
9110 ASSERT(svd
->softlockcnt_send
> 0);
9111 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt_send
);
9115 * If someone is blocked while unmapping, we purge
9116 * segment page cache and thus reclaim pplist synchronously
9117 * without waiting for seg_pasync_thread. This speeds up
9118 * unmapping in cases where munmap(2) is called, while
9119 * raw async i/o is still in progress or where a thread
9120 * exits on data fault in a multithreaded application.
9122 if (AS_ISUNMAPWAIT(seg
->s_as
)) {
9123 if (svd
->softlockcnt
== 0) {
9124 mutex_enter(&seg
->s_as
->a_contents
);
9125 if (AS_ISUNMAPWAIT(seg
->s_as
)) {
9126 AS_CLRUNMAPWAIT(seg
->s_as
);
9127 cv_broadcast(&seg
->s_as
->a_cv
);
9129 mutex_exit(&seg
->s_as
->a_contents
);
9130 } else if (pamp
== NULL
) {
9132 * softlockcnt is not 0 and this is a
9133 * MAP_PRIVATE segment. Try to purge its
9134 * pcache entries to reduce softlockcnt.
9135 * If it drops to 0 segvn_reclaim()
9136 * will wake up a thread waiting on
9139 * We don't purge MAP_SHARED segments with non
9140 * 0 softlockcnt since IO is still in progress
9141 * for such segments.
9143 ASSERT(svd
->type
== MAP_PRIVATE
);
9147 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
9148 TRACE_2(TR_FAC_PHYSIO
, TR_PHYSIO_SEGVN_UNLOCK_END
,
9149 "segvn_pagelock: unlock seg %p addr %p", seg
, addr
);
	/* The L_PAGELOCK case ... */

	VM_STAT_ADD(segvnvmstats.pagelock[1]);

	/*
	 * For MAP_SHARED segments we have to check protections before
	 * seg_plookup() since pcache entries may be shared by many segments
	 * with potentially different page protections.
	 */
	if (pamp != NULL) {
		ASSERT(svd->type == MAP_SHARED);
		if (svd->pageprot == 0) {
			if ((svd->prot & protchk) == 0) {
				error = EACCES;
				goto out;
			}
		} else {
			/*
			 * check page protections
			 */
			caddr_t ea;

			if (seg->s_szc) {
				a = lpgaddr;
				ea = lpgeaddr;
			} else {
				a = addr;
				ea = addr + len;
			}
			for (; a < ea; a += pgsz) {
				struct vpage *vp;

				ASSERT(seg->s_szc == 0 ||
				    sameprot(seg, a, pgsz));
				vp = &svd->vpage[seg_page(seg, a)];
				if ((VPP_PROT(vp) & protchk) == 0) {
					error = EACCES;
					goto out;
				}
			}
		}
	}

	/*
	 * try to find pages in segment page cache
	 */
	pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
	if (pplist != NULL) {
		if (pamp != NULL) {
			npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
			ASSERT(svd->type == MAP_SHARED);
			atomic_add_long((ulong_t *)&svd->softlockcnt,
			    npages);
		}
		if (sftlck_sbase) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
		}
		if (sftlck_send) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		*ppp = pplist + adjustpages;
		TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
		    "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
		return (0);
	}
	/*
	 * For MAP_SHARED segments we already verified above that segment
	 * protections allow this pagelock operation.
	 */
	if (pamp == NULL) {
		ASSERT(svd->type == MAP_PRIVATE);
		if (svd->pageprot == 0) {
			if ((svd->prot & protchk) == 0) {
				error = EACCES;
				goto out;
			}
			if (svd->prot & PROT_WRITE) {
				wlen = lpgeaddr - lpgaddr;
			} else {
				wlen = 0;
				ASSERT(rw == S_READ);
			}
		} else {
			int wcont = 1;
			/*
			 * check page protections
			 */
			for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
				struct vpage *vp;

				ASSERT(seg->s_szc == 0 ||
				    sameprot(seg, a, pgsz));
				vp = &svd->vpage[seg_page(seg, a)];
				if ((VPP_PROT(vp) & protchk) == 0) {
					error = EACCES;
					goto out;
				}
				if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
					wlen += pgsz;
				} else {
					wcont = 0;
					ASSERT(rw == S_READ);
				}
			}
		}
		ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
		ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
	}

	/*
	 * Only build large page adjusted shadow list if we expect to insert
	 * it into pcache. For large enough pages it's a big overhead to
	 * create a shadow list of the entire large page. But this overhead
	 * should be amortized over repeated pcache hits on subsequent reuse
	 * of this shadow list (IO into any range within this shadow list will
	 * find it in pcache since we large page align the request for pcache
	 * lookups). pcache performance is improved with bigger shadow lists
	 * as it reduces the time to pcache the entire big segment and reduces
	 * pcache chain length.
	 */
	if (seg_pinsert_check(seg, pamp, paddr,
	    lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
		addr = lpgaddr;
		len = lpgeaddr - lpgaddr;
		use_pcache = 1;
	} else {
		use_pcache = 0;
		/*
		 * Since this entry will not be inserted into the pcache, we
		 * will not do any adjustments to the starting address or
		 * size of the memory to be locked.
		 */
		adjustpages = 0;
	}
	npages = btop(len);

	pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
	pl = pplist;
	*ppp = pplist + adjustpages;
	/*
	 * If use_pcache is 0 this shadow list is not large page adjusted.
	 * Record this info in the last entry of shadow array so that
	 * L_PAGEUNLOCK can determine if it should large page adjust the
	 * address range to find the real range that was locked.
	 */
	pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;

	page = seg_page(seg, addr);
	anon_index = svd->anon_index + page;
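	/*
	 * Walk the anon slots backing [addr, addr + len) and collect the
	 * shared-locked pages into the shadow list.  Slots that are still
	 * unallocated or COW-shared are faulted in first so that only
	 * private, singly referenced anon pages end up on the list.
	 */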
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	ASSERT(amp->a_szc >= seg->s_szc);
	anpgcnt = page_get_pagecnt(amp->a_szc);
	for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
		struct anon *ap;
		struct vnode *vp;
		u_offset_t off;

		/*
		 * Lock and unlock anon array only once per large page.
		 * anon_array_enter() locks the root anon slot according to
		 * a_szc which can't change while anon map is locked. We lock
		 * anon the first time through this loop and each time we
		 * reach anon index that corresponds to a root of a large
		 * page.
		 */
		if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
			ASSERT(anlock == 0);
			anon_array_enter(amp, anon_index, &cookie);
			anlock = 1;
		}
		ap = anon_get_ptr(amp->ahp, anon_index);

		/*
		 * We must never use seg_pcache for COW pages
		 * because we might end up with original page still
		 * lying in seg_pcache even after private page is
		 * created. This leads to data corruption as
		 * aio_write refers to the page still in cache
		 * while all other accesses refer to the private
		 * page.
		 */
		if (ap == NULL || ap->an_refcnt != 1) {
			struct vpage *vpage;

			if (seg->s_szc) {
				error = EFAULT;
				break;
			}
			if (svd->vpage != NULL) {
				vpage = &svd->vpage[seg_page(seg, a)];
			} else {
				vpage = NULL;
			}
			ASSERT(anlock);
			anon_array_exit(&cookie);
			anlock = 0;
			pp = NULL;
			error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
			    vpage, &pp, 0, F_INVAL, rw, 1);
			if (error) {
				error = fc_decode(error);
				break;
			}
			anon_array_enter(amp, anon_index, &cookie);
			anlock = 1;
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap == NULL || ap->an_refcnt != 1) {
				error = EFAULT;
				break;
			}
		}
		swap_xlate(ap, &vp, &off);
		pp = page_lookup_nowait(vp, off, SE_SHARED);
		if (pp == NULL) {
			error = EFAULT;
			break;
		}
		if (ap->an_pvp != NULL) {
			anon_swap_free(ap, pp);
		}
		/*
		 * Unlock anon if this is the last slot in a large page.
		 */
		if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
			ASSERT(anlock);
			anon_array_exit(&cookie);
			anlock = 0;
		}
		*pplist++ = pp;
	}
	if (anlock) {		/* Ensure the lock is dropped */
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (a >= addr + len) {
		atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
		if (pamp != NULL) {
			ASSERT(svd->type == MAP_SHARED);
			atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
			    npages);
			wlen = len;
		}
		if (sftlck_sbase) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
		}
		if (sftlck_send) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
		}
		if (use_pcache) {
			(void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
			    rw, pflags, preclaim_callback);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
		    "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
		return (0);
	}

	pplist = pl;
	np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
	while (np > (uint_t)0) {
		ASSERT(PAGE_LOCKED(*pplist));
		page_unlock(*pplist);
		np--;
		pplist++;
	}
	kmem_free(pl, sizeof (page_t *) * (npages + 1));
out:
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	*ppp = NULL;
	TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
	    "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
	return (error);
}

/*
 * purge any cached pages in the I/O page cache
 */
static void
segvn_purge(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	/*
	 * pcache is only used by pure anon segments.
	 */
	if (svd->amp == NULL || svd->vp != NULL) {
		return;
	}

	/*
	 * For MAP_SHARED segments non 0 segment's softlockcnt means
	 * active IO is still in progress via this segment. So we only
	 * purge MAP_SHARED segments when their softlockcnt is 0.
	 */
	if (svd->type == MAP_PRIVATE) {
		if (svd->softlockcnt) {
			seg_ppurge(seg, NULL, 0);
		}
	} else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
		seg_ppurge(seg, svd->amp, 0);
	}
}
/*
 * If async argument is not 0 we are called from pcache async thread and don't
 * hold AS lock.
 */
/*ARGSUSED*/
static int
segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
	struct seg *seg = (struct seg *)ptag;
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	pgcnt_t np, npages;
	struct page **pl;

	npages = np = btop(len);
	ASSERT(npages);

	ASSERT(svd->vp == NULL && svd->amp != NULL);
	ASSERT(svd->softlockcnt >= npages);
	ASSERT(async || AS_LOCK_HELD(seg->s_as));

	pl = pplist;

	ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
	ASSERT(!async || pl[np] == PCACHE_SHWLIST);

	while (np > (uint_t)0) {
		if (rw == S_WRITE) {
			hat_setrefmod(*pplist);
		} else {
			hat_setref(*pplist);
		}
		page_unlock(*pplist);
		np--;
		pplist++;
	}

	kmem_free(pl, sizeof (page_t *) * (npages + 1));

	/*
	 * If we are pcache async thread we don't hold AS lock. This means if
	 * softlockcnt drops to 0 after the decrement below address space may
	 * get freed. We can't allow it since after softlock decrement to 0 we
	 * still need to access as structure for possible wakeup of unmap
	 * waiters. To prevent the disappearance of as we take this segment
	 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
	 * make sure this routine completes before segment is freed.
	 *
	 * The second complication we have to deal with in async case is a
	 * possibility of missed wake up of unmap wait thread. When we don't
	 * hold as lock here we may take a_contents lock before unmap wait
	 * thread that was first to see softlockcnt was still not 0. As a
	 * result we'll fail to wake up an unmap wait thread. To avoid this
	 * race we set nounmapwait flag in as structure if we drop softlockcnt
	 * to 0 when we were called by pcache async thread. unmapwait thread
	 * will not block if this flag is set.
	 */
	if (async) {
		mutex_enter(&svd->segfree_syncmtx);
	}

	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
		if (async || AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (async) {
				AS_SETNOUNMAPWAIT(seg->s_as);
			}
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}

	if (async) {
		mutex_exit(&svd->segfree_syncmtx);
	}
	return (0);
}
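
/*
 * Reclaim callback for shadow lists of MAP_SHARED segments.  These pcache
 * entries are tagged by the anon map rather than the segment, so the ref/mod
 * bits are pushed back to the pages and a_softlockcnt (not the per segment
 * softlockcnt) is decremented here.
 */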
/*ARGSUSED*/
static int
shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
	amp_t *amp = (amp_t *)ptag;
	pgcnt_t np, npages;
	struct page **pl;

	npages = np = btop(len);
	ASSERT(npages);
	ASSERT(amp->a_softlockcnt >= npages);

	pl = pplist;

	ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
	ASSERT(!async || pl[np] == PCACHE_SHWLIST);

	while (np > (uint_t)0) {
		if (rw == S_WRITE) {
			hat_setrefmod(*pplist);
		} else {
			hat_setref(*pplist);
		}
		page_unlock(*pplist);
		np--;
		pplist++;
	}

	kmem_free(pl, sizeof (page_t *) * (npages + 1));

	/*
	 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
	 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
	 * and anonmap_purge() acquires a_purgemtx.
	 */
	mutex_enter(&amp->a_purgemtx);
	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
	    amp->a_purgewait) {
		amp->a_purgewait = 0;
		cv_broadcast(&amp->a_purgecv);
	}
	mutex_exit(&amp->a_purgemtx);
	return (0);
}

/*
 * get a memory ID for an addr in a given segment
 *
 * XXX only creates PAGESIZE pages if anon slots are not initialized.
 * At fault time they will be relocated into larger pages.
 */
static int
segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon	*ap = NULL;
	ulong_t		anon_index = 0;
	struct anon_map	*amp;
	anon_sync_obj_t cookie;

	if (svd->type == MAP_PRIVATE) {
		memidp->val[0] = (uintptr_t)seg->s_as;
		memidp->val[1] = (uintptr_t)addr;
		return (0);
	}

	if (svd->type == MAP_SHARED) {
		if (svd->vp) {
			memidp->val[0] = (uintptr_t)svd->vp;
			memidp->val[1] = (u_longlong_t)svd->offset +
			    (uintptr_t)(addr - seg->s_base);
			return (0);
		} else {

			SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
			if ((amp = svd->amp) != NULL) {
				anon_index = svd->anon_index +
				    seg_page(seg, addr);
			}
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

			ASSERT(amp != NULL);

			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap == NULL) {
				page_t *pp;

				pp = anon_zero(seg, addr, &ap, svd->cred);
				if (pp == NULL) {
					anon_array_exit(&cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
					return (ENOMEM);
				}
				ASSERT(anon_get_ptr(amp->ahp, anon_index)
				    == NULL);
				(void) anon_set_ptr(amp->ahp, anon_index,
				    ap, ANON_SLEEP);
				page_unlock(pp);
			}

			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);

			memidp->val[0] = (uintptr_t)ap;
			memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
			return (0);
		}
	}
	return (EINVAL);
}
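
/*
 * Return 1 if the per-page protections of all pages in [a, a + len) are the
 * same, 0 otherwise.  Trivially true when the segment has no per-page
 * protections.
 */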
static int
sameprot(struct seg *seg, caddr_t a, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vpage;
	spgcnt_t pages = btop(len);
	uint_t prot;

	if (svd->pageprot == 0)
		return (1);

	ASSERT(svd->vpage != NULL);

	vpage = &svd->vpage[seg_page(seg, a)];
	prot = VPP_PROT(vpage);
	vpage++;
	pages--;

	while (pages-- > 0) {
		if (prot != VPP_PROT(vpage))
			return (0);
		vpage++;
	}
	return (1);
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segvn_getpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map		*amp;
	ulong_t			anon_index;
	lgrp_mem_policy_info_t	*policy_info;
	struct segvn_data	*svn_data;
	u_offset_t		vn_off;
	vnode_t			*vp;

	ASSERT(seg != NULL);

	svn_data = (struct segvn_data *)seg->s_data;
	if (svn_data == NULL)
		return (NULL);

	/*
	 * Get policy info for private or shared memory
	 */
	if (svn_data->type != MAP_SHARED) {
		if (svn_data->tr_state != SEGVN_TR_ON) {
			policy_info = &svn_data->policy_info;
		} else {
			policy_info = &svn_data->tr_policy_info;
			ASSERT(policy_info->mem_policy ==
			    LGRP_MEM_POLICY_NEXT_SEG);
		}
	} else {
		amp = svn_data->amp;
		anon_index = svn_data->anon_index + seg_page(seg, addr);
		vp = svn_data->vp;
		vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
		policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
	}

	return (policy_info);
}

/*ARGSUSED*/
static int
segvn_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

/*
 * Bind text vnode segment to an amp. If we bind successfully mappings will be
 * established to per vnode mapping per lgroup amp pages instead of to vnode
 * pages. There's one amp per vnode text mapping per lgroup. Many processes
 * may share the same text replication amp. If a suitable amp doesn't already
 * exist in svntr hash table create a new one.  We may fail to bind to amp if
 * segment is not eligible for text replication.  Code below first checks for
 * these conditions. If binding is successful segment tr_state is set to on
 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
 * svd->amp remains as NULL.
 */
static void
segvn_textrepl(struct seg *seg)
{
	struct segvn_data	*svd = (struct segvn_data *)seg->s_data;
	vnode_t			*vp = svd->vp;
	u_offset_t		off = svd->offset;
	size_t			size = seg->s_size;
	u_offset_t		eoff = off + size;
	uint_t			szc = seg->s_szc;
	ulong_t			hash = SVNTR_HASH_FUNC(vp);
	svntr_t			*svntrp;
	struct vattr		va;
	proc_t			*p = seg->s_as->a_proc;
	lgrp_id_t		lgrp_id;
	lgrp_id_t		olid;
	int			first;
	struct anon_map		*amp;

	ASSERT(AS_LOCK_HELD(seg->s_as));
	ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->tr_state == SEGVN_TR_INIT);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->flags & MAP_TEXT);
	ASSERT(svd->type == MAP_PRIVATE);
	ASSERT(vp != NULL && svd->amp == NULL);
	ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
	ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
	ASSERT(seg->s_as != &kas);
	ASSERT(svntr_hashtab != NULL);

	/*
	 * If numa optimizations are no longer desired bail out.
	 */
	if (!lgrp_optimizations()) {
		svd->tr_state = SEGVN_TR_OFF;
		return;
	}

	/*
	 * Avoid creating anon maps with size bigger than the file size.
	 * If VOP_GETATTR() call fails bail out.
	 */
	va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
	if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
		svd->tr_state = SEGVN_TR_OFF;
		SEGVN_TR_ADDSTAT(gaerr);
		return;
	}
	if (btopr(va.va_size) < btopr(eoff)) {
		svd->tr_state = SEGVN_TR_OFF;
		SEGVN_TR_ADDSTAT(overmap);
		return;
	}

	/*
	 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
	 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
	 * mapping that checks if trcache for this vnode needs to be
	 * invalidated can't miss us.
	 */
	if (!(vp->v_flag & VVMEXEC)) {
		mutex_enter(&vp->v_lock);
		vp->v_flag |= VVMEXEC;
		mutex_exit(&vp->v_lock);
	}
	mutex_enter(&svntr_hashtab[hash].tr_lock);
	/*
	 * Bail out if potentially MAP_SHARED writable mappings exist to this
	 * vnode.  We don't want to use old file contents from existing
	 * replicas if this mapping was established after the original file
	 * was changed.
	 */
	if (vn_is_mapped(vp, V_WRITE)) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		svd->tr_state = SEGVN_TR_OFF;
		SEGVN_TR_ADDSTAT(wrcnt);
		return;
	}
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp != vp) {
			continue;
		}

		/*
		 * Bail out if the file or its attributes were changed after
		 * this replication entry was created since we need to use the
		 * latest file contents. Note that mtime test alone is not
		 * sufficient because a user can explicitly change mtime via
		 * utimes(2) interfaces back to the old value after modifying
		 * the file contents. To detect this case we also have to test
		 * ctime which among other things records the time of the last
		 * mtime change by utimes(2). ctime is not changed when the
		 * file is only read or executed so we expect that typically
		 * existing replication amp's can be used most of the time.
		 */
		if (!svntrp->tr_valid ||
		    svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
		    svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
		    svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
		    svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
			mutex_exit(&svntr_hashtab[hash].tr_lock);
			svd->tr_state = SEGVN_TR_OFF;
			SEGVN_TR_ADDSTAT(stale);
			return;
		}
		/*
		 * if off, eoff and szc match current segment we found the
		 * existing entry we can use.
		 */
		if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
		    svntrp->tr_szc == szc) {
			break;
		}
		/*
		 * Don't create different but overlapping in file offsets
		 * entries to avoid replication of the same file pages more
		 * than once per lgroup.
		 */
		if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
		    (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
			mutex_exit(&svntr_hashtab[hash].tr_lock);
			svd->tr_state = SEGVN_TR_OFF;
			SEGVN_TR_ADDSTAT(overlap);
			return;
		}
	}
	/*
	 * If we didn't find existing entry create a new one.
	 */
	if (svntrp == NULL) {
		svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
		if (svntrp == NULL) {
			mutex_exit(&svntr_hashtab[hash].tr_lock);
			svd->tr_state = SEGVN_TR_OFF;
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
#ifdef DEBUG
		{
			lgrp_id_t i;
			for (i = 0; i < NLGRPS_MAX; i++) {
				ASSERT(svntrp->tr_amp[i] == NULL);
			}
		}
#endif /* DEBUG */
		svntrp->tr_vp = vp;
		svntrp->tr_off = off;
		svntrp->tr_eoff = eoff;
		svntrp->tr_szc = szc;
		svntrp->tr_valid = 1;
		svntrp->tr_mtime = va.va_mtime;
		svntrp->tr_ctime = va.va_ctime;
		svntrp->tr_refcnt = 0;
		svntrp->tr_next = svntr_hashtab[hash].tr_head;
		svntr_hashtab[hash].tr_head = svntrp;
	}
	first = 1;
again:
	/*
	 * We want to pick a replica with pages on main thread's (t_tid = 1,
	 * aka T1) lgrp. Currently text replication is only optimized for
	 * workloads that either have all threads of a process on the same
	 * lgrp or execute their large text primarily on main thread.
	 */
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		/*
		 * In case exec() prefaults text on non main thread use
		 * current thread lgrpid.  It will become main thread anyway
		 * soon.
		 */
		lgrp_id = lgrp_home_id(curthread);
	}
	/*
	 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet.  Otherwise
	 * just set it to NLGRPS_MAX if it's different from current process T1
	 * home lgrp.  p_tr_lgrpid is used to detect if process uses text
	 * replication and T1 new home is different from lgrp used for text
	 * replication. When this happens asynchronous segvn thread rechecks
	 * if segments should change lgrps used for text replication.  If we
	 * fail to set p_tr_lgrpid with atomic_cas_32 then set it to
	 * NLGRPS_MAX without cas if it's not already NLGRPS_MAX and not equal
	 * lgrp_id we want to use.  We don't need to use cas in this case
	 * because another thread that races in between our non atomic check
	 * and set may only change p_tr_lgrpid to NLGRPS_MAX at this point.
	 */
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	olid = p->p_tr_lgrpid;
	if (lgrp_id != olid && olid != NLGRPS_MAX) {
		lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
		if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
		    olid) {
			olid = p->p_tr_lgrpid;
			ASSERT(olid != LGRP_NONE);
			if (olid != lgrp_id && olid != NLGRPS_MAX) {
				p->p_tr_lgrpid = NLGRPS_MAX;
			}
		}
		ASSERT(p->p_tr_lgrpid != LGRP_NONE);
		membar_producer();
		/*
		 * lgrp_move_thread() won't schedule async recheck after
		 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
		 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
		 * is not LGRP_NONE.
		 */
		if (first && p->p_t1_lgrpid != LGRP_NONE &&
		    p->p_t1_lgrpid != lgrp_id) {
			first = 0;
			goto again;
		}
	}
	/*
	 * If no amp was created yet for lgrp_id create a new one as long as
	 * we have enough memory to afford it.
	 */
	if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_TR_ADDSTAT(normem);
			goto fail;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_TR_ADDSTAT(noanon);
			goto fail;
		}
		amp = anonmap_alloc(size, size, ANON_NOSLEEP);
		if (amp == NULL) {
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			goto fail;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = szc;
		svntrp->tr_amp[lgrp_id] = amp;
		SEGVN_TR_ADDSTAT(newamp);
	}
	svntrp->tr_refcnt++;
	ASSERT(svd->svn_trnext == NULL);
	ASSERT(svd->svn_trprev == NULL);
	svd->svn_trnext = svntrp->tr_svnhead;
	svd->svn_trprev = NULL;
	if (svntrp->tr_svnhead != NULL) {
		svntrp->tr_svnhead->svn_trprev = svd;
	}
	svntrp->tr_svnhead = svd;
	ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
	ASSERT(amp->refcnt >= 1);
	svd->amp = amp;
	svd->anon_index = 0;
	svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->tr_state = SEGVN_TR_ON;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	SEGVN_TR_ADDSTAT(repl);
	return;
fail:
	ASSERT(segvn_textrepl_bytes >= size);
	atomic_add_long(&segvn_textrepl_bytes, -size);
	ASSERT(svntrp != NULL);
	ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
	if (svntrp->tr_refcnt == 0) {
		ASSERT(svntrp == svntr_hashtab[hash].tr_head);
		svntr_hashtab[hash].tr_head = svntrp->tr_next;
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		kmem_cache_free(svntr_cache, svntrp);
	} else {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
	svd->tr_state = SEGVN_TR_OFF;
}

/*
 * Convert seg back to regular vnode mapping seg by unbinding it from its text
 * replication amp. This routine is most typically called when segment is
 * unmapped but can also be called when segment no longer qualifies for text
 * replication (e.g. due to protection changes). If unload_unmap is set use
 * HAT_UNLOAD_UNMAP flag in hat_unload_callback().  If we are the last user of
 * svntr free all its anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
	struct segvn_data	*svd = (struct segvn_data *)seg->s_data;
	vnode_t			*vp = svd->vp;
	u_offset_t		off = svd->offset;
	size_t			size = seg->s_size;
	u_offset_t		eoff = off + size;
	uint_t			szc = seg->s_szc;
	ulong_t			hash = SVNTR_HASH_FUNC(vp);
	svntr_t			*svntrp;
	svntr_t			**prv_svntrp;
	lgrp_id_t		lgrp_id = svd->tr_policy_info.mem_lgrpid;
	lgrp_id_t		i;

	ASSERT(AS_LOCK_HELD(seg->s_as));
	ASSERT(AS_WRITE_HELD(seg->s_as) ||
	    SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->amp->refcnt >= 1);
	ASSERT(svd->anon_index == 0);
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	ASSERT(svntr_hashtab != NULL);

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	prv_svntrp = &svntr_hashtab[hash].tr_head;
	for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
		    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
			break;
		}
	}
	if (svntrp == NULL) {
		panic("segvn_textunrepl: svntr record not found");
	}
	if (svntrp->tr_amp[lgrp_id] != svd->amp) {
		panic("segvn_textunrepl: amp mismatch");
	}
	svd->tr_state = SEGVN_TR_OFF;
	svd->amp = NULL;
	if (svd->svn_trprev == NULL) {
		ASSERT(svntrp->tr_svnhead == svd);
		svntrp->tr_svnhead = svd->svn_trnext;
		if (svntrp->tr_svnhead != NULL) {
			svntrp->tr_svnhead->svn_trprev = NULL;
		}
		svd->svn_trnext = NULL;
	} else {
		svd->svn_trprev->svn_trnext = svd->svn_trnext;
		if (svd->svn_trnext != NULL) {
			svd->svn_trnext->svn_trprev = svd->svn_trprev;
			svd->svn_trnext = NULL;
		}
		svd->svn_trprev = NULL;
	}
	if (--svntrp->tr_refcnt) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		goto done;
	}
	*prv_svntrp = svntrp->tr_next;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	for (i = 0; i < NLGRPS_MAX; i++) {
		struct anon_map *amp = svntrp->tr_amp[i];
		if (amp == NULL) {
			continue;
		}
		ASSERT(amp->refcnt == 1);
		ASSERT(amp->swresv == size);
		ASSERT(amp->size == size);
		ASSERT(amp->a_szc == szc);
		if (amp->a_szc != 0) {
			anon_free_pages(amp->ahp, 0, size, szc);
		} else {
			anon_free(amp->ahp, 0, size);
		}
		svntrp->tr_amp[i] = NULL;
		ASSERT(segvn_textrepl_bytes >= size);
		atomic_add_long(&segvn_textrepl_bytes, -size);
		anon_unresv_zone(amp->swresv, NULL);
		amp->refcnt = 0;
		anonmap_free(amp);
	}
	kmem_cache_free(svntr_cache, svntrp);
done:
	hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
	    unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}

/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (VVMEXEC flag is set). In this case we
 * need to prevent further use of existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
	ulong_t			hash = SVNTR_HASH_FUNC(vp);
	svntr_t			*svntrp;

	ASSERT(vp->v_flag & VVMEXEC);

	if (svntr_hashtab == NULL) {
		return;
	}

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_valid) {
			svntrp->tr_valid = 0;
		}
	}
	mutex_exit(&svntr_hashtab[hash].tr_lock);
}
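
/*
 * Text replication async thread.  It waits on segvn_trasync_sem (posted by
 * segvn_trupdate_wakeup() when thread migrations between lgroups are
 * detected) and then rechecks all replicated segments via segvn_trupdate().
 */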
static void
segvn_trasync_thread(void)
{
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;	/* just for CPR stuff */

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

	CALLB_CPR_INIT(&cpr_info, &cpr_lock,
	    callb_generic_cpr, "segvn_async");

	if (segvn_update_textrepl_interval == 0) {
		segvn_update_textrepl_interval = segvn_update_tr_time * hz;
	} else {
		segvn_update_textrepl_interval *= hz;
	}
	(void) timeout(segvn_trupdate_wakeup, NULL,
	    segvn_update_textrepl_interval);

	for (;;) {
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		mutex_exit(&cpr_lock);
		sema_p(&segvn_trasync_sem);
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
		mutex_exit(&cpr_lock);
		segvn_trupdate();
	}
}
static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;

static void
segvn_trupdate_wakeup(void *dummy)
{
	uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

	if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
		segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
		sema_v(&segvn_trasync_sem);
	}

	if (!segvn_disable_textrepl_update &&
	    segvn_update_textrepl_interval != 0) {
		(void) timeout(segvn_trupdate_wakeup, dummy,
		    segvn_update_textrepl_interval);
	}
}
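
/*
 * Walk the whole svntr hash table and recheck every segment currently bound
 * to a replication amp; segvn_trupdate_seg() switches a segment to the amp
 * of its process' new T1 home lgroup when needed.
 */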
static void
segvn_trupdate(void)
{
	ulong_t		hash;
	svntr_t		*svntrp;
	segvn_data_t	*svd;

	ASSERT(svntr_hashtab != NULL);

	for (hash = 0; hash < svntr_hashtab_sz; hash++) {
		mutex_enter(&svntr_hashtab[hash].tr_lock);
		svntrp = svntr_hashtab[hash].tr_head;
		for (; svntrp != NULL; svntrp = svntrp->tr_next) {
			ASSERT(svntrp->tr_refcnt != 0);
			svd = svntrp->tr_svnhead;
			for (; svd != NULL; svd = svd->svn_trnext) {
				segvn_trupdate_seg(svd->seg, svd, svntrp,
				    hash);
			}
		}
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
}
static void
segvn_trupdate_seg(struct seg *seg, segvn_data_t *svd, svntr_t *svntrp,
    ulong_t hash)
{
	proc_t			*p;
	lgrp_id_t		lgrp_id;
	struct as		*as;
	size_t			size;
	struct anon_map		*amp;

	ASSERT(svd->vp != NULL);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->offset == svntrp->tr_off);
	ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
	ASSERT(seg != NULL);
	ASSERT(svd->seg == seg);
	ASSERT(seg->s_data == (void *)svd);
	ASSERT(seg->s_szc == svntrp->tr_szc);
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
	ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
	ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

	as = seg->s_as;
	ASSERT(as != NULL && as != &kas);
	p = as->a_proc;
	ASSERT(p != NULL);
	ASSERT(p->p_tr_lgrpid != LGRP_NONE);
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		return;
	}
	ASSERT(lgrp_id < NLGRPS_MAX);
	if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
		return;
	}

	/*
	 * Use tryenter locking since we are locking as/seg and svntr hash
	 * lock in reverse from synchronous thread order.
	 */
	if (!AS_LOCK_TRYENTER(as, RW_READER)) {
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as);
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	size = seg->s_size;
	if (svntrp->tr_amp[lgrp_id] == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(normem);
			return;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(noanon);
			return;
		}
		amp = anonmap_alloc(size, size, KM_NOSLEEP);
		if (amp == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = seg->s_szc;
		svntrp->tr_amp[lgrp_id] = amp;
	}
	/*
	 * We don't need to drop the bucket lock but here we give other
	 * threads a chance.  svntr and svd can't be unlinked as long as
	 * segment lock is held as a writer and AS held as well. After we
	 * retake bucket lock we'll continue from where we left. We'll be able
	 * to reach the end of either list since new entries are always added
	 * to the beginning of the lists.
	 */
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
	mutex_enter(&svntr_hashtab[hash].tr_lock);

	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
	ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->amp = svntrp->tr_amp[lgrp_id];
	p->p_tr_lgrpid = NLGRPS_MAX;
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	AS_LOCK_EXIT(as);

	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
	ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
	ASSERT(svd->seg == seg);
	ASSERT(svd->tr_state == SEGVN_TR_ON);

	SEGVN_TR_ADDSTAT(asyncrepl);
}