/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */
/*
 * VM - shared or copy-on-write from a vnode/anonymous memory.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/dumphdr.h>

#include <vm/seg_vn.h>

#include <sys/project.h>
#include <sys/shm_impl.h>
/*
 * segvn_fault needs a temporary page list array.  To avoid calling kmem all
 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
 * it can.  In the rare case when this page list is not large enough, it
 * goes and gets a large enough array from kmem.
 *
 * This small page list array covers either 8 pages or 64kB worth of pages -
 * whichever is smaller.
 */
#define	PVN_MAX_GETPAGE_SZ	0x10000
#define	PVN_MAX_GETPAGE_NUM	0x8

#if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
#define	PVN_GETPAGE_SZ	ptob(PVN_MAX_GETPAGE_NUM)
#define	PVN_GETPAGE_NUM	PVN_MAX_GETPAGE_NUM
#else
#define	PVN_GETPAGE_SZ	PVN_MAX_GETPAGE_SZ
#define	PVN_GETPAGE_NUM	btop(PVN_MAX_GETPAGE_SZ)
#endif
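/*
 * Illustrative only (not part of the original source): how the #if above
 * resolves for two hypothetical base page sizes.  With 4K pages,
 * PVN_MAX_GETPAGE_NUM * PAGESIZE = 8 * 0x1000 = 0x8000, which is less than
 * PVN_MAX_GETPAGE_SZ (0x10000), so the 8-entry limit wins and the small
 * array covers ptob(8) = 32K.  With 8K pages, 8 * 0x2000 = 0x10000 is not
 * smaller than the 64K cap, so the byte limit wins and
 * PVN_GETPAGE_NUM = btop(0x10000) = 8 entries covering 64K.
 */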
/*
 * Private seg op routines.
 */
static int	segvn_dup(struct seg *seg, struct seg *newseg);
static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segvn_free(struct seg *seg);
static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
		    caddr_t addr, size_t len, enum fault_type type,
		    enum seg_rw rw);
static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
static int	segvn_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t	segvn_swapout(struct seg *seg);
static int	segvn_sync(struct seg *seg, caddr_t addr, size_t len,
		    int attr, uint_t flags);
static size_t	segvn_incore(struct seg *seg, caddr_t addr, size_t len,
		    char *vec);
static int	segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
		    int attr, int op, ulong_t *lockmap, size_t pos);
static int	segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t *protv);
static u_offset_t	segvn_getoffset(struct seg *seg, caddr_t addr);
static int	segvn_gettype(struct seg *seg, caddr_t addr);
static int	segvn_getvp(struct seg *seg, caddr_t addr,
		    struct vnode **vpp);
static int	segvn_advise(struct seg *seg, caddr_t addr, size_t len,
		    uint_t behav);
static void	segvn_dump(struct seg *seg);
static int	segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segvn_getmemid(struct seg *seg, caddr_t addr,
		    memid_t *memidp);
static lgrp_mem_policy_info_t	*segvn_getpolicy(struct seg *, caddr_t);
static int	segvn_capable(struct seg *seg, segcapability_t capable);
static int	segvn_inherit(struct seg *, caddr_t, size_t, uint_t);
struct seg_ops segvn_ops = {
	segvn_dup,
	segvn_unmap,
	segvn_free,
	segvn_fault,
	segvn_faulta,
	segvn_setprot,
	segvn_checkprot,
	segvn_kluster,
	segvn_swapout,
	segvn_sync,
	segvn_incore,
	segvn_lockop,
	segvn_getprot,
	segvn_getoffset,
	segvn_gettype,
	segvn_getvp,
	segvn_advise,
	segvn_dump,
	segvn_pagelock,
	segvn_setpagesize,
	segvn_getmemid,
	segvn_getpolicy,
	segvn_capable,
	segvn_inherit,
};
/*
 * Common zfod structures, provided as a shorthand for others to use.
 */
static segvn_crargs_t zfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
static segvn_crargs_t kzfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
	PROT_ALL & ~PROT_USER);
static segvn_crargs_t stack_noexec_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);

caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs;	/* user zfod argsp */
caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs;	/* kernel zfod argsp */
caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs;	/* executable stack */
caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
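/*
 * Illustrative usage (not part of the original source): callers typically
 * hand one of the argsp pointers above, together with segvn_create, to
 * as_map() when mapping an anonymous zero-fill-on-demand region, e.g.
 *
 *	error = as_map(as, addr, len, segvn_create, zfod_argsp);
 *
 * The surrounding call site and error handling here are hypothetical; the
 * point is that these shorthand argument blocks spare callers from building
 * a segvn_crargs_t for the common zfod cases.
 */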
#define	vpgtob(n)	((n) * sizeof (struct vpage))	/* For brevity */

size_t	segvn_comb_thrshld = UINT_MAX;	/* patchable -- see 1196681 */

size_t	segvn_pglock_comb_thrshld = (1UL << 16);	/* 64K */
size_t	segvn_pglock_comb_balign = (1UL << 16);		/* 64K */
uint_t	segvn_pglock_comb_bshift;
size_t	segvn_pglock_comb_palign;
static int	segvn_concat(struct seg *, struct seg *, int);
static int	segvn_extend_prev(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static int	segvn_extend_next(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static void	segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
static void	segvn_pagelist_rele(page_t **);
static void	segvn_setvnode_mpss(vnode_t *);
static void	segvn_relocate_pages(page_t **, page_t *);
static int	segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
static int	segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
    uint_t, page_t **, page_t **, uint_t *, int *);
static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
    u_offset_t, struct vpage *, page_t **, uint_t,
    enum fault_type, enum seg_rw, int);
static void	segvn_vpage(struct seg *);
static size_t	segvn_count_swap_by_vpages(struct seg *);

static void segvn_purge(struct seg *seg);
static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);

static int sameprot(struct seg *, caddr_t, size_t);

static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
static int segvn_clrszc(struct seg *);
static struct seg *segvn_split_seg(struct seg *, caddr_t);
static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
    ulong_t, uint_t);

static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

static struct kmem_cache *segvn_cache;
static struct kmem_cache **segvn_szc_cache;
#ifdef VM_STATS
static struct segvnvmstats_str {
	ulong_t	fill_vp_pages[31];
	ulong_t	fltvnpages[49];
	ulong_t	fullszcpages[10];
	ulong_t	relocatepages[3];
	ulong_t	fltanpages[17];
	ulong_t	demoterange[3];
} segvnvmstats;
#endif /* VM_STATS */
#define	SDR_RANGE	1		/* demote entire range */
#define	SDR_END		2		/* demote non aligned ends only */
#define	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) {	\
	if ((len) != 0) {						\
		lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);	\
		ASSERT(lpgaddr >= (seg)->s_base);			\
		lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) +	\
		    (len)), pgsz);					\
		ASSERT(lpgeaddr > lpgaddr);				\
		ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size);	\
	} else {							\
		lpgeaddr = lpgaddr = (addr);				\
	}								\
}
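/*
 * Worked example (illustrative, not from the original source): with a 4M
 * large page (pgsz = 0x400000), addr = 0x10123000 and len = 0x200000,
 *
 *	lpgaddr  = P2ALIGN(0x10123000, 4M)                = 0x10000000
 *	lpgeaddr = P2ROUNDUP(0x10123000 + 0x200000, 4M)   = 0x10400000
 *
 * so [lpgaddr, lpgeaddr) is the smallest large-page aligned region that
 * covers [addr, addr + len).
 */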
static int
segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct segvn_data *svd = buf;

	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
	svd->svn_trnext = svd->svn_trprev = NULL;
	return (0);
}

static void
segvn_cache_destructor(void *buf, void *cdrarg)
{
	struct segvn_data *svd = buf;

	rw_destroy(&svd->lock);
	mutex_destroy(&svd->segfree_syncmtx);
}

static int
svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (svntr_t));
	return (0);
}
/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * most everything works.  The exceptions are generally some interpreters
 * and debuggers that create executable code on the stack and jump
 * into it (without explicitly mprotecting the address range to include
 * PROT_EXEC).
 *
 * One important class of applications that are disabled are those
 * that have been transformed into malicious agents using one of the
 * numerous "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;
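/*
 * Illustrative usage (not part of the original source): on platforms whose
 * ABI defaults to executable stacks, an administrator can enable the
 * protection system-wide by setting the tunables in /etc/system and
 * rebooting:
 *
 *	set noexec_user_stack = 1
 *	set noexec_user_stack_log = 1
 *
 * The second tunable controls whether attempted stack executions are logged.
 */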
int segvn_lpg_disable = 0;
uint_t segvn_maxpgszc = 0;

ulong_t segvn_vmpss_clrszc_cnt;
ulong_t segvn_vmpss_clrszc_err;
ulong_t segvn_fltvnpages_clrszc_cnt;
ulong_t segvn_fltvnpages_clrszc_err;
ulong_t segvn_setpgsz_align_err;
ulong_t segvn_setpgsz_anon_align_err;
ulong_t segvn_setpgsz_getattr_err;
ulong_t segvn_setpgsz_eof_err;
ulong_t segvn_faultvnmpss_align_err1;
ulong_t segvn_faultvnmpss_align_err2;
ulong_t segvn_faultvnmpss_align_err3;
ulong_t segvn_faultvnmpss_align_err4;
ulong_t segvn_faultvnmpss_align_err5;
ulong_t	segvn_vmpss_pageio_deadlk_err;

int segvn_use_regions = 1;
/*
 * Segvn supports text replication optimization for NUMA platforms. Text
 * replica's are represented by anon maps (amp). There's one amp per text file
 * region per lgroup. A process chooses the amp for each of its text mappings
 * based on the lgroup assignment of its main thread (t_tid = 1). All
 * processes that want a replica on a particular lgroup for the same text file
 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table
 * with vp,off,size,szc used as a key. Text replication segments are read only
 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
 * pages. Replication amp is assigned to a segment when it gets its first
 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread
 * rechecks periodically if the process still maps an amp local to the main
 * thread. If not async thread forces process to remap to an amp in the new
 * home lgroup of the main thread. Current text replication implementation
 * only provides the benefit to workloads that do most of their work in the
 * main thread of a process or all the threads of a process run in the same
 * lgroup. To extend text replication benefit to different types of
 * multithreaded workloads further work would be needed in the hat layer to
 * allow the same virtual address in the same hat to simultaneously map
 * different physical addresses (i.e. page table replication would be needed
 * for this to work).
 *
 * amp pages are used instead of vnode pages as long as segment has a very
 * simple life cycle.  It's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped.  If anything more complicated
 * happens such as protection is changed, real COW fault happens, pagesize is
 * changed, MC_LOCK is requested or segment is partially unmapped we turn off
 * text replication by converting the segment back to vnode only segment
 * (unmap segment's address range and set svd->amp to NULL).
 *
 * The original file can be changed after amp is inserted into
 * svntr_hashtab. Processes that are launched after the file is already
 * changed can't use the replica's created prior to the file change. To
 * implement this functionality hash entries are timestamped. Replica's can
 * only be used if current file modification time is the same as the timestamp
 * saved when hash entry was created. However just timestamps alone are not
 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
 * deal with file changes via MAP_SHARED mappings differently. When writable
 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
 * existing replica's for this vnode as not usable for future text
 * mappings. And we don't create new replica's for files that currently have
 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
 * true).
 */
#define	SEGVN_TEXTREPL_MAXBYTES_FACTOR	(20)

size_t	segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;

static ulong_t			svntr_hashtab_sz = 512;
static svntr_bucket_t		*svntr_hashtab = NULL;
static struct kmem_cache	*svntr_cache;
static svntr_stats_t		*segvn_textrepl_stats;
static ksema_t			segvn_trasync_sem;

int	segvn_disable_textrepl = 1;
size_t	textrepl_size_thresh = (size_t)-1;
size_t	segvn_textrepl_bytes = 0;
size_t	segvn_textrepl_max_bytes = 0;
clock_t	segvn_update_textrepl_interval = 0;
int	segvn_update_tr_time = 10;
int	segvn_disable_textrepl_update = 0;
static void segvn_textrepl(struct seg *);
static void segvn_textunrepl(struct seg *, int);
static void segvn_inval_trcache(vnode_t *);
static void segvn_trasync_thread(void);
static void segvn_trupdate_wakeup(void *);
static void segvn_trupdate(void);
static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
    ulong_t);
/*
 * Initialize segvn data structures
 */
void
segvn_init(void)
{
	uint_t maxszc;
	uint_t szc;
	size_t pgsz;

	segvn_cache = kmem_cache_create("segvn_cache",
	    sizeof (struct segvn_data), 0,
	    segvn_cache_constructor, segvn_cache_destructor, NULL,
	    NULL, NULL, 0);

	if (segvn_lpg_disable == 0) {
		szc = maxszc = page_num_pagesizes() - 1;
		if (szc == 0) {
			segvn_lpg_disable = 1;
		}
		if (page_get_pagesize(0) != PAGESIZE) {
			panic("segvn_init: bad szc 0");
		}
		while (szc != 0) {
			pgsz = page_get_pagesize(szc);
			if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
				panic("segvn_init: bad szc %d", szc);
			}
			szc--;
		}
		if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
			segvn_maxpgszc = maxszc;
	}

	if (segvn_maxpgszc) {
		segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
		    (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
		    KM_SLEEP);
	}

	for (szc = 1; szc <= segvn_maxpgszc; szc++) {
		char str[32];

		(void) sprintf(str, "segvn_szc_cache%d", szc);
		segvn_szc_cache[szc] = kmem_cache_create(str,
		    page_get_pagecnt(szc) * sizeof (page_t *), 0,
		    NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
	}

	if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
		segvn_use_regions = 0;

	/*
	 * For now shared regions and text replication segvn support
	 * are mutually exclusive. This is acceptable because
	 * currently significant benefit from text replication was
	 * only observed on AMD64 NUMA platforms (due to relatively
	 * small L2$ size) and currently we don't support shared
	 * regions.
	 */
	if (segvn_use_regions && !segvn_disable_textrepl) {
		segvn_disable_textrepl = 1;
	}

	if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
	    !segvn_disable_textrepl) {
		ulong_t i;
		size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);

		svntr_cache = kmem_cache_create("svntr_cache",
		    sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
		    NULL, NULL, NULL, 0);
		svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
		for (i = 0; i < svntr_hashtab_sz; i++) {
			mutex_init(&svntr_hashtab[i].tr_lock, NULL,
			    MUTEX_DEFAULT, NULL);
		}
		segvn_textrepl_max_bytes = ptob(physmem) /
		    segvn_textrepl_max_bytes_factor;
		segvn_textrepl_stats = kmem_zalloc(NCPU *
		    sizeof (svntr_stats_t), KM_SLEEP);
		sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
		(void) thread_create(NULL, 0, segvn_trasync_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}

	if (!ISP2(segvn_pglock_comb_balign) ||
	    segvn_pglock_comb_balign < PAGESIZE) {
		segvn_pglock_comb_balign = 1UL << 16; /* 64K */
	}
	segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
	segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
}
#define	SEGVN_PAGEIO	((void *)0x1)
#define	SEGVN_NOPAGEIO	((void *)0x2)
static void
segvn_setvnode_mpss(vnode_t *vp)
{
	int err;

	ASSERT(vp->v_mpssdata == NULL ||
	    vp->v_mpssdata == SEGVN_PAGEIO ||
	    vp->v_mpssdata == SEGVN_NOPAGEIO);

	if (vp->v_mpssdata == NULL) {
		if (vn_vmpss_usepageio(vp)) {
			err = VOP_PAGEIO(vp, (page_t *)NULL,
			    (u_offset_t)0, 0, 0, CRED(), NULL);
		} else {
			err = ENOSYS;
		}
		/*
		 * set v_mpssdata just once per vnode life
		 * so that it never changes.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_mpssdata == NULL) {
			if (err == EINVAL) {
				vp->v_mpssdata = SEGVN_PAGEIO;
			} else {
				vp->v_mpssdata = SEGVN_NOPAGEIO;
			}
		}
		mutex_exit(&vp->v_lock);
	}
}
int
segvn_create(struct seg *seg, void *argsp)
{
	struct segvn_crargs *a = (struct segvn_crargs *)argsp;
	struct segvn_data *svd;
	size_t swresv = 0;
	struct cred *cred;
	struct anon_map *amp;
	int error = 0;
	size_t pgsz;
	lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
	int use_rgn = 0;
	int trok = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
		panic("segvn_create type");
	}

	/*
	 * Check arguments.  If a shared anon structure is given then
	 * it is illegal to also specify a vp.
	 */
	if (a->amp != NULL && a->vp != NULL) {
		panic("segvn_create anon_map");
	}

	if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
	    a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
	    segvn_use_regions) {
		use_rgn = 1;
	}

	/* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
	if (a->type == MAP_SHARED)
		a->flags &= ~MAP_NORESERVE;

	if (a->szc != 0) {
		if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
		    (a->amp != NULL && a->type == MAP_PRIVATE) ||
		    (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
			a->szc = 0;
		} else {
			if (a->szc > segvn_maxpgszc)
				a->szc = segvn_maxpgszc;
			pgsz = page_get_pagesize(a->szc);
			if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
			    !IS_P2ALIGNED(seg->s_size, pgsz)) {
				a->szc = 0;
			} else if (a->vp != NULL) {
				if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
					/*
					 * hat_page_demote() is not supported
					 * on swapfs pages.
					 */
					a->szc = 0;
				} else if (map_addr_vacalign_check(seg->s_base,
				    a->offset & PAGEMASK)) {
					a->szc = 0;
				}
			} else if (a->amp != NULL) {
				pgcnt_t anum = btopr(a->offset);
				pgcnt_t pgcnt = page_get_pagecnt(a->szc);
				if (!IS_P2ALIGNED(anum, pgcnt)) {
					a->szc = 0;
				}
			}
		}
	}

	/*
	 * If segment may need private pages, reserve them now.
	 */
	if (!(a->flags & MAP_NORESERVE) &&
	    ((a->vp == NULL && a->amp == NULL) ||
	    (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
		if (anon_resv_zone(seg->s_size,
		    seg->s_as->a_proc->p_zone) == 0)
			return (EAGAIN);
		swresv = seg->s_size;
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, swresv, 1);
	}

	/*
	 * Reserve any mapping structures that may be required.
	 *
	 * Don't do it for segments that may use regions. It's currently a
	 * noop in the hat implementations anyway.
	 */
	if (!use_rgn) {
		hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	}

	if (a->cred) {
		cred = a->cred;
		crhold(cred);
	} else {
		crhold(cred = CRED());
	}

	/* Inform the vnode of the new mapping */
	if (a->vp != NULL) {
		error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
		    seg->s_as, seg->s_base, seg->s_size, a->prot,
		    a->maxprot, a->type, cred, NULL);
		if (error) {
			if (swresv != 0) {
				anon_unresv_zone(swresv,
				    seg->s_as->a_proc->p_zone);
				TRACE_3(TR_FAC_VM, TR_ANON_PROC,
				    "anon proc:%p %lu %u", seg, swresv, 0);
			}
			crfree(cred);
			if (!use_rgn) {
				hat_unload(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, HAT_UNLOAD_UNMAP);
			}
			return (error);
		}
		/*
		 * svntr_hashtab will be NULL if we support shared regions.
		 */
		trok = ((a->flags & MAP_TEXT) &&
		    (seg->s_size > textrepl_size_thresh ||
		    (a->flags & _MAP_TEXTREPL)) &&
		    lgrp_optimizations() && svntr_hashtab != NULL &&
		    a->type == MAP_PRIVATE && swresv == 0 &&
		    !(a->flags & MAP_NORESERVE) &&
		    seg->s_as != &kas && a->vp->v_type == VREG);

		ASSERT(!trok || !use_rgn);
	}
	/*
	 * MAP_NORESERVE mappings don't count towards the VSZ of a process
	 * until we fault the pages in.
	 */
	if ((a->vp == NULL || a->vp->v_type != VREG) &&
	    a->flags & MAP_NORESERVE) {
		seg->s_as->a_resvsize -= seg->s_size;
	}

	/*
	 * If more than one segment in the address space, and they're adjacent
	 * virtually, try to concatenate them.  Don't concatenate if an
	 * explicit anon_map structure was supplied (e.g., SystemV shared
	 * memory) or if we'll use text replication for this segment.
	 */
	if (a->amp == NULL && !use_rgn && !trok) {
		struct seg *pseg, *nseg;
		struct segvn_data *psvd, *nsvd;
		lgrp_mem_policy_t ppolicy, npolicy;
		uint_t	lgrp_mem_policy_flags = 0;
		extern lgrp_mem_policy_t lgrp_mem_default_policy;

		/*
		 * Memory policy flags (lgrp_mem_policy_flags) is valid when
		 * extending stack/heap segments.
		 */
		if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
		    !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
			lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
		}
		/*
		 * Get policy when not extending it from another segment
		 */
		mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);

		/*
		 * First, try to concatenate the previous and new segments
		 */
		pseg = AS_SEGPREV(seg->s_as, seg);
		if (pseg != NULL &&
		    pseg->s_base + pseg->s_size == seg->s_base &&
		    pseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from previous segment.
			 * When extension is specified (e.g. for heap) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			psvd = (struct segvn_data *)pseg->s_data;
			ppolicy = psvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_UP) {
				if (ppolicy != lgrp_mem_default_policy) {
					mpolicy = ppolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    pseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == ppolicy &&
			    (pseg->s_size + seg->s_size <=
			    segvn_comb_thrshld || psvd->amp == NULL) &&
			    segvn_extend_prev(pseg, seg, a, swresv) == 0) {
				/*
				 * success! now try to concatenate
				 * with following seg
				 */
				crfree(cred);
				nseg = AS_SEGNEXT(pseg->s_as, pseg);
				if (nseg != NULL && nseg != pseg &&
				    nseg->s_ops == &segvn_ops &&
				    pseg->s_base + pseg->s_size ==
				    nseg->s_base)
					(void) segvn_concat(pseg, nseg, 0);
				ASSERT(pseg->s_szc == 0 ||
				    (a->szc == pseg->s_szc &&
				    IS_P2ALIGNED(pseg->s_base, pgsz) &&
				    IS_P2ALIGNED(pseg->s_size, pgsz)));
				return (0);
			}
		}

		/*
		 * Failed, so try to concatenate with following seg
		 */
		nseg = AS_SEGNEXT(seg->s_as, seg);
		if (nseg != NULL &&
		    seg->s_base + seg->s_size == nseg->s_base &&
		    nseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from next segment.
			 * When extension is specified (e.g. for stack) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			nsvd = (struct segvn_data *)nseg->s_data;
			npolicy = nsvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_DOWN) {
				if (npolicy != lgrp_mem_default_policy) {
					mpolicy = npolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    nseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == npolicy &&
			    segvn_extend_next(seg, nseg, a, swresv) == 0) {
				crfree(cred);
				ASSERT(nseg->s_szc == 0 ||
				    (a->szc == nseg->s_szc &&
				    IS_P2ALIGNED(nseg->s_base, pgsz) &&
				    IS_P2ALIGNED(nseg->s_size, pgsz)));
				return (0);
			}
		}
	}
	if (a->vp != NULL) {
		VN_HOLD(a->vp);
		if (a->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, a->vp);
	}
	svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	seg->s_ops = &segvn_ops;
	seg->s_data = (void *)svd;
	svd->seg = seg;
	svd->vp = a->vp;
	/*
	 * Anonymous mappings have no backing file so the offset is meaningless.
	 */
	svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
	svd->prot = a->prot;
	svd->maxprot = a->maxprot;
	svd->pageprot = 0;
	svd->type = a->type;
	svd->vpage = NULL;
	svd->cred = cred;
	svd->advice = MADV_NORMAL;
	svd->pageadvice = 0;
	svd->flags = (ushort_t)a->flags;
	svd->softlockcnt = 0;
	svd->softlockcnt_sbase = 0;
	svd->softlockcnt_send = 0;
	svd->svn_inz = 0;
	svd->rcookie = HAT_INVALID_REGION_COOKIE;
	svd->pageswap = 0;

	if (a->szc != 0 && a->vp != NULL) {
		segvn_setvnode_mpss(a->vp);
	}
	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
		ASSERT(vn_is_mapped(svd->vp, V_WRITE));
		segvn_inval_trcache(svd->vp);
	}

	amp = a->amp;
	if ((svd->amp = amp) == NULL) {
		svd->anon_index = 0;
		if (svd->type == MAP_SHARED) {
			svd->swresv = 0;
			/*
			 * Shared mappings to a vp need no other setup.
			 * If we have a shared mapping to an anon_map object
			 * which hasn't been allocated yet,  allocate the
			 * struct now so that it will be properly shared
			 * by remembering the swap reservation there.
			 */
			if (a->vp == NULL) {
				svd->amp = anonmap_alloc(seg->s_size, swresv,
				    ANON_SLEEP);
				svd->amp->a_szc = seg->s_szc;
			}
		} else {
			/*
			 * Private mapping (with or without a vp).
			 * Allocate anon_map when needed.
			 */
			svd->swresv = swresv;
		}
	} else {
		pgcnt_t anon_num;

		/*
		 * Mapping to an existing anon_map structure without a vp.
		 * For now we will insure that the segment size isn't larger
		 * than the size - offset gives us.  Later on we may wish to
		 * have the anon array dynamically allocated itself so that
		 * we don't always have to allocate all the anon pointer slots.
		 * This of course involves adding extra code to check that we
		 * aren't trying to use an anon pointer slot beyond the end
		 * of the currently allocated anon array.
		 */
		if ((amp->size - a->offset) < seg->s_size) {
			panic("segvn_create anon_map size");
		}

		anon_num = btopr(a->offset);

		if (a->type == MAP_SHARED) {
			/*
			 * SHARED mapping to a given anon_map.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			if (a->szc > amp->a_szc) {
				amp->a_szc = a->szc;
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index = anon_num;
			svd->swresv = 0;
		} else {
			/*
			 * PRIVATE mapping to a given anon_map.
			 * Make sure that all the needed anon
			 * structures are created (so that we will
			 * share the underlying pages if nothing
			 * is written by this mapping) and then
			 * duplicate the anon array as is done
			 * when a privately mapped segment is dup'ed.
			 */
			struct anon *ap;
			caddr_t addr;
			caddr_t eaddr;
			ulong_t anon_idx;
			int hat_flag = HAT_LOAD;

			if (svd->flags & MAP_TEXT) {
				hat_flag |= HAT_LOAD_TEXT;
			}

			svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
			svd->amp->a_szc = seg->s_szc;
			svd->anon_index = 0;
			svd->swresv = swresv;

			/*
			 * Prevent 2 threads from allocating anon
			 * slots simultaneously.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			eaddr = seg->s_base + seg->s_size;

			for (anon_idx = anon_num, addr = seg->s_base;
			    addr < eaddr; addr += PAGESIZE, anon_idx++) {
				page_t *pp;

				if ((ap = anon_get_ptr(amp->ahp,
				    anon_idx)) != NULL)
					continue;

				/*
				 * Allocate the anon struct now.
				 * Might as well load up translation
				 * to the page while we're at it...
				 */
				pp = anon_zero(seg, addr, &ap, cred);
				if (ap == NULL || pp == NULL) {
					panic("segvn_create anon_zero");
				}

				/*
				 * Re-acquire the anon_map lock and
				 * initialize the anon array entry.
				 */
				ASSERT(anon_get_ptr(amp->ahp,
				    anon_idx) == NULL);
				(void) anon_set_ptr(amp->ahp, anon_idx, ap,
				    ANON_SLEEP);

				ASSERT(seg->s_szc == 0);
				ASSERT(!IS_VMODSORT(pp->p_vnode));

				ASSERT(use_rgn == 0);
				hat_memload(seg->s_as->a_hat, addr, pp,
				    svd->prot & ~PROT_WRITE, hat_flag);

				page_unlock(pp);
			}
			ASSERT(seg->s_szc == 0);
			anon_dup(amp->ahp, anon_num, svd->amp->ahp,
			    0, seg->s_size);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Set default memory allocation policy for segment
	 *
	 * Always set policy for private memory at least for initialization
	 * even if this is a shared memory segment
	 */
	(void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);

	if (svd->type == MAP_SHARED)
		(void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
		    svd->vp, svd->offset, seg->s_size);

	if (use_rgn) {
		ASSERT(!trok);
		ASSERT(svd->amp == NULL);
		svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
		    seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
		    (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
		    HAT_REGION_TEXT);
	}

	ASSERT(!trok || !(svd->prot & PROT_WRITE));
	svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;

	return (0);
}
/*
 * Concatenate two existing segments, if possible.
 * Return 0 on success, -1 if two segments are not compatible
 * or -2 on memory allocation failure.
 * If amp_cat == 1 then try and concat segments with anon maps
 */
static int
segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
{
	struct segvn_data *svd1 = seg1->s_data;
	struct segvn_data *svd2 = seg2->s_data;
	struct anon_map *amp1 = svd1->amp;
	struct anon_map *amp2 = svd2->amp;
	struct vpage *vpage1 = svd1->vpage;
	struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
	size_t size, nvpsize;
	pgcnt_t npages1, npages2;

	ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
	ASSERT(AS_WRITE_HELD(seg1->s_as));
	ASSERT(seg1->s_ops == seg2->s_ops);

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
	    HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* both segments exist, try to merge them */
#define	incompat(x)	(svd1->x != svd2->x)
	if (incompat(vp) || incompat(maxprot) ||
	    (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
	    (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
	    incompat(type) || incompat(cred) || incompat(flags) ||
	    seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
	    (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
		return (-1);

	/*
	 * vp == NULL implies zfod, offset doesn't matter
	 */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != svd2->offset) {
		return (-1);
	}

	/*
	 * Don't concatenate if either segment uses text replication.
	 */
	if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	/*
	 * Fail early if we're not supposed to concatenate
	 * segments with non NULL amp.
	 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != amp2) {
			return (-1);
		}
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}

	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp, *evp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			evp = nvpage + npages1 + npages2;
			for (vp = nvpage + npages1; vp < evp; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}

		if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
			ASSERT(svd1->swresv == seg1->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}

		if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
			ASSERT(svd2->swresv == seg2->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			vp = nvpage + npages1;
			evp = vp + npages2;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT((vpage1 != NULL || vpage2 != NULL) ||
	    (svd1->pageswap == 0 && svd2->pageswap == 0));
	/*
	 * If either segment has private pages, create a new merged anon
	 * array. If mergeing shared anon segments just decrement anon map's
	 * refcnt.
	 */
	if (amp1 != NULL && svd1->type == MAP_SHARED) {
		ASSERT(amp1 == amp2 && svd1->vp == NULL);
		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		ASSERT(amp1->refcnt >= 2);
		amp1->refcnt--;
		ANON_LOCK_EXIT(&amp1->a_rwlock);
		svd2->amp = NULL;
	} else if (amp1 != NULL || amp2 != NULL) {
		struct anon_hdr *nahp;
		struct anon_map *namp = NULL;
		size_t asize;

		ASSERT(svd1->type == MAP_PRIVATE);

		asize = seg1->s_size + seg2->s_size;
		if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
			if (nvpage != NULL) {
				kmem_free(nvpage, nvpsize);
			}
			return (-2);
		}
		if (amp1 != NULL) {
			/*
			 * XXX anon rwlock is not really needed because
			 * this is a private segment and we are writers.
			 */
			ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
			ASSERT(amp1->refcnt == 1);
			if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
			    nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp1->a_rwlock);
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp2 != NULL) {
			ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
			ASSERT(amp2->refcnt == 1);
			if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
			    nahp, btop(seg1->s_size), btop(seg2->s_size),
			    ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				if (amp1 != NULL) {
					ANON_LOCK_EXIT(&amp1->a_rwlock);
				}
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp1 != NULL) {
			namp = amp1;
			anon_release(amp1->ahp, btop(amp1->size));
		}
		if (amp2 != NULL) {
			if (namp == NULL) {
				ASSERT(amp1 == NULL);
				namp = amp2;
			}
			anon_release(amp2->ahp, btop(amp2->size));
			if (namp != amp2) {
				ANON_LOCK_EXIT(&amp2->a_rwlock);
			}
			svd2->amp = NULL;	/* needed for seg_free */
		}
		namp->ahp = nahp;
		namp->size = asize;
		svd1->amp = namp;
		svd1->anon_index = 0;
		ANON_LOCK_EXIT(&namp->a_rwlock);
	}

	/*
	 * Now free the old vpage structures.
	 */
	if (nvpage != NULL) {
		if (vpage1 != NULL) {
			kmem_free(vpage1, vpgtob(npages1));
		}
		if (vpage2 != NULL) {
			svd2->vpage = NULL;
			kmem_free(vpage2, vpgtob(npages2));
		}
		if (svd2->pageprot) {
			svd1->pageprot = 1;
		}
		if (svd2->pageadvice) {
			svd1->pageadvice = 1;
		}
		if (svd2->pageswap) {
			svd1->pageswap = 1;
		}
		svd1->vpage = nvpage;
	}

	/* all looks ok, merge segments */
	svd1->swresv += svd2->swresv;
	svd2->swresv = 0;	/* so seg_free doesn't release swap space */
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	return (0);
}
/*
 * Extend the previous segment (seg1) to include the
 * new segment (seg2 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_prev(seg1, seg2, a, swresv)
	struct seg *seg1, *seg2;
	struct segvn_crargs *a;
	size_t swresv;
{
	struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
	size_t size;
	struct anon_map *amp1;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as));

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
		return (-1);
	}

	/* second segment is new, try to extend first */
	/* XXX - should also check cred */
	if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
	    (!svd1->pageprot && (svd1->prot != a->prot)) ||
	    svd1->type != a->type || svd1->flags != a->flags ||
	    seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
		return (-1);

	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
		return (-1);

	if (svd1->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp1 = svd1->amp;
	if (amp1) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 *
		 * Can't grow a MAP_SHARED segment with an anonmap because
		 * there may be existing anon slots where we want to extend
		 * the segment and we wouldn't know what to do with them
		 * (e.g., for tmpfs right thing is to just leave them there,
		 * for /dev/zero they should be cleared out).
		 */
		if (svd1->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		if (amp1->refcnt > 1) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
		    btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		amp1->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp1->a_rwlock);
	}
	if (svd1->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL)
			return (-1);
		bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
		kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
		svd1->vpage = new_vpage;

		vp = new_vpage + seg_pages(seg1);
		evp = vp + seg_pages(seg2);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd1->pageswap && swresv) {
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(swresv == seg2->s_size);
			vp = new_vpage + seg_pages(seg1);
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	svd1->swresv += swresv;
	if (svd1->pageprot && (a->prot & PROT_WRITE) &&
	    svd1->type == MAP_SHARED && svd1->vp != NULL &&
	    (svd1->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
		segvn_inval_trcache(svd1->vp);
	}
	return (0);
}
/*
 * Extend the next segment (seg2) to include the
 * new segment (seg1 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_next(struct seg *seg1, struct seg *seg2,
    struct segvn_crargs *a, size_t swresv)
{
	struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
	size_t size;
	struct anon_map *amp2;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as));

	if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* first segment is new, try to extend second */
	/* XXX - should also check cred */
	if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
	    (!svd2->pageprot && (svd2->prot != a->prot)) ||
	    svd2->type != a->type || svd2->flags != a->flags ||
	    seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
		return (-1);
	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd2->vp != NULL &&
	    (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
		return (-1);

	if (svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp2 = svd2->amp;
	if (amp2) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 *
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 */
		if (svd2->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
		if (amp2->refcnt > 1) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
		    btop(seg2->s_size), btop(seg1->s_size),
		    ANON_NOSLEEP | ANON_GROWDOWN);
		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		amp2->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp2->a_rwlock);
	}
	if (svd2->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL) {
			/* Not merging segments so adjust anon_index back */
			if (amp2)
				svd2->anon_index += seg_pages(seg1);
			return (-1);
		}
		bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
		    vpgtob(seg_pages(seg2)));
		kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
		svd2->vpage = new_vpage;

		vp = new_vpage;
		evp = vp + seg_pages(seg1);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd2->pageswap && swresv) {
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			ASSERT(swresv == seg1->s_size);
			vp = new_vpage;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
	size = seg1->s_size;
	seg_free(seg1);
	seg2->s_size += size;
	seg2->s_base -= size;
	svd2->offset -= size;
	svd2->swresv += swresv;
	if (svd2->pageprot && (a->prot & PROT_WRITE) &&
	    svd2->type == MAP_SHARED && svd2->vp != NULL &&
	    (svd2->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
		segvn_inval_trcache(svd2->vp);
	}
	return (0);
}
/*
 * Duplicate all the pages in the segment. This may break COW sharing for a
 * given page. If the page is marked with inherit zero set, then instead of
 * duplicating the page, we zero the page.
 */
static int
segvn_dup_pages(struct seg *seg, struct seg *newseg)
{
	int error;
	uint_t prot;
	page_t *pp;
	struct anon *ap, *newap;
	caddr_t addr;
	ulong_t i;
	struct vpage *vpp;
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
	ulong_t old_idx = svd->anon_index;
	ulong_t new_idx = 0;

	i = btopr(seg->s_size);
	addr = seg->s_base;

	/*
	 * XXX break cow sharing using PAGESIZE
	 * pages. They will be relocated into larger
	 * pages at fault time.
	 */
	while (i-- > 0) {
		if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
			uint_t vpprot;

			vpp = &svd->vpage[seg_page(seg, addr)];

			/*
			 * prot need not be computed below 'cause anon_private
			 * is going to ignore it anyway as child doesn't inherit
			 * pagelock from parent.
			 */
			prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;

			/*
			 * Check whether we should zero this or dup it.
			 */
			if (svd->svn_inz == SEGVN_INZ_ALL ||
			    (svd->svn_inz == SEGVN_INZ_VPP &&
			    VPP_ISINHZERO(vpp))) {
				pp = anon_zero(newseg, addr, &newap,
				    newsvd->cred);
			} else {
				page_t *anon_pl[1+1];

				error = anon_getpage(&ap, &vpprot, anon_pl,
				    PAGESIZE, seg, addr, S_READ, svd->cred);
				if (error != 0)
					return (error);

				pp = anon_private(&newap, newseg, addr, prot,
				    anon_pl[0], 0, newsvd->cred);
			}
			if (pp == NULL) {
				return (ENOMEM);
			}
			(void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
			    ANON_SLEEP);
			page_unlock(pp);
		}
		addr += PAGESIZE;
		old_idx++;
		new_idx++;
	}

	return (0);
}
static int
segvn_dup(struct seg *seg, struct seg *newseg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd;
	pgcnt_t npages = seg_pages(seg);
	int error = 0;
	size_t len;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
	ASSERT(newseg->s_as->a_proc->p_parent == curproc);

	/*
	 * If segment has anon reserved, reserve more for the new seg.
	 * For a MAP_NORESERVE segment swresv will be a count of all the
	 * allocated anon slots; thus we reserve for the child as many slots
	 * as the parent has allocated. This semantic prevents the child or
	 * parent from dieing during a copy-on-write fault caused by trying
	 * to write a shared pre-existing anon page.
	 */
	if ((len = svd->swresv) != 0) {
		if (anon_resv(svd->swresv) == 0)
			return (ENOMEM);

		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, len, 0);
	}

	newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	newseg->s_ops = &segvn_ops;
	newseg->s_data = (void *)newsvd;
	newseg->s_szc = seg->s_szc;

	newsvd->seg = newseg;
	if ((newsvd->vp = svd->vp) != NULL) {
		VN_HOLD(svd->vp);
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, svd->vp);
	}
	newsvd->offset = svd->offset;
	newsvd->prot = svd->prot;
	newsvd->maxprot = svd->maxprot;
	newsvd->pageprot = svd->pageprot;
	newsvd->type = svd->type;
	newsvd->cred = svd->cred;
	crhold(newsvd->cred);
	newsvd->advice = svd->advice;
	newsvd->pageadvice = svd->pageadvice;
	newsvd->svn_inz = svd->svn_inz;
	newsvd->swresv = svd->swresv;
	newsvd->pageswap = svd->pageswap;
	newsvd->flags = svd->flags;
	newsvd->softlockcnt = 0;
	newsvd->softlockcnt_sbase = 0;
	newsvd->softlockcnt_send = 0;
	newsvd->policy_info = svd->policy_info;
	newsvd->rcookie = HAT_INVALID_REGION_COOKIE;

	if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
		/*
		 * Not attaching to a shared anon object.
		 */
		ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
		    svd->tr_state == SEGVN_TR_OFF);
		if (svd->tr_state == SEGVN_TR_ON) {
			ASSERT(newsvd->vp != NULL && amp != NULL);
			newsvd->tr_state = SEGVN_TR_INIT;
		} else {
			newsvd->tr_state = svd->tr_state;
		}
		newsvd->amp = NULL;
		newsvd->anon_index = 0;
	} else {
		/* regions for now are only used on pure vnode segments */
		ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		newsvd->tr_state = SEGVN_TR_OFF;
		if (svd->type == MAP_SHARED) {
			ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
			newsvd->amp = amp;
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			newsvd->anon_index = svd->anon_index;
		} else {
			int reclaim = 1;

			/*
			 * Allocate and initialize new anon_map structure.
			 */
			newsvd->amp = anonmap_alloc(newseg->s_size, 0,
			    ANON_SLEEP);
			newsvd->amp->a_szc = newseg->s_szc;
			newsvd->anon_index = 0;
			ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
			    svd->svn_inz == SEGVN_INZ_ALL ||
			    svd->svn_inz == SEGVN_INZ_VPP);

			/*
			 * We don't have to acquire the anon_map lock
			 * for the new segment (since it belongs to an
			 * address space that is still not associated
			 * with any process), or the segment in the old
			 * address space (since all threads in it
			 * are stopped while duplicating the address space).
			 */

			/*
			 * The goal of the following code is to make sure that
			 * softlocked pages do not end up as copy on write
			 * pages.  This would cause problems where one
			 * thread writes to a page that is COW and a different
			 * thread in the same process has softlocked it.  The
			 * softlock lock would move away from this process
			 * because the write would cause this process to get
			 * a copy (without the softlock).
			 *
			 * The strategy here is to just break the
			 * sharing on pages that could possibly be
			 * softlocked.
			 *
			 * In addition, if any pages have been marked that they
			 * should be inherited as zero, then we immediately go
			 * ahead and break COW and zero them. In the case of a
			 * softlocked page that should be inherited zero, we
			 * break COW and just get a zero page.
			 */
			if (svd->softlockcnt ||
			    svd->svn_inz != SEGVN_INZ_NONE) {
				/*
				 * The softlock count might be non zero
				 * because some pages are still stuck in the
				 * cache for lazy reclaim. Flush the cache
				 * now. This should drop the count to zero.
				 * [or there is really I/O going on to these
				 * pages]. Note, we have the writers lock so
				 * nothing gets inserted during the flush.
				 */
				if (svd->softlockcnt && reclaim == 1) {
					segvn_purge(seg);
					reclaim = 0;
				}

				error = segvn_dup_pages(seg, newseg);
				if (error != 0) {
					newsvd->vpage = NULL;
					goto out;
				}
			} else {	/* common case */
				if (seg->s_szc != 0) {
					/*
					 * If at least one of anon slots of a
					 * large page exists then make sure
					 * all anon slots of a large page
					 * exist to avoid partial cow sharing
					 * of a large page in the future.
					 */
					anon_dup_fill_holes(amp->ahp,
					    svd->anon_index, newsvd->amp->ahp,
					    0, seg->s_size, seg->s_szc,
					    svd->vp != NULL);
				} else {
					anon_dup(amp->ahp, svd->anon_index,
					    newsvd->amp->ahp, 0, seg->s_size);
				}

				hat_clrattr(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, PROT_WRITE);
			}
		}
	}
	/*
	 * If necessary, create a vpage structure for the new segment.
	 * Do not copy any page lock indications.
	 */
	if (svd->vpage != NULL) {
		uint_t i;
		struct vpage *ovp = svd->vpage;
		struct vpage *nvp;

		nvp = newsvd->vpage =
		    kmem_alloc(vpgtob(npages), KM_SLEEP);
		for (i = 0; i < npages; i++) {
			*nvp = *ovp++;
			VPP_CLRPPLOCK(nvp++);
		}
	} else
		newsvd->vpage = NULL;

	/* Inform the vnode of the new mapping */
	if (newsvd->vp != NULL) {
		error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
		    newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
		    newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
	}
out:
	if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(newsvd->amp == NULL);
		ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
		newsvd->rcookie = svd->rcookie;
		hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
	}
	return (error);
}
/*
 * callback function to invoke free_vp_pages() for only those pages actually
 * processed by the HAT when a shared region is destroyed.
 */
extern int free_pages;

static void
segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
    size_t r_size, void *r_obj, u_offset_t r_objoff)
{
	u_offset_t off;
	size_t len;
	vnode_t *vp = (vnode_t *)r_obj;

	ASSERT(eaddr > saddr);
	ASSERT(saddr >= r_saddr);
	ASSERT(saddr < r_saddr + r_size);
	ASSERT(eaddr > r_saddr);
	ASSERT(eaddr <= r_saddr + r_size);
	ASSERT(vp != NULL);

	if (!free_pages) {
		return;
	}

	len = eaddr - saddr;
	off = (saddr - r_saddr) + r_objoff;
	free_vp_pages(vp, off, len);
}
/*
 * callback function used by segvn_unmap to invoke free_vp_pages() for only
 * those pages actually processed by the HAT
 */
static void
segvn_hat_unload_callback(hat_callback_t *cb)
{
	struct seg *seg = cb->hcb_data;
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	size_t len;
	u_offset_t off;

	ASSERT(svd->vp != NULL);
	ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
	ASSERT(cb->hcb_start_addr >= seg->s_base);

	len = cb->hcb_end_addr - cb->hcb_start_addr;
	off = cb->hcb_start_addr - seg->s_base;
	free_vp_pages(svd->vp, svd->offset + off, len);
}
/*
 * This function determines the number of bytes of swap reserved by
 * a segment for which per-page accounting is present. It is used to
 * calculate the correct value of a segvn_data's swresv.
 */
static size_t
segvn_count_swap_by_vpages(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	size_t nswappages = 0;

	ASSERT(svd->pageswap);
	ASSERT(svd->vpage != NULL);

	evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];

	for (vp = svd->vpage; vp < evp; vp++) {
		if (VPP_ISSWAPRES(vp))
			nswappages++;
	}

	return (nswappages << PAGESHIFT);
}
static int
segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *nsvd;
	struct seg *nseg;
	struct anon_map *amp;
	pgcnt_t	opages;		/* old segment size in pages */
	pgcnt_t	npages;		/* new segment size in pages */
	pgcnt_t	dpages;		/* pages being deleted (unmapped) */
	hat_callback_t callback;	/* used for free_vp_pages() */
	hat_callback_t *cbp = NULL;
	caddr_t nbase;
	size_t nsize;
	size_t oswresv;
	int reclaim = 1;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	/*
	 * Fail the unmap if pages are SOFTLOCKed through this mapping.
	 * softlockcnt is protected from change by the as write lock.
	 */
retry:
	if (svd->softlockcnt > 0) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);

		/*
		 * If this is shared segment non 0 softlockcnt
		 * means locked pages are still in use.
		 */
		if (svd->type == MAP_SHARED) {
			return (EAGAIN);
		}

		/*
		 * since we do have the writers lock nobody can fill
		 * the cache during the purge. The flush either succeeds
		 * or we still have pending I/Os.
		 */
		if (reclaim == 1) {
			segvn_purge(seg);
			reclaim = 0;
			goto retry;
		}
		return (EAGAIN);
	}

	/*
	 * Check for bad sizes
	 */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
		panic("segvn_unmap");
	}

	if (seg->s_szc != 0) {
		size_t pgsz = page_get_pagesize(seg->s_szc);
		int err;

		if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
			ASSERT(seg->s_base != addr || seg->s_size != len);
			if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
				hat_leave_region(seg->s_as->a_hat,
				    svd->rcookie, HAT_REGION_TEXT);
				svd->rcookie = HAT_INVALID_REGION_COOKIE;
				/*
				 * could pass a flag to segvn_demote_range()
				 * below to tell it not to do any unloads but
				 * this case is rare enough to not bother for
				 * now.
				 */
			} else if (svd->tr_state == SEGVN_TR_INIT) {
				svd->tr_state = SEGVN_TR_OFF;
			} else if (svd->tr_state == SEGVN_TR_ON) {
				ASSERT(svd->amp != NULL);
				segvn_textunrepl(seg, 1);
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
			}
			VM_STAT_ADD(segvnvmstats.demoterange[0]);
			err = segvn_demote_range(seg, addr, len, SDR_END, 0);
			if (err == 0) {
				return (IE_RETRY);
			}
			return (err);
		}
	}
	/* Inform the vnode of the unmapping. */
	if (svd->vp) {
		int error;

		error = VOP_DELMAP(svd->vp,
		    (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
		    seg->s_as, addr, len, svd->prot, svd->maxprot,
		    svd->type, svd->cred, NULL);

		if (error == EAGAIN)
			return (error);
	}

	/*
	 * Remove any page locks set through this mapping.
	 * If text replication is not off no page locks could have been
	 * established via this mapping.
	 */
	if (svd->tr_state == SEGVN_TR_OFF) {
		(void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
	}

	if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(svd->amp == NULL);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		ASSERT(svd->type == MAP_PRIVATE);
		hat_leave_region(seg->s_as->a_hat, svd->rcookie,
		    HAT_REGION_TEXT);
		svd->rcookie = HAT_INVALID_REGION_COOKIE;
	} else if (svd->tr_state == SEGVN_TR_ON) {
		ASSERT(svd->amp != NULL);
		ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
		segvn_textunrepl(seg, 1);
		ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
	}

	if (svd->tr_state != SEGVN_TR_OFF) {
		ASSERT(svd->tr_state == SEGVN_TR_INIT);
		svd->tr_state = SEGVN_TR_OFF;
	}

	/*
	 * Unload any hardware translations in the range to be taken
	 * out. Use a callback to invoke free_vp_pages() effectively.
	 */
	if (svd->vp != NULL && free_pages != 0) {
		callback.hcb_data = seg;
		callback.hcb_function = segvn_hat_unload_callback;
		cbp = &callback;
	}
	hat_unload_callback(seg->s_as->a_hat, addr, len,
	    HAT_UNLOAD_UNMAP, cbp);

	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) &&
	    ((svd->prot & PROT_WRITE) || svd->pageprot)) {
		segvn_inval_trcache(svd->vp);
	}
	/*
	 * Check for entire segment
	 */
	if (addr == seg->s_base && len == seg->s_size) {
		seg_free(seg);
		return (0);
	}

	opages = seg_pages(seg);
	dpages = btop(len);
	npages = opages - dpages;
	amp = svd->amp;
	ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);

	/*
	 * Check for beginning of segment
	 */
	if (addr == seg->s_base) {
		if (svd->vpage != NULL) {
			size_t nbytes;
			struct vpage *ovpage;

			ovpage = svd->vpage;	/* keep pointer to vpage */

			nbytes = vpgtob(npages);
			svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
			bcopy(&ovpage[dpages], svd->vpage, nbytes);

			/* free up old vpage */
			kmem_free(ovpage, vpgtob(opages));
		}
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				if (svd->type == MAP_SHARED) {
					ASSERT(amp->refcnt == 1);
					ASSERT(svd->softlockcnt == 0);
					anonmap_purge(amp);
				}

				/*
				 * Free up now unused parts of anon_map array.
				 */
				if (amp->a_szc == seg->s_szc) {
					if (seg->s_szc != 0) {
						anon_free_pages(amp->ahp,
						    svd->anon_index, len,
						    seg->s_szc);
					} else {
						anon_free(amp->ahp,
						    svd->anon_index, len);
					}
				} else {
					ASSERT(svd->type == MAP_SHARED);
					ASSERT(amp->a_szc > seg->s_szc);
					anon_shmap_free_pages(amp,
					    svd->anon_index, len);
				}

				/*
				 * Unreserve swap space for the
				 * unmapped chunk of this segment in
				 * case it's MAP_SHARED
				 */
				if (svd->type == MAP_SHARED) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
				}
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index += dpages;
		}
		if (svd->vp != NULL)
			svd->offset += len;

		seg->s_base += len;
		seg->s_size -= len;

		if (svd->swresv) {
			if (svd->flags & MAP_NORESERVE) {
				ASSERT(amp);
				oswresv = svd->swresv;
				svd->swresv = ptob(anon_pages(amp->ahp,
				    svd->anon_index, npages));
				anon_unresv_zone(oswresv - svd->swresv,
				    seg->s_as->a_proc->p_zone);
				if (SEG_IS_PARTIAL_RESV(seg))
					seg->s_as->a_resvsize -= oswresv -
					    svd->swresv;
			} else {
				size_t unlen;

				if (svd->pageswap) {
					oswresv = svd->swresv;
					svd->swresv =
					    segvn_count_swap_by_vpages(seg);
					ASSERT(oswresv >= svd->swresv);
					unlen = oswresv - svd->swresv;
				} else {
					svd->swresv -= len;
					ASSERT(svd->swresv == seg->s_size);
					unlen = len;
				}
				anon_unresv_zone(unlen,
				    seg->s_as->a_proc->p_zone);
			}
			TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
			    seg, len, 0);
		}

		return (0);
	}
	/*
	 * Check for end of segment
	 */
	if (addr + len == seg->s_base + seg->s_size) {
		if (svd->vpage != NULL) {
			size_t nbytes;
			struct vpage *ovpage;

			ovpage = svd->vpage;	/* keep pointer to vpage */

			nbytes = vpgtob(npages);
			svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
			bcopy(ovpage, svd->vpage, nbytes);

			/* free up old vpage */
			kmem_free(ovpage, vpgtob(opages));
		}
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
				/*
				 * Free up now unused parts of anon_map array.
				 */
				ulong_t an_idx = svd->anon_index + npages;

				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				if (svd->type == MAP_SHARED) {
					ASSERT(amp->refcnt == 1);
					ASSERT(svd->softlockcnt == 0);
					anonmap_purge(amp);
				}

				if (amp->a_szc == seg->s_szc) {
					if (seg->s_szc != 0) {
						anon_free_pages(amp->ahp,
						    an_idx, len,
						    seg->s_szc);
					} else {
						anon_free(amp->ahp, an_idx,
						    len);
					}
				} else {
					ASSERT(svd->type == MAP_SHARED);
					ASSERT(amp->a_szc > seg->s_szc);
					anon_shmap_free_pages(amp,
					    an_idx, len);
				}

				/*
				 * Unreserve swap space for the
				 * unmapped chunk of this segment in
				 * case it's MAP_SHARED
				 */
				if (svd->type == MAP_SHARED) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
				}
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}

		seg->s_size -= len;

		if (svd->swresv) {
			if (svd->flags & MAP_NORESERVE) {
				ASSERT(amp);
				oswresv = svd->swresv;
				svd->swresv = ptob(anon_pages(amp->ahp,
				    svd->anon_index, npages));
				anon_unresv_zone(oswresv - svd->swresv,
				    seg->s_as->a_proc->p_zone);
				if (SEG_IS_PARTIAL_RESV(seg))
					seg->s_as->a_resvsize -= oswresv -
					    svd->swresv;
			} else {
				size_t unlen;

				if (svd->pageswap) {
					oswresv = svd->swresv;
					svd->swresv =
					    segvn_count_swap_by_vpages(seg);
					ASSERT(oswresv >= svd->swresv);
					unlen = oswresv - svd->swresv;
				} else {
					svd->swresv -= len;
					ASSERT(svd->swresv == seg->s_size);
					unlen = len;
				}
				anon_unresv_zone(unlen,
				    seg->s_as->a_proc->p_zone);
			}
			TRACE_3(TR_FAC_VM, TR_ANON_PROC,
			    "anon proc:%p %lu %u", seg, len, 0);
		}

		return (0);
	}
	/*
	 * The section to go is in the middle of the segment,
	 * have to make it into two segments.  nseg is made for
	 * the high end while seg is cut down at the low end.
	 */
	nbase = addr + len;				/* new seg base */
	nsize = (seg->s_base + seg->s_size) - nbase;	/* new seg size */
	seg->s_size = addr - seg->s_base;		/* shrink old seg */
	nseg = seg_alloc(seg->s_as, nbase, nsize);
	if (nseg == NULL)
		panic("segvn_unmap seg_alloc");
	nseg->s_ops = seg->s_ops;
	nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
	nseg->s_data = (void *)nsvd;
	nseg->s_szc = seg->s_szc;
	*nsvd = *svd;
	nsvd->seg = nseg;
	nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
	nsvd->swresv = 0;
	nsvd->softlockcnt = 0;
	nsvd->softlockcnt_sbase = 0;
	nsvd->softlockcnt_send = 0;
	nsvd->svn_inz = svd->svn_inz;
	ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);

	if (svd->vp != NULL) {
		VN_HOLD(nsvd->vp);
		if (nsvd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, nsvd->vp);
	}
	crhold(svd->cred);

	if (svd->vpage == NULL) {
		nsvd->vpage = NULL;
	} else {
		/* need to split vpage into two arrays */
		size_t nbytes;
		struct vpage *ovpage;

		ovpage = svd->vpage;		/* keep pointer to vpage */

		npages = seg_pages(seg);	/* seg has shrunk */
		nbytes = vpgtob(npages);
		svd->vpage = kmem_alloc(nbytes, KM_SLEEP);

		bcopy(ovpage, svd->vpage, nbytes);

		npages = seg_pages(nseg);
		nbytes = vpgtob(npages);
		nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);

		bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);

		/* free up old vpage */
		kmem_free(ovpage, vpgtob(opages));
	}

	if (amp == NULL) {
		nsvd->amp = NULL;
		nsvd->anon_index = 0;
	} else {
		/*
		 * Need to create a new anon map for the new segment.
		 * We'll also allocate a new smaller array for the old
		 * smaller segment to save space.
		 */
		opages = btop((uintptr_t)(addr - seg->s_base));
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
			/*
			 * Free up now unused parts of anon_map array.
			 */
			ulong_t an_idx = svd->anon_index + opages;

			/*
			 * Shared anon map is no longer in use. Before
			 * freeing its pages purge all entries from
			 * pcache that belong to this amp.
			 */
			if (svd->type == MAP_SHARED) {
				ASSERT(amp->refcnt == 1);
				ASSERT(svd->softlockcnt == 0);
				anonmap_purge(amp);
			}

			if (amp->a_szc == seg->s_szc) {
				if (seg->s_szc != 0) {
					anon_free_pages(amp->ahp, an_idx, len,
					    seg->s_szc);
				} else {
					anon_free(amp->ahp, an_idx,
					    len);
				}
			} else {
				ASSERT(svd->type == MAP_SHARED);
				ASSERT(amp->a_szc > seg->s_szc);
				anon_shmap_free_pages(amp, an_idx, len);
			}

			/*
			 * Unreserve swap space for the
			 * unmapped chunk of this segment in
			 * case it's MAP_SHARED
			 */
			if (svd->type == MAP_SHARED) {
				anon_unresv_zone(len,
				    seg->s_as->a_proc->p_zone);
			}
		}
		nsvd->anon_index = svd->anon_index +
		    btop((uintptr_t)(nseg->s_base - seg->s_base));
		if (svd->type == MAP_SHARED) {
			amp->refcnt++;
			nsvd->amp = amp;
		} else {
			struct anon_map *namp;
			struct anon_hdr *nahp;

			ASSERT(svd->type == MAP_PRIVATE);
			nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
			namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
			namp->a_szc = seg->s_szc;
			(void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
			    0, btop(seg->s_size), ANON_SLEEP);
			(void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
			    namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
			anon_release(amp->ahp, btop(amp->size));
			svd->anon_index = 0;
			nsvd->anon_index = 0;
			amp->ahp = nahp;
			amp->size = seg->s_size;
			nsvd->amp = namp;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
	}
	if (svd->swresv) {
		if (svd->flags & MAP_NORESERVE) {
			ASSERT(amp != NULL);
			oswresv = svd->swresv;
			svd->swresv = ptob(anon_pages(amp->ahp,
			    svd->anon_index, btop(seg->s_size)));
			nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
			    nsvd->anon_index, btop(nseg->s_size)));
			ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
			anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
			    seg->s_as->a_proc->p_zone);
			if (SEG_IS_PARTIAL_RESV(seg))
				seg->s_as->a_resvsize -= oswresv -
				    (svd->swresv + nsvd->swresv);
		} else {
			size_t unlen;

			if (svd->pageswap) {
				oswresv = svd->swresv;
				svd->swresv = segvn_count_swap_by_vpages(seg);
				nsvd->swresv = segvn_count_swap_by_vpages(nseg);
				ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
				unlen = oswresv - (svd->swresv + nsvd->swresv);
			} else {
				if (seg->s_size + nseg->s_size + len !=
				    svd->swresv) {
					panic("segvn_unmap: cannot split "
					    "swap reservation");
				}
				svd->swresv = seg->s_size;
				nsvd->swresv = nseg->s_size;
				unlen = len;
			}
			anon_unresv_zone(unlen,
			    seg->s_as->a_proc->p_zone);
		}
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, len, 0);
	}

	return (0);			/* I'm glad that's all over with! */
}
static void
segvn_free(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	pgcnt_t npages = seg_pages(seg);
	struct anon_map *amp;
	size_t len;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
	ASSERT(svd->tr_state == SEGVN_TR_OFF);

	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);

	/*
	 * Be sure to unlock pages. XXX Why do things get free'ed instead
	 * of cached?
	 */
	(void) segvn_lockop(seg, seg->s_base, seg->s_size,
	    0, MC_UNLOCK, NULL, 0);

	/*
	 * Deallocate the vpage and anon pointers if necessary and possible.
	 */
	if (svd->vpage != NULL) {
		kmem_free(svd->vpage, vpgtob(npages));
		svd->vpage = NULL;
	}

	if ((amp = svd->amp) != NULL) {
		/*
		 * If there are no more references to this anon_map
		 * structure, then deallocate the structure after freeing
		 * up all the anon slot pointers that we can.
		 */
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		ASSERT(amp->a_szc >= seg->s_szc);
		if (--amp->refcnt == 0) {
			if (svd->type == MAP_PRIVATE) {
				/*
				 * Private - we only need to anon_free
				 * the part that this segment refers to.
				 */
				if (seg->s_szc != 0) {
					anon_free_pages(amp->ahp,
					    svd->anon_index, seg->s_size,
					    seg->s_szc);
				} else {
					anon_free(amp->ahp, svd->anon_index,
					    seg->s_size);
				}
			} else {
				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				ASSERT(svd->softlockcnt == 0);
				anonmap_purge(amp);

				/*
				 * Shared - anon_free the entire
				 * anon_map's worth of stuff and
				 * release any swap reservation.
				 */
				if (amp->a_szc != 0) {
					anon_shmap_free_pages(amp, 0,
					    amp->size);
				} else {
					anon_free(amp->ahp, 0, amp->size);
				}
				if ((len = amp->swresv) != 0) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
					TRACE_3(TR_FAC_VM, TR_ANON_PROC,
					    "anon proc:%p %lu %u", seg, len, 0);
				}
			}
			svd->amp = NULL;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			anonmap_free(amp);
		} else if (svd->type == MAP_PRIVATE) {
			/*
			 * We had a private mapping which still has
			 * a held anon_map so just free up all the
			 * anon slot pointers that we were using.
			 */
			if (seg->s_szc != 0) {
				anon_free_pages(amp->ahp, svd->anon_index,
				    seg->s_size, seg->s_szc);
			} else {
				anon_free(amp->ahp, svd->anon_index,
				    seg->s_size);
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Release swap reservation.
	 */
	if ((len = svd->swresv) != 0) {
		anon_unresv_zone(svd->swresv,
		    seg->s_as->a_proc->p_zone);
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, len, 0);
		if (SEG_IS_PARTIAL_RESV(seg))
			seg->s_as->a_resvsize -= svd->swresv;
		svd->swresv = 0;
	}

	/*
	 * Release claim on vnode, credentials, and finally free the
	 * private data.
	 */
	if (svd->vp != NULL) {
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_fini(NULL, svd->vp);
		VN_RELE(svd->vp);
		svd->vp = NULL;
	}
	crfree(svd->cred);
	svd->cred = NULL;
	svd->pageadvice = 0;

	/*
	 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
	 * still working with this segment without holding as lock (in case
	 * it's called by pcache async thread).
	 */
	ASSERT(svd->softlockcnt == 0);
	mutex_enter(&svd->segfree_syncmtx);
	mutex_exit(&svd->segfree_syncmtx);

	kmem_cache_free(segvn_cache, svd);
}
/*
 * Do a F_SOFTUNLOCK call over the range requested.  The range must have
 * already been F_SOFTLOCK'ed.
 * Caller must always match addr and len of a softunlock with a previous
 * softlock with exactly the same addr and len.
 */
static void
segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	page_t *pp;
	caddr_t adr;
	struct vnode *vp;
	u_offset_t offset;
	ulong_t anon_index = 0;
	struct anon_map *amp;
	struct anon *ap = NULL;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));

	if ((amp = svd->amp) != NULL)
		anon_index = svd->anon_index + seg_page(seg, addr);

	if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
	} else {
		hat_unlock(seg->s_as->a_hat, addr, len);
	}
	for (adr = addr; adr < addr + len; adr += PAGESIZE) {
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			if ((ap = anon_get_ptr(amp->ahp, anon_index++))
			    != NULL) {
				swap_xlate(ap, &vp, &offset);
			} else {
				vp = svd->vp;
				offset = svd->offset +
				    (uintptr_t)(adr - seg->s_base);
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			vp = svd->vp;
			offset = svd->offset +
			    (uintptr_t)(adr - seg->s_base);
		}

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it is locked.
		 */
		pp = page_find(vp, offset);
		if (pp == NULL) {
			panic(
			    "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
			    (void *)adr, (void *)ap, (void *)vp, offset);
			/*NOTREACHED*/
		}

		if (rw == S_WRITE) {
			hat_setrefmod(pp);
			if (seg->s_as->a_vbits)
				hat_setstat(seg->s_as, adr, PAGESIZE,
				    P_REF | P_MOD);
		} else if (rw != S_OTHER) {
			hat_setref(pp);
			if (seg->s_as->a_vbits)
				hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
		}
		TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
		    "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
		page_unlock(pp);
	}
	ASSERT(svd->softlockcnt >= btop(len));
	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
		/*
		 * All SOFTLOCKS are gone. Wakeup any waiting
		 * unmappers so they can try again to unmap.
		 * Check for waiters first without the mutex
		 * held so we don't always grab the mutex on
		 * softlocks.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}
}
#define	PAGE_HANDLED	((page_t *)-1)

/*
 * Release all the pages in the NULL terminated ppp list
 * which haven't already been converted to PAGE_HANDLED.
 */
static void
segvn_pagelist_rele(page_t **ppp)
{
	for (; *ppp != NULL; ppp++) {
		if (*ppp != PAGE_HANDLED)
			page_unlock(*ppp);
	}
}

static int stealcow = 1;

/*
 * Workaround for viking chip bug.  See bug id 1220902.
 * To fix this down in pagefault() would require importing so
 * much as and segvn code as to be unmaintainable.
 */
int enable_mbit_wa = 0;
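
/*
 * When enable_mbit_wa is set, segvn_faultpage() below simulates hardware
 * ref/mod bit maintenance in software: a write fault marks the page
 * modified via hat_setmod(), while faults on pages that are not yet
 * modified are mapped without PROT_WRITE so that the first store is
 * forced to fault again and record the modification.
 */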
/*
 * Handles all the dirty work of getting the right
 * anonymous pages and loading up the translations.
 * This routine is called only from segvn_fault()
 * when looping over the range of addresses requested.
 *
 * The basic algorithm here is:
 *	If this is an anon_zero case
 *		Call anon_zero to allocate page
 *		Load up translation
 *		Return
 *	endif
 *	If this is an anon page
 *		Use anon_getpage to get the page
 *	else
 *		Find page in pl[] list passed in
 *	endif
 *	If not a copy-on-write case
 *		Load up the translation to the page
 *		return
 *	endif
 *	Call anon_private to handle cow
 *	Load up (writable) translation to new page
 */
static faultcode_t
segvn_faultpage(
	struct hat *hat,		/* the hat to use for mapping */
	struct seg *seg,		/* seg_vn of interest */
	caddr_t addr,			/* address in as */
	u_offset_t off,			/* offset in vp */
	struct vpage *vpage,		/* pointer to vpage for vp, off */
	page_t *pl[],			/* object source page pointer */
	uint_t vpprot,			/* access allowed to object pages */
	enum fault_type type,		/* type of fault */
	enum seg_rw rw,			/* type of access at fault */
	int brkcow)			/* we may need to break cow */
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	page_t *pp;
	uint_t pageflags = 0;
	page_t *anon_pl[1 + 1];
	page_t *opp = NULL;		/* original page */
	uint_t prot, protchk;
	int err;
	struct anon *ap, *oldap;
	struct anon_map *amp;
	int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
	anon_sync_obj_t cookie;

	if (svd->flags & MAP_TEXT) {
		hat_flag |= HAT_LOAD_TEXT;
	}

	ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
	ASSERT(seg->s_szc == 0);
	ASSERT(svd->tr_state != SEGVN_TR_INIT);

	/*
	 * Initialize protection value for this page.
	 * If we have per page protection values check it now.
	 */
	if (svd->pageprot) {
		switch (rw) {
		case S_READ:
			protchk = PROT_READ;
			break;
		case S_WRITE:
			protchk = PROT_WRITE;
			break;
		case S_EXEC:
			protchk = PROT_EXEC;
			break;
		case S_OTHER:
		default:
			protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
			break;
		}

		prot = VPP_PROT(vpage);
		if ((prot & protchk) == 0)
			return (FC_PROT);	/* illegal access type */
	} else {
		prot = svd->prot;
	}
2767 if (type
== F_SOFTLOCK
) {
2768 atomic_inc_ulong((ulong_t
*)&svd
->softlockcnt
);
2772 * Always acquire the anon array lock to prevent 2 threads from
2773 * allocating separate anon slots for the same "addr".
2776 if ((amp
= svd
->amp
) != NULL
) {
2777 ASSERT(RW_READ_HELD(&
->a_rwlock
));
2778 anon_index
= svd
->anon_index
+ seg_page(seg
, addr
);
2779 anon_array_enter(amp
, anon_index
, &cookie
);
2783 if (svd
->vp
== NULL
&& amp
!= NULL
) {
2784 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) == NULL
) {
2786 * Allocate a (normally) writable anonymous page of
2787 * zeroes. If no advance reservations, reserve now.
2789 if (svd
->flags
& MAP_NORESERVE
) {
2790 if (anon_resv_zone(ptob(1),
2791 seg
->s_as
->a_proc
->p_zone
)) {
2792 atomic_add_long(&svd
->swresv
, ptob(1));
2793 atomic_add_long(&seg
->s_as
->a_resvsize
,
2800 if ((pp
= anon_zero(seg
, addr
, &ap
,
2801 svd
->cred
)) == NULL
) {
2803 goto out
; /* out of swap space */
2806 * Re-acquire the anon_map lock and
2807 * initialize the anon array entry.
2809 (void) anon_set_ptr(amp
->ahp
, anon_index
, ap
,
2812 ASSERT(pp
->p_szc
== 0);
2815 * Handle pages that have been marked for migration
2817 if (lgrp_optimizations())
2818 page_migrate(seg
, addr
, &pp
, 1);
2820 if (enable_mbit_wa
) {
2823 else if (!hat_ismod(pp
))
2824 prot
&= ~PROT_WRITE
;
2827 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2828 * with MC_LOCKAS, MCL_FUTURE) and this is a
2829 * MAP_NORESERVE segment, we may need to
2830 * permanently lock the page as it is being faulted
2831 * for the first time. The following text applies
2832 * only to MAP_NORESERVE segments:
2834 * As per memcntl(2), if this segment was created
2835 * after MCL_FUTURE was applied (a "future"
2836 * segment), its pages must be locked. If this
2837 * segment existed at MCL_FUTURE application (a
2838 * "past" segment), the interface is unclear.
2840 * We decide to lock only if vpage is present:
2842 * - "future" segments will have a vpage array (see
2843 * as_map), and so will be locked as required
2845 * - "past" segments may not have a vpage array,
2846 * depending on whether events (such as
2847 * mprotect) have occurred. Locking if vpage
2848 * exists will preserve legacy behavior. Not
2849 * locking if vpage is absent, will not break
2850 * the interface or legacy behavior. Note that
2851 * allocating vpage here if it's absent requires
2852 * upgrading the segvn reader lock, the cost of
2853 * which does not seem worthwhile.
2855 * Usually testing and setting VPP_ISPPLOCK and
2856 * VPP_SETPPLOCK requires holding the segvn lock as
2857 * writer, but in this case all readers are
2858 * serializing on the anon array lock.
2860 if (AS_ISPGLCK(seg
->s_as
) && vpage
!= NULL
&&
2861 (svd
->flags
& MAP_NORESERVE
) &&
2862 !VPP_ISPPLOCK(vpage
)) {
2863 proc_t
*p
= seg
->s_as
->a_proc
;
2864 ASSERT(svd
->type
== MAP_PRIVATE
);
2865 mutex_enter(&p
->p_lock
);
2866 if (rctl_incr_locked_mem(p
, NULL
, PAGESIZE
,
2868 claim
= VPP_PROT(vpage
) & PROT_WRITE
;
2869 if (page_pp_lock(pp
, claim
, 0)) {
2870 VPP_SETPPLOCK(vpage
);
2872 rctl_decr_locked_mem(p
, NULL
,
2876 mutex_exit(&p
->p_lock
);
2879 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2880 hat_memload(hat
, addr
, pp
, prot
, hat_flag
);
2882 if (!(hat_flag
& HAT_LOAD_LOCK
))
2885 anon_array_exit(&cookie
);
2891 * Obtain the page structure via anon_getpage() if it is
2892 * a private copy of an object (the result of a previous
2896 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) != NULL
) {
2897 err
= anon_getpage(&ap
, &vpprot
, anon_pl
, PAGESIZE
,
2898 seg
, addr
, rw
, svd
->cred
);
2902 if (svd
->type
== MAP_SHARED
) {
2904 * If this is a shared mapping to an
2905 * anon_map, then ignore the write
2906 * permissions returned by anon_getpage().
2907 * They apply to the private mappings
2910 vpprot
|= PROT_WRITE
;
2917 * Search the pl[] list passed in if it is from the
2918 * original object (i.e., not a private copy).
2922 * Find original page. We must be bringing it in
2923 * from the list in pl[].
2925 for (ppp
= pl
; (opp
= *ppp
) != NULL
; ppp
++) {
2926 if (opp
== PAGE_HANDLED
)
2928 ASSERT(opp
->p_vnode
== svd
->vp
); /* XXX */
2929 if (opp
->p_offset
== off
)
2933 panic("segvn_faultpage not found");
2936 *ppp
= PAGE_HANDLED
;
2940 ASSERT(PAGE_LOCKED(opp
));
2942 TRACE_3(TR_FAC_VM
, TR_SEGVN_FAULT
,
2943 "segvn_fault:pp %p vp %p offset %llx", opp
, NULL
, 0);
2946 * The fault is treated as a copy-on-write fault if a
2947 * write occurs on a private segment and the object
2948 * page (i.e., mapping) is write protected. We assume
2949 * that fatal protection checks have already been made.
2953 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
2954 cow
= !(vpprot
& PROT_WRITE
);
2955 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
2957 * If we are doing text replication COW on first touch.
2959 ASSERT(amp
!= NULL
);
2960 ASSERT(svd
->vp
!= NULL
);
2961 ASSERT(rw
!= S_WRITE
);
2968 * If not a copy-on-write case load the translation
2974 * Handle pages that have been marked for migration
2976 if (lgrp_optimizations())
2977 page_migrate(seg
, addr
, &opp
, 1);
2979 if (IS_VMODSORT(opp
->p_vnode
) || enable_mbit_wa
) {
2982 else if (rw
!= S_OTHER
&& !hat_ismod(opp
))
2983 prot
&= ~PROT_WRITE
;
2986 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
||
2987 (!svd
->pageprot
&& svd
->prot
== (prot
& vpprot
)));
2988 ASSERT(amp
== NULL
||
2989 svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2990 hat_memload_region(hat
, addr
, opp
, prot
& vpprot
, hat_flag
,
2993 if (!(hat_flag
& HAT_LOAD_LOCK
))
2997 anon_array_exit(&cookie
);
3002 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3006 ASSERT(amp
!= NULL
&& anon_lock
);
3009 * Steal the page only if it isn't a private page
3010 * since stealing a private page is not worth the effort.
3012 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) == NULL
)
3016 * Steal the original page if the following conditions are true:
3018 * We are low on memory, the page is not private, page is not large,
3019 * not shared, not modified, not `locked' or if we have it `locked'
3020 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
3021 * that the page is not shared) and if it doesn't have any
3022 * translations. page_struct_lock isn't needed to look at p_cowcnt
3023 * and p_lckcnt because we first get exclusive lock on page.
3025 (void) hat_pagesync(opp
, HAT_SYNC_DONTZERO
| HAT_SYNC_STOPON_MOD
);
3027 if (stealcow
&& freemem
< minfree
&& steal
&& opp
->p_szc
== 0 &&
3028 page_tryupgrade(opp
) && !hat_ismod(opp
) &&
3029 ((opp
->p_lckcnt
== 0 && opp
->p_cowcnt
== 0) ||
3030 (opp
->p_lckcnt
== 0 && opp
->p_cowcnt
== 1 &&
3031 vpage
!= NULL
&& VPP_ISPPLOCK(vpage
)))) {
3033 * Check if this page has other translations
3034 * after unloading our translation.
3036 if (hat_page_is_mapped(opp
)) {
3037 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3038 hat_unload(seg
->s_as
->a_hat
, addr
, PAGESIZE
,
3043 * hat_unload() might sync back someone else's recent
3044 * modification, so check again.
3046 if (!hat_ismod(opp
) && !hat_page_is_mapped(opp
))
3047 pageflags
|= STEAL_PAGE
;
3051 * If we have a vpage pointer, see if it indicates that we have
3052 * ``locked'' the page we map -- if so, tell anon_private to
3053 * transfer the locking resource to the new page.
3055 * See Statement at the beginning of segvn_lockop regarding
3056 * the way lockcnts/cowcnts are handled during COW.
3059 if (vpage
!= NULL
&& VPP_ISPPLOCK(vpage
))
3060 pageflags
|= LOCK_PAGE
;
3063 * Allocate a private page and perform the copy.
3064 * For MAP_NORESERVE reserve swap space now, unless this
3065 * is a cow fault on an existing anon page in which case
3066 * MAP_NORESERVE will have made advance reservations.
3068 if ((svd
->flags
& MAP_NORESERVE
) && (ap
== NULL
)) {
3069 if (anon_resv_zone(ptob(1), seg
->s_as
->a_proc
->p_zone
)) {
3070 atomic_add_long(&svd
->swresv
, ptob(1));
3071 atomic_add_long(&seg
->s_as
->a_resvsize
, ptob(1));
3079 pp
= anon_private(&ap
, seg
, addr
, prot
, opp
, pageflags
, svd
->cred
);
3081 err
= ENOMEM
; /* out of swap space */
3086 * If we copied away from an anonymous page, then
3087 * we are one step closer to freeing up an anon slot.
3089 * NOTE: The original anon slot must be released while
3090 * holding the "anon_map" lock. This is necessary to prevent
3091 * other threads from obtaining a pointer to the anon slot
3092 * which may be freed if its "refcnt" is 1.
3097 (void) anon_set_ptr(amp
->ahp
, anon_index
, ap
, ANON_SLEEP
);
3100 * Handle pages that have been marked for migration
3102 if (lgrp_optimizations())
3103 page_migrate(seg
, addr
, &pp
, 1);
3105 ASSERT(pp
->p_szc
== 0);
3107 ASSERT(!IS_VMODSORT(pp
->p_vnode
));
3108 if (enable_mbit_wa
) {
3111 else if (!hat_ismod(pp
))
3112 prot
&= ~PROT_WRITE
;
3115 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3116 hat_memload(hat
, addr
, pp
, prot
, hat_flag
);
3118 if (!(hat_flag
& HAT_LOAD_LOCK
))
3122 anon_array_exit(&cookie
);
3126 anon_array_exit(&cookie
);
3128 if (type
== F_SOFTLOCK
) {
3129 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt
);
3131 return (FC_MAKE_ERR(err
));
/*
 * relocate a bunch of smaller targ pages into one large repl page. all targ
 * pages must be complete pages smaller than replacement pages.
 * it's assumed that no page's szc can change since they are all PAGESIZE or
 * complete large pages locked SHARED.
 */
static void
segvn_relocate_pages(page_t **targ, page_t *replacement)
{
	page_t *pp;
	pgcnt_t repl_npgs, curnpgs;
	pgcnt_t i;
	uint_t repl_szc = replacement->p_szc;
	page_t *first_repl = replacement;
	page_t *repl;
	spgcnt_t npgs;

	VM_STAT_ADD(segvnvmstats.relocatepages[0]);

	ASSERT(repl_szc != 0);
	npgs = repl_npgs = page_get_pagecnt(repl_szc);
3160 ASSERT(replacement
!= NULL
);
3162 ASSERT(pp
->p_szc
< repl_szc
);
3163 ASSERT(PAGE_EXCL(pp
));
3164 ASSERT(!PP_ISFREE(pp
));
3165 curnpgs
= page_get_pagecnt(pp
->p_szc
);
3167 VM_STAT_ADD(segvnvmstats
.relocatepages
[1]);
3169 page_sub(&replacement
, repl
);
3170 ASSERT(PAGE_EXCL(repl
));
3171 ASSERT(!PP_ISFREE(repl
));
3172 ASSERT(repl
->p_szc
== repl_szc
);
3174 page_t
*repl_savepp
;
3176 VM_STAT_ADD(segvnvmstats
.relocatepages
[2]);
3177 repl_savepp
= replacement
;
3178 for (j
= 0; j
< curnpgs
; j
++) {
3180 page_sub(&replacement
, repl
);
3181 ASSERT(PAGE_EXCL(repl
));
3182 ASSERT(!PP_ISFREE(repl
));
3183 ASSERT(repl
->p_szc
== repl_szc
);
3184 ASSERT(page_pptonum(targ
[i
+ j
]) ==
3185 page_pptonum(targ
[i
]) + j
);
3188 ASSERT(IS_P2ALIGNED(page_pptonum(repl
), curnpgs
));
3190 err
= page_relocate(&pp
, &repl
, 0, 1, &nreloc
, NULL
);
3191 if (err
|| nreloc
!= curnpgs
) {
3192 panic("segvn_relocate_pages: "
3193 "page_relocate failed err=%d curnpgs=%ld "
3194 "nreloc=%ld", err
, curnpgs
, nreloc
);
3196 ASSERT(curnpgs
<= repl_npgs
);
3197 repl_npgs
-= curnpgs
;
3200 ASSERT(replacement
== NULL
);
3204 for (i
= 0; i
< repl_npgs
; i
++) {
3205 ASSERT(PAGE_EXCL(repl
));
3206 ASSERT(!PP_ISFREE(repl
));
3208 page_downgrade(targ
[i
]);
/*
 * Check if all pages in ppa array are complete smaller than szc pages and
 * their roots will still be aligned relative to their current size if the
 * entire ppa array is relocated into one szc page. If these conditions are
 * not met return 0.
 *
 * If all pages are properly aligned attempt to upgrade their locks
 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
 * upgrdfail was set to 0 by caller.
 *
 * Return 1 if all pages are aligned and locked exclusively.
 *
 * If all pages in ppa array happen to be physically contiguous to make one
 * szc page and all exclusive locks are successfully obtained promote the page
 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
 */
static int
segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
{
	pgcnt_t totnpgs = page_get_pagecnt(szc);
3245 VM_STAT_ADD(segvnvmstats
.fullszcpages
[0]);
3247 for (i
= 0; i
< totnpgs
; i
++) {
3249 ASSERT(PAGE_SHARED(pp
));
3250 ASSERT(!PP_ISFREE(pp
));
3251 pfn
= page_pptonum(pp
);
3253 if (!IS_P2ALIGNED(pfn
, totnpgs
)) {
3258 } else if (contig
&& pfn
!= first_pfn
+ i
) {
3261 if (pp
->p_szc
== 0) {
3263 VM_STAT_ADD(segvnvmstats
.fullszcpages
[1]);
3267 if ((curszc
= pp
->p_szc
) >= szc
) {
3268 VM_STAT_ADD(segvnvmstats
.fullszcpages
[2]);
3273 * p_szc changed means we don't have all pages
3274 * locked. return failure.
3276 VM_STAT_ADD(segvnvmstats
.fullszcpages
[3]);
3279 curnpgs
= page_get_pagecnt(curszc
);
3280 if (!IS_P2ALIGNED(pfn
, curnpgs
) ||
3281 !IS_P2ALIGNED(i
, curnpgs
)) {
3282 VM_STAT_ADD(segvnvmstats
.fullszcpages
[4]);
3288 VM_STAT_ADD(segvnvmstats
.fullszcpages
[5]);
3289 if (pp
->p_szc
!= curszc
) {
3290 VM_STAT_ADD(segvnvmstats
.fullszcpages
[6]);
3293 if (pfn
- 1 != page_pptonum(ppa
[i
- 1])) {
3294 panic("segvn_full_szcpages: "
3295 "large page not physically contiguous");
3297 if (P2PHASE(pfn
, curnpgs
) == curnpgs
- 1) {
3303 for (i
= 0; i
< totnpgs
; i
++) {
3304 ASSERT(ppa
[i
]->p_szc
< szc
);
3305 if (!page_tryupgrade(ppa
[i
])) {
3306 for (j
= 0; j
< i
; j
++) {
3307 page_downgrade(ppa
[j
]);
3309 *pszc
= ppa
[i
]->p_szc
;
3311 VM_STAT_ADD(segvnvmstats
.fullszcpages
[7]);
3317 * When a page is put a free cachelist its szc is set to 0. if file
3318 * system reclaimed pages from cachelist targ pages will be physically
3319 * contiguous with 0 p_szc. in this case just upgrade szc of targ
3320 * pages without any relocations.
3321 * To avoid any hat issues with previous small mappings
3322 * hat_pageunload() the target pages first.
3325 VM_STAT_ADD(segvnvmstats
.fullszcpages
[8]);
3326 for (i
= 0; i
< totnpgs
; i
++) {
3327 (void) hat_pageunload(ppa
[i
], HAT_FORCE_PGUNLOAD
);
3329 for (i
= 0; i
< totnpgs
; i
++) {
3330 ppa
[i
]->p_szc
= szc
;
3332 for (i
= 0; i
< totnpgs
; i
++) {
3333 ASSERT(PAGE_EXCL(ppa
[i
]));
3334 page_downgrade(ppa
[i
]);
3340 VM_STAT_ADD(segvnvmstats
.fullszcpages
[9]);
/*
 * Create physically contiguous pages for [vp, off] - [vp, off +
 * page_size(szc)) range and for private segment return them in ppa array.
 * Pages are created either via IO or relocations.
 *
 * Return 1 on success and 0 on failure.
 *
 * If physically contiguous pages already exist for this range return 1 without
 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
 */
static int
segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
    uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
    int *downsize)
{
	page_t *pplist = *ppplist;
	size_t pgsz = page_get_pagesize(szc);
	pgcnt_t pages = btop(pgsz);
	ulong_t start_off = off;
	u_offset_t eoff = off + pgsz;
	u_offset_t io_off = off;
	page_t *io_pplist = NULL;
	page_t *done_pplist = NULL;
	page_t *targ_pplist = NULL;
	page_t *repl_pplist = NULL;
3387 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[0]);
3390 ASSERT(pplist
->p_szc
== szc
);
3393 * downsize will be set to 1 only if we fail to lock pages. this will
3394 * allow subsequent faults to try to relocate the page again. If we
3395 * fail due to misalignment don't downsize and let the caller map the
3396 * whole region with small mappings to avoid more faults into the area
3397 * where we can't get large pages anyway.
3401 while (off
< eoff
) {
3403 ASSERT(newpp
!= NULL
);
3404 ASSERT(PAGE_EXCL(newpp
));
3405 ASSERT(!PP_ISFREE(newpp
));
3407 * we pass NULL for nrelocp to page_lookup_create()
3408 * so that it doesn't relocate. We relocate here
3409 * later only after we make sure we can lock all
3410 * pages in the range we handle and they are all
3413 pp
= page_lookup_create(vp
, off
, SE_SHARED
, newpp
, NULL
, 0);
3415 ASSERT(!PP_ISFREE(pp
));
3416 ASSERT(pp
->p_vnode
== vp
);
3417 ASSERT(pp
->p_offset
== off
);
3419 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[1]);
3420 page_sub(&pplist
, pp
);
3421 ASSERT(PAGE_EXCL(pp
));
3422 ASSERT(page_iolock_assert(pp
));
3423 page_list_concat(&io_pplist
, &pp
);
3427 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[2]);
3428 pfn
= page_pptonum(pp
);
3430 if (pszc
>= szc
&& targ_pplist
== NULL
&& io_pplist
== NULL
&&
3431 IS_P2ALIGNED(pfn
, pages
)) {
3432 ASSERT(repl_pplist
== NULL
);
3433 ASSERT(done_pplist
== NULL
);
3434 ASSERT(pplist
== *ppplist
);
3436 page_free_replacement_page(pplist
);
3437 page_create_putback(pages
);
3439 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[3]);
3444 segvn_faultvnmpss_align_err1
++;
3447 ppages
= page_get_pagecnt(pszc
);
3448 if (!IS_P2ALIGNED(pfn
, ppages
)) {
3451 * sizing down to pszc won't help.
3454 segvn_faultvnmpss_align_err2
++;
3457 pfn
= page_pptonum(newpp
);
3458 if (!IS_P2ALIGNED(pfn
, ppages
)) {
3461 * sizing down to pszc won't help.
3464 segvn_faultvnmpss_align_err3
++;
3467 if (!PAGE_EXCL(pp
)) {
3468 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[4]);
3471 *ret_pszc
= pp
->p_szc
;
3475 if (io_pplist
!= NULL
) {
3476 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[5]);
3477 io_len
= off
- io_off
;
3479 * Some file systems like NFS don't check EOF
3480 * conditions in VOP_PAGEIO(). Check it here
3481 * now that pages are locked SE_EXCL. Any file
3482 * truncation will wait until the pages are
3483 * unlocked so no need to worry that file will
3484 * be truncated after we check its size here.
3485 * XXX fix NFS to remove this check.
3487 va
.va_mask
= AT_SIZE
;
3488 if (VOP_GETATTR(vp
, &va
, ATTR_HINT
, svd
->cred
, NULL
)) {
3489 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[6]);
3490 page_unlock(targpp
);
3493 if (btopr(va
.va_size
) < btopr(io_off
+ io_len
)) {
3494 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[7]);
3497 page_unlock(targpp
);
3500 io_err
= VOP_PAGEIO(vp
, io_pplist
, io_off
, io_len
,
3501 B_READ
, svd
->cred
, NULL
);
3503 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[8]);
3504 page_unlock(targpp
);
3505 if (io_err
== EDEADLK
) {
3506 segvn_vmpss_pageio_deadlk_err
++;
3511 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[9]);
3512 while (io_pplist
!= NULL
) {
3514 page_sub(&io_pplist
, pp
);
3515 ASSERT(page_iolock_assert(pp
));
3517 pgidx
= (pp
->p_offset
- start_off
) >>
3519 ASSERT(pgidx
< pages
);
3521 page_list_concat(&done_pplist
, &pp
);
3525 ASSERT(PAGE_EXCL(pp
));
3526 ASSERT(pp
->p_szc
<= pszc
);
3527 if (pszc
!= 0 && !group_page_trylock(pp
, SE_EXCL
)) {
3528 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[10]);
3531 *ret_pszc
= pp
->p_szc
;
3534 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[11]);
3536 * page szc chould have changed before the entire group was
3537 * locked. reread page szc.
3540 ppages
= page_get_pagecnt(pszc
);
3542 /* link just the roots */
3543 page_list_concat(&targ_pplist
, &pp
);
3544 page_sub(&pplist
, newpp
);
3545 page_list_concat(&repl_pplist
, &newpp
);
3547 while (--ppages
!= 0) {
3549 page_sub(&pplist
, newpp
);
3554 if (io_pplist
!= NULL
) {
3555 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[12]);
3556 io_len
= eoff
- io_off
;
3557 va
.va_mask
= AT_SIZE
;
3558 if (VOP_GETATTR(vp
, &va
, ATTR_HINT
, svd
->cred
, NULL
) != 0) {
3559 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[13]);
3562 if (btopr(va
.va_size
) < btopr(io_off
+ io_len
)) {
3563 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[14]);
3568 io_err
= VOP_PAGEIO(vp
, io_pplist
, io_off
, io_len
,
3569 B_READ
, svd
->cred
, NULL
);
3571 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[15]);
3572 if (io_err
== EDEADLK
) {
3573 segvn_vmpss_pageio_deadlk_err
++;
3578 while (io_pplist
!= NULL
) {
3580 page_sub(&io_pplist
, pp
);
3581 ASSERT(page_iolock_assert(pp
));
3583 pgidx
= (pp
->p_offset
- start_off
) >> PAGESHIFT
;
3584 ASSERT(pgidx
< pages
);
3589 * we're now bound to succeed or panic.
3590 * remove pages from done_pplist. it's not needed anymore.
3592 while (done_pplist
!= NULL
) {
3594 page_sub(&done_pplist
, pp
);
3596 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[16]);
3597 ASSERT(pplist
== NULL
);
3599 while (targ_pplist
!= NULL
) {
3601 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[17]);
3602 ASSERT(repl_pplist
);
3604 page_sub(&targ_pplist
, pp
);
3605 pgidx
= (pp
->p_offset
- start_off
) >> PAGESHIFT
;
3606 newpp
= repl_pplist
;
3607 page_sub(&repl_pplist
, newpp
);
3609 pfn
= page_pptonum(pp
);
3611 ppages
= page_get_pagecnt(pszc
);
3612 ASSERT(IS_P2ALIGNED(pfn
, ppages
));
3613 pfn
= page_pptonum(newpp
);
3614 ASSERT(IS_P2ALIGNED(pfn
, ppages
));
3615 ASSERT(P2PHASE(pfn
, pages
) == pgidx
);
3618 ret
= page_relocate(&pp
, &newpp
, 0, 1, &nreloc
, NULL
);
3619 if (ret
!= 0 || nreloc
== 0) {
3620 panic("segvn_fill_vp_pages: "
3621 "page_relocate failed");
3624 while (nreloc
-- != 0) {
3625 ASSERT(PAGE_EXCL(pp
));
3626 ASSERT(pp
->p_vnode
== vp
);
3628 ((pp
->p_offset
- start_off
) >> PAGESHIFT
));
3634 if (svd
->type
== MAP_PRIVATE
) {
3635 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[18]);
3636 for (i
= 0; i
< pages
; i
++) {
3637 ASSERT(ppa
[i
] != NULL
);
3638 ASSERT(PAGE_EXCL(ppa
[i
]));
3639 ASSERT(ppa
[i
]->p_vnode
== vp
);
3640 ASSERT(ppa
[i
]->p_offset
==
3641 start_off
+ (i
<< PAGESHIFT
));
3642 page_downgrade(ppa
[i
]);
3646 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[19]);
3648 * the caller will still call VOP_GETPAGE() for shared segments
3649 * to check FS write permissions. For private segments we map
3650 * file read only anyway. so no VOP_GETPAGE is needed.
3652 for (i
= 0; i
< pages
; i
++) {
3653 ASSERT(ppa
[i
] != NULL
);
3654 ASSERT(PAGE_EXCL(ppa
[i
]));
3655 ASSERT(ppa
[i
]->p_vnode
== vp
);
3656 ASSERT(ppa
[i
]->p_offset
==
3657 start_off
+ (i
<< PAGESHIFT
));
3658 page_unlock(ppa
[i
]);
3666 * Do the cleanup. Unlock target pages we didn't relocate. They are
3667 * linked on targ_pplist by root pages. reassemble unused replacement
3668 * and io pages back to pplist.
3670 if (io_pplist
!= NULL
) {
3671 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[20]);
3674 ASSERT(pp
->p_vnode
== vp
);
3675 ASSERT(pp
->p_offset
== io_off
);
3676 ASSERT(page_iolock_assert(pp
));
3678 page_hashout(pp
, NULL
);
3680 } while ((pp
= pp
->p_next
) != io_pplist
);
3681 page_list_concat(&io_pplist
, &pplist
);
3685 while (targ_pplist
!= NULL
) {
3686 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[21]);
3688 ASSERT(PAGE_EXCL(pp
));
3689 page_sub(&targ_pplist
, pp
);
3692 ppages
= page_get_pagecnt(pszc
);
3693 ASSERT(IS_P2ALIGNED(page_pptonum(pp
), ppages
));
3696 group_page_unlock(pp
);
3702 ASSERT(PAGE_EXCL(pp
));
3703 ASSERT(pp
->p_szc
== szc
);
3704 page_sub(&repl_pplist
, pp
);
3706 ASSERT(IS_P2ALIGNED(page_pptonum(pp
), ppages
));
3708 /* relink replacement page */
3709 page_list_concat(&tmp_pplist
, &pp
);
3710 while (--ppages
!= 0) {
3711 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[22]);
3713 ASSERT(PAGE_EXCL(pp
));
3714 ASSERT(pp
->p_szc
== szc
);
3715 page_list_concat(&tmp_pplist
, &pp
);
3718 if (tmp_pplist
!= NULL
) {
3719 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[23]);
3720 page_list_concat(&tmp_pplist
, &pplist
);
3721 pplist
= tmp_pplist
;
3724 * at this point all pages are either on done_pplist or
3725 * pplist. They can't be all on done_pplist otherwise
3726 * we'd've been done.
3728 ASSERT(pplist
!= NULL
);
3730 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[24]);
3733 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[25]);
3734 ASSERT(pp
->p_szc
== szc
);
3735 ASSERT(PAGE_EXCL(pp
));
3736 ASSERT(pp
->p_vnode
!= vp
);
3738 } while ((pp
= pp
->p_next
) != pplist
);
3742 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[26]);
3743 ASSERT(pp
->p_szc
== szc
);
3744 ASSERT(PAGE_EXCL(pp
));
3745 ASSERT(pp
->p_vnode
== vp
);
3747 } while ((pp
= pp
->p_next
) != done_pplist
);
3749 while (pplist
!= NULL
) {
3750 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[27]);
3752 page_sub(&pplist
, pp
);
3756 while (done_pplist
!= NULL
) {
3757 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[28]);
3759 page_sub(&done_pplist
, pp
);
3765 ASSERT(pplist
== *ppplist
);
3767 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[29]);
3769 * don't downsize on io error.
3770 * see if vop_getpage succeeds.
3771 * pplist may still be used in this case
3776 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[30]);
3777 page_free_replacement_page(pplist
);
3778 page_create_putback(pages
);
3783 int segvn_anypgsz
= 0;
3785 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3786 if ((type) == F_SOFTLOCK) { \
3787 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3791 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3792 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3793 if ((rw) == S_WRITE) { \
3794 for (i = 0; i < (pages); i++) { \
3795 ASSERT((ppa)[i]->p_vnode == \
3796 (ppa)[0]->p_vnode); \
3797 hat_setmod((ppa)[i]); \
3799 } else if ((rw) != S_OTHER && \
3800 ((prot) & (vpprot) & PROT_WRITE)) { \
3801 for (i = 0; i < (pages); i++) { \
3802 ASSERT((ppa)[i]->p_vnode == \
3803 (ppa)[0]->p_vnode); \
3804 if (!hat_ismod((ppa)[i])) { \
3805 prot &= ~PROT_WRITE; \
3814 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3815 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3817 #else /* VM_STATS */
3819 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
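
/*
 * SEGVN_UPDATE_MODBITS applies the mod-bit policy above to the array of
 * constituent pages of a large page: a write fault marks every page
 * modified, while for other accesses PROT_WRITE is withdrawn as soon as
 * one page in the array is found clean, so that a later store faults and
 * records the modification.  SEGVN_RESTORE_SOFTLOCK_VP backs out the
 * softlockcnt increment taken for an F_SOFTLOCK fault when the
 * large-page fault path has to abandon the attempt.
 */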
static faultcode_t
segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
    caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
    caddr_t eaddr, int brkcow)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp = svd->amp;
	uchar_t segtype = svd->type;
	uint_t szc = seg->s_szc;
	size_t pgsz = page_get_pagesize(szc);
	size_t maxpgsz = pgsz;
	pgcnt_t pages = btop(pgsz);
	pgcnt_t maxpages = pages;
	size_t ppasize = (pages + 1) * sizeof (page_t *);
	caddr_t a = lpgaddr;
	caddr_t maxlpgeaddr = lpgeaddr;
	u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
	ulong_t aindx = svd->anon_index + seg_page(seg, a);
	struct vpage *vpage = (svd->vpage != NULL) ?
	    &svd->vpage[seg_page(seg, a)] : NULL;
	vnode_t *vp = svd->vp;
3848 faultcode_t err
= 0;
3850 int vop_size_err
= 0;
3851 uint_t protchk
, prot
, vpprot
;
3853 int hat_flag
= (type
== F_SOFTLOCK
) ? HAT_LOAD_LOCK
: HAT_LOAD
;
3854 anon_sync_obj_t an_cookie
;
3856 int alloc_failed
= 0;
3864 int segvn_anypgsz_vnode
= 0; /* for now map vnode with 2 page sizes */
3865 int tron
= (svd
->tr_state
== SEGVN_TR_ON
);
3869 ASSERT(brkcow
== 0 || amp
!= NULL
);
3870 ASSERT(tron
== 0 || amp
!= NULL
);
3871 ASSERT(enable_mbit_wa
== 0); /* no mbit simulations with large pages */
3872 ASSERT(!(svd
->flags
& MAP_NORESERVE
));
3873 ASSERT(type
!= F_SOFTUNLOCK
);
3874 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
3875 ASSERT(amp
== NULL
|| IS_P2ALIGNED(aindx
, maxpages
));
3876 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
3877 ASSERT(seg
->s_szc
< NBBY
* sizeof (int));
3878 ASSERT(type
!= F_SOFTLOCK
|| lpgeaddr
- a
== maxpgsz
);
3879 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
3881 VM_STAT_COND_ADD(type
== F_SOFTLOCK
, segvnvmstats
.fltvnpages
[0]);
3882 VM_STAT_COND_ADD(type
!= F_SOFTLOCK
, segvnvmstats
.fltvnpages
[1]);
3884 if (svd
->flags
& MAP_TEXT
) {
3885 hat_flag
|= HAT_LOAD_TEXT
;
3888 if (svd
->pageprot
) {
3891 protchk
= PROT_READ
;
3894 protchk
= PROT_WRITE
;
3897 protchk
= PROT_EXEC
;
3901 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
3906 /* caller has already done segment level protection check. */
3909 if (seg
->s_as
->a_hat
!= hat
) {
3913 if (rw
== S_WRITE
&& segtype
== MAP_PRIVATE
) {
3914 SEGVN_VMSTAT_FLTVNPAGES(2);
3920 ppa
= kmem_alloc(ppasize
, KM_SLEEP
);
3922 VM_STAT_COND_ADD(amp
!= NULL
, segvnvmstats
.fltvnpages
[3]);
3926 for (; a
< lpgeaddr
; a
+= pgsz
, off
+= pgsz
, aindx
+= pages
) {
3928 while (szc
< seg
->s_szc
) {
3931 tszc
= segvn_anypgsz_vnode
? szc
+ 1 :
3933 ppgsz
= page_get_pagesize(tszc
);
3934 if (!IS_P2ALIGNED(a
, ppgsz
) ||
3935 ((alloc_failed
>> tszc
) & 0x1)) {
3938 SEGVN_VMSTAT_FLTVNPAGES(4);
3942 e
= P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
3943 lpgeaddr
= (caddr_t
)e
;
3948 if (IS_P2ALIGNED(a
, maxpgsz
) && amp
!= NULL
) {
3949 ASSERT(IS_P2ALIGNED(aindx
, maxpages
));
3950 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
3951 anon_array_enter(amp
, aindx
, &an_cookie
);
3952 if (anon_get_ptr(amp
->ahp
, aindx
) != NULL
) {
3953 SEGVN_VMSTAT_FLTVNPAGES(5);
3954 ASSERT(anon_pages(amp
->ahp
, aindx
,
3955 maxpages
) == maxpages
);
3956 anon_array_exit(&an_cookie
);
3957 ANON_LOCK_EXIT(&
->a_rwlock
);
3958 err
= segvn_fault_anonpages(hat
, seg
,
3959 a
, a
+ maxpgsz
, type
, rw
,
3961 MIN(a
+ maxpgsz
, eaddr
), brkcow
);
3963 SEGVN_VMSTAT_FLTVNPAGES(6);
3966 if (szc
< seg
->s_szc
) {
3970 lpgeaddr
= maxlpgeaddr
;
3974 ASSERT(anon_pages(amp
->ahp
, aindx
,
3976 SEGVN_VMSTAT_FLTVNPAGES(7);
3977 anon_array_exit(&an_cookie
);
3978 ANON_LOCK_EXIT(&
->a_rwlock
);
3981 ASSERT(!brkcow
|| IS_P2ALIGNED(a
, maxpgsz
));
3982 ASSERT(!tron
|| IS_P2ALIGNED(a
, maxpgsz
));
3984 if (svd
->pageprot
!= 0 && IS_P2ALIGNED(a
, maxpgsz
)) {
3985 ASSERT(vpage
!= NULL
);
3986 prot
= VPP_PROT(vpage
);
3987 ASSERT(sameprot(seg
, a
, maxpgsz
));
3988 if ((prot
& protchk
) == 0) {
3989 SEGVN_VMSTAT_FLTVNPAGES(8);
3994 if (type
== F_SOFTLOCK
) {
3995 atomic_add_long((ulong_t
*)&svd
->softlockcnt
,
4002 if (!brkcow
&& !tron
&& szc
&&
4003 !page_exists_physcontig(vp
, off
, szc
,
4004 segtype
== MAP_PRIVATE
? ppa
: NULL
)) {
4005 SEGVN_VMSTAT_FLTVNPAGES(9);
4006 if (page_alloc_pages(vp
, seg
, a
, &pplist
, NULL
,
4007 szc
, 0, 0) && type
!= F_SOFTLOCK
) {
4008 SEGVN_VMSTAT_FLTVNPAGES(10);
4011 alloc_failed
|= (1 << szc
);
4014 if (pplist
!= NULL
&&
4015 vp
->v_mpssdata
== SEGVN_PAGEIO
) {
4017 SEGVN_VMSTAT_FLTVNPAGES(11);
4018 physcontig
= segvn_fill_vp_pages(svd
,
4019 vp
, off
, szc
, ppa
, &pplist
,
4021 ASSERT(!physcontig
|| pplist
== NULL
);
4022 if (!physcontig
&& downsize
&&
4023 type
!= F_SOFTLOCK
) {
4024 ASSERT(pplist
== NULL
);
4025 SEGVN_VMSTAT_FLTVNPAGES(12);
4029 ASSERT(!physcontig
||
4030 segtype
== MAP_PRIVATE
||
4032 if (physcontig
&& ppa
[0] == NULL
) {
4036 } else if (!brkcow
&& !tron
&& szc
&& ppa
[0] != NULL
) {
4037 SEGVN_VMSTAT_FLTVNPAGES(13);
4038 ASSERT(segtype
== MAP_PRIVATE
);
4043 SEGVN_VMSTAT_FLTVNPAGES(14);
4045 ierr
= VOP_GETPAGE(vp
, (offset_t
)off
, pgsz
,
4046 &vpprot
, ppa
, pgsz
, seg
, a
, arw
,
4050 for (i
= 0; i
< pages
; i
++) {
4051 ASSERT(PAGE_LOCKED(ppa
[i
]));
4052 ASSERT(!PP_ISFREE(ppa
[i
]));
4053 ASSERT(ppa
[i
]->p_vnode
== vp
);
4054 ASSERT(ppa
[i
]->p_offset
==
4055 off
+ (i
<< PAGESHIFT
));
4059 if (segtype
== MAP_PRIVATE
) {
4060 SEGVN_VMSTAT_FLTVNPAGES(15);
4061 vpprot
&= ~PROT_WRITE
;
4064 ASSERT(segtype
== MAP_PRIVATE
);
4065 SEGVN_VMSTAT_FLTVNPAGES(16);
4066 vpprot
= PROT_ALL
& ~PROT_WRITE
;
4071 SEGVN_VMSTAT_FLTVNPAGES(17);
4072 if (pplist
!= NULL
) {
4073 SEGVN_VMSTAT_FLTVNPAGES(18);
4074 page_free_replacement_page(pplist
);
4075 page_create_putback(pages
);
4077 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4078 if (a
+ pgsz
<= eaddr
) {
4079 SEGVN_VMSTAT_FLTVNPAGES(19);
4080 err
= FC_MAKE_ERR(ierr
);
4083 va
.va_mask
= AT_SIZE
;
4084 if (VOP_GETATTR(vp
, &va
, 0, svd
->cred
, NULL
)) {
4085 SEGVN_VMSTAT_FLTVNPAGES(20);
4086 err
= FC_MAKE_ERR(EIO
);
4089 if (btopr(va
.va_size
) >= btopr(off
+ pgsz
)) {
4090 SEGVN_VMSTAT_FLTVNPAGES(21);
4091 err
= FC_MAKE_ERR(ierr
);
4094 if (btopr(va
.va_size
) <
4095 btopr(off
+ (eaddr
- a
))) {
4096 SEGVN_VMSTAT_FLTVNPAGES(22);
4097 err
= FC_MAKE_ERR(ierr
);
4100 if (brkcow
|| tron
|| type
== F_SOFTLOCK
) {
4101 /* can't reduce map area */
4102 SEGVN_VMSTAT_FLTVNPAGES(23);
4106 SEGVN_VMSTAT_FLTVNPAGES(24);
4114 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
4115 anon_array_enter(amp
, aindx
, &an_cookie
);
4118 anon_get_ptr(amp
->ahp
, aindx
) != NULL
) {
4119 ulong_t taindx
= P2ALIGN(aindx
, maxpages
);
4121 SEGVN_VMSTAT_FLTVNPAGES(25);
4122 ASSERT(anon_pages(amp
->ahp
, taindx
,
4123 maxpages
) == maxpages
);
4124 for (i
= 0; i
< pages
; i
++) {
4125 page_unlock(ppa
[i
]);
4127 anon_array_exit(&an_cookie
);
4128 ANON_LOCK_EXIT(&
->a_rwlock
);
4129 if (pplist
!= NULL
) {
4130 page_free_replacement_page(pplist
);
4131 page_create_putback(pages
);
4133 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4134 if (szc
< seg
->s_szc
) {
4135 SEGVN_VMSTAT_FLTVNPAGES(26);
4137 * For private segments SOFTLOCK
4138 * either always breaks cow (any rw
4139 * type except S_READ_NOCOW) or
4140 * address space is locked as writer
4141 * (S_READ_NOCOW case) and anon slots
4142 * can't show up on second check.
4143 * Therefore if we are here for
4144 * SOFTLOCK case it must be a cow
4145 * break but cow break never reduces
4146 * szc. text replication (tron) in
4147 * this case works as cow break.
4148 * Thus the assert below.
4150 ASSERT(!brkcow
&& !tron
&&
4151 type
!= F_SOFTLOCK
);
4156 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4161 ulong_t taindx
= P2ALIGN(aindx
, maxpages
);
4162 ASSERT(!anon_pages(amp
->ahp
, taindx
, maxpages
));
4166 if (brkcow
|| tron
) {
4167 ASSERT(amp
!= NULL
);
4168 ASSERT(pplist
== NULL
);
4169 ASSERT(szc
== seg
->s_szc
);
4170 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4171 ASSERT(IS_P2ALIGNED(aindx
, maxpages
));
4172 SEGVN_VMSTAT_FLTVNPAGES(27);
4173 ierr
= anon_map_privatepages(amp
, aindx
, szc
,
4174 seg
, a
, prot
, ppa
, vpage
, segvn_anypgsz
,
4175 tron
? PG_LOCAL
: 0, svd
->cred
);
4177 SEGVN_VMSTAT_FLTVNPAGES(28);
4178 anon_array_exit(&an_cookie
);
4179 ANON_LOCK_EXIT(&
->a_rwlock
);
4180 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4181 err
= FC_MAKE_ERR(ierr
);
4185 ASSERT(!IS_VMODSORT(ppa
[0]->p_vnode
));
4187 * p_szc can't be changed for locked
4190 ASSERT(svd
->rcookie
==
4191 HAT_INVALID_REGION_COOKIE
);
4192 hat_memload_array(hat
, a
, pgsz
, ppa
, prot
,
4195 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4196 SEGVN_VMSTAT_FLTVNPAGES(29);
4197 for (i
= 0; i
< pages
; i
++) {
4198 page_unlock(ppa
[i
]);
4201 anon_array_exit(&an_cookie
);
4202 ANON_LOCK_EXIT(&
->a_rwlock
);
4206 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
||
4207 (!svd
->pageprot
&& svd
->prot
== (prot
& vpprot
)));
4209 pfn
= page_pptonum(ppa
[0]);
4211 * hat_page_demote() needs an SE_EXCL lock on one of
4212 * constituent page_t's and it decreases root's p_szc
4213 * last. This means if root's p_szc is equal szc and
4214 * all its constituent pages are locked
4215 * hat_page_demote() that could have changed p_szc to
4216 * szc is already done and no new have page_demote()
4217 * can start for this large page.
4221 * we need to make sure same mapping size is used for
4222 * the same address range if there's a possibility the
4223 * adddress is already mapped because hat layer panics
4224 * when translation is loaded for the range already
4225 * mapped with a different page size. We achieve it
4226 * by always using largest page size possible subject
4227 * to the constraints of page size, segment page size
4228 * and page alignment. Since mappings are invalidated
4229 * when those constraints change and make it
4230 * impossible to use previously used mapping size no
4231 * mapping size conflicts should happen.
4235 if ((pszc
= ppa
[0]->p_szc
) == szc
&&
4236 IS_P2ALIGNED(pfn
, pages
)) {
4238 SEGVN_VMSTAT_FLTVNPAGES(30);
4240 for (i
= 0; i
< pages
; i
++) {
4241 ASSERT(PAGE_LOCKED(ppa
[i
]));
4242 ASSERT(!PP_ISFREE(ppa
[i
]));
4243 ASSERT(page_pptonum(ppa
[i
]) ==
4245 ASSERT(ppa
[i
]->p_szc
== szc
);
4246 ASSERT(ppa
[i
]->p_vnode
== vp
);
4247 ASSERT(ppa
[i
]->p_offset
==
4248 off
+ (i
<< PAGESHIFT
));
4252 * All pages are of szc we need and they are
4253 * all locked so they can't change szc. load
4256 * if page got promoted since last check
4257 * we don't need pplist.
4259 if (pplist
!= NULL
) {
4260 page_free_replacement_page(pplist
);
4261 page_create_putback(pages
);
4263 if (PP_ISMIGRATE(ppa
[0])) {
4264 page_migrate(seg
, a
, ppa
, pages
);
4266 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4269 hat_memload_array_region(hat
, a
, pgsz
,
4270 ppa
, prot
& vpprot
, hat_flag
,
4274 * avoid large xhat mappings to FS
4275 * pages so that hat_page_demote()
4276 * doesn't need to check for xhat
4278 * Don't use regions with xhats.
4280 for (i
= 0; i
< pages
; i
++) {
4282 a
+ (i
<< PAGESHIFT
),
4283 ppa
[i
], prot
& vpprot
,
4288 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4289 for (i
= 0; i
< pages
; i
++) {
4290 page_unlock(ppa
[i
]);
4294 anon_array_exit(&an_cookie
);
4295 ANON_LOCK_EXIT(&
->a_rwlock
);
4301 * See if upsize is possible.
4303 if (pszc
> szc
&& szc
< seg
->s_szc
&&
4304 (segvn_anypgsz_vnode
|| pszc
>= seg
->s_szc
)) {
4306 uint_t pszc1
= MIN(pszc
, seg
->s_szc
);
4307 ppgsz
= page_get_pagesize(pszc1
);
4308 ppages
= btop(ppgsz
);
4309 aphase
= btop(P2PHASE((uintptr_t)a
, ppgsz
));
4311 ASSERT(type
!= F_SOFTLOCK
);
4313 SEGVN_VMSTAT_FLTVNPAGES(31);
4314 if (aphase
!= P2PHASE(pfn
, ppages
)) {
4315 segvn_faultvnmpss_align_err4
++;
4317 SEGVN_VMSTAT_FLTVNPAGES(32);
4318 if (pplist
!= NULL
) {
4319 page_t
*pl
= pplist
;
4320 page_free_replacement_page(pl
);
4321 page_create_putback(pages
);
4323 for (i
= 0; i
< pages
; i
++) {
4324 page_unlock(ppa
[i
]);
4327 anon_array_exit(&an_cookie
);
4328 ANON_LOCK_EXIT(&
->a_rwlock
);
4337 * check if we should use smallest mapping size.
4340 if (szc
== 0 || xhat
||
4342 !IS_P2ALIGNED(pfn
, pages
)) ||
4344 !segvn_full_szcpages(ppa
, szc
, &upgrdfail
,
4347 if (upgrdfail
&& type
!= F_SOFTLOCK
) {
4349 * segvn_full_szcpages failed to lock
4350 * all pages EXCL. Size down.
4354 SEGVN_VMSTAT_FLTVNPAGES(33);
4356 if (pplist
!= NULL
) {
4357 page_t
*pl
= pplist
;
4358 page_free_replacement_page(pl
);
4359 page_create_putback(pages
);
4362 for (i
= 0; i
< pages
; i
++) {
4363 page_unlock(ppa
[i
]);
4366 anon_array_exit(&an_cookie
);
4367 ANON_LOCK_EXIT(&
->a_rwlock
);
4372 if (szc
!= 0 && !xhat
&& !upgrdfail
) {
4373 segvn_faultvnmpss_align_err5
++;
4375 SEGVN_VMSTAT_FLTVNPAGES(34);
4376 if (pplist
!= NULL
) {
4377 page_free_replacement_page(pplist
);
4378 page_create_putback(pages
);
4380 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4382 if (upgrdfail
&& segvn_anypgsz_vnode
) {
4384 hat_memload_array_region(hat
, a
, pgsz
,
4385 ppa
, prot
& vpprot
, hat_flag
,
4388 for (i
= 0; i
< pages
; i
++) {
4389 hat_memload_region(hat
,
4390 a
+ (i
<< PAGESHIFT
),
4391 ppa
[i
], prot
& vpprot
,
4392 hat_flag
, svd
->rcookie
);
4395 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4396 for (i
= 0; i
< pages
; i
++) {
4397 page_unlock(ppa
[i
]);
4401 anon_array_exit(&an_cookie
);
4402 ANON_LOCK_EXIT(&
->a_rwlock
);
4409 * segvn_full_szcpages() upgraded pages szc.
4411 ASSERT(pszc
== ppa
[0]->p_szc
);
4412 ASSERT(IS_P2ALIGNED(pfn
, pages
));
4418 SEGVN_VMSTAT_FLTVNPAGES(35);
4420 * p_szc of ppa[0] can change since we haven't
4421 * locked all constituent pages. Call
4422 * page_lock_szc() to prevent szc changes.
4423 * This should be a rare case that happens when
4424 * multiple segments use a different page size
4425 * to map the same file offsets.
4427 szcmtx
= page_szc_lock(ppa
[0]);
4428 pszc
= ppa
[0]->p_szc
;
4429 ASSERT(szcmtx
!= NULL
|| pszc
== 0);
4430 ASSERT(ppa
[0]->p_szc
<= pszc
);
4432 SEGVN_VMSTAT_FLTVNPAGES(36);
4433 if (szcmtx
!= NULL
) {
4438 if (pplist
!= NULL
) {
4440 * page got promoted since last check.
4441 * we don't need preaalocated large
4444 SEGVN_VMSTAT_FLTVNPAGES(37);
4445 page_free_replacement_page(pplist
);
4446 page_create_putback(pages
);
4448 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4450 hat_memload_array_region(hat
, a
, pgsz
, ppa
,
4451 prot
& vpprot
, hat_flag
, svd
->rcookie
);
4453 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4454 for (i
= 0; i
< pages
; i
++) {
4455 page_unlock(ppa
[i
]);
4459 anon_array_exit(&an_cookie
);
4460 ANON_LOCK_EXIT(&
->a_rwlock
);
4466 * if page got demoted since last check
4467 * we could have not allocated larger page.
4470 if (pplist
== NULL
&&
4471 page_alloc_pages(vp
, seg
, a
, &pplist
, NULL
,
4472 szc
, 0, 0) && type
!= F_SOFTLOCK
) {
4473 SEGVN_VMSTAT_FLTVNPAGES(38);
4474 for (i
= 0; i
< pages
; i
++) {
4475 page_unlock(ppa
[i
]);
4478 anon_array_exit(&an_cookie
);
4479 ANON_LOCK_EXIT(&
->a_rwlock
);
4482 alloc_failed
|= (1 << szc
);
4486 SEGVN_VMSTAT_FLTVNPAGES(39);
4488 if (pplist
!= NULL
) {
4489 segvn_relocate_pages(ppa
, pplist
);
4492 ASSERT(type
== F_SOFTLOCK
);
4493 SEGVN_VMSTAT_FLTVNPAGES(40);
4497 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
, prot
, vpprot
);
4499 if (pplist
== NULL
&& segvn_anypgsz_vnode
== 0) {
4500 ASSERT(type
== F_SOFTLOCK
);
4501 for (i
= 0; i
< pages
; i
++) {
4502 ASSERT(ppa
[i
]->p_szc
< szc
);
4503 hat_memload_region(hat
,
4504 a
+ (i
<< PAGESHIFT
),
4505 ppa
[i
], prot
& vpprot
, hat_flag
,
4509 ASSERT(pplist
!= NULL
|| type
== F_SOFTLOCK
);
4510 hat_memload_array_region(hat
, a
, pgsz
, ppa
,
4511 prot
& vpprot
, hat_flag
, svd
->rcookie
);
4513 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4514 for (i
= 0; i
< pages
; i
++) {
4515 ASSERT(PAGE_SHARED(ppa
[i
]));
4516 page_unlock(ppa
[i
]);
4520 anon_array_exit(&an_cookie
);
4521 ANON_LOCK_EXIT(&
->a_rwlock
);
4525 if (vpage
!= NULL
) {
4532 ASSERT(a
< lpgeaddr
);
4534 ASSERT(!brkcow
&& !tron
&& type
!= F_SOFTLOCK
);
4537 * ierr == -1 means we failed to map with a large page.
4538 * (either due to allocation/relocation failures or
4539 * misalignment with other mappings to this file.
4541 * ierr == -2 means some other thread allocated a large page
4542 * after we gave up tp map with a large page. retry with
4545 ASSERT(ierr
== -1 || ierr
== -2);
4546 ASSERT(ierr
== -2 || szc
!= 0);
4547 ASSERT(ierr
== -1 || szc
< seg
->s_szc
);
4549 SEGVN_VMSTAT_FLTVNPAGES(41);
4550 ASSERT(pszc
> szc
&& pszc
<= seg
->s_szc
);
4552 } else if (segvn_anypgsz_vnode
) {
4553 SEGVN_VMSTAT_FLTVNPAGES(42);
4556 SEGVN_VMSTAT_FLTVNPAGES(43);
4559 * other process created pszc large page.
4560 * but we still have to drop to 0 szc.
4565 pgsz
= page_get_pagesize(szc
);
4569 * Size up case. Note lpgaddr may only be needed for
4570 * softlock case so we don't adjust it here.
4572 a
= (caddr_t
)P2ALIGN((uintptr_t)a
, pgsz
);
4573 ASSERT(a
>= lpgaddr
);
4574 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4575 off
= svd
->offset
+ (uintptr_t)(a
- seg
->s_base
);
4576 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4577 vpage
= (svd
->vpage
!= NULL
) ?
4578 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4581 * Size down case. Note lpgaddr may only be needed for
4582 * softlock case so we don't adjust it here.
4584 ASSERT(IS_P2ALIGNED(a
, pgsz
));
4585 ASSERT(IS_P2ALIGNED(lpgeaddr
, pgsz
));
4586 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4587 ASSERT(a
< lpgeaddr
);
4589 SEGVN_VMSTAT_FLTVNPAGES(44);
4591 * The beginning of the large page region can
4592 * be pulled to the right to make a smaller
4593 * region. We haven't yet faulted a single
4596 a
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
4597 ASSERT(a
>= lpgaddr
);
4599 (uintptr_t)(a
- seg
->s_base
);
4600 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4601 vpage
= (svd
->vpage
!= NULL
) ?
4602 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4607 kmem_free(ppa
, ppasize
);
4608 if (!err
&& !vop_size_err
) {
4609 SEGVN_VMSTAT_FLTVNPAGES(45);
4612 if (type
== F_SOFTLOCK
&& a
> lpgaddr
) {
4613 SEGVN_VMSTAT_FLTVNPAGES(46);
4614 segvn_softunlock(seg
, lpgaddr
, a
- lpgaddr
, S_OTHER
);
4616 if (!vop_size_err
) {
4617 SEGVN_VMSTAT_FLTVNPAGES(47);
4620 ASSERT(brkcow
|| tron
|| type
== F_SOFTLOCK
);
4622 * Large page end is mapped beyond the end of file and it's a cow
4623 * fault (can be a text replication induced cow) or softlock so we can't
4624 * reduce the map area. For now just demote the segment. This should
4625 * really only happen if the end of the file changed after the mapping
4626 * was established since when large page segments are created we make
4627 * sure they don't extend beyond the end of the file.
4629 SEGVN_VMSTAT_FLTVNPAGES(48);
4631 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4632 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4634 if (seg
->s_szc
!= 0) {
4635 segvn_fltvnpages_clrszc_cnt
++;
4636 ASSERT(svd
->softlockcnt
== 0);
4637 err
= segvn_clrszc(seg
);
4639 segvn_fltvnpages_clrszc_err
++;
4642 ASSERT(err
|| seg
->s_szc
== 0);
4643 SEGVN_LOCK_DOWNGRADE(seg
->s_as
, &svd
->lock
);
4644 /* segvn_fault will do its job as if szc had been zero to begin with */
4645 return (err
== 0 ? IE_RETRY
: FC_MAKE_ERR(err
));
4649 * This routine will attempt to fault in one large page.
4650 * it will use smaller pages if that fails.
4651 * It should only be called for pure anonymous segments.
4654 segvn_fault_anonpages(struct hat
*hat
, struct seg
*seg
, caddr_t lpgaddr
,
4655 caddr_t lpgeaddr
, enum fault_type type
, enum seg_rw rw
, caddr_t addr
,
4656 caddr_t eaddr
, int brkcow
)
4658 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
4659 struct anon_map
*amp
= svd
->amp
;
4660 uchar_t segtype
= svd
->type
;
4661 uint_t szc
= seg
->s_szc
;
4662 size_t pgsz
= page_get_pagesize(szc
);
4663 size_t maxpgsz
= pgsz
;
4664 pgcnt_t pages
= btop(pgsz
);
4665 uint_t ppaszc
= szc
;
4666 caddr_t a
= lpgaddr
;
4667 ulong_t aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4668 struct vpage
*vpage
= (svd
->vpage
!= NULL
) ?
4669 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4674 uint_t protchk
, prot
, vpprot
;
4676 int hat_flag
= (type
== F_SOFTLOCK
) ? HAT_LOAD_LOCK
: HAT_LOAD
;
4677 anon_sync_obj_t cookie
;
4679 int pgflags
= (svd
->tr_state
== SEGVN_TR_ON
) ? PG_LOCAL
: 0;
4682 ASSERT(amp
!= NULL
);
4683 ASSERT(enable_mbit_wa
== 0); /* no mbit simulations with large pages */
4684 ASSERT(!(svd
->flags
& MAP_NORESERVE
));
4685 ASSERT(type
!= F_SOFTUNLOCK
);
4686 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4687 ASSERT(!brkcow
|| svd
->tr_state
== SEGVN_TR_OFF
);
4688 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
4690 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
4692 VM_STAT_COND_ADD(type
== F_SOFTLOCK
, segvnvmstats
.fltanpages
[0]);
4693 VM_STAT_COND_ADD(type
!= F_SOFTLOCK
, segvnvmstats
.fltanpages
[1]);
4695 if (svd
->flags
& MAP_TEXT
) {
4696 hat_flag
|= HAT_LOAD_TEXT
;
4699 if (svd
->pageprot
) {
4702 protchk
= PROT_READ
;
4705 protchk
= PROT_WRITE
;
4708 protchk
= PROT_EXEC
;
4712 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
4715 VM_STAT_ADD(segvnvmstats
.fltanpages
[2]);
4718 /* caller has already done segment level protection check. */
4721 ppa
= kmem_cache_alloc(segvn_szc_cache
[ppaszc
], KM_SLEEP
);
4722 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
4725 for (; a
< lpgeaddr
; a
+= pgsz
, aindx
+= pages
) {
4726 if (svd
->pageprot
!= 0 && IS_P2ALIGNED(a
, maxpgsz
)) {
4727 VM_STAT_ADD(segvnvmstats
.fltanpages
[3]);
4728 ASSERT(vpage
!= NULL
);
4729 prot
= VPP_PROT(vpage
);
4730 ASSERT(sameprot(seg
, a
, maxpgsz
));
4731 if ((prot
& protchk
) == 0) {
4736 if (adjszc_chk
&& IS_P2ALIGNED(a
, maxpgsz
) &&
4738 ASSERT(a
> lpgaddr
);
4742 ASSERT(IS_P2ALIGNED(aindx
, pages
));
4743 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
,
4746 if (type
== F_SOFTLOCK
) {
4747 atomic_add_long((ulong_t
*)&svd
->softlockcnt
,
4750 anon_array_enter(amp
, aindx
, &cookie
);
4751 ppa_szc
= (uint_t
)-1;
4752 ierr
= anon_map_getpages(amp
, aindx
, szc
, seg
, a
,
4753 prot
, &vpprot
, ppa
, &ppa_szc
, vpage
, rw
, brkcow
,
4754 segvn_anypgsz
, pgflags
, svd
->cred
);
4756 anon_array_exit(&cookie
);
4757 VM_STAT_ADD(segvnvmstats
.fltanpages
[4]);
4758 if (type
== F_SOFTLOCK
) {
4760 (ulong_t
*)&svd
->softlockcnt
,
4764 VM_STAT_ADD(segvnvmstats
.fltanpages
[6]);
4765 err
= FC_MAKE_ERR(ierr
);
4771 ASSERT(!IS_VMODSORT(ppa
[0]->p_vnode
));
4773 ASSERT(segtype
== MAP_SHARED
||
4774 ppa
[0]->p_szc
<= szc
);
4775 ASSERT(segtype
== MAP_PRIVATE
||
4776 ppa
[0]->p_szc
>= szc
);
4779 * Handle pages that have been marked for migration
4781 if (lgrp_optimizations())
4782 page_migrate(seg
, a
, ppa
, pages
);
4784 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
4786 if (segtype
== MAP_SHARED
) {
4787 vpprot
|= PROT_WRITE
;
4790 hat_memload_array(hat
, a
, pgsz
, ppa
,
4791 prot
& vpprot
, hat_flag
);
4793 if (hat_flag
& HAT_LOAD_LOCK
) {
4794 VM_STAT_ADD(segvnvmstats
.fltanpages
[7]);
4796 VM_STAT_ADD(segvnvmstats
.fltanpages
[8]);
4797 for (i
= 0; i
< pages
; i
++)
4798 page_unlock(ppa
[i
]);
4803 anon_array_exit(&cookie
);
4808 ASSERT(a
< lpgeaddr
);
4810 * ierr == -1 means we failed to allocate a large page.
4811 * so do a size down operation.
4813 * ierr == -2 means some other process that privately shares
4814 * pages with this process has allocated a larger page and we
4815 * need to retry with larger pages. So do a size up
4816 * operation. This relies on the fact that large pages are
4817 * never partially shared i.e. if we share any constituent
4818 * page of a large page with another process we must share the
4819 * entire large page. Note this cannot happen for SOFTLOCK
4820 * case, unless current address (a) is at the beginning of the
4821 * next page size boundary because the other process couldn't
4822 * have relocated locked pages.
4824 ASSERT(ierr
== -1 || ierr
== -2);
4826 if (segvn_anypgsz
) {
4827 ASSERT(ierr
== -2 || szc
!= 0);
4828 ASSERT(ierr
== -1 || szc
< seg
->s_szc
);
4829 szc
= (ierr
== -1) ? szc
- 1 : szc
+ 1;
4832 * For non COW faults and segvn_anypgsz == 0
4833 * we need to be careful not to loop forever
4834 * if existing page is found with szc other
4835 * than 0 or seg->s_szc. This could be due
4836 * to page relocations on behalf of DR or
4837 * more likely large page creation. For this
4838 * case simply re-size to existing page's szc
4839 * if returned by anon_map_getpages().
4841 if (ppa_szc
== (uint_t
)-1) {
4842 szc
= (ierr
== -1) ? 0 : seg
->s_szc
;
4844 ASSERT(ppa_szc
<= seg
->s_szc
);
4845 ASSERT(ierr
== -2 || ppa_szc
< szc
);
4846 ASSERT(ierr
== -1 || ppa_szc
> szc
);
4851 pgsz
= page_get_pagesize(szc
);
4853 ASSERT(type
!= F_SOFTLOCK
|| ierr
== -1 ||
4854 (IS_P2ALIGNED(a
, pgsz
) && IS_P2ALIGNED(lpgeaddr
, pgsz
)));
4855 if (type
== F_SOFTLOCK
) {
4857 * For softlocks we cannot reduce the fault area
4858 * (calculated based on the largest page size for this
4859 * segment) for size down and a is already next
4860 * page size aligned as assertted above for size
4861 * ups. Therefore just continue in case of softlock.
4863 VM_STAT_ADD(segvnvmstats
.fltanpages
[9]);
4864 continue; /* keep lint happy */
4865 } else if (ierr
== -2) {
4868 * Size up case. Note lpgaddr may only be needed for
4869 * softlock case so we don't adjust it here.
4871 VM_STAT_ADD(segvnvmstats
.fltanpages
[10]);
4872 a
= (caddr_t
)P2ALIGN((uintptr_t)a
, pgsz
);
4873 ASSERT(a
>= lpgaddr
);
4874 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4875 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4876 vpage
= (svd
->vpage
!= NULL
) ?
4877 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4880 * Size down case. Note lpgaddr may only be needed for
4881 * softlock case so we don't adjust it here.
4883 VM_STAT_ADD(segvnvmstats
.fltanpages
[11]);
4884 ASSERT(IS_P2ALIGNED(a
, pgsz
));
4885 ASSERT(IS_P2ALIGNED(lpgeaddr
, pgsz
));
4886 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4887 ASSERT(a
< lpgeaddr
);
4890 * The beginning of the large page region can
4891 * be pulled to the right to make a smaller
4892 * region. We haven't yet faulted a single
4895 VM_STAT_ADD(segvnvmstats
.fltanpages
[12]);
4896 a
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
4897 ASSERT(a
>= lpgaddr
);
4898 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4899 vpage
= (svd
->vpage
!= NULL
) ?
4900 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4904 VM_STAT_ADD(segvnvmstats
.fltanpages
[13]);
4905 ANON_LOCK_EXIT(&
->a_rwlock
);
4906 kmem_cache_free(segvn_szc_cache
[ppaszc
], ppa
);
4909 VM_STAT_ADD(segvnvmstats
.fltanpages
[14]);
4910 ANON_LOCK_EXIT(&
->a_rwlock
);
4911 kmem_cache_free(segvn_szc_cache
[ppaszc
], ppa
);
4912 if (type
== F_SOFTLOCK
&& a
> lpgaddr
) {
4913 VM_STAT_ADD(segvnvmstats
.fltanpages
[15]);
4914 segvn_softunlock(seg
, lpgaddr
, a
- lpgaddr
, S_OTHER
);
4919 int fltadvice
= 1; /* set to free behind pages for sequential access */
4922 * This routine is called via a machine specific fault handling routine.
4923 * It is also called by software routines wishing to lock or unlock
4924 * a range of addresses.
4926 * Here is the basic algorithm:
4928 * Call segvn_softunlock
4931 * Checking and set up work
4932 * If we will need some non-anonymous pages
4933 * Call VOP_GETPAGE over the range of non-anonymous pages
4935 * Loop over all addresses requested
4936 * Call segvn_faultpage passing in page list
4937 * to load up translations and handle anonymous pages
4939 * Load up translation to any additional pages in page list not
4940 * already handled that fit into this segment
4943 segvn_fault(struct hat
*hat
, struct seg
*seg
, caddr_t addr
, size_t len
,
4944 enum fault_type type
, enum seg_rw rw
)
4946 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
4947 page_t
**plp
, **ppp
, *pp
;
4950 struct vpage
*vpage
;
4951 uint_t vpprot
, prot
;
4953 page_t
*pl
[PVN_GETPAGE_NUM
+ 1];
4954 size_t plsz
, pl_alloc_sz
;
4957 struct anon_map
*amp
;
4959 caddr_t lpgaddr
, lpgeaddr
;
4961 anon_sync_obj_t cookie
;
4962 int brkcow
= BREAK_COW_SHARE(rw
, type
, svd
->type
);
4964 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
4965 ASSERT(svd
->amp
== NULL
|| svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
4968 * First handle the easy stuff
4970 if (type
== F_SOFTUNLOCK
) {
4971 if (rw
== S_READ_NOCOW
) {
4973 ASSERT(AS_WRITE_HELD(seg
->s_as
));
4975 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
4976 pgsz
= (seg
->s_szc
== 0) ? PAGESIZE
:
4977 page_get_pagesize(seg
->s_szc
);
4978 VM_STAT_COND_ADD(pgsz
> PAGESIZE
, segvnvmstats
.fltanpages
[16]);
4979 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
4980 segvn_softunlock(seg
, lpgaddr
, lpgeaddr
- lpgaddr
, rw
);
4981 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4985 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
||
4986 !HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
));
4988 if (svd
->tr_state
== SEGVN_TR_INIT
) {
4989 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4990 if (svd
->tr_state
== SEGVN_TR_INIT
) {
4991 ASSERT(svd
->vp
!= NULL
&& svd
->amp
== NULL
);
4992 ASSERT(svd
->flags
& MAP_TEXT
);
4993 ASSERT(svd
->type
== MAP_PRIVATE
);
4994 segvn_textrepl(seg
);
4995 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
4996 ASSERT(svd
->tr_state
!= SEGVN_TR_ON
||
4999 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5001 } else if (svd
->tr_state
!= SEGVN_TR_OFF
) {
5002 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5004 if (rw
== S_WRITE
&& svd
->tr_state
!= SEGVN_TR_OFF
) {
5005 ASSERT(!svd
->pageprot
&& !(svd
->prot
& PROT_WRITE
));
5006 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5010 if (svd
->tr_state
== SEGVN_TR_ON
) {
5011 ASSERT(svd
->vp
!= NULL
&& svd
->amp
!= NULL
);
5012 segvn_textunrepl(seg
, 0);
5013 ASSERT(svd
->amp
== NULL
&&
5014 svd
->tr_state
== SEGVN_TR_OFF
);
5015 } else if (svd
->tr_state
!= SEGVN_TR_OFF
) {
5016 svd
->tr_state
= SEGVN_TR_OFF
;
5018 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
5019 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5023 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
5026 * If we have the same protections for the entire segment,
5027 * insure that the access being attempted is legitimate.
5030 if (svd
->pageprot
== 0) {
5036 protchk
= PROT_READ
;
5039 protchk
= PROT_WRITE
;
5042 protchk
= PROT_EXEC
;
5046 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
5050 if ((svd
->prot
& protchk
) == 0) {
5051 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5052 return (FC_PROT
); /* illegal access type */
5056 if (brkcow
&& HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5057 /* this must be SOFTLOCK S_READ fault */
5058 ASSERT(svd
->amp
== NULL
);
5059 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5060 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5061 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5062 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5064 * this must be the first ever non S_READ_NOCOW
5065 * softlock for this segment.
5067 ASSERT(svd
->softlockcnt
== 0);
5068 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
5070 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
5072 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5077 * We can't allow the long term use of softlocks for vmpss segments,
5078 * because in some file truncation cases we should be able to demote
5079 * the segment, which requires that there are no softlocks. The
5080 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5081 * segment is S_READ_NOCOW, where the caller holds the address space
5082 * locked as writer and calls softunlock before dropping the as lock.
5083 * S_READ_NOCOW is used by /proc to read memory from another user.
5085 * Another deadlock between SOFTLOCK and file truncation can happen
5086 * because segvn_fault_vnodepages() calls the FS one pagesize at
5087 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5088 * can cause a deadlock because the first set of page_t's remain
5089 * locked SE_SHARED. To avoid this, we demote segments on a first
5090 * SOFTLOCK if they have a length greater than the segment's
5093 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5094 * the access type is S_READ_NOCOW and the fault length is less than
5095 * or equal to the segment's page size. While this is quite restrictive,
5096 * it should be the most common case of SOFTLOCK against a vmpss
5099 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5100 * caller makes sure no COW will be caused by another thread for a
5103 if (type
== F_SOFTLOCK
&& svd
->vp
!= NULL
&& seg
->s_szc
!= 0) {
5106 if (rw
!= S_READ_NOCOW
) {
5109 if (!demote
&& len
> PAGESIZE
) {
5110 pgsz
= page_get_pagesize(seg
->s_szc
);
5111 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
,
5113 if (lpgeaddr
- lpgaddr
> pgsz
) {
5118 ASSERT(demote
|| AS_WRITE_HELD(seg
->s_as
));
5121 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5122 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5123 if (seg
->s_szc
!= 0) {
5124 segvn_vmpss_clrszc_cnt
++;
5125 ASSERT(svd
->softlockcnt
== 0);
5126 err
= segvn_clrszc(seg
);
5128 segvn_vmpss_clrszc_err
++;
5129 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5130 return (FC_MAKE_ERR(err
));
5133 ASSERT(seg
->s_szc
== 0);
5134 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5140 * Check to see if we need to allocate an anon_map structure.
5142 if (svd
->amp
== NULL
&& (svd
->vp
== NULL
|| brkcow
)) {
5143 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5145 * Drop the "read" lock on the segment and acquire
5146 * the "write" version since we have to allocate the
5149 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5150 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5152 if (svd
->amp
== NULL
) {
5153 svd
->amp
= anonmap_alloc(seg
->s_size
, 0, ANON_SLEEP
);
5154 svd
->amp
->a_szc
= seg
->s_szc
;
5156 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5159 * Start all over again since segment protections
5160 * may have changed after we dropped the "read" lock.
5166 * S_READ_NOCOW vs S_READ distinction was
5167 * only needed for the code above. After
5168 * that we treat it as S_READ.
5170 if (rw
== S_READ_NOCOW
) {
5171 ASSERT(type
== F_SOFTLOCK
);
5172 ASSERT(AS_WRITE_HELD(seg
->s_as
));
5179 * MADV_SEQUENTIAL work is ignored for large page segments.
5181 if (seg
->s_szc
!= 0) {
5182 pgsz
= page_get_pagesize(seg
->s_szc
);
5183 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
5184 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
5185 if (svd
->vp
== NULL
) {
5186 err
= segvn_fault_anonpages(hat
, seg
, lpgaddr
,
5187 lpgeaddr
, type
, rw
, addr
, addr
+ len
, brkcow
);
5189 err
= segvn_fault_vnodepages(hat
, seg
, lpgaddr
,
5190 lpgeaddr
, type
, rw
, addr
, addr
+ len
, brkcow
);
5191 if (err
== IE_RETRY
) {
5192 ASSERT(seg
->s_szc
== 0);
5193 ASSERT(SEGVN_READ_HELD(seg
->s_as
, &svd
->lock
));
5194 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5198 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5202 page
= seg_page(seg
, addr
);
5204 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5205 anon_index
= svd
->anon_index
+ page
;
5207 if (type
== F_PROT
&& rw
== S_READ
&&
5208 svd
->tr_state
== SEGVN_TR_OFF
&&
5209 svd
->type
== MAP_PRIVATE
&& svd
->pageprot
== 0) {
5210 size_t index
= anon_index
;
5213 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5215 * The fast path could apply to S_WRITE also, except
5216 * that the protection fault could be caused by lazy
5217 * tlb flush when ro->rw. In this case, the pte is
5218 * RW already. But RO in the other cpu's tlb causes
5219 * the fault. Since hat_chgprot won't do anything if
5220 * pte doesn't change, we may end up faulting
5221 * indefinitely until the RO tlb entry gets replaced.
5223 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
, index
++) {
5224 anon_array_enter(amp
, index
, &cookie
);
5225 ap
= anon_get_ptr(amp
->ahp
, index
);
5226 anon_array_exit(&cookie
);
5227 if ((ap
== NULL
) || (ap
->an_refcnt
!= 1)) {
5228 ANON_LOCK_EXIT(&
->a_rwlock
);
5232 hat_chgprot(seg
->s_as
->a_hat
, addr
, len
, svd
->prot
);
5233 ANON_LOCK_EXIT(&
->a_rwlock
);
5234 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5240 if (svd
->vpage
== NULL
)
5243 vpage
= &svd
->vpage
[page
];
5245 off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
5248 * If MADV_SEQUENTIAL has been set for the particular page we
5249 * are faulting on, free behind all pages in the segment and put
5250 * them on the free list.
5253 if ((page
!= 0) && fltadvice
&& svd
->tr_state
!= SEGVN_TR_ON
) {
5255 ulong_t fanon_index
;
5257 u_offset_t pgoff
, fpgoff
;
5259 struct anon
*fap
= NULL
;
5261 if (svd
->advice
== MADV_SEQUENTIAL
||
5263 VPP_ADVICE(vpage
) == MADV_SEQUENTIAL
)) {
5264 pgoff
= off
- PAGESIZE
;
5267 vpp
= &svd
->vpage
[fpage
];
5269 fanon_index
= svd
->anon_index
+ fpage
;
5271 while (pgoff
> svd
->offset
) {
5272 if (svd
->advice
!= MADV_SEQUENTIAL
&&
5273 (!svd
->pageadvice
|| (vpage
&&
5274 VPP_ADVICE(vpp
) != MADV_SEQUENTIAL
)))
5278 * If this is an anon page, we must find the
5279 * correct <vp, offset> for it
5283 ANON_LOCK_ENTER(&
->a_rwlock
,
5285 anon_array_enter(amp
, fanon_index
,
5287 fap
= anon_get_ptr(amp
->ahp
,
5290 swap_xlate(fap
, &fvp
, &fpgoff
);
5295 anon_array_exit(&cookie
);
5296 ANON_LOCK_EXIT(&
->a_rwlock
);
5304 * Skip pages that are free or have an
5307 pp
= page_lookup_nowait(fvp
, fpgoff
, SE_SHARED
);
5311 * We don't need the page_struct_lock to test
5312 * as this is only advisory; even if we
5313 * acquire it someone might race in and lock
5314 * the page after we unlock and before the
5315 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5317 if (pp
->p_lckcnt
== 0 && pp
->p_cowcnt
== 0) {
5319 * Hold the vnode before releasing
5320 * the page lock to prevent it from
5321 * being freed and re-used by some
5327 * We should build a page list
5328 * to kluster putpages XXX
5330 (void) VOP_PUTPAGE(fvp
,
5331 (offset_t
)fpgoff
, PAGESIZE
,
5332 (B_DONTNEED
|B_FREE
|B_ASYNC
),
5337 * XXX - Should the loop terminate if
5338 * the page is `locked'?
5354 * See if we need to call VOP_GETPAGE for
5355 * *any* of the range being faulted on.
5356 * We can skip all of this work if there
5357 * was no original vnode.
5359 if (svd
->vp
!= NULL
) {
5372 * Only acquire reader lock to prevent amp->ahp
5373 * from being changed. It's ok to miss pages,
5374 * hence we don't do anon_array_enter
5376 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5377 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
5379 if (len
<= PAGESIZE
)
5380 /* inline non_anon() */
5381 dogetpage
= (ap
== NULL
);
5383 dogetpage
= non_anon(amp
->ahp
, anon_index
,
5385 ANON_LOCK_EXIT(&
->a_rwlock
);
5390 struct as
*as
= seg
->s_as
;
5392 if (len
> ptob((sizeof (pl
) / sizeof (pl
[0])) - 1)) {
5394 * Page list won't fit in local array,
5395 * allocate one of the needed size.
5398 (btop(len
) + 1) * sizeof (page_t
*);
5399 plp
= kmem_alloc(pl_alloc_sz
, KM_SLEEP
);
5402 } else if (rw
== S_WRITE
&& svd
->type
== MAP_PRIVATE
||
5403 svd
->tr_state
== SEGVN_TR_ON
|| rw
== S_OTHER
||
5404 (((size_t)(addr
+ PAGESIZE
) <
5405 (size_t)(seg
->s_base
+ seg
->s_size
)) &&
5406 hat_probe(as
->a_hat
, addr
+ PAGESIZE
))) {
5408 * Ask VOP_GETPAGE to return the exact number
5410 * (a) this is a COW fault, or
5411 * (b) this is a software fault, or
5412 * (c) next page is already mapped.
5417 * Ask VOP_GETPAGE to return adjacent pages
5418 * within the segment.
5420 plsz
= MIN((size_t)PVN_GETPAGE_SZ
, (size_t)
5421 ((seg
->s_base
+ seg
->s_size
) - addr
));
5422 ASSERT((addr
+ plsz
) <=
5423 (seg
->s_base
+ seg
->s_size
));
5427 * Need to get some non-anonymous pages.
5428 * We need to make only one call to GETPAGE to do
5429 * this to prevent certain deadlocking conditions
5430 * when we are doing locking. In this case
5431 * non_anon() should have picked up the smallest
5432 * range which includes all the non-anonymous
5433 * pages in the requested range. We have to
5434 * be careful regarding which rw flag to pass in
5435 * because on a private mapping, the underlying
5436 * object is never allowed to be written.
5438 if (rw
== S_WRITE
&& svd
->type
== MAP_PRIVATE
) {
5444 TRACE_3(TR_FAC_VM
, TR_SEGVN_GETPAGE
,
5445 "segvn_getpage:seg %p addr %p vp %p",
5447 err
= VOP_GETPAGE(vp
, (offset_t
)vp_off
, vp_len
,
5448 &vpprot
, plp
, plsz
, seg
, addr
+ (vp_off
- off
), arw
,
5451 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5452 segvn_pagelist_rele(plp
);
5454 kmem_free(plp
, pl_alloc_sz
);
5455 return (FC_MAKE_ERR(err
));
5457 if (svd
->type
== MAP_PRIVATE
)
5458 vpprot
&= ~PROT_WRITE
;
5463 * N.B. at this time the plp array has all the needed non-anon
5464 * pages in addition to (possibly) having some adjacent pages.
5468 * Always acquire the anon_array_lock to prevent
5469 * 2 threads from allocating separate anon slots for
5472 * If this is a copy-on-write fault and we don't already
5473 * have the anon_array_lock, acquire it to prevent the
5474 * fault routine from handling multiple copy-on-write faults
5475 * on the same "addr" in the same address space.
5477 * Only one thread should deal with the fault since after
5478 * it is handled, the other threads can acquire a translation
5479 * to the newly created private page. This prevents two or
5480 * more threads from creating different private pages for the
5483 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5484 * to prevent deadlock between this thread and another thread
5485 * which has soft-locked this page and wants to acquire serial_lock.
5488 * The fix for bug 4026339 becomes unnecessary when using the
5489 * locking scheme with per amp rwlock and a global set of hash
5490 * lock, anon_array_lock. If we steal a vnode page when low
5491 * on memory and upgrad the page lock through page_rename,
5492 * then the page is PAGE_HANDLED, nothing needs to be done
5493 * for this page after returning from segvn_faultpage.
5495 * But really, the page lock should be downgraded after
5496 * the stolen page is page_rename'd.
5500 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5503 * Ok, now loop over the address range and handle faults
5505 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
, off
+= PAGESIZE
) {
5506 err
= segvn_faultpage(hat
, seg
, a
, off
, vpage
, plp
, vpprot
,
5510 ANON_LOCK_EXIT(&
->a_rwlock
);
5511 if (type
== F_SOFTLOCK
&& a
> addr
) {
5512 segvn_softunlock(seg
, addr
, (a
- addr
),
5515 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5516 segvn_pagelist_rele(plp
);
5518 kmem_free(plp
, pl_alloc_sz
);
5523 } else if (svd
->vpage
) {
5524 page
= seg_page(seg
, addr
);
5525 vpage
= &svd
->vpage
[++page
];
5529 /* Didn't get pages from the underlying fs so we're done */
5534 * Now handle any other pages in the list returned.
5535 * If the page can be used, load up the translations now.
5536 * Note that the for loop will only be entered if "plp"
5537 * is pointing to a non-NULL page pointer which means that
5538 * VOP_GETPAGE() was called and vpprot has been initialized.
5540 if (svd
->pageprot
== 0)
5541 prot
= svd
->prot
& vpprot
;
5545 * Large Files: diff should be unsigned value because we started
5546 * supporting > 2GB segment sizes from 2.5.1 and when a
5547 * large file of size > 2GB gets mapped to address space
5548 * the diff value can be > 2GB.
5551 for (ppp
= plp
; (pp
= *ppp
) != NULL
; ppp
++) {
5555 anon_sync_obj_t cookie
;
5556 int hat_flag
= HAT_LOAD_ADV
;
5558 if (svd
->flags
& MAP_TEXT
) {
5559 hat_flag
|= HAT_LOAD_TEXT
;
5562 if (pp
== PAGE_HANDLED
)
5565 if (svd
->tr_state
!= SEGVN_TR_ON
&&
5566 pp
->p_offset
>= svd
->offset
&&
5567 pp
->p_offset
< svd
->offset
+ seg
->s_size
) {
5569 diff
= pp
->p_offset
- svd
->offset
;
5572 * Large Files: Following is the assertion
5573 * validating the above cast.
5575 ASSERT(svd
->vp
== pp
->p_vnode
);
5579 prot
= VPP_PROT(&svd
->vpage
[page
]) & vpprot
;
5582 * Prevent other threads in the address space from
5583 * creating private pages (i.e., allocating anon slots)
5584 * while we are in the process of loading translations
5585 * to additional pages returned by the underlying
5589 anon_index
= svd
->anon_index
+ page
;
5590 anon_array_enter(amp
, anon_index
, &cookie
);
5591 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
5593 if ((amp
== NULL
) || (ap
== NULL
)) {
5594 if (IS_VMODSORT(pp
->p_vnode
) ||
5598 else if (rw
!= S_OTHER
&&
5600 prot
&= ~PROT_WRITE
;
5603 * Skip mapping read ahead pages marked
5604 * for migration, so they will get migrated
5607 ASSERT(amp
== NULL
||
5608 svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5609 if ((prot
& PROT_READ
) && !PP_ISMIGRATE(pp
)) {
5610 hat_memload_region(hat
,
5617 anon_array_exit(&cookie
);
5623 ANON_LOCK_EXIT(&
->a_rwlock
);
5624 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5626 kmem_free(plp
, pl_alloc_sz
);
5631 * This routine is used to start I/O on pages asynchronously. XXX it will
5632 * only create PAGESIZE pages. At fault time they will be relocated into
5636 segvn_faulta(struct seg
*seg
, caddr_t addr
)
5638 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
5640 struct anon_map
*amp
;
5643 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
5645 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
5646 if ((amp
= svd
->amp
) != NULL
) {
5650 * Reader lock to prevent amp->ahp from being changed.
5651 * This is advisory, it's ok to miss a page, so
5652 * we don't do anon_array_enter lock.
5654 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5655 if ((ap
= anon_get_ptr(amp
->ahp
,
5656 svd
->anon_index
+ seg_page(seg
, addr
))) != NULL
) {
5658 err
= anon_getpage(&ap
, NULL
, NULL
,
5659 0, seg
, addr
, S_READ
, svd
->cred
);
5661 ANON_LOCK_EXIT(&
->a_rwlock
);
5662 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5664 return (FC_MAKE_ERR(err
));
5667 ANON_LOCK_EXIT(&
->a_rwlock
);
5670 if (svd
->vp
== NULL
) {
5671 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5672 return (0); /* zfod page - do nothing now */
5676 TRACE_3(TR_FAC_VM
, TR_SEGVN_GETPAGE
,
5677 "segvn_getpage:seg %p addr %p vp %p", seg
, addr
, vp
);
5678 err
= VOP_GETPAGE(vp
,
5679 (offset_t
)(svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
)),
5680 PAGESIZE
, NULL
, NULL
, 0, seg
, addr
,
5681 S_OTHER
, svd
->cred
, NULL
);
5683 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5685 return (FC_MAKE_ERR(err
));
5690 segvn_setprot(struct seg
*seg
, caddr_t addr
, size_t len
, uint_t prot
)
5692 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
5693 struct vpage
*cvp
, *svp
, *evp
;
5697 anon_sync_obj_t cookie
;
5698 int unload_done
= 0;
5700 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
5702 if ((svd
->maxprot
& prot
) != prot
)
5703 return (EACCES
); /* violated maxprot */
5705 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5707 /* return if prot is the same */
5708 if (!svd
->pageprot
&& svd
->prot
== prot
) {
5709 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5714 * Since we change protections we first have to flush the cache.
5715 * This makes sure all the pagelock calls have to recheck
5718 if (svd
->softlockcnt
> 0) {
5719 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5722 * If this is shared segment non 0 softlockcnt
5723 * means locked pages are still in use.
5725 if (svd
->type
== MAP_SHARED
) {
5726 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5731 * Since we do have the segvn writers lock nobody can fill
5732 * the cache with entries belonging to this seg during
5733 * the purge. The flush either succeeds or we still have
5737 if (svd
->softlockcnt
> 0) {
5738 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5743 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5744 ASSERT(svd
->amp
== NULL
);
5745 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5746 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
5748 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
5750 } else if (svd
->tr_state
== SEGVN_TR_INIT
) {
5751 svd
->tr_state
= SEGVN_TR_OFF
;
5752 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
5753 ASSERT(svd
->amp
!= NULL
);
5754 segvn_textunrepl(seg
, 0);
5755 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
5759 if ((prot
& PROT_WRITE
) && svd
->type
== MAP_SHARED
&&
5760 svd
->vp
!= NULL
&& (svd
->vp
->v_flag
& VVMEXEC
)) {
5761 ASSERT(vn_is_mapped(svd
->vp
, V_WRITE
));
5762 segvn_inval_trcache(svd
->vp
);
5764 if (seg
->s_szc
!= 0) {
5766 pgsz
= page_get_pagesize(seg
->s_szc
);
5767 pgcnt
= pgsz
>> PAGESHIFT
;
5768 ASSERT(IS_P2ALIGNED(pgcnt
, pgcnt
));
5769 if (!IS_P2ALIGNED(addr
, pgsz
) || !IS_P2ALIGNED(len
, pgsz
)) {
5770 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5771 ASSERT(seg
->s_base
!= addr
|| seg
->s_size
!= len
);
5773 * If we are holding the as lock as a reader then
5774 * we need to return IE_RETRY and let the as
5775 * layer drop and re-acquire the lock as a writer.
5777 if (AS_READ_HELD(seg
->s_as
))
5779 VM_STAT_ADD(segvnvmstats
.demoterange
[1]);
5780 if (svd
->type
== MAP_PRIVATE
|| svd
->vp
!= NULL
) {
5781 err
= segvn_demote_range(seg
, addr
, len
,
5784 uint_t szcvec
= map_pgszcvec(seg
->s_base
,
5785 pgsz
, (uintptr_t)seg
->s_base
,
5786 (svd
->flags
& MAP_TEXT
), MAPPGSZC_SHM
, 0);
5787 err
= segvn_demote_range(seg
, addr
, len
,
5800 * If it's a private mapping and we're making it writable then we
5801 * may have to reserve the additional swap space now. If we are
5802 * making writable only a part of the segment then we use its vpage
5803 * array to keep a record of the pages for which we have reserved
5804 * swap. In this case we set the pageswap field in the segment's
5805 * segvn structure to record this.
5807 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5808 * removing write permission on the entire segment and we haven't
5809 * modified any pages, we can release the swap space.
5811 if (svd
->type
== MAP_PRIVATE
) {
5812 if (prot
& PROT_WRITE
) {
5813 if (!(svd
->flags
& MAP_NORESERVE
) &&
5814 !(svd
->swresv
&& svd
->pageswap
== 0)) {
5818 * Start by determining how much swap
5819 * space is required.
5821 if (addr
== seg
->s_base
&&
5822 len
== seg
->s_size
&&
5823 svd
->pageswap
== 0) {
5824 /* The whole segment */
5828 * Make sure that the vpage array
5829 * exists, and make a note of the
5830 * range of elements corresponding
5834 if (svd
->vpage
== NULL
) {
5835 SEGVN_LOCK_EXIT(seg
->s_as
,
5839 svp
= &svd
->vpage
[seg_page(seg
, addr
)];
5840 evp
= &svd
->vpage
[seg_page(seg
,
5843 if (svd
->pageswap
== 0) {
5845 * This is the first time we've
5846 * asked for a part of this
5847 * segment, so we need to
5848 * reserve everything we've
5854 * We have to count the number
5855 * of pages required.
5857 for (cvp
= svp
; cvp
< evp
;
5859 if (!VPP_ISSWAPRES(cvp
))
5866 /* Try to reserve the necessary swap. */
5867 if (anon_resv_zone(sz
,
5868 seg
->s_as
->a_proc
->p_zone
) == 0) {
5869 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5874 * Make a note of how much swap space
5877 if (svd
->pageswap
== 0 && sz
== seg
->s_size
) {
5880 ASSERT(svd
->vpage
!= NULL
);
5883 for (cvp
= svp
; cvp
< evp
; cvp
++) {
5884 if (!VPP_ISSWAPRES(cvp
))
5885 VPP_SETSWAPRES(cvp
);
5891 * Swap space is released only if this segment
5892 * does not map anonymous memory, since read faults
5893 * on such segments still need an anon slot to read
5896 if (svd
->swresv
!= 0 && svd
->vp
!= NULL
&&
5897 svd
->amp
== NULL
&& addr
== seg
->s_base
&&
5898 len
== seg
->s_size
&& svd
->pageprot
== 0) {
5899 ASSERT(svd
->pageswap
== 0);
5900 anon_unresv_zone(svd
->swresv
,
5901 seg
->s_as
->a_proc
->p_zone
);
5903 TRACE_3(TR_FAC_VM
, TR_ANON_PROC
,
5904 "anon proc:%p %lu %u", seg
, 0, 0);
5909 if (addr
== seg
->s_base
&& len
== seg
->s_size
&& svd
->vpage
== NULL
) {
5910 if (svd
->prot
== prot
) {
5911 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5912 return (0); /* all done */
5914 svd
->prot
= (uchar_t
)prot
;
5915 } else if (svd
->type
== MAP_PRIVATE
) {
5916 struct anon
*ap
= NULL
;
5918 u_offset_t offset
, off
;
5919 struct anon_map
*amp
;
5920 ulong_t anon_idx
= 0;
5923 * A vpage structure exists or else the change does not
5924 * involve the entire segment. Establish a vpage structure
5925 * if none is there. Then, for each page in the range,
5926 * adjust its individual permissions. Note that write-
5927 * enabling a MAP_PRIVATE page can affect the claims for
5928 * locked down memory. Overcommitting memory terminates
5932 if (svd
->vpage
== NULL
) {
5933 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5937 if ((amp
= svd
->amp
) != NULL
) {
5938 anon_idx
= svd
->anon_index
+ seg_page(seg
, addr
);
5939 ASSERT(seg
->s_szc
== 0 ||
5940 IS_P2ALIGNED(anon_idx
, pgcnt
));
5941 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5944 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
5945 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
5948 * See Statement at the beginning of segvn_lockop regarding
5949 * the way cowcnts and lckcnts are handled.
5951 for (svp
= &svd
->vpage
[seg_page(seg
, addr
)]; svp
< evp
; svp
++) {
5953 if (seg
->s_szc
!= 0) {
5955 anon_array_enter(amp
, anon_idx
,
5958 if (IS_P2ALIGNED(anon_idx
, pgcnt
) &&
5959 !segvn_claim_pages(seg
, svp
, offset
,
5962 anon_array_exit(&cookie
);
5967 anon_array_exit(&cookie
);
5972 anon_array_enter(amp
, anon_idx
,
5974 ap
= anon_get_ptr(amp
->ahp
, anon_idx
++);
5977 if (VPP_ISPPLOCK(svp
) &&
5978 VPP_PROT(svp
) != prot
) {
5980 if (amp
== NULL
|| ap
== NULL
) {
5984 swap_xlate(ap
, &vp
, &off
);
5986 anon_array_exit(&cookie
);
5988 if ((pp
= page_lookup(vp
, off
,
5989 SE_SHARED
)) == NULL
) {
5990 panic("segvn_setprot: no page");
5993 ASSERT(seg
->s_szc
== 0);
5994 if ((VPP_PROT(svp
) ^ prot
) &
5996 if (prot
& PROT_WRITE
) {
6011 } else if (amp
!= NULL
)
6012 anon_array_exit(&cookie
);
6014 VPP_SETPROT(svp
, prot
);
6018 ANON_LOCK_EXIT(&
->a_rwlock
);
6021 * Did we terminate prematurely? If so, simply unload
6022 * the translations to the things we've updated so far.
6026 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6029 len
= (svp
- &svd
->vpage
[seg_page(seg
, addr
)]) *
6031 ASSERT(seg
->s_szc
== 0 || IS_P2ALIGNED(len
, pgsz
));
6033 hat_unload(seg
->s_as
->a_hat
, addr
,
6035 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6040 if (svd
->vpage
== NULL
) {
6041 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6045 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
6046 for (svp
= &svd
->vpage
[seg_page(seg
, addr
)]; svp
< evp
; svp
++) {
6047 VPP_SETPROT(svp
, prot
);
6052 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6056 if (((prot
& PROT_WRITE
) != 0 &&
6057 (svd
->vp
!= NULL
|| svd
->type
== MAP_PRIVATE
)) ||
6058 (prot
& ~PROT_USER
) == PROT_NONE
) {
6060 * Either private or shared data with write access (in
6061 * which case we need to throw out all former translations
6062 * so that we get the right translations set up on fault
6063 * and we don't allow write access to any copy-on-write pages
6064 * that might be around or to prevent write access to pages
6065 * representing holes in a file), or we don't have permission
6066 * to access the memory at all (in which case we have to
6067 * unload any current translations that might exist).
6069 hat_unload(seg
->s_as
->a_hat
, addr
, len
, HAT_UNLOAD
);
6072 * A shared mapping or a private mapping in which write
6073 * protection is going to be denied - just change all the
6074 * protections over the range of addresses in question.
6075 * segvn does not support any other attributes other
6076 * than prot so we can use hat_chgattr.
6078 hat_chgattr(seg
->s_as
->a_hat
, addr
, len
, prot
);
6081 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6087 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize,
6088 * to determine if the seg is capable of mapping the requested szc.
6091 segvn_setpagesize(struct seg
*seg
, caddr_t addr
, size_t len
, uint_t szc
)
6093 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6094 struct segvn_data
*nsvd
;
6095 struct anon_map
*amp
= svd
->amp
;
6097 caddr_t eaddr
= addr
+ len
, a
;
6098 size_t pgsz
= page_get_pagesize(szc
);
6099 pgcnt_t pgcnt
= page_get_pagecnt(szc
);
6101 u_offset_t off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
6103 ASSERT(seg
->s_as
&& AS_WRITE_HELD(seg
->s_as
));
6104 ASSERT(addr
>= seg
->s_base
&& eaddr
<= seg
->s_base
+ seg
->s_size
);
6106 if (seg
->s_szc
== szc
|| segvn_lpg_disable
!= 0) {
6111 * addr should always be pgsz aligned but eaddr may be misaligned if
6112 * it's at the end of the segment.
6114 * XXX we should assert this condition since as_setpagesize() logic
6117 if (!IS_P2ALIGNED(addr
, pgsz
) ||
6118 (!IS_P2ALIGNED(eaddr
, pgsz
) &&
6119 eaddr
!= seg
->s_base
+ seg
->s_size
)) {
6121 segvn_setpgsz_align_err
++;
6125 if (amp
!= NULL
&& svd
->type
== MAP_SHARED
) {
6126 ulong_t an_idx
= svd
->anon_index
+ seg_page(seg
, addr
);
6127 if (!IS_P2ALIGNED(an_idx
, pgcnt
)) {
6129 segvn_setpgsz_anon_align_err
++;
6134 if ((svd
->flags
& MAP_NORESERVE
) || seg
->s_as
== &kas
||
6135 szc
> segvn_maxpgszc
) {
6139 /* paranoid check */
6140 if (svd
->vp
!= NULL
&&
6141 (IS_SWAPFSVP(svd
->vp
) || VN_ISKAS(svd
->vp
))) {
6145 if (seg
->s_szc
== 0 && svd
->vp
!= NULL
&&
6146 map_addr_vacalign_check(addr
, off
)) {
6151 * Check that protections are the same within new page
6154 if (svd
->pageprot
) {
6155 for (a
= addr
; a
< eaddr
; a
+= pgsz
) {
6156 if ((a
+ pgsz
) > eaddr
) {
6157 if (!sameprot(seg
, a
, eaddr
- a
)) {
6161 if (!sameprot(seg
, a
, pgsz
)) {
6169 * Since we are changing page size we first have to flush
6170 * the cache. This makes sure all the pagelock calls have
6171 * to recheck protections.
6173 if (svd
->softlockcnt
> 0) {
6174 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6177 * If this is shared segment non 0 softlockcnt
6178 * means locked pages are still in use.
6180 if (svd
->type
== MAP_SHARED
) {
6185 * Since we do have the segvn writers lock nobody can fill
6186 * the cache with entries belonging to this seg during
6187 * the purge. The flush either succeeds or we still have
6191 if (svd
->softlockcnt
> 0) {
6196 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
6197 ASSERT(svd
->amp
== NULL
);
6198 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6199 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
6201 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
6202 } else if (svd
->tr_state
== SEGVN_TR_INIT
) {
6203 svd
->tr_state
= SEGVN_TR_OFF
;
6204 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
6205 ASSERT(svd
->amp
!= NULL
);
6206 segvn_textunrepl(seg
, 1);
6207 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
6212 * Operation for sub range of existing segment.
6214 if (addr
!= seg
->s_base
|| eaddr
!= (seg
->s_base
+ seg
->s_size
)) {
6215 if (szc
< seg
->s_szc
) {
6216 VM_STAT_ADD(segvnvmstats
.demoterange
[2]);
6217 err
= segvn_demote_range(seg
, addr
, len
, SDR_RANGE
, 0);
6221 if (err
== ENOMEM
) {
6226 if (addr
!= seg
->s_base
) {
6227 nseg
= segvn_split_seg(seg
, addr
);
6228 if (eaddr
!= (nseg
->s_base
+ nseg
->s_size
)) {
6229 /* eaddr is szc aligned */
6230 (void) segvn_split_seg(nseg
, eaddr
);
6234 if (eaddr
!= (seg
->s_base
+ seg
->s_size
)) {
6235 /* eaddr is szc aligned */
6236 (void) segvn_split_seg(seg
, eaddr
);
6242 * Break any low level sharing and reset seg->s_szc to 0.
6244 if ((err
= segvn_clrszc(seg
)) != 0) {
6245 if (err
== ENOMEM
) {
6250 ASSERT(seg
->s_szc
== 0);
6253 * If the end of the current segment is not pgsz aligned
6254 * then attempt to concatenate with the next segment.
6256 if (!IS_P2ALIGNED(eaddr
, pgsz
)) {
6257 nseg
= AS_SEGNEXT(seg
->s_as
, seg
);
6258 if (nseg
== NULL
|| nseg
== seg
|| eaddr
!= nseg
->s_base
) {
6261 if (nseg
->s_ops
!= &segvn_ops
) {
6264 nsvd
= (struct segvn_data
*)nseg
->s_data
;
6265 if (nsvd
->softlockcnt
> 0) {
6267 * If this is shared segment non 0 softlockcnt
6268 * means locked pages are still in use.
6270 if (nsvd
->type
== MAP_SHARED
) {
6274 if (nsvd
->softlockcnt
> 0) {
6278 err
= segvn_clrszc(nseg
);
6279 if (err
== ENOMEM
) {
6285 ASSERT(nsvd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6286 err
= segvn_concat(seg
, nseg
, 1);
6297 * May need to re-align anon array to
6301 if (!IS_P2ALIGNED(svd
->anon_index
, pgcnt
)) {
6302 struct anon_hdr
*nahp
;
6304 ASSERT(svd
->type
== MAP_PRIVATE
);
6306 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6307 ASSERT(amp
->refcnt
== 1);
6308 nahp
= anon_create(btop(amp
->size
), ANON_NOSLEEP
);
6310 ANON_LOCK_EXIT(&
->a_rwlock
);
6313 if (anon_copy_ptr(amp
->ahp
, svd
->anon_index
,
6314 nahp
, 0, btop(seg
->s_size
), ANON_NOSLEEP
)) {
6315 anon_release(nahp
, btop(amp
->size
));
6316 ANON_LOCK_EXIT(&
->a_rwlock
);
6319 anon_release(amp
->ahp
, btop(amp
->size
));
6321 svd
->anon_index
= 0;
6322 ANON_LOCK_EXIT(&
->a_rwlock
);
6325 if (svd
->vp
!= NULL
&& szc
!= 0) {
6327 u_offset_t eoffpage
= svd
->offset
;
6328 va
.va_mask
= AT_SIZE
;
6329 eoffpage
+= seg
->s_size
;
6330 eoffpage
= btopr(eoffpage
);
6331 if (VOP_GETATTR(svd
->vp
, &va
, 0, svd
->cred
, NULL
) != 0) {
6332 segvn_setpgsz_getattr_err
++;
6335 if (btopr(va
.va_size
) < eoffpage
) {
6336 segvn_setpgsz_eof_err
++;
6341 * anon_fill_cow_holes() may call VOP_GETPAGE().
6342 * don't take anon map lock here to avoid holding it
6343 * across VOP_GETPAGE() calls that may call back into
6344 * segvn for klsutering checks. We don't really need
6345 * anon map lock here since it's a private segment and
6346 * we hold as level lock as writers.
6348 if ((err
= anon_fill_cow_holes(seg
, seg
->s_base
,
6349 amp
->ahp
, svd
->anon_index
, svd
->vp
, svd
->offset
,
6350 seg
->s_size
, szc
, svd
->prot
, svd
->vpage
,
6355 segvn_setvnode_mpss(svd
->vp
);
6359 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6360 if (svd
->type
== MAP_PRIVATE
) {
6362 } else if (szc
> amp
->a_szc
) {
6365 ANON_LOCK_EXIT(&
->a_rwlock
);
6374 segvn_clrszc(struct seg
*seg
)
6376 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6377 struct anon_map
*amp
= svd
->amp
;
6381 caddr_t a
= seg
->s_base
;
6382 caddr_t ea
= a
+ seg
->s_size
;
6383 ulong_t an_idx
= svd
->anon_index
;
6384 vnode_t
*vp
= svd
->vp
;
6385 struct vpage
*vpage
= svd
->vpage
;
6386 page_t
*anon_pl
[1 + 1], *pp
;
6387 struct anon
*ap
, *oldap
;
6388 uint_t prot
= svd
->prot
, vpprot
;
6391 ASSERT(AS_WRITE_HELD(seg
->s_as
) ||
6392 SEGVN_WRITE_HELD(seg
->s_as
, &svd
->lock
));
6393 ASSERT(svd
->softlockcnt
== 0);
6395 if (vp
== NULL
&& amp
== NULL
) {
6396 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6401 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
6402 ASSERT(svd
->amp
== NULL
);
6403 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6404 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
6406 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
6407 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
6408 ASSERT(svd
->amp
!= NULL
);
6409 segvn_textunrepl(seg
, 1);
6410 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
6413 if (svd
->tr_state
!= SEGVN_TR_OFF
) {
6414 ASSERT(svd
->tr_state
== SEGVN_TR_INIT
);
6415 svd
->tr_state
= SEGVN_TR_OFF
;
6419 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6420 * unload argument is 0 when we are freeing the segment
6421 * and unload was already done.
6423 hat_unload(seg
->s_as
->a_hat
, seg
->s_base
, seg
->s_size
,
6427 if (amp
== NULL
|| svd
->type
== MAP_SHARED
) {
6432 pgsz
= page_get_pagesize(seg
->s_szc
);
6436 * XXX anon rwlock is not really needed because this is a
6437 * private segment and we are writers.
6439 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6441 for (; a
< ea
; a
+= pgsz
, an_idx
+= pages
) {
6442 if ((oldap
= anon_get_ptr(amp
->ahp
, an_idx
)) != NULL
) {
6443 ASSERT(vpage
!= NULL
|| svd
->pageprot
== 0);
6444 if (vpage
!= NULL
) {
6445 ASSERT(sameprot(seg
, a
, pgsz
));
6446 prot
= VPP_PROT(vpage
);
6447 pageflag
= VPP_ISPPLOCK(vpage
) ? LOCK_PAGE
: 0;
6449 if (seg
->s_szc
!= 0) {
6450 ASSERT(vp
== NULL
|| anon_pages(amp
->ahp
,
6451 an_idx
, pages
) == pages
);
6452 if ((err
= anon_map_demotepages(amp
, an_idx
,
6453 seg
, a
, prot
, vpage
, svd
->cred
)) != 0) {
6457 if (oldap
->an_refcnt
== 1) {
6460 if ((err
= anon_getpage(&oldap
, &vpprot
,
6461 anon_pl
, PAGESIZE
, seg
, a
, S_READ
,
6465 if ((pp
= anon_private(&ap
, seg
, a
, prot
,
6466 anon_pl
[0], pageflag
, svd
->cred
)) == NULL
) {
6471 (void) anon_set_ptr(amp
->ahp
, an_idx
, ap
,
6476 vpage
= (vpage
== NULL
) ? NULL
: vpage
+ pages
;
6482 ANON_LOCK_EXIT(&
->a_rwlock
);
6494 pgcnt_t pgcnt
= page_get_pagecnt(seg
->s_szc
);
6495 size_t ppasize
= (pgcnt
+ 1) * sizeof (page_t
*);
6497 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6498 struct anon_map
*amp
= svd
->amp
;
6499 struct vpage
*evp
= svp
+ pgcnt
;
6500 caddr_t addr
= ((uintptr_t)(svp
- svd
->vpage
) << PAGESHIFT
)
6503 struct vnode
*vp
= svd
->vp
;
6508 int anon
= (amp
!= NULL
) ? 1 : 0;
6510 ASSERT(svd
->type
== MAP_PRIVATE
);
6511 ASSERT(svd
->vpage
!= NULL
);
6512 ASSERT(seg
->s_szc
!= 0);
6513 ASSERT(IS_P2ALIGNED(pgcnt
, pgcnt
));
6514 ASSERT(amp
== NULL
|| IS_P2ALIGNED(anon_idx
, pgcnt
));
6515 ASSERT(sameprot(seg
, addr
, pgcnt
<< PAGESHIFT
));
6517 if (VPP_PROT(svp
) == prot
)
6519 if (!((VPP_PROT(svp
) ^ prot
) & PROT_WRITE
))
6522 ppa
= kmem_alloc(ppasize
, KM_SLEEP
);
6523 if (anon
&& vp
!= NULL
) {
6524 if (anon_get_ptr(amp
->ahp
, anon_idx
) == NULL
) {
6526 ASSERT(!anon_pages(amp
->ahp
, anon_idx
, pgcnt
));
6529 anon_pages(amp
->ahp
, anon_idx
, pgcnt
) == pgcnt
);
6532 for (*ppa
= NULL
, pg_idx
= 0; svp
< evp
; svp
++, anon_idx
++) {
6533 if (!VPP_ISPPLOCK(svp
))
6536 ap
= anon_get_ptr(amp
->ahp
, anon_idx
);
6538 panic("segvn_claim_pages: no anon slot");
6540 swap_xlate(ap
, &vp
, &aoff
);
6541 off
= (u_offset_t
)aoff
;
6544 if ((pp
= page_lookup(vp
,
6545 (u_offset_t
)off
, SE_SHARED
)) == NULL
) {
6546 panic("segvn_claim_pages: no page");
6552 if (ppa
[0] == NULL
) {
6553 kmem_free(ppa
, ppasize
);
6557 ASSERT(pg_idx
<= pgcnt
);
6561 /* Find each large page within ppa, and adjust its claim */
6563 /* Does ppa cover a single large page? */
6564 if (ppa
[0]->p_szc
== seg
->s_szc
) {
6565 if (prot
& PROT_WRITE
)
6566 err
= page_addclaim_pages(ppa
);
6568 err
= page_subclaim_pages(ppa
);
6570 for (i
= 0; ppa
[i
]; i
+= pgcnt
) {
6571 ASSERT(IS_P2ALIGNED(page_pptonum(ppa
[i
]), pgcnt
));
6572 if (prot
& PROT_WRITE
)
6573 err
= page_addclaim_pages(&ppa
[i
]);
6575 err
= page_subclaim_pages(&ppa
[i
]);
6581 for (i
= 0; i
< pg_idx
; i
++) {
6582 ASSERT(ppa
[i
] != NULL
);
6583 page_unlock(ppa
[i
]);
6586 kmem_free(ppa
, ppasize
);
6591 * Returns right (upper address) segment if split occurred.
6592 * If the address is equal to the beginning or end of its segment it returns
6593 * the current segment.
6596 segvn_split_seg(struct seg
*seg
, caddr_t addr
)
6598 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6601 struct segvn_data
*nsvd
;
6603 ASSERT(AS_WRITE_HELD(seg
->s_as
));
6604 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6606 ASSERT(addr
>= seg
->s_base
);
6607 ASSERT(addr
<= seg
->s_base
+ seg
->s_size
);
6608 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6610 if (addr
== seg
->s_base
|| addr
== seg
->s_base
+ seg
->s_size
)
6613 nsize
= seg
->s_base
+ seg
->s_size
- addr
;
6614 seg
->s_size
= addr
- seg
->s_base
;
6615 nseg
= seg_alloc(seg
->s_as
, addr
, nsize
);
6616 ASSERT(nseg
!= NULL
);
6617 nseg
->s_ops
= seg
->s_ops
;
6618 nsvd
= kmem_cache_alloc(segvn_cache
, KM_SLEEP
);
6619 nseg
->s_data
= (void *)nsvd
;
6620 nseg
->s_szc
= seg
->s_szc
;
6622 ASSERT(nsvd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6624 rw_init(&nsvd
->lock
, NULL
, RW_DEFAULT
, NULL
);
6626 if (nsvd
->vp
!= NULL
) {
6628 nsvd
->offset
= svd
->offset
+
6629 (uintptr_t)(nseg
->s_base
- seg
->s_base
);
6630 if (nsvd
->type
== MAP_SHARED
)
6631 lgrp_shm_policy_init(NULL
, nsvd
->vp
);
6634 * The offset for an anonymous segment has no signifigance in
6635 * terms of an offset into a file. If we were to use the above
6636 * calculation instead, the structures read out of
6637 * /proc/<pid>/xmap would be more difficult to decipher since
6638 * it would be unclear whether two seemingly contiguous
6639 * prxmap_t structures represented different segments or a
6640 * single segment that had been split up into multiple prxmap_t
6641 * structures (e.g. if some part of the segment had not yet
6647 ASSERT(svd
->softlockcnt
== 0);
6648 ASSERT(svd
->softlockcnt_sbase
== 0);
6649 ASSERT(svd
->softlockcnt_send
== 0);
6652 if (svd
->vpage
!= NULL
) {
6653 size_t bytes
= vpgtob(seg_pages(seg
));
6654 size_t nbytes
= vpgtob(seg_pages(nseg
));
6655 struct vpage
*ovpage
= svd
->vpage
;
6657 svd
->vpage
= kmem_alloc(bytes
, KM_SLEEP
);
6658 bcopy(ovpage
, svd
->vpage
, bytes
);
6659 nsvd
->vpage
= kmem_alloc(nbytes
, KM_SLEEP
);
6660 bcopy(ovpage
+ seg_pages(seg
), nsvd
->vpage
, nbytes
);
6661 kmem_free(ovpage
, bytes
+ nbytes
);
6663 if (svd
->amp
!= NULL
&& svd
->type
== MAP_PRIVATE
) {
6664 struct anon_map
*oamp
= svd
->amp
, *namp
;
6665 struct anon_hdr
*nahp
;
6667 ANON_LOCK_ENTER(&oamp
->a_rwlock
, RW_WRITER
);
6668 ASSERT(oamp
->refcnt
== 1);
6669 nahp
= anon_create(btop(seg
->s_size
), ANON_SLEEP
);
6670 (void) anon_copy_ptr(oamp
->ahp
, svd
->anon_index
,
6671 nahp
, 0, btop(seg
->s_size
), ANON_SLEEP
);
6673 namp
= anonmap_alloc(nseg
->s_size
, 0, ANON_SLEEP
);
6674 namp
->a_szc
= nseg
->s_szc
;
6675 (void) anon_copy_ptr(oamp
->ahp
,
6676 svd
->anon_index
+ btop(seg
->s_size
),
6677 namp
->ahp
, 0, btop(nseg
->s_size
), ANON_SLEEP
);
6678 anon_release(oamp
->ahp
, btop(oamp
->size
));
6680 oamp
->size
= seg
->s_size
;
6681 svd
->anon_index
= 0;
6683 nsvd
->anon_index
= 0;
6684 ANON_LOCK_EXIT(&oamp
->a_rwlock
);
6685 } else if (svd
->amp
!= NULL
) {
6686 pgcnt_t pgcnt
= page_get_pagecnt(seg
->s_szc
);
6687 ASSERT(svd
->amp
== nsvd
->amp
);
6688 ASSERT(seg
->s_szc
<= svd
->amp
->a_szc
);
6689 nsvd
->anon_index
= svd
->anon_index
+ seg_pages(seg
);
6690 ASSERT(IS_P2ALIGNED(nsvd
->anon_index
, pgcnt
));
6691 ANON_LOCK_ENTER(&svd
->amp
->a_rwlock
, RW_WRITER
);
6693 ANON_LOCK_EXIT(&svd
->amp
->a_rwlock
);
6697 * Split the amount of swap reserved.
6701 * For MAP_NORESERVE, only allocate swap reserve for pages
6702 * being used. Other segments get enough to cover whole
6705 if (svd
->flags
& MAP_NORESERVE
) {
6709 oswresv
= svd
->swresv
;
6710 svd
->swresv
= ptob(anon_pages(svd
->amp
->ahp
,
6711 svd
->anon_index
, btop(seg
->s_size
)));
6712 nsvd
->swresv
= ptob(anon_pages(nsvd
->amp
->ahp
,
6713 nsvd
->anon_index
, btop(nseg
->s_size
)));
6714 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
6716 if (svd
->pageswap
) {
6717 svd
->swresv
= segvn_count_swap_by_vpages(seg
);
6718 ASSERT(nsvd
->swresv
>= svd
->swresv
);
6719 nsvd
->swresv
-= svd
->swresv
;
6721 ASSERT(svd
->swresv
== seg
->s_size
+
6723 svd
->swresv
= seg
->s_size
;
6724 nsvd
->swresv
= nseg
->s_size
;
6733 * called on memory operations (unmap, setprot, setpagesize) for a subset
6734 * of a large page segment to either demote the memory range (SDR_RANGE)
6735 * or the ends (SDR_END) by addr/len.
6737 * returns 0 on success. returns errno, including ENOMEM, on failure.
6747 caddr_t eaddr
= addr
+ len
;
6748 caddr_t lpgaddr
, lpgeaddr
;
6750 struct seg
*badseg1
= NULL
;
6751 struct seg
*badseg2
= NULL
;
6753 struct segvn_data
*svd
= (struct segvn_data *)seg->s_data;
	uint_t szc = seg->s_szc;

	ASSERT(AS_WRITE_HELD(seg->s_as));
	ASSERT(svd->tr_state == SEGVN_TR_OFF);

	pgsz = page_get_pagesize(szc);
	ASSERT(seg->s_base != addr || seg->s_size != len);
	ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
	ASSERT(svd->softlockcnt == 0);
	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
	ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));

	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
	ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
	if (flag == SDR_RANGE) {
		/* demote entire range */
		badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
		(void) segvn_split_seg(nseg, lpgeaddr);
		ASSERT(badseg1->s_base == lpgaddr);
		ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
	} else if (addr != lpgaddr) {
		ASSERT(flag == SDR_END);
		badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
		if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
		    eaddr < lpgaddr + 2 * pgsz) {
			(void) segvn_split_seg(nseg, lpgeaddr);
			ASSERT(badseg1->s_base == lpgaddr);
			ASSERT(badseg1->s_size == 2 * pgsz);
		} else {
			nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
			ASSERT(badseg1->s_base == lpgaddr);
			ASSERT(badseg1->s_size == pgsz);
			if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
				ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
				nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
				badseg2 = nseg;
				(void) segvn_split_seg(nseg, lpgeaddr);
				ASSERT(badseg2->s_base == lpgeaddr - pgsz);
				ASSERT(badseg2->s_size == pgsz);
			}
		}
	} else {
		ASSERT(flag == SDR_END);
		ASSERT(eaddr < lpgeaddr);
		badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
		(void) segvn_split_seg(nseg, lpgeaddr);
		ASSERT(badseg1->s_base == lpgeaddr - pgsz);
		ASSERT(badseg1->s_size == pgsz);
	}

	ASSERT(badseg1 != NULL);
	ASSERT(badseg1->s_szc == szc);
	ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
	    badseg1->s_size == 2 * pgsz);
	ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
	ASSERT(badseg1->s_size == pgsz ||
	    sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
	if (err = segvn_clrszc(badseg1)) {
		return (err);
	}
	ASSERT(badseg1->s_szc == 0);

	if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
		uint_t tszc = highbit(tszcvec) - 1;
		caddr_t ta = MAX(addr, badseg1->s_base);
		caddr_t te;
		size_t tpgsz = page_get_pagesize(tszc);

		ASSERT(svd->type == MAP_SHARED);
		ASSERT(flag == SDR_END);
		ASSERT(tszc < szc && tszc > 0);

		if (eaddr > badseg1->s_base + badseg1->s_size) {
			te = badseg1->s_base + badseg1->s_size;
		} else {
			te = eaddr;
		}

		badseg1->s_szc = tszc;
		if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
			if (badseg2 != NULL) {
				err = segvn_demote_range(badseg1, ta, te - ta,
				    SDR_END, tszcvec);
				if (err != 0) {
					return (err);
				}
			} else {
				return (segvn_demote_range(badseg1, ta,
				    te - ta, SDR_END, tszcvec));
			}
		}
	}

	if (badseg2 == NULL)
		return (0);
	ASSERT(badseg2->s_szc == szc);
	ASSERT(badseg2->s_size == pgsz);
	ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
	if (err = segvn_clrszc(badseg2)) {
		return (err);
	}
	ASSERT(badseg2->s_szc == 0);

	if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
		uint_t tszc = highbit(tszcvec) - 1;
		size_t tpgsz = page_get_pagesize(tszc);

		ASSERT(svd->type == MAP_SHARED);
		ASSERT(flag == SDR_END);
		ASSERT(tszc < szc && tszc > 0);
		ASSERT(badseg2->s_base > addr);
		ASSERT(eaddr > badseg2->s_base);
		ASSERT(eaddr < badseg2->s_base + badseg2->s_size);

		badseg2->s_szc = tszc;
		if (!IS_P2ALIGNED(eaddr, tpgsz)) {
			return (segvn_demote_range(badseg2, badseg2->s_base,
			    eaddr - badseg2->s_base, SDR_END, tszcvec));
		}
	}

	return (0);
}
static int
segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	int err;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
	/*
	 * If segment protection can be used, simply check against them.
	 */
	if (svd->pageprot == 0) {
		err = ((svd->prot & prot) != prot) ? EACCES : 0;
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (err);
	}

	/*
	 * Have to check down to the vpage level.
	 */
	evp = &svd->vpage[seg_page(seg, addr + len)];
	for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
		if ((VPP_PROT(vp) & prot) != prot) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			return (EACCES);
		}
	}
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (0);
}
static int
segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if (pgno != 0) {
		SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
		if (svd->pageprot == 0) {
			do {
				protv[--pgno] = svd->prot;
			} while (pgno != 0);
		} else {
			size_t pgoff = seg_page(seg, addr);

			do {
				pgno--;
				protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
			} while (pgno != 0);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	}
	return (0);
}
static u_offset_t
segvn_getoffset(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (svd->offset + (uintptr_t)(addr - seg->s_base));
}
static int
segvn_gettype(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
	    MAP_INITDATA)));
}
static int
segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	*vpp = svd->vp;
	return (0);
}
/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For segvn, we currently "approve" of the action if we are
 * still in the segment and it maps from the same vp/off,
 * or if the advice stored in segvn_data or vpages allows it.
 * Currently, klustering is not allowed only if MADV_RANDOM is set.
 */
static int
segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon *oap, *ap;
	ssize_t pd;
	size_t page;
	struct vnode *vp1, *vp2;
	u_offset_t off1, off2;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(AS_WRITE_HELD(seg->s_as) ||
	    SEGVN_LOCK_HELD(seg->s_as, &svd->lock));

	if (addr + delta < seg->s_base ||
	    addr + delta >= (seg->s_base + seg->s_size))
		return (-1);		/* exceeded segment bounds */

	pd = delta / (ssize_t)PAGESIZE;	/* divide to preserve sign bit */
	page = seg_page(seg, addr);

	/*
	 * Check to see if either of the pages addr or addr + delta
	 * have advice set that prevents klustering (if MADV_RANDOM advice
	 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
	 * is negative).
	 */
	if (svd->advice == MADV_RANDOM ||
	    svd->advice == MADV_SEQUENTIAL && delta < 0)
		return (-1);
	else if (svd->pageadvice && svd->vpage) {
		struct vpage *bvpp, *evpp;

		bvpp = &svd->vpage[page];
		evpp = &svd->vpage[page + pd];
		if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
		    VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
			return (-1);
		if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
		    VPP_ADVICE(evpp) == MADV_RANDOM)
			return (-1);
	}

	if (svd->type == MAP_SHARED)
		return (0);		/* shared mapping - all ok */

	if ((amp = svd->amp) == NULL)
		return (0);		/* off original vnode */

	page += svd->anon_index;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	oap = anon_get_ptr(amp->ahp, page);
	ap = anon_get_ptr(amp->ahp, page + pd);

	ANON_LOCK_EXIT(&amp->a_rwlock);

	if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
		return (-1);		/* one with and one without an anon */
	}

	if (oap == NULL) {		/* implies that ap == NULL */
		return (0);		/* off original vnode */
	}

	/*
	 * Now we know we have two anon pointers - check to
	 * see if they happen to be properly allocated.
	 */

	/*
	 * XXX We cheat here and don't lock the anon slots. We can't because
	 * we may have been called from the anon layer which might already
	 * have locked them. We are holding a refcnt on the slots so they
	 * can't disappear. The worst that will happen is we'll get the wrong
	 * names (vp, off) for the slots and make a poor klustering decision.
	 */
	swap_xlate(ap, &vp1, &off1);
	swap_xlate(oap, &vp2, &off2);

	if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
		return (-1);
	return (0);
}
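
/*
 * Illustrative sketch (not part of the original source): klustering decisions
 * made above are consumed through the generic segment ops vector.  The
 * SEGOP_KLUSTER() macro and the pvn_read_kluster() caller named here are
 * assumptions about the surrounding VM code, shown only to make the contract
 * concrete:
 *
 *	if (SEGOP_KLUSTER(seg, addr, PAGESIZE) == 0) {
 *		// 0: same vp/off and advice permits it - read the
 *		// neighboring page together with addr in one request.
 *	} else {
 *		// -1: out of segment bounds, MADV_RANDOM set, or the anon
 *		// layout differs - issue a single-page request instead.
 *	}
 */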
/*
 * Swap the pages of seg out to secondary storage, returning the
 * number of bytes of storage freed.
 *
 * The basic idea is first to unload all translations and then to call
 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
 * swap device.  Pages to which other segments have mappings will remain
 * mapped and won't be swapped.  Our caller (as_swapout) has already
 * performed the unloading step.
 *
 * The value returned is intended to correlate well with the process's
 * memory requirements.  However, there are some caveats:
 * 1)	When given a shared segment as argument, this routine will
 *	only succeed in swapping out pages for the last sharer of the
 *	segment.  (Previous callers will only have decremented mapping
 *	reference counts.)
 * 2)	We assume that the hat layer maintains a large enough translation
 *	cache to capture process reference patterns.
 */
static size_t
segvn_swapout(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp;
7103 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
7105 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
7107 * Find pages unmapped by our caller and force them
7108 * out to the virtual swap device.
7110 if ((amp
= svd
->amp
) != NULL
)
7111 anon_index
= svd
->anon_index
;
7112 npages
= seg
->s_size
>> PAGESHIFT
;
7113 for (page
= 0; page
< npages
; page
++) {
7118 anon_sync_obj_t cookie
;
7121 * Obtain <vp, off> pair for the page, then look it up.
7123 * Note that this code is willing to consider regular
7124 * pages as well as anon pages. Is this appropriate here?
7128 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7129 if (anon_array_try_enter(amp
, anon_index
+ page
,
7131 ANON_LOCK_EXIT(&
->a_rwlock
);
7134 ap
= anon_get_ptr(amp
->ahp
, anon_index
+ page
);
7136 swap_xlate(ap
, &vp
, &off
);
7139 off
= svd
->offset
+ ptob(page
);
7141 anon_array_exit(&cookie
);
7142 ANON_LOCK_EXIT(&
->a_rwlock
);
7145 off
= svd
->offset
+ ptob(page
);
7147 if (vp
== NULL
) { /* untouched zfod page */
7152 pp
= page_lookup_nowait(vp
, off
, SE_SHARED
);
7158 * Examine the page to see whether it can be tossed out,
7159 * keeping track of how many we've found.
7161 if (!page_tryupgrade(pp
)) {
7163 * If the page has an i/o lock and no mappings,
7164 * it's very likely that the page is being
7165 * written out as a result of klustering.
7166 * Assume this is so and take credit for it here.
7168 if (!page_io_trylock(pp
)) {
7169 if (!hat_page_is_mapped(pp
))
7177 ASSERT(!page_iolock_assert(pp
));
7181 * Skip if page is locked or has mappings.
7182 * We don't need the page_struct_lock to look at lckcnt
7183 * and cowcnt because the page is exclusive locked.
7185 if (pp
->p_lckcnt
!= 0 || pp
->p_cowcnt
!= 0 ||
7186 hat_page_is_mapped(pp
)) {
7192 * dispose skips large pages so try to demote first.
7194 if (pp
->p_szc
!= 0 && !page_try_demote_pages(pp
)) {
7197 * XXX should skip the remaining page_t's of this
7203 ASSERT(pp
->p_szc
== 0);
7206 * No longer mapped -- we can toss it out. How
7207 * we do so depends on whether or not it's dirty.
7209 if (hat_ismod(pp
) && pp
->p_vnode
) {
7211 * We must clean the page before it can be
7212 * freed. Setting B_FREE will cause pvn_done
7213 * to free the page when the i/o completes.
7214 * XXX: This also causes it to be accounted
7215 * as a pageout instead of a swap: need
7216 * B_SWAPOUT bit to use instead of B_FREE.
7218 * Hold the vnode before releasing the page lock
7219 * to prevent it from being freed and re-used by
7220 * some other thread.
7226 * Queue all i/o requests for the pageout thread
7227 * to avoid saturating the pageout devices.
7229 if (!queue_io_request(vp
, off
))
7233 * The page was clean, free it.
7235 * XXX: Can we ever encounter modified pages
7236 * with no associated vnode here?
7238 ASSERT(pp
->p_vnode
!= NULL
);
7239 /*LINTED: constant in conditional context*/
7240 VN_DISPOSE(pp
, B_FREE
, 0, kcred
);
7244 * Credit now even if i/o is in progress.
7248 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7251 * Wakeup pageout to initiate i/o on all queued requests.
7253 cv_signal_pageout();
7254 return (ptob(pgcnt
));
/*
 * Synchronize primary storage cache with real object in virtual memory.
 *
 * XXX - Anonymous pages should not be sync'ed out at all.
 */
static int
segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7278 struct anon_map
*amp
;
7280 anon_sync_obj_t cookie
;
7282 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
7284 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
7286 if (svd
->softlockcnt
> 0) {
7288 * If this is shared segment non 0 softlockcnt
7289 * means locked pages are still in use.
7291 if (svd
->type
== MAP_SHARED
) {
7292 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7297 * flush all pages from seg cache
7298 * otherwise we may deadlock in swap_putpage
7299 * for B_INVAL page (4175402).
7301 * Even if we grab segvn WRITER's lock
7302 * here, there might be another thread which could've
7303 * successfully performed lookup/insert just before
7304 * we acquired the lock here. So, grabbing either
7305 * lock here is of not much use. Until we devise
7306 * a strategy at upper layers to solve the
7307 * synchronization issues completely, we expect
7308 * applications to handle this appropriately.
7311 if (svd
->softlockcnt
> 0) {
7312 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7315 } else if (svd
->type
== MAP_SHARED
&& svd
->amp
!= NULL
&&
7316 svd
->amp
->a_softlockcnt
> 0) {
7318 * Try to purge this amp's entries from pcache. It will
7319 * succeed only if other segments that share the amp have no
7320 * outstanding softlock's.
7323 if (svd
->amp
->a_softlockcnt
> 0 || svd
->softlockcnt
> 0) {
7324 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7330 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
7331 bflags
= ((flags
& MS_ASYNC
) ? B_ASYNC
: 0) |
7332 ((flags
& MS_INVALIDATE
) ? B_INVAL
: 0);
7335 pageprot
= attr
& ~(SHARED
|PRIVATE
);
7336 segtype
= (attr
& SHARED
) ? MAP_SHARED
: MAP_PRIVATE
;
7339 * We are done if the segment types don't match
7340 * or if we have segment level protections and
7343 if (svd
->type
!= segtype
) {
7344 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7348 if (svd
->prot
!= pageprot
) {
7349 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7354 vpp
= &svd
->vpage
[seg_page(seg
, addr
)];
7356 } else if (svd
->vp
&& svd
->amp
== NULL
&&
7357 (flags
& MS_INVALIDATE
) == 0) {
7360 * No attributes, no anonymous pages and MS_INVALIDATE flag
7361 * is not on, just use one big request.
7363 err
= VOP_PUTPAGE(svd
->vp
, (offset_t
)offset
, len
,
7364 bflags
, svd
->cred
, NULL
);
7365 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7369 if ((amp
= svd
->amp
) != NULL
)
7370 anon_index
= svd
->anon_index
+ seg_page(seg
, addr
);
7372 for (eaddr
= addr
+ len
; addr
< eaddr
; addr
+= PAGESIZE
) {
7375 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7376 anon_array_enter(amp
, anon_index
, &cookie
);
7377 ap
= anon_get_ptr(amp
->ahp
, anon_index
++);
7379 swap_xlate(ap
, &vp
, &off
);
7384 anon_array_exit(&cookie
);
7385 ANON_LOCK_EXIT(&
->a_rwlock
);
7392 if (vp
== NULL
) /* untouched zfod page */
7397 prot
= VPP_PROT(vpp
);
7400 if (prot
!= pageprot
) {
7406 * See if any of these pages are locked -- if so, then we
7407 * will have to truncate an invalidate request at the first
7408 * locked one. We don't need the page_struct_lock to test
7409 * as this is only advisory; even if we acquire it someone
7410 * might race in and lock the page after we unlock and before
7411 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7413 if (flags
& MS_INVALIDATE
) {
7414 if ((pp
= page_lookup(vp
, off
, SE_SHARED
)) != NULL
) {
7415 if (pp
->p_lckcnt
!= 0 || pp
->p_cowcnt
!= 0) {
7417 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7420 if (ap
!= NULL
&& pp
->p_szc
!= 0 &&
7421 page_tryupgrade(pp
)) {
7422 if (pp
->p_lckcnt
== 0 &&
7423 pp
->p_cowcnt
== 0) {
7425 * swapfs VN_DISPOSE() won't
7426 * invalidate large pages.
7427 * Attempt to demote.
7428 * XXX can't help it if it
7429 * fails. But for swapfs
7430 * pages it is no big deal.
7432 (void) page_try_demote_pages(
7438 } else if (svd
->type
== MAP_SHARED
&& amp
!= NULL
) {
7440 * Avoid writing out to disk ISM's large pages
7441 * because segspt_free_pages() relies on NULL an_pvp
7442 * of anon slots of such pages.
7445 ASSERT(svd
->vp
== NULL
);
7447 * swapfs uses page_lookup_nowait if not freeing or
7448 * invalidating and skips a page if
7449 * page_lookup_nowait returns NULL.
7451 pp
= page_lookup_nowait(vp
, off
, SE_SHARED
);
7455 if (pp
->p_szc
!= 0) {
7461 * Note ISM pages are created large so (vp, off)'s
7462 * page cannot suddenly become large after we unlock
7468 * XXX - Should ultimately try to kluster
7469 * calls to VOP_PUTPAGE() for performance.
7472 err
= VOP_PUTPAGE(vp
, (offset_t
)off
, PAGESIZE
,
7473 (bflags
| (IS_SWAPFSVP(vp
) ? B_PAGE_NOWAIT
: 0)),
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (err);
}
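
/*
 * Illustrative sketch (assumption, not from the original file): segvn_sync()
 * is the backend for msync(3C) on segvn mappings.  The bflags translation
 * above maps MS_ASYNC to B_ASYNC and MS_INVALIDATE to B_INVAL before the
 * VOP_PUTPAGE() calls.  A hypothetical userland caller:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	// ... modify the mapping ...
 *	(void) msync(p, len, MS_ASYNC | MS_INVALIDATE);
 */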
/*
 * Determine if we have data corresponding to pages in the
 * primary storage virtual memory cache (i.e., "in core").
 */
static size_t
segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7492 struct vnode
*vp
, *avp
;
7493 u_offset_t offset
, aoffset
;
7499 struct anon_map
*amp
; /* XXX - for locknest */
7502 anon_sync_obj_t cookie
;
7504 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
7506 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
7507 if (svd
->amp
== NULL
&& svd
->vp
== NULL
) {
7508 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7509 bzero(vec
, btopr(len
));
7510 return (len
); /* no anonymous pages created yet */
7513 p
= seg_page(seg
, addr
);
7514 ep
= seg_page(seg
, addr
+ len
);
7515 start
= svd
->vp
? SEG_PAGE_VNODEBACKED
: 0;
7518 for (; p
< ep
; p
++, addr
+= PAGESIZE
) {
7519 vpp
= (svd
->vpage
) ? &svd
->vpage
[p
]: NULL
;
7523 /* Grab the vnode/offset for the anon slot */
7525 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7526 anon_array_enter(amp
, svd
->anon_index
+ p
, &cookie
);
7527 ap
= anon_get_ptr(amp
->ahp
, svd
->anon_index
+ p
);
7529 swap_xlate(ap
, &avp
, &aoffset
);
7531 anon_array_exit(&cookie
);
7532 ANON_LOCK_EXIT(&
->a_rwlock
);
7534 if ((avp
!= NULL
) && page_exists(avp
, aoffset
)) {
7535 /* A page exists for the anon slot */
7536 ret
|= SEG_PAGE_INCORE
;
7539 * If page is mapped and writable
7542 if ((hat_getattr(seg
->s_as
->a_hat
, addr
,
7543 &attr
) != -1) && (attr
& PROT_WRITE
)) {
7544 ret
|= SEG_PAGE_ANON
;
7547 * Don't get page_struct lock for lckcnt and cowcnt,
7548 * since this is purely advisory.
7550 if ((pp
= page_lookup_nowait(avp
, aoffset
,
7551 SE_SHARED
)) != NULL
) {
7553 ret
|= SEG_PAGE_SOFTLOCK
;
7555 ret
|= SEG_PAGE_HASCOW
;
7560 /* Gather vnode statistics */
7562 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
7566 * Try to obtain a "shared" lock on the page
7567 * without blocking. If this fails, determine
7568 * if the page is in memory.
7570 pp
= page_lookup_nowait(vp
, offset
, SE_SHARED
);
7571 if ((pp
== NULL
) && (page_exists(vp
, offset
))) {
7572 /* Page is incore, and is named */
7573 ret
|= (SEG_PAGE_INCORE
| SEG_PAGE_VNODE
);
7576 * Don't get page_struct lock for lckcnt and cowcnt,
7577 * since this is purely advisory.
7580 ret
|= (SEG_PAGE_INCORE
| SEG_PAGE_VNODE
);
7582 ret
|= SEG_PAGE_SOFTLOCK
;
7584 ret
|= SEG_PAGE_HASCOW
;
7589 /* Gather virtual page information */
7591 if (VPP_ISPPLOCK(vpp
))
7592 ret
|= SEG_PAGE_LOCKED
;
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (len);
}
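
/*
 * Illustrative sketch (assumption, not from the original file): the per page
 * SEG_PAGE_* bits assembled above are what mincore(3C) ultimately reports to
 * userland, e.g.:
 *
 *	long pgsz = sysconf(_SC_PAGESIZE);
 *	char *vec = malloc((len + pgsz - 1) / pgsz);
 *	if (mincore(addr, len, vec) == 0) {
 *		// the low bit of vec[i] is set when page i is resident
 *		// (SEG_PAGE_INCORE); the remaining bits are advisory.
 *	}
 */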
/*
 * Statement for p_cowcnts/p_lckcnts.
 *
 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
 * irrespective of the following factors or anything else:
 *
 *	(1) anon slots are populated or not
 *	(2) cow is broken or not
 *	(3) refcnt on ap is 1 or greater than 1
 *
 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
 * and munlock.
 *
 *
 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
 *
 *	if vpage has PROT_WRITE
 *		transfer cowcnt on the oldpage -> cowcnt on the newpage
 *	else
 *		transfer lckcnt on the oldpage -> lckcnt on the newpage
 *
 *	During copy-on-write, decrement p_cowcnt on the oldpage and increment
 *	p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
 *
 *	We may also break COW if softlocking on read access in the physio case.
 *	In this case, vpage may not have PROT_WRITE. So, we need to decrement
 *	p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
 *	vpage doesn't have PROT_WRITE.
 *
 *
 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
 *
 *	If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
 *	increment p_lckcnt by calling page_subclaim() which takes care of
 *	availrmem accounting and p_lckcnt overflow.
 *
 *	If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
 *	increment p_cowcnt by calling page_addclaim() which takes care of
 *	availrmem availability and p_cowcnt overflow.
 */
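
/*
 * Worked example (added for clarity, not part of the original statement):
 * take a MAP_PRIVATE, PROT_WRITE page that has been mlock()ed and is then
 * written, breaking COW.  Because the vpage has PROT_WRITE the claim moves
 * with the data:
 *
 *	oldpage->p_cowcnt: 1 -> 0
 *	newpage->p_cowcnt: 0 -> 1
 *
 * If the locked region is later mprotect()ed to read-only, page_subclaim()
 * converts the claim (p_cowcnt-- / p_lckcnt++) so availrmem accounting stays
 * consistent; regaining PROT_WRITE goes the other way via page_addclaim().
 */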
/*
 * Lock down (or unlock) pages mapped by this segment.
 *
 * XXX only creates PAGESIZE pages if anon slots are not initialized.
 * At fault time they will be relocated into larger pages.
 */
static int
segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7664 struct anon_map
*amp
;
7667 anon_sync_obj_t cookie
;
7668 struct kshmid
*sp
= NULL
;
7669 struct proc
*p
= curproc
;
7670 kproject_t
*proj
= NULL
;
7672 size_t locked_bytes
= 0;
7673 size_t unlocked_bytes
= 0;
7677 * Hold write lock on address space because may split or concatenate
7680 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
7683 * If this is a shm, use shm's project and zone, else use
7684 * project and zone of calling process
7687 /* Determine if this segment backs a sysV shm */
7688 if (svd
->amp
!= NULL
&& svd
->amp
->a_sp
!= NULL
) {
7689 ASSERT(svd
->type
== MAP_SHARED
);
7690 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
7691 sp
= svd
->amp
->a_sp
;
7692 proj
= sp
->shm_perm
.ipc_proj
;
7696 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
7698 pageprot
= attr
& ~(SHARED
|PRIVATE
);
7699 segtype
= attr
& SHARED
? MAP_SHARED
: MAP_PRIVATE
;
7702 * We are done if the segment types don't match
7703 * or if we have segment level protections and
7706 if (svd
->type
!= segtype
) {
7707 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7710 if (svd
->pageprot
== 0 && svd
->prot
!= pageprot
) {
7711 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7716 if (op
== MC_LOCK
) {
7717 if (svd
->tr_state
== SEGVN_TR_INIT
) {
7718 svd
->tr_state
= SEGVN_TR_OFF
;
7719 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
7720 ASSERT(svd
->amp
!= NULL
);
7721 segvn_textunrepl(seg
, 0);
7722 ASSERT(svd
->amp
== NULL
&&
7723 svd
->tr_state
== SEGVN_TR_OFF
);
7728 * If we're locking, then we must create a vpage structure if
7729 * none exists. If we're unlocking, then check to see if there
7730 * is a vpage -- if not, then we could not have locked anything.
7733 if ((vpp
= svd
->vpage
) == NULL
) {
7734 if (op
== MC_LOCK
) {
7736 if (svd
->vpage
== NULL
) {
7737 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7741 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7747 * The anonymous data vector (i.e., previously
7748 * unreferenced mapping to swap space) can be allocated
7749 * by lazily testing for its existence.
7751 if (op
== MC_LOCK
&& svd
->amp
== NULL
&& svd
->vp
== NULL
) {
7752 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
7753 svd
->amp
= anonmap_alloc(seg
->s_size
, 0, ANON_SLEEP
);
7754 svd
->amp
->a_szc
= seg
->s_szc
;
7757 if ((amp
= svd
->amp
) != NULL
) {
7758 anon_index
= svd
->anon_index
+ seg_page(seg
, addr
);
7761 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
7762 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
7765 mutex_enter(&sp
->shm_mlock
);
7767 /* determine number of unlocked bytes in range for lock operation */
7768 if (op
== MC_LOCK
) {
7771 for (vpp
= &svd
->vpage
[seg_page(seg
, addr
)]; vpp
< evp
;
7773 if (!VPP_ISPPLOCK(vpp
))
7774 unlocked_bytes
+= PAGESIZE
;
7777 ulong_t i_idx
, i_edx
;
7778 anon_sync_obj_t i_cookie
;
7783 /* Only count sysV pages once for locked memory */
7784 i_edx
= svd
->anon_index
+ seg_page(seg
, addr
+ len
);
7785 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7786 for (i_idx
= anon_index
; i_idx
< i_edx
; i_idx
++) {
7787 anon_array_enter(amp
, i_idx
, &i_cookie
);
7788 i_ap
= anon_get_ptr(amp
->ahp
, i_idx
);
7790 unlocked_bytes
+= PAGESIZE
;
7791 anon_array_exit(&i_cookie
);
7794 swap_xlate(i_ap
, &i_vp
, &i_off
);
7795 anon_array_exit(&i_cookie
);
7796 pp
= page_lookup(i_vp
, i_off
, SE_SHARED
);
7798 unlocked_bytes
+= PAGESIZE
;
7800 } else if (pp
->p_lckcnt
== 0)
7801 unlocked_bytes
+= PAGESIZE
;
7804 ANON_LOCK_EXIT(&
->a_rwlock
);
7807 mutex_enter(&p
->p_lock
);
7808 err
= rctl_incr_locked_mem(p
, proj
, unlocked_bytes
,
7810 mutex_exit(&p
->p_lock
);
7814 mutex_exit(&sp
->shm_mlock
);
7815 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7820 * Loop over all pages in the range. Process if we're locking and
7821 * page has not already been locked in this mapping; or if we're
7822 * unlocking and the page has been locked.
7824 for (vpp
= &svd
->vpage
[seg_page(seg
, addr
)]; vpp
< evp
;
7825 vpp
++, pos
++, addr
+= PAGESIZE
, offset
+= PAGESIZE
, anon_index
++) {
7826 if ((attr
== 0 || VPP_PROT(vpp
) == pageprot
) &&
7827 ((op
== MC_LOCK
&& !VPP_ISPPLOCK(vpp
)) ||
7828 (op
== MC_UNLOCK
&& VPP_ISPPLOCK(vpp
)))) {
7831 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7833 * If this isn't a MAP_NORESERVE segment and
7834 * we're locking, allocate anon slots if they
7835 * don't exist. The page is brought in later on.
7837 if (op
== MC_LOCK
&& svd
->vp
== NULL
&&
7838 ((svd
->flags
& MAP_NORESERVE
) == 0) &&
7840 ((ap
= anon_get_ptr(amp
->ahp
, anon_index
))
7842 anon_array_enter(amp
, anon_index
, &cookie
);
7844 if ((ap
= anon_get_ptr(amp
->ahp
,
7845 anon_index
)) == NULL
) {
7846 pp
= anon_zero(seg
, addr
, &ap
,
7849 anon_array_exit(&cookie
);
7850 ANON_LOCK_EXIT(&
->a_rwlock
);
7854 ASSERT(anon_get_ptr(amp
->ahp
,
7855 anon_index
) == NULL
);
7856 (void) anon_set_ptr(amp
->ahp
,
7857 anon_index
, ap
, ANON_SLEEP
);
7860 anon_array_exit(&cookie
);
7864 * Get name for page, accounting for
7865 * existence of private copy.
7869 anon_array_enter(amp
, anon_index
, &cookie
);
7870 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
7872 swap_xlate(ap
, &vp
, &off
);
7874 if (svd
->vp
== NULL
&&
7875 (svd
->flags
& MAP_NORESERVE
)) {
7876 anon_array_exit(&cookie
);
7877 ANON_LOCK_EXIT(&
->a_rwlock
);
7883 if (op
!= MC_LOCK
|| ap
== NULL
) {
7884 anon_array_exit(&cookie
);
7885 ANON_LOCK_EXIT(&
->a_rwlock
);
7893 * Get page frame. It's ok if the page is
7894 * not available when we're unlocking, as this
7895 * may simply mean that a page we locked got
7896 * truncated out of existence after we locked it.
7898 * Invoke VOP_GETPAGE() to obtain the page struct
7899 * since we may need to read it from disk if its
7903 pp
= page_lookup(vp
, off
, SE_SHARED
);
7910 error
= VOP_GETPAGE(vp
, (offset_t
)off
, PAGESIZE
,
7911 (uint_t
*)NULL
, pl
, PAGESIZE
, seg
, addr
,
7912 S_OTHER
, svd
->cred
, NULL
);
7914 if (error
&& ap
!= NULL
) {
7915 anon_array_exit(&cookie
);
7916 ANON_LOCK_EXIT(&
->a_rwlock
);
7920 * If the error is EDEADLK then we must bounce
7921 * up and drop all vm subsystem locks and then
7922 * retry the operation later
7923 * This behavior is a temporary measure because
7924 * ufs/sds logging is badly designed and will
7925 * deadlock if we don't allow this bounce to
7926 * happen. The real solution is to re-design
7927 * the logging code to work properly. See bug
7928 * 4125102 for details of the problem.
7930 if (error
== EDEADLK
) {
7935 * Quit if we fail to fault in the page. Treat
7936 * the failure as an error, unless the addr
7937 * is mapped beyond the end of a file.
7939 if (error
&& svd
->vp
) {
7940 va
.va_mask
= AT_SIZE
;
7941 if (VOP_GETATTR(svd
->vp
, &va
, 0,
7942 svd
->cred
, NULL
) != 0) {
7946 if (btopr(va
.va_size
) >=
7962 * See Statement at the beginning of this routine.
7964 * claim is always set if MAP_PRIVATE and PROT_WRITE
7965 * irrespective of following factors:
7967 * (1) anon slots are populated or not
7968 * (2) cow is broken or not
7969 * (3) refcnt on ap is 1 or greater than 1
7971 * See 4140683 for details
7973 claim
= ((VPP_PROT(vpp
) & PROT_WRITE
) &&
7974 (svd
->type
== MAP_PRIVATE
));
7977 * Perform page-level operation appropriate to
7978 * operation. If locking, undo the SOFTLOCK
7979 * performed to bring the page into memory
7980 * after setting the lock. If unlocking,
7981 * and no page was found, account for the claim
7984 if (op
== MC_LOCK
) {
7985 int ret
= 1; /* Assume success */
7987 ASSERT(!VPP_ISPPLOCK(vpp
));
7989 ret
= page_pp_lock(pp
, claim
, 0);
7991 if (ap
->an_pvp
!= NULL
) {
7992 anon_swap_free(ap
, pp
);
7994 anon_array_exit(&cookie
);
7995 ANON_LOCK_EXIT(&
->a_rwlock
);
7998 /* locking page failed */
8005 if (pp
->p_lckcnt
== 1)
8006 locked_bytes
+= PAGESIZE
;
8008 locked_bytes
+= PAGESIZE
;
8010 if (lockmap
!= (ulong_t
*)NULL
)
8011 BT_SET(lockmap
, pos
);
8015 ASSERT(VPP_ISPPLOCK(vpp
));
8017 /* sysV pages should be locked */
8018 ASSERT(sp
== NULL
|| pp
->p_lckcnt
> 0);
8019 page_pp_unlock(pp
, claim
, 0);
8021 if (pp
->p_lckcnt
== 0)
8025 unlocked_bytes
+= PAGESIZE
;
8029 unlocked_bytes
+= PAGESIZE
;
8036 if (op
== MC_LOCK
) {
8037 /* Credit back bytes that did not get locked */
8038 if ((unlocked_bytes
- locked_bytes
) > 0) {
8040 mutex_enter(&p
->p_lock
);
8041 rctl_decr_locked_mem(p
, proj
,
8042 (unlocked_bytes
- locked_bytes
), chargeproc
);
8044 mutex_exit(&p
->p_lock
);
8048 /* Account bytes that were unlocked */
8049 if (unlocked_bytes
> 0) {
8051 mutex_enter(&p
->p_lock
);
8052 rctl_decr_locked_mem(p
, proj
, unlocked_bytes
,
8055 mutex_exit(&p
->p_lock
);
8059 mutex_exit(&sp
->shm_mlock
);
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (err);
}
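
/*
 * Illustrative sketch (assumption, not from the original file): segvn_lockop()
 * is reached from memcntl(2) with MC_LOCK/MC_UNLOCK, which is what
 * mlock(3C)/munlock(3C) issue:
 *
 *	if (mlock(addr, len) == 0) {
 *		// every page of [addr, addr + len) now carries a p_lckcnt
 *		// (or p_cowcnt for writable MAP_PRIVATE pages) claim and is
 *		// charged against the project's locked-memory rctl
 *		...
 *		(void) munlock(addr, len);
 *	}
 */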
/*
 * Set advice from user for specified pages
 * There are 9 types of advice:
 *	MADV_NORMAL	- Normal (default) behavior (whatever that is)
 *	MADV_RANDOM	- Random page references
 *				do not allow readahead or 'klustering'
 *	MADV_SEQUENTIAL	- Sequential page references
 *				Pages previous to the one currently being
 *				accessed (determined by fault) are 'not needed'
 *				and are freed immediately
 *	MADV_WILLNEED	- Pages are likely to be used (fault ahead in mctl)
 *	MADV_DONTNEED	- Pages are not needed (synced out in mctl)
 *	MADV_FREE	- Contents can be discarded
 *	MADV_ACCESS_DEFAULT- Default access
 *	MADV_ACCESS_LWP	- Next LWP will access heavily
 *	MADV_ACCESS_MANY- Many LWPs or processes will access heavily
 */
static int
segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8089 struct anon_map
*amp
;
8092 lgrp_mem_policy_t policy
;
8096 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
8099 * In case of MADV_FREE, we won't be modifying any segment private
8100 * data structures; so, we only need to grab READER's lock
8102 if (behav
!= MADV_FREE
) {
8103 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
8104 if (svd
->tr_state
!= SEGVN_TR_OFF
) {
8105 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8109 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
8113 * Large pages are assumed to be only turned on when accesses to the
8114 * segment's address range have spatial and temporal locality. That
8115 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8116 * Also, ignore advice affecting lgroup memory allocation
8117 * if don't need to do lgroup optimizations on this system
8120 if ((behav
== MADV_SEQUENTIAL
&&
8121 (seg
->s_szc
!= 0 || HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
))) ||
8122 (!lgrp_optimizations() && (behav
== MADV_ACCESS_DEFAULT
||
8123 behav
== MADV_ACCESS_LWP
|| behav
== MADV_ACCESS_MANY
))) {
8124 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8128 if (behav
== MADV_SEQUENTIAL
|| behav
== MADV_ACCESS_DEFAULT
||
8129 behav
== MADV_ACCESS_LWP
|| behav
== MADV_ACCESS_MANY
) {
8131 * Since we are going to unload hat mappings
8132 * we first have to flush the cache. Otherwise
8133 * this might lead to system panic if another
8134 * thread is doing physio on the range whose
8135 * mappings are unloaded by madvise(3C).
8137 if (svd
->softlockcnt
> 0) {
8139 * If this is shared segment non 0 softlockcnt
8140 * means locked pages are still in use.
8142 if (svd
->type
== MAP_SHARED
) {
8143 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8147 * Since we do have the segvn writers lock
8148 * nobody can fill the cache with entries
8149 * belonging to this seg during the purge.
8150 * The flush either succeeds or we still
8151 * have pending I/Os. In the later case,
8152 * madvise(3C) fails.
8155 if (svd
->softlockcnt
> 0) {
8157 * Since madvise(3C) is advisory and
8158 * it's not part of UNIX98, madvise(3C)
8159 * failure here doesn't cause any hardship.
8160 * Note that we don't block in "as" layer.
8162 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8165 } else if (svd
->type
== MAP_SHARED
&& svd
->amp
!= NULL
&&
8166 svd
->amp
->a_softlockcnt
> 0) {
8168 * Try to purge this amp's entries from pcache. It
8169 * will succeed only if other segments that share the
8170 * amp have no outstanding softlock's.
8178 if (behav
== MADV_FREE
) {
8180 * MADV_FREE is not supported for segments with
8181 * underlying object; if anonmap is NULL, anon slots
8182 * are not yet populated and there is nothing for
8183 * us to do. As MADV_FREE is advisory, we don't
8184 * return error in either case.
8186 if (vp
!= NULL
|| amp
== NULL
) {
8187 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8193 page
= seg_page(seg
, addr
);
8194 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
8195 anon_disclaim(amp
, svd
->anon_index
+ page
, len
);
8196 ANON_LOCK_EXIT(&
->a_rwlock
);
8197 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8202 * If advice is to be applied to entire segment,
8203 * use advice field in seg_data structure
8204 * otherwise use appropriate vpage entry.
8206 if ((addr
== seg
->s_base
) && (len
== seg
->s_size
)) {
8208 case MADV_ACCESS_LWP
:
8209 case MADV_ACCESS_MANY
:
8210 case MADV_ACCESS_DEFAULT
:
8212 * Set memory allocation policy for this segment
8214 policy
= lgrp_madv_to_policy(behav
, len
, svd
->type
);
8215 if (svd
->type
== MAP_SHARED
)
8216 already_set
= lgrp_shm_policy_set(policy
, amp
,
8217 svd
->anon_index
, vp
, svd
->offset
, len
);
8220 * For private memory, need writers lock on
8221 * address space because the segment may be
8222 * split or concatenated when changing policy
8224 if (AS_READ_HELD(seg
->s_as
)) {
8225 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8229 already_set
= lgrp_privm_policy_set(policy
,
8230 &svd
->policy_info
, len
);
8234 * If policy set already and it shouldn't be reapplied,
8235 * don't do anything.
8238 !LGRP_MEM_POLICY_REAPPLICABLE(policy
))
8242 * Mark any existing pages in given range for
8245 page_mark_migrate(seg
, addr
, len
, amp
, svd
->anon_index
,
8246 vp
, svd
->offset
, 1);
8249 * If same policy set already or this is a shared
8250 * memory segment, don't need to try to concatenate
8251 * segment with adjacent ones.
8253 if (already_set
|| svd
->type
== MAP_SHARED
)
8257 * Try to concatenate this segment with previous
8258 * one and next one, since we changed policy for
8259 * this one and it may be compatible with adjacent
8262 prev
= AS_SEGPREV(seg
->s_as
, seg
);
8263 next
= AS_SEGNEXT(seg
->s_as
, seg
);
8265 if (next
&& next
->s_ops
== &segvn_ops
&&
8266 addr
+ len
== next
->s_base
)
8267 (void) segvn_concat(seg
, next
, 1);
8269 if (prev
&& prev
->s_ops
== &segvn_ops
&&
8270 addr
== prev
->s_base
+ prev
->s_size
) {
8272 * Drop lock for private data of current
8273 * segment before concatenating (deleting) it
8274 * and return IE_REATTACH to tell as_ctl() that
8275 * current segment has changed
8277 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8278 if (!segvn_concat(prev
, seg
, 1))
8285 case MADV_SEQUENTIAL
:
8287 * unloading mapping guarantees
8288 * detection in segvn_fault
8290 ASSERT(seg
->s_szc
== 0);
8291 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
8292 hat_unload(seg
->s_as
->a_hat
, addr
, len
,
8297 svd
->advice
= (uchar_t
)behav
;
8298 svd
->pageadvice
= 0;
8300 case MADV_WILLNEED
: /* handled in memcntl */
8301 case MADV_DONTNEED
: /* handled in memcntl */
8302 case MADV_FREE
: /* handled above */
8309 struct seg
*new_seg
;
8310 struct segvn_data
*new_svd
;
8314 page
= seg_page(seg
, addr
);
8317 if (svd
->vpage
== NULL
) {
8318 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8323 struct vpage
*bvpp
, *evpp
;
8325 case MADV_ACCESS_LWP
:
8326 case MADV_ACCESS_MANY
:
8327 case MADV_ACCESS_DEFAULT
:
8329 * Set memory allocation policy for portion of this
8334 * Align address and length of advice to page
8335 * boundaries for large pages
8337 if (seg
->s_szc
!= 0) {
8340 pgsz
= page_get_pagesize(seg
->s_szc
);
8341 addr
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
8342 len
= P2ROUNDUP(len
, pgsz
);
8346 * Check to see whether policy is set already
8348 policy
= lgrp_madv_to_policy(behav
, len
, svd
->type
);
8350 anon_index
= svd
->anon_index
+ page
;
8351 off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
8353 if (svd
->type
== MAP_SHARED
)
8354 already_set
= lgrp_shm_policy_set(policy
, amp
,
8355 anon_index
, vp
, off
, len
);
8358 (policy
== svd
->policy_info
.mem_policy
);
8361 * If policy set already and it shouldn't be reapplied,
8362 * don't do anything.
8365 !LGRP_MEM_POLICY_REAPPLICABLE(policy
))
8369 * For private memory, need writers lock on
8370 * address space because the segment may be
8371 * split or concatenated when changing policy
8373 if (svd
->type
== MAP_PRIVATE
&&
8374 AS_READ_HELD(seg
->s_as
)) {
8375 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8380 * Mark any existing pages in given range for
8383 page_mark_migrate(seg
, addr
, len
, amp
, svd
->anon_index
,
8384 vp
, svd
->offset
, 1);
8387 * Don't need to try to split or concatenate
8388 * segments, since policy is same or this is a shared
8391 if (already_set
|| svd
->type
== MAP_SHARED
)
8394 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
8395 ASSERT(svd
->amp
== NULL
);
8396 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
8397 ASSERT(svd
->softlockcnt
== 0);
8398 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
8400 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
8404 * Split off new segment if advice only applies to a
8405 * portion of existing segment starting in middle
8409 oldeaddr
= seg
->s_base
+ seg
->s_size
;
8410 if (addr
> seg
->s_base
) {
8412 * Must flush I/O page cache
8413 * before splitting segment
8415 if (svd
->softlockcnt
> 0)
8419 * Split segment and return IE_REATTACH to tell
8420 * as_ctl() that current segment changed
8422 new_seg
= segvn_split_seg(seg
, addr
);
8423 new_svd
= (struct segvn_data
*)new_seg
->s_data
;
8427 * If new segment ends where old one
8428 * did, try to concatenate the new
8429 * segment with next one.
8431 if (eaddr
== oldeaddr
) {
8433 * Set policy for new segment
8435 (void) lgrp_privm_policy_set(policy
,
8436 &new_svd
->policy_info
,
8439 next
= AS_SEGNEXT(new_seg
->s_as
,
8443 next
->s_ops
== &segvn_ops
&&
8444 eaddr
== next
->s_base
)
8445 (void) segvn_concat(new_seg
,
8451 * Split off end of existing segment if advice only
8452 * applies to a portion of segment ending before
8453 * end of the existing segment
8455 if (eaddr
< oldeaddr
) {
8457 * Must flush I/O page cache
8458 * before splitting segment
8460 if (svd
->softlockcnt
> 0)
8464 * If beginning of old segment was already
8465 * split off, use new segment to split end off
8468 if (new_seg
!= NULL
&& new_seg
!= seg
) {
8472 (void) segvn_split_seg(new_seg
, eaddr
);
8475 * Set policy for new segment
8477 (void) lgrp_privm_policy_set(policy
,
8478 &new_svd
->policy_info
,
8482 * Split segment and return IE_REATTACH
8483 * to tell as_ctl() that current
8486 (void) segvn_split_seg(seg
, eaddr
);
8489 (void) lgrp_privm_policy_set(policy
,
8490 &svd
->policy_info
, seg
->s_size
);
8493 * If new segment starts where old one
8494 * did, try to concatenate it with
8497 if (addr
== seg
->s_base
) {
8498 prev
= AS_SEGPREV(seg
->s_as
,
8502 * Drop lock for private data
8503 * of current segment before
8504 * concatenating (deleting) it
8509 addr
== prev
->s_base
+
8514 (void) segvn_concat(
8522 case MADV_SEQUENTIAL
:
8523 ASSERT(seg
->s_szc
== 0);
8524 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
8525 hat_unload(seg
->s_as
->a_hat
, addr
, len
, HAT_UNLOAD
);
8529 bvpp
= &svd
->vpage
[page
];
8530 evpp
= &svd
->vpage
[page
+ (len
>> PAGESHIFT
)];
8531 for (; bvpp
< evpp
; bvpp
++)
8532 VPP_SETADVICE(bvpp
, behav
);
8533 svd
->advice
= MADV_NORMAL
;
8535 case MADV_WILLNEED
: /* handled in memcntl */
8536 case MADV_DONTNEED
: /* handled in memcntl */
8537 case MADV_FREE
: /* handled above */
8543 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
/*
 * There is one kind of inheritance that can be specified for pages:
 *
 *	SEGP_INH_ZERO - Pages should be zeroed in the child
 */
static int
segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *bvpp, *evpp;
	size_t page;
	int ret = 0;
8560 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
8562 /* Can't support something we don't know about */
8563 if (behav
!= SEGP_INH_ZERO
)
8566 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
8569 * This must be a straightforward anonymous segment that is mapped
8570 * privately and is not backed by a vnode.
8572 if (svd
->tr_state
!= SEGVN_TR_OFF
||
8573 svd
->type
!= MAP_PRIVATE
||
8580 * If the entire segment has been marked as inherit zero, then no reason
8581 * to do anything else.
8583 if (svd
->svn_inz
== SEGVN_INZ_ALL
) {
8589 * If this applies to the entire segment, simply mark it and we're done.
8591 if ((addr
== seg
->s_base
) && (len
== seg
->s_size
)) {
8592 svd
->svn_inz
= SEGVN_INZ_ALL
;
8598 * We've been asked to mark a subset of this segment as inherit zero,
8599 * therefore we need to mainpulate its vpages.
8601 if (svd
->vpage
== NULL
) {
8603 if (svd
->vpage
== NULL
) {
8609 svd
->svn_inz
= SEGVN_INZ_VPP
;
8610 page
= seg_page(seg
, addr
);
8611 bvpp
= &svd
->vpage
[page
];
8612 evpp
= &svd
->vpage
[page
+ (len
>> PAGESHIFT
)];
8613 for (; bvpp
< evpp
; bvpp
++)
8614 VPP_SETINHZERO(bvpp
);
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

	return (ret);
}
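
/*
 * Illustrative sketch (assumption, not from the original file): inherit-zero
 * is requested through memcntl(2) with MC_INHERIT_ZERO, so that a child
 * created by fork() sees the region zero-filled rather than a COW copy:
 *
 *	(void) memcntl(addr, len, MC_INHERIT_ZERO, 0, 0, 0);
 */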
/*
 * Create a vpage structure for this seg.
 */
static void
segvn_vpage(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	static pgcnt_t page_limit = 0;

	ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));

	/*
	 * If no vpage structure exists, allocate one.  Copy the protections
	 * and the advice from the segment itself to the individual pages.
	 */
	if (svd->vpage == NULL) {
		/*
		 * Start by calculating the number of pages we must allocate to
		 * track the per-page vpage structs needs for this entire
		 * segment. If we know now that it will require more than our
		 * heuristic for the maximum amount of kmem we can consume then
		 * fail. We do this here, instead of trying to detect this deep
		 * in page_resv and propagating the error up, since the entire
		 * memory allocation stack is not amenable to passing this
		 * back. Instead, it wants to keep trying.
		 *
		 * As a heuristic we set a page limit of 5/8s of total_pages
		 * for this allocation. We use shifts so that no floating
		 * point conversion takes place and only need to do the
		 * calculation once.
		 */
		ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
		pgcnt_t npages = mem_needed >> PAGESHIFT;

		if (page_limit == 0)
			page_limit = (total_pages >> 1) + (total_pages >> 3);

		if (npages > page_limit)
			return;

		svd->pageadvice = 1;
		svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
		evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
		for (vp = svd->vpage; vp < evp; vp++) {
			VPP_SETPROT(vp, svd->prot);
			VPP_SETADVICE(vp, svd->advice);
		}
	}
}
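
/*
 * Worked example of the heuristic above (added for clarity): 5/8 is computed
 * with shifts as 1/2 + 1/8, so
 *
 *	page_limit = (total_pages >> 1) + (total_pages >> 3)
 *
 * With total_pages == 1048576 (4GB of 4K pages) that is 524288 + 131072 =
 * 655360 pages; a vpage array that would need more pages of kmem than that
 * is refused and the segment keeps using its segment-wide protections and
 * advice.
 */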
/*
 * Dump the pages belonging to this segvn segment.
 */
static void
segvn_dump(struct seg *seg)
{
	struct segvn_data *svd;
	struct vnode *vp = NULL;
	struct anon_map *amp;
	ulong_t	anon_index = 0;
	u_offset_t off, offset;
	pfn_t pfn;
	pgcnt_t page, npages;
	caddr_t addr;
	struct page *pp;

	npages = seg_pages(seg);
	svd = (struct segvn_data *)seg->s_data;
	vp = svd->vp;
	off = offset = svd->offset;
	addr = seg->s_base;

	if ((amp = svd->amp) != NULL) {
		anon_index = svd->anon_index;
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	}

	for (page = 0; page < npages; page++, offset += PAGESIZE) {
		struct anon *ap;
		int we_own_it = 0;

		if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
			swap_xlate_nopanic(ap, &vp, &off);
		} else {
			vp = svd->vp;
			off = offset;
		}

		/*
		 * If pp == NULL, the page either does not exist
		 * or is exclusively locked.  So determine if it
		 * exists before searching for it.
		 */

		if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
			we_own_it = 1;
		else
			pp = page_exists(vp, off);

		if (pp) {
			pfn = page_pptonum(pp);
			dump_addpage(seg->s_as, addr, pfn);
			if (we_own_it)
				page_unlock(pp);
		}
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}

	if (amp != NULL)
		ANON_LOCK_EXIT(&amp->a_rwlock);
}
static uint32_t segvn_pglock_mtbf = 0;

#define	PCACHE_SHWLIST	((page_t *)-2)
#define	NOPCACHE_SHWLIST	((page_t *)-1)

/*
 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
 * the same parts of the segment. Currently shadow list creation is only
 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
 * tagged with segment pointer, starting virtual address and length. This
 * approach for MAP_SHARED segments may add many pcache entries for the same
 * set of pages and lead to long hash chains that decrease pcache lookup
 * performance. To avoid this issue for shared segments shared anon map and
 * starting anon index are used for pcache entry tagging. This allows all
 * segments to share pcache entries for the same anon range and reduces pcache
 * chain's length as well as memory overhead from duplicate shadow lists and
 * pcache hash entries.
 *
 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
 * part of softlockcnt accounting is done differently for private and shared
 * segments. In private segment case softlock is only incremented when a new
 * shadow list is created but not when an existing one is found via
 * seg_plookup(). pcache entries have reference count incremented/decremented
 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
 * reference count can be purged (and purging is needed before segment can be
 * freed). When a private segment pcache entry is purged segvn_reclaim() will
 * decrement softlockcnt. Since in private segment case each of its pcache
 * entries only belongs to this segment we can expect that when
 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
 * segment purge will succeed and softlockcnt will drop to 0. In shared
 * segment case reference count in pcache entry counts active locks from many
 * different segments so we can't expect segment purging to succeed even when
 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
 * segment. To be able to determine when there're no pending pagelocks in
 * shared segment case we don't rely on purging to make softlockcnt drop to 0
 * but instead softlockcnt is incremented and decremented for every
 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
 * list was created or an existing one was found. When softlockcnt drops to 0
 * this segment no longer has any claims for pcached shadow lists and the
 * segment can be freed even if there're still active pcache entries
 * shared by this segment anon map. Shared segment pcache entries belong to
 * anon map and are typically removed when anon map is freed after all
 * processes destroy the segments that use this anon map.
 */
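
/*
 * Illustrative sketch (assumption, not from the original comment): the usual
 * consumer of this interface is physio()/direct I/O, which brackets each
 * transfer with as_pagelock()/as_pageunlock().  Repeated raw I/O against the
 * same buffer then hits the cached shadow list instead of re-walking the anon
 * array:
 *
 *	struct page **pl;
 *	if (as_pagelock(as, &pl, addr, len, rw) == 0) {	  // L_PAGELOCK
 *		// ... drive the transfer using the page_t shadow list pl ...
 *		as_pageunlock(as, pl, addr, len, rw);	  // L_PAGEUNLOCK
 *	}
 */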
static int
segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
    enum lock_type type, enum seg_rw rw)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8791 pgcnt_t adjustpages
;
8794 uint_t protchk
= (rw
== S_READ
) ? PROT_READ
: PROT_WRITE
;
8796 struct anon_map
*amp
;
8798 struct page
**pplist
, **pl
, *pp
;
8801 caddr_t lpgaddr
, lpgeaddr
;
8802 anon_sync_obj_t cookie
;
8804 struct anon_map
*pamp
;
8806 seg_preclaim_cbfunc_t preclaim_callback
;
8811 int sftlck_sbase
= 0;
8812 int sftlck_send
= 0;
8815 if (type
== L_PAGELOCK
&& segvn_pglock_mtbf
) {
8816 hrtime_t ts
= gethrtime();
8817 if ((ts
% segvn_pglock_mtbf
) == 0) {
8820 if ((ts
% segvn_pglock_mtbf
) == 1) {
8826 TRACE_2(TR_FAC_PHYSIO
, TR_PHYSIO_SEGVN_START
,
8827 "segvn_pagelock: start seg %p addr %p", seg
, addr
);
8829 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
8830 ASSERT(type
== L_PAGELOCK
|| type
== L_PAGEUNLOCK
);
8832 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
8835 * for now we only support pagelock to anon memory. We would have to
8836 * check protections for vnode objects and call into the vnode driver.
8837 * That's too much for a fast path. Let the fault entry point handle
8840 if (svd
->vp
!= NULL
) {
8841 if (type
== L_PAGELOCK
) {
8845 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8847 if ((amp
= svd
->amp
) == NULL
) {
8848 if (type
== L_PAGELOCK
) {
8852 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8854 if (rw
!= S_READ
&& rw
!= S_WRITE
) {
8855 if (type
== L_PAGELOCK
) {
8859 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8862 if (seg
->s_szc
!= 0) {
8864 * We are adjusting the pagelock region to the large page size
8865 * boundary because the unlocked part of a large page cannot
8866 * be freed anyway unless all constituent pages of a large
8867 * page are locked. Bigger regions reduce pcache chain length
8868 * and improve lookup performance. The tradeoff is that the
8869 * very first segvn_pagelock() call for a given page is more
8870 * expensive if only 1 page_t is needed for IO. This is only
8871 * an issue if pcache entry doesn't get reused by several
8872 * subsequent calls. We optimize here for the case when pcache
8873 * is heavily used by repeated IOs to the same address range.
8875 * Note segment's page size cannot change while we are holding
8876 * as lock. And then it cannot change while softlockcnt is
8877 * not 0. This will allow us to correctly recalculate large
8878 * page size region for the matching pageunlock/reclaim call
8879 * since as_pageunlock() caller must always match
8880 * as_pagelock() call's addr and len.
8882 * For pageunlock *ppp points to the pointer of page_t that
8883 * corresponds to the real unadjusted start address. Similar
8884 * for pagelock *ppp must point to the pointer of page_t that
8885 * corresponds to the real unadjusted start address.
8887 pgsz
= page_get_pagesize(seg
->s_szc
);
8888 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
8889 adjustpages
= btop((uintptr_t)(addr
- lpgaddr
));
8890 } else if (len
< segvn_pglock_comb_thrshld
) {
8892 lpgeaddr
= addr
+ len
;
8897 * Align the address range of large enough requests to allow
8898 * combining of different shadow lists into 1 to reduce memory
8899 * overhead from potentially overlapping large shadow lists
8900 * (worst case is we have a 1MB IO into buffers with start
8901 * addresses separated by 4K). Alignment is only possible if
8902 * padded chunks have sufficient access permissions. Note
8903 * permissions won't change between L_PAGELOCK and
8904 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8905 * segvn_setprot() to wait until softlockcnt drops to 0. This
8906 * allows us to determine in L_PAGEUNLOCK the same range we
8907 * computed in L_PAGELOCK.
8909 * If alignment is limited by segment ends set
8910 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8911 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8912 * per segment counters. In L_PAGEUNLOCK case decrease
8913 * softlockcnt_sbase/softlockcnt_send counters if
8914 * sftlck_sbase/sftlck_send flags are set. When
8915 * softlockcnt_sbase/softlockcnt_send are non 0
8916 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8917 * won't merge the segments. This restriction combined with
8918 * restriction on segment unmapping and splitting for segments
8919 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8920 * correctly determine the same range that was previously
8921 * locked by matching L_PAGELOCK.
8923 pflags
= SEGP_PSHIFT
| (segvn_pglock_comb_bshift
<< 16);
8925 if (svd
->type
== MAP_PRIVATE
) {
8926 lpgaddr
= (caddr_t
)P2ALIGN((uintptr_t)addr
,
8927 segvn_pglock_comb_balign
);
8928 if (lpgaddr
< seg
->s_base
) {
8929 lpgaddr
= seg
->s_base
;
8933 ulong_t aix
= svd
->anon_index
+ seg_page(seg
, addr
);
8934 ulong_t aaix
= P2ALIGN(aix
, segvn_pglock_comb_palign
);
8935 if (aaix
< svd
->anon_index
) {
8936 lpgaddr
= seg
->s_base
;
8939 lpgaddr
= addr
- ptob(aix
- aaix
);
8940 ASSERT(lpgaddr
>= seg
->s_base
);
8943 if (svd
->pageprot
&& lpgaddr
!= addr
) {
8944 struct vpage
*vp
= &svd
->vpage
[seg_page(seg
, lpgaddr
)];
8945 struct vpage
*evp
= &svd
->vpage
[seg_page(seg
, addr
)];
8947 if ((VPP_PROT(vp
) & protchk
) == 0) {
8957 lpgeaddr
= addr
+ len
;
8959 if (svd
->type
== MAP_PRIVATE
) {
8960 lpgeaddr
= (caddr_t
)P2ROUNDUP(
8961 (uintptr_t)lpgeaddr
,
8962 segvn_pglock_comb_balign
);
8964 ulong_t aix
= svd
->anon_index
+
8965 seg_page(seg
, lpgeaddr
);
8966 ulong_t aaix
= P2ROUNDUP(aix
,
8967 segvn_pglock_comb_palign
);
8971 lpgeaddr
+= ptob(aaix
- aix
);
8974 if (lpgeaddr
== 0 ||
8975 lpgeaddr
> seg
->s_base
+ seg
->s_size
) {
8976 lpgeaddr
= seg
->s_base
+ seg
->s_size
;
8980 if (svd
->pageprot
&& lpgeaddr
!= addr
+ len
) {
8984 vp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
8985 evp
= &svd
->vpage
[seg_page(seg
, lpgeaddr
)];
8988 if ((VPP_PROT(vp
) & protchk
) == 0) {
8994 lpgeaddr
= addr
+ len
;
8997 adjustpages
= btop((uintptr_t)(addr
- lpgaddr
));
9001 * For MAP_SHARED segments we create pcache entries tagged by amp and
9002 * anon index so that we can share pcache entries with other segments
9003 * that map this amp. For private segments pcache entries are tagged
9004 * with segment and virtual address.
9006 if (svd
->type
== MAP_SHARED
) {
9008 paddr
= (caddr_t
)((lpgaddr
- seg
->s_base
) +
9009 ptob(svd
->anon_index
));
9010 preclaim_callback
= shamp_reclaim
;
9014 preclaim_callback
= segvn_reclaim
;
9017 if (type
== L_PAGEUNLOCK
) {
9018 VM_STAT_ADD(segvnvmstats
.pagelock
[0]);
9021 * update hat ref bits for /proc. We need to make sure
9022 * that threads tracing the ref and mod bits of the
9023 * address space get the right data.
9024 * Note: page ref and mod bits are updated at reclaim time
9026 if (seg
->s_as
->a_vbits
) {
9027 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
) {
9028 if (rw
== S_WRITE
) {
9029 hat_setstat(seg
->s_as
, a
,
9030 PAGESIZE
, P_REF
| P_MOD
);
9032 hat_setstat(seg
->s_as
, a
,
9039 * Check the shadow list entry after the last page used in
9040 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
9041 * was not inserted into pcache and is not large page
9042 * adjusted. In this case call reclaim callback directly and
9043 * don't adjust the shadow list start and size for large
9047 if ((*ppp
)[npages
] == NOPCACHE_SHWLIST
) {
9050 ASSERT(svd
->type
== MAP_SHARED
);
9051 ptag
= (void *)pamp
;
9052 paddr
= (caddr_t
)((addr
- seg
->s_base
) +
9053 ptob(svd
->anon_index
));
9058 (*preclaim_callback
)(ptag
, paddr
, len
, *ppp
, rw
, 0);
9060 ASSERT((*ppp
)[npages
] == PCACHE_SHWLIST
||
9061 IS_SWAPFSVP((*ppp
)[npages
]->p_vnode
));
9062 len
= lpgeaddr
- lpgaddr
;
9064 seg_pinactive(seg
, pamp
, paddr
, len
,
9065 *ppp
- adjustpages
, rw
, pflags
, preclaim_callback
);
9069 ASSERT(svd
->type
== MAP_SHARED
);
9070 ASSERT(svd
->softlockcnt
>= npages
);
9071 atomic_add_long((ulong_t
*)&svd
->softlockcnt
, -npages
);
9075 ASSERT(svd
->softlockcnt_sbase
> 0);
9076 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt_sbase
);
9079 ASSERT(svd
->softlockcnt_send
> 0);
9080 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt_send
);
9084 * If someone is blocked while unmapping, we purge
9085 * segment page cache and thus reclaim pplist synchronously
9086 * without waiting for seg_pasync_thread. This speeds up
9087 * unmapping in cases where munmap(2) is called, while
9088 * raw async i/o is still in progress or where a thread
9089 * exits on data fault in a multithreaded application.
9091 if (AS_ISUNMAPWAIT(seg
->s_as
)) {
9092 if (svd
->softlockcnt
== 0) {
9093 mutex_enter(&seg
->s_as
->a_contents
);
9094 if (AS_ISUNMAPWAIT(seg
->s_as
)) {
9095 AS_CLRUNMAPWAIT(seg
->s_as
);
9096 cv_broadcast(&seg
->s_as
->a_cv
);
9098 mutex_exit(&seg
->s_as
->a_contents
);
9099 } else if (pamp
== NULL
) {
9101 * softlockcnt is not 0 and this is a
9102 * MAP_PRIVATE segment. Try to purge its
9103 * pcache entries to reduce softlockcnt.
9104 * If it drops to 0 segvn_reclaim()
9105 * will wake up a thread waiting on
9108 * We don't purge MAP_SHARED segments with non
9109 * 0 softlockcnt since IO is still in progress
9110 * for such segments.
9112 ASSERT(svd
->type
== MAP_PRIVATE
);
9116 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
9117 TRACE_2(TR_FAC_PHYSIO
, TR_PHYSIO_SEGVN_UNLOCK_END
,
9118 "segvn_pagelock: unlock seg %p addr %p", seg
, addr
);
9122 /* The L_PAGELOCK case ... */

	VM_STAT_ADD(segvnvmstats.pagelock[1]);

	/*
	 * For MAP_SHARED segments we have to check protections before
	 * seg_plookup() since pcache entries may be shared by many segments
	 * with potentially different page protections.
	 */
	ASSERT(svd->type == MAP_SHARED);
	if (svd->pageprot == 0) {
		if ((svd->prot & protchk) == 0) {
			error = EACCES;
			goto out;
		}
	} else {
		/*
		 * check page protections
		 */
		for (; a < ea; a += pgsz) {
			ASSERT(seg->s_szc == 0 ||
			    sameprot(seg, a, pgsz));
			vp = &svd->vpage[seg_page(seg, a)];
			if ((VPP_PROT(vp) & protchk) == 0) {
				error = EACCES;
				goto out;
			}
		}
	}

	/*
	 * try to find pages in segment page cache
	 */
	pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
	if (pplist != NULL) {
		npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
		ASSERT(svd->type == MAP_SHARED);
		atomic_add_long((ulong_t *)&svd->softlockcnt,
		    npages);
		atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
		atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		*ppp = pplist + adjustpages;
		TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
		    "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
		return (0);
	}

	/*
	 * For MAP_SHARED segments we already verified above that segment
	 * protections allow this pagelock operation.
	 */
	ASSERT(svd->type == MAP_PRIVATE);
	if (svd->pageprot == 0) {
		if ((svd->prot & protchk) == 0) {
			error = EACCES;
			goto out;
		}
		if (svd->prot & PROT_WRITE) {
			wlen = lpgeaddr - lpgaddr;
		} else {
			ASSERT(rw == S_READ);
		}
	} else {
		/*
		 * check page protections
		 */
		for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
			ASSERT(seg->s_szc == 0 ||
			    sameprot(seg, a, pgsz));
			vp = &svd->vpage[seg_page(seg, a)];
			if ((VPP_PROT(vp) & protchk) == 0) {
				error = EACCES;
				goto out;
			}
			if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
				wlen += pgsz;
			} else {
				wcont = 0;
				ASSERT(rw == S_READ);
			}
		}
	}
	ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
	ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
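
	/*
	 * At this point wlen is the length of the leading part of the locked
	 * range (starting at lpgaddr) that is writable. It is handed to
	 * seg_pinsert() below so that a later S_WRITE lookup can only hit
	 * within the writable prefix of this shadow list.
	 */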

	/*
	 * Only build large page adjusted shadow list if we expect to insert
	 * it into pcache. For large enough pages it's a big overhead to
	 * create a shadow list of the entire large page. But this overhead
	 * should be amortized over repeated pcache hits on subsequent reuse
	 * of this shadow list (IO into any range within this shadow list will
	 * find it in pcache since we large page align the request for pcache
	 * lookups). pcache performance is improved with bigger shadow lists
	 * as it reduces the time to pcache the entire big segment and reduces
	 * pcache chain length.
	 */
	if (seg_pinsert_check(seg, pamp, paddr,
	    lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
		len = lpgeaddr - lpgaddr;
	} else {
		/*
		 * Since this entry will not be inserted into the pcache, we
		 * will not do any adjustments to the starting address or
		 * size of the memory to be locked.
		 */
	}

	pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
	pl = pplist;
	*ppp = pplist + adjustpages;
	/*
	 * If use_pcache is 0 this shadow list is not large page adjusted.
	 * Record this info in the last entry of shadow array so that
	 * L_PAGEUNLOCK can determine if it should large page adjust the
	 * address range to find the real range that was locked.
	 */
	pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;

	page = seg_page(seg, addr);
	anon_index = svd->anon_index + page;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	ASSERT(amp->a_szc >= seg->s_szc);
	anpgcnt = page_get_pagecnt(amp->a_szc);
	for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
		/*
		 * Lock and unlock anon array only once per large page.
		 * anon_array_enter() locks the root anon slot according to
		 * a_szc which can't change while anon map is locked. We lock
		 * anon the first time through this loop and each time we
		 * reach anon index that corresponds to a root of a large
		 * page.
		 */
		if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
			ASSERT(anlock == 0);
			anon_array_enter(amp, anon_index, &cookie);
			anlock = 1;
		}
		ap = anon_get_ptr(amp->ahp, anon_index);

		/*
		 * We must never use seg_pcache for COW pages
		 * because we might end up with original page still
		 * lying in seg_pcache even after private page is
		 * created. This leads to data corruption as
		 * aio_write refers to the page still in cache
		 * while all other accesses refer to the private
		 * page.
		 */
		if (ap == NULL || ap->an_refcnt != 1) {
			struct vpage *vpage;

			if (svd->vpage != NULL) {
				vpage = &svd->vpage[seg_page(seg, a)];
			}
			anon_array_exit(&cookie);
			anlock = 0;
			error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
			    vpage, &pp, 0, F_INVAL, rw, 1);
			if (error) {
				error = fc_decode(error);
				break;
			}
			anon_array_enter(amp, anon_index, &cookie);
			anlock = 1;
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap == NULL || ap->an_refcnt != 1) {
				error = EFAULT;
				break;
			}
		}
		swap_xlate(ap, &vp, &off);
		pp = page_lookup_nowait(vp, off, SE_SHARED);
		if (ap->an_pvp != NULL) {
			anon_swap_free(ap, pp);
		}
		/*
		 * Unlock anon if this is the last slot in a large page.
		 */
		if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
			anon_array_exit(&cookie);
			anlock = 0;
		}
		*pplist++ = pp;
	}
	if (anlock) {		/* Ensure the lock is dropped */
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (a >= addr + len) {
		atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
		ASSERT(svd->type == MAP_SHARED);
		atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
		    npages);
		atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
		atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
		(void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
		    rw, pflags, preclaim_callback);
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
		    "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
		return (0);
	}

	pplist = pl;
	np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
	while (np > (uint_t)0) {
		ASSERT(PAGE_LOCKED(*pplist));
		page_unlock(*pplist);
		np--;
		pplist++;
	}
	kmem_free(pl, sizeof (page_t *) * (npages + 1));
out:
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
	    "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
	return (error);
}
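
/*
 * Note: callers normally reach the L_PAGELOCK/L_PAGEUNLOCK cases above
 * through as_pagelock() and as_pageunlock(), e.g. from physio() when raw
 * I/O is done directly against user memory. The shadow page list built
 * here is what keeps those pages locked down for the duration of the I/O.
 */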

/*
 * purge any cached pages in the I/O page cache
 */
static void
segvn_purge(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	/*
	 * pcache is only used by pure anon segments.
	 */
	if (svd->amp == NULL || svd->vp != NULL) {
		return;
	}

	/*
	 * For MAP_SHARED segments non 0 segment's softlockcnt means
	 * active IO is still in progress via this segment. So we only
	 * purge MAP_SHARED segments when their softlockcnt is 0.
	 */
	if (svd->type == MAP_PRIVATE) {
		if (svd->softlockcnt) {
			seg_ppurge(seg, NULL, 0);
		}
	} else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
		seg_ppurge(seg, svd->amp, 0);
	}
}

/*
 * If async argument is not 0 we are called from pcache async thread and don't
 * hold AS lock.
 */
static int
segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
	struct seg *seg = (struct seg *)ptag;
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	npages = np = btop(len);

	ASSERT(svd->vp == NULL && svd->amp != NULL);
	ASSERT(svd->softlockcnt >= npages);
	ASSERT(async || AS_LOCK_HELD(seg->s_as));

	pl = pplist;

	ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
	ASSERT(!async || pl[np] == PCACHE_SHWLIST);
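
	/*
	 * Since the I/O went directly to these pages, mark them referenced
	 * (and, for S_WRITE requests, modified) before dropping the shared
	 * page locks that were taken at L_PAGELOCK time.
	 */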
	while (np > (uint_t)0) {
		if (rw == S_WRITE) {
			hat_setrefmod(*pplist);
		} else {
			hat_setref(*pplist);
		}
		page_unlock(*pplist);
		np--;
		pplist++;
	}

	kmem_free(pl, sizeof (page_t *) * (npages + 1));

	/*
	 * If we are pcache async thread we don't hold AS lock. This means if
	 * softlockcnt drops to 0 after the decrement below address space may
	 * get freed. We can't allow it since after softlock decrement to 0 we
	 * still need to access as structure for possible wakeup of unmap
	 * waiters. To prevent the disappearance of as we take this segment
	 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
	 * make sure this routine completes before segment is freed.
	 *
	 * The second complication we have to deal with in async case is a
	 * possibility of missed wake up of unmap wait thread. When we don't
	 * hold as lock here we may take a_contents lock before unmap wait
	 * thread that was first to see softlockcnt was still not 0. As a
	 * result we'll fail to wake up an unmap wait thread. To avoid this
	 * race we set nounmapwait flag in as structure if we drop softlockcnt
	 * to 0 when we were called by pcache async thread. unmapwait thread
	 * will not block if this flag is set.
	 */
	if (async) {
		mutex_enter(&svd->segfree_syncmtx);
	}

	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
		if (async || AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (async) {
				AS_SETNOUNMAPWAIT(seg->s_as);
			}
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}

	if (async) {
		mutex_exit(&svd->segfree_syncmtx);
	}

	return (0);
}
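
/*
 * shamp_reclaim() is the pcache reclaim callback used when a shadow list was
 * inserted keyed by a shared anon map (ptag is the amp, not the segment), so
 * that any segment mapping the same anonymous memory can reuse it. It drops
 * the page locks and the amp's a_softlockcnt.
 */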
static int
shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
	amp_t *amp = (amp_t *)ptag;

	npages = np = btop(len);

	ASSERT(amp->a_softlockcnt >= npages);

	pl = pplist;

	ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
	ASSERT(!async || pl[np] == PCACHE_SHWLIST);

	while (np > (uint_t)0) {
		if (rw == S_WRITE) {
			hat_setrefmod(*pplist);
		} else {
			hat_setref(*pplist);
		}
		page_unlock(*pplist);
		np--;
		pplist++;
	}

	kmem_free(pl, sizeof (page_t *) * (npages + 1));

	/*
	 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
	 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
	 * and anonmap_purge() acquires a_purgemtx.
	 */
	mutex_enter(&amp->a_purgemtx);
	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
	    amp->a_purgewait) {
		amp->a_purgewait = 0;
		cv_broadcast(&amp->a_purgecv);
	}
	mutex_exit(&amp->a_purgemtx);
	return (0);
}

/*
 * get a memory ID for an addr in a given segment
 *
 * XXX only creates PAGESIZE pages if anon slots are not initialized.
 * At fault time they will be relocated into larger pages.
 */
static int
segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon *ap = NULL;
	struct anon_map *amp;
	anon_sync_obj_t cookie;

	if (svd->type == MAP_PRIVATE) {
		memidp->val[0] = (uintptr_t)seg->s_as;
		memidp->val[1] = (uintptr_t)addr;
		return (0);
	}

	if (svd->type == MAP_SHARED) {
		if (svd->vp) {
			memidp->val[0] = (uintptr_t)svd->vp;
			memidp->val[1] = (u_longlong_t)svd->offset +
			    (uintptr_t)(addr - seg->s_base);
			return (0);
		}
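
		/*
		 * MAP_SHARED anonymous memory (MAP_ANON | MAP_SHARED
		 * mappings, SysV shared memory): identify the page by its
		 * anon slot instead. If the slot is still empty, allocate a
		 * zero-filled page now; see the XXX note above about
		 * PAGESIZE allocations.
		 */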
		SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
		if ((amp = svd->amp) != NULL) {
			anon_index = svd->anon_index +
			    seg_page(seg, addr);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

		ASSERT(amp != NULL);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		anon_array_enter(amp, anon_index, &cookie);
		ap = anon_get_ptr(amp->ahp, anon_index);
		if (ap == NULL) {
			pp = anon_zero(seg, addr, &ap, svd->cred);
			if (pp == NULL) {
				anon_array_exit(&cookie);
				ANON_LOCK_EXIT(&amp->a_rwlock);
				return (ENOMEM);
			}
			ASSERT(anon_get_ptr(amp->ahp, anon_index)
			    == NULL);
			(void) anon_set_ptr(amp->ahp, anon_index,
			    ap, ANON_SLEEP);
		}

		anon_array_exit(&cookie);
		ANON_LOCK_EXIT(&amp->a_rwlock);

		memidp->val[0] = (uintptr_t)ap;
		memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
		return (0);
	}
}

static int
sameprot(struct seg *seg, caddr_t a, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vpage;
	spgcnt_t pages = btop(len);
	uint_t prot;

	if (svd->pageprot == 0)
		return (1);

	ASSERT(svd->vpage != NULL);

	vpage = &svd->vpage[seg_page(seg, a)];
	prot = VPP_PROT(vpage);

	while (pages-- > 0) {
		if (prot != VPP_PROT(vpage))
			return (0);
		vpage++;
	}
	return (1);
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segvn_getpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	ulong_t anon_index;
	lgrp_mem_policy_info_t *policy_info;
	struct segvn_data *svn_data;
	u_offset_t vn_off;
	vnode_t *vp;

	ASSERT(seg != NULL);

	svn_data = (struct segvn_data *)seg->s_data;
	if (svn_data == NULL)
		return (NULL);

	/*
	 * Get policy info for private or shared memory
	 */
	if (svn_data->type != MAP_SHARED) {
		if (svn_data->tr_state != SEGVN_TR_ON) {
			policy_info = &svn_data->policy_info;
		} else {
			policy_info = &svn_data->tr_policy_info;
			ASSERT(policy_info->mem_policy ==
			    LGRP_MEM_POLICY_NEXT_SEG);
		}
	} else {
		amp = svn_data->amp;
		anon_index = svn_data->anon_index + seg_page(seg, addr);
		vp = svn_data->vp;
		vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
		policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
	}

	return (policy_info);
}

static int
segvn_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

/*
 * Bind text vnode segment to an amp. If we bind successfully mappings will be
 * established to per vnode mapping per lgroup amp pages instead of to vnode
 * pages. There's one amp per vnode text mapping per lgroup. Many processes
 * may share the same text replication amp. If a suitable amp doesn't already
 * exist in svntr hash table create a new one. We may fail to bind to amp if
 * segment is not eligible for text replication. Code below first checks for
 * these conditions. If binding is successful segment tr_state is set to on
 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
 * svd->amp remains as NULL.
 */
static void
segvn_textrepl(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	vnode_t *vp = svd->vp;
	u_offset_t off = svd->offset;
	size_t size = seg->s_size;
	u_offset_t eoff = off + size;
	uint_t szc = seg->s_szc;
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	proc_t *p = seg->s_as->a_proc;
	struct anon_map *amp;

	ASSERT(AS_LOCK_HELD(seg->s_as));
	ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->tr_state == SEGVN_TR_INIT);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->flags & MAP_TEXT);
	ASSERT(svd->type == MAP_PRIVATE);
	ASSERT(vp != NULL && svd->amp == NULL);
	ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
	ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
	ASSERT(seg->s_as != &kas);
	ASSERT(svntr_hashtab != NULL);

	/*
	 * If numa optimizations are no longer desired bail out.
	 */
	if (!lgrp_optimizations()) {
		svd->tr_state = SEGVN_TR_OFF;
		return;
	}

	/*
	 * Avoid creating anon maps with size bigger than the file size.
	 * If VOP_GETATTR() call fails bail out.
	 */
	va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
	if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
		svd->tr_state = SEGVN_TR_OFF;
		SEGVN_TR_ADDSTAT(gaerr);
		return;
	}
	if (btopr(va.va_size) < btopr(eoff)) {
		svd->tr_state = SEGVN_TR_OFF;
		SEGVN_TR_ADDSTAT(overmap);
		return;
	}

	/*
	 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
	 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
	 * mapping that checks if trcache for this vnode needs to be
	 * invalidated can't miss us.
	 */
	if (!(vp->v_flag & VVMEXEC)) {
		mutex_enter(&vp->v_lock);
		vp->v_flag |= VVMEXEC;
		mutex_exit(&vp->v_lock);
	}
	mutex_enter(&svntr_hashtab[hash].tr_lock);
	/*
	 * Bail out if potentially MAP_SHARED writable mappings exist to this
	 * vnode. We don't want to use old file contents from existing
	 * replicas if this mapping was established after the original file
	 * was modified.
	 */
	if (vn_is_mapped(vp, V_WRITE)) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		svd->tr_state = SEGVN_TR_OFF;
		SEGVN_TR_ADDSTAT(wrcnt);
		return;
	}
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp != vp) {
			continue;
		}

		/*
		 * Bail out if the file or its attributes were changed after
		 * this replication entry was created since we need to use the
		 * latest file contents. Note that mtime test alone is not
		 * sufficient because a user can explicitly change mtime via
		 * utimes(2) interfaces back to the old value after modifying
		 * the file contents. To detect this case we also have to test
		 * ctime which among other things records the time of the last
		 * mtime change by utimes(2). ctime is not changed when the
		 * file is only read or executed so we expect that typically
		 * existing replication amps can be used most of the time.
		 */
		if (!svntrp->tr_valid ||
		    svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
		    svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
		    svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
		    svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
			mutex_exit(&svntr_hashtab[hash].tr_lock);
			svd->tr_state = SEGVN_TR_OFF;
			SEGVN_TR_ADDSTAT(stale);
			return;
		}
		/*
		 * if off, eoff and szc match current segment we found the
		 * existing entry we can use.
		 */
		if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
		    svntrp->tr_szc == szc) {
			break;
		}
		/*
		 * Don't create different but overlapping in file offsets
		 * entries to avoid replication of the same file pages more
		 * than once per lgroup.
		 */
		if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
		    (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
			mutex_exit(&svntr_hashtab[hash].tr_lock);
			svd->tr_state = SEGVN_TR_OFF;
			SEGVN_TR_ADDSTAT(overlap);
			return;
		}
	}
	/*
	 * If we didn't find existing entry create a new one.
	 */
	if (svntrp == NULL) {
		svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
		if (svntrp == NULL) {
			mutex_exit(&svntr_hashtab[hash].tr_lock);
			svd->tr_state = SEGVN_TR_OFF;
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
		for (i = 0; i < NLGRPS_MAX; i++) {
			ASSERT(svntrp->tr_amp[i] == NULL);
		}
		svntrp->tr_vp = vp;
		svntrp->tr_off = off;
		svntrp->tr_eoff = eoff;
		svntrp->tr_szc = szc;
		svntrp->tr_valid = 1;
		svntrp->tr_mtime = va.va_mtime;
		svntrp->tr_ctime = va.va_ctime;
		svntrp->tr_refcnt = 0;
		svntrp->tr_next = svntr_hashtab[hash].tr_head;
		svntr_hashtab[hash].tr_head = svntrp;
	}

	/*
	 * We want to pick a replica with pages on main thread's (t_tid = 1,
	 * aka T1) lgrp. Currently text replication is only optimized for
	 * workloads that either have all threads of a process on the same
	 * lgrp or execute their large text primarily on main thread.
	 */
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		/*
		 * In case exec() prefaults text on non main thread use
		 * current thread lgrpid. It will become main thread anyway
		 * soon.
		 */
		lgrp_id = lgrp_home_id(curthread);
	}
	/*
	 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
	 * just set it to NLGRPS_MAX if it's different from current process T1
	 * home lgrp. p_tr_lgrpid is used to detect if process uses text
	 * replication and T1 new home is different from lgrp used for text
	 * replication. When this happens asynchronous segvn thread rechecks
	 * if segments should change lgrps used for text replication. If we
	 * fail to set p_tr_lgrpid with atomic_cas_32 then set it to
	 * NLGRPS_MAX without cas if it's not already NLGRPS_MAX and not
	 * equal to the lgrp_id we want to use. We don't need to use cas in
	 * this case because another thread that races in between our non
	 * atomic check and set may only change p_tr_lgrpid to NLGRPS_MAX at
	 * this point.
	 */
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	olid = p->p_tr_lgrpid;
	if (lgrp_id != olid && olid != NLGRPS_MAX) {
		lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
		if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
		    olid) {
			olid = p->p_tr_lgrpid;
			ASSERT(olid != LGRP_NONE);
			if (olid != lgrp_id && olid != NLGRPS_MAX) {
				p->p_tr_lgrpid = NLGRPS_MAX;
			}
		}
		ASSERT(p->p_tr_lgrpid != LGRP_NONE);
		/*
		 * lgrp_move_thread() won't schedule async recheck after
		 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
		 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
		 * is not LGRP_NONE.
		 */
		if (first && p->p_t1_lgrpid != LGRP_NONE &&
		    p->p_t1_lgrpid != lgrp_id) {
		}
	}
	/*
	 * If no amp was created yet for lgrp_id create a new one as long as
	 * we have enough memory to afford it.
	 */
	if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_TR_ADDSTAT(normem);
			goto fail;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_TR_ADDSTAT(noanon);
			goto fail;
		}
		amp = anonmap_alloc(size, size, ANON_NOSLEEP);
		if (amp == NULL) {
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			goto fail;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = szc;
		svntrp->tr_amp[lgrp_id] = amp;
		SEGVN_TR_ADDSTAT(newamp);
	}
	svntrp->tr_refcnt++;
	ASSERT(svd->svn_trnext == NULL);
	ASSERT(svd->svn_trprev == NULL);
	svd->svn_trnext = svntrp->tr_svnhead;
	svd->svn_trprev = NULL;
	if (svntrp->tr_svnhead != NULL) {
		svntrp->tr_svnhead->svn_trprev = svd;
	}
	svntrp->tr_svnhead = svd;
	ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
	ASSERT(amp->refcnt >= 1);
	svd->amp = amp;
	svd->anon_index = 0;
	svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->tr_state = SEGVN_TR_ON;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	SEGVN_TR_ADDSTAT(repl);
	return;
fail:
	ASSERT(segvn_textrepl_bytes >= size);
	atomic_add_long(&segvn_textrepl_bytes, -size);
	ASSERT(svntrp != NULL);
	ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
	if (svntrp->tr_refcnt == 0) {
		ASSERT(svntrp == svntr_hashtab[hash].tr_head);
		svntr_hashtab[hash].tr_head = svntrp->tr_next;
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		kmem_cache_free(svntr_cache, svntrp);
	} else {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
	svd->tr_state = SEGVN_TR_OFF;
}
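
/*
 * segvn_textunrepl() below undoes the binding established above; the
 * asynchronous thread (segvn_trasync_thread()/segvn_trupdate()) later
 * re-evaluates which lgroup's replica a segment should use when the
 * owning process's T1 home lgroup changes.
 */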

/*
 * Convert seg back to regular vnode mapping seg by unbinding it from its text
 * replication amp. This routine is most typically called when segment is
 * unmapped but can also be called when segment no longer qualifies for text
 * replication (e.g. due to protection changes). If unload_unmap is set use
 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of
 * svntr free all its anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	vnode_t *vp = svd->vp;
	u_offset_t off = svd->offset;
	size_t size = seg->s_size;
	u_offset_t eoff = off + size;
	uint_t szc = seg->s_szc;
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t **prv_svntrp;
	lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;

	ASSERT(AS_LOCK_HELD(seg->s_as));
	ASSERT(AS_WRITE_HELD(seg->s_as) ||
	    SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->amp->refcnt >= 1);
	ASSERT(svd->anon_index == 0);
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	ASSERT(svntr_hashtab != NULL);

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	prv_svntrp = &svntr_hashtab[hash].tr_head;
	for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
		    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
			break;
		}
	}
	if (svntrp == NULL) {
		panic("segvn_textunrepl: svntr record not found");
	}
	if (svntrp->tr_amp[lgrp_id] != svd->amp) {
		panic("segvn_textunrepl: amp mismatch");
	}
	svd->tr_state = SEGVN_TR_OFF;
	svd->amp = NULL;
	if (svd->svn_trprev == NULL) {
		ASSERT(svntrp->tr_svnhead == svd);
		svntrp->tr_svnhead = svd->svn_trnext;
		if (svntrp->tr_svnhead != NULL) {
			svntrp->tr_svnhead->svn_trprev = NULL;
		}
		svd->svn_trnext = NULL;
	} else {
		svd->svn_trprev->svn_trnext = svd->svn_trnext;
		if (svd->svn_trnext != NULL) {
			svd->svn_trnext->svn_trprev = svd->svn_trprev;
			svd->svn_trnext = NULL;
		}
		svd->svn_trprev = NULL;
	}
	if (--svntrp->tr_refcnt) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		goto done;
	}
	*prv_svntrp = svntrp->tr_next;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	for (i = 0; i < NLGRPS_MAX; i++) {
		struct anon_map *amp = svntrp->tr_amp[i];
		if (amp == NULL) {
			continue;
		}
		ASSERT(amp->refcnt == 1);
		ASSERT(amp->swresv == size);
		ASSERT(amp->size == size);
		ASSERT(amp->a_szc == szc);
		if (amp->a_szc != 0) {
			anon_free_pages(amp->ahp, 0, size, szc);
		} else {
			anon_free(amp->ahp, 0, size);
		}
		svntrp->tr_amp[i] = NULL;
		ASSERT(segvn_textrepl_bytes >= size);
		atomic_add_long(&segvn_textrepl_bytes, -size);
		anon_unresv_zone(amp->swresv, NULL);
	}
	kmem_cache_free(svntr_cache, svntrp);
done:
	hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
	    unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}

/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (VVMEXEC flag is set). In this case we
 * need to prevent further use of existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;

	ASSERT(vp->v_flag & VVMEXEC);

	if (svntr_hashtab == NULL) {
		return;
	}

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_valid) {
			svntrp->tr_valid = 0;
		}
	}
	mutex_exit(&svntr_hashtab[hash].tr_lock);
}

static void
segvn_trasync_thread(void)
{
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;	/* just for CPR stuff */

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

	CALLB_CPR_INIT(&cpr_info, &cpr_lock,
	    callb_generic_cpr, "segvn_async");

	if (segvn_update_textrepl_interval == 0) {
		segvn_update_textrepl_interval = segvn_update_tr_time * hz;
	} else {
		segvn_update_textrepl_interval *= hz;
	}
	(void) timeout(segvn_trupdate_wakeup, NULL,
	    segvn_update_textrepl_interval);
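
	/*
	 * Main loop: mark this thread CPR-safe while it waits on the
	 * semaphore so checkpoint/resume does not have to wait for it, then
	 * rescan the replication records each time segvn_trupdate_wakeup()
	 * posts segvn_trasync_sem.
	 */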
	for (;;) {
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		mutex_exit(&cpr_lock);
		sema_p(&segvn_trasync_sem);
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
		mutex_exit(&cpr_lock);
		segvn_trupdate();
	}
}

static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
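
/*
 * Timeout handler that drives the text replication async thread: kick the
 * thread only when lgrp_get_trthr_migrations() shows new T1 thread
 * migrations since the last scan, then re-arm the timeout unless updates
 * have been disabled.
 */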
static void
segvn_trupdate_wakeup(void *dummy)
{
	uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

	if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
		segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
		sema_v(&segvn_trasync_sem);
	}

	if (!segvn_disable_textrepl_update &&
	    segvn_update_textrepl_interval != 0) {
		(void) timeout(segvn_trupdate_wakeup, dummy,
		    segvn_update_textrepl_interval);
	}
}
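
/*
 * Walk every svntr hash bucket and every segment attached to each
 * replication record, rechecking whether the segment should switch to a
 * replica on the owning process's current T1 home lgroup.
 */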
static void
segvn_trupdate(void)
{
	ulong_t hash;
	svntr_t *svntrp;
	struct segvn_data *svd;

	ASSERT(svntr_hashtab != NULL);

	for (hash = 0; hash < svntr_hashtab_sz; hash++) {
		mutex_enter(&svntr_hashtab[hash].tr_lock);
		svntrp = svntr_hashtab[hash].tr_head;
		for (; svntrp != NULL; svntrp = svntrp->tr_next) {
			ASSERT(svntrp->tr_refcnt != 0);
			svd = svntrp->tr_svnhead;
			for (; svd != NULL; svd = svd->svn_trnext) {
				segvn_trupdate_seg(svd->seg, svd, svntrp,
				    hash);
			}
		}
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
}

static void
segvn_trupdate_seg(struct seg *seg, struct segvn_data *svd, svntr_t *svntrp,
    ulong_t hash)
{
	proc_t *p;
	lgrp_id_t lgrp_id;
	struct as *as;
	size_t size;
	struct anon_map *amp;

	ASSERT(svd->vp != NULL);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->offset == svntrp->tr_off);
	ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
	ASSERT(seg != NULL);
	ASSERT(svd->seg == seg);
	ASSERT(seg->s_data == (void *)svd);
	ASSERT(seg->s_szc == svntrp->tr_szc);
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
	ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
	ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

	as = seg->s_as;
	ASSERT(as != NULL && as != &kas);
	p = as->a_proc;
	ASSERT(p->p_tr_lgrpid != LGRP_NONE);
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		return;
	}
	ASSERT(lgrp_id < NLGRPS_MAX);
	if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
		return;
	}

	/*
	 * Use tryenter locking since we are locking as/seg and svntr hash
	 * lock in reverse from synchronous thread order.
	 */
	if (!AS_LOCK_TRYENTER(as, RW_READER)) {
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as);
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	size = seg->s_size;
	if (svntrp->tr_amp[lgrp_id] == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(normem);
			return;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(noanon);
			return;
		}
		amp = anonmap_alloc(size, size, KM_NOSLEEP);
		if (amp == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = seg->s_szc;
		svntrp->tr_amp[lgrp_id] = amp;
	}
	/*
	 * We don't need to drop the bucket lock but here we give other
	 * threads a chance. svntr and svd can't be unlinked as long as
	 * segment lock is held as a writer and AS held as well. After we
	 * retake bucket lock we'll continue from where we left. We'll be able
	 * to reach the end of either list since new entries are always added
	 * to the beginning of the lists.
	 */
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
	mutex_enter(&svntr_hashtab[hash].tr_lock);

	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
	ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->amp = svntrp->tr_amp[lgrp_id];
	p->p_tr_lgrpid = NLGRPS_MAX;
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	AS_LOCK_EXIT(as);

	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
	ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
	ASSERT(svd->seg == seg);
	ASSERT(svd->tr_state == SEGVN_TR_ON);

	SEGVN_TR_ADDSTAT(asyncrepl);
}