/* kernel/vm/vm_pvn.c */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */
/*
 * VM - paged vnode.
 *
 * This file supplies vm support for the vnode operations that deal with pages.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/tnf_probe.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/rm.h>
#include <vm/pvn.h>
#include <vm/page.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <sys/fs/swapnode.h>
int pvn_nofodklust = 0;
int pvn_write_noklust = 0;
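
/*
 * Note: marker_cache supplies the pseudo-page markers (tagged via
 * PP_SETPVN_TAG() in marker_constructor() below) that pvn_vplist_dirty()
 * inserts into a vnode's page list to remember its place while the
 * page-list mutex is dropped.
 */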
static struct kmem_cache *marker_cache = NULL;
/*
 * Find the largest contiguous block which contains `addr' for file offset
 * `offset' in it while living within the file system block sizes (`vp_off'
 * and `vp_len') and the address space limits for which no pages currently
 * exist and which map to consecutive file offsets.
 */
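/*
 * Note: the list returned below consists of pages obtained from
 * page_create_va(), and *offp/*lenp describe the contiguous file range
 * they cover; once the read has been issued, the caller is expected to
 * drop the i/o and page locks via pvn_read_done() or pvn_io_done()
 * (see below).
 */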
page_t *
pvn_read_kluster(
	struct vnode *vp,
	uoff_t off,
	struct seg *seg,
	caddr_t addr,
	uoff_t *offp,			/* return values */
	size_t *lenp,			/* return values */
	uoff_t vp_off,
	size_t vp_len,
	int isra)
{
	ssize_t deltaf, deltab;
	page_t *pp;
	page_t *plist = NULL;
	spgcnt_t pagesavail;
	uoff_t vp_end;
	ASSERT(off >= vp_off && off < vp_off + vp_len);

	/*
	 * We only want to do klustering/read ahead if there
	 * are more than minfree pages currently available.
	 */
	pagesavail = freemem - minfree;

	if (pagesavail <= 0)
		if (isra)
			return (NULL);		/* ra case - give up */
		else
			pagesavail = 1;		/* must return a page */

	/* We calculate in pages instead of bytes due to 32-bit overflows */
	if (pagesavail < (spgcnt_t)btopr(vp_len)) {
		/*
		 * Don't have enough free memory for the
		 * max request, try sizing down vp request.
		 */
		deltab = (ssize_t)(off - vp_off);
		vp_len -= deltab;
		vp_off += deltab;
		if (pagesavail < btopr(vp_len)) {
			/*
			 * Still not enough memory, just settle for
			 * pagesavail which is at least 1.
			 */
			vp_len = ptob(pagesavail);
		}
	}
	vp_end = vp_off + vp_len;
	ASSERT(off >= vp_off && off < vp_end);

	if (isra && segop_kluster(seg, addr, 0))
		return (NULL);		/* segment driver says no */

	if ((plist = page_create_va(vp, off,
	    PAGESIZE, PG_EXCL | PG_WAIT, seg, addr)) == NULL)
		return (NULL);
	if (vp_len <= PAGESIZE || pvn_nofodklust) {
		*offp = off;
		*lenp = MIN(vp_len, PAGESIZE);
	} else {
		/*
		 * Scan back from front by incrementing "deltab" and
		 * comparing "off" with "vp_off + deltab" to avoid
		 * "signed" versus "unsigned" conversion problems.
		 */
		for (deltab = PAGESIZE; off >= vp_off + deltab;
		    deltab += PAGESIZE) {
			/*
			 * Call back to the segment driver to verify that
			 * the klustering/read ahead operation makes sense.
			 */
			if (segop_kluster(seg, addr, -deltab))
				break;		/* page not eligible */
			if ((pp = page_create_va(vp, off - deltab,
			    PAGESIZE, PG_EXCL, seg, addr - deltab))
			    == NULL)
				break;		/* already have the page */
			/*
			 * Add page to front of page list.
			 */
			page_add(&plist, pp);
		}
		deltab -= PAGESIZE;

		/* scan forward from front */
		for (deltaf = PAGESIZE; off + deltaf < vp_end;
		    deltaf += PAGESIZE) {
			/*
			 * Call back to the segment driver to verify that
			 * the klustering/read ahead operation makes sense.
			 */
			if (segop_kluster(seg, addr, deltaf))
				break;		/* page not file extension */
			if ((pp = page_create_va(vp, off + deltaf,
			    PAGESIZE, PG_EXCL, seg, addr + deltaf))
			    == NULL)
				break;		/* already have page */

			/*
			 * Add page to end of page list.
			 */
			page_add(&plist, pp);
			plist = plist->p_next;
		}
		*offp = off = off - deltab;
		*lenp = deltab + deltaf;
		ASSERT(off >= vp_off);

		/*
		 * If we ended up getting more than was actually
		 * requested, retract the returned length to only
		 * reflect what was requested.  This might happen
		 * if we were allowed to kluster pages across a
		 * span of (say) 5 frags, and frag size is less
		 * than PAGESIZE.  We need a whole number of
		 * pages to contain those frags, but the returned
		 * size should only allow the returned range to
		 * extend as far as the end of the frags.
		 */
		if ((vp_off + vp_len) < (off + *lenp)) {
			ASSERT(vp_end > off);
			*lenp = vp_end - off;
		}
	}
	return (plist);
}
/*
 * Handle pages for this vnode on either side of the page "pp"
 * which has been locked by the caller.  This routine will also
 * do klustering in the range [vp_off, vp_off + vp_len] up
 * until a page which is not found.  The offset and length
 * of pages included is returned in "*offp" and "*lenp".
 *
 * Returns a list of dirty locked pages all ready to be
 * written back.
 */
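/*
 * Note: the pages are gathered with page_lookup_nowait() and pvn_getdirty()
 * below, so every page on the returned list holds its i/o lock plus a
 * "shared" or "exclusive" page lock depending on the flags; the caller
 * issues the write and completes it with pvn_write_done().
 */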
page_t *
pvn_write_kluster(
	struct vnode *vp,
	page_t *pp,
	uoff_t *offp,		/* return values */
	size_t *lenp,		/* return values */
	uoff_t vp_off,
	size_t vp_len,
	int flags)
{
	uoff_t off;
	page_t *dirty;
	size_t deltab, deltaf;
	se_t se;
	uoff_t vp_end;

	off = pp->p_offset;
	/*
	 * Klustering should not be done if we are invalidating
	 * pages since we could destroy pages that belong to
	 * some other process if this is a swap vnode.
	 */
	if (pvn_write_noklust || ((flags & B_INVAL) && IS_SWAPVP(vp))) {
		*offp = off;
		*lenp = PAGESIZE;
		return (pp);
	}

	if (flags & (B_FREE | B_INVAL))
		se = SE_EXCL;
	else
		se = SE_SHARED;

	dirty = pp;
	/*
	 * Scan backwards looking for pages to kluster by incrementing
	 * "deltab" and comparing "off" with "vp_off + deltab" to
	 * avoid "signed" versus "unsigned" conversion problems.
	 */
	for (deltab = PAGESIZE; off >= vp_off + deltab; deltab += PAGESIZE) {
		pp = page_lookup_nowait(vp, off - deltab, se);
		if (pp == NULL)
			break;		/* page not found */
		if (pvn_getdirty(pp, flags | B_DELWRI) == 0)
			break;
		page_add(&dirty, pp);
	}
	deltab -= PAGESIZE;

	vp_end = vp_off + vp_len;
	/* now scan forwards looking for pages to kluster */
	for (deltaf = PAGESIZE; off + deltaf < vp_end; deltaf += PAGESIZE) {
		pp = page_lookup_nowait(vp, off + deltaf, se);
		if (pp == NULL)
			break;		/* page not found */
		if (pvn_getdirty(pp, flags | B_DELWRI) == 0)
			break;
		page_add(&dirty, pp);
		dirty = dirty->p_next;
	}

	*offp = off - deltab;
	*lenp = deltab + deltaf;

	return (dirty);
}
/*
 * Generic entry point used to release the "shared/exclusive" lock
 * and the "p_iolock" on pages after i/o is complete.
 */
void
pvn_io_done(page_t *plist)
{
	page_t *pp;

	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);
		page_io_unlock(pp);
		page_unlock(pp);
	}
}
/*
 * Entry point to be used by file system getpage subr's and
 * other such routines which either want to unlock pages (B_ASYNC
 * request) or destroy a list of pages if an error occurred.
 */
void
pvn_read_done(page_t *plist, int flags)
{
	page_t *pp;

	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);
		page_io_unlock(pp);
		if (flags & B_ERROR) {
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		} else {
			(void) page_release(pp, 0);
		}
	}
}
/*
 * Automagic pageout.
 * When memory gets tight, start freeing pages popping out of the
 * write queue.
 */
int	write_free = 1;
pgcnt_t	pages_before_pager = 200;	/* LMXXX */
/*
 * Routine to be called when page-outs complete.
 * The caller, typically fop_putpage, has to explicitly call this routine
 * after waiting for i/o to complete (biowait) to free the list of
 * pages associated with the buffer.  These pages must be locked
 * before i/o is initiated.
 *
 * If a write error occurs, the pages are marked as modified
 * so the write will be re-tried later.
 */
void
pvn_write_done(page_t *plist, int flags)
{
	int dfree = 0;
	int pgrec = 0;
	int pgout = 0;
	int pgpgout = 0;
	int anonpgout = 0;
	int anonfree = 0;
	int fspgout = 0;
	int fsfree = 0;
	int execpgout = 0;
	int execfree = 0;
	page_t *pp;
	struct cpu *cpup;
	struct vnode *vp = NULL;	/* for probe */
	uint_t ppattr;
	ASSERT((flags & B_READ) == 0);

	/*
	 * If we are about to start paging anyway, start freeing pages.
	 */
	if (write_free && freemem < lotsfree + pages_before_pager &&
	    (flags & B_ERROR) == 0) {
		flags |= B_FREE;
	}

	/*
	 * Handle each page involved in the i/o operation.
	 */
	while (plist != NULL) {
		pp = plist;
		ASSERT(PAGE_LOCKED(pp) && page_iolock_assert(pp));
		page_sub(&plist, pp);

		/* Kernel probe support */
		if (vp == NULL)
			vp = pp->p_vnode;

		if (((flags & B_ERROR) == 0) && IS_VMODSORT(vp)) {
			/*
			 * Move page to the top of the v_page list.
			 * Skip pages modified during IO.
			 */
			mutex_enter(page_vnode_mutex(vp));
			if (!hat_ismod(pp))
				vnode_move_page_tail(vp, pp);
			mutex_exit(page_vnode_mutex(vp));
		}

		if (flags & B_ERROR) {
			/*
			 * Write operation failed.  We don't want
			 * to destroy (or free) the page unless B_FORCE
			 * is set. We set the mod bit again and release
			 * all locks on the page so that it will get written
			 * back again later when things are hopefully
			 * better again.
			 * If B_INVAL and B_FORCE is set we really have
			 * to destroy the page.
			 */
			if ((flags & (B_INVAL|B_FORCE)) == (B_INVAL|B_FORCE)) {
				page_io_unlock(pp);
				VN_DISPOSE(pp, B_INVAL, 0, kcred);
			} else {
				hat_setmod_only(pp);
				page_io_unlock(pp);
				page_unlock(pp);
			}
		} else if (flags & B_INVAL) {
			/*
			 * XXX - Failed writes with B_INVAL set are
			 * not handled appropriately.
			 */
			page_io_unlock(pp);
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		} else if (flags & B_FREE || !hat_page_is_mapped(pp)) {
			/*
			 * Update statistics for pages being paged out
			 */
			if (pp->p_vnode) {
				if (IS_SWAPFSVP(pp->p_vnode)) {
					anonpgout++;
				} else {
					if (pp->p_vnode->v_flag & VVMEXEC) {
						execpgout++;
					} else {
						fspgout++;
					}
				}
			}
			page_io_unlock(pp);
			pgout = 1;
			pgpgout++;

			/*
			 * The page_struct_lock need not be acquired to
			 * examine "p_lckcnt" and "p_cowcnt" since we'll
			 * have an "exclusive" lock if the upgrade succeeds.
			 */
			if (page_tryupgrade(pp) &&
			    pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
				/*
				 * Check if someone has reclaimed the
				 * page.  If ref and mod are not set, no
				 * one is using it so we can free it.
				 * The rest of the system is careful
				 * to use the NOSYNC flag to unload
				 * translations set up for i/o w/o
				 * affecting ref and mod bits.
				 *
				 * Obtain a copy of the real hardware
				 * mod bit using hat_pagesync(pp, HAT_DONTZERO)
				 * to avoid having to flush the cache.
				 */
				ppattr = hat_pagesync(pp, HAT_SYNC_DONTZERO |
				    HAT_SYNC_STOPON_MOD);
			ck_refmod:
				if (!(ppattr & (P_REF | P_MOD))) {
					if (hat_page_is_mapped(pp)) {
						/*
						 * Doesn't look like the page
						 * was modified so now we
						 * really have to unload the
						 * translations.  Meanwhile
						 * another CPU could've
						 * modified it so we have to
						 * check again.  We don't loop
						 * forever here because now
						 * the translations are gone
						 * and no one can get a new one
						 * since we have the "exclusive"
						 * lock on the page.
						 */
						(void) hat_pageunload(pp,
						    HAT_FORCE_PGUNLOAD);
						ppattr = hat_page_getattr(pp,
						    P_REF | P_MOD);
						goto ck_refmod;
					}
					/*
					 * Update statistics for pages being
					 * freed
					 */
					if (pp->p_vnode) {
						if (IS_SWAPFSVP(pp->p_vnode)) {
							anonfree++;
						} else {
							if (pp->p_vnode->v_flag
							    & VVMEXEC) {
								execfree++;
							} else {
								fsfree++;
							}
						}
					}
					VN_DISPOSE(pp, B_FREE,
					    (flags & B_DONTNEED), kcred);
					dfree++;
				} else {
					page_unlock(pp);
					pgrec++;
				}
			} else {
				/*
				 * Page is either `locked' in memory
				 * or was reclaimed and now has a
				 * "shared" lock, so release it.
				 */
				page_unlock(pp);
			}
		} else {
			/*
			 * Neither B_FREE nor B_INVAL nor B_ERROR.
			 * Just release locks.
			 */
			page_io_unlock(pp);
			page_unlock(pp);
		}
	}

	CPU_STATS_ENTER_K();
	cpup = CPU;		/* get cpup now that CPU cannot change */
	CPU_STATS_ADDQ(cpup, vm, dfree, dfree);
	CPU_STATS_ADDQ(cpup, vm, pgrec, pgrec);
	CPU_STATS_ADDQ(cpup, vm, pgout, pgout);
	CPU_STATS_ADDQ(cpup, vm, pgpgout, pgpgout);
	CPU_STATS_ADDQ(cpup, vm, anonpgout, anonpgout);
	CPU_STATS_ADDQ(cpup, vm, anonfree, anonfree);
	CPU_STATS_ADDQ(cpup, vm, fspgout, fspgout);
	CPU_STATS_ADDQ(cpup, vm, fsfree, fsfree);
	CPU_STATS_ADDQ(cpup, vm, execpgout, execpgout);
	CPU_STATS_ADDQ(cpup, vm, execfree, execfree);
	CPU_STATS_EXIT_K();
}
/*
 * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED, B_DELWRI,
 * B_TRUNC, B_FORCE}.  B_DELWRI indicates that this page is part of a kluster
 * operation and is only to be considered if it doesn't involve any
 * waiting here.  B_TRUNC indicates that the file is being truncated
 * and so no i/o needs to be done.  B_FORCE indicates that the page
 * must be destroyed so don't try writing it out.
 *
 * The caller must ensure that the page is locked.  Returns 1, if
 * the page should be written back (the "iolock" is held in this
 * case), or 0 if the page has been dealt with or has been
 * unlocked.
 */
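/*
 * Note: within this file pvn_getdirty() is called from pvn_write_kluster()
 * and pvn_vplist_dirty(); when it returns 1 the caller still owns the page
 * and i/o locks and is expected to hand the page to the file system's
 * putapage routine, which eventually completes the write via
 * pvn_write_done().
 */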
int
pvn_getdirty(page_t *pp, int flags)
{
	ASSERT((flags & (B_INVAL | B_FREE)) ?
	    PAGE_EXCL(pp) : PAGE_SHARED(pp));
	ASSERT(PP_ISFREE(pp) == 0);

	/*
	 * If trying to invalidate or free a logically `locked' page,
	 * forget it.  Don't need page_struct_lock to check p_lckcnt and
	 * p_cowcnt as the page is exclusively locked.
	 */
	if ((flags & (B_INVAL | B_FREE)) && !(flags & (B_TRUNC|B_FORCE)) &&
	    (pp->p_lckcnt != 0 || pp->p_cowcnt != 0)) {
		page_unlock(pp);
		return (0);
	}

	/*
	 * Now acquire the i/o lock so we can add it to the dirty
	 * list (if necessary).  We avoid blocking on the i/o lock
	 * in the following cases:
	 *
	 *	If B_DELWRI is set, which implies that this request is
	 *	due to a klustering operation.
	 *
	 *	If this is an async (B_ASYNC) operation and we are not doing
	 *	invalidation (B_INVAL) [The current i/o or fsflush will ensure
	 *	that the page is written out].
	 */
	if ((flags & B_DELWRI) || ((flags & (B_INVAL | B_ASYNC)) == B_ASYNC)) {
		if (!page_io_trylock(pp)) {
			page_unlock(pp);
			return (0);
		}
	} else {
		page_io_lock(pp);
	}

	/*
	 * If we want to free or invalidate the page then
	 * we need to unload it so that anyone who wants
	 * it will have to take a minor fault to get it.
	 * Otherwise, we're just writing the page back so we
	 * need to sync up the hardware and software mod bit to
	 * detect any future modifications.  We clear the
	 * software mod bit when we put the page on the dirty
	 * list.
	 */
	if (flags & (B_INVAL | B_FREE)) {
		(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
	} else {
		(void) hat_pagesync(pp, HAT_SYNC_ZERORM);
	}

	if (!hat_ismod(pp) || (flags & B_TRUNC)) {
		/*
		 * Don't need to add it to the
		 * list after all.
		 */
		page_io_unlock(pp);
		if (flags & B_INVAL) {
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		} else if (flags & B_FREE) {
			VN_DISPOSE(pp, B_FREE, (flags & B_DONTNEED), kcred);
		} else {
			/*
			 * This is advisory path for the callers
			 * of fop_putpage() who prefer freeing the
			 * page _only_ if no one else is accessing it.
			 * E.g. segmap_release()
			 *
			 * The above hat_ismod() check is useless because:
			 * (1) we may not be holding SE_EXCL lock;
			 * (2) we've not unloaded _all_ translations
			 *
			 * Let page_release() do the heavy-lifting.
			 */
			(void) page_release(pp, 1);
		}
		return (0);
	}

	/*
	 * Page is dirty, get it ready for the write back
	 * and add page to the dirty list.
	 */
	hat_clrrefmod(pp);

	/*
	 * If we're going to free the page when we're done
	 * then we can let others try to use it starting now.
	 * We'll detect the fact that they used it when the
	 * i/o is done and avoid freeing the page.
	 */
	if (flags & B_FREE)
		page_downgrade(pp);

	return (1);
}
/*ARGSUSED*/
static int
marker_constructor(void *buf, void *cdrarg, int kmflags)
{
	page_t *mark = buf;

	bzero(mark, sizeof (page_t));
	PP_SETPVN_TAG(mark);
	return (0);
}

void
pvn_init()
{
	marker_cache = kmem_cache_create("marker_cache",
	    sizeof (page_t), 0, marker_constructor,
	    NULL, NULL, NULL, NULL, 0);
}
static inline void
move_marker(struct vnode *vnode, struct page *ref, struct page *mark)
{
	list_remove(&vnode->v_pagecache_list, mark);
	list_insert_before(&vnode->v_pagecache_list, ref, mark);
}
/*
 * Process a vnode's page list for all pages whose offset is >= off.
 * Pages are to either be free'd, invalidated, or written back to disk.
 *
 * An "exclusive" lock is acquired for each page if B_INVAL or B_FREE
 * is specified, otherwise they are "shared" locked.
 *
 * Flags are {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED, B_TRUNC}
 *
 * Special marker page_t's are inserted in the list in order
 * to keep track of where we are in the list when locks are dropped.
 *
 * Note the list is circular and insertions can happen only at the
 * head and tail of the list. The algorithm ensures visiting all pages
 * on the list in the following way:
 *
 *    Drop two marker pages at the end of the list.
 *
 *    Move one marker page backwards towards the start of the list until
 *    it is at the list head, processing the pages passed along the way.
 *
 *    Due to race conditions when the vnode page mutex is dropped,
 *    additional pages can be added to either end of the list, so we'll
 *    continue to move the marker and process pages until it is up against
 *    the end marker.
 *
 * There is one special exit condition. If we are processing a VMODSORT
 * vnode and only writing back modified pages, we can stop as soon as
 * we run into an unmodified page.  This makes fsync(3) operations fast.
 */
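/*
 * Return value (as implemented below): EAGAIN if B_ASYNC is set and another
 * vplist operation already holds VVMLOCK, otherwise 0 or the first non-zero
 * error returned by the *putapage callback.
 */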
int
pvn_vplist_dirty(
	vnode_t *vp,
	uoff_t off,
	int (*putapage)(vnode_t *, page_t *, uoff_t *,
	    size_t *, int, cred_t *),
	int flags,
	cred_t *cred)
{
	page_t *pp;
	page_t *mark;		/* marker page that moves toward head */
	page_t *end;		/* marker page at end of list */
	int err = 0;
	int error;
	se_t se;
	ASSERT(vp->v_type != VCHR);

	if (!vn_has_cached_data(vp))
		return (0);

	/*
	 * Serialize vplist_dirty operations on this vnode by setting VVMLOCK.
	 *
	 * Don't block on VVMLOCK if B_ASYNC is set. This prevents sync()
	 * from getting blocked while flushing pages to a dead NFS server.
	 */
	mutex_enter(&vp->v_lock);
	if ((vp->v_flag & VVMLOCK) && (flags & B_ASYNC)) {
		mutex_exit(&vp->v_lock);
		return (EAGAIN);
	}

	while (vp->v_flag & VVMLOCK)
		cv_wait(&vp->v_cv, &vp->v_lock);

	if (!vn_has_cached_data(vp)) {
		mutex_exit(&vp->v_lock);
		return (0);
	}

	vp->v_flag |= VVMLOCK;
	mutex_exit(&vp->v_lock);

	/*
	 * Set up the marker pages used to walk the list
	 */
	end = kmem_cache_alloc(marker_cache, KM_SLEEP);
	end->p_vnode = vp;
	end->p_offset = (uoff_t)-2;
	mark = kmem_cache_alloc(marker_cache, KM_SLEEP);
	mark->p_vnode = vp;
	mark->p_offset = (uoff_t)-1;

	/*
	 * Grab the lock protecting the vnode's page list
	 * note that this lock is dropped at times in the loop.
	 */
	mutex_enter(page_vnode_mutex(vp));
	if (!vn_has_cached_data(vp))
		goto leave;

	/*
	 * insert the markers and loop through the list of pages
	 */
	vnode_add_page_tail(vp, mark);
	vnode_add_page_tail(vp, end);
	for (;;) {

		/*
		 * If only doing an async write back, then we can
		 * stop as soon as we get to start of the list.
		 */
		if (flags == B_ASYNC && vnode_get_head(vp) == mark)
			break;

		pp = vnode_get_prev_loop(vp, mark);

		/*
		 * otherwise stop when we've gone through all the pages
		 */
		if (pp == end)
			break;

		ASSERT(pp->p_vnode == vp);

		/*
		 * If just flushing dirty pages to disk and this vnode
		 * is using a sorted list of pages, we can stop processing
		 * as soon as we find an unmodified page, since all the
		 * modified pages are visited first.
		 */
		if (IS_VMODSORT(vp) &&
		    !(flags & (B_INVAL | B_FREE | B_TRUNC))) {
			if (!hat_ismod(pp) && !page_io_locked(pp)) {
#ifdef DEBUG
				/*
				 * For debug kernels examine what should be
				 * all the remaining clean pages, asserting
				 * that they are not modified.
				 */
				page_t *chk = pp;
				int attr;

				move_marker(vp, pp, mark);
				do {
					chk = vnode_get_prev_loop(vp, chk);
					ASSERT(chk != end);
					if (chk == mark)
						continue;
					attr = hat_page_getattr(chk, P_MOD |
					    P_REF);
					if ((attr & P_MOD) == 0)
						continue;
					panic("v_pagecache_list not all clean: "
					    "page_t*=%p vnode=%p off=%lx "
					    "attr=0x%x last clean page_t*=%p\n",
					    chk, chk->p_vnode,
					    (long)chk->p_offset, attr, pp);
				} while (chk != vnode_get_head(vp));
#endif
				break;
			} else if (!(flags & B_ASYNC) && !hat_ismod(pp)) {
				/*
				 * Couldn't get io lock, wait until IO is done.
				 * Block only for sync IO since we don't want
				 * to block async IO.
				 */
				mutex_exit(page_vnode_mutex(vp));
				page_io_wait(pp);
				mutex_enter(page_vnode_mutex(vp));
				continue;
			}
		}

		/*
		 * Skip this page if the offset is out of the desired range.
		 * Just move the marker and continue.
		 */
		if (pp->p_offset < off) {
			move_marker(vp, pp, mark);
			continue;
		}

		/*
		 * If we are supposed to invalidate or free this
		 * page, then we need an exclusive lock.
		 */
		se = (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED;

		/*
		 * We must acquire the page lock for all synchronous
		 * operations (invalidate, free and write).
		 */
		if ((flags & B_INVAL) != 0 || (flags & B_ASYNC) == 0) {
			/*
			 * If the page_lock() drops the mutex
			 * we must retry the loop.
			 */
			if (!page_lock(pp, se, vp, P_NO_RECLAIM))
				continue;

			/*
			 * It's ok to move the marker page now.
			 */
			move_marker(vp, pp, mark);
		} else {

			/*
			 * update the marker page for all remaining cases
			 */
			move_marker(vp, pp, mark);

			/*
			 * For write backs, if we can't lock the page, it's
			 * invalid or in the process of being destroyed.  Skip
			 * it, assuming someone else is writing it.
			 */
			if (!page_trylock(pp, se))
				continue;
		}

		ASSERT(pp->p_vnode == vp);

		/*
		 * Successfully locked the page, now figure out what to
		 * do with it. Free pages are easily dealt with, invalidate
		 * if desired or just go on to the next page.
		 */
		if (PP_ISFREE(pp)) {
			if ((flags & B_INVAL) == 0) {
				page_unlock(pp);
				continue;
			}

			/*
			 * Invalidate (destroy) the page.
			 */
			mutex_exit(page_vnode_mutex(vp));
			page_destroy_free(pp);
			mutex_enter(page_vnode_mutex(vp));
			continue;
		}

		/*
		 * pvn_getdirty() figures out what to do with a dirty page.
		 * If the page is dirty, the putapage() routine will write it
		 * and will kluster any other adjacent dirty pages it can.
		 *
		 * pvn_getdirty() and `(*putapage)' unlock the page.
		 */
		mutex_exit(page_vnode_mutex(vp));
		if (pvn_getdirty(pp, flags)) {
			error = (*putapage)(vp, pp, NULL, NULL, flags, cred);
			if (!err)
				err = error;
		}
		mutex_enter(page_vnode_mutex(vp));
	}
	vnode_remove_page(vp, mark);
	vnode_remove_page(vp, end);
leave:
	/*
	 * Release v_pagecache_list mutex, also VVMLOCK and wakeup blocked
	 * threads
	 */
	mutex_exit(page_vnode_mutex(vp));
	kmem_cache_free(marker_cache, mark);
	kmem_cache_free(marker_cache, end);
	mutex_enter(&vp->v_lock);
	vp->v_flag &= ~VVMLOCK;
	cv_broadcast(&vp->v_cv);
	mutex_exit(&vp->v_lock);
	return (err);
}
/*
 * Walk the vp->v_pagecache_list and, for every page, call the callback
 * function pointed to by *page_check. If page_check returns non-zero, mark
 * the page as modified and, if VMODSORT is set, move it to the end of
 * v_pagecache_list. Moving makes sense only if we have at least two pages.
 */
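/*
 * Note: marker pages (PP_ISPVN_TAG) that a concurrent pvn_vplist_dirty()
 * walk may have on the list are skipped here, so page_check is only
 * applied to real pages.
 */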
void
pvn_vplist_setdirty(vnode_t *vp, int (*page_check)(page_t *))
{
	page_t *pp, *next, *end;
	int shuffle;

	mutex_enter(page_vnode_mutex(vp));

	if (!vn_has_cached_data(vp)) {
		mutex_exit(page_vnode_mutex(vp));
		return;
	}

	end = vnode_get_tail(vp);
	pp = vnode_get_head(vp);
	shuffle = IS_VMODSORT(vp) && (pp != end);

	for (;;) {
		next = vnode_get_next_loop(vp, pp);
		if (!PP_ISPVN_TAG(pp) && page_check(pp)) {
			/*
			 * hat_setmod_only() in contrast to hat_setmod() does
			 * not shuffle the pages and does not grab the mutex
			 * page_vnode_mutex. Exactly what we need.
			 */
			hat_setmod_only(pp);
			if (shuffle)
				vnode_move_page_tail(vp, pp);
		}
		/* Stop if we have just processed the last page. */
		if (pp == end)
			break;
		pp = next;
	}

	mutex_exit(page_vnode_mutex(vp));
}
/*
 * Zero out zbytes worth of data. Caller should be aware that this
 * routine may enter back into the fs layer (xxx_getpage). Locks
 * that the xxx_getpage routine may need should not be held while
 * calling this.
 */
void
pvn_vpzero(struct vnode *vp, uoff_t vplen, size_t zbytes)
{
	caddr_t addr;

	ASSERT(vp->v_type != VCHR);

	if (!vn_has_cached_data(vp))
		return;

	/*
	 * zbytes may be zero but there still may be some portion of
	 * a page which needs clearing (since zbytes is a function
	 * of filesystem block size, not pagesize.)
	 */
	if (zbytes == 0 && (PAGESIZE - (vplen & PAGEOFFSET)) == 0)
		return;

	/*
	 * We get the last page and handle the partial
	 * zeroing via kernel mappings.  This will make the page
	 * dirty so that we know that when this page is written
	 * back, the zeroed information will go out with it.  If
	 * the page is not currently in memory, then the kzero
	 * operation will cause it to be brought in.  We use kzero
	 * instead of bzero so that if the page cannot be read in
	 * for any reason, the system will not panic.  We need
	 * to zero out a minimum of the fs given zbytes, but we
	 * might also have to do more to get the entire last page.
	 */

	if ((zbytes + (vplen & MAXBOFFSET)) > MAXBSIZE)
		panic("pvn_vptrunc zbytes");
	addr = segmap_getmapflt(segkmap, vp, vplen,
	    MAX(zbytes, PAGESIZE - (vplen & PAGEOFFSET)), 1, S_WRITE);
	(void) kzero(addr + (vplen & MAXBOFFSET),
	    MAX(zbytes, PAGESIZE - (vplen & PAGEOFFSET)));
	(void) segmap_release(segkmap, addr, SM_WRITE | SM_ASYNC);
}
/*
 * Handles common work of the fop_getpage routines by iterating page by
 * page, calling the getpage helper for each.
 */
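/*
 * Note: if a per-page getpage call fails partway through, the loop below
 * releases any pages already stored in pl[] and clears those entries
 * before returning the error.
 */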
int
pvn_getpages(
	int (*getpage)(vnode_t *, uoff_t, size_t, uint_t *, page_t *[],
	    size_t, struct seg *, caddr_t, enum seg_rw, cred_t *),
	struct vnode *vp,
	uoff_t off,
	size_t len,
	uint_t *protp,
	page_t *pl[],
	size_t plsz,
	struct seg *seg,
	caddr_t addr,
	enum seg_rw rw,
	struct cred *cred)
{
	page_t **ppp;
	uoff_t o, eoff;
	size_t sz, xlen;
	int err;

	/* ensure that we have enough space */
	ASSERT(pl == NULL || plsz >= len);
	/*
	 * Loop one page at a time and let getapage function fill
	 * in the next page in array.  We only allow one page to be
	 * returned at a time (except for the last page) so that we
	 * don't have any problems with duplicates and other such
	 * painful problems.  This is a very simple minded algorithm,
	 * but it does the job correctly.  We hope that the cost of a
	 * getapage call for a resident page that we might have been
	 * able to get from an earlier call doesn't cost too much.
	 */
	ppp = pl;
	sz = (pl != NULL) ? PAGESIZE : 0;
	eoff = off + len;
	xlen = len;
	for (o = off; o < eoff; o += PAGESIZE, addr += PAGESIZE,
	    xlen -= PAGESIZE) {
		if (o + PAGESIZE >= eoff && pl != NULL) {
			/*
			 * Last time through - allow all of
			 * what's left of the pl[] array to be used.
			 */
			sz = plsz - (o - off);
		}
		err = (*getpage)(vp, o, xlen, protp, ppp, sz, seg, addr,
		    rw, cred);
		if (err) {
			/*
			 * Release any pages we already got.
			 */
			if (o > off && pl != NULL) {
				for (ppp = pl; *ppp != NULL; *ppp++ = NULL)
					(void) page_release(*ppp, 1);
			}
			break;
		}
		if (pl != NULL)
			ppp++;
	}
	return (err);
}
/*
 * Initialize the page list array.
 */
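/*
 * (Expanded description, per the code below.)  Pages on the i/o list that
 * precede "off" are released; then up to plsz bytes worth of pages are
 * stored into pl[], each with its i/o lock dropped and, unless rw is
 * S_CREATE, its page lock downgraded to "shared"; the array is NULL
 * terminated and any remaining pages are released.
 */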
/*ARGSUSED*/
void
pvn_plist_init(page_t *pp, page_t *pl[], size_t plsz,
    uoff_t off, size_t io_len, enum seg_rw rw)
{
	ssize_t sz;
	page_t *ppcur, **ppp;
	/*
	 * Set up to load plsz worth
	 * starting at the needed page.
	 */
	while (pp != NULL && pp->p_offset != off) {
		/*
		 * Remove page from the i/o list,
		 * release the i/o and the page lock.
		 */
		ppcur = pp;
		page_sub(&pp, ppcur);
		page_io_unlock(ppcur);
		(void) page_release(ppcur, 1);
	}

	if (pp == NULL) {
		pl[0] = NULL;
		return;
	}

	sz = plsz;

	/*
	 * Initialize the page list array.
	 */
	ppp = pl;
	do {
		ppcur = pp;
		*ppp++ = ppcur;
		page_sub(&pp, ppcur);
		page_io_unlock(ppcur);
		if (rw != S_CREATE)
			page_downgrade(ppcur);
		sz -= PAGESIZE;
	} while (sz > 0 && pp != NULL);
	*ppp = NULL;		/* terminate list */

	/*
	 * Now free the remaining pages that weren't
	 * loaded in the page list.
	 */
	while (pp != NULL) {
		ppcur = pp;
		page_sub(&pp, ppcur);
		page_io_unlock(ppcur);
		(void) page_release(ppcur, 1);
	}
}