/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 * $FreeBSD: /repoman/r/ncvs/src/sys/nfsclient/nfs_bio.c,v 1.130 2004/04/14 23:23:55 peadar Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_bio.c,v 1.45 2008/07/18 00:09:39 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/msfbuf.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <vm/vm_page2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
static struct buf *nfs_getcacheblk(struct vnode *vp, off_t loffset,
				   int size, struct thread *td);
static int nfs_check_dirent(struct nfs_dirent *dp, int maxlen);
static void nfsiodone_sync(struct bio *bio);
static void nfs_readrpc_bio_done(nfsm_info_t info);
static void nfs_writerpc_bio_done(nfsm_info_t info);
static void nfs_commitrpc_bio_done(nfsm_info_t info);
/*
 * Vnode op for VM getpages.
 *
 * nfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		int a_reqpage, vm_ooffset_t a_offset)
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	struct thread *td = curthread;		/* XXX */
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	char *kva;
	struct vnode *vp;
	struct nfsmount *nmp;
	vm_page_t *pages;
	vm_page_t m;
	struct msf_buf *msf;

	vp = ap->a_vp;
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	npages = btoc(count);
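
	/*
	 * Worked example of the conversion above: btoc() rounds a byte
	 * count up to whole pages, so with PAGE_SIZE 4096 a count of
	 * 16384 yields npages = 4 and a count of 5000 yields npages = 2.
	 */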

	/*
	 * NOTE that partially valid pages may occur in cases other
	 * than file EOF, such as when a file is partially written and
	 * ftruncate()-extended to a larger size.  It is also possible
	 * for the valid bits to be set on garbage beyond the file EOF and
	 * clear in the area before EOF (e.g. m->valid == 0xfc), which can
	 * occur due to vtruncbuf() and the buffer cache's handling of
	 * pages which 'straddle' buffers or when b_bufsize is not a
	 * multiple of PAGE_SIZE.... the buffer cache cannot normally
	 * clear the extra bits.  This kind of situation occurs when you
	 * make a small write() (m->valid == 0x03) and then mmap() and
	 * fault in the buffer (m->valid == 0xFF).  When NFS flushes the
	 * buffer (vinvalbuf() m->valid == 0xFC) we are left with a mess.
	 *
	 * This is combined with the possibility that the pages are partially
	 * dirty or that there is a buffer backing the pages that is dirty
	 * (even if m->dirty is 0).
	 *
	 * To solve this problem several hacks have been made:  (1) NFS
	 * guarantees that the IO block size is a multiple of PAGE_SIZE and
	 * (2) The buffer cache, when invalidating an NFS buffer, will
	 * disregard the buffer's fragmentary b_bufsize and invalidate
	 * the whole page rather than just the piece the buffer owns.
	 *
	 * This allows us to assume that a partially valid page found here
	 * is fully valid (vm_fault will zero out areas of the page not
	 * marked as valid).
	 */
	m = pages[ap->a_reqpage];
	if (m->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vnode_pager_freepage(pages[i]);
		}
		return(0);
	}

	/*
	 * Use an MSF_BUF as a medium to retrieve data from the pages.
	 */
	msf_map_pagelist(&msf, pages, npages, 0);
	KKASSERT(msf);
	kva = msf_buf_kva(msf);

	iov.iov_base = kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = nfs_readrpc_uio(vp, &uio);
	msf_buf_free(msf);

	if (error && ((int)uio.uio_resid == count)) {
		kprintf("nfs_getpages: error %d\n", error);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */
	size = count - (int)uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.
		 */
		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, size - toff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
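			/*
			 * Worked example of the arithmetic above: with
			 * PAGE_SIZE 4096 and size 10000, page 2 has
			 * toff 8192, so bytes 0..1807 of that page are
			 * marked valid and the rest is left invalid for
			 * vm_fault to zero.
			 */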
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
}

/*
 * Vnode op for VM putpages.
 *
 * The pmap modified bit was cleared prior to the putpages and probably
 * couldn't get set again until after our I/O completed, since the page
 * should not be mapped.  But don't count on it.  The m->dirty bits must
 * be completely cleared when we finish even if the count is truncated.
 *
 * nfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count, int a_sync,
 *		int *a_rtvals, vm_ooffset_t a_offset)
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct thread *td = curthread;
	struct uio uio;
	struct iovec iov;
	char *kva;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;
	struct msf_buf *msf;

	vp = ap->a_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}

	/*
	 * Use an MSF_BUF as a medium to retrieve data from the pages.
	 */
	msf_map_pagelist(&msf, pages, npages, 0);
	KKASSERT(msf);
	kva = msf_buf_kva(msf);

	iov.iov_base = kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = (size_t)count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	error = nfs_writerpc_uio(vp, &uio, &iomode, &must_commit);

	msf_buf_free(msf);

	if (error == 0) {
		int nwritten;

		nwritten = round_page(count - (int)uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit)
			nfs_clearcommit(vp->v_mount);
	}
	return rtvals[0];
}

/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct vattr vattr;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	off_t lbn, rabn;
	off_t raoffset;
	off_t loffset;
	int seqcount;
	int nra, error = 0;
	int boff = 0;
	size_t n;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
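
	/*
	 * Worked example of the line above: the upper bits of ioflag
	 * carry the caller's sequential-access heuristic.  Assuming the
	 * usual BKVASIZE of 16384, a heuristic of 4 with biosize 8192
	 * gives seqcount = (4 * 8192) / 16384 = 2 read-ahead blocks.
	 */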

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * NFS:		If local changes have been made and this is a
	 *		directory, the directory must be invalidated and
	 *		the attribute cache must be cleared.
	 *
	 *		GETATTR is called to synchronize the file size.
	 *
	 *		If remote changes are detected local data is flushed
	 *		and the cache is invalidated.
	 *
	 *		NOTE: In the normal case the attribute cache is not
	 *		cleared which means GETATTR may use cached data and
	 *		not immediately detect changes made on the server.
	 */
	if ((np->n_flag & NLMODIFIED) && vp->v_type == VDIR) {
		nfs_invaldir(vp);
		error = nfs_vinvalbuf(vp, V_SAVE, 1);
		if (error)
			return (error);
		np->n_attrstamp = 0;
	}
	error = VOP_GETATTR(vp, &vattr);
	if (error)
		return (error);
	if (np->n_flag & NRMODIFIED) {
		if (vp->v_type == VDIR)
			nfs_invaldir(vp);
		error = nfs_vinvalbuf(vp, V_SAVE, 1);
		if (error)
			return (error);
		np->n_flag &= ~NRMODIFIED;
	}

	/*
	 * Loop until uio exhausted or we hit EOF
	 */
	do {
		bp = NULL;

		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;
			lbn = uio->uio_offset / biosize;
			boff = uio->uio_offset & (biosize - 1);
			loffset = (off_t)lbn * biosize;
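
			/*
			 * Worked example of the decomposition above: with
			 * biosize 8192 and uio_offset 12288, lbn = 1,
			 * boff = 4096 and loffset = 8192, i.e. we are
			 * 4096 bytes into the second buffer-cache block.
			 */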

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0 && nfs_asyncok(nmp)) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
					rabn = lbn + 1 + nra;
					raoffset = (off_t)rabn * biosize;
					if (findblk(vp, raoffset, FINDBLK_TEST) == NULL) {
						rabp = nfs_getcacheblk(vp, raoffset, biosize, td);
						if (!rabp)
							return (EINTR);
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_cmd = BUF_CMD_READ;
							vfs_busy_pages(vp, rabp);
							nfs_asyncio(vp, &rabp->b_bio2);
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/*
			 * Obtain the buffer cache block.  Figure out the buffer size
			 * when we are at EOF.  If we are modifying the size of the
			 * buffer based on an EOF condition we need to hold
			 * nfs_rslock() through obtaining the buffer to prevent
			 * a potential writer-appender from messing with n_size.
			 * Otherwise we may accidentally truncate the buffer and
			 * lose dirty data.
			 *
			 * Note that bcount is *not* DEV_BSIZE aligned.
			 */
			if (loffset + boff >= np->n_size) {
				n = 0;
				break;
			}
			bp = nfs_getcacheblk(vp, loffset, biosize, td);

			if (bp == NULL)
				return (EINTR);

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_cmd = BUF_CMD_READ;
				bp->b_bio2.bio_done = nfsiodone_sync;
				bp->b_bio2.bio_flags |= BIO_SYNC;
				vfs_busy_pages(vp, bp);
				error = nfs_doio(vp, &bp->b_bio2, td);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * boff is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */
			n = biosize - boff;
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			if (loffset + boff + n > np->n_size)
				n = np->n_size - loffset - boff;
			break;
		case VLNK:
			biosize = min(NFS_MAXPATHLEN, np->n_size);
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, (off_t)0, biosize, td);
			if (bp == NULL)
				return (EINTR);
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_cmd = BUF_CMD_READ;
				bp->b_bio2.bio_done = nfsiodone_sync;
				bp->b_bio2.bio_flags |= BIO_SYNC;
				vfs_busy_pages(vp, bp);
				error = nfs_doio(vp, &bp->b_bio2, td);
				if (error) {
					bp->b_flags |= B_ERROR | B_INVAL;
					brelse(bp);
					return (error);
				}
			}
			n = szmin(uio->uio_resid, (size_t)bp->b_bcount - bp->b_resid);
			boff = 0;
			break;
		case VDIR:
			nfsstats.biocache_readdirs++;
			if (np->n_direofoffset &&
			    uio->uio_offset >= np->n_direofoffset
			) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			boff = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			loffset = uio->uio_offset - boff;
			bp = nfs_getcacheblk(vp, loffset, NFS_DIRBLKSIZ, td);
			if (bp == NULL)
				return (EINTR);

			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_cmd = BUF_CMD_READ;
				bp->b_bio2.bio_done = nfsiodone_sync;
				bp->b_bio2.bio_flags |= BIO_SYNC;
				vfs_busy_pages(vp, bp);
				error = nfs_doio(vp, &bp->b_bio2, td);
				if (error)
					brelse(bp);
				while (error == NFSERR_BAD_COOKIE) {
					kprintf("got bad cookie vp %p bp %p\n", vp, bp);
					nfs_invaldir(vp);
					error = nfs_vinvalbuf(vp, 0, 1);
					/*
					 * Yuck! The directory has been modified on the
					 * server.  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuck!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, (off_t)i * NFS_DIRBLKSIZ,
								     NFS_DIRBLKSIZ, td);
						if (!bp)
							return (EINTR);
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_cmd = BUF_CMD_READ;
							bp->b_bio2.bio_done = nfsiodone_sync;
							bp->b_bio2.bio_flags |= BIO_SYNC;
							vfs_busy_pages(vp, bp);
							error = nfs_doio(vp, &bp->b_bio2, td);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 && nfs_asyncok(nmp) &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    loffset + NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    findblk(vp, loffset + NFS_DIRBLKSIZ, FINDBLK_TEST) == NULL
			) {
				rabp = nfs_getcacheblk(vp, loffset + NFS_DIRBLKSIZ,
						       NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_cmd = BUF_CMD_READ;
						vfs_busy_pages(vp, rabp);
						nfs_asyncio(vp, &rabp->b_bio2);
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
			n = szmin(uio->uio_resid,
				  NFS_DIRBLKSIZ - bp->b_resid - (size_t)boff);
			if (np->n_direofoffset &&
			    n > (size_t)(np->n_direofoffset - uio->uio_offset)) {
				n = (size_t)(np->n_direofoffset - uio->uio_offset);
			}
			break;
		default:
			kprintf(" nfs_bioread: type %x unexpected\n",vp->v_type);
			n = 0;
			break;
		}

		switch (vp->v_type) {
		case VREG:
			if (n > 0)
				error = uiomove(bp->b_data + boff, n, uio);
			break;
		case VLNK:
			if (n > 0)
				error = uiomove(bp->b_data + boff, n, uio);
			n = 0;
			break;
		case VDIR:
			if (n > 0) {
				off_t old_off = uio->uio_offset;
				caddr_t cpos, epos;
				struct nfs_dirent *dp;

				/*
				 * We are casting cpos to nfs_dirent, it must be
				 * int-aligned.
				 */
				if (boff & 3) {
					error = EINVAL;
					break;
				}

				cpos = bp->b_data + boff;
				epos = bp->b_data + boff + n;
				while (cpos < epos && error == 0 && uio->uio_resid > 0) {
					dp = (struct nfs_dirent *)cpos;
					error = nfs_check_dirent(dp, (int)(epos - cpos));
					if (error)
						break;
					if (vop_write_dirent(&error, uio, dp->nfs_ino,
					    dp->nfs_type, dp->nfs_namlen, dp->nfs_name)) {
						break;
					}
					cpos += dp->nfs_reclen;
				}
				n = 0;
				if (error == 0) {
					uio->uio_offset = old_off + cpos -
							  bp->b_data - boff;
				}
			}
			break;
		default:
			kprintf(" nfs_bioread: type %x unexpected\n",vp->v_type);
		}
		if (bp)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Userland can supply any 'seek' offset when reading a NFS directory.
 * Validate the structure so we don't panic the kernel.  Note that
 * the element name is nul terminated and the nul is not included
 * in nfs_namlen.
 */
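/*
 * Worked example of the checks below: a dirent whose name is "abc" has
 * nfs_namlen 3 and needs at least nfs_name_off + 3 + 1 bytes, so the
 * smallest legal nfs_reclen is that value rounded up to a multiple of 4.
 * A record claiming an nfs_reclen larger than the remaining buffer, or
 * one that is not 4-byte aligned, is rejected with EINVAL.
 */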
static
int
nfs_check_dirent(struct nfs_dirent *dp, int maxlen)
{
	int nfs_name_off = offsetof(struct nfs_dirent, nfs_name[0]);

	if (nfs_name_off >= maxlen)
		return (EINVAL);
	if (dp->nfs_reclen < nfs_name_off || dp->nfs_reclen > maxlen)
		return (EINVAL);
	if (nfs_name_off + dp->nfs_namlen >= dp->nfs_reclen)
		return (EINVAL);
	if (dp->nfs_reclen & 3)
		return (EINVAL);
	return (0);
}

/*
 * Vnode op for write using bio
 *
 * nfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
int
nfs_write(struct vop_write_args *ap)
{
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	off_t loffset;
	int boff, bytes;
	int error = 0;
	int haverslock = 0;
	int bcount;
	int biosize;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NLMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_flush(vp, MNT_WAIT, td, 0);
			/* error = nfs_vinvalbuf(vp, V_SAVE, 1); */
			if (error)
				return (error);
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
restart:
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch(nfs_rslock(np)) {
		case ENOLCK:
			goto restart;
			/* not reached */
		case EINTR:
		case ERESTART:
			return(EINTR);
			/* not reached */
		default:
			break;
		}
		haverslock = 1;
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (td->td_proc && uio->uio_offset + uio->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		if (haverslock)
			nfs_rsunlock(np);
		return (EFBIG);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;

	do {
		nfsstats.biocache_writes++;
		boff = uio->uio_offset & (biosize-1);
		loffset = uio->uio_offset - boff;
		bytes = (int)szmin((unsigned)(biosize - boff), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.  When extending B_CACHE will be
		 * set if possible.  See UIO_NOCOPY note below.
		 */
		if (uio->uio_offset + bytes > np->n_size) {
			np->n_flag |= NLMODIFIED;
			bp = nfs_meta_setsize(vp, td, loffset, boff, bytes);
		} else {
			bp = nfs_getcacheblk(vp, loffset, biosize, td);
		}
		if (bp == NULL) {
			error = EINTR;
			break;
		}

		/*
		 * Actual bytes in buffer which we care about
		 */
		if (loffset + biosize < np->n_size)
			bcount = biosize;
		else
			bcount = (int)(np->n_size - loffset);

		/*
		 * Avoid a read by setting B_CACHE where the data we
		 * intend to write covers the entire buffer.  Note
		 * that the buffer may have been set to B_CACHE by
		 * nfs_meta_setsize() above or otherwise inherited the
		 * flag, but if B_CACHE isn't set the buffer may be
		 * uninitialized and must be zero'd to accommodate
		 * future seek+write's.
		 *
		 * See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * When doing a UIO_NOCOPY write the buffer is not
		 * overwritten and we cannot just set B_CACHE unconditionally
		 * for full-block writes.
		 */
		if (boff == 0 && bytes == biosize &&
		    uio->uio_segflg != UIO_NOCOPY) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~(B_ERROR | B_INVAL);
		}

		/*
		 * b_resid may be set due to file EOF if we extended out.
		 * The NFS bio code will zero the difference anyway so
		 * just acknowledge the fact and set b_resid to 0.
		 */
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_cmd = BUF_CMD_READ;
			bp->b_bio2.bio_done = nfsiodone_sync;
			bp->b_bio2.bio_flags |= BIO_SYNC;
			vfs_busy_pages(vp, bp);
			error = nfs_doio(vp, &bp->b_bio2, td);
			if (error) {
				brelse(bp);
				break;
			}
			bp->b_resid = 0;
		}
		np->n_flag |= NLMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			kprintf("NFS append race @%08llx:%d\n",
				(long long)bp->b_bio2.bio_offset,
				bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
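		/*
		 * Worked example of the test below: if the buffer already
		 * has dirty bytes [512,1024) and the new write covers
		 * [1024,2048), boff equals b_dirtyend so the ranges are
		 * contiguous and simply merge to [512,2048).  A write at
		 * [3000,3500) instead satisfies boff > b_dirtyend, so the
		 * old dirty area is pushed out with bwrite() first.
		 */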
		if (bp->b_dirtyend > 0 &&
		    (boff > bp->b_dirtyend ||
		     (boff + bytes) < bp->b_dirtyoff)
		) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove(bp->b_data + boff, bytes, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 *
		 * The underlying VM pages have been marked valid by
		 * virtue of acquiring the bp.  Because the entire buffer
		 * is marked dirty we do not have to worry about cleaning
		 * out the related dirty bits (and wouldn't really know
		 * how to deal with byte ranges anyway)
		 */
		if (bytes) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = imin(boff, bp->b_dirtyoff);
				bp->b_dirtyend = imax(boff + bytes,
						      bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = boff;
				bp->b_dirtyend = boff + bytes;
			}
		}
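
		/*
		 * Worked example of the update above: with an existing
		 * dirty range [512,1024) and a new write of 488 bytes at
		 * boff 1024, imin/imax extend the range to [512,1512);
		 * with no existing dirty range the write simply becomes
		 * the dirty range [1024,1512).
		 */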

		/*
		 * If the lease is non-cachable or IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 *
		 * If nfs_async is set bawrite() will use an unstable write
		 * (build dirty bufs on the server), so we might as well
		 * push it out with bawrite().  If nfs_async is not set we
		 * use bdwrite() to cache dirty bufs on the client.
		 */
		if (ioflag & IO_SYNC) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if (boff + bytes == biosize && nfs_async) {
			bawrite(bp);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && bytes > 0);

	if (haverslock)
		nfs_rsunlock(np);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_startio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, off_t loffset, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, loffset, size, GETBLK_PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, td))
				return (NULL);
			bp = getblk(vp, loffset, size, 0, 2 * hz);
		}
	} else {
		bp = getblk(vp, loffset, size, 0, 0);
	}

	/*
	 * bio2, the 'device' layer.  Since BIOs use 64 bit byte offsets
	 * now, no translation is necessary.
	 */
	bp->b_bio2.bio_offset = loffset;
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	thread_t td = curthread;

	if (vp->v_flag & VRECLAIMED)
		return (0);

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, 0, "nfsvinval", slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, NULL, td))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, td)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	np->n_flag &= ~(NLMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Return true (non-zero) if the txthread and rxthread are operational
 * and we do not already have too many not-yet-started BIO's built up.
 */
int
nfs_asyncok(struct nfsmount *nmp)
{
	return (nmp->nm_bioqlen < nfs_maxasyncbio &&
		nmp->nm_bioqlen < nmp->nm_maxasync_scaled / NFS_ASYSCALE &&
		nmp->nm_rxstate <= NFSSVC_PENDING &&
		nmp->nm_txstate <= NFSSVC_PENDING);
}
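
/*
 * Typical caller pattern, as used by the read-ahead code in nfs_bioread():
 * check nfs_asyncok() before constructing the buffer, then hand the bio
 * off with nfs_asyncio() so the txthread issues the rpc:
 *
 *	if (nmp->nm_readahead > 0 && nfs_asyncok(nmp)) {
 *		rabp = nfs_getcacheblk(vp, raoffset, biosize, td);
 *		if (rabp && (rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
 *			rabp->b_cmd = BUF_CMD_READ;
 *			vfs_busy_pages(vp, rabp);
 *			nfs_asyncio(vp, &rabp->b_bio2);
 *		}
 *	}
 */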

/*
 * The read-ahead code calls this to queue a bio to the txthread.
 *
 * We don't touch the bio otherwise... that is, we do not even
 * construct or send the initial rpc.  The txthread will do it
 * for us.
 *
 * NOTE! nm_bioqlen is not decremented until the request completes,
 *	 so it does not reflect the number of bio's on bioq.
 */
void
nfs_asyncio(struct vnode *vp, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	KKASSERT(vp->v_tag == VT_NFS);
	BUF_KERNPROC(bp);
	bio->bio_driver_info = vp;
	crit_enter();
	TAILQ_INSERT_TAIL(&nmp->nm_bioq, bio, bio_act);
	atomic_add_int(&nmp->nm_bioqlen, 1);
	crit_exit();
	nfssvc_iod_writer_wakeup(nmp);
}

/*
 * nfs_doio()	- Execute a BIO operation synchronously.  The BIO will be
 *		  completed and its error returned.  The caller is responsible
 *		  for brelse()ing it.  ONLY USE FOR BIO_SYNC IOs!  Otherwise
 *		  our error probe will be against an invalid pointer.
 *
 * nfs_startio()- Execute a BIO operation asynchronously.
 *
 * NOTE: nfs_asyncio() is used to initiate an asynchronous BIO operation,
 *	 which basically just queues it to the txthread.  nfs_startio()
 *	 actually initiates the I/O AFTER it has gotten to the txthread.
 *
 * NOTE: td might be NULL.
 *
 * NOTE: Caller has already busied the I/O.
 */
void
nfs_startio(struct vnode *vp, struct bio *bio, struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct nfsnode *np;
	struct nfsmount *nmp;

	KKASSERT(vp->v_tag == VT_NFS);
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);

	/*
	 * clear B_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~(B_ERROR | B_INVAL);

	KASSERT(bp->b_cmd != BUF_CMD_DONE,
		("nfs_doio: bp %p already marked done!", bp));

	if (bp->b_cmd == BUF_CMD_READ) {
		switch (vp->v_type) {
		case VREG:
			nfsstats.read_bios++;
			nfs_readrpc_bio(vp, bio);
			break;
		case VLNK:
#if 0
			bio->bio_offset = 0;
			nfsstats.readlink_bios++;
			nfs_readlinkrpc_bio(vp, bio);
#else
			nfs_doio(vp, bio, td);
#endif
			break;
		case VDIR:
			/*
			 * NOTE: If nfs_readdirplusrpc_bio() is requested but
			 *	 not supported, it will chain to
			 *	 nfs_readdirrpc_bio().
			 */
#if 0
			nfsstats.readdir_bios++;
			uiop->uio_offset = bio->bio_offset;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS)
				nfs_readdirplusrpc_bio(vp, bio);
			else
				nfs_readdirrpc_bio(vp, bio);
#else
			nfs_doio(vp, bio, td);
#endif
			break;
		default:
			kprintf("nfs_doio: type %x unexpected\n",vp->v_type);
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
			biodone(bio);
			break;
		}
	} else {
		/*
		 * If we only need to commit, try to commit.  If this fails
		 * it will chain through to the write.  Basically all the logic
		 * in nfs_doio() is replicated.
		 */
		KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
		if (bp->b_flags & B_NEEDCOMMIT)
			nfs_commitrpc_bio(vp, bio);
		else
			nfs_writerpc_bio(vp, bio);
	}
}

int
nfs_doio(struct vnode *vp, struct bio *bio, struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0;
	int iomode, must_commit;
	size_t n;
	struct uio uio;
	struct iovec io;

	KKASSERT(vp->v_tag == VT_NFS);
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear B_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~(B_ERROR | B_INVAL);

	KASSERT(bp->b_cmd != BUF_CMD_DONE,
		("nfs_doio: bp %p already marked done!", bp));

	if (bp->b_cmd == BUF_CMD_READ) {
		io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			/*
			 * When reading from a regular file zero-fill any residual.
			 * Note that this residual has nothing to do with NFS short
			 * reads, which nfs_readrpc_uio() will handle for us.
			 *
			 * We have to do this because when we are write extending
			 * a file the server may not have the same notion of
			 * filesize as we do.  Our BIOs should already be sized
			 * (b_bcount) to account for the file EOF.
			 */
			nfsstats.read_bios++;
			uiop->uio_offset = bio->bio_offset;
			error = nfs_readrpc_uio(vp, uiop);
			if (error == 0 && uiop->uio_resid) {
				n = (size_t)bp->b_bcount - uiop->uio_resid;
				bzero(bp->b_data + n, bp->b_bcount - n);
				uiop->uio_resid = 0;
			}
			if (td && td->td_proc && (vp->v_flag & VTEXT) &&
			    np->n_mtime != np->n_vattr.va_mtime.tv_sec) {
				uprintf("Process killed due to text file modification\n");
				ksignal(td->td_proc, SIGKILL);
			}
			break;
		case VLNK:
			uiop->uio_offset = 0;
			nfsstats.readlink_bios++;
			error = nfs_readlinkrpc_uio(vp, uiop);
			break;
		case VDIR:
			nfsstats.readdir_bios++;
			uiop->uio_offset = bio->bio_offset;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
				error = nfs_readdirplusrpc_uio(vp, uiop);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc_uio(vp, uiop);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			kprintf("nfs_doio: type %x unexpected\n",vp->v_type);
			break;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
		bp->b_resid = uiop->uio_resid;
	} else {
		/*
		 * If we only need to commit, try to commit.
		 *
		 * NOTE: The I/O has already been staged for the write and
		 *	 its pages busied, so b_dirtyoff/end is valid.
		 */
		KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = bio->bio_offset + bp->b_dirtyoff;
			retv = nfs_commitrpc_uio(vp, off,
						 bp->b_dirtyend - bp->b_dirtyoff,
						 td);
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				biodone(bio);
				return(0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				nfs_clearcommit(vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */
		if (bio->bio_offset + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - bio->bio_offset;

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
				     - bp->b_dirtyoff;
			uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_bios++;

			if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0)
				iomode = NFSV3WRITE_UNSTABLE;
			else
				iomode = NFSV3WRITE_FILESYNC;

			must_commit = 0;
			error = nfs_writerpc_uio(vp, uiop, &iomode, &must_commit);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file-case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */
			if (!error && iomode == NFSV3WRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0
				    && bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set B_ERROR and report the interruption
			 * by setting B_EINTR.  For the async case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe. XXX
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				crit_enter();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0)
					bdirty(bp);
				if (error)
					bp->b_flags |= B_EINTR;
				crit_exit();
			} else {
				if (error) {
					bp->b_flags |= B_ERROR;
					bp->b_error = np->n_error = error;
					np->n_flag |= NWRITEERR;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
			if (must_commit)
				nfs_clearcommit(vp->v_mount);
			bp->b_resid = uiop->uio_resid;
		} else {
			bp->b_resid = 0;
		}
	}

	/*
	 * I/O was run synchronously, biodone() it and calculate the
	 * error to return.
	 */
	biodone(bio);
	KKASSERT(bp->b_cmd == BUF_CMD_DONE);
	if (bp->b_flags & B_EINTR)
		return (EINTR);
	if (bp->b_flags & B_ERROR)
		return (bp->b_error ? bp->b_error : EIO);
	return (0);
}

/*
 * Used to aid in handling ftruncate() and non-trivial write-extend
 * operations on the NFS client side.  Note that trivial write-extend
 * operations (appending with no write hole) are handled by nfs_write()
 * directly to avoid silly flushes.
 *
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 *
 * File extension no longer has an issue now that the buffer size is
 * fixed.  When extending the intended overwrite area is specified
 * by (boff, bytes).  This function uses the parameters to determine
 * what areas must be zeroed.  If there are no gaps we set B_CACHE.
 */
struct buf *
nfs_meta_setsize(struct vnode *vp, struct thread *td, off_t nbase,
		 int boff, int bytes)
{
	struct nfsnode *np = VTONFS(vp);
	off_t osize = np->n_size;
	off_t nsize;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;
	struct buf *bp;

	nsize = nbase + boff + bytes;
	np->n_size = nsize;

	if (nsize < osize) {
		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point, but it will invalidate pages in
		 * that buffer and zero the appropriate byte range in
		 * the page straddling EOF.
		 */
		error = vtruncbuf(vp, nsize, biosize);

		/*
		 * NFS doesn't do a good job tracking changes in the EOF
		 * so it may not revisit the buffer if the file is extended.
		 *
		 * After truncating just clear B_CACHE on the buffer
		 * straddling EOF.  If the buffer is dirty then clean
		 * out the portion beyond the file EOF.
		 */
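		/*
		 * Worked example of the cleanup below: truncating to
		 * nsize 10000 with biosize 8192 leaves the buffer at
		 * offset 8192 straddling the new EOF; boff becomes
		 * 10000 & 8191 = 1808, so bytes 1808..8191 of that
		 * buffer are bzero'd.
		 */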
		if (error) {
			bp = NULL;
		} else {
			bp = nfs_getcacheblk(vp, nbase, biosize, td);
			if (bp->b_flags & B_DELWRI) {
				if (bp->b_dirtyoff > bp->b_bcount)
					bp->b_dirtyoff = bp->b_bcount;
				if (bp->b_dirtyend > bp->b_bcount)
					bp->b_dirtyend = bp->b_bcount;
				boff = (int)nsize & (biosize - 1);
				bzero(bp->b_data + boff, biosize - boff);
			} else if (nsize != nbase) {
				boff = (int)nsize & (biosize - 1);
				bzero(bp->b_data + boff, biosize - boff);
			}
		}
	} else {
		/*
		 * The newly expanded portions of the buffer should already
		 * be zero'd out if B_CACHE is set.  If B_CACHE is not
		 * set and the buffer is beyond osize we can safely zero it
		 * and set B_CACHE to avoid issuing unnecessary degenerate
		 * read rpcs.
		 *
		 * Don't do this if the caller is going to overwrite the
		 * entire buffer anyway (and also don't set B_CACHE!).
		 * This allows the caller to optimize the operation.
		 */
		KKASSERT(nsize >= 0);
		vnode_pager_setsize(vp, (vm_ooffset_t)nsize);

		bp = nfs_getcacheblk(vp, nbase, biosize, td);
		if ((bp->b_flags & B_CACHE) == 0 && nbase >= osize &&
		    !(boff == 0 && bytes == biosize)
		) {
			bzero(bp->b_data, biosize);
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~(B_ERROR | B_INVAL);
		}
	}
	return(bp);
}

/*
 * Synchronous completion for nfs_doio.  Call bpdone() with elseit=FALSE.
 * Caller is responsible for brelse()'ing the bp.
 */
static void
nfsiodone_sync(struct bio *bio)
{
	bio->bio_flags = 0;
	bpdone(bio->bio_buf, 0);
}

/*
 * nfs read rpc - BIO version
 */
void
nfs_readrpc_bio(struct vnode *vp, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	u_int32_t *tl;
	struct nfsmount *nmp;
	int error = 0, len, tsiz;
	struct nfsm_info *info;

	info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
	info->mrep = NULL;
	info->v3 = NFS_ISV3(vp);

	nmp = VFSTONFS(vp->v_mount);
	tsiz = bp->b_bcount;
	KKASSERT(tsiz <= nmp->nm_rsize);
	if (bio->bio_offset + tsiz > nmp->nm_maxfilesize) {
		error = EFBIG;
		goto nfsmout;
	}
	nfsstats.rpccnt[NFSPROC_READ]++;
	len = tsiz;
	nfsm_reqhead(info, vp, NFSPROC_READ,
		     NFSX_FH(info->v3) + NFSX_UNSIGNED * 3);
	ERROROUT(nfsm_fhtom(info, vp));
	tl = nfsm_build(info, NFSX_UNSIGNED * 3);
	if (info->v3) {
		txdr_hyper(bio->bio_offset, tl);
		*(tl + 2) = txdr_unsigned(len);
	} else {
		*tl++ = txdr_unsigned(bio->bio_offset);
		*tl++ = txdr_unsigned(len);
		*tl = 0;
	}
	info->bio = bio;
	info->done = nfs_readrpc_bio_done;
	nfsm_request_bio(info, vp, NFSPROC_READ, NULL,
			 nfs_vpcred(vp, ND_READ));
	return;
nfsmout:
	kfree(info, M_NFSREQ);
	bp->b_error = error;
	bp->b_flags |= B_ERROR;
	biodone(bio);
}

static void
nfs_readrpc_bio_done(nfsm_info_t info)
{
	struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
	struct bio *bio = info->bio;
	struct buf *bp = bio->bio_buf;
	u_int32_t *tl;
	int attrflag;
	int retlen;
	int eof;
	int error = 0;

	KKASSERT(info->state == NFSM_STATE_DONE);

	if (info->v3) {
		ERROROUT(nfsm_postop_attr(info, info->vp, &attrflag,
					  NFS_LATTR_NOSHRINK));
		NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
		eof = fxdr_unsigned(int, *(tl + 1));
	} else {
		ERROROUT(nfsm_loadattr(info, info->vp, NULL));
		eof = 0;
	}
	NEGATIVEOUT(retlen = nfsm_strsiz(info, nmp->nm_rsize));
	ERROROUT(nfsm_mtobio(info, bio, retlen));
	m_freem(info->mrep);
	info->mrep = NULL;

	/*
	 * No error occurred, if retlen is less than bcount and no EOF
	 * and NFSv3 a zero-fill short read occurred.
	 *
	 * For NFSv2 a short-read indicates EOF.
	 */
	if (retlen < bp->b_bcount && info->v3 && eof == 0) {
		bzero(bp->b_data + retlen, bp->b_bcount - retlen);
		retlen = bp->b_bcount;
	}

	/*
	 * If we hit an EOF we still zero-fill, but return the expected
	 * b_resid anyway.  This should normally not occur since async
	 * BIOs are not used for read-before-write case.  Races against
	 * the server can cause it though and we don't want to leave
	 * garbage in the buffer.
	 */
	if (retlen < bp->b_bcount) {
		bzero(bp->b_data + retlen, bp->b_bcount - retlen);
	}
	bp->b_resid = 0;
	/* bp->b_resid = bp->b_bcount - retlen; */
nfsmout:
	kfree(info, M_NFSREQ);
	if (error) {
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
	}
	biodone(bio);
}

/*
 * nfs write call - BIO version
 *
 * NOTE: Caller has already busied the I/O.
 */
void
nfs_writerpc_bio(struct vnode *vp, struct bio *bio)
{
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = bio->bio_buf;
	u_int32_t *tl;
	int len;
	int iomode;
	int error = 0;
	struct nfsm_info *info;
	off_t offset;

	/*
	 * Setup for actual write.  Just clean up the bio if there
	 * is nothing to do.  b_dirtyoff/end have already been staged
	 * by the bp's pages getting busied.
	 */
	if (bio->bio_offset + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - bio->bio_offset;

	if (bp->b_dirtyend <= bp->b_dirtyoff) {
		bp->b_resid = 0;
		biodone(bio);
		return;
	}
	len = bp->b_dirtyend - bp->b_dirtyoff;
	offset = bio->bio_offset + bp->b_dirtyoff;
	if (offset + len > nmp->nm_maxfilesize) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EFBIG;
		biodone(bio);
		return;
	}
	bp->b_resid = len;
	nfsstats.write_bios++;

	info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
	info->mrep = NULL;
	info->v3 = NFS_ISV3(vp);
	info->info_writerpc.must_commit = 0;
	if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	KKASSERT(len <= nmp->nm_wsize);

	nfsstats.rpccnt[NFSPROC_WRITE]++;
	nfsm_reqhead(info, vp, NFSPROC_WRITE,
		     NFSX_FH(info->v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
	ERROROUT(nfsm_fhtom(info, vp));
	if (info->v3) {
		tl = nfsm_build(info, 5 * NFSX_UNSIGNED);
		txdr_hyper(offset, tl);
		tl += 2;
		*tl++ = txdr_unsigned(len);
		*tl++ = txdr_unsigned(iomode);
		*tl = txdr_unsigned(len);
	} else {
		u_int32_t x;

		tl = nfsm_build(info, 4 * NFSX_UNSIGNED);
		/* Set both "begin" and "current" to non-garbage. */
		x = txdr_unsigned((u_int32_t)offset);
		*tl++ = x;	/* "begin offset" */
		*tl++ = x;	/* "current offset" */
		x = txdr_unsigned(len);
		*tl++ = x;	/* total to this offset */
		*tl = x;	/* size of this write */
	}
	ERROROUT(nfsm_biotom(info, bio, bp->b_dirtyoff, len));
	info->bio = bio;
	info->done = nfs_writerpc_bio_done;
	nfsm_request_bio(info, vp, NFSPROC_WRITE, NULL,
			 nfs_vpcred(vp, ND_WRITE));
	return;
nfsmout:
	kfree(info, M_NFSREQ);
	bp->b_error = error;
	bp->b_flags |= B_ERROR;
	biodone(bio);
}

static void
nfs_writerpc_bio_done(nfsm_info_t info)
{
	struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
	struct nfsnode *np = VTONFS(info->vp);
	struct bio *bio = info->bio;
	struct buf *bp = bio->bio_buf;
	int wccflag = NFSV3_WCCRATTR;
	int iomode = NFSV3WRITE_FILESYNC;
	int commit;
	int rlen;
	int error;
	int len = bp->b_resid;	/* b_resid was set to shortened length */
	u_int32_t *tl;

	if (info->v3) {
		/*
		 * The write RPC returns a before and after mtime.  The
		 * nfsm_wcc_data() macro checks the before n_mtime
		 * against the before time and stores the after time
		 * in the nfsnode's cached vattr and n_mtime field.
		 * The NRMODIFIED bit will be set if the before
		 * time did not match the original mtime.
		 */
		wccflag = NFSV3_WCCCHK;
		ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag));
		if (error == 0) {
			NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED + NFSX_V3WRITEVERF));
			rlen = fxdr_unsigned(int, *tl++);
			if (rlen == 0) {
				error = NFSERR_IO;
				m_freem(info->mrep);
				info->mrep = NULL;
				goto nfsmout;
			} else if (rlen < len) {
#if 0
				/*
				 * XXX what do we do here?
				 */
				backup = len - rlen;
				uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base - backup;
				uiop->uio_iov->iov_len += backup;
				uiop->uio_offset -= backup;
				uiop->uio_resid += backup;
				len = rlen;
#endif
			}
			commit = fxdr_unsigned(int, *tl++);

			/*
			 * Return the lowest commitment level
			 * obtained by any of the RPCs.
			 */
			if (iomode == NFSV3WRITE_FILESYNC)
				iomode = commit;
			else if (iomode == NFSV3WRITE_DATASYNC &&
				 commit == NFSV3WRITE_UNSTABLE)
				iomode = commit;
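			/*
			 * Worked example of the merge above: FILESYNC is
			 * the strongest level and UNSTABLE the weakest,
			 * so if one rpc reported FILESYNC and a later one
			 * reports UNSTABLE, iomode ends up UNSTABLE and
			 * the buffer will still need a commit rpc.
			 */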
			if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
				bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF);
				nmp->nm_state |= NFSSTA_HASWRITEVERF;
			} else if (bcmp(tl, nmp->nm_verf, NFSX_V3WRITEVERF)) {
				info->info_writerpc.must_commit = 1;
				bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF);
			}
		}
	} else {
		ERROROUT(nfsm_loadattr(info, info->vp, NULL));
	}
	m_freem(info->mrep);
	info->mrep = NULL;
	len = 0;
nfsmout:
	if (info->vp->v_mount->mnt_flag & MNT_ASYNC)
		iomode = NFSV3WRITE_FILESYNC;
	bp->b_resid = len;

	/*
	 * End of RPC.  Now clean up the bp.
	 *
	 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
	 * to cluster the buffers needing commit.  This will allow
	 * the system to submit a single commit rpc for the whole
	 * cluster.  We can do this even if the buffer is not 100%
	 * dirty (relative to the NFS blocksize), so we optimize the
	 * append-to-file-case.
	 *
	 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
	 * cleared because write clustering only works for commit
	 * rpc's, not for the data portion of the write).
	 */
	if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		bp->b_flags |= B_NEEDCOMMIT;
		if (bp->b_dirtyoff == 0 && bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
	} else {
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
	}

	/*
	 * For an interrupted write, the buffer is still valid
	 * and the write hasn't been pushed to the server yet,
	 * so we can't set B_ERROR and report the interruption
	 * by setting B_EINTR.  For the async case, B_EINTR
	 * is not relevant, so the rpc attempt is essentially
	 * a noop.  For the case of a V3 write rpc not being
	 * committed to stable storage, the block is still
	 * dirty and requires either a commit rpc or another
	 * write rpc with iomode == NFSV3WRITE_FILESYNC before
	 * the block is reused.  This is indicated by setting
	 * the B_DELWRI and B_NEEDCOMMIT flags.
	 *
	 * If the buffer is marked B_PAGING, it does not reside on
	 * the vp's paging queues so we cannot call bdirty().  The
	 * bp in this case is not an NFS cache block so we should
	 * be safe. XXX
	 */
	if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
		crit_enter();
		bp->b_flags &= ~(B_INVAL|B_NOCACHE);
		if ((bp->b_flags & B_PAGING) == 0)
			bdirty(bp);
		if (error)
			bp->b_flags |= B_EINTR;
		crit_exit();
	} else {
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		}
		bp->b_dirtyoff = bp->b_dirtyend = 0;
	}
	if (info->info_writerpc.must_commit)
		nfs_clearcommit(info->vp->v_mount);
	kfree(info, M_NFSREQ);
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	biodone(bio);
}

/*
 * Nfs Version 3 commit rpc - BIO version
 *
 * This function issues the commit rpc and will chain to a write
 * rpc if necessary.
 */
void
nfs_commitrpc_bio(struct vnode *vp, struct bio *bio)
{
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct buf *bp = bio->bio_buf;
	struct nfsm_info *info;
	int error = 0;
	u_int32_t *tl;

	if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
	info->mrep = NULL;
	info->v3 = 1;

	nfsstats.rpccnt[NFSPROC_COMMIT]++;
	nfsm_reqhead(info, vp, NFSPROC_COMMIT, NFSX_FH(1));
	ERROROUT(nfsm_fhtom(info, vp));
	tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
	txdr_hyper(bio->bio_offset + bp->b_dirtyoff, tl);
	tl += 2;
	*tl = txdr_unsigned(bp->b_dirtyend - bp->b_dirtyoff);
	info->bio = bio;
	info->done = nfs_commitrpc_bio_done;
	nfsm_request_bio(info, vp, NFSPROC_COMMIT, NULL,
			 nfs_vpcred(vp, ND_WRITE));
	return;
nfsmout:
	/*
	 * Chain to write RPC on (early) error
	 */
	kfree(info, M_NFSREQ);
	nfs_writerpc_bio(vp, bio);
}

static void
nfs_commitrpc_bio_done(nfsm_info_t info)
{
	struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
	struct bio *bio = info->bio;
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = info->vp;
	u_int32_t *tl;
	int wccflag = NFSV3_WCCRATTR;
	int error = 0;

	ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag));
	if (error == 0) {
		NULLOUT(tl = nfsm_dissect(info, NFSX_V3WRITEVERF));
		if (bcmp(nmp->nm_verf, tl, NFSX_V3WRITEVERF)) {
			bcopy(tl, nmp->nm_verf, NFSX_V3WRITEVERF);
			error = NFSERR_STALEWRITEVERF;
		}
	}
	m_freem(info->mrep);
	info->mrep = NULL;

	/*
	 * On completion we must chain to a write bio if an
	 * error occurred.  Note that vp was saved above because
	 * info is freed before the chaining call.
	 */
nfsmout:
	kfree(info, M_NFSREQ);
	if (error == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		bp->b_resid = 0;
		biodone(bio);
	} else {
		nfs_writerpc_bio(vp, bio);
	}
}