2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 * $FreeBSD: src/sys/nfs/nfs_vnops.c,v 1.150.2.5 2001/12/20 19:56:28 dillon Exp $
38 * $DragonFly: src/sys/vfs/nfs/nfs_vnops.c,v 1.77 2008/06/08 08:38:06 sephe Exp $
43 * vnode op calls for Sun NFS version 2 and 3
48 #include <sys/param.h>
49 #include <sys/kernel.h>
50 #include <sys/systm.h>
51 #include <sys/resourcevar.h>
53 #include <sys/mount.h>
55 #include <sys/malloc.h>
57 #include <sys/namei.h>
58 #include <sys/nlookup.h>
59 #include <sys/socket.h>
60 #include <sys/vnode.h>
61 #include <sys/dirent.h>
62 #include <sys/fcntl.h>
63 #include <sys/lockf.h>
65 #include <sys/sysctl.h>
69 #include <vm/vm_extern.h>
70 #include <vm/vm_zone.h>
74 #include <vfs/fifofs/fifo.h>
75 #include <vfs/ufs/dir.h>
85 #include "nfsm_subs.h"
88 #include <netinet/in.h>
89 #include <netinet/in_var.h>
91 #include <sys/thread2.h>
97 static int nfsspec_read (struct vop_read_args
*);
98 static int nfsspec_write (struct vop_write_args
*);
99 static int nfsfifo_read (struct vop_read_args
*);
100 static int nfsfifo_write (struct vop_write_args
*);
101 static int nfsspec_close (struct vop_close_args
*);
102 static int nfsfifo_close (struct vop_close_args
*);
103 #define nfs_poll vop_nopoll
104 static int nfs_setattrrpc (struct vnode
*,struct vattr
*,struct ucred
*,struct thread
*);
105 static int nfs_lookup (struct vop_old_lookup_args
*);
106 static int nfs_create (struct vop_old_create_args
*);
107 static int nfs_mknod (struct vop_old_mknod_args
*);
108 static int nfs_open (struct vop_open_args
*);
109 static int nfs_close (struct vop_close_args
*);
110 static int nfs_access (struct vop_access_args
*);
111 static int nfs_getattr (struct vop_getattr_args
*);
112 static int nfs_setattr (struct vop_setattr_args
*);
113 static int nfs_read (struct vop_read_args
*);
114 static int nfs_mmap (struct vop_mmap_args
*);
115 static int nfs_fsync (struct vop_fsync_args
*);
116 static int nfs_remove (struct vop_old_remove_args
*);
117 static int nfs_link (struct vop_old_link_args
*);
118 static int nfs_rename (struct vop_old_rename_args
*);
119 static int nfs_mkdir (struct vop_old_mkdir_args
*);
120 static int nfs_rmdir (struct vop_old_rmdir_args
*);
121 static int nfs_symlink (struct vop_old_symlink_args
*);
122 static int nfs_readdir (struct vop_readdir_args
*);
123 static int nfs_bmap (struct vop_bmap_args
*);
124 static int nfs_strategy (struct vop_strategy_args
*);
125 static int nfs_lookitup (struct vnode
*, const char *, int,
126 struct ucred
*, struct thread
*, struct nfsnode
**);
127 static int nfs_sillyrename (struct vnode
*,struct vnode
*,struct componentname
*);
128 static int nfsspec_access (struct vop_access_args
*);
129 static int nfs_readlink (struct vop_readlink_args
*);
130 static int nfs_print (struct vop_print_args
*);
131 static int nfs_advlock (struct vop_advlock_args
*);
133 static int nfs_nresolve (struct vop_nresolve_args
*);
135 * Global vfs data structures for nfs
137 struct vop_ops nfsv2_vnode_vops
= {
138 .vop_default
= vop_defaultop
,
139 .vop_access
= nfs_access
,
140 .vop_advlock
= nfs_advlock
,
141 .vop_bmap
= nfs_bmap
,
142 .vop_close
= nfs_close
,
143 .vop_old_create
= nfs_create
,
144 .vop_fsync
= nfs_fsync
,
145 .vop_getattr
= nfs_getattr
,
146 .vop_getpages
= nfs_getpages
,
147 .vop_putpages
= nfs_putpages
,
148 .vop_inactive
= nfs_inactive
,
149 .vop_old_link
= nfs_link
,
150 .vop_old_lookup
= nfs_lookup
,
151 .vop_old_mkdir
= nfs_mkdir
,
152 .vop_old_mknod
= nfs_mknod
,
153 .vop_mmap
= nfs_mmap
,
154 .vop_open
= nfs_open
,
155 .vop_poll
= nfs_poll
,
156 .vop_print
= nfs_print
,
157 .vop_read
= nfs_read
,
158 .vop_readdir
= nfs_readdir
,
159 .vop_readlink
= nfs_readlink
,
160 .vop_reclaim
= nfs_reclaim
,
161 .vop_old_remove
= nfs_remove
,
162 .vop_old_rename
= nfs_rename
,
163 .vop_old_rmdir
= nfs_rmdir
,
164 .vop_setattr
= nfs_setattr
,
165 .vop_strategy
= nfs_strategy
,
166 .vop_old_symlink
= nfs_symlink
,
167 .vop_write
= nfs_write
,
168 .vop_nresolve
= nfs_nresolve
172 * Special device vnode ops
174 struct vop_ops nfsv2_spec_vops
= {
175 .vop_default
= spec_vnoperate
,
176 .vop_access
= nfsspec_access
,
177 .vop_close
= nfsspec_close
,
178 .vop_fsync
= nfs_fsync
,
179 .vop_getattr
= nfs_getattr
,
180 .vop_inactive
= nfs_inactive
,
181 .vop_print
= nfs_print
,
182 .vop_read
= nfsspec_read
,
183 .vop_reclaim
= nfs_reclaim
,
184 .vop_setattr
= nfs_setattr
,
185 .vop_write
= nfsspec_write
188 struct vop_ops nfsv2_fifo_vops
= {
189 .vop_default
= fifo_vnoperate
,
190 .vop_access
= nfsspec_access
,
191 .vop_close
= nfsfifo_close
,
192 .vop_fsync
= nfs_fsync
,
193 .vop_getattr
= nfs_getattr
,
194 .vop_inactive
= nfs_inactive
,
195 .vop_print
= nfs_print
,
196 .vop_read
= nfsfifo_read
,
197 .vop_reclaim
= nfs_reclaim
,
198 .vop_setattr
= nfs_setattr
,
199 .vop_write
= nfsfifo_write
202 static int nfs_mknodrpc (struct vnode
*dvp
, struct vnode
**vpp
,
203 struct componentname
*cnp
,
205 static int nfs_removerpc (struct vnode
*dvp
, const char *name
,
207 struct ucred
*cred
, struct thread
*td
);
208 static int nfs_renamerpc (struct vnode
*fdvp
, const char *fnameptr
,
209 int fnamelen
, struct vnode
*tdvp
,
210 const char *tnameptr
, int tnamelen
,
211 struct ucred
*cred
, struct thread
*td
);
212 static int nfs_renameit (struct vnode
*sdvp
,
213 struct componentname
*scnp
,
214 struct sillyrename
*sp
);
219 extern u_int32_t nfs_true
, nfs_false
;
220 extern u_int32_t nfs_xdrneg1
;
221 extern struct nfsstats nfsstats
;
222 extern nfstype nfsv3_type
[9];
223 struct thread
*nfs_iodwant
[NFS_MAXASYNCDAEMON
];
224 struct nfsmount
*nfs_iodmount
[NFS_MAXASYNCDAEMON
];
225 int nfs_numasync
= 0;
227 SYSCTL_DECL(_vfs_nfs
);
229 static int nfsaccess_cache_timeout
= NFS_DEFATTRTIMO
;
230 SYSCTL_INT(_vfs_nfs
, OID_AUTO
, access_cache_timeout
, CTLFLAG_RW
,
231 &nfsaccess_cache_timeout
, 0, "NFS ACCESS cache timeout");
233 static int nfsneg_cache_timeout
= NFS_MINATTRTIMO
;
234 SYSCTL_INT(_vfs_nfs
, OID_AUTO
, neg_cache_timeout
, CTLFLAG_RW
,
235 &nfsneg_cache_timeout
, 0, "NFS NEGATIVE NAMECACHE timeout");
237 static int nfspos_cache_timeout
= NFS_MINATTRTIMO
;
238 SYSCTL_INT(_vfs_nfs
, OID_AUTO
, pos_cache_timeout
, CTLFLAG_RW
,
239 &nfspos_cache_timeout
, 0, "NFS POSITIVE NAMECACHE timeout");
241 static int nfsv3_commit_on_close
= 0;
242 SYSCTL_INT(_vfs_nfs
, OID_AUTO
, nfsv3_commit_on_close
, CTLFLAG_RW
,
243 &nfsv3_commit_on_close
, 0, "write+commit on close, else only write");
245 SYSCTL_INT(_vfs_nfs
, OID_AUTO
, access_cache_hits
, CTLFLAG_RD
,
246 &nfsstats
.accesscache_hits
, 0, "NFS ACCESS cache hit count");
248 SYSCTL_INT(_vfs_nfs
, OID_AUTO
, access_cache_misses
, CTLFLAG_RD
,
249 &nfsstats
.accesscache_misses
, 0, "NFS ACCESS cache miss count");
252 #define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \
253 | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \
254 | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
256 nfs3_access_otw(struct vnode
*vp
, int wmode
,
257 struct thread
*td
, struct ucred
*cred
)
261 int error
= 0, attrflag
;
263 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
264 caddr_t bpos
, dpos
, cp2
;
268 struct nfsnode
*np
= VTONFS(vp
);
270 nfsstats
.rpccnt
[NFSPROC_ACCESS
]++;
271 nfsm_reqhead(vp
, NFSPROC_ACCESS
, NFSX_FH(v3
) + NFSX_UNSIGNED
);
273 nfsm_build(tl
, u_int32_t
*, NFSX_UNSIGNED
);
274 *tl
= txdr_unsigned(wmode
);
275 nfsm_request(vp
, NFSPROC_ACCESS
, td
, cred
);
276 nfsm_postop_attr(vp
, attrflag
, NFS_LATTR_NOSHRINK
);
278 nfsm_dissect(tl
, u_int32_t
*, NFSX_UNSIGNED
);
279 rmode
= fxdr_unsigned(u_int32_t
, *tl
);
281 np
->n_modeuid
= cred
->cr_uid
;
282 np
->n_modestamp
= mycpu
->gd_time_seconds
;
290 * nfs access vnode op.
291 * For nfs version 2, just return ok. File accesses may fail later.
292 * For nfs version 3, use the access rpc to check accessibility. If file modes
293 * are changed on the server, accesses might still fail later.
295 * nfs_access(struct vnode *a_vp, int a_mode, struct ucred *a_cred)
298 nfs_access(struct vop_access_args
*ap
)
300 struct vnode
*vp
= ap
->a_vp
;
301 thread_t td
= curthread
;
303 u_int32_t mode
, wmode
;
304 int v3
= NFS_ISV3(vp
);
305 struct nfsnode
*np
= VTONFS(vp
);
308 * Disallow write attempts on filesystems mounted read-only;
309 * unless the file is a socket, fifo, or a block or character
310 * device resident on the filesystem.
312 if ((ap
->a_mode
& VWRITE
) && (vp
->v_mount
->mnt_flag
& MNT_RDONLY
)) {
313 switch (vp
->v_type
) {
323 * For nfs v3, check to see if we have done this recently, and if
324 * so return our cached result instead of making an ACCESS call.
325 * If not, do an access rpc, otherwise you are stuck emulating
326 * ufs_access() locally using the vattr. This may not be correct,
327 * since the server may apply other access criteria such as
328 * client uid-->server uid mapping that we do not know about.
331 if (ap
->a_mode
& VREAD
)
332 mode
= NFSV3ACCESS_READ
;
335 if (vp
->v_type
!= VDIR
) {
336 if (ap
->a_mode
& VWRITE
)
337 mode
|= (NFSV3ACCESS_MODIFY
| NFSV3ACCESS_EXTEND
);
338 if (ap
->a_mode
& VEXEC
)
339 mode
|= NFSV3ACCESS_EXECUTE
;
341 if (ap
->a_mode
& VWRITE
)
342 mode
|= (NFSV3ACCESS_MODIFY
| NFSV3ACCESS_EXTEND
|
344 if (ap
->a_mode
& VEXEC
)
345 mode
|= NFSV3ACCESS_LOOKUP
;
347 /* XXX safety belt, only make blanket request if caching */
348 if (nfsaccess_cache_timeout
> 0) {
349 wmode
= NFSV3ACCESS_READ
| NFSV3ACCESS_MODIFY
|
350 NFSV3ACCESS_EXTEND
| NFSV3ACCESS_EXECUTE
|
351 NFSV3ACCESS_DELETE
| NFSV3ACCESS_LOOKUP
;
357 * Does our cached result allow us to give a definite yes to
360 if (np
->n_modestamp
&&
361 (mycpu
->gd_time_seconds
< (np
->n_modestamp
+ nfsaccess_cache_timeout
)) &&
362 (ap
->a_cred
->cr_uid
== np
->n_modeuid
) &&
363 ((np
->n_mode
& mode
) == mode
)) {
364 nfsstats
.accesscache_hits
++;
367 * Either a no, or a don't know. Go to the wire.
369 nfsstats
.accesscache_misses
++;
370 error
= nfs3_access_otw(vp
, wmode
, td
, ap
->a_cred
);
372 if ((np
->n_mode
& mode
) != mode
) {
378 if ((error
= nfsspec_access(ap
)) != 0)
382 * Attempt to prevent a mapped root from accessing a file
383 * which it shouldn't. We try to read a byte from the file
384 * if the user is root and the file is not zero length.
385 * After calling nfsspec_access, we should have the correct
388 if (ap
->a_cred
->cr_uid
== 0 && (ap
->a_mode
& VREAD
)
389 && VTONFS(vp
)->n_size
> 0) {
396 auio
.uio_iov
= &aiov
;
400 auio
.uio_segflg
= UIO_SYSSPACE
;
401 auio
.uio_rw
= UIO_READ
;
404 if (vp
->v_type
== VREG
) {
405 error
= nfs_readrpc(vp
, &auio
);
406 } else if (vp
->v_type
== VDIR
) {
408 bp
= kmalloc(NFS_DIRBLKSIZ
, M_TEMP
, M_WAITOK
);
410 aiov
.iov_len
= auio
.uio_resid
= NFS_DIRBLKSIZ
;
411 error
= nfs_readdirrpc(vp
, &auio
);
413 } else if (vp
->v_type
== VLNK
) {
414 error
= nfs_readlinkrpc(vp
, &auio
);
421 * [re]record creds for reading and/or writing if access
422 * was granted. Assume the NFS server will grant read access
423 * for execute requests.
426 if ((ap
->a_mode
& (VREAD
|VEXEC
)) && ap
->a_cred
!= np
->n_rucred
) {
429 crfree(np
->n_rucred
);
430 np
->n_rucred
= ap
->a_cred
;
432 if ((ap
->a_mode
& VWRITE
) && ap
->a_cred
!= np
->n_wucred
) {
435 crfree(np
->n_wucred
);
436 np
->n_wucred
= ap
->a_cred
;
444 * Check to see if the type is ok
445 * and that deletion is not in progress.
446 * For paged in text files, you will need to flush the page cache
447 * if consistency is lost.
449 * nfs_open(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
454 nfs_open(struct vop_open_args
*ap
)
456 struct vnode
*vp
= ap
->a_vp
;
457 struct nfsnode
*np
= VTONFS(vp
);
461 if (vp
->v_type
!= VREG
&& vp
->v_type
!= VDIR
&& vp
->v_type
!= VLNK
) {
463 kprintf("open eacces vtyp=%d\n",vp
->v_type
);
469 * Clear the attribute cache only if opening with write access. It
470 * is unclear if we should do this at all here, but we certainly
471 * should not clear the cache unconditionally simply because a file
474 if (ap
->a_mode
& FWRITE
)
478 * For normal NFS, reconcile changes made locally verses
479 * changes made remotely. Note that VOP_GETATTR only goes
480 * to the wire if the cached attribute has timed out or been
483 * If local modifications have been made clear the attribute
484 * cache to force an attribute and modified time check. If
485 * GETATTR detects that the file has been changed by someone
486 * other then us it will set NRMODIFIED.
488 * If we are opening a directory and local changes have been
489 * made we have to invalidate the cache in order to ensure
490 * that we get the most up-to-date information from the
493 if (np
->n_flag
& NLMODIFIED
) {
495 if (vp
->v_type
== VDIR
) {
496 error
= nfs_vinvalbuf(vp
, V_SAVE
, 1);
502 error
= VOP_GETATTR(vp
, &vattr
);
505 if (np
->n_flag
& NRMODIFIED
) {
506 if (vp
->v_type
== VDIR
)
508 error
= nfs_vinvalbuf(vp
, V_SAVE
, 1);
511 np
->n_flag
&= ~NRMODIFIED
;
514 return (vop_stdopen(ap
));
519 * What an NFS client should do upon close after writing is a debatable issue.
520 * Most NFS clients push delayed writes to the server upon close, basically for
522 * 1 - So that any write errors may be reported back to the client process
523 * doing the close system call. By far the two most likely errors are
524 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
525 * 2 - To put a worst case upper bound on cache inconsistency between
526 * multiple clients for the file.
527 * There is also a consistency problem for Version 2 of the protocol w.r.t.
528 * not being able to tell if other clients are writing a file concurrently,
529 * since there is no way of knowing if the changed modify time in the reply
530 * is only due to the write for this client.
531 * (NFS Version 3 provides weak cache consistency data in the reply that
532 * should be sufficient to detect and handle this case.)
534 * The current code does the following:
535 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
536 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
537 * or commit them (this satisfies 1 and 2 except for the
538 * case where the server crashes after this close but
539 * before the commit RPC, which is felt to be "good
540 * enough". Changing the last argument to nfs_flush() to
541 * a 1 would force a commit operation, if it is felt a
542 * commit is necessary now.
543 * for NQNFS - do nothing now, since 2 is dealt with via leases and
544 * 1 should be dealt with via an fsync() system call for
545 * cases where write errors are important.
547 * nfs_close(struct vnode *a_vp, int a_fflag)
551 nfs_close(struct vop_close_args
*ap
)
553 struct vnode
*vp
= ap
->a_vp
;
554 struct nfsnode
*np
= VTONFS(vp
);
556 thread_t td
= curthread
;
558 if (vp
->v_type
== VREG
) {
559 if (np
->n_flag
& NLMODIFIED
) {
562 * Under NFSv3 we have dirty buffers to dispose of. We
563 * must flush them to the NFS server. We have the option
564 * of waiting all the way through the commit rpc or just
565 * waiting for the initial write. The default is to only
566 * wait through the initial write so the data is in the
567 * server's cache, which is roughly similar to the state
568 * a standard disk subsystem leaves the file in on close().
570 * We cannot clear the NLMODIFIED bit in np->n_flag due to
571 * potential races with other processes, and certainly
572 * cannot clear it if we don't commit.
574 int cm
= nfsv3_commit_on_close
? 1 : 0;
575 error
= nfs_flush(vp
, MNT_WAIT
, td
, cm
);
576 /* np->n_flag &= ~NLMODIFIED; */
578 error
= nfs_vinvalbuf(vp
, V_SAVE
, 1);
582 if (np
->n_flag
& NWRITEERR
) {
583 np
->n_flag
&= ~NWRITEERR
;
592 * nfs getattr call from vfs.
594 * nfs_getattr(struct vnode *a_vp, struct vattr *a_vap)
597 nfs_getattr(struct vop_getattr_args
*ap
)
599 struct vnode
*vp
= ap
->a_vp
;
600 struct nfsnode
*np
= VTONFS(vp
);
606 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
607 int v3
= NFS_ISV3(vp
);
608 thread_t td
= curthread
;
611 * Update local times for special files.
613 if (np
->n_flag
& (NACC
| NUPD
))
616 * First look in the cache.
618 if (nfs_getattrcache(vp
, ap
->a_vap
) == 0)
621 if (v3
&& nfsaccess_cache_timeout
> 0) {
622 nfsstats
.accesscache_misses
++;
623 nfs3_access_otw(vp
, NFSV3ACCESS_ALL
, td
, nfs_vpcred(vp
, ND_CHECK
));
624 if (nfs_getattrcache(vp
, ap
->a_vap
) == 0)
628 nfsstats
.rpccnt
[NFSPROC_GETATTR
]++;
629 nfsm_reqhead(vp
, NFSPROC_GETATTR
, NFSX_FH(v3
));
631 nfsm_request(vp
, NFSPROC_GETATTR
, td
, nfs_vpcred(vp
, ND_CHECK
));
633 nfsm_loadattr(vp
, ap
->a_vap
);
643 * nfs_setattr(struct vnode *a_vp, struct vattr *a_vap, struct ucred *a_cred)
646 nfs_setattr(struct vop_setattr_args
*ap
)
648 struct vnode
*vp
= ap
->a_vp
;
649 struct nfsnode
*np
= VTONFS(vp
);
650 struct vattr
*vap
= ap
->a_vap
;
653 thread_t td
= curthread
;
660 * Setting of flags is not supported.
662 if (vap
->va_flags
!= VNOVAL
)
666 * Disallow write attempts if the filesystem is mounted read-only.
668 if ((vap
->va_flags
!= VNOVAL
|| vap
->va_uid
!= (uid_t
)VNOVAL
||
669 vap
->va_gid
!= (gid_t
)VNOVAL
|| vap
->va_atime
.tv_sec
!= VNOVAL
||
670 vap
->va_mtime
.tv_sec
!= VNOVAL
|| vap
->va_mode
!= (mode_t
)VNOVAL
) &&
671 (vp
->v_mount
->mnt_flag
& MNT_RDONLY
))
673 if (vap
->va_size
!= VNOVAL
) {
674 switch (vp
->v_type
) {
681 if (vap
->va_mtime
.tv_sec
== VNOVAL
&&
682 vap
->va_atime
.tv_sec
== VNOVAL
&&
683 vap
->va_mode
== (mode_t
)VNOVAL
&&
684 vap
->va_uid
== (uid_t
)VNOVAL
&&
685 vap
->va_gid
== (gid_t
)VNOVAL
)
687 vap
->va_size
= VNOVAL
;
691 * Disallow write attempts if the filesystem is
694 if (vp
->v_mount
->mnt_flag
& MNT_RDONLY
)
698 * This is nasty. The RPCs we send to flush pending
699 * data often return attribute information which is
700 * cached via a callback to nfs_loadattrcache(), which
701 * has the effect of changing our notion of the file
702 * size. Due to flushed appends and other operations
703 * the file size can be set to virtually anything,
704 * including values that do not match either the old
705 * or intended file size.
707 * When this condition is detected we must loop to
708 * try the operation again. Hopefully no more
709 * flushing is required on the loop so it works the
710 * second time around. THIS CASE ALMOST ALWAYS
715 error
= nfs_meta_setsize(vp
, td
, vap
->va_size
);
717 if (np
->n_flag
& NLMODIFIED
) {
718 if (vap
->va_size
== 0)
719 error
= nfs_vinvalbuf(vp
, 0, 1);
721 error
= nfs_vinvalbuf(vp
, V_SAVE
, 1);
724 * note: this loop case almost always happens at
725 * least once per truncation.
727 if (error
== 0 && np
->n_size
!= vap
->va_size
)
729 np
->n_vattr
.va_size
= vap
->va_size
;
732 } else if ((vap
->va_mtime
.tv_sec
!= VNOVAL
||
733 vap
->va_atime
.tv_sec
!= VNOVAL
) && (np
->n_flag
& NLMODIFIED
) &&
734 vp
->v_type
== VREG
&&
735 (error
= nfs_vinvalbuf(vp
, V_SAVE
, 1)) == EINTR
739 error
= nfs_setattrrpc(vp
, vap
, ap
->a_cred
, td
);
742 * Sanity check if a truncation was issued. This should only occur
743 * if multiple processes are racing on the same file.
745 if (error
== 0 && vap
->va_size
!= VNOVAL
&&
746 np
->n_size
!= vap
->va_size
) {
747 kprintf("NFS ftruncate: server disagrees on the file size: %lld/%lld/%lld\n", tsize
, vap
->va_size
, np
->n_size
);
750 if (error
&& vap
->va_size
!= VNOVAL
) {
751 np
->n_size
= np
->n_vattr
.va_size
= tsize
;
752 vnode_pager_setsize(vp
, np
->n_size
);
758 * Do an nfs setattr rpc.
761 nfs_setattrrpc(struct vnode
*vp
, struct vattr
*vap
,
762 struct ucred
*cred
, struct thread
*td
)
764 struct nfsv2_sattr
*sp
;
765 struct nfsnode
*np
= VTONFS(vp
);
768 caddr_t bpos
, dpos
, cp2
;
770 int error
= 0, wccflag
= NFSV3_WCCRATTR
;
771 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
772 int v3
= NFS_ISV3(vp
);
774 nfsstats
.rpccnt
[NFSPROC_SETATTR
]++;
775 nfsm_reqhead(vp
, NFSPROC_SETATTR
, NFSX_FH(v3
) + NFSX_SATTR(v3
));
778 nfsm_v3attrbuild(vap
, TRUE
);
779 nfsm_build(tl
, u_int32_t
*, NFSX_UNSIGNED
);
782 nfsm_build(sp
, struct nfsv2_sattr
*, NFSX_V2SATTR
);
783 if (vap
->va_mode
== (mode_t
)VNOVAL
)
784 sp
->sa_mode
= nfs_xdrneg1
;
786 sp
->sa_mode
= vtonfsv2_mode(vp
->v_type
, vap
->va_mode
);
787 if (vap
->va_uid
== (uid_t
)VNOVAL
)
788 sp
->sa_uid
= nfs_xdrneg1
;
790 sp
->sa_uid
= txdr_unsigned(vap
->va_uid
);
791 if (vap
->va_gid
== (gid_t
)VNOVAL
)
792 sp
->sa_gid
= nfs_xdrneg1
;
794 sp
->sa_gid
= txdr_unsigned(vap
->va_gid
);
795 sp
->sa_size
= txdr_unsigned(vap
->va_size
);
796 txdr_nfsv2time(&vap
->va_atime
, &sp
->sa_atime
);
797 txdr_nfsv2time(&vap
->va_mtime
, &sp
->sa_mtime
);
799 nfsm_request(vp
, NFSPROC_SETATTR
, td
, cred
);
802 nfsm_wcc_data(vp
, wccflag
);
804 nfsm_loadattr(vp
, (struct vattr
*)0);
812 nfs_cache_setvp(struct nchandle
*nch
, struct vnode
*vp
, int nctimeout
)
818 cache_setvp(nch
, vp
);
819 cache_settimeout(nch
, nctimeout
);
823 * NEW API CALL - replaces nfs_lookup(). However, we cannot remove
824 * nfs_lookup() until all remaining new api calls are implemented.
826 * Resolve a namecache entry. This function is passed a locked ncp and
827 * must call nfs_cache_setvp() on it as appropriate to resolve the entry.
830 nfs_nresolve(struct vop_nresolve_args
*ap
)
832 struct thread
*td
= curthread
;
833 struct namecache
*ncp
;
844 /******NFSM MACROS********/
845 struct mbuf
*mb
, *mrep
, *mreq
, *mb2
, *md
;
846 caddr_t bpos
, dpos
, cp
, cp2
;
853 if ((error
= vget(dvp
, LK_SHARED
)) != 0)
858 nfsstats
.lookupcache_misses
++;
859 nfsstats
.rpccnt
[NFSPROC_LOOKUP
]++;
860 ncp
= ap
->a_nch
->ncp
;
862 nfsm_reqhead(dvp
, NFSPROC_LOOKUP
,
863 NFSX_FH(v3
) + NFSX_UNSIGNED
+ nfsm_rndup(len
));
865 nfsm_strtom(ncp
->nc_name
, len
, NFS_MAXNAMLEN
);
866 nfsm_request(dvp
, NFSPROC_LOOKUP
, td
, ap
->a_cred
);
869 * Cache negatve lookups to reduce NFS traffic, but use
870 * a fast timeout. Otherwise use a timeout of 1 tick.
871 * XXX we should add a namecache flag for no-caching
872 * to uncache the negative hit as soon as possible, but
873 * we cannot simply destroy the entry because it is used
874 * as a placeholder by the caller.
877 nfs_cache_setvp(ap
->a_nch
, NULL
, nfsneg_cache_timeout
);
878 nfsm_postop_attr(dvp
, attrflag
, NFS_LATTR_NOSHRINK
);
884 * Success, get the file handle, do various checks, and load
885 * post-operation data from the reply packet. Theoretically
886 * we should never be looking up "." so, theoretically, we
887 * should never get the same file handle as our directory. But
888 * we check anyway. XXX
890 * Note that no timeout is set for the positive cache hit. We
891 * assume, theoretically, that ESTALE returns will be dealt with
892 * properly to handle NFS races and in anycase we cannot depend
893 * on a timeout to deal with NFS open/create/excl issues so instead
894 * of a bad hack here the rest of the NFS client code needs to do
897 nfsm_getfh(fhp
, fhsize
, v3
);
900 if (NFS_CMPFH(np
, fhp
, fhsize
)) {
904 error
= nfs_nget(dvp
->v_mount
, fhp
, fhsize
, &np
);
913 nfsm_postop_attr(nvp
, attrflag
, NFS_LATTR_NOSHRINK
);
914 nfsm_postop_attr(dvp
, attrflag
, NFS_LATTR_NOSHRINK
);
916 nfsm_loadattr(nvp
, NULL
);
918 nfs_cache_setvp(ap
->a_nch
, nvp
, nfspos_cache_timeout
);
932 * 'cached' nfs directory lookup
934 * NOTE: cannot be removed until NFS implements all the new n*() API calls.
936 * nfs_lookup(struct vnode *a_dvp, struct vnode **a_vpp,
937 * struct componentname *a_cnp)
940 nfs_lookup(struct vop_old_lookup_args
*ap
)
942 struct componentname
*cnp
= ap
->a_cnp
;
943 struct vnode
*dvp
= ap
->a_dvp
;
944 struct vnode
**vpp
= ap
->a_vpp
;
945 int flags
= cnp
->cn_flags
;
950 struct nfsmount
*nmp
;
951 caddr_t bpos
, dpos
, cp2
;
952 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
956 int lockparent
, wantparent
, error
= 0, attrflag
, fhsize
;
957 int v3
= NFS_ISV3(dvp
);
960 * Read-only mount check and directory check.
963 if ((dvp
->v_mount
->mnt_flag
& MNT_RDONLY
) &&
964 (cnp
->cn_nameiop
== NAMEI_DELETE
|| cnp
->cn_nameiop
== NAMEI_RENAME
))
967 if (dvp
->v_type
!= VDIR
)
971 * Look it up in the cache. Note that ENOENT is only returned if we
972 * previously entered a negative hit (see later on). The additional
973 * nfsneg_cache_timeout check causes previously cached results to
974 * be instantly ignored if the negative caching is turned off.
976 lockparent
= flags
& CNP_LOCKPARENT
;
977 wantparent
= flags
& (CNP_LOCKPARENT
|CNP_WANTPARENT
);
978 nmp
= VFSTONFS(dvp
->v_mount
);
986 nfsstats
.lookupcache_misses
++;
987 nfsstats
.rpccnt
[NFSPROC_LOOKUP
]++;
988 len
= cnp
->cn_namelen
;
989 nfsm_reqhead(dvp
, NFSPROC_LOOKUP
,
990 NFSX_FH(v3
) + NFSX_UNSIGNED
+ nfsm_rndup(len
));
992 nfsm_strtom(cnp
->cn_nameptr
, len
, NFS_MAXNAMLEN
);
993 nfsm_request(dvp
, NFSPROC_LOOKUP
, cnp
->cn_td
, cnp
->cn_cred
);
995 nfsm_postop_attr(dvp
, attrflag
, NFS_LATTR_NOSHRINK
);
999 nfsm_getfh(fhp
, fhsize
, v3
);
1002 * Handle RENAME case...
1004 if (cnp
->cn_nameiop
== NAMEI_RENAME
&& wantparent
) {
1005 if (NFS_CMPFH(np
, fhp
, fhsize
)) {
1009 error
= nfs_nget(dvp
->v_mount
, fhp
, fhsize
, &np
);
1016 nfsm_postop_attr(newvp
, attrflag
, NFS_LATTR_NOSHRINK
);
1017 nfsm_postop_attr(dvp
, attrflag
, NFS_LATTR_NOSHRINK
);
1019 nfsm_loadattr(newvp
, (struct vattr
*)0);
1024 cnp
->cn_flags
|= CNP_PDIRUNLOCK
;
1029 if (flags
& CNP_ISDOTDOT
) {
1031 cnp
->cn_flags
|= CNP_PDIRUNLOCK
;
1032 error
= nfs_nget(dvp
->v_mount
, fhp
, fhsize
, &np
);
1034 vn_lock(dvp
, LK_EXCLUSIVE
| LK_RETRY
);
1035 cnp
->cn_flags
&= ~CNP_PDIRUNLOCK
;
1036 return (error
); /* NOTE: return error from nget */
1040 error
= vn_lock(dvp
, LK_EXCLUSIVE
);
1045 cnp
->cn_flags
|= CNP_PDIRUNLOCK
;
1047 } else if (NFS_CMPFH(np
, fhp
, fhsize
)) {
1051 error
= nfs_nget(dvp
->v_mount
, fhp
, fhsize
, &np
);
1058 cnp
->cn_flags
|= CNP_PDIRUNLOCK
;
1063 nfsm_postop_attr(newvp
, attrflag
, NFS_LATTR_NOSHRINK
);
1064 nfsm_postop_attr(dvp
, attrflag
, NFS_LATTR_NOSHRINK
);
1066 nfsm_loadattr(newvp
, (struct vattr
*)0);
1068 /* XXX MOVE TO nfs_nremove() */
1069 if ((cnp
->cn_flags
& CNP_MAKEENTRY
) &&
1070 cnp
->cn_nameiop
!= NAMEI_DELETE
) {
1071 np
->n_ctime
= np
->n_vattr
.va_ctime
.tv_sec
; /* XXX */
1078 if (newvp
!= NULLVP
) {
1082 if ((cnp
->cn_nameiop
== NAMEI_CREATE
||
1083 cnp
->cn_nameiop
== NAMEI_RENAME
) &&
1087 cnp
->cn_flags
|= CNP_PDIRUNLOCK
;
1089 if (dvp
->v_mount
->mnt_flag
& MNT_RDONLY
)
1092 error
= EJUSTRETURN
;
1100 * Just call nfs_bioread() to do the work.
1102 * nfs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
1103 * struct ucred *a_cred)
1106 nfs_read(struct vop_read_args
*ap
)
1108 struct vnode
*vp
= ap
->a_vp
;
1110 return (nfs_bioread(vp
, ap
->a_uio
, ap
->a_ioflag
));
1111 switch (vp
->v_type
) {
1113 return (nfs_bioread(vp
, ap
->a_uio
, ap
->a_ioflag
));
1124 * nfs_readlink(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred)
1127 nfs_readlink(struct vop_readlink_args
*ap
)
1129 struct vnode
*vp
= ap
->a_vp
;
1131 if (vp
->v_type
!= VLNK
)
1133 return (nfs_bioread(vp
, ap
->a_uio
, 0));
1137 * Do a readlink rpc.
1138 * Called by nfs_doio() from below the buffer cache.
1141 nfs_readlinkrpc(struct vnode
*vp
, struct uio
*uiop
)
1146 caddr_t bpos
, dpos
, cp2
;
1147 int error
= 0, len
, attrflag
;
1148 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1149 int v3
= NFS_ISV3(vp
);
1151 nfsstats
.rpccnt
[NFSPROC_READLINK
]++;
1152 nfsm_reqhead(vp
, NFSPROC_READLINK
, NFSX_FH(v3
));
1154 nfsm_request(vp
, NFSPROC_READLINK
, uiop
->uio_td
, nfs_vpcred(vp
, ND_CHECK
));
1156 nfsm_postop_attr(vp
, attrflag
, NFS_LATTR_NOSHRINK
);
1158 nfsm_strsiz(len
, NFS_MAXPATHLEN
);
1159 if (len
== NFS_MAXPATHLEN
) {
1160 struct nfsnode
*np
= VTONFS(vp
);
1161 if (np
->n_size
&& np
->n_size
< NFS_MAXPATHLEN
)
1164 nfsm_mtouio(uiop
, len
);
1176 nfs_readrpc(struct vnode
*vp
, struct uio
*uiop
)
1181 caddr_t bpos
, dpos
, cp2
;
1182 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1183 struct nfsmount
*nmp
;
1184 int error
= 0, len
, retlen
, tsiz
, eof
, attrflag
;
1185 int v3
= NFS_ISV3(vp
);
1190 nmp
= VFSTONFS(vp
->v_mount
);
1191 tsiz
= uiop
->uio_resid
;
1192 if (uiop
->uio_offset
+ tsiz
> nmp
->nm_maxfilesize
)
1195 nfsstats
.rpccnt
[NFSPROC_READ
]++;
1196 len
= (tsiz
> nmp
->nm_rsize
) ? nmp
->nm_rsize
: tsiz
;
1197 nfsm_reqhead(vp
, NFSPROC_READ
, NFSX_FH(v3
) + NFSX_UNSIGNED
* 3);
1199 nfsm_build(tl
, u_int32_t
*, NFSX_UNSIGNED
* 3);
1201 txdr_hyper(uiop
->uio_offset
, tl
);
1202 *(tl
+ 2) = txdr_unsigned(len
);
1204 *tl
++ = txdr_unsigned(uiop
->uio_offset
);
1205 *tl
++ = txdr_unsigned(len
);
1208 nfsm_request(vp
, NFSPROC_READ
, uiop
->uio_td
, nfs_vpcred(vp
, ND_READ
));
1210 nfsm_postop_attr(vp
, attrflag
, NFS_LATTR_NOSHRINK
);
1215 nfsm_dissect(tl
, u_int32_t
*, 2 * NFSX_UNSIGNED
);
1216 eof
= fxdr_unsigned(int, *(tl
+ 1));
1218 nfsm_loadattr(vp
, (struct vattr
*)0);
1219 nfsm_strsiz(retlen
, nmp
->nm_rsize
);
1220 nfsm_mtouio(uiop
, retlen
);
1224 if (eof
|| retlen
== 0) {
1227 } else if (retlen
< len
) {
1239 nfs_writerpc(struct vnode
*vp
, struct uio
*uiop
, int *iomode
, int *must_commit
)
1243 int32_t t1
, t2
, backup
;
1244 caddr_t bpos
, dpos
, cp2
;
1245 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1246 struct nfsmount
*nmp
= VFSTONFS(vp
->v_mount
);
1247 int error
= 0, len
, tsiz
, wccflag
= NFSV3_WCCRATTR
, rlen
, commit
;
1248 int v3
= NFS_ISV3(vp
), committed
= NFSV3WRITE_FILESYNC
;
1251 if (uiop
->uio_iovcnt
!= 1)
1252 panic("nfs: writerpc iovcnt > 1");
1255 tsiz
= uiop
->uio_resid
;
1256 if (uiop
->uio_offset
+ tsiz
> nmp
->nm_maxfilesize
)
1259 nfsstats
.rpccnt
[NFSPROC_WRITE
]++;
1260 len
= (tsiz
> nmp
->nm_wsize
) ? nmp
->nm_wsize
: tsiz
;
1261 nfsm_reqhead(vp
, NFSPROC_WRITE
,
1262 NFSX_FH(v3
) + 5 * NFSX_UNSIGNED
+ nfsm_rndup(len
));
1265 nfsm_build(tl
, u_int32_t
*, 5 * NFSX_UNSIGNED
);
1266 txdr_hyper(uiop
->uio_offset
, tl
);
1268 *tl
++ = txdr_unsigned(len
);
1269 *tl
++ = txdr_unsigned(*iomode
);
1270 *tl
= txdr_unsigned(len
);
1274 nfsm_build(tl
, u_int32_t
*, 4 * NFSX_UNSIGNED
);
1275 /* Set both "begin" and "current" to non-garbage. */
1276 x
= txdr_unsigned((u_int32_t
)uiop
->uio_offset
);
1277 *tl
++ = x
; /* "begin offset" */
1278 *tl
++ = x
; /* "current offset" */
1279 x
= txdr_unsigned(len
);
1280 *tl
++ = x
; /* total to this offset */
1281 *tl
= x
; /* size of this write */
1283 nfsm_uiotom(uiop
, len
);
1284 nfsm_request(vp
, NFSPROC_WRITE
, uiop
->uio_td
, nfs_vpcred(vp
, ND_WRITE
));
1287 * The write RPC returns a before and after mtime. The
1288 * nfsm_wcc_data() macro checks the before n_mtime
1289 * against the before time and stores the after time
1290 * in the nfsnode's cached vattr and n_mtime field.
1291 * The NRMODIFIED bit will be set if the before
1292 * time did not match the original mtime.
1294 wccflag
= NFSV3_WCCCHK
;
1295 nfsm_wcc_data(vp
, wccflag
);
1297 nfsm_dissect(tl
, u_int32_t
*, 2 * NFSX_UNSIGNED
1298 + NFSX_V3WRITEVERF
);
1299 rlen
= fxdr_unsigned(int, *tl
++);
1304 } else if (rlen
< len
) {
1305 backup
= len
- rlen
;
1306 uiop
->uio_iov
->iov_base
-= backup
;
1307 uiop
->uio_iov
->iov_len
+= backup
;
1308 uiop
->uio_offset
-= backup
;
1309 uiop
->uio_resid
+= backup
;
1312 commit
= fxdr_unsigned(int, *tl
++);
1315			 * Return the lowest commitment level
1316			 * obtained by any of the RPCs.
1318 if (committed
== NFSV3WRITE_FILESYNC
)
1320 else if (committed
== NFSV3WRITE_DATASYNC
&&
1321 commit
== NFSV3WRITE_UNSTABLE
)
1323 if ((nmp
->nm_state
& NFSSTA_HASWRITEVERF
) == 0){
1324 bcopy((caddr_t
)tl
, (caddr_t
)nmp
->nm_verf
,
1326 nmp
->nm_state
|= NFSSTA_HASWRITEVERF
;
1327 } else if (bcmp((caddr_t
)tl
,
1328 (caddr_t
)nmp
->nm_verf
, NFSX_V3WRITEVERF
)) {
1330 bcopy((caddr_t
)tl
, (caddr_t
)nmp
->nm_verf
,
1335 nfsm_loadattr(vp
, (struct vattr
*)0);
1343 if (vp
->v_mount
->mnt_flag
& MNT_ASYNC
)
1344 committed
= NFSV3WRITE_FILESYNC
;
1345 *iomode
= committed
;
1347 uiop
->uio_resid
= tsiz
;
1353 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1354 * mode set to specify the file type and the size field for rdev.
1357 nfs_mknodrpc(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
1360 struct nfsv2_sattr
*sp
;
1364 struct vnode
*newvp
= (struct vnode
*)0;
1365 struct nfsnode
*np
= (struct nfsnode
*)0;
1369 int error
= 0, wccflag
= NFSV3_WCCRATTR
, gotvp
= 0;
1370 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1372 int v3
= NFS_ISV3(dvp
);
1374 if (vap
->va_type
== VCHR
|| vap
->va_type
== VBLK
) {
1375 rmajor
= txdr_unsigned(vap
->va_rmajor
);
1376 rminor
= txdr_unsigned(vap
->va_rminor
);
1377 } else if (vap
->va_type
== VFIFO
|| vap
->va_type
== VSOCK
) {
1378 rmajor
= nfs_xdrneg1
;
1379 rminor
= nfs_xdrneg1
;
1381 return (EOPNOTSUPP
);
1383 if ((error
= VOP_GETATTR(dvp
, &vattr
)) != 0) {
1386 nfsstats
.rpccnt
[NFSPROC_MKNOD
]++;
1387 nfsm_reqhead(dvp
, NFSPROC_MKNOD
, NFSX_FH(v3
) + 4 * NFSX_UNSIGNED
+
1388 + nfsm_rndup(cnp
->cn_namelen
) + NFSX_SATTR(v3
));
1389 nfsm_fhtom(dvp
, v3
);
1390 nfsm_strtom(cnp
->cn_nameptr
, cnp
->cn_namelen
, NFS_MAXNAMLEN
);
1392 nfsm_build(tl
, u_int32_t
*, NFSX_UNSIGNED
);
1393 *tl
++ = vtonfsv3_type(vap
->va_type
);
1394 nfsm_v3attrbuild(vap
, FALSE
);
1395 if (vap
->va_type
== VCHR
|| vap
->va_type
== VBLK
) {
1396 nfsm_build(tl
, u_int32_t
*, 2 * NFSX_UNSIGNED
);
1397 *tl
++ = txdr_unsigned(vap
->va_rmajor
);
1398 *tl
= txdr_unsigned(vap
->va_rminor
);
1401 nfsm_build(sp
, struct nfsv2_sattr
*, NFSX_V2SATTR
);
1402 sp
->sa_mode
= vtonfsv2_mode(vap
->va_type
, vap
->va_mode
);
1403 sp
->sa_uid
= nfs_xdrneg1
;
1404 sp
->sa_gid
= nfs_xdrneg1
;
1405 sp
->sa_size
= makeudev(rmajor
, rminor
);
1406 txdr_nfsv2time(&vap
->va_atime
, &sp
->sa_atime
);
1407 txdr_nfsv2time(&vap
->va_mtime
, &sp
->sa_mtime
);
1409 nfsm_request(dvp
, NFSPROC_MKNOD
, cnp
->cn_td
, cnp
->cn_cred
);
1411 nfsm_mtofh(dvp
, newvp
, v3
, gotvp
);
1415 newvp
= (struct vnode
*)0;
1417 error
= nfs_lookitup(dvp
, cnp
->cn_nameptr
,
1418 cnp
->cn_namelen
, cnp
->cn_cred
, cnp
->cn_td
, &np
);
1424 nfsm_wcc_data(dvp
, wccflag
);
1433 VTONFS(dvp
)->n_flag
|= NLMODIFIED
;
1435 VTONFS(dvp
)->n_attrstamp
= 0;
1441 * just call nfs_mknodrpc() to do the work.
1443 * nfs_mknod(struct vnode *a_dvp, struct vnode **a_vpp,
1444 * struct componentname *a_cnp, struct vattr *a_vap)
1448 nfs_mknod(struct vop_old_mknod_args
*ap
)
1450 return nfs_mknodrpc(ap
->a_dvp
, ap
->a_vpp
, ap
->a_cnp
, ap
->a_vap
);
/*
 * create_verf: counter folded into the NFSv3 exclusive-create verifier
 * (see the O_EXCL path in nfs_create() below), so that retransmitted
 * CREATE requests from this client remain distinguishable.
 */
1453 static u_long create_verf
;
1455 * nfs file create call
1457 * nfs_create(struct vnode *a_dvp, struct vnode **a_vpp,
1458 * struct componentname *a_cnp, struct vattr *a_vap)
1461 nfs_create(struct vop_old_create_args
*ap
)
1463 struct vnode
*dvp
= ap
->a_dvp
;
1464 struct vattr
*vap
= ap
->a_vap
;
1465 struct componentname
*cnp
= ap
->a_cnp
;
1466 struct nfsv2_sattr
*sp
;
1470 struct nfsnode
*np
= (struct nfsnode
*)0;
1471 struct vnode
*newvp
= (struct vnode
*)0;
1472 caddr_t bpos
, dpos
, cp2
;
1473 int error
= 0, wccflag
= NFSV3_WCCRATTR
, gotvp
= 0, fmode
= 0;
1474 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1476 int v3
= NFS_ISV3(dvp
);
1479 * Oops, not for me..
1481 if (vap
->va_type
== VSOCK
)
1482 return (nfs_mknodrpc(dvp
, ap
->a_vpp
, cnp
, vap
));
1484 if ((error
= VOP_GETATTR(dvp
, &vattr
)) != 0) {
1487 if (vap
->va_vaflags
& VA_EXCLUSIVE
)
1490 nfsstats
.rpccnt
[NFSPROC_CREATE
]++;
1491 nfsm_reqhead(dvp
, NFSPROC_CREATE
, NFSX_FH(v3
) + 2 * NFSX_UNSIGNED
+
1492 nfsm_rndup(cnp
->cn_namelen
) + NFSX_SATTR(v3
));
1493 nfsm_fhtom(dvp
, v3
);
1494 nfsm_strtom(cnp
->cn_nameptr
, cnp
->cn_namelen
, NFS_MAXNAMLEN
);
1496 nfsm_build(tl
, u_int32_t
*, NFSX_UNSIGNED
);
1497 if (fmode
& O_EXCL
) {
1498 *tl
= txdr_unsigned(NFSV3CREATE_EXCLUSIVE
);
1499 nfsm_build(tl
, u_int32_t
*, NFSX_V3CREATEVERF
);
1501 if (!TAILQ_EMPTY(&in_ifaddrheads
[mycpuid
]))
1502 *tl
++ = IA_SIN(TAILQ_FIRST(&in_ifaddrheads
[mycpuid
])->ia
)->sin_addr
.s_addr
;
1505 *tl
++ = create_verf
;
1506 *tl
= ++create_verf
;
1508 *tl
= txdr_unsigned(NFSV3CREATE_UNCHECKED
);
1509 nfsm_v3attrbuild(vap
, FALSE
);
1512 nfsm_build(sp
, struct nfsv2_sattr
*, NFSX_V2SATTR
);
1513 sp
->sa_mode
= vtonfsv2_mode(vap
->va_type
, vap
->va_mode
);
1514 sp
->sa_uid
= nfs_xdrneg1
;
1515 sp
->sa_gid
= nfs_xdrneg1
;
1517 txdr_nfsv2time(&vap
->va_atime
, &sp
->sa_atime
);
1518 txdr_nfsv2time(&vap
->va_mtime
, &sp
->sa_mtime
);
1520 nfsm_request(dvp
, NFSPROC_CREATE
, cnp
->cn_td
, cnp
->cn_cred
);
1522 nfsm_mtofh(dvp
, newvp
, v3
, gotvp
);
1526 newvp
= (struct vnode
*)0;
1528 error
= nfs_lookitup(dvp
, cnp
->cn_nameptr
,
1529 cnp
->cn_namelen
, cnp
->cn_cred
, cnp
->cn_td
, &np
);
1535 nfsm_wcc_data(dvp
, wccflag
);
1539 if (v3
&& (fmode
& O_EXCL
) && error
== NFSERR_NOTSUPP
) {
1545 } else if (v3
&& (fmode
& O_EXCL
)) {
1547 * We are normally called with only a partially initialized
1548 * VAP. Since the NFSv3 spec says that server may use the
1549 * file attributes to store the verifier, the spec requires
1550 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1551 * in atime, but we can't really assume that all servers will
1552 * so we ensure that our SETATTR sets both atime and mtime.
1554 if (vap
->va_mtime
.tv_sec
== VNOVAL
)
1555 vfs_timestamp(&vap
->va_mtime
);
1556 if (vap
->va_atime
.tv_sec
== VNOVAL
)
1557 vap
->va_atime
= vap
->va_mtime
;
1558 error
= nfs_setattrrpc(newvp
, vap
, cnp
->cn_cred
, cnp
->cn_td
);
1562 * The new np may have enough info for access
1563 * checks, make sure rucred and wucred are
1564 * initialized for read and write rpc's.
1567 if (np
->n_rucred
== NULL
)
1568 np
->n_rucred
= crhold(cnp
->cn_cred
);
1569 if (np
->n_wucred
== NULL
)
1570 np
->n_wucred
= crhold(cnp
->cn_cred
);
1573 VTONFS(dvp
)->n_flag
|= NLMODIFIED
;
1575 VTONFS(dvp
)->n_attrstamp
= 0;
1580 * nfs file remove call
1581 * To try and make nfs semantics closer to ufs semantics, a file that has
1582 * other processes using the vnode is renamed instead of removed and then
1583 * removed later on the last close.
1584 * - If v_sysref.refcnt > 1
1585 * If a rename is not already in the works
1586 * call nfs_sillyrename() to set it up
1590 * nfs_remove(struct vnode *a_dvp, struct vnode *a_vp,
1591 * struct componentname *a_cnp)
1594 nfs_remove(struct vop_old_remove_args
*ap
)
1596 struct vnode
*vp
= ap
->a_vp
;
1597 struct vnode
*dvp
= ap
->a_dvp
;
1598 struct componentname
*cnp
= ap
->a_cnp
;
1599 struct nfsnode
*np
= VTONFS(vp
);
1604 if (vp
->v_sysref
.refcnt
< 1)
1605 panic("nfs_remove: bad v_sysref.refcnt");
1607 if (vp
->v_type
== VDIR
)
1609 else if (vp
->v_sysref
.refcnt
== 1 || (np
->n_sillyrename
&&
1610 VOP_GETATTR(vp
, &vattr
) == 0 &&
1611 vattr
.va_nlink
> 1)) {
1613 * throw away biocache buffers, mainly to avoid
1614 * unnecessary delayed writes later.
1616 error
= nfs_vinvalbuf(vp
, 0, 1);
1619 error
= nfs_removerpc(dvp
, cnp
->cn_nameptr
,
1620 cnp
->cn_namelen
, cnp
->cn_cred
, cnp
->cn_td
);
1622 * Kludge City: If the first reply to the remove rpc is lost..
1623 * the reply to the retransmitted request will be ENOENT
1624 * since the file was in fact removed
1625 * Therefore, we cheat and return success.
1627 if (error
== ENOENT
)
1629 } else if (!np
->n_sillyrename
) {
1630 error
= nfs_sillyrename(dvp
, vp
, cnp
);
1632 np
->n_attrstamp
= 0;
1637 * nfs file remove rpc called from nfs_inactive
1640 nfs_removeit(struct sillyrename
*sp
)
1642 return (nfs_removerpc(sp
->s_dvp
, sp
->s_name
, sp
->s_namlen
,
1647 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1650 nfs_removerpc(struct vnode
*dvp
, const char *name
, int namelen
,
1651 struct ucred
*cred
, struct thread
*td
)
1656 caddr_t bpos
, dpos
, cp2
;
1657 int error
= 0, wccflag
= NFSV3_WCCRATTR
;
1658 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1659 int v3
= NFS_ISV3(dvp
);
1661 nfsstats
.rpccnt
[NFSPROC_REMOVE
]++;
1662 nfsm_reqhead(dvp
, NFSPROC_REMOVE
,
1663 NFSX_FH(v3
) + NFSX_UNSIGNED
+ nfsm_rndup(namelen
));
1664 nfsm_fhtom(dvp
, v3
);
1665 nfsm_strtom(name
, namelen
, NFS_MAXNAMLEN
);
1666 nfsm_request(dvp
, NFSPROC_REMOVE
, td
, cred
);
1668 nfsm_wcc_data(dvp
, wccflag
);
1671 VTONFS(dvp
)->n_flag
|= NLMODIFIED
;
1673 VTONFS(dvp
)->n_attrstamp
= 0;
1678 * nfs file rename call
1680 * nfs_rename(struct vnode *a_fdvp, struct vnode *a_fvp,
1681 * struct componentname *a_fcnp, struct vnode *a_tdvp,
1682 * struct vnode *a_tvp, struct componentname *a_tcnp)
1685 nfs_rename(struct vop_old_rename_args
*ap
)
1687 struct vnode
*fvp
= ap
->a_fvp
;
1688 struct vnode
*tvp
= ap
->a_tvp
;
1689 struct vnode
*fdvp
= ap
->a_fdvp
;
1690 struct vnode
*tdvp
= ap
->a_tdvp
;
1691 struct componentname
*tcnp
= ap
->a_tcnp
;
1692 struct componentname
*fcnp
= ap
->a_fcnp
;
1695 /* Check for cross-device rename */
1696 if ((fvp
->v_mount
!= tdvp
->v_mount
) ||
1697 (tvp
&& (fvp
->v_mount
!= tvp
->v_mount
))) {
1703 * We have to flush B_DELWRI data prior to renaming
1704 * the file. If we don't, the delayed-write buffers
1705 * can be flushed out later after the file has gone stale
1706 * under NFSV3. NFSV2 does not have this problem because
1707 * ( as far as I can tell ) it flushes dirty buffers more
1711 VOP_FSYNC(fvp
, MNT_WAIT
);
1713 VOP_FSYNC(tvp
, MNT_WAIT
);
1716 * If the tvp exists and is in use, sillyrename it before doing the
1717 * rename of the new file over it.
1719 * XXX Can't sillyrename a directory.
1721 * We do not attempt to do any namecache purges in this old API
1722 * routine. The new API compat functions have access to the actual
1723 * namecache structures and will do it for us.
1725 if (tvp
&& tvp
->v_sysref
.refcnt
> 1 && !VTONFS(tvp
)->n_sillyrename
&&
1726 tvp
->v_type
!= VDIR
&& !nfs_sillyrename(tdvp
, tvp
, tcnp
)) {
1733 error
= nfs_renamerpc(fdvp
, fcnp
->cn_nameptr
, fcnp
->cn_namelen
,
1734 tdvp
, tcnp
->cn_nameptr
, tcnp
->cn_namelen
, tcnp
->cn_cred
,
1747 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1749 if (error
== ENOENT
)
1755 * nfs file rename rpc called from nfs_remove() above
1758 nfs_renameit(struct vnode
*sdvp
, struct componentname
*scnp
,
1759 struct sillyrename
*sp
)
1761 return (nfs_renamerpc(sdvp
, scnp
->cn_nameptr
, scnp
->cn_namelen
,
1762 sdvp
, sp
->s_name
, sp
->s_namlen
, scnp
->cn_cred
, scnp
->cn_td
));
1766 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1769 nfs_renamerpc(struct vnode
*fdvp
, const char *fnameptr
, int fnamelen
,
1770 struct vnode
*tdvp
, const char *tnameptr
, int tnamelen
,
1771 struct ucred
*cred
, struct thread
*td
)
1776 caddr_t bpos
, dpos
, cp2
;
1777 int error
= 0, fwccflag
= NFSV3_WCCRATTR
, twccflag
= NFSV3_WCCRATTR
;
1778 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1779 int v3
= NFS_ISV3(fdvp
);
1781 nfsstats
.rpccnt
[NFSPROC_RENAME
]++;
1782 nfsm_reqhead(fdvp
, NFSPROC_RENAME
,
1783 (NFSX_FH(v3
) + NFSX_UNSIGNED
)*2 + nfsm_rndup(fnamelen
) +
1784 nfsm_rndup(tnamelen
));
1785 nfsm_fhtom(fdvp
, v3
);
1786 nfsm_strtom(fnameptr
, fnamelen
, NFS_MAXNAMLEN
);
1787 nfsm_fhtom(tdvp
, v3
);
1788 nfsm_strtom(tnameptr
, tnamelen
, NFS_MAXNAMLEN
);
1789 nfsm_request(fdvp
, NFSPROC_RENAME
, td
, cred
);
1791 nfsm_wcc_data(fdvp
, fwccflag
);
1792 nfsm_wcc_data(tdvp
, twccflag
);
1796 VTONFS(fdvp
)->n_flag
|= NLMODIFIED
;
1797 VTONFS(tdvp
)->n_flag
|= NLMODIFIED
;
1799 VTONFS(fdvp
)->n_attrstamp
= 0;
1801 VTONFS(tdvp
)->n_attrstamp
= 0;
1806 * nfs hard link create call
1808 * nfs_link(struct vnode *a_tdvp, struct vnode *a_vp,
1809 * struct componentname *a_cnp)
1812 nfs_link(struct vop_old_link_args
*ap
)
1814 struct vnode
*vp
= ap
->a_vp
;
1815 struct vnode
*tdvp
= ap
->a_tdvp
;
1816 struct componentname
*cnp
= ap
->a_cnp
;
1820 caddr_t bpos
, dpos
, cp2
;
1821 int error
= 0, wccflag
= NFSV3_WCCRATTR
, attrflag
= 0;
1822 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1825 if (vp
->v_mount
!= tdvp
->v_mount
) {
1830 * Push all writes to the server, so that the attribute cache
1831 * doesn't get "out of sync" with the server.
1832 * XXX There should be a better way!
1834 VOP_FSYNC(vp
, MNT_WAIT
);
1837 nfsstats
.rpccnt
[NFSPROC_LINK
]++;
1838 nfsm_reqhead(vp
, NFSPROC_LINK
,
1839 NFSX_FH(v3
)*2 + NFSX_UNSIGNED
+ nfsm_rndup(cnp
->cn_namelen
));
1841 nfsm_fhtom(tdvp
, v3
);
1842 nfsm_strtom(cnp
->cn_nameptr
, cnp
->cn_namelen
, NFS_MAXNAMLEN
);
1843 nfsm_request(vp
, NFSPROC_LINK
, cnp
->cn_td
, cnp
->cn_cred
);
1845 nfsm_postop_attr(vp
, attrflag
, NFS_LATTR_NOSHRINK
);
1846 nfsm_wcc_data(tdvp
, wccflag
);
1850 VTONFS(tdvp
)->n_flag
|= NLMODIFIED
;
1852 VTONFS(vp
)->n_attrstamp
= 0;
1854 VTONFS(tdvp
)->n_attrstamp
= 0;
1856 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1858 if (error
== EEXIST
)
1864 * nfs symbolic link create call
1866 * nfs_symlink(struct vnode *a_dvp, struct vnode **a_vpp,
1867 * struct componentname *a_cnp, struct vattr *a_vap,
1871 nfs_symlink(struct vop_old_symlink_args
*ap
)
1873 struct vnode
*dvp
= ap
->a_dvp
;
1874 struct vattr
*vap
= ap
->a_vap
;
1875 struct componentname
*cnp
= ap
->a_cnp
;
1876 struct nfsv2_sattr
*sp
;
1880 caddr_t bpos
, dpos
, cp2
;
1881 int slen
, error
= 0, wccflag
= NFSV3_WCCRATTR
, gotvp
;
1882 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1883 struct vnode
*newvp
= (struct vnode
*)0;
1884 int v3
= NFS_ISV3(dvp
);
1886 nfsstats
.rpccnt
[NFSPROC_SYMLINK
]++;
1887 slen
= strlen(ap
->a_target
);
1888 nfsm_reqhead(dvp
, NFSPROC_SYMLINK
, NFSX_FH(v3
) + 2*NFSX_UNSIGNED
+
1889 nfsm_rndup(cnp
->cn_namelen
) + nfsm_rndup(slen
) + NFSX_SATTR(v3
));
1890 nfsm_fhtom(dvp
, v3
);
1891 nfsm_strtom(cnp
->cn_nameptr
, cnp
->cn_namelen
, NFS_MAXNAMLEN
);
1893 nfsm_v3attrbuild(vap
, FALSE
);
1895 nfsm_strtom(ap
->a_target
, slen
, NFS_MAXPATHLEN
);
1897 nfsm_build(sp
, struct nfsv2_sattr
*, NFSX_V2SATTR
);
1898 sp
->sa_mode
= vtonfsv2_mode(VLNK
, vap
->va_mode
);
1899 sp
->sa_uid
= nfs_xdrneg1
;
1900 sp
->sa_gid
= nfs_xdrneg1
;
1901 sp
->sa_size
= nfs_xdrneg1
;
1902 txdr_nfsv2time(&vap
->va_atime
, &sp
->sa_atime
);
1903 txdr_nfsv2time(&vap
->va_mtime
, &sp
->sa_mtime
);
1907 * Issue the NFS request and get the rpc response.
1909 * Only NFSv3 responses returning an error of 0 actually return
1910 * a file handle that can be converted into newvp without having
1911 * to do an extra lookup rpc.
1913 nfsm_request(dvp
, NFSPROC_SYMLINK
, cnp
->cn_td
, cnp
->cn_cred
);
1916 nfsm_mtofh(dvp
, newvp
, v3
, gotvp
);
1917 nfsm_wcc_data(dvp
, wccflag
);
1921 * out code jumps -> here, mrep is also freed.
1928 * If we get an EEXIST error, silently convert it to no-error
1929 * in case of an NFS retry.
1931 if (error
== EEXIST
)
1935 * If we do not have (or no longer have) an error, and we could
1936 * not extract the newvp from the response due to the request being
1937 * NFSv2 or the error being EEXIST. We have to do a lookup in order
1938 * to obtain a newvp to return.
1940 if (error
== 0 && newvp
== NULL
) {
1941 struct nfsnode
*np
= NULL
;
1943 error
= nfs_lookitup(dvp
, cnp
->cn_nameptr
, cnp
->cn_namelen
,
1944 cnp
->cn_cred
, cnp
->cn_td
, &np
);
1954 VTONFS(dvp
)->n_flag
|= NLMODIFIED
;
1956 VTONFS(dvp
)->n_attrstamp
= 0;
1963 * nfs_mkdir(struct vnode *a_dvp, struct vnode **a_vpp,
1964 * struct componentname *a_cnp, struct vattr *a_vap)
1967 nfs_mkdir(struct vop_old_mkdir_args
*ap
)
1969 struct vnode
*dvp
= ap
->a_dvp
;
1970 struct vattr
*vap
= ap
->a_vap
;
1971 struct componentname
*cnp
= ap
->a_cnp
;
1972 struct nfsv2_sattr
*sp
;
1977 struct nfsnode
*np
= (struct nfsnode
*)0;
1978 struct vnode
*newvp
= (struct vnode
*)0;
1979 caddr_t bpos
, dpos
, cp2
;
1980 int error
= 0, wccflag
= NFSV3_WCCRATTR
;
1982 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
1984 int v3
= NFS_ISV3(dvp
);
1986 if ((error
= VOP_GETATTR(dvp
, &vattr
)) != 0) {
1989 len
= cnp
->cn_namelen
;
1990 nfsstats
.rpccnt
[NFSPROC_MKDIR
]++;
1991 nfsm_reqhead(dvp
, NFSPROC_MKDIR
,
1992 NFSX_FH(v3
) + NFSX_UNSIGNED
+ nfsm_rndup(len
) + NFSX_SATTR(v3
));
1993 nfsm_fhtom(dvp
, v3
);
1994 nfsm_strtom(cnp
->cn_nameptr
, len
, NFS_MAXNAMLEN
);
1996 nfsm_v3attrbuild(vap
, FALSE
);
1998 nfsm_build(sp
, struct nfsv2_sattr
*, NFSX_V2SATTR
);
1999 sp
->sa_mode
= vtonfsv2_mode(VDIR
, vap
->va_mode
);
2000 sp
->sa_uid
= nfs_xdrneg1
;
2001 sp
->sa_gid
= nfs_xdrneg1
;
2002 sp
->sa_size
= nfs_xdrneg1
;
2003 txdr_nfsv2time(&vap
->va_atime
, &sp
->sa_atime
);
2004 txdr_nfsv2time(&vap
->va_mtime
, &sp
->sa_mtime
);
2006 nfsm_request(dvp
, NFSPROC_MKDIR
, cnp
->cn_td
, cnp
->cn_cred
);
2008 nfsm_mtofh(dvp
, newvp
, v3
, gotvp
);
2010 nfsm_wcc_data(dvp
, wccflag
);
2013 VTONFS(dvp
)->n_flag
|= NLMODIFIED
;
2015 VTONFS(dvp
)->n_attrstamp
= 0;
2017 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
2018 * if we can succeed in looking up the directory.
2020 if (error
== EEXIST
|| (!error
&& !gotvp
)) {
2023 newvp
= (struct vnode
*)0;
2025 error
= nfs_lookitup(dvp
, cnp
->cn_nameptr
, len
, cnp
->cn_cred
,
2029 if (newvp
->v_type
!= VDIR
)
2042 * nfs remove directory call
2044 * nfs_rmdir(struct vnode *a_dvp, struct vnode *a_vp,
2045 * struct componentname *a_cnp)
2048 nfs_rmdir(struct vop_old_rmdir_args
*ap
)
2050 struct vnode
*vp
= ap
->a_vp
;
2051 struct vnode
*dvp
= ap
->a_dvp
;
2052 struct componentname
*cnp
= ap
->a_cnp
;
2056 caddr_t bpos
, dpos
, cp2
;
2057 int error
= 0, wccflag
= NFSV3_WCCRATTR
;
2058 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
2059 int v3
= NFS_ISV3(dvp
);
2063 nfsstats
.rpccnt
[NFSPROC_RMDIR
]++;
2064 nfsm_reqhead(dvp
, NFSPROC_RMDIR
,
2065 NFSX_FH(v3
) + NFSX_UNSIGNED
+ nfsm_rndup(cnp
->cn_namelen
));
2066 nfsm_fhtom(dvp
, v3
);
2067 nfsm_strtom(cnp
->cn_nameptr
, cnp
->cn_namelen
, NFS_MAXNAMLEN
);
2068 nfsm_request(dvp
, NFSPROC_RMDIR
, cnp
->cn_td
, cnp
->cn_cred
);
2070 nfsm_wcc_data(dvp
, wccflag
);
2073 VTONFS(dvp
)->n_flag
|= NLMODIFIED
;
2075 VTONFS(dvp
)->n_attrstamp
= 0;
2077 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2079 if (error
== ENOENT
)
2087 * nfs_readdir(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred)
2090 nfs_readdir(struct vop_readdir_args
*ap
)
2092 struct vnode
*vp
= ap
->a_vp
;
2093 struct nfsnode
*np
= VTONFS(vp
);
2094 struct uio
*uio
= ap
->a_uio
;
2098 if (vp
->v_type
!= VDIR
)
2101 if ((error
= vn_lock(vp
, LK_EXCLUSIVE
| LK_RETRY
)) != 0)
2105 * If we have a valid EOF offset cache we must call VOP_GETATTR()
2106 * and then check that is still valid, or if this is an NQNFS mount
2107 * we call NQNFS_CKCACHEABLE() instead of VOP_GETATTR(). Note that
2108 * VOP_GETATTR() does not necessarily go to the wire.
2110 if (np
->n_direofoffset
> 0 && uio
->uio_offset
>= np
->n_direofoffset
&&
2111 (np
->n_flag
& (NLMODIFIED
|NRMODIFIED
)) == 0) {
2112 if (VOP_GETATTR(vp
, &vattr
) == 0 &&
2113 (np
->n_flag
& (NLMODIFIED
|NRMODIFIED
)) == 0
2115 nfsstats
.direofcache_hits
++;
2121 * Call nfs_bioread() to do the real work. nfs_bioread() does its
2122 * own cache coherency checks so we do not have to.
2124 tresid
= uio
->uio_resid
;
2125 error
= nfs_bioread(vp
, uio
, 0);
2127 if (!error
&& uio
->uio_resid
== tresid
)
2128 nfsstats
.direofcache_misses
++;
2135 * Readdir rpc call. nfs_bioread->nfs_doio->nfs_readdirrpc.
2137 * Note that for directories, nfs_bioread maintains the underlying nfs-centric
2138 * offset/block and converts the nfs formatted directory entries for userland
2139 * consumption as well as deals with offsets into the middle of blocks.
2140 * nfs_doio only deals with logical blocks. In particular, uio_offset will
2141 * be block-bounded. It must convert to cookies for the actual RPC.
2144 nfs_readdirrpc(struct vnode
*vp
, struct uio
*uiop
)
2147 struct nfs_dirent
*dp
= NULL
;
2152 caddr_t bpos
, dpos
, cp2
;
2153 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
2155 struct nfsmount
*nmp
= VFSTONFS(vp
->v_mount
);
2156 struct nfsnode
*dnp
= VTONFS(vp
);
2158 int error
= 0, tlen
, more_dirs
= 1, blksiz
= 0, bigenough
= 1;
2160 int v3
= NFS_ISV3(vp
);
2163 if (uiop
->uio_iovcnt
!= 1 || (uiop
->uio_offset
& (DIRBLKSIZ
- 1)) ||
2164 (uiop
->uio_resid
& (DIRBLKSIZ
- 1)))
2165 panic("nfs readdirrpc bad uio");
2169 * If there is no cookie, assume directory was stale.
2171 cookiep
= nfs_getcookie(dnp
, uiop
->uio_offset
, 0);
2175 return (NFSERR_BAD_COOKIE
);
2177 * Loop around doing readdir rpc's of size nm_readdirsize
2178 * truncated to a multiple of DIRBLKSIZ.
2179 * The stopping criteria is EOF or buffer full.
2181 while (more_dirs
&& bigenough
) {
2182 nfsstats
.rpccnt
[NFSPROC_READDIR
]++;
2183 nfsm_reqhead(vp
, NFSPROC_READDIR
, NFSX_FH(v3
) +
2187 nfsm_build(tl
, u_int32_t
*, 5 * NFSX_UNSIGNED
);
2188 *tl
++ = cookie
.nfsuquad
[0];
2189 *tl
++ = cookie
.nfsuquad
[1];
2190 *tl
++ = dnp
->n_cookieverf
.nfsuquad
[0];
2191 *tl
++ = dnp
->n_cookieverf
.nfsuquad
[1];
2193 nfsm_build(tl
, u_int32_t
*, 2 * NFSX_UNSIGNED
);
2194 *tl
++ = cookie
.nfsuquad
[0];
2196 *tl
= txdr_unsigned(nmp
->nm_readdirsize
);
2197 nfsm_request(vp
, NFSPROC_READDIR
, uiop
->uio_td
, nfs_vpcred(vp
, ND_READ
));
2199 nfsm_postop_attr(vp
, attrflag
, NFS_LATTR_NOSHRINK
);
2201 nfsm_dissect(tl
, u_int32_t
*,
2203 dnp
->n_cookieverf
.nfsuquad
[0] = *tl
++;
2204 dnp
->n_cookieverf
.nfsuquad
[1] = *tl
;
2210 nfsm_dissect(tl
, u_int32_t
*, NFSX_UNSIGNED
);
2211 more_dirs
= fxdr_unsigned(int, *tl
);
2213 /* loop thru the dir entries, converting them to std form */
2214 while (more_dirs
&& bigenough
) {
2216 nfsm_dissect(tl
, u_int32_t
*,
2218 fileno
= fxdr_hyper(tl
);
2219 len
= fxdr_unsigned(int, *(tl
+ 2));
2221 nfsm_dissect(tl
, u_int32_t
*,
2223 fileno
= fxdr_unsigned(u_quad_t
, *tl
++);
2224 len
= fxdr_unsigned(int, *tl
);
2226 if (len
<= 0 || len
> NFS_MAXNAMLEN
) {
2233 * len is the number of bytes in the path element
2234 * name, not including the \0 termination.
2234			 * tlen is the number of bytes we have to reserve for
2237			 * the path element name.
2239 tlen
= nfsm_rndup(len
);
2241 tlen
+= 4; /* To ensure null termination */
2244 * If the entry would cross a DIRBLKSIZ boundary,
2245 * extend the previous nfs_dirent to cover the
2248 left
= DIRBLKSIZ
- blksiz
;
2249 if ((tlen
+ sizeof(struct nfs_dirent
)) > left
) {
2250 dp
->nfs_reclen
+= left
;
2251 uiop
->uio_iov
->iov_base
+= left
;
2252 uiop
->uio_iov
->iov_len
-= left
;
2253 uiop
->uio_offset
+= left
;
2254 uiop
->uio_resid
-= left
;
2257 if ((tlen
+ sizeof(struct nfs_dirent
)) > uiop
->uio_resid
)
2260 dp
= (struct nfs_dirent
*)uiop
->uio_iov
->iov_base
;
2261 dp
->nfs_ino
= fileno
;
2262 dp
->nfs_namlen
= len
;
2263 dp
->nfs_reclen
= tlen
+ sizeof(struct nfs_dirent
);
2264 dp
->nfs_type
= DT_UNKNOWN
;
2265 blksiz
+= dp
->nfs_reclen
;
2266 if (blksiz
== DIRBLKSIZ
)
2268 uiop
->uio_offset
+= sizeof(struct nfs_dirent
);
2269 uiop
->uio_resid
-= sizeof(struct nfs_dirent
);
2270 uiop
->uio_iov
->iov_base
+= sizeof(struct nfs_dirent
);
2271 uiop
->uio_iov
->iov_len
-= sizeof(struct nfs_dirent
);
2272 nfsm_mtouio(uiop
, len
);
2275 * The uiop has advanced by nfs_dirent + len
2276 * but really needs to advance by
2279 cp
= uiop
->uio_iov
->iov_base
;
2281 *cp
= '\0'; /* null terminate */
2282 uiop
->uio_iov
->iov_base
+= tlen
;
2283 uiop
->uio_iov
->iov_len
-= tlen
;
2284 uiop
->uio_offset
+= tlen
;
2285 uiop
->uio_resid
-= tlen
;
2288				 * NFS strings must be rounded up (nfsm_mtouio
2289				 * handled that in the bigenough case).
2291 nfsm_adv(nfsm_rndup(len
));
2294 nfsm_dissect(tl
, u_int32_t
*,
2297 nfsm_dissect(tl
, u_int32_t
*,
2302			 * If we were able to accommodate the last entry,
2303			 * get the cookie for the next one.  Otherwise
2304			 * hold-over the cookie for the one we were not
2305			 * able to accommodate.
2308 cookie
.nfsuquad
[0] = *tl
++;
2310 cookie
.nfsuquad
[1] = *tl
++;
2316 more_dirs
= fxdr_unsigned(int, *tl
);
2319 * If at end of rpc data, get the eof boolean
2322 nfsm_dissect(tl
, u_int32_t
*, NFSX_UNSIGNED
);
2323 more_dirs
= (fxdr_unsigned(int, *tl
) == 0);
2328 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2329 * by increasing d_reclen for the last record.
2332 left
= DIRBLKSIZ
- blksiz
;
2333 dp
->nfs_reclen
+= left
;
2334 uiop
->uio_iov
->iov_base
+= left
;
2335 uiop
->uio_iov
->iov_len
-= left
;
2336 uiop
->uio_offset
+= left
;
2337 uiop
->uio_resid
-= left
;
2342 * We hit the end of the directory, update direofoffset.
2344 dnp
->n_direofoffset
= uiop
->uio_offset
;
2347 * There is more to go, insert the link cookie so the
2348 * next block can be read.
2350 if (uiop
->uio_resid
> 0)
2351 kprintf("EEK! readdirrpc resid > 0\n");
2352 cookiep
= nfs_getcookie(dnp
, uiop
->uio_offset
, 1);
2360 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2363 nfs_readdirplusrpc(struct vnode
*vp
, struct uio
*uiop
)
2366 struct nfs_dirent
*dp
;
2370 struct vnode
*newvp
;
2372 caddr_t bpos
, dpos
, cp2
, dpossav1
, dpossav2
;
2373 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
, *mdsav1
, *mdsav2
;
2375 struct nfsmount
*nmp
= VFSTONFS(vp
->v_mount
);
2376 struct nfsnode
*dnp
= VTONFS(vp
), *np
;
2379 int error
= 0, tlen
, more_dirs
= 1, blksiz
= 0, doit
, bigenough
= 1, i
;
2380 int attrflag
, fhsize
;
2381 struct nchandle nch
;
2382 struct nchandle dnch
;
2383 struct nlcomponent nlc
;
2389 if (uiop
->uio_iovcnt
!= 1 || (uiop
->uio_offset
& (DIRBLKSIZ
- 1)) ||
2390 (uiop
->uio_resid
& (DIRBLKSIZ
- 1)))
2391 panic("nfs readdirplusrpc bad uio");
2394 * Obtain the namecache record for the directory so we have something
2395 * to use as a basis for creating the entries. This function will
2396 * return a held (but not locked) ncp. The ncp may be disconnected
2397 * from the tree and cannot be used for upward traversals, and the
2398 * ncp may be unnamed. Note that other unrelated operations may
2399 * cause the ncp to be named at any time.
2401 cache_fromdvp(vp
, NULL
, 0, &dnch
);
2402 bzero(&nlc
, sizeof(nlc
));
2406 * If there is no cookie, assume directory was stale.
2408 cookiep
= nfs_getcookie(dnp
, uiop
->uio_offset
, 0);
2412 return (NFSERR_BAD_COOKIE
);
2414 * Loop around doing readdir rpc's of size nm_readdirsize
2415 * truncated to a multiple of DIRBLKSIZ.
2416 * The stopping criteria is EOF or buffer full.
2418 while (more_dirs
&& bigenough
) {
2419 nfsstats
.rpccnt
[NFSPROC_READDIRPLUS
]++;
2420 nfsm_reqhead(vp
, NFSPROC_READDIRPLUS
,
2421 NFSX_FH(1) + 6 * NFSX_UNSIGNED
);
2423 nfsm_build(tl
, u_int32_t
*, 6 * NFSX_UNSIGNED
);
2424 *tl
++ = cookie
.nfsuquad
[0];
2425 *tl
++ = cookie
.nfsuquad
[1];
2426 *tl
++ = dnp
->n_cookieverf
.nfsuquad
[0];
2427 *tl
++ = dnp
->n_cookieverf
.nfsuquad
[1];
2428 *tl
++ = txdr_unsigned(nmp
->nm_readdirsize
);
2429 *tl
= txdr_unsigned(nmp
->nm_rsize
);
2430 nfsm_request(vp
, NFSPROC_READDIRPLUS
, uiop
->uio_td
, nfs_vpcred(vp
, ND_READ
));
2431 nfsm_postop_attr(vp
, attrflag
, NFS_LATTR_NOSHRINK
);
2436 nfsm_dissect(tl
, u_int32_t
*, 3 * NFSX_UNSIGNED
);
2437 dnp
->n_cookieverf
.nfsuquad
[0] = *tl
++;
2438 dnp
->n_cookieverf
.nfsuquad
[1] = *tl
++;
2439 more_dirs
= fxdr_unsigned(int, *tl
);
2441 /* loop thru the dir entries, doctoring them to 4bsd form */
2442 while (more_dirs
&& bigenough
) {
2443 nfsm_dissect(tl
, u_int32_t
*, 3 * NFSX_UNSIGNED
);
2444 fileno
= fxdr_hyper(tl
);
2445 len
= fxdr_unsigned(int, *(tl
+ 2));
2446 if (len
<= 0 || len
> NFS_MAXNAMLEN
) {
2451 tlen
= nfsm_rndup(len
);
2453 tlen
+= 4; /* To ensure null termination*/
2454 left
= DIRBLKSIZ
- blksiz
;
2455 if ((tlen
+ sizeof(struct nfs_dirent
)) > left
) {
2456 dp
->nfs_reclen
+= left
;
2457 uiop
->uio_iov
->iov_base
+= left
;
2458 uiop
->uio_iov
->iov_len
-= left
;
2459 uiop
->uio_offset
+= left
;
2460 uiop
->uio_resid
-= left
;
2463 if ((tlen
+ sizeof(struct nfs_dirent
)) > uiop
->uio_resid
)
2466 dp
= (struct nfs_dirent
*)uiop
->uio_iov
->iov_base
;
2467 dp
->nfs_ino
= fileno
;
2468 dp
->nfs_namlen
= len
;
2469 dp
->nfs_reclen
= tlen
+ sizeof(struct nfs_dirent
);
2470 dp
->nfs_type
= DT_UNKNOWN
;
2471 blksiz
+= dp
->nfs_reclen
;
2472 if (blksiz
== DIRBLKSIZ
)
2474 uiop
->uio_offset
+= sizeof(struct nfs_dirent
);
2475 uiop
->uio_resid
-= sizeof(struct nfs_dirent
);
2476 uiop
->uio_iov
->iov_base
+= sizeof(struct nfs_dirent
);
2477 uiop
->uio_iov
->iov_len
-= sizeof(struct nfs_dirent
);
2478 nlc
.nlc_nameptr
= uiop
->uio_iov
->iov_base
;
2479 nlc
.nlc_namelen
= len
;
2480 nfsm_mtouio(uiop
, len
);
2481 cp
= uiop
->uio_iov
->iov_base
;
2484 uiop
->uio_iov
->iov_base
+= tlen
;
2485 uiop
->uio_iov
->iov_len
-= tlen
;
2486 uiop
->uio_offset
+= tlen
;
2487 uiop
->uio_resid
-= tlen
;
2489 nfsm_adv(nfsm_rndup(len
));
2490 nfsm_dissect(tl
, u_int32_t
*, 3 * NFSX_UNSIGNED
);
2492 cookie
.nfsuquad
[0] = *tl
++;
2493 cookie
.nfsuquad
[1] = *tl
++;
2498 * Since the attributes are before the file handle
2499 * (sigh), we must skip over the attributes and then
2500 * come back and get them.
2502 attrflag
= fxdr_unsigned(int, *tl
);
2506 nfsm_adv(NFSX_V3FATTR
);
2507 nfsm_dissect(tl
, u_int32_t
*, NFSX_UNSIGNED
);
2508 doit
= fxdr_unsigned(int, *tl
);
2510 nfsm_getfh(fhp
, fhsize
, 1);
2511 if (NFS_CMPFH(dnp
, fhp
, fhsize
)) {
2516 error
= nfs_nget(vp
->v_mount
, fhp
,
2524 if (doit
&& bigenough
) {
2529 nfsm_loadattr(newvp
, (struct vattr
*)0);
2533 IFTODT(VTTOIF(np
->n_vattr
.va_type
));
2535 kprintf("NFS/READDIRPLUS, ENTER %*.*s\n",
2536 nlc
.nlc_namelen
, nlc
.nlc_namelen
,
2538 nch
= cache_nlookup(&dnch
, &nlc
);
2539 cache_setunresolved(&nch
);
2540 nfs_cache_setvp(&nch
, newvp
,
2541 nfspos_cache_timeout
);
2544 kprintf("NFS/READDIRPLUS, UNABLE TO ENTER"
2546 nlc
.nlc_namelen
, nlc
.nlc_namelen
,
2551 /* Just skip over the file handle */
2552 nfsm_dissect(tl
, u_int32_t
*, NFSX_UNSIGNED
);
2553 i
= fxdr_unsigned(int, *tl
);
2554 nfsm_adv(nfsm_rndup(i
));
2556 if (newvp
!= NULLVP
) {
2563 nfsm_dissect(tl
, u_int32_t
*, NFSX_UNSIGNED
);
2564 more_dirs
= fxdr_unsigned(int, *tl
);
2567 * If at end of rpc data, get the eof boolean
2570 nfsm_dissect(tl
, u_int32_t
*, NFSX_UNSIGNED
);
2571 more_dirs
= (fxdr_unsigned(int, *tl
) == 0);
2576 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2577 * by increasing d_reclen for the last record.
2580 left
= DIRBLKSIZ
- blksiz
;
2581 dp
->nfs_reclen
+= left
;
2582 uiop
->uio_iov
->iov_base
+= left
;
2583 uiop
->uio_iov
->iov_len
-= left
;
2584 uiop
->uio_offset
+= left
;
2585 uiop
->uio_resid
-= left
;
2589 * We are now either at the end of the directory or have filled the
2593 dnp
->n_direofoffset
= uiop
->uio_offset
;
2595 if (uiop
->uio_resid
> 0)
2596 kprintf("EEK! readdirplusrpc resid > 0\n");
2597 cookiep
= nfs_getcookie(dnp
, uiop
->uio_offset
, 1);
2601 if (newvp
!= NULLVP
) {
/*
 * NOTE(review): this block was damaged during extraction -- original source
 * lines are split mid-statement and several lines (per the embedded line
 * numbers, e.g. 2620-2621, 2625-2628, 2651, 2653-2656, 2658-2659,
 * 2661, 2663-2666) are missing entirely.  Fragments below are preserved
 * byte-for-byte; restore from upstream nfs_vnops.c before editing.
 *
 * Visible intent: implement "silly rename" -- removing an open file is
 * turned into a rename to a ".nfsA%08x4.4" name, which nfs_inactive()
 * later deletes.  The loop probes candidate names with nfs_lookitup()
 * until one does not exist, then nfs_renameit() performs the rename and
 * the sillyrename record is attached to the nfsnode (n_sillyrename).
 */
2614 * Silly rename. To make the NFS filesystem that is stateless look a little
2615 * more like the "ufs" a remove of an active vnode is translated to a rename
2616 * to a funny looking filename that is removed by nfs_inactive on the
2617 * nfsnode. There is the potential for another process on a different client
2618 * to create the same funny name between the nfs_lookitup() fails and the
2619 * nfs_rename() completes, but...
2622 nfs_sillyrename(struct vnode
*dvp
, struct vnode
*vp
, struct componentname
*cnp
)
2624 struct sillyrename
*sp
;
2629 * We previously purged dvp instead of vp. I don't know why, it
2630 * completely destroys performance. We can't do it anyway with the
2631 * new VFS API since we would be breaking the namecache topology.
2633 cache_purge(vp
); /* XXX */
2636 if (vp
->v_type
== VDIR
)
2637 panic("nfs: sillyrename dir");
2639 MALLOC(sp
, struct sillyrename
*, sizeof (struct sillyrename
),
2640 M_NFSREQ
, M_WAITOK
);
2641 sp
->s_cred
= crdup(cnp
->cn_cred
);
2645 /* Fudge together a funny name */
/*
 * NOTE(review): the cast below truncates a thread pointer to int for the
 * %08x conversion -- lossy on LP64 targets, though uniqueness is retried
 * by the loop below anyway.  Confirm against upstream before changing.
 */
2646 sp
->s_namlen
= ksprintf(sp
->s_name
, ".nfsA%08x4.4", (int)cnp
->cn_td
);
2648 /* Try lookitups until we get one that isn't there */
2649 while (nfs_lookitup(dvp
, sp
->s_name
, sp
->s_namlen
, sp
->s_cred
,
2650 cnp
->cn_td
, (struct nfsnode
**)0) == 0) {
2652 if (sp
->s_name
[4] > 'z') {
2657 error
= nfs_renameit(dvp
, cnp
, sp
);
2660 error
= nfs_lookitup(dvp
, sp
->s_name
, sp
->s_namlen
, sp
->s_cred
,
2662 np
->n_sillyrename
= sp
;
/* error path: free the sillyrename record (original cleanup lines missing) */
2667 kfree((caddr_t
)sp
, M_NFSREQ
);
/*
 * NOTE(review): extraction-damaged block -- statements split across lines
 * and many original lines missing (2676, 2678-2679, 2682-2685, 2691, 2693,
 * 2702-2703, 2711, 2713-2715, 2717-2723, 2726-2733, 2735-2737, 2739+).
 * Fragments preserved byte-for-byte; restore from upstream before editing.
 *
 * Visible intent: issue an NFS LOOKUP RPC for (dvp, name, len).  Depending
 * on npp: NULL = plain lookup; *npp != NULL = refresh the file handle in
 * the existing nfsnode (switching between the inline small-fh buffer and a
 * kmalloc'd M_NFSBIGFH buffer as the handle size crosses NFS_SMALLFH);
 * *npp == NULL = allocate a fresh nfsnode via nfs_nget() and load its
 * attributes (nfsm_postop_attr / nfsm_loadattr).
 */
2672 * Look up a file name and optionally either update the file handle or
2673 * allocate an nfsnode, depending on the value of npp.
2674 * npp == NULL --> just do the lookup
2675 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2677 * *npp != NULL --> update the file handle in the vnode
2680 nfs_lookitup(struct vnode
*dvp
, const char *name
, int len
, struct ucred
*cred
,
2681 struct thread
*td
, struct nfsnode
**npp
)
2686 struct vnode
*newvp
= (struct vnode
*)0;
2687 struct nfsnode
*np
, *dnp
= VTONFS(dvp
);
2688 caddr_t bpos
, dpos
, cp2
;
2689 int error
= 0, fhlen
, attrflag
;
2690 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
2692 int v3
= NFS_ISV3(dvp
);
2694 nfsstats
.rpccnt
[NFSPROC_LOOKUP
]++;
2695 nfsm_reqhead(dvp
, NFSPROC_LOOKUP
,
2696 NFSX_FH(v3
) + NFSX_UNSIGNED
+ nfsm_rndup(len
));
2697 nfsm_fhtom(dvp
, v3
);
2698 nfsm_strtom(name
, len
, NFS_MAXNAMLEN
);
2699 nfsm_request(dvp
, NFSPROC_LOOKUP
, td
, cred
);
2700 if (npp
&& !error
) {
2701 nfsm_getfh(nfhp
, fhlen
, v3
);
/*
 * Resize the nfsnode's file handle storage to match the returned
 * handle: fall back to the inline n_fh when it now fits, or
 * kmalloc an M_NFSBIGFH buffer when it no longer does.
 */
2704 if (np
->n_fhsize
> NFS_SMALLFH
&& fhlen
<= NFS_SMALLFH
) {
2705 kfree((caddr_t
)np
->n_fhp
, M_NFSBIGFH
);
2706 np
->n_fhp
= &np
->n_fh
;
2707 } else if (np
->n_fhsize
<= NFS_SMALLFH
&& fhlen
>NFS_SMALLFH
)
2708 np
->n_fhp
=(nfsfh_t
*)kmalloc(fhlen
,M_NFSBIGFH
,M_WAITOK
);
2709 bcopy((caddr_t
)nfhp
, (caddr_t
)np
->n_fhp
, fhlen
);
2710 np
->n_fhsize
= fhlen
;
2712 } else if (NFS_CMPFH(dnp
, nfhp
, fhlen
)) {
2716 error
= nfs_nget(dvp
->v_mount
, nfhp
, fhlen
, &np
);
2724 nfsm_postop_attr(newvp
, attrflag
, NFS_LATTR_NOSHRINK
);
2725 if (!attrflag
&& *npp
== NULL
) {
2734 nfsm_loadattr(newvp
, (struct vattr
*)0);
2738 if (npp
&& *npp
== NULL
) {
/*
 * NOTE(review): extraction-damaged block -- statements split across lines
 * and original lines missing (2754-2755, 2757-2760, 2765, 2767, 2770,
 * 2773, 2777, 2782, 2784+, including the tl declaration, the "tl += 2"
 * advance after txdr_hyper, m_freem and the nfsmout label).  Fragments
 * preserved byte-for-byte; restore from upstream before editing.
 *
 * Visible intent: NFSv3 COMMIT RPC.  Bails out early unless the mount has
 * a write verifier (NFSSTA_HASWRITEVERF).  Encodes a 64-bit offset plus a
 * 32-bit count, sends the request with the vnode's ND_WRITE cred, then
 * compares the returned write verifier against nm_verf; on mismatch the
 * new verifier is cached and NFSERR_STALEWRITEVERF is returned so callers
 * rewrite their dirty buffers.
 */
2753 * Nfs Version 3 commit rpc
2756 nfs_commit(struct vnode
*vp
, u_quad_t offset
, int cnt
, struct thread
*td
)
2761 struct nfsmount
*nmp
= VFSTONFS(vp
->v_mount
);
2762 caddr_t bpos
, dpos
, cp2
;
2763 int error
= 0, wccflag
= NFSV3_WCCRATTR
;
2764 struct mbuf
*mreq
, *mrep
, *md
, *mb
, *mb2
;
2766 if ((nmp
->nm_state
& NFSSTA_HASWRITEVERF
) == 0)
2768 nfsstats
.rpccnt
[NFSPROC_COMMIT
]++;
2769 nfsm_reqhead(vp
, NFSPROC_COMMIT
, NFSX_FH(1));
2771 nfsm_build(tl
, u_int32_t
*, 3 * NFSX_UNSIGNED
);
2772 txdr_hyper(offset
, tl
);
2774 *tl
= txdr_unsigned(cnt
);
2775 nfsm_request(vp
, NFSPROC_COMMIT
, td
, nfs_vpcred(vp
, ND_WRITE
));
2776 nfsm_wcc_data(vp
, wccflag
);
2778 nfsm_dissect(tl
, u_int32_t
*, NFSX_V3WRITEVERF
);
/* Stale verifier check: server rebooted if the verifier changed. */
2779 if (bcmp((caddr_t
)nmp
->nm_verf
, (caddr_t
)tl
,
2780 NFSX_V3WRITEVERF
)) {
2781 bcopy((caddr_t
)tl
, (caddr_t
)nmp
->nm_verf
,
2783 error
= NFSERR_STALEWRITEVERF
;
2793 * - make nfs_bmap() essentially a no-op that does no translation
2794 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2795 * (Maybe I could use the process's page mapping, but I was concerned that
2796 * Kernel Write might not be enabled and also figured copyout() would do
2797 * a lot more work than bcopy() and also it currently happens in the
2798 * context of the swapper process (2).
2800 * nfs_bmap(struct vnode *a_vp, off_t a_loffset,
2801 * off_t *a_doffsetp, int *a_runp, int *a_runb)
2804 nfs_bmap(struct vop_bmap_args
*ap
)
2806 if (ap
->a_doffsetp
!= NULL
)
2807 *ap
->a_doffsetp
= ap
->a_loffset
;
2808 if (ap
->a_runp
!= NULL
)
2810 if (ap
->a_runb
!= NULL
)
/*
 * NOTE(review): extraction-damaged block -- statements split across lines
 * and original lines missing (2820-2822, 2824, 2826, 2828-2830, 2835,
 * 2837-2838, 2840-2841, 2845, 2848-2849, 2853, 2856+, including the
 * local declarations, the td = NULL branch for async I/O, and the final
 * return).  Fragments preserved byte-for-byte.
 *
 * Visible intent: the strategy entry point for NFS buffers.  Asserts the
 * buffer is locked and not already done, picks curthread for synchronous
 * requests, pushes a new bio level carrying the same 64-bit byte offset,
 * then either queues the bio to an nfsiod via nfs_asyncio() (async case)
 * or performs the I/O directly with nfs_doio().
 */
2818 * For async requests when nfsiod(s) are running, queue the request by
2819 * calling nfs_asyncio(), otherwise just all nfs_doio() to do the
2823 nfs_strategy(struct vop_strategy_args
*ap
)
2825 struct bio
*bio
= ap
->a_bio
;
2827 struct buf
*bp
= bio
->bio_buf
;
2831 KASSERT(bp
->b_cmd
!= BUF_CMD_DONE
,
2832 ("nfs_strategy: buffer %p unexpectedly marked done", bp
));
2833 KASSERT(BUF_REFCNT(bp
) > 0,
2834 ("nfs_strategy: buffer %p not locked", bp
));
2836 if (bp
->b_flags
& B_ASYNC
)
2839 td
= curthread
; /* XXX */
2842 * We probably don't need to push an nbio any more since no
2843 * block conversion is required due to the use of 64 bit byte
2844 * offsets, but do it anyway.
2846 nbio
= push_bio(bio
);
2847 nbio
->bio_offset
= bio
->bio_offset
;
2850 * If the op is asynchronous and an i/o daemon is waiting
2851 * queue the request, wake it up and wait for completion
2852 * otherwise just do it ourselves.
2854 if ((bp
->b_flags
& B_ASYNC
) == 0 || nfs_asyncio(ap
->a_vp
, nbio
, td
))
2855 error
= nfs_doio(ap
->a_vp
, nbio
, td
);
2862 * NB Currently unsupported.
2864 * nfs_mmap(struct vnode *a_vp, int a_fflags, struct ucred *a_cred)
2868 nfs_mmap(struct vop_mmap_args
*ap
)
2874 * fsync vnode op. Just call nfs_flush() with commit == 1.
2876 * nfs_fsync(struct vnode *a_vp, int a_waitfor)
2880 nfs_fsync(struct vop_fsync_args
*ap
)
2882 return (nfs_flush(ap
->a_vp
, ap
->a_waitfor
, curthread
, 1));
/*
 * NOTE(review): extraction-damaged region -- original lines missing
 * (2891, 2893-2894, 2896-2897, 2900, 2904-2905, 2908-2909, 2912-2917,
 * 2919-2923, including most nfs_flush_info fields: waitfor/slpflag/
 * slptimeo/loops/bvsize/beg_off/end_off/vp/td per later uses).
 * Fragments preserved byte-for-byte.
 *
 * Visible intent: state shared between nfs_flush() and its RB_SCAN
 * callback nfs_flush_bp(): the scan mode (flush-new vs collect-for-
 * commit) and an array of up to NFS_COMMITBVECSIZ buffers batched for
 * one COMMIT RPC.
 */
2886 * Flush all the blocks associated with a vnode. Dirty NFS buffers may be
2887 * in one of two states: If B_NEEDCOMMIT is clear then the buffer contains
2888 * new NFS data which needs to be written to the server. If B_NEEDCOMMIT is
2889 * set the buffer contains data that has already been written to the server
2890 * and which now needs a commit RPC.
2892 * If commit is 0 we only take one pass and only flush buffers containing new
2895 * If commit is 1 we take two passes, issuing a commit RPC in the second
2898 * If waitfor is MNT_WAIT and commit is 1, we loop as many times as required
2899 * to completely flush all pending data.
2901 * Note that the RB_SCAN code properly handles the case where the
2902 * callback might block and directly or indirectly (another thread) cause
2903 * the RB tree to change.
2906 #ifndef NFS_COMMITBVECSIZ
2907 #define NFS_COMMITBVECSIZ 16
2910 struct nfs_flush_info
{
2911 enum { NFI_FLUSHNEW
, NFI_COMMIT
} mode
;
2918 struct buf
*bvary
[NFS_COMMITBVECSIZ
];
2924 static int nfs_flush_bp(struct buf
*bp
, void *data
);
2925 static int nfs_flush_docommit(struct nfs_flush_info
*info
, int error
);
/*
 * NOTE(review): extraction-damaged block -- statements split across lines
 * and many original lines missing (2929, 2933-2934, 2936-2937, 2940-2945,
 * 2949-2950, 2953-2954, 2959, 2961-2963, 2966, 2971-2972, 2975, 2977-2981,
 * 2985-2986, 2988, 2990-2995, 2998, 3001-3009, 3011, 3015+, including the
 * do { } loop opener, loop bookkeeping, error/retry declarations and the
 * final return).  Fragments preserved byte-for-byte.
 *
 * Visible intent: flush dirty buffers for a vnode.  Pass 1 (NFI_FLUSHNEW)
 * RB_SCANs v_rbdirty_tree writing new data; if committing and no error,
 * pass 2 (NFI_COMMIT) collects committable buffers and finishes with
 * nfs_flush_docommit().  For MNT_WAIT it sleeps on v_track_write until
 * outstanding writes drain, honoring interruptible ('intr') mounts by
 * switching a PCATCH sleep to a 2*hz timeout once a signal is pending,
 * and re-loops while dirty buffers remain.  A pending NWRITEERR on the
 * nfsnode is harvested into the return value and cleared.
 */
2928 nfs_flush(struct vnode
*vp
, int waitfor
, struct thread
*td
, int commit
)
2930 struct nfsnode
*np
= VTONFS(vp
);
2931 struct nfsmount
*nmp
= VFSTONFS(vp
->v_mount
);
2932 struct nfs_flush_info info
;
2935 bzero(&info
, sizeof(info
));
2938 info
.waitfor
= waitfor
;
2939 info
.slpflag
= (nmp
->nm_flag
& NFSMNT_INT
) ? PCATCH
: 0;
2946 info
.mode
= NFI_FLUSHNEW
;
2947 error
= RB_SCAN(buf_rb_tree
, &vp
->v_rbdirty_tree
, NULL
,
2948 nfs_flush_bp
, &info
);
2951 * Take a second pass if committing and no error occured.
2952 * Clean up any left over collection (whether an error
2955 if (commit
&& error
== 0) {
2956 info
.mode
= NFI_COMMIT
;
2957 error
= RB_SCAN(buf_rb_tree
, &vp
->v_rbdirty_tree
, NULL
,
2958 nfs_flush_bp
, &info
);
2960 error
= nfs_flush_docommit(&info
, error
);
2964 * Wait for pending I/O to complete before checking whether
2965 * any further dirty buffers exist.
2967 while (waitfor
== MNT_WAIT
&& vp
->v_track_write
.bk_active
) {
2968 vp
->v_track_write
.bk_waitflag
= 1;
2969 error
= tsleep(&vp
->v_track_write
,
2970 info
.slpflag
, "nfsfsync", info
.slptimeo
);
2973 * We have to be able to break out if this
2974 * is an 'intr' mount.
2976 if (nfs_sigintr(nmp
, (struct nfsreq
*)0, td
)) {
2982 * Since we do not process pending signals,
2983 * once we get a PCATCH our tsleep() will no
2984 * longer sleep, switch to a fixed timeout
2987 if (info
.slpflag
== PCATCH
) {
2989 info
.slptimeo
= 2 * hz
;
2996 * Loop if we are flushing synchronous as well as committing,
2997 * and dirty buffers are still present. Otherwise we might livelock.
2999 } while (waitfor
== MNT_WAIT
&& commit
&&
3000 error
== 0 && !RB_EMPTY(&vp
->v_rbdirty_tree
));
3003 * The callbacks have to return a negative error to terminate the
3010 * Deal with any error collection
3012 if (np
->n_flag
& NWRITEERR
) {
3013 error
= np
->n_error
;
3014 np
->n_flag
&= ~NWRITEERR
;
/*
 * NOTE(review): extraction-damaged block -- statements split across lines
 * and many original lines missing (3023, 3025-3028, 3030-3031, 3034,
 * 3039-3041, 3043-3044, 3046, 3050-3055, 3057-3065, 3070-3071, 3075-3078,
 * 3080-3082, 3085, 3088-3089, 3092, 3102, 3106+, including the case
 * labels, unlock/bwrite paths and returns).  Fragments preserved
 * byte-for-byte.
 *
 * Visible intent: RB_SCAN callback for nfs_flush().  In NFI_FLUSHNEW mode
 * it locks dirty buffers (blocking with optional LK_PCATCH on later loops
 * of an MNT_WAIT flush, LK_NOWAIT otherwise) and writes those without
 * B_NEEDCOMMIT, setting B_ASYNC.  In NFI_COMMIT mode it batches
 * immediately-lockable B_DELWRI|B_NEEDCOMMIT buffers into info->bvary[],
 * tracking the byte range [beg_off, end_off) from b_bio2.bio_offset plus
 * the dirty window, and flushes the batch via nfs_flush_docommit() when
 * NFS_COMMITBVECSIZ is reached.
 */
3022 nfs_flush_bp(struct buf
*bp
, void *data
)
3024 struct nfs_flush_info
*info
= data
;
3029 switch(info
->mode
) {
3032 if (info
->loops
&& info
->waitfor
== MNT_WAIT
) {
3033 error
= BUF_LOCK(bp
, LK_EXCLUSIVE
| LK_NOWAIT
);
3035 int lkflags
= LK_EXCLUSIVE
| LK_SLEEPFAIL
;
3036 if (info
->slpflag
& PCATCH
)
3037 lkflags
|= LK_PCATCH
;
3038 error
= BUF_TIMELOCK(bp
, lkflags
, "nfsfsync",
3042 error
= BUF_LOCK(bp
, LK_EXCLUSIVE
| LK_NOWAIT
);
3045 KKASSERT(bp
->b_vp
== info
->vp
);
3047 if ((bp
->b_flags
& B_DELWRI
) == 0)
3048 panic("nfs_fsync: not dirty");
3049 if (bp
->b_flags
& B_NEEDCOMMIT
) {
3056 bp
->b_flags
|= B_ASYNC
;
3066 * Only process buffers in need of a commit which we can
3067 * immediately lock. This may prevent a buffer from being
3068 * committed, but the normal flush loop will block on the
3069 * same buffer so we shouldn't get into an endless loop.
3072 if ((bp
->b_flags
& (B_DELWRI
| B_NEEDCOMMIT
)) !=
3073 (B_DELWRI
| B_NEEDCOMMIT
) ||
3074 BUF_LOCK(bp
, LK_EXCLUSIVE
| LK_NOWAIT
) != 0) {
3079 KKASSERT(bp
->b_vp
== info
->vp
);
3083 * NOTE: storing the bp in the bvary[] basically sets
3084 * it up for a commit operation.
3086 * We must call vfs_busy_pages() now so the commit operation
3087 * is interlocked with user modifications to memory mapped
3090 * Note: to avoid loopback deadlocks, we do not
3091 * assign b_runningbufspace.
3093 bp
->b_cmd
= BUF_CMD_WRITE
;
3094 vfs_busy_pages(bp
->b_vp
, bp
);
3095 info
->bvary
[info
->bvsize
] = bp
;
3096 toff
= bp
->b_bio2
.bio_offset
+ bp
->b_dirtyoff
;
3097 if (info
->bvsize
== 0 || toff
< info
->beg_off
)
3098 info
->beg_off
= toff
;
3099 toff
+= (off_t
)(bp
->b_dirtyend
- bp
->b_dirtyoff
);
3100 if (info
->bvsize
== 0 || toff
> info
->end_off
)
3101 info
->end_off
= toff
;
3103 if (info
->bvsize
== NFS_COMMITBVECSIZ
) {
3104 error
= nfs_flush_docommit(info
, 0);
3105 KKASSERT(info
->bvsize
== 0);
/*
 * NOTE(review): extraction-damaged block -- statements split across lines
 * and many original lines missing (3115-3123, 3125, 3129, 3132-3135,
 * 3140-3142, 3145-3146, 3150-3151, 3153, 3156-3158, 3160, 3164,
 * 3168-3169, 3171, 3174, 3176+, including the declarations, the 32-bit
 * clamp branch, the error/success split inside the loop, bvsize reset
 * and the return).  Fragments preserved byte-for-byte.
 *
 * Visible intent: issue one COMMIT RPC via nfs_commit() for the byte
 * range accumulated in info (NFSv3 COMMIT counts are 32-bit, hence the
 * 0x40000000 check; NFSERR_STALEWRITEVERF triggers nfs_clearcommit()).
 * Then each batched buffer either keeps B_DELWRI (commit failed:
 * vfs_unbusy_pages + mark done) or is completed as a faked asynchronous
 * write (B_ASYNC, clear B_ERROR, zero the dirty window, biodone on
 * b_bio1).
 */
3114 nfs_flush_docommit(struct nfs_flush_info
*info
, int error
)
3124 if (info
->bvsize
> 0) {
3126 * Commit data on the server, as required. Note that
3127 * nfs_commit will use the vnode's cred for the commit.
3128 * The NFSv3 commit RPC is limited to a 32 bit byte count.
3130 bytes
= info
->end_off
- info
->beg_off
;
3131 if (bytes
> 0x40000000)
3136 retv
= nfs_commit(vp
, info
->beg_off
,
3137 (int)bytes
, info
->td
);
3138 if (retv
== NFSERR_STALEWRITEVERF
)
3139 nfs_clearcommit(vp
->v_mount
);
3143 * Now, either mark the blocks I/O done or mark the
3144 * blocks dirty, depending on whether the commit
3147 for (i
= 0; i
< info
->bvsize
; ++i
) {
3148 bp
= info
->bvary
[i
];
3149 bp
->b_flags
&= ~(B_NEEDCOMMIT
| B_CLUSTEROK
);
3152 * Error, leave B_DELWRI intact
3154 vfs_unbusy_pages(bp
);
3155 bp
->b_cmd
= BUF_CMD_DONE
;
3159 * Success, remove B_DELWRI ( bundirty() ).
3161 * b_dirtyoff/b_dirtyend seem to be NFS
3162 * specific. We should probably move that
3163 * into bundirty(). XXX
3165 * We are faking an I/O write, we have to
3166 * start the transaction in order to
3167 * immediately biodone() it.
3170 bp
->b_flags
|= B_ASYNC
;
3172 bp
->b_flags
&= ~B_ERROR
;
3173 bp
->b_dirtyoff
= bp
->b_dirtyend
= 0;
3175 biodone(&bp
->b_bio1
);
3184 * NFS advisory byte-level locks.
3185 * Currently unsupported.
3187 * nfs_advlock(struct vnode *a_vp, caddr_t a_id, int a_op, struct flock *a_fl,
3191 nfs_advlock(struct vop_advlock_args
*ap
)
3193 struct nfsnode
*np
= VTONFS(ap
->a_vp
);
3196 * The following kludge is to allow diskless support to work
3197 * until a real NFS lockd is implemented. Basically, just pretend
3198 * that this is a local lock.
3200 return (lf_advlock(ap
, &(np
->n_lockf
), np
->n_size
));
3204 * Print out the contents of an nfsnode.
3206 * nfs_print(struct vnode *a_vp)
3209 nfs_print(struct vop_print_args
*ap
)
3211 struct vnode
*vp
= ap
->a_vp
;
3212 struct nfsnode
*np
= VTONFS(vp
);
3214 kprintf("tag VT_NFS, fileid %lld fsid 0x%x",
3215 np
->n_vattr
.va_fileid
, np
->n_vattr
.va_fsid
);
3216 if (vp
->v_type
== VFIFO
)
/*
 * NOTE(review): extraction-damaged block -- statements split across lines
 * and many original lines missing (3226, 3228-3229, 3231-3233, 3237-3241,
 * 3245, 3248-3256, 3259, 3261-3262, 3264-3266, 3270, 3272, 3276-3280,
 * 3282+, including the declarations, the switch case labels/EROFS
 * return, the goto into the mode check, and the final return).
 * Fragments preserved byte-for-byte.
 *
 * Visible intent: access check for special files, done client-side from
 * cached attributes in the classic iaccess() style: reject writes on
 * read-only mounts (except device-like types per the missing switch
 * cases), grant everything to uid 0, fetch attributes with VOP_GETATTR,
 * then test exactly one of owner / group / other permission bits against
 * the requested mode, yielding 0 or EACCES.
 */
3223 * nfs special file access vnode op.
3224 * Essentially just get vattr and then imitate iaccess() since the device is
3225 * local to the client.
3227 * nfsspec_access(struct vnode *a_vp, int a_mode, struct ucred *a_cred)
3230 nfsspec_access(struct vop_access_args
*ap
)
3234 struct ucred
*cred
= ap
->a_cred
;
3235 struct vnode
*vp
= ap
->a_vp
;
3236 mode_t mode
= ap
->a_mode
;
3242 * Disallow write attempts on filesystems mounted read-only;
3243 * unless the file is a socket, fifo, or a block or character
3244 * device resident on the filesystem.
3246 if ((mode
& VWRITE
) && (vp
->v_mount
->mnt_flag
& MNT_RDONLY
)) {
3247 switch (vp
->v_type
) {
3257 * If you're the super-user,
3258 * you always get access.
3260 if (cred
->cr_uid
== 0)
3263 error
= VOP_GETATTR(vp
, vap
);
3267 * Access check is based on only one of owner, group, public.
3268 * If not owner, then check group. If not a member of the
3269 * group, then check public access.
3271 if (cred
->cr_uid
!= vap
->va_uid
) {
3273 gp
= cred
->cr_groups
;
3274 for (i
= 0; i
< cred
->cr_ngroups
; i
++, gp
++)
3275 if (vap
->va_gid
== *gp
)
3281 error
= (vap
->va_mode
& mode
) == mode
? 0 : EACCES
;
3286 * Read wrapper for special devices.
3288 * nfsspec_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
3289 * struct ucred *a_cred)
3292 nfsspec_read(struct vop_read_args
*ap
)
3294 struct nfsnode
*np
= VTONFS(ap
->a_vp
);
3300 getnanotime(&np
->n_atim
);
3301 return (VOCALL(&spec_vnode_vops
, &ap
->a_head
));
3305 * Write wrapper for special devices.
3307 * nfsspec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
3308 * struct ucred *a_cred)
3311 nfsspec_write(struct vop_write_args
*ap
)
3313 struct nfsnode
*np
= VTONFS(ap
->a_vp
);
3319 getnanotime(&np
->n_mtim
);
3320 return (VOCALL(&spec_vnode_vops
, &ap
->a_head
));
/*
 * NOTE(review): extraction-damaged block -- statements split across lines
 * and original lines missing (3325, 3327, 3329-3330, 3332, 3335-3336,
 * 3338, 3341, 3347-3348, including the vattr declaration/VATTR_NULL
 * initialization and the timestamp bookkeeping before the refcnt test).
 * Fragments preserved byte-for-byte.
 *
 * Visible intent: close wrapper for special devices.  If local access or
 * update times were recorded (NACC/NUPD) and this is the last reference
 * (v_sysref.refcnt == 1) on a non-read-only mount, push the cached
 * n_atim/n_mtim back to the server via VOP_SETATTR with the vnode's
 * ND_WRITE cred (result deliberately ignored), then chain to the
 * generic special-device close.
 */
3324 * Close wrapper for special devices.
3326 * Update the times on the nfsnode then do device close.
3328 * nfsspec_close(struct vnode *a_vp, int a_fflag)
3331 nfsspec_close(struct vop_close_args
*ap
)
3333 struct vnode
*vp
= ap
->a_vp
;
3334 struct nfsnode
*np
= VTONFS(vp
);
3337 if (np
->n_flag
& (NACC
| NUPD
)) {
3339 if (vp
->v_sysref
.refcnt
== 1 &&
3340 (vp
->v_mount
->mnt_flag
& MNT_RDONLY
) == 0) {
3342 if (np
->n_flag
& NACC
)
3343 vattr
.va_atime
= np
->n_atim
;
3344 if (np
->n_flag
& NUPD
)
3345 vattr
.va_mtime
= np
->n_mtim
;
3346 (void)VOP_SETATTR(vp
, &vattr
, nfs_vpcred(vp
, ND_WRITE
));
3349 return (VOCALL(&spec_vnode_vops
, &ap
->a_head
));
3353 * Read wrapper for fifos.
3355 * nfsfifo_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
3356 * struct ucred *a_cred)
3359 nfsfifo_read(struct vop_read_args
*ap
)
3361 struct nfsnode
*np
= VTONFS(ap
->a_vp
);
3367 getnanotime(&np
->n_atim
);
3368 return (VOCALL(&fifo_vnode_vops
, &ap
->a_head
));
3372 * Write wrapper for fifos.
3374 * nfsfifo_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
3375 * struct ucred *a_cred)
3378 nfsfifo_write(struct vop_write_args
*ap
)
3380 struct nfsnode
*np
= VTONFS(ap
->a_vp
);
3386 getnanotime(&np
->n_mtim
);
3387 return (VOCALL(&fifo_vnode_vops
, &ap
->a_head
));
/*
 * NOTE(review): extraction-damaged block -- statements split across lines
 * and original lines missing (3392, 3394, 3396-3397, 3399, 3402-3404,
 * 3406, 3408, 3410-3411, 3414, 3420-3421, and the function's closing
 * lines fall past the end of this chunk).  The NACC/NUPD tests at
 * original lines 3407/3409 appear to refresh n_atim/n_mtim to the
 * current time before the push -- their bodies are missing here; confirm
 * against upstream.  Fragments preserved byte-for-byte.
 *
 * Visible intent: close wrapper for fifos, mirroring nfsspec_close(): on
 * last reference (v_sysref.refcnt == 1) of a non-read-only mount, push
 * locally recorded access/update times to the server via VOP_SETATTR
 * (result ignored), then chain to the generic fifo close.
 */
3391 * Close wrapper for fifos.
3393 * Update the times on the nfsnode then do fifo close.
3395 * nfsfifo_close(struct vnode *a_vp, int a_fflag)
3398 nfsfifo_close(struct vop_close_args
*ap
)
3400 struct vnode
*vp
= ap
->a_vp
;
3401 struct nfsnode
*np
= VTONFS(vp
);
3405 if (np
->n_flag
& (NACC
| NUPD
)) {
3407 if (np
->n_flag
& NACC
)
3409 if (np
->n_flag
& NUPD
)
3412 if (vp
->v_sysref
.refcnt
== 1 &&
3413 (vp
->v_mount
->mnt_flag
& MNT_RDONLY
) == 0) {
3415 if (np
->n_flag
& NACC
)
3416 vattr
.va_atime
= np
->n_atim
;
3417 if (np
->n_flag
& NUPD
)
3418 vattr
.va_mtime
= np
->n_mtim
;
3419 (void)VOP_SETATTR(vp
, &vattr
, nfs_vpcred(vp
, ND_WRITE
));
3422 return (VOCALL(&fifo_vnode_vops
, &ap
->a_head
));