sys / vfs / nwfs / nwfs_io.c — NetWare filesystem I/O routines
[dragonfly.git] / sys / vfs / nwfs / nwfs_io.c
blob: f56efab6cfda12c9ab491158ed0ed6a717332c4a
1 /*
2 * Copyright (c) 1999, Boris Popov
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * $FreeBSD: src/sys/nwfs/nwfs_io.c,v 1.6.2.1 2000/10/25 02:11:10 bp Exp $
33 * $DragonFly: src/sys/vfs/nwfs/nwfs_io.c,v 1.24 2007/02/22 15:50:50 corecode Exp $
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/resourcevar.h> /* defines plimit structure in proc struct */
39 #include <sys/kernel.h>
40 #include <sys/buf.h>
41 #include <sys/proc.h>
42 #include <sys/mount.h>
43 #include <sys/namei.h>
44 #include <sys/vnode.h>
45 #include <sys/dirent.h>
46 #include <sys/signalvar.h>
47 #include <sys/sysctl.h>
49 #include <vm/vm.h>
50 #include <vm/vm_page.h>
51 #include <vm/vm_extern.h>
52 #include <vm/vm_object.h>
53 #include <vm/vm_pager.h>
54 #include <vm/vnode_pager.h>
56 #include <netproto/ncp/ncp.h>
57 #include <netproto/ncp/ncp_conn.h>
58 #include <netproto/ncp/ncp_subr.h>
60 #include <sys/thread2.h>
62 #include <machine/limits.h>
64 #include "nwfs.h"
65 #include "nwfs_node.h"
66 #include "nwfs_subr.h"
68 static int nwfs_fastlookup = 1;
70 SYSCTL_DECL(_vfs_nwfs);
71 SYSCTL_INT(_vfs_nwfs, OID_AUTO, fastlookup, CTLFLAG_RW, &nwfs_fastlookup, 0, "");
74 extern int nwfs_pbuf_freecnt;
76 #define NWFS_RWCACHE
78 static int
79 nwfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
81 struct nwmount *nmp = VTONWFS(vp);
82 int error, i;
83 struct nwnode *np;
84 struct nw_entry_info fattr;
85 struct vnode *newvp;
86 ncpfid fid;
87 ino_t d_ino;
88 size_t d_namlen;
89 const char *d_name;
90 uint8_t d_type;
92 np = VTONW(vp);
93 NCPVNDEBUG("dirname='%s'\n",np->n_name);
94 if (uio->uio_offset < 0 || uio->uio_offset > INT_MAX)
95 return (EINVAL);
96 error = 0;
97 i = (int)uio->uio_offset; /* offset in directory */
98 if (i == 0) {
99 error = ncp_initsearch(vp, uio->uio_td, cred);
100 if (error) {
101 NCPVNDEBUG("cannot initialize search, error=%d",error);
102 return( error );
106 for (; !error && uio->uio_resid > 0; i++) {
107 switch (i) {
108 case 0: /* `.' */
109 d_ino = np->n_fid.f_id;
110 if (d_ino == 0)
111 d_ino = NWFS_ROOT_INO;
112 d_namlen = 1;
113 d_name = ".";
114 d_type = DT_DIR;
115 break;
116 case 1: /* `..' */
117 d_ino = np->n_parent.f_id;
118 if (d_ino == 0)
119 d_ino = NWFS_ROOT_INO;
120 d_namlen = 2;
121 d_name = "..";
122 d_type = DT_DIR;
123 break;
124 default:
125 error = ncp_search_for_file_or_subdir(nmp, &np->n_seq, &fattr, uio->uio_td, cred);
126 if (error && error < 0x80)
127 goto done;
128 d_ino = fattr.dirEntNum;
129 d_type = (fattr.attributes & aDIR) ? DT_DIR : DT_REG;
130 d_namlen = fattr.nameLen;
131 d_name = fattr.entryName;
132 #if 0
133 if (error && eofflag) {
134 /* *eofflag = 1;*/
135 break;
137 #endif
138 break;
140 if (nwfs_fastlookup && !error && i > 1) {
141 fid.f_id = fattr.dirEntNum;
142 fid.f_parent = np->n_fid.f_id;
143 error = nwfs_nget(vp->v_mount, fid, &fattr, vp, &newvp);
144 if (!error) {
145 VTONW(newvp)->n_ctime = VTONW(newvp)->n_vattr.va_ctime.tv_sec;
146 vput(newvp);
147 } else
148 error = 0;
150 if (error >= 0x80) {
151 error = 0;
152 break;
154 if (vop_write_dirent(&error, uio, d_ino, d_type, d_namlen, d_name))
155 break;
157 done:
158 uio->uio_offset = i;
159 return (error);
163 nwfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
165 struct nwmount *nmp = VFSTONWFS(vp->v_mount);
166 struct nwnode *np = VTONW(vp);
167 struct thread *td;
168 struct vattr vattr;
169 int error, biosize;
171 if (vp->v_type != VREG && vp->v_type != VDIR) {
172 kprintf("%s: vn types other than VREG or VDIR are unsupported !\n",__func__);
173 return EIO;
175 if (uiop->uio_resid == 0) return 0;
176 if (uiop->uio_offset < 0) return EINVAL;
177 td = uiop->uio_td;
178 if (vp->v_type == VDIR) {
179 error = nwfs_readvdir(vp, uiop, cred);
180 return error;
182 biosize = NWFSTOCONN(nmp)->buffer_size;
183 if (np->n_flag & NMODIFIED) {
184 nwfs_attr_cacheremove(vp);
185 error = VOP_GETATTR(vp, &vattr);
186 if (error) return (error);
187 np->n_mtime = vattr.va_mtime.tv_sec;
188 } else {
189 error = VOP_GETATTR(vp, &vattr);
190 if (error) return (error);
191 if (np->n_mtime != vattr.va_mtime.tv_sec) {
192 error = nwfs_vinvalbuf(vp, V_SAVE, 1);
193 if (error) return (error);
194 np->n_mtime = vattr.va_mtime.tv_sec;
197 error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop,cred);
198 return (error);
202 nwfs_writevnode(struct vnode *vp, struct uio *uiop, struct ucred *cred,
203 int ioflag)
205 struct nwmount *nmp = VTONWFS(vp);
206 struct nwnode *np = VTONW(vp);
207 struct thread *td;
208 /* struct vattr vattr;*/
209 int error = 0;
211 if (vp->v_type != VREG) {
212 kprintf("%s: vn types other than VREG unsupported !\n",__func__);
213 return EIO;
215 NCPVNDEBUG("ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
216 if (uiop->uio_offset < 0) return EINVAL;
217 td = uiop->uio_td;
218 if (ioflag & (IO_APPEND | IO_SYNC)) {
219 if (np->n_flag & NMODIFIED) {
220 nwfs_attr_cacheremove(vp);
221 error = nwfs_vinvalbuf(vp, V_SAVE, 1);
222 if (error) return (error);
224 if (ioflag & IO_APPEND) {
225 /* We can relay only on local information about file size,
226 * because until file is closed NetWare will not return
227 * the correct size. */
228 #if notyet
229 nwfs_attr_cacheremove(vp);
230 error = VOP_GETATTR(vp, &vattr);
231 if (error) return (error);
232 #endif
233 uiop->uio_offset = np->n_size;
236 if (uiop->uio_resid == 0) return 0;
237 if (td->td_proc && uiop->uio_offset + uiop->uio_resid >
238 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
239 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
240 return (EFBIG);
242 error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cred);
243 NCPVNDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
244 if (!error) {
245 if (uiop->uio_offset > np->n_size) {
246 np->n_vattr.va_size = np->n_size = uiop->uio_offset;
247 vnode_pager_setsize(vp, np->n_size);
250 return (error);
254 * Do an I/O operation to/from a cache block.
257 nwfs_doio(struct vnode *vp, struct bio *bio, struct ucred *cr, struct thread *td)
259 struct buf *bp = bio->bio_buf;
260 struct uio *uiop;
261 struct nwnode *np;
262 struct nwmount *nmp;
263 int error = 0;
264 struct uio uio;
265 struct iovec io;
267 np = VTONW(vp);
268 nmp = VFSTONWFS(vp->v_mount);
269 uiop = &uio;
270 uiop->uio_iov = &io;
271 uiop->uio_iovcnt = 1;
272 uiop->uio_segflg = UIO_SYSSPACE;
273 uiop->uio_td = td;
275 if (bp->b_cmd == BUF_CMD_READ) {
276 io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
277 io.iov_base = bp->b_data;
278 uiop->uio_rw = UIO_READ;
279 switch (vp->v_type) {
280 case VREG:
281 uiop->uio_offset = bio->bio_offset;
282 error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
283 if (error)
284 break;
285 if (uiop->uio_resid) {
286 size_t left = uiop->uio_resid;
287 size_t nread = bp->b_bcount - left;
288 if (left > 0)
289 bzero((char *)bp->b_data + nread, left);
291 break;
292 /* case VDIR:
293 nfsstats.readdir_bios++;
294 uiop->uio_offset = bio->bio_offset;
295 if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
296 error = nfs_readdirplusrpc(vp, uiop, cr);
297 if (error == NFSERR_NOTSUPP)
298 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
300 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
301 error = nfs_readdirrpc(vp, uiop, cr);
302 if (error == 0 && uiop->uio_resid == (size_t)bp->b_bcount)
303 bp->b_flags |= B_INVAL;
304 break;
306 default:
307 kprintf("nwfs_doio: type %x unexpected\n",vp->v_type);
308 break;
310 if (error) {
311 bp->b_flags |= B_ERROR;
312 bp->b_error = error;
314 } else { /* write */
315 KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
316 if (bio->bio_offset + bp->b_dirtyend > np->n_size)
317 bp->b_dirtyend = np->n_size - bio->bio_offset;
319 if (bp->b_dirtyend > bp->b_dirtyoff) {
320 io.iov_len = uiop->uio_resid =
321 (size_t)(bp->b_dirtyend - bp->b_dirtyoff);
322 uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
323 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
324 uiop->uio_rw = UIO_WRITE;
325 error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
328 * For an interrupted write, the buffer is still valid
329 * and the write hasn't been pushed to the server yet,
330 * so we can't set B_ERROR and report the interruption
331 * by setting B_EINTR. For the async case, B_EINTR
332 * is not relevant, so the rpc attempt is essentially
333 * a noop. For the case of a V3 write rpc not being
334 * committed to stable storage, the block is still
335 * dirty and requires either a commit rpc or another
336 * write rpc with iomode == NFSV3WRITE_FILESYNC before
337 * the block is reused. This is indicated by setting
338 * the B_DELWRI and B_NEEDCOMMIT flags.
340 if (error == EINTR
341 || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
343 crit_enter();
344 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
345 if ((bp->b_flags & B_PAGING) == 0)
346 bdirty(bp);
347 bp->b_flags |= B_EINTR;
348 crit_exit();
349 } else {
350 if (error) {
351 bp->b_flags |= B_ERROR;
352 bp->b_error /*= np->n_error */= error;
353 /* np->n_flag |= NWRITEERR;*/
355 bp->b_dirtyoff = bp->b_dirtyend = 0;
357 } else {
358 bp->b_resid = 0;
359 biodone(bio);
360 return (0);
363 bp->b_resid = (int)uiop->uio_resid;
364 biodone(bio);
365 return (error);
369 * Vnode op for VM getpages.
370 * Wish wish .... get rid from multiple IO routines
372 * nwfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
373 * int a_reqpage, vm_ooffset_t a_offset)
376 nwfs_getpages(struct vop_getpages_args *ap)
378 #ifndef NWFS_RWCACHE
379 return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
380 ap->a_reqpage);
381 #else
382 int i, error, npages;
383 size_t nextoff, toff;
384 size_t count;
385 size_t size;
386 struct uio uio;
387 struct iovec iov;
388 vm_offset_t kva;
389 struct buf *bp;
390 struct vnode *vp;
391 struct thread *td = curthread; /* XXX */
392 struct ucred *cred;
393 struct nwmount *nmp;
394 struct nwnode *np;
395 vm_page_t *pages;
397 KKASSERT(td->td_proc);
398 cred = td->td_proc->p_ucred;
400 vp = ap->a_vp;
401 np = VTONW(vp);
402 nmp = VFSTONWFS(vp->v_mount);
403 pages = ap->a_m;
404 count = (size_t)ap->a_count;
406 if (vp->v_object == NULL) {
407 kprintf("nwfs_getpages: called with non-merged cache vnode??\n");
408 return VM_PAGER_ERROR;
411 bp = getpbuf(&nwfs_pbuf_freecnt);
412 npages = btoc(count);
413 kva = (vm_offset_t) bp->b_data;
414 pmap_qenter(kva, pages, npages);
416 iov.iov_base = (caddr_t) kva;
417 iov.iov_len = count;
418 uio.uio_iov = &iov;
419 uio.uio_iovcnt = 1;
420 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
421 uio.uio_resid = count;
422 uio.uio_segflg = UIO_SYSSPACE;
423 uio.uio_rw = UIO_READ;
424 uio.uio_td = td;
426 error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, &uio,cred);
427 pmap_qremove(kva, npages);
429 relpbuf(bp, &nwfs_pbuf_freecnt);
431 if (error && (uio.uio_resid == count)) {
432 kprintf("nwfs_getpages: error %d\n",error);
433 for (i = 0; i < npages; i++) {
434 if (ap->a_reqpage != i)
435 vnode_pager_freepage(pages[i]);
437 return VM_PAGER_ERROR;
440 size = count - uio.uio_resid;
442 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
443 vm_page_t m;
444 nextoff = toff + PAGE_SIZE;
445 m = pages[i];
447 m->flags &= ~PG_ZERO;
449 if (nextoff <= size) {
450 m->valid = VM_PAGE_BITS_ALL;
451 m->dirty = 0;
452 } else {
453 int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
454 vm_page_set_validclean(m, 0, nvalid);
457 if (i != ap->a_reqpage) {
459 * Whether or not to leave the page activated is up in
460 * the air, but we should put the page on a page queue
461 * somewhere (it already is in the object). Result:
462 * It appears that emperical results show that
463 * deactivating pages is best.
467 * Just in case someone was asking for this page we
468 * now tell them that it is ok to use.
470 if (!error) {
471 if (m->flags & PG_WANTED)
472 vm_page_activate(m);
473 else
474 vm_page_deactivate(m);
475 vm_page_wakeup(m);
476 } else {
477 vnode_pager_freepage(m);
481 return 0;
482 #endif /* NWFS_RWCACHE */
486 * Vnode op for VM putpages.
487 * possible bug: all IO done in sync mode
488 * Note that vop_close always invalidate pages before close, so it's
489 * not necessary to open vnode.
491 * nwfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
492 * int a_sync, int *a_rtvals, vm_ooffset_t a_offset)
495 nwfs_putpages(struct vop_putpages_args *ap)
497 int error;
498 struct thread *td = curthread; /* XXX */
499 struct vnode *vp = ap->a_vp;
500 struct ucred *cred;
502 #ifndef NWFS_RWCACHE
503 KKASSERT(td->td_proc);
504 cred = td->td_proc->p_ucred; /* XXX */
505 VOP_OPEN(vp, FWRITE, cred, NULL);
506 error = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
507 ap->a_sync, ap->a_rtvals);
508 VOP_CLOSE(vp, FWRITE, cred);
509 return error;
510 #else
511 struct uio uio;
512 struct iovec iov;
513 vm_offset_t kva;
514 struct buf *bp;
515 int i, npages, count;
516 int *rtvals;
517 struct nwmount *nmp;
518 struct nwnode *np;
519 vm_page_t *pages;
521 KKASSERT(td->td_proc);
522 cred = td->td_proc->p_ucred; /* XXX */
524 /* VOP_OPEN(vp, FWRITE, cred, NULL);*/
525 np = VTONW(vp);
526 nmp = VFSTONWFS(vp->v_mount);
527 pages = ap->a_m;
528 count = ap->a_count;
529 rtvals = ap->a_rtvals;
530 npages = btoc(count);
532 for (i = 0; i < npages; i++) {
533 rtvals[i] = VM_PAGER_AGAIN;
536 bp = getpbuf(&nwfs_pbuf_freecnt);
537 kva = (vm_offset_t) bp->b_data;
538 pmap_qenter(kva, pages, npages);
540 iov.iov_base = (caddr_t) kva;
541 iov.iov_len = count;
542 uio.uio_iov = &iov;
543 uio.uio_iovcnt = 1;
544 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
545 uio.uio_resid = count;
546 uio.uio_segflg = UIO_SYSSPACE;
547 uio.uio_rw = UIO_WRITE;
548 uio.uio_td = td;
549 NCPVNDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);
551 error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
552 /* VOP_CLOSE(vp, FWRITE, cred);*/
553 NCPVNDEBUG("paged write done: %d\n", error);
555 pmap_qremove(kva, npages);
556 relpbuf(bp, &nwfs_pbuf_freecnt);
558 if (!error) {
559 int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
560 for (i = 0; i < nwritten; i++) {
561 rtvals[i] = VM_PAGER_OK;
562 pages[i]->dirty = 0;
565 return rtvals[0];
566 #endif /* NWFS_RWCACHE */
569 * Flush and invalidate all dirty buffers. If another process is already
570 * doing the flush, just wait for completion.
573 nwfs_vinvalbuf(struct vnode *vp, int flags, int intrflg)
575 struct nwnode *np = VTONW(vp);
576 /* struct nwmount *nmp = VTONWFS(vp);*/
577 int error = 0, slpflag, slptimeo;
579 if (vp->v_flag & VRECLAIMED) {
580 return (0);
582 if (intrflg) {
583 slpflag = PCATCH;
584 slptimeo = 2 * hz;
585 } else {
586 slpflag = 0;
587 slptimeo = 0;
589 while (np->n_flag & NFLUSHINPROG) {
590 np->n_flag |= NFLUSHWANT;
591 error = tsleep((caddr_t)&np->n_flag, 0, "nwfsvinv", slptimeo);
592 error = ncp_chkintr(NWFSTOCONN(VTONWFS(vp)), curthread);
593 if (error == EINTR && intrflg)
594 return EINTR;
596 np->n_flag |= NFLUSHINPROG;
597 error = vinvalbuf(vp, flags, slpflag, 0);
598 while (error) {
599 if (intrflg && (error == ERESTART || error == EINTR)) {
600 np->n_flag &= ~NFLUSHINPROG;
601 if (np->n_flag & NFLUSHWANT) {
602 np->n_flag &= ~NFLUSHWANT;
603 wakeup((caddr_t)&np->n_flag);
605 return EINTR;
607 error = vinvalbuf(vp, flags, slpflag, 0);
609 np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
610 if (np->n_flag & NFLUSHWANT) {
611 np->n_flag &= ~NFLUSHWANT;
612 wakeup((caddr_t)&np->n_flag);
614 return (error);