/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * $FreeBSD: src/sys/ufs/ufs/ufs_readwrite.c,v 1.65.2.14 2003/04/04 22:21:29 tegge Exp $
 */

#define	BLKSIZE(a, b, c)	blksize(a, b, c)
#define	FS			struct fs
#define	I_FS			i_fs

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <sys/event.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm_page2.h>
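
/*
 * VN_KNOTE posts kqueue events (e.g. NOTE_WRITE, NOTE_EXTEND) to the
 * vnode's knote list; used at the bottom of ffs_write() below.
 */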
#define VN_KNOTE(vp, b)	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, (b))

SYSCTL_DECL(_vfs_ffs);

/*
 * Vnode op for reading.
 *
 * ffs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	    struct ucred *a_cred)
 */
/* ARGSUSED */
int
ffs_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	FS *fs;
	struct buf *bp;
	off_t bytesinfile;
	int xfersize, blkoffset;
	int error, orig_resid;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif

	fs = ip->I_FS;
	if ((uint64_t)uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);

	orig_resid = uio->uio_resid;
	if (orig_resid <= 0)
		return (0);

	bytesinfile = ip->i_size - uio->uio_offset;
	if (bytesinfile <= 0) {
		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
			ip->i_flag |= IN_ACCESS;
		return (0);
	}

	/*
	 * Ok so we couldn't do it all in one vm trick...
	 * so cycle around trying smaller bites..
	 */
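	/*
	 * Each pass maps one logical block through ffs_blkatoff_ra(),
	 * which takes seqcount (the access-pattern hint carried in the
	 * upper bits of a_ioflag) to drive read-ahead.
	 */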
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;

		error = ffs_blkatoff_ra(vp, uio->uio_offset, NULL,
					&bp, seqcount);
		if (error)
			break;

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 *
		 * XXX b_resid is only valid when an actual I/O has occurred
		 * and may be incorrect if the buffer is B_CACHE or if the
		 * last op on the buffer was a failed write.  This KASSERT
		 * is a precursor to removing it from the UFS code.
		 */
		KASSERT(bp->b_resid == 0, ("bp->b_resid != 0"));

		/*
		 * Calculate how much data we can copy
		 */
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = bp->b_bufsize - blkoffset;
		if (xfersize > uio->uio_resid)
			xfersize = uio->uio_resid;
		if (xfersize > bytesinfile)
			xfersize = bytesinfile;
		if (xfersize <= 0) {
			panic("ufs_readwrite: impossible xfersize: %d",
			      xfersize);
		}
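
		/*
		 * Example: on an 8K-block filesystem with uio_offset 6K
		 * into the block, blkoffset is 6144 and at most 2048
		 * bytes move this pass; xfersize was further capped by
		 * uio_resid and bytesinfile above.
		 */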
		/*
		 * otherwise use the general form
		 */
		error = uiomovebp(bp, bp->b_data + blkoffset, xfersize, uio);

		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever made the request take
			 * care of freeing it.  We just queue it onto
			 * another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}
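
	/*
	 * Mark the inode for an access-time update unless the mount is
	 * MNT_NOATIME; a partial transfer still counts as an access.
	 */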
	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}

/*
 * Vnode op for writing.
 *
 * ffs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
int
ffs_write(struct vop_write_args *ap)
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	FS *fs;
	struct buf *bp;
	ufs_daddr_t lbn;
	off_t osize;
	off_t nsize;
	int seqcount;
	int blkoffset, error, extended, flags, ioflag, resid, size, xfersize;
	struct thread *td;

	extended = 0;
	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		      (int)uio->uio_offset, (int)uio->uio_resid);
	}

	fs = ip->I_FS;
	if (uio->uio_offset < 0 ||
	    (uint64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) {
		return (EFBIG);
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	td = uio->uio_td;
	if (vp->v_type == VREG && td && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	resid = uio->uio_resid;
	osize = ip->i_size;

	/*
	 * NOTE! These B_ flags are actually balloc-only flags, not buffer
	 * flags.  They are similar to the BA_ flags in fbsd.
	 */
	if (seqcount > B_SEQMAX)
		flags = B_SEQMAX << B_SEQSHIFT;
	else
		flags = seqcount << B_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= B_SYNC;
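
	/*
	 * A UIO_NOCOPY write comes from the VM system (e.g. a putpages
	 * flushing pages that are already valid), so no user data is
	 * copied and the last-write timestamp is handled differently.
	 */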
	if (uio->uio_segflg == UIO_NOCOPY)
		ip->i_flag |= IN_NOCOPYWRITE;
	else
		vclrflags(vp, VLASTWRITETS);

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		if (uio->uio_offset + xfersize > ip->i_size) {
			nsize = uio->uio_offset + xfersize;
			nvnode_pager_setsize(vp, nsize,
					     blkoffresize(fs, nsize),
					     blkoff(fs, nsize));
		}
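
		/*
		 * Note that the VM object was told about the new file
		 * size above, before the block allocation below, so the
		 * pages backing the extended range exist by the time
		 * the buffer is mapped.
		 */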

#if 0
		/*
		 * If doing a dummy write to flush the buffer for a
		 * putpages we must perform a read-before-write to
		 * fill in any missing spots and clear any invalid
		 * areas.  Otherwise a multi-page buffer may not properly
		 * flush.
		 *
		 * We must clear any invalid areas
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			error = ffs_blkatoff(vp, uio->uio_offset, NULL, &bp);
			if (error)
				break;
			bqrelse(bp);
		}
#endif

		/*
		 * We must clear invalid areas.
		 */
		if (xfersize < fs->fs_bsize || uio->uio_segflg == UIO_NOCOPY)
			flags |= B_CLRBUF;
		else
			flags &= ~B_CLRBUF;

		/* XXX is uio->uio_offset the right thing here? */
		error = VOP_BALLOC(vp, uio->uio_offset, xfersize,
				   ap->a_cred, flags, &bp);
		if (error != 0)
			break;

		/*
		 * If the buffer is not valid and we did not clear garbage
		 * out above, we have to do so here even though the write
		 * covers the entire buffer in order to avoid a mmap()/write
		 * race where another process may see the garbage prior to
		 * the uiomove() for a write replacing it.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && (flags & B_CLRBUF) == 0)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			extended = 1;
		}

		size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error = uiomovebp(bp, bp->b_data + blkoffset, xfersize, uio);

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_paging_severe() ||
			   buf_dirty_count_severe() ||
			   (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(bp, (off_t)ip->i_size,
					      fs->fs_bsize, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}

	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		ip->i_mode &= ~(ISUID | ISGID);
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
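
	/*
	 * On error with IO_UNIT the partial write is backed out: the
	 * file is truncated back to its original size and the uio is
	 * rewound, giving the caller all-or-nothing semantics.
	 */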
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize, ioflag & IO_SYNC,
					   ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		error = ffs_update(vp, 1);
	} else {
		ufs_itimes(vp);
	}

	if (uio->uio_segflg == UIO_NOCOPY)
		ip->i_flag &= ~IN_NOCOPYWRITE;

	return (error);
}