/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1984, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*  Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T  */
/*    All Rights Reserved  */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/ksynch.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/filio.h>
#include <sys/policy.h>
#include <sys/fs/ufs_fs.h>
#include <sys/fs/ufs_lockfs.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_fsdir.h>
#include <sys/fs/ufs_quota.h>
#include <sys/fs/ufs_log.h>
#include <sys/fs/ufs_snap.h>
#include <sys/fs/ufs_trans.h>
#include <sys/fs/ufs_panic.h>
#include <sys/fs/ufs_bio.h>
#include <sys/dirent.h>		/* must be AFTER <sys/fs/fsdir.h>! */
#include <sys/errno.h>
#include <sys/fssnap_if.h>
#include <sys/unistd.h>
#include <sys/sunddi.h>
#include <sys/filio.h>		/* _FIOIO */
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#include <vm/seg_kmem.h>
#include <fs/fs_subr.h>
#include <sys/fs/decomp.h>
static struct instats ins;

static int ufs_getpage_ra(struct vnode *, u_offset_t, struct seg *, caddr_t);
static int ufs_getpage_miss(struct vnode *, u_offset_t, size_t, struct seg *,
    caddr_t, struct page **, size_t, enum seg_rw, int);
static int ufs_open(struct vnode **, int, struct cred *, caller_context_t *);
static int ufs_close(struct vnode *, int, int, offset_t, struct cred *,
    caller_context_t *);
static int ufs_read(struct vnode *, struct uio *, int, struct cred *,
    struct caller_context *);
static int ufs_write(struct vnode *, struct uio *, int, struct cred *,
    struct caller_context *);
static int ufs_ioctl(struct vnode *, int, intptr_t, int, struct cred *,
    int *, caller_context_t *);
static int ufs_getattr(struct vnode *, struct vattr *, int, struct cred *,
    caller_context_t *);
static int ufs_setattr(struct vnode *, struct vattr *, int, struct cred *,
    caller_context_t *);
static int ufs_access(struct vnode *, int, int, struct cred *,
    caller_context_t *);
static int ufs_lookup(struct vnode *, char *, struct vnode **,
    struct pathname *, int, struct vnode *, struct cred *,
    caller_context_t *, int *, pathname_t *);
static int ufs_create(struct vnode *, char *, struct vattr *, enum vcexcl,
    int, struct vnode **, struct cred *, int,
    caller_context_t *, vsecattr_t *);
static int ufs_remove(struct vnode *, char *, struct cred *,
    caller_context_t *, int);
static int ufs_link(struct vnode *, struct vnode *, char *, struct cred *,
    caller_context_t *, int);
static int ufs_rename(struct vnode *, char *, struct vnode *, char *,
    struct cred *, caller_context_t *, int);
static int ufs_mkdir(struct vnode *, char *, struct vattr *, struct vnode **,
    struct cred *, caller_context_t *, int, vsecattr_t *);
static int ufs_rmdir(struct vnode *, char *, struct vnode *, struct cred *,
    caller_context_t *, int);
static int ufs_readdir(struct vnode *, struct uio *, struct cred *, int *,
    caller_context_t *, int);
static int ufs_symlink(struct vnode *, char *, struct vattr *, char *,
    struct cred *, caller_context_t *, int);
static int ufs_readlink(struct vnode *, struct uio *, struct cred *,
    caller_context_t *);
static int ufs_fsync(struct vnode *, int, struct cred *, caller_context_t *);
static void ufs_inactive(struct vnode *, struct cred *, caller_context_t *);
static int ufs_fid(struct vnode *, struct fid *, caller_context_t *);
static int ufs_rwlock(struct vnode *, int, caller_context_t *);
static void ufs_rwunlock(struct vnode *, int, caller_context_t *);
static int ufs_seek(struct vnode *, offset_t, offset_t *, caller_context_t *);
static int ufs_frlock(struct vnode *, int, struct flock64 *, int, offset_t,
    struct flk_callback *, struct cred *,
    caller_context_t *);
static int ufs_space(struct vnode *, int, struct flock64 *, int, offset_t,
    cred_t *, caller_context_t *);
static int ufs_getpage(struct vnode *, offset_t, size_t, uint_t *,
    struct page **, size_t, struct seg *, caddr_t,
    enum seg_rw, struct cred *, caller_context_t *);
static int ufs_putpage(struct vnode *, offset_t, size_t, int, struct cred *,
    caller_context_t *);
static int ufs_putpages(struct vnode *, offset_t, size_t, int, struct cred *);
static int ufs_map(struct vnode *, offset_t, struct as *, caddr_t *, size_t,
    uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static int ufs_addmap(struct vnode *, offset_t, struct as *, caddr_t, size_t,
    uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static int ufs_delmap(struct vnode *, offset_t, struct as *, caddr_t, size_t,
    uint_t, uint_t, uint_t, struct cred *, caller_context_t *);
static int ufs_poll(vnode_t *, short, int, short *, struct pollhead **,
    caller_context_t *);
static int ufs_dump(vnode_t *, caddr_t, offset_t, offset_t,
    caller_context_t *);
static int ufs_l_pathconf(struct vnode *, int, ulong_t *, struct cred *,
    caller_context_t *);
static int ufs_pageio(struct vnode *, struct page *, u_offset_t, size_t, int,
    struct cred *, caller_context_t *);
static int ufs_dumpctl(vnode_t *, int, offset_t *, caller_context_t *);
static daddr32_t *save_dblks(struct inode *, struct ufsvfs *, daddr32_t *,
    daddr32_t *, int, int);
static int ufs_getsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
    caller_context_t *);
static int ufs_setsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
    caller_context_t *);
static int ufs_priv_access(void *, int, struct cred *);
static int ufs_eventlookup(struct vnode *, char *, struct cred *,
    struct vnode **);
extern int as_map_locked(struct as *, caddr_t, size_t, int ((*)()), void *);
/*
 * For lockfs: ulockfs begin/end is now inlined in the ufs_xxx functions.
 *
 * XXX - ULOCKFS in fs_pathconf and ufs_ioctl is not inlined yet.
 */
struct vnodeops *ufs_vnodeops;
/* NOTE: "not blkd" below means that the operation isn't blocked by lockfs */
const fs_operation_def_t ufs_vnodeops_template[] = {
    VOPNAME_OPEN,		{ .vop_open = ufs_open },	/* not blkd */
    VOPNAME_CLOSE,		{ .vop_close = ufs_close },	/* not blkd */
    VOPNAME_READ,		{ .vop_read = ufs_read },
    VOPNAME_WRITE,		{ .vop_write = ufs_write },
    VOPNAME_IOCTL,		{ .vop_ioctl = ufs_ioctl },
    VOPNAME_GETATTR,		{ .vop_getattr = ufs_getattr },
    VOPNAME_SETATTR,		{ .vop_setattr = ufs_setattr },
    VOPNAME_ACCESS,		{ .vop_access = ufs_access },
    VOPNAME_LOOKUP,		{ .vop_lookup = ufs_lookup },
    VOPNAME_CREATE,		{ .vop_create = ufs_create },
    VOPNAME_REMOVE,		{ .vop_remove = ufs_remove },
    VOPNAME_LINK,		{ .vop_link = ufs_link },
    VOPNAME_RENAME,		{ .vop_rename = ufs_rename },
    VOPNAME_MKDIR,		{ .vop_mkdir = ufs_mkdir },
    VOPNAME_RMDIR,		{ .vop_rmdir = ufs_rmdir },
    VOPNAME_READDIR,		{ .vop_readdir = ufs_readdir },
    VOPNAME_SYMLINK,		{ .vop_symlink = ufs_symlink },
    VOPNAME_READLINK,		{ .vop_readlink = ufs_readlink },
    VOPNAME_FSYNC,		{ .vop_fsync = ufs_fsync },
    VOPNAME_INACTIVE,		{ .vop_inactive = ufs_inactive },	/* not blkd */
    VOPNAME_FID,		{ .vop_fid = ufs_fid },
    VOPNAME_RWLOCK,		{ .vop_rwlock = ufs_rwlock },	/* not blkd */
    VOPNAME_RWUNLOCK,		{ .vop_rwunlock = ufs_rwunlock },	/* not blkd */
    VOPNAME_SEEK,		{ .vop_seek = ufs_seek },
    VOPNAME_FRLOCK,		{ .vop_frlock = ufs_frlock },
    VOPNAME_SPACE,		{ .vop_space = ufs_space },
    VOPNAME_GETPAGE,		{ .vop_getpage = ufs_getpage },
    VOPNAME_PUTPAGE,		{ .vop_putpage = ufs_putpage },
    VOPNAME_MAP,		{ .vop_map = ufs_map },
    VOPNAME_ADDMAP,		{ .vop_addmap = ufs_addmap },	/* not blkd */
    VOPNAME_DELMAP,		{ .vop_delmap = ufs_delmap },	/* not blkd */
    VOPNAME_POLL,		{ .vop_poll = ufs_poll },	/* not blkd */
    VOPNAME_DUMP,		{ .vop_dump = ufs_dump },
    VOPNAME_PATHCONF,		{ .vop_pathconf = ufs_l_pathconf },
    VOPNAME_PAGEIO,		{ .vop_pageio = ufs_pageio },
    VOPNAME_DUMPCTL,		{ .vop_dumpctl = ufs_dumpctl },
    VOPNAME_GETSECATTR,		{ .vop_getsecattr = ufs_getsecattr },
    VOPNAME_SETSECATTR,		{ .vop_setsecattr = ufs_setsecattr },
    VOPNAME_VNEVENT,		{ .vop_vnevent = fs_vnevent_support },
    NULL,			NULL
};
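
/*
 * For illustration only: a template like the one above is typically
 * turned into the live operations vector during file system
 * initialization via vn_make_ops().  A sketch (the actual call lives
 * in the UFS vfs initialization code, not in this file):
 *
 *	if ((error = vn_make_ops("ufs", ufs_vnodeops_template,
 *	    &ufs_vnodeops)) != 0)
 *		cmn_err(CE_WARN, "ufs: bad vnode ops template");
 */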
#define	MAX_BACKFILE_COUNT	9999

/*
 * Created by ufs_dumpctl() to store a file's disk block info into memory.
 * Used by ufs_dump() to dump data to disk directly.
 */
struct dump {
    struct inode	*ip;		/* the file we contain */
    daddr_t		fsbs;		/* number of blocks stored */
    struct timeval32	time;		/* time stamp for the struct */
    daddr32_t		dblk[1];	/* place holder for block info */
};

static struct dump *dump_info = NULL;
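
/*
 * Note on sizing (illustration): dblk[1] is a variable-length
 * placeholder, so a struct dump that is to hold nblk block entries
 * (nblk here is a hypothetical name) would be allocated as
 *
 *	kmem_alloc(sizeof (struct dump) +
 *	    (nblk - 1) * sizeof (daddr32_t), KM_SLEEP);
 *
 * The same "count - 1" pattern is used for fiosnapcreate_multi in
 * ufs_ioctl() below.
 */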
/*
 * Previously there was no special action required for ordinary files.
 * (Devices are handled through the device file system.)
 * Now we support Large Files and the Large File API requires open to
 * fail if the file is large.
 * We could take care to prevent data corruption
 * by doing an atomic check of size and truncate if file is opened with
 * FTRUNC flag set but traditionally this is being done by the vfs/vnode
 * layers. So taking care of truncation here is a change in the existing
 * semantics of VOP_OPEN and therefore we chose not to implement anything
 * here.  The check for the size of the file > 2GB is being done at the
 * vfs layer in routine vn_open().
 */

/* ARGSUSED */
static int
ufs_open(struct vnode **vpp, int flag, struct cred *cr, caller_context_t *ct)
{
    return (0);
}
/*ARGSUSED*/
static int
ufs_close(struct vnode *vp, int flag, int count, offset_t offset,
    struct cred *cr, caller_context_t *ct)
{
    cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
    cleanshares(vp, ttoproc(curthread)->p_pid);

    /*
     * Push partially filled cluster at last close.
     * ``last close'' is approximated because the dnlc
     * may have a hold on the vnode.
     * Checking for VBAD here will also act as a forced umount check.
     */
    if (vp->v_count <= 2 && vp->v_type != VBAD) {
        struct inode *ip = VTOI(vp);

        if (ip->i_delaylen) {
            ins.in_poc.value.ul++;
            (void) ufs_putpages(vp, ip->i_delayoff, ip->i_delaylen,
                B_ASYNC | B_FREE, cr);
            ip->i_delaylen = 0;
        }
    }

    return (0);
}
/*ARGSUSED*/
static int
ufs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cr,
    struct caller_context *ct)
{
    struct inode *ip = VTOI(vp);
    struct ufsvfs *ufsvfsp;
    struct ulockfs *ulp = NULL;
    int error = 0;
    int intrans = 0;

    ASSERT(RW_READ_HELD(&ip->i_rwlock));

    /*
     * Mandatory locking needs to be done before ufs_lockfs_begin()
     * and TRANS_BEGIN_SYNC() calls since mandatory locks can sleep.
     */
    if (MANDLOCK(vp, ip->i_mode)) {
        /*
         * ufs_getattr ends up being called by chklock
         */
        error = chklock(vp, FREAD, uiop->uio_loffset,
            uiop->uio_resid, uiop->uio_fmode, ct);
        if (error)
            goto out;
    }

    ufsvfsp = ip->i_ufsvfs;
    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READ_MASK);
    if (error)
        goto out;

    /*
     * In the case that a directory is opened for reading as a file
     * (eg "cat .") with the O_RSYNC, O_SYNC and O_DSYNC flags set,
     * the locking order had to be changed to avoid a deadlock with
     * an update taking place on that directory at the same time.
     */
    if ((ip->i_mode & IFMT) == IFDIR) {

        rw_enter(&ip->i_contents, RW_READER);
        error = rdip(ip, uiop, ioflag, cr);
        rw_exit(&ip->i_contents);

        if (error) {
            if (ulp)
                ufs_lockfs_end(ulp);
            goto out;
        }

        if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
            TRANS_ISTRANS(ufsvfsp)) {
            rw_exit(&ip->i_rwlock);
            TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,
                error);
            ASSERT(!error);
            TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
                TOP_READ_SIZE);
            rw_enter(&ip->i_rwlock, RW_READER);
        }
    } else {
        /*
         * Only transact reads to files opened for sync-read and
         * sync-write on a file system that is not write locked.
         *
         * The ``not write locked'' check prevents problems with
         * enabling/disabling logging on a busy file system.  E.g.,
         * logging exists at the beginning of the read but does not
         * at the end.
         */
        if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
            TRANS_ISTRANS(ufsvfsp)) {
            TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,
                error);
            ASSERT(!error);
            intrans = 1;
        }

        rw_enter(&ip->i_contents, RW_READER);
        error = rdip(ip, uiop, ioflag, cr);
        rw_exit(&ip->i_contents);

        if (intrans) {
            TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
                TOP_READ_SIZE);
        }
    }

    if (ulp) {
        ufs_lockfs_end(ulp);
    }
out:
    return (error);
}
extern int	ufs_HW;			/* high water mark */
extern int	ufs_LW;			/* low water mark */
int	ufs_WRITES = 1;			/* XXX - enable/disable */
int	ufs_throttles = 0;		/* throttling count */
int	ufs_allow_shared_writes = 1;	/* directio shared writes */

static int
ufs_check_rewrite(struct inode *ip, struct uio *uiop, int ioflag)
{
    int	shared_write;

    /*
     * If the FDSYNC flag is set then ignore the global
     * ufs_allow_shared_writes in this case.
     */
    shared_write = (ioflag & FDSYNC) | ufs_allow_shared_writes;

    /*
     * Filter to determine if this request is suitable as a
     * concurrent rewrite. This write must not allocate blocks
     * by extending the file or filling in holes. No use trying
     * through FSYNC descriptors as the inode will be synchronously
     * updated after the write. The uio structure has not yet been
     * checked for sanity, so assume nothing.
     */
    return (((ip->i_mode & IFMT) == IFREG) && !(ioflag & FAPPEND) &&
        (uiop->uio_loffset >= (offset_t)0) &&
        (uiop->uio_loffset < ip->i_size) && (uiop->uio_resid > 0) &&
        ((ip->i_size - uiop->uio_loffset) >= uiop->uio_resid) &&
        !(ioflag & FSYNC) && !bmap_has_holes(ip) &&
        shared_write);
}
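
/*
 * Example (illustration): an O_DSYNC write that overwrites bytes
 * strictly inside an existing, hole-free regular file qualifies as a
 * concurrent rewrite under the filter above; an appending (FAPPEND)
 * write, an O_SYNC write, or a write that would extend i_size or fill
 * a hole does not.
 */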
/*ARGSUSED*/
static int
ufs_write(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
    struct inode *ip = VTOI(vp);
    struct ufsvfs *ufsvfsp;
    struct ulockfs *ulp;
    int retry = 1;
    int error, resv, resid = 0;
    int directio_status;
    int exclusive;
    int rewriteflg;
    long start_resid = uiop->uio_resid;

    ASSERT(RW_LOCK_HELD(&ip->i_rwlock));

retry_mandlock:
    /*
     * Mandatory locking needs to be done before ufs_lockfs_begin()
     * and TRANS_BEGIN_[A]SYNC() calls since mandatory locks can sleep.
     * Check for forced unmounts normally done in ufs_lockfs_begin().
     */
    if ((ufsvfsp = ip->i_ufsvfs) == NULL) {
        error = EIO;
        goto out;
    }
    if (MANDLOCK(vp, ip->i_mode)) {

        ASSERT(RW_WRITE_HELD(&ip->i_rwlock));

        /*
         * ufs_getattr ends up being called by chklock
         */
        error = chklock(vp, FWRITE, uiop->uio_loffset,
            uiop->uio_resid, uiop->uio_fmode, ct);
        if (error)
            goto out;
    }

    /* i_rwlock can change in chklock */
    exclusive = rw_write_held(&ip->i_rwlock);
    rewriteflg = ufs_check_rewrite(ip, uiop, ioflag);

    /*
     * Check for fast-path special case of directio re-writes.
     */
    if ((ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) &&
        !exclusive && rewriteflg) {

        error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
        if (error)
            goto out;

        rw_enter(&ip->i_contents, RW_READER);
        error = ufs_directio_write(ip, uiop, ioflag, 1, cr,
            &directio_status);
        if (directio_status == DIRECTIO_SUCCESS) {
            uint_t i_flag_save;

            if (start_resid != uiop->uio_resid)
                error = 0;
            /*
             * Special treatment of access times for re-writes.
             * If IMOD is not already set, then convert it
             * to IMODACC for this operation. This defers
             * entering a delta into the log until the inode
             * is flushed. This mimics what is done for read
             * operations and inode access time.
             */
            mutex_enter(&ip->i_tlock);
            i_flag_save = ip->i_flag;
            ip->i_flag |= IUPD | ICHG;
            ip->i_seq++;
            ITIMES_NOLOCK(ip);
            if ((i_flag_save & IMOD) == 0) {
                ip->i_flag &= ~IMOD;
                ip->i_flag |= IMODACC;
            }
            mutex_exit(&ip->i_tlock);
            rw_exit(&ip->i_contents);
            if (ulp)
                ufs_lockfs_end(ulp);
            goto out;
        }
        rw_exit(&ip->i_contents);
        if (ulp)
            ufs_lockfs_end(ulp);
    }

    if (!exclusive && !rw_tryupgrade(&ip->i_rwlock)) {
        rw_exit(&ip->i_rwlock);
        rw_enter(&ip->i_rwlock, RW_WRITER);
        /*
         * Mandatory locking could have been enabled
         * after dropping the i_rwlock.
         */
        if (MANDLOCK(vp, ip->i_mode))
            goto retry_mandlock;
    }

    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
    if (error)
        goto out;

    /*
     * Amount of log space needed for this write
     */
    if (!rewriteflg || !(ioflag & FDSYNC))
        TRANS_WRITE_RESV(ip, uiop, ulp, &resv, &resid);

    /*
     * Throttle writes.
     */
    if (ufs_WRITES && (ip->i_writes > ufs_HW)) {
        mutex_enter(&ip->i_tlock);
        while (ip->i_writes > ufs_HW) {
            ufs_throttles++;
            cv_wait(&ip->i_wrcv, &ip->i_tlock);
        }
        mutex_exit(&ip->i_tlock);
    }
    /*
     * Enter Transaction
     *
     * If the write is a rewrite there is no need to open a transaction
     * if the FDSYNC flag is set and not the FSYNC.  In this case just
     * set the IMODACC flag and do the update at a later time,
     * thus avoiding the overhead of the logging transaction that is
     * not required.
     */
    if (ioflag & (FSYNC|FDSYNC)) {
        if (ulp) {
            if (rewriteflg) {
                uint_t i_flag_save;

                rw_enter(&ip->i_contents, RW_READER);
                mutex_enter(&ip->i_tlock);
                i_flag_save = ip->i_flag;
                ip->i_flag |= IUPD | ICHG;
                ip->i_seq++;
                ITIMES_NOLOCK(ip);
                if ((i_flag_save & IMOD) == 0) {
                    ip->i_flag &= ~IMOD;
                    ip->i_flag |= IMODACC;
                }
                mutex_exit(&ip->i_tlock);
                rw_exit(&ip->i_contents);
            } else {
                int terr = 0;

                TRANS_BEGIN_SYNC(ufsvfsp, TOP_WRITE_SYNC, resv,
                    terr);
                ASSERT(!terr);
            }
        }
    } else {
        if (ulp)
            TRANS_BEGIN_ASYNC(ufsvfsp, TOP_WRITE, resv);
    }

    /*
     * Write the file
     */
    rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
    rw_enter(&ip->i_contents, RW_WRITER);
    if ((ioflag & FAPPEND) != 0 && (ip->i_mode & IFMT) == IFREG) {
        /*
         * In append mode start at end of file.
         */
        uiop->uio_loffset = ip->i_size;
    }

    /*
     * Mild optimisation, don't call ufs_trans_write() unless we have to
     * Also, suppress file system full messages if we will retry.
     */
    if (retry)
        ip->i_flag |= IQUIET;
    if (resid) {
        TRANS_WRITE(ip, uiop, ioflag, error, ulp, cr, resv, resid);
    } else {
        error = wrip(ip, uiop, ioflag, cr);
    }
    ip->i_flag &= ~IQUIET;

    rw_exit(&ip->i_contents);
    rw_exit(&ufsvfsp->vfs_dqrwlock);

    /*
     * Leave Transaction
     */
    if (ulp) {
        if (ioflag & (FSYNC|FDSYNC)) {
            if (!rewriteflg) {
                int terr = 0;

                TRANS_END_SYNC(ufsvfsp, terr, TOP_WRITE_SYNC,
                    resv);
                if (error == 0)
                    error = terr;
            }
        } else {
            TRANS_END_ASYNC(ufsvfsp, TOP_WRITE, resv);
        }
        ufs_lockfs_end(ulp);
    }
out:
    if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
        /*
         * Any blocks tied up in pending deletes?
         */
        ufs_delete_drain_wait(ufsvfsp, 1);
        retry = 0;
        goto retry_mandlock;
    }

    if (error == ENOSPC && (start_resid != uiop->uio_resid))
        error = 0;

    return (error);
}
656 * Used to keep swap files from blowing the page cache on a server.
661 * Free behind hacks. The pager is busted.
662 * XXX - need to pass the information down to writedone() in a flag like B_SEQ
663 * or B_FREE_IF_TIGHT_ON_MEMORY.
667 u_offset_t smallfile64
= 32 * 1024;
670 * While we should, in most cases, cache the pages for write, we
671 * may also want to cache the pages for read as long as they are
672 * frequently re-usable.
674 * If cache_read_ahead = 1, the pages for read will go to the tail
675 * of the cache list when they are released, otherwise go to the head.
677 int cache_read_ahead
= 0;
680 * Freebehind exists so that as we read large files sequentially we
681 * don't consume most of memory with pages from a few files. It takes
682 * longer to re-read from disk multiple small files as it does reading
683 * one large one sequentially. As system memory grows customers need
684 * to retain bigger chunks of files in memory. The advent of the
685 * cachelist opens up of the possibility freeing pages to the head or
688 * Not freeing a page is a bet that the page will be read again before
689 * it's segmap slot is needed for something else. If we loose the bet,
690 * it means some other thread is burdened with the page free we did
691 * not do. If we win we save a free and reclaim.
693 * Freeing it at the tail vs the head of cachelist is a bet that the
694 * page will survive until the next read. It's also saying that this
695 * page is more likely to be re-used than a page freed some time ago
696 * and never reclaimed.
698 * Freebehind maintains a range of file offset [smallfile1; smallfile2]
700 * 0 < offset < smallfile1 : pages are not freed.
701 * smallfile1 < offset < smallfile2 : pages freed to tail of cachelist.
702 * smallfile2 < offset : pages freed to head of cachelist.
704 * The range is computed at most once per second and depends on
705 * freemem and ncpus_online. Both parameters are bounded to be
706 * >= smallfile && >= smallfile64.
708 * smallfile1 = (free memory / ncpu) / 1000
709 * smallfile2 = (free memory / ncpu) / 10
711 * A few examples values:
713 * Free Mem (in Bytes) [smallfile1; smallfile2] [smallfile1; smallfile2]
714 * ncpus_online = 4 ncpus_online = 64
715 * ------------------ ----------------------- -----------------------
716 * 1G [256K; 25M] [32K; 1.5M]
717 * 10G [2.5M; 250M] [156K; 15M]
718 * 100G [25M; 2.5G] [1.5M; 150M]
722 #define SMALLFILE1_D 1000
723 #define SMALLFILE2_D 10
724 static u_offset_t smallfile1
= 32 * 1024;
725 static u_offset_t smallfile2
= 32 * 1024;
726 static clock_t smallfile_update
= 0; /* lbolt value of when to recompute */
727 uint_t smallfile1_d
= SMALLFILE1_D
;
728 uint_t smallfile2_d
= SMALLFILE2_D
;
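
/*
 * Worked example (illustration): with 8G free and ncpus_online = 8,
 * the per-CPU share is 1G, so smallfile1 = 1G / 1000 ~= 1M and
 * smallfile2 = 1G / 10 ~= 100M.  A sequential reader then keeps pages
 * for offsets below 1M, frees to the cachelist tail between 1M and
 * 100M, and frees to the head beyond 100M (see the recomputation in
 * rdip() below).
 */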
/*
 * wrip does the real work of write requests for ufs.
 */
int
wrip(struct inode *ip, struct uio *uio, int ioflag, struct cred *cr)
{
    rlim64_t limit = uio->uio_llimit;
    u_offset_t off;
    u_offset_t old_i_size;
    struct fs *fs;
    struct vnode *vp;
    struct ufsvfs *ufsvfsp;
    caddr_t base;
    long start_resid = uio->uio_resid;	/* save starting resid */
    long premove_resid;			/* resid before uiomove() */
    uint_t flags;
    int newpage;
    int iupdat_flag, directio_status;
    int n, on, mapon;
    int error, pagecreate;
    int do_dqrwlock;		/* drop/reacquire vfs_dqrwlock */
    int32_t iblocks;
    int	new_iblocks;

    /*
     * ip->i_size is incremented before the uiomove
     * is done on a write.  If the move fails (bad user
     * address) reset ip->i_size.
     * The better way would be to increment ip->i_size
     * only if the uiomove succeeds.
     */
    int i_size_changed = 0;
    o_mode_t type;
    int i_seq_needed = 0;

    vp = ITOV(ip);

    /*
     * check for forced unmount - should not happen as
     * the request passed the lockfs checks.
     */
    if ((ufsvfsp = ip->i_ufsvfs) == NULL)
        return (EIO);

    fs = ip->i_fs;

    ASSERT(RW_WRITE_HELD(&ip->i_contents));

    /* check for valid filetype */
    type = ip->i_mode & IFMT;
    if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
        (type != IFLNK) && (type != IFSHAD)) {
        return (EIO);
    }

    /*
     * the actual limit of UFS file size
     * is UFS_MAXOFFSET_T
     */
    if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
        limit = MAXOFFSET_T;

    if (uio->uio_loffset >= limit) {
        proc_t *p = ttoproc(curthread);

        mutex_enter(&p->p_lock);
        (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
            p, RCA_UNSAFE_SIGINFO);
        mutex_exit(&p->p_lock);
        return (EFBIG);
    }

    /*
     * if largefiles are disallowed, the limit is
     * the pre-largefiles value of 2GB
     */
    if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
        limit = MIN(UFS_MAXOFFSET_T, limit);
    else
        limit = MIN(MAXOFF32_T, limit);

    if (uio->uio_loffset < (offset_t)0) {
        return (EINVAL);
    }
    if (uio->uio_resid == 0) {
        return (0);
    }

    if (uio->uio_loffset >= limit)
        return (EFBIG);

    ip->i_flag |= INOACC;	/* don't update ref time in getpage */

    if (ioflag & (FSYNC|FDSYNC)) {
        ip->i_flag |= ISYNC;
        iupdat_flag = 1;
    }
    /*
     * Try to go direct
     */
    if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
        uio->uio_llimit = limit;
        error = ufs_directio_write(ip, uio, ioflag, 0, cr,
            &directio_status);
        /*
         * If ufs_directio wrote to the file or set the flags,
         * we need to update i_seq, but it may be deferred.
         */
        if (start_resid != uio->uio_resid ||
            (ip->i_flag & (ICHG|IUPD))) {
            i_seq_needed = 1;
            ip->i_flag |= ISEQ;
        }
        if (directio_status == DIRECTIO_SUCCESS)
            goto out;
    }

    /*
     * Behavior with respect to dropping/reacquiring vfs_dqrwlock:
     *
     * o shadow inodes: vfs_dqrwlock is not held at all
     * o quota updates: vfs_dqrwlock is read or write held
     * o other updates: vfs_dqrwlock is read held
     *
     * The first case is the only one where we do not hold
     * vfs_dqrwlock at all while entering wrip().
     * We must make sure not to downgrade/drop vfs_dqrwlock if we
     * have it as writer, i.e. if we are updating the quota inode.
     * There is no potential deadlock scenario in this case as
     * ufs_getpage() takes care of this and avoids reacquiring
     * vfs_dqrwlock in that case.
     *
     * This check is done here since the above conditions do not change
     * and we possibly loop below, so save a few cycles.
     */
    if ((type == IFSHAD) ||
        (rw_owner(&ufsvfsp->vfs_dqrwlock) == curthread)) {
        do_dqrwlock = 0;
    } else {
        do_dqrwlock = 1;
    }

    /*
     * Large Files: We cast MAXBMASK to offset_t
     * in order to mask out the higher bits. Since offset_t
     * is a signed value, the high order bit set in MAXBMASK
     * value makes it do the right thing by having all bits 1
     * in the higher word. May be removed for _SOLARIS64_.
     */

    do {
        u_offset_t uoff = uio->uio_loffset;
        off = uoff & (offset_t)MAXBMASK;
        mapon = (int)(uoff & (offset_t)MAXBOFFSET);
        on = (int)blkoff(fs, uoff);
        n = (int)MIN(fs->fs_bsize - on, uio->uio_resid);
        new_iblocks = 1;

        if (type == IFREG && uoff + n >= limit) {
            if (uoff >= limit) {
                error = EFBIG;
                goto out;
            }
            /*
             * since uoff + n >= limit,
             * therefore n >= limit - uoff, and n is an int
             * so it is safe to cast it to an int
             */
            n = (int)(limit - (rlim64_t)uoff);
        }
        if (uoff + n > ip->i_size) {
            /*
             * We are extending the length of the file.
             * bmap is used so that we are sure that
             * if we need to allocate new blocks, that it
             * is done here before we up the file size.
             */
            error = bmap_write(ip, uoff, (int)(on + n),
                mapon == 0, NULL, cr);
            /*
             * bmap_write never drops i_contents so if
             * the flags are set it changed the file.
             */
            if (ip->i_flag & (ICHG|IUPD)) {
                i_seq_needed = 1;
                ip->i_flag |= ISEQ;
            }
            if (error)
                break;
            /*
             * There is a window of vulnerability here.
             * The sequence of operations: allocate file
             * system blocks, uiomove the data into pages,
             * and then update the size of the file in the
             * inode, must happen atomically.  However, due
             * to current locking constraints, this can not
             * be done.
             */
            ASSERT(ip->i_writer == NULL);
            ip->i_writer = curthread;
            i_size_changed = 1;
            /*
             * If we are writing from the beginning of
             * the mapping, we can just create the
             * pages without having to read them.
             */
            pagecreate = (mapon == 0);
        } else if (n == MAXBSIZE) {
            /*
             * Going to do a whole mappings worth,
             * so we can just create the pages w/o
             * having to read them in.  But before
             * we do that, we need to make sure any
             * needed blocks are allocated first.
             */
            iblocks = ip->i_blocks;
            error = bmap_write(ip, uoff, (int)(on + n),
                BI_ALLOC_ONLY, NULL, cr);
            /*
             * bmap_write never drops i_contents so if
             * the flags are set it changed the file.
             */
            if (ip->i_flag & (ICHG|IUPD)) {
                i_seq_needed = 1;
                ip->i_flag |= ISEQ;
            }
            if (error)
                break;
            pagecreate = 1;
            /*
             * check if the new created page needed the
             * allocation of new disk blocks.
             */
            if (iblocks == ip->i_blocks)
                new_iblocks = 0; /* no new blocks allocated */
        } else {
            pagecreate = 0;
            /*
             * In sync mode flush the indirect blocks which
             * may have been allocated and not written on
             * disk. In above cases bmap_write will allocate
             * in sync mode.
             */
            if (ioflag & (FSYNC|FDSYNC)) {
                error = ufs_indirblk_sync(ip, uoff);
                if (error)
                    break;
            }
        }

        /*
         * At this point we can enter ufs_getpage() in one
         * of two ways:
         * 1) segmap_getmapflt() calls ufs_getpage() when the
         *    forcefault parameter is true (pagecreate == 0)
         * 2) uiomove() causes a page fault.
         *
         * We have to drop the contents lock to prevent the VM
         * system from trying to reacquire it in ufs_getpage()
         * should the uiomove cause a pagefault.
         *
         * We have to drop the reader vfs_dqrwlock here as well.
         */
        rw_exit(&ip->i_contents);
        if (do_dqrwlock) {
            ASSERT(RW_LOCK_HELD(&ufsvfsp->vfs_dqrwlock));
            ASSERT(!(RW_WRITE_HELD(&ufsvfsp->vfs_dqrwlock)));
            rw_exit(&ufsvfsp->vfs_dqrwlock);
        }

        newpage = 0;
        premove_resid = uio->uio_resid;

        /*
         * Touch the page and fault it in if it is not in core
         * before segmap_getmapflt or vpm_data_copy can lock it.
         * This is to avoid the deadlock if the buffer is mapped
         * to the same file through mmap which we want to write.
         */
        uio_prefaultpages((long)n, uio);

        if (vpm_enable) {
            /*
             * Copy data. If new pages are created, part of
             * the page that is not written will be initialized
             * with zeros.
             */
            error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
                uio, !pagecreate, &newpage, 0, S_WRITE);
        } else {

            base = segmap_getmapflt(segkmap, vp, (off + mapon),
                (uint_t)n, !pagecreate, S_WRITE);

            /*
             * segmap_pagecreate() returns 1 if it calls
             * page_create_va() to allocate any pages.
             */

            if (pagecreate)
                newpage = segmap_pagecreate(segkmap, base,
                    (size_t)n, 0);

            error = uiomove(base + mapon, (long)n, UIO_WRITE, uio);
        }

        /*
         * If "newpage" is set, then a new page was created and it
         * does not contain valid data, so it needs to be initialized
         * at this point.
         * Otherwise the page contains old data, which was overwritten
         * partially or as a whole in uiomove.
         * If there is only one iovec structure within uio, then
         * on error uiomove will not be able to update uio->uio_loffset
         * and we would zero the whole page here!
         *
         * If uiomove fails because of an error, the old valid data
         * is kept instead of filling the rest of the page with zero's.
         */
        if (!vpm_enable && newpage &&
            uio->uio_loffset < roundup(off + mapon + n, PAGESIZE)) {
            /*
             * We created pages w/o initializing them completely,
             * thus we need to zero the part that wasn't set up.
             * This happens on most EOF write cases and if
             * we had some sort of error during the uiomove.
             */
            int nzero, nmoved;

            nmoved = (int)(uio->uio_loffset - (off + mapon));
            ASSERT(nmoved >= 0 && nmoved <= n);
            nzero = roundup(on + n, PAGESIZE) - nmoved;
            ASSERT(nzero > 0 && mapon + nmoved + nzero <= MAXBSIZE);
            (void) kzero(base + mapon + nmoved, (uint_t)nzero);
        }

        /*
         * Unlock the pages allocated by page_create_va()
         * in segmap_pagecreate()
         */
        if (!vpm_enable && newpage)
            segmap_pageunlock(segkmap, base, (size_t)n, S_WRITE);

        /*
         * If the size of the file changed, then update the
         * size field in the inode now.  This can't be done
         * before the call to segmap_pageunlock or there is
         * a potential deadlock with callers to ufs_putpage().
         * They will be holding i_contents and trying to lock
         * a page, while this thread is holding a page locked
         * and trying to acquire i_contents.
         */
        if (i_size_changed) {
            rw_enter(&ip->i_contents, RW_WRITER);
            old_i_size = ip->i_size;
            UFS_SET_ISIZE(uoff + n, ip);
            TRANS_INODE(ufsvfsp, ip);
            /*
             * file has grown larger than 2GB. Set flag
             * in superblock to indicate this, if it
             * is not already set.
             */
            if ((ip->i_size > MAXOFF32_T) &&
                !(fs->fs_flags & FSLARGEFILES)) {
                ASSERT(ufsvfsp->vfs_lfflags & UFS_LARGEFILES);
                mutex_enter(&ufsvfsp->vfs_lock);
                fs->fs_flags |= FSLARGEFILES;
                ufs_sbwrite(ufsvfsp);
                mutex_exit(&ufsvfsp->vfs_lock);
            }
            mutex_enter(&ip->i_tlock);
            ip->i_writer = NULL;
            cv_broadcast(&ip->i_wrcv);
            mutex_exit(&ip->i_tlock);
            rw_exit(&ip->i_contents);
        }

        if (error) {
            /*
             * If we failed on a write, we may have already
             * allocated file blocks as well as pages.  It's
             * hard to undo the block allocation, but we must
             * be sure to invalidate any pages that may have
             * been allocated.
             *
             * If the page was created without initialization
             * then we must check if it should be possible
             * to destroy the new page and to keep the old data
             * on the disk.
             *
             * It is possible to destroy the page without
             * having to write back its contents only when
             * - the size of the file keeps unchanged
             * - bmap_write() did not allocate new disk blocks
             *   it is possible to create big files using "seek" and
             *   write to the end of the file. A "write" to a
             *   position before the end of the file would not
             *   change the size of the file but it would allocate
             *   new disk blocks.
             * - uiomove intended to overwrite the whole page.
             * - a new page was created (newpage == 1).
             */

            if (i_size_changed == 0 && new_iblocks == 0 &&
                newpage) {

                /* unwind what uiomove eventually last did */
                uio->uio_resid = premove_resid;

                /*
                 * destroy the page, do not write ambiguous
                 * data to the disk.
                 */
                flags = SM_DESTROY;
            } else {
                /*
                 * write the page back to the disk, if dirty,
                 * and remove the page from the cache.
                 */
                flags = SM_INVAL;
            }

            if (vpm_enable) {
                /*
                 * Flush pages.
                 */
                (void) vpm_sync_pages(vp, off, n, flags);
            } else {
                (void) segmap_release(segkmap, base, flags);
            }
        } else {
            flags = 0;
            /*
             * Force write back for synchronous write cases.
             */
            if ((ioflag & (FSYNC|FDSYNC)) || type == IFDIR) {
                /*
                 * If the sticky bit is set but the
                 * execute bit is not set, we do a
                 * synchronous write back and free
                 * the page when done.  We set up swap
                 * files to be handled this way to
                 * prevent servers from keeping around
                 * the client's swap pages too long.
                 * XXX - there ought to be a better way.
                 */
                if (IS_SWAPVP(vp)) {
                    flags = SM_WRITE | SM_FREE |
                        SM_DONTNEED;
                    iupdat_flag = 0;
                } else {
                    flags = SM_WRITE;
                }
            } else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
                /*
                 * Have written a whole block.
                 * Start an asynchronous write and
                 * mark the buffer to indicate that
                 * it won't be needed again soon.
                 */
                flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
            }
            if (vpm_enable) {
                /*
                 * Flush pages.
                 */
                error = vpm_sync_pages(vp, off, n, flags);
            } else {
                error = segmap_release(segkmap, base, flags);
            }
            /*
             * If the operation failed and is synchronous,
             * then we need to unwind what uiomove() last
             * did so we can potentially return an error to
             * the caller.  If this write operation was
             * done in two pieces and the first succeeded,
             * then we won't return an error for the second
             * piece that failed.  However, we only want to
             * return a resid value that reflects what was
             * really done.
             *
             * Failures for non-synchronous operations can
             * be ignored since the page subsystem will
             * retry the operation until it succeeds or the
             * file system is unmounted.
             */
            if (error) {
                if ((ioflag & (FSYNC | FDSYNC)) ||
                    type == IFDIR) {
                    uio->uio_resid = premove_resid;
                } else {
                    error = 0;
                }
            }
        }

        /*
         * Re-acquire contents lock.
         * If it was dropped, reacquire reader vfs_dqrwlock as well.
         */
        if (do_dqrwlock)
            rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
        rw_enter(&ip->i_contents, RW_WRITER);

        /*
         * If the uiomove() failed or if a synchronous
         * page push failed, fix up i_size.
         */
        if (error) {
            if (i_size_changed) {
                /*
                 * The uiomove failed, and we
                 * allocated blocks, so get rid
                 * of them.
                 */
                (void) ufs_itrunc(ip, old_i_size, 0, cr);
            }
        } else {
            /*
             * XXX - Can this be out of the loop?
             */
            ip->i_flag |= IUPD | ICHG;
            /*
             * Only do one increase of i_seq for multiple
             * pieces.  Because we drop locks, record
             * the fact that we changed the timestamp and
             * are deferring the increase in case another thread
             * pushes our timestamp update.
             */
            i_seq_needed = 1;
            ip->i_flag |= ISEQ;
            if (i_size_changed)
                ip->i_flag |= IATTCHG;
            if ((ip->i_mode & (IEXEC | (IEXEC >> 3) |
                (IEXEC >> 6))) != 0 &&
                (ip->i_mode & (ISUID | ISGID)) != 0 &&
                secpolicy_vnode_setid_retain(cr,
                (ip->i_mode & ISUID) != 0 && ip->i_uid == 0) != 0) {
                /*
                 * Clear Set-UID & Set-GID bits on
                 * successful write if not privileged
                 * and at least one of the execute bits
                 * is set.  If we always clear Set-GID,
                 * mandatory file and record locking is
                 * unusable.
                 */
                ip->i_mode &= ~(ISUID | ISGID);
            }
        }
        /*
         * In the case the FDSYNC flag is set and this is a
         * "rewrite" we won't log a delta.
         * The FSYNC flag overrides all cases.
         */
        if (!ufs_check_rewrite(ip, uio, ioflag) || !(ioflag & FDSYNC)) {
            TRANS_INODE(ufsvfsp, ip);
        }
    } while (error == 0 && uio->uio_resid > 0 && n != 0);

out:
    /*
     * Make sure i_seq is increased at least once per write
     */
    if (i_seq_needed) {
        ip->i_seq++;
        ip->i_flag &= ~ISEQ;	/* no longer deferred */
    }

    /*
     * Inode is updated according to this table -
     *
     *	FSYNC	  FDSYNC(posix.4)
     *	--------------------------
     *	always@	  IATTCHG|IBDWRITE
     *
     * @ -	If we are doing synchronous write the only time we should
     *	not be sync'ing the ip here is if we have the stickyhack
     *	activated, the file is marked with the sticky bit and
     *	no exec bit, the file length has not been changed and
     *	no new blocks have been allocated during this write.
     */

    if ((ip->i_flag & ISYNC) != 0) {
        /*
         * we have eliminated nosync
         */
        if ((ip->i_flag & (IATTCHG|IBDWRITE)) ||
            ((ioflag & FSYNC) && iupdat_flag)) {
            ufs_iupdat(ip, 1);
        }
    }

    /*
     * If we've already done a partial-write, terminate
     * the write but return no error unless the error is ENOSPC
     * because the caller can detect this and free resources and
     * try again.
     */
    if ((start_resid != uio->uio_resid) && (error != ENOSPC))
        error = 0;

    ip->i_flag &= ~(INOACC | ISYNC);
    ITIMES_NOLOCK(ip);
    return (error);
}
/*
 * rdip does the real work of read requests for ufs.
 */
int
rdip(struct inode *ip, struct uio *uio, int ioflag, cred_t *cr)
{
    u_offset_t off;
    caddr_t base;
    struct fs *fs;
    struct ufsvfs *ufsvfsp;
    struct vnode *vp;
    long oresid = uio->uio_resid;
    u_offset_t n, on, mapon;
    uint_t flags;
    int error = 0;
    krw_t rwtype;
    o_mode_t type;
    int dofree, directio_status;
    clock_t now;
    int doupdate = 1;

    vp = ITOV(ip);

    ASSERT(RW_LOCK_HELD(&ip->i_contents));

    ufsvfsp = ip->i_ufsvfs;

    if (ufsvfsp == NULL)
        return (EIO);

    fs = ufsvfsp->vfs_fs;

    /* check for valid filetype */
    type = ip->i_mode & IFMT;
    if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
        (type != IFLNK) && (type != IFSHAD)) {
        return (EIO);
    }

    if (uio->uio_loffset > UFS_MAXOFFSET_T) {
        error = 0;
        goto out;
    }
    if (uio->uio_loffset < (offset_t)0) {
        return (EINVAL);
    }
    if (uio->uio_resid == 0) {
        return (0);
    }

    if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (fs->fs_ronly == 0) &&
        (!ufsvfsp->vfs_noatime)) {
        mutex_enter(&ip->i_tlock);
        ip->i_flag |= IACC;
        mutex_exit(&ip->i_tlock);
    }
    /*
     * Try to go direct
     */
    if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
        error = ufs_directio_read(ip, uio, cr, &directio_status);
        if (directio_status == DIRECTIO_SUCCESS)
            goto out;
    }

    rwtype = (rw_write_held(&ip->i_contents)?RW_WRITER:RW_READER);

    do {
        offset_t diff;
        u_offset_t uoff = uio->uio_loffset;
        off = uoff & (offset_t)MAXBMASK;
        mapon = (u_offset_t)(uoff & (offset_t)MAXBOFFSET);
        on = (u_offset_t)blkoff(fs, uoff);
        n = MIN((u_offset_t)fs->fs_bsize - on,
            (u_offset_t)uio->uio_resid);

        diff = ip->i_size - uoff;

        if (diff <= (offset_t)0) {
            error = 0;
            goto out;
        }
        if (diff < (offset_t)n)
            n = (int)diff;

        /*
         * We update smallfile2 and smallfile1 at most every second.
         */
        now = ddi_get_lbolt();
        if (now >= smallfile_update) {
            uint64_t percpufreeb;
            if (smallfile1_d == 0) smallfile1_d = SMALLFILE1_D;
            if (smallfile2_d == 0) smallfile2_d = SMALLFILE2_D;
            percpufreeb = ptob((uint64_t)freemem) / ncpus_online;
            smallfile1 = percpufreeb / smallfile1_d;
            smallfile2 = percpufreeb / smallfile2_d;
            smallfile1 = MAX(smallfile1, smallfile);
            smallfile1 = MAX(smallfile1, smallfile64);
            smallfile2 = MAX(smallfile1, smallfile2);
            smallfile_update = now + hz;
        }

        dofree = freebehind &&
            ip->i_nextr == (off & PAGEMASK) && off > smallfile1;

        /*
         * At this point we can enter ufs_getpage() in one of two
         * ways:
         * 1) segmap_getmapflt() calls ufs_getpage() when the
         *    forcefault parameter is true (value of 1 is passed)
         * 2) uiomove() causes a page fault.
         *
         * We cannot hold onto an i_contents reader lock without
         * risking deadlock in ufs_getpage() so drop a reader lock.
         * The ufs_getpage() dolock logic already allows for a
         * thread holding i_contents as writer to work properly
         * so we keep a writer lock.
         */
        if (rwtype == RW_READER)
            rw_exit(&ip->i_contents);

        if (vpm_enable) {
            /*
             * Copy data.
             */
            error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
                uio, 1, NULL, 0, S_READ);
        } else {
            base = segmap_getmapflt(segkmap, vp, (off + mapon),
                (uint_t)n, 1, S_READ);
            error = uiomove(base + mapon, (long)n, UIO_READ, uio);
        }

        flags = 0;
        if (!error) {
            /*
             * If reading sequential we won't need this
             * buffer again soon.  For offsets in range
             * [smallfile1, smallfile2] release the pages
             * at the tail of the cache list, larger
             * offsets are released at the head.
             */
            if (dofree) {
                flags = SM_FREE | SM_ASYNC;
                if ((cache_read_ahead == 0) &&
                    (off > smallfile2))
                    flags |= SM_DONTNEED;
            }
            /*
             * In POSIX SYNC (FSYNC and FDSYNC) read mode,
             * we want to make sure that the page which has
             * been read, is written on disk if it is dirty.
             * And corresponding indirect blocks should also
             * be flushed out.
             */
            if ((ioflag & FRSYNC) && (ioflag & (FSYNC|FDSYNC))) {
                flags &= ~SM_ASYNC;
                flags |= SM_WRITE;
            }
            if (vpm_enable) {
                error = vpm_sync_pages(vp, off, n, flags);
            } else {
                error = segmap_release(segkmap, base, flags);
            }
        } else {
            if (vpm_enable) {
                (void) vpm_sync_pages(vp, off, n, flags);
            } else {
                (void) segmap_release(segkmap, base, flags);
            }
        }

        if (rwtype == RW_READER)
            rw_enter(&ip->i_contents, rwtype);
    } while (error == 0 && uio->uio_resid > 0 && n != 0);
out:
    /*
     * Inode is updated according to this table if FRSYNC is set.
     *
     *	FSYNC	  FDSYNC(posix.4)
     *	--------------------------
     *	always	  IATTCHG|IBDWRITE
     */
    /*
     * The inode is not updated if we're logging and the inode is a
     * directory with FRSYNC, FSYNC and FDSYNC flags set.
     */
    if (ioflag & FRSYNC) {
        if (TRANS_ISTRANS(ufsvfsp) && ((ip->i_mode & IFMT) == IFDIR)) {
            doupdate = 0;
        }
        if (doupdate) {
            if ((ioflag & FSYNC) ||
                ((ioflag & FDSYNC) &&
                (ip->i_flag & (IATTCHG|IBDWRITE)))) {
                ufs_iupdat(ip, 1);
            }
        }
    }
    /*
     * If we've already done a partial read, terminate
     * the read but return no error.
     */
    if (oresid != uio->uio_resid)
        error = 0;

    ITIMES(ip);

    return (error);
}
*ct
)
1558 struct lockfs lockfs
, lockfs_out
;
1559 struct ufsvfs
*ufsvfsp
= VTOI(vp
)->i_ufsvfs
;
1560 char *comment
, *original_comment
;
1562 struct ulockfs
*ulp
;
1571 * forcibly unmounted
1573 if (ufsvfsp
== NULL
|| vp
->v_vfsp
== NULL
||
1574 vp
->v_vfsp
->vfs_flag
& VFS_UNMOUNTED
)
1576 fs
= ufsvfsp
->vfs_fs
;
1578 if (cmd
== Q_QUOTACTL
) {
1579 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_QUOTA_MASK
);
1584 TRANS_BEGIN_ASYNC(ufsvfsp
, TOP_QUOTA
,
1585 TOP_SETQUOTA_SIZE(fs
));
1588 error
= quotactl(vp
, arg
, flag
, cr
);
1591 TRANS_END_ASYNC(ufsvfsp
, TOP_QUOTA
,
1592 TOP_SETQUOTA_SIZE(fs
));
1593 ufs_lockfs_end(ulp
);
1601 * file system locking
1603 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1606 if ((flag
& DATAMODEL_MASK
) == DATAMODEL_NATIVE
) {
1607 if (copyin((caddr_t
)arg
, &lockfs
,
1608 sizeof (struct lockfs
)))
1611 #ifdef _SYSCALL32_IMPL
1613 struct lockfs32 lockfs32
;
1614 /* Translate ILP32 lockfs to LP64 lockfs */
1615 if (copyin((caddr_t
)arg
, &lockfs32
,
1616 sizeof (struct lockfs32
)))
1618 lockfs
.lf_lock
= (ulong_t
)lockfs32
.lf_lock
;
1619 lockfs
.lf_flags
= (ulong_t
)lockfs32
.lf_flags
;
1620 lockfs
.lf_key
= (ulong_t
)lockfs32
.lf_key
;
1621 lockfs
.lf_comlen
= (ulong_t
)lockfs32
.lf_comlen
;
1623 (caddr_t
)(uintptr_t)lockfs32
.lf_comment
;
1625 #endif /* _SYSCALL32_IMPL */
1627 if (lockfs
.lf_comlen
) {
1628 if (lockfs
.lf_comlen
> LOCKFS_MAXCOMMENTLEN
)
1629 return (ENAMETOOLONG
);
1631 kmem_alloc(lockfs
.lf_comlen
, KM_SLEEP
);
1632 if (copyin(lockfs
.lf_comment
, comment
,
1633 lockfs
.lf_comlen
)) {
1634 kmem_free(comment
, lockfs
.lf_comlen
);
1637 original_comment
= lockfs
.lf_comment
;
1638 lockfs
.lf_comment
= comment
;
1640 if ((error
= ufs_fiolfs(vp
, &lockfs
, 0)) == 0) {
1641 lockfs
.lf_comment
= original_comment
;
1643 if ((flag
& DATAMODEL_MASK
) ==
1645 (void) copyout(&lockfs
, (caddr_t
)arg
,
1646 sizeof (struct lockfs
));
1648 #ifdef _SYSCALL32_IMPL
1650 struct lockfs32 lockfs32
;
1651 /* Translate LP64 to ILP32 lockfs */
1653 (uint32_t)lockfs
.lf_lock
;
1655 (uint32_t)lockfs
.lf_flags
;
1657 (uint32_t)lockfs
.lf_key
;
1658 lockfs32
.lf_comlen
=
1659 (uint32_t)lockfs
.lf_comlen
;
1660 lockfs32
.lf_comment
=
1661 (uint32_t)(uintptr_t)
1663 (void) copyout(&lockfs32
, (caddr_t
)arg
,
1664 sizeof (struct lockfs32
));
1666 #endif /* _SYSCALL32_IMPL */
1669 if (lockfs
.lf_comlen
)
1670 kmem_free(comment
, lockfs
.lf_comlen
);
1676 * get file system locking status
1679 if ((flag
& DATAMODEL_MASK
) == DATAMODEL_NATIVE
) {
1680 if (copyin((caddr_t
)arg
, &lockfs
,
1681 sizeof (struct lockfs
)))
1684 #ifdef _SYSCALL32_IMPL
1686 struct lockfs32 lockfs32
;
1687 /* Translate ILP32 lockfs to LP64 lockfs */
1688 if (copyin((caddr_t
)arg
, &lockfs32
,
1689 sizeof (struct lockfs32
)))
1691 lockfs
.lf_lock
= (ulong_t
)lockfs32
.lf_lock
;
1692 lockfs
.lf_flags
= (ulong_t
)lockfs32
.lf_flags
;
1693 lockfs
.lf_key
= (ulong_t
)lockfs32
.lf_key
;
1694 lockfs
.lf_comlen
= (ulong_t
)lockfs32
.lf_comlen
;
1696 (caddr_t
)(uintptr_t)lockfs32
.lf_comment
;
1698 #endif /* _SYSCALL32_IMPL */
1700 if (error
= ufs_fiolfss(vp
, &lockfs_out
))
1702 lockfs
.lf_lock
= lockfs_out
.lf_lock
;
1703 lockfs
.lf_key
= lockfs_out
.lf_key
;
1704 lockfs
.lf_flags
= lockfs_out
.lf_flags
;
1705 lockfs
.lf_comlen
= MIN(lockfs
.lf_comlen
,
1706 lockfs_out
.lf_comlen
);
1708 if ((flag
& DATAMODEL_MASK
) == DATAMODEL_NATIVE
) {
1709 if (copyout(&lockfs
, (caddr_t
)arg
,
1710 sizeof (struct lockfs
)))
1713 #ifdef _SYSCALL32_IMPL
1715 /* Translate LP64 to ILP32 lockfs */
1716 struct lockfs32 lockfs32
;
1717 lockfs32
.lf_lock
= (uint32_t)lockfs
.lf_lock
;
1718 lockfs32
.lf_flags
= (uint32_t)lockfs
.lf_flags
;
1719 lockfs32
.lf_key
= (uint32_t)lockfs
.lf_key
;
1720 lockfs32
.lf_comlen
= (uint32_t)lockfs
.lf_comlen
;
1721 lockfs32
.lf_comment
=
1722 (uint32_t)(uintptr_t)lockfs
.lf_comment
;
1723 if (copyout(&lockfs32
, (caddr_t
)arg
,
1724 sizeof (struct lockfs32
)))
1727 #endif /* _SYSCALL32_IMPL */
1729 if (lockfs
.lf_comlen
&&
1730 lockfs
.lf_comment
&& lockfs_out
.lf_comment
)
1731 if (copyout(lockfs_out
.lf_comment
,
1732 lockfs
.lf_comment
, lockfs
.lf_comlen
))
1742 * if mounted w/o atime, return quietly.
1743 * I briefly thought about returning ENOSYS, but
1744 * figured that most apps would consider this fatal
1745 * but the idea is to make this as seamless as poss.
1747 if (ufsvfsp
->vfs_noatime
)
1750 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
,
1751 ULOCKFS_SETATTR_MASK
);
1756 trans_size
= (int)TOP_SETATTR_SIZE(VTOI(vp
));
1757 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
,
1758 TOP_SETATTR
, trans_size
);
1761 error
= ufs_fiosatime(vp
, (struct timeval
*)arg
,
1765 TRANS_END_CSYNC(ufsvfsp
, error
, issync
,
1766 TOP_SETATTR
, trans_size
);
1767 ufs_lockfs_end(ulp
);
1775 return (ufs_fiosdio(vp
, (uint_t
*)arg
, flag
, cr
));
1781 return (ufs_fiogdio(vp
, (uint_t
*)arg
, flag
, cr
));
1787 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
,
1792 error
= ufs_fioio(vp
, (struct fioio
*)arg
, flag
, cr
);
1795 ufs_lockfs_end(ulp
);
1801 * file system flush (push w/invalidate)
1803 if ((caddr_t
)arg
!= NULL
)
1805 return (ufs_fioffs(vp
, NULL
, cr
));
1809 * Contract-private interface for Legato
1810 * Purge this vnode from the DNLC and decide
1811 * if this vnode is busy (*arg == 1) or not
1814 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1816 error
= ufs_fioisbusy(vp
, (int *)arg
, cr
);
1820 return (ufs_fiodirectio(vp
, (int)arg
, cr
));
1824 * Tune the file system (aka setting fs attributes)
1826 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
,
1827 ULOCKFS_SETATTR_MASK
);
1831 error
= ufs_fiotune(vp
, (struct fiotune
*)arg
, cr
);
1834 ufs_lockfs_end(ulp
);
1838 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1840 return (ufs_fiologenable(vp
, (void *)arg
, cr
, flag
));
1842 case _FIOLOGDISABLE
:
1843 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1845 return (ufs_fiologdisable(vp
, (void *)arg
, cr
, flag
));
1848 return (ufs_fioislog(vp
, (void *)arg
, cr
, flag
));
1850 case _FIOSNAPSHOTCREATE_MULTI
:
1852 struct fiosnapcreate_multi fc
, *fcp
;
1855 if (copyin((void *)arg
, &fc
, sizeof (fc
)))
1857 if (fc
.backfilecount
> MAX_BACKFILE_COUNT
)
1859 fcm_size
= sizeof (struct fiosnapcreate_multi
) +
1860 (fc
.backfilecount
- 1) * sizeof (int);
1861 fcp
= (struct fiosnapcreate_multi
*)
1862 kmem_alloc(fcm_size
, KM_SLEEP
);
1863 if (copyin((void *)arg
, fcp
, fcm_size
)) {
1864 kmem_free(fcp
, fcm_size
);
1867 error
= ufs_snap_create(vp
, fcp
, cr
);
1869 * Do copyout even if there is an error because
1870 * the details of error is stored in fcp.
1872 if (copyout(fcp
, (void *)arg
, fcm_size
))
1874 kmem_free(fcp
, fcm_size
);
1878 case _FIOSNAPSHOTDELETE
:
1880 struct fiosnapdelete fc
;
1882 if (copyin((void *)arg
, &fc
, sizeof (fc
)))
1884 error
= ufs_snap_delete(vp
, &fc
, cr
);
1885 if (!error
&& copyout(&fc
, (void *)arg
, sizeof (fc
)))
1890 case _FIOGETSUPERBLOCK
:
1891 if (copyout(fs
, (void *)arg
, SBSIZE
))
1895 case _FIOGETMAXPHYS
:
1896 if (copyout(&maxphys
, (void *)arg
, sizeof (maxphys
)))
1901 * The following 3 ioctls are for TSufs support
1902 * although could potentially be used elsewhere
1904 case _FIO_SET_LUFS_DEBUG
:
1905 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1907 lufs_debug
= (uint32_t)arg
;
1910 case _FIO_SET_LUFS_ERROR
:
1911 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1913 TRANS_SETERROR(ufsvfsp
);
1916 case _FIO_GET_TOP_STATS
:
1918 fio_lufs_stats_t
*ls
;
1919 ml_unit_t
*ul
= ufsvfsp
->vfs_log
;
1921 ls
= kmem_zalloc(sizeof (*ls
), KM_SLEEP
);
1922 ls
->ls_debug
= ul
->un_debug
; /* return debug value */
1923 /* Copy stucture if statistics are being kept */
1924 if (ul
->un_logmap
->mtm_tops
) {
1925 ls
->ls_topstats
= *(ul
->un_logmap
->mtm_tops
);
1928 if (copyout(ls
, (void *)arg
, sizeof (*ls
)))
1930 kmem_free(ls
, sizeof (*ls
));
1934 case _FIO_SEEK_DATA
:
1935 case _FIO_SEEK_HOLE
:
1936 if (ddi_copyin((void *)arg
, &off
, sizeof (off
), flag
))
1938 /* offset paramater is in/out */
1939 error
= ufs_fio_holey(vp
, cmd
, &off
);
1942 if (ddi_copyout(&off
, (void *)arg
, sizeof (off
), flag
))
1946 case _FIO_COMPRESSED
:
1949 * This is a project private ufs ioctl() to mark
1950 * the inode as that belonging to a compressed
1951 * file. This is used to mark individual
1952 * compressed files in a miniroot archive.
1953 * The files compressed in this manner are
1954 * automatically decompressed by the dcfs filesystem
1955 * (via an interception in ufs_lookup - see decompvp())
1956 * which is layered on top of ufs on a system running
1957 * from the archive. See uts/common/fs/dcfs for details.
1958 * This ioctl only marks the file as compressed - the
1959 * actual compression is done by fiocompress (a
1960 * userland utility) which invokes this ioctl().
1962 struct inode
*ip
= VTOI(vp
);
1964 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
,
1965 ULOCKFS_SETATTR_MASK
);
1970 TRANS_BEGIN_ASYNC(ufsvfsp
, TOP_IUPDAT
,
1971 TOP_IUPDAT_SIZE(ip
));
1974 error
= ufs_mark_compressed(vp
);
1977 TRANS_END_ASYNC(ufsvfsp
, TOP_IUPDAT
,
1978 TOP_IUPDAT_SIZE(ip
));
1979 ufs_lockfs_end(ulp
);
/* ARGSUSED */
static int
ufs_getattr(struct vnode *vp, struct vattr *vap, int flags,
    struct cred *cr, caller_context_t *ct)
{
    struct inode *ip = VTOI(vp);
    struct ufsvfs *ufsvfsp;
    int err;

    if (vap->va_mask == AT_SIZE) {
        /*
         * for performance, if only the size is requested don't bother
         * with anything else.
         */
        UFS_GET_ISIZE(&vap->va_size, ip);
        return (0);
    }

    /*
     * inlined lockfs checks
     */
    ufsvfsp = ip->i_ufsvfs;
    if ((ufsvfsp == NULL) || ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs)) {
        err = EIO;
        goto out;
    }

    rw_enter(&ip->i_contents, RW_READER);
    /*
     * Return all the attributes.  This should be refined so
     * that it only returns what's asked for.
     */

    /*
     * Copy from inode table.
     */
    vap->va_type = vp->v_type;
    vap->va_mode = ip->i_mode & MODEMASK;
    /*
     * If there is an ACL and there is a mask entry, then do the
     * extra work that completes the equivalent of an acltomode(3)
     * call.  According to POSIX P1003.1e, the acl mask should be
     * returned in the group permissions field.
     *
     * - start with the original permission and mode bits (from above)
     * - clear the group owner bits
     * - add in the mask bits.
     */
    if (ip->i_ufs_acl && ip->i_ufs_acl->aclass.acl_ismask) {
        vap->va_mode &= ~((VREAD | VWRITE | VEXEC) >> 3);
        vap->va_mode |=
            (ip->i_ufs_acl->aclass.acl_maskbits & PERMMASK) << 3;
    }
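
    /*
     * Worked example (illustration): for i_mode 0754 with an ACL mask
     * of r-- (04), the group bits are cleared (0704) and replaced by
     * the mask shifted into the group field, so the reported va_mode
     * becomes 0744.
     */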
    vap->va_uid = ip->i_uid;
    vap->va_gid = ip->i_gid;
    vap->va_fsid = ip->i_dev;
    vap->va_nodeid = (ino64_t)ip->i_number;
    vap->va_nlink = ip->i_nlink;
    vap->va_size = ip->i_size;
    if (vp->v_type == VCHR || vp->v_type == VBLK)
        vap->va_rdev = ip->i_rdev;
    else
        vap->va_rdev = 0;	/* not a b/c spec. */
    mutex_enter(&ip->i_tlock);
    ITIMES_NOLOCK(ip);	/* mark correct time in inode */
    vap->va_seq = ip->i_seq;
    vap->va_atime.tv_sec = (time_t)ip->i_atime.tv_sec;
    vap->va_atime.tv_nsec = ip->i_atime.tv_usec*1000;
    vap->va_mtime.tv_sec = (time_t)ip->i_mtime.tv_sec;
    vap->va_mtime.tv_nsec = ip->i_mtime.tv_usec*1000;
    vap->va_ctime.tv_sec = (time_t)ip->i_ctime.tv_sec;
    vap->va_ctime.tv_nsec = ip->i_ctime.tv_usec*1000;
    mutex_exit(&ip->i_tlock);

    switch (ip->i_mode & IFMT) {

    case IFBLK:
        vap->va_blksize = MAXBSIZE;		/* was BLKDEV_IOSIZE */
        break;

    case IFCHR:
        vap->va_blksize = MAXBSIZE;
        break;

    default:
        vap->va_blksize = ip->i_fs->fs_bsize;
        break;
    }
    vap->va_nblocks = (fsblkcnt64_t)ip->i_blocks;
    rw_exit(&ip->i_contents);
    err = 0;

out:
    return (err);
}
/*
 * Special wrapper to provide a callback for secpolicy_vnode_setattr().
 * The i_contents lock is already held by the caller and we need to
 * declare the inode as 'void *' argument.
 */
static int
ufs_priv_access(void *vip, int mode, struct cred *cr)
{
    struct inode *ip = vip;

    return (ufs_iaccess(ip, mode, cr, 0));
}
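
/*
 * For illustration: ufs_setattr() below hands this wrapper to
 * secpolicy_vnode_setattr() as its access-check callback:
 *
 *	error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
 *	    ufs_priv_access, ip);
 */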
2108 caller_context_t
*ct
)
2110 struct inode
*ip
= VTOI(vp
);
2111 struct ufsvfs
*ufsvfsp
= ip
->i_ufsvfs
;
2113 struct ulockfs
*ulp
;
2117 long int mask
= vap
->va_mask
;
2132 * Cannot set these attributes.
2134 if ((mask
& AT_NOSET
) || (mask
& AT_XVATTR
))
2138 * check for forced unmount
2140 if (ufsvfsp
== NULL
)
2143 fs
= ufsvfsp
->vfs_fs
;
2144 if (fs
->fs_ronly
!= 0)
2154 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_SETATTR_MASK
);
2159 * Acquire i_rwlock before TRANS_BEGIN_CSYNC() if this is a file.
2160 * This follows the protocol for read()/write().
2162 if (vp
->v_type
!= VDIR
) {
2164 * ufs_tryirwlock uses rw_tryenter and checks for SLOCK to
2165 * avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
2166 * possible, retries the operation.
2168 ufs_tryirwlock(&ip
->i_rwlock
, RW_WRITER
, retry_file
);
2171 ufs_lockfs_end(ulp
);
2178 * Truncate file. Must have write permission and not be a directory.
2180 if (mask
& AT_SIZE
) {
2181 rw_enter(&ip
->i_contents
, RW_WRITER
);
2182 if (vp
->v_type
== VDIR
) {
2186 if (error
= ufs_iaccess(ip
, IWRITE
, cr
, 0))
2189 rw_exit(&ip
->i_contents
);
2190 error
= TRANS_ITRUNC(ip
, vap
->va_size
, 0, cr
);
2192 rw_enter(&ip
->i_contents
, RW_WRITER
);
2196 if (error
== 0 && vap
->va_size
)
2197 vnevent_truncate(vp
, ct
);
2201 trans_size
= (int)TOP_SETATTR_SIZE(ip
);
2202 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
, TOP_SETATTR
, trans_size
);
2207 * Acquire i_rwlock after TRANS_BEGIN_CSYNC() if this is a directory.
2208 * This follows the protocol established by
2209 * ufs_link/create/remove/rename/mkdir/rmdir/symlink.
2211 if (vp
->v_type
== VDIR
) {
2212 ufs_tryirwlock_trans(&ip
->i_rwlock
, RW_WRITER
, TOP_SETATTR
,
2220 * Grab quota lock if we are changing the file's owner.
2222 if (mask
& AT_UID
) {
2223 rw_enter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
);
2226 rw_enter(&ip
->i_contents
, RW_WRITER
);
2228 oldva
.va_mode
= ip
->i_mode
;
2229 oldva
.va_uid
= ip
->i_uid
;
2230 oldva
.va_gid
= ip
->i_gid
;
2232 vap
->va_mask
&= ~AT_SIZE
;
2234 error
= secpolicy_vnode_setattr(cr
, vp
, vap
, &oldva
, flags
,
2235 ufs_priv_access
, ip
);
2239 mask
= vap
->va_mask
;
2242 * Change file access modes.
2244 if (mask
& AT_MODE
) {
2245 ip
->i_mode
= (ip
->i_mode
& IFMT
) | (vap
->va_mode
& ~IFMT
);
2246 TRANS_INODE(ufsvfsp
, ip
);
2249 mutex_enter(&vp
->v_lock
);
2250 if ((ip
->i_mode
& (ISVTX
| IEXEC
| IFDIR
)) == ISVTX
)
2251 vp
->v_flag
|= VSWAPLIKE
;
2253 vp
->v_flag
&= ~VSWAPLIKE
;
2254 mutex_exit(&vp
->v_lock
);
2257 if (mask
& (AT_UID
|AT_GID
)) {
2258 if (mask
& AT_UID
) {
2260 * Don't change ownership of the quota inode.
2262 if (ufsvfsp
->vfs_qinod
== ip
) {
2263 ASSERT(ufsvfsp
->vfs_qflags
& MQ_ENABLED
);
2269 * No real ownership change.
2271 if (ip
->i_uid
== vap
->va_uid
) {
2276 * Remove the blocks and the file, from the old user's
2280 blocks
= ip
->i_blocks
;
2283 (void) chkdq(ip
, -blocks
, /* force */ 1, cr
,
2284 (char **)NULL
, (size_t *)NULL
);
2285 (void) chkiq(ufsvfsp
, /* change */ -1, ip
,
2286 (uid_t
)ip
->i_uid
, /* force */ 1, cr
,
2287 (char **)NULL
, (size_t *)NULL
);
2288 dqrele(ip
->i_dquot
);
2291 ip
->i_uid
= vap
->va_uid
;
2294 * There is a real ownership change.
2298 * Add the blocks and the file to the new
2301 ip
->i_dquot
= getinoquota(ip
);
2302 (void) chkdq(ip
, blocks
, /* force */ 1, cr
,
2304 (void) chkiq(ufsvfsp
, /* change */ 1,
2305 (struct inode
*)NULL
, (uid_t
)ip
->i_uid
,
2306 /* force */ 1, cr
, &errmsg2
, &len2
);
2309 if (mask
& AT_GID
) {
2310 ip
->i_gid
= vap
->va_gid
;
2312 TRANS_INODE(ufsvfsp
, ip
);
    /*
     * Change file access or modified times.
     */
    if (mask & (AT_ATIME|AT_MTIME)) {
        /* Check that the time value is within ufs range */
        if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
            ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {

        /*
         * if the "noatime" mount option is set and only atime
         * update is requested, do nothing. No error is returned.
         */
        if ((ufsvfsp->vfs_noatime) &&
            ((mask & (AT_ATIME|AT_MTIME)) == AT_ATIME))

        if (mask & AT_ATIME) {
            ip->i_atime.tv_sec = vap->va_atime.tv_sec;
            ip->i_atime.tv_usec = vap->va_atime.tv_nsec / 1000;
            ip->i_flag &= ~IACC;

        if (mask & AT_MTIME) {
            ip->i_mtime.tv_sec = vap->va_mtime.tv_sec;
            ip->i_mtime.tv_usec = vap->va_mtime.tv_nsec / 1000;

            if (now.tv_sec > TIME32_MAX) {
                /*
                 * In 2038, ctime sticks forever..
                 */
                ip->i_ctime.tv_sec = TIME32_MAX;
                ip->i_ctime.tv_usec = 0;
            } else {
                ip->i_ctime.tv_sec = now.tv_sec;
                ip->i_ctime.tv_usec = now.tv_nsec / 1000;

            ip->i_flag &= ~(IUPD|ICHG);
            ip->i_flag |= IMODTIME;

        TRANS_INODE(ufsvfsp, ip);
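    /*
     * Added sketch (editorial, not original code): the on-disk UFS
     * timestamps carry 32-bit seconds and microseconds, so the
     * assignments above narrow a 64-bit timespec roughly like this
     * (hypothetical local names):
     *
     *    int32_t sec = (ts.tv_sec > TIME32_MAX) ? TIME32_MAX : ts.tv_sec;
     *    int32_t usec = ts.tv_nsec / 1000;    nanoseconds -> microseconds
     *
     * which is why ctime saturates at TIME32_MAX instead of wrapping
     * in 2038.
     */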
    /*
     * The presence of a shadow inode may indicate an ACL, but does
     * not imply an ACL. Future FSD types should be handled here too
     * and check for the presence of the attribute-specific data
     * before referencing it.
     */

    /*
     * XXX if ufs_iupdat is changed to sandbagged write fix
     * ufs_acl_setattr to push ip to keep acls consistent
     *
     * Suppress out of inodes messages if we will retry.
     */
    ip->i_flag |= IQUIET;
    error = ufs_acl_setattr(ip, vap, cr);
    ip->i_flag &= ~IQUIET;

    /*
     * Setattr always increases the sequence number
     */

    /*
     * if nfsd and not logging; push synchronously
     */
    if ((curthread->t_flag & T_DONTPEND) && !TRANS_ISTRANS(ufsvfsp)) {

    rw_exit(&ip->i_contents);

    rw_exit(&ufsvfsp->vfs_dqrwlock);

    rw_exit(&ip->i_rwlock);

    TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_SETATTR,

    ufs_lockfs_end(ulp);

    /*
     * If out of inodes or blocks, see if we can free something
     * up from the delete queue.
     */
    if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
        ufs_delete_drain_wait(ufsvfsp, 1);

        if (errmsg1 != NULL)
            kmem_free(errmsg1, len1);
        if (errmsg2 != NULL)
            kmem_free(errmsg2, len2);

    if (errmsg1 != NULL) {
        kmem_free(errmsg1, len1);

    if (errmsg2 != NULL) {
        kmem_free(errmsg2, len2);
ufs_access(struct vnode *vp, int mode, int flags, struct cred *cr,
    caller_context_t *ct)
{
    struct inode *ip = VTOI(vp);

    if (ip->i_ufsvfs == NULL)

    /*
     * The ufs_iaccess function wants to be called with
     * mode bits expressed as "ufs specific" bits.
     * I.e., VWRITE|VREAD|VEXEC do not make sense to
     * ufs_iaccess() but IWRITE|IREAD|IEXEC do.
     * But since they're the same, we just pass the vnode mode
     * bits and verify that assumption at compile time.
     */
#if IWRITE != VWRITE || IREAD != VREAD || IEXEC != VEXEC
#error "ufs_access needs to map Vmodes to Imodes"
#endif
    return (ufs_iaccess(ip, mode, cr, 1));
}
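/*
 * Added aside (editorial): the #if/#error pair above is the preprocessor
 * form of a compile-time assertion. The generic pattern, with hypothetical
 * names, is:
 *
 *    #if EXPECTED != ACTUAL
 *    #error "assumption broken"
 *    #endif
 *
 * so a change to either set of mode bits fails the build instead of
 * silently corrupting permission checks at run time.
 */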
ufs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cr,
    caller_context_t *ct)
{
    struct inode *ip = VTOI(vp);
    struct ufsvfs *ufsvfsp;
    struct ulockfs *ulp;

    if (vp->v_type != VLNK) {

    /*
     * If the symbolic link is empty there is nothing to read.
     * Fast-track these empty symbolic links
     */
    if (ip->i_size == 0) {

    ufsvfsp = ip->i_ufsvfs;
    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READLINK_MASK);

    /*
     * The ip->i_rwlock protects the data blocks used for FASTSYMLINK
     */
    if (ip->i_flag & IFASTSYMLNK) {
        rw_enter(&ip->i_rwlock, RW_READER);
        rw_enter(&ip->i_contents, RW_READER);
        if (ip->i_flag & IFASTSYMLNK) {
            if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
                (ip->i_fs->fs_ronly == 0) &&
                (!ufsvfsp->vfs_noatime)) {
                mutex_enter(&ip->i_tlock);

                mutex_exit(&ip->i_tlock);

            error = uiomove((caddr_t)&ip->i_db[1],
                MIN(ip->i_size, uiop->uio_resid),

        rw_exit(&ip->i_contents);
        rw_exit(&ip->i_rwlock);
        ssize_t size;           /* number of bytes read */
        caddr_t basep;          /* pointer to input data */

        struct uio tuio;        /* temp uio struct */

        iovec_t tiov;           /* temp iovec struct */
        char kbuf[FSL_SIZE];    /* buffer to hold fast symlink */
        int tflag = 0;          /* flag to indicate temp vars used */

        size = uiop->uio_resid;
        basep = uiop->uio_iov->iov_base;

        rw_enter(&ip->i_rwlock, RW_WRITER);
        rw_enter(&ip->i_contents, RW_WRITER);
        if (ip->i_flag & IFASTSYMLNK) {
            rw_exit(&ip->i_contents);
            rw_exit(&ip->i_rwlock);

        /* can this be a fast symlink and is it a user buffer? */
        if (ip->i_size <= FSL_SIZE &&
            (uiop->uio_segflg == UIO_USERSPACE ||
            uiop->uio_segflg == UIO_USERISPACE)) {

            bzero(&tuio, sizeof (struct uio));
            /*
             * setup a kernel buffer to read link into. this
             * is to fix a race condition where the user buffer
             * got corrupted before copying it into the inode.
             */
            tiov.iov_len = size;
            tiov.iov_base = kbuf;
            tuio.uio_iov = &tiov;
            tuio.uio_iovcnt = 1;
            tuio.uio_offset = uiop->uio_offset;
            tuio.uio_segflg = UIO_SYSSPACE;
            tuio.uio_fmode = uiop->uio_fmode;
            tuio.uio_extflg = uiop->uio_extflg;
            tuio.uio_limit = uiop->uio_limit;
            tuio.uio_resid = size;

            basep = tuio.uio_iov->iov_base;

        error = rdip(ip, tuiop, 0, cr);
        if (!(error == 0 && ip->i_number == ino && ip->i_gen == igen)) {
            rw_exit(&ip->i_contents);
            rw_exit(&ip->i_rwlock);

        size -= uiop->uio_resid;

        if ((tflag == 0 && ip->i_size <= FSL_SIZE &&
            ip->i_size == size) || (tflag == 1 &&
            tuio.uio_resid == 0)) {
            error = kcopy(basep, &ip->i_db[1], ip->i_size);

                ip->i_flag |= IFASTSYMLNK;

                (void) VOP_PUTPAGE(ITOV(ip),
                    (offset_t)0, PAGESIZE,
                    (B_DONTNEED | B_FREE | B_FORCE | B_ASYNC),

                /* error, clear garbage left behind */
                for (i = 1; i < NDADDR; i++)
                    ip->i_db[i] = 0;

                for (i = 0; i < NIADDR; i++)
                    ip->i_ib[i] = 0;

        /* now, copy it into the user buffer */
        error = uiomove((caddr_t)kbuf,
            MIN(size, uiop->uio_resid),

        rw_exit(&ip->i_contents);
        rw_exit(&ip->i_rwlock);

    ufs_lockfs_end(ulp);
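/*
 * Added note (editorial sketch): the kbuf bounce buffer above closes a
 * time-of-check-to-time-of-use window. The hazard, step by step:
 *
 *    rdip() into a user buffer     - link text lands in user memory
 *    another user thread scribbles on that memory
 *    kcopy(user buffer, i_db[1])   - corrupted text cached as fast symlink
 *
 * Reading into the kernel buffer (UIO_SYSSPACE) and kcopy'ing from it
 * guarantees the cached copy holds exactly what the filesystem read.
 */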
ufs_fsync(struct vnode *vp, int syncflag, struct cred *cr,
    caller_context_t *ct)
{
    struct inode *ip = VTOI(vp);
    struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
    struct ulockfs *ulp;

    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_FSYNC_MASK);

    if (TRANS_ISTRANS(ufsvfsp)) {
        /*
         * First push out any data pages
         */
        if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
            (vp->v_type != VCHR) && !(IS_SWAPVP(vp))) {
            error = VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,

        /*
         * Delta any delayed inode times updates
         * and push inode to log.
         * All other inode deltas will have already been delta'd
         * and will be pushed during the commit.
         */
        if (!(syncflag & FDSYNC) &&
            ((ip->i_flag & (IMOD|IMODACC)) == IMODACC)) {

            TRANS_BEGIN_ASYNC(ufsvfsp, TOP_FSYNC,

            rw_enter(&ip->i_contents, RW_READER);
            mutex_enter(&ip->i_tlock);
            ip->i_flag &= ~IMODTIME;
            mutex_exit(&ip->i_tlock);
            ufs_iupdat(ip, I_SYNC);
            rw_exit(&ip->i_contents);

            TRANS_END_ASYNC(ufsvfsp, TOP_FSYNC,

        /*
         * Commit the Moby transaction
         *
         * Deltas have already been made so we just need to
         * commit them with a synchronous transaction.
         * TRANS_BEGIN_SYNC() will return an error
         * if there are no deltas to commit, for an
         * empty transaction.
         */
        TRANS_BEGIN_SYNC(ufsvfsp, TOP_FSYNC, TOP_COMMIT_SIZE,

            error = 0; /* commit wasn't needed */

        TRANS_END_SYNC(ufsvfsp, error, TOP_FSYNC,

    } else {    /* not logging */
        if (!(IS_SWAPVP(vp)))
            if (syncflag & FNODSYNC) {
                /* Just update the inode only */
                TRANS_IUPDAT(ip, 1);

            } else if (syncflag & FDSYNC)
                /* Do data-synchronous writes */
                error = TRANS_SYNCIP(ip, 0, I_DSYNC, TOP_FSYNC);
            else
                /* Do synchronous writes */
                error = TRANS_SYNCIP(ip, 0, I_SYNC, TOP_FSYNC);

        rw_enter(&ip->i_contents, RW_WRITER);

        error = ufs_sync_indir(ip);
        rw_exit(&ip->i_contents);

    ufs_lockfs_end(ulp);
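/*
 * Added summary (editorial): in the non-logging branch above, syncflag
 * selects the strength of the flush roughly as follows:
 *
 *    FNODSYNC  -> TRANS_IUPDAT(ip, 1)             inode update only
 *    FDSYNC    -> TRANS_SYNCIP(.., I_DSYNC, ..)   data-synchronous writes
 *    otherwise -> TRANS_SYNCIP(.., I_SYNC, ..)    fully synchronous writes
 *
 * The logging branch instead pushes dirty pages and commits the open
 * deltas with a synchronous transaction.
 */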
ufs_inactive(struct vnode *vp, struct cred *cr, caller_context_t *ct)
{
    ufs_iinactive(VTOI(vp));

/*
 * Unix file system operations having to do with directory manipulation.
 */
int ufs_lookup_idle_count = 2;  /* Number of inodes to idle each time */
ufs_lookup(struct vnode *dvp, char *nm, struct vnode **vpp,
    struct pathname *pnp, int flags, struct vnode *rdir, struct cred *cr,
    caller_context_t *ct, int *direntflags, pathname_t *realpnp)
{
    struct ufsvfs *ufsvfsp;
    struct ulockfs *ulp;

    /*
     * Check flags for type of lookup (regular file or attribute file)
     */
    if (flags & LOOKUP_XATTR) {
        /*
         * If not mounted with XATTR support then return EINVAL
         */
        if (!(ip->i_ufsvfs->vfs_vfs->vfs_flag & VFS_XATTR))

        /*
         * We don't allow recursive attributes...
         * Maybe someday we will.
         */
        if ((ip->i_cflags & IXATTR)) {

        if ((vp = dnlc_lookup(dvp, XATTR_DIR_NAME)) == NULL) {
            error = ufs_xattr_getattrdir(dvp, &sip, flags, cr);

            dnlc_update(dvp, XATTR_DIR_NAME, vp);

        /*
         * Check accessibility of directory.
         */
        if (vp == DNLC_NO_VNODE) {

        if ((error = ufs_iaccess(VTOI(vp), IEXEC, cr, 1)) != 0) {

    /*
     * Check for a null component, which we should treat as
     * looking at dvp from within its parent, so we don't
     * need a call to ufs_iaccess(), as it has already been
     * done.
     */

    /*
     * Check for "." ie itself. This is a quick check and
     * avoids adding "." into the dnlc (which has been seen
     * to occupy >10% of the cache).
     */
    if ((nm[0] == '.') && (nm[1] == 0)) {
        /*
         * Don't return without checking accessibility
         * of the directory. We only need the lock if
         * we are going to return it.
         */
        if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) == 0) {

    /*
     * Fast path: Check the directory name lookup cache.
     */
    if (vp = dnlc_lookup(dvp, nm)) {
        /*
         * Check accessibility of directory.
         */
        if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) != 0) {

        if (vp == DNLC_NO_VNODE) {

    /*
     * Keep the idle queue from getting too long by
     * idling two inodes before attempting to allocate another.
     * This operation must be performed before entering
     * lockfs or a transaction.
     */
    if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
        if ((curthread->t_flag & T_DONTBLOCK) == 0) {
            ins.in_lidles.value.ul += ufs_lookup_idle_count;
            ufs_idle_some(ufs_lookup_idle_count);

    /*
     * Check accessibility of directory.
     */
    if (error = ufs_diraccess(ip, IEXEC, cr))

    ufsvfsp = ip->i_ufsvfs;
    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK);

    error = ufs_dirlook(ip, nm, &xip, cr, 1, 0);

    /*
     * If vnode is a device return special vnode instead.
     */
    if (IS_DEVVP(*vpp)) {
        struct vnode *newvp;

        newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,

    } else if (ip->i_cflags & ICOMPRESS) {
        struct vnode *newvp;

        /*
         * Compressed file, substitute dcfs vnode
         */
        newvp = decompvp(*vpp, cr, ct);

    ufs_lockfs_end(ulp);

    if (error == EAGAIN)
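/*
 * Added outline (editorial): the lookup fast paths above resolve a name in
 * this order, before any on-disk directory I/O:
 *
 *    1. LOOKUP_XATTR     - divert to the hidden attribute directory
 *    2. nm == "."        - return ip itself; "." is deliberately kept
 *                          out of the DNLC to save cache space
 *    3. dnlc_lookup(nm)  - cached translation; DNLC_NO_VNODE is a cached
 *                          negative entry and short-circuits to ENOENT
 *    4. ufs_dirlook()    - the real directory scan, under lockfs
 */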
ufs_create(struct vnode *dvp, char *name, struct vattr *vap, enum vcexcl excl,
    int mode, struct vnode **vpp, struct cred *cr, int flag,
    caller_context_t *ct, vsecattr_t *vsecp)
{
    struct ufsvfs *ufsvfsp;
    struct ulockfs *ulp;

    int defer_dip_seq_update = 0;   /* need to defer update of dip->i_seq */

    ufsvfsp = ip->i_ufsvfs;

    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_CREATE_MASK);

    trans_size = (int)TOP_CREATE_SIZE(ip);
    TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_CREATE, trans_size);

    if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr) != 0)
        vap->va_mode &= ~VSVTX;

    if (*name == '\0') {
        /*
         * Null component name refers to the directory itself.
         */

        /*
         * Even though this is an error case, we need to grab the
         * quota lock since the error handling code below is common.
         */
        rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
        rw_enter(&ip->i_contents, RW_WRITER);

        /*
         * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
         * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
         * possible, retries the operation.
         */
        ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_CREATE,

        xvp = dnlc_lookup(dvp, name);
        if (xvp == DNLC_NO_VNODE) {

            rw_exit(&ip->i_rwlock);
            if (error = ufs_iaccess(ip, IEXEC, cr, 1)) {

        /*
         * Suppress file system full message if we will retry
         */
        error = ufs_direnter_cm(ip, name, DE_CREATE,
            vap, &xip, cr, (noentry | (retry ? IQUIET : 0)));
        if (error == EAGAIN) {

            TRANS_END_CSYNC(ufsvfsp, error, issync,
                TOP_CREATE, trans_size);
            ufs_lockfs_end(ulp);

        rw_exit(&ip->i_rwlock);

        rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
        rw_enter(&ip->i_contents, RW_WRITER);

    /*
     * If the file already exists and this is a non-exclusive create,
     * check permissions and allow access for non-directories.
     * Read-only create of an existing directory is also allowed.
     * We fail an exclusive create of anything which already exists.
     */
    if (error == EEXIST) {

        if (excl == NONEXCL) {
            if ((((ip->i_mode & IFMT) == IFDIR) ||
                ((ip->i_mode & IFMT) == IFATTRDIR)) &&

                error = ufs_iaccess(ip, mode, cr, 0);

            rw_exit(&ip->i_contents);
            rw_exit(&ufsvfsp->vfs_dqrwlock);

        /*
         * If the error EEXIST was set, then i_seq can not
         * have been updated. The sequence number interface
         * is defined such that a non-error VOP_CREATE must
         * increase the dir va_seq by at least one. If we
         * have cleared the error, increase i_seq. Note that
         * we are increasing the dir i_seq and in rare cases
         * ip may actually be from the dvp, so we already have
         * the locks and it will not be subject to truncation.
         * In case we have to update i_seq of the parent
         * directory dip, we have to defer it till we have
         * released our locks on ip due to lock ordering requirements.
         */

        defer_dip_seq_update = 1;

        if (((ip->i_mode & IFMT) == IFREG) &&
            (vap->va_mask & AT_SIZE) && vap->va_size == 0) {
            /*
             * Truncate regular files, if requested by caller.
             * Grab i_rwlock to make sure no one else is
             * currently writing to the file (we promised
             * bmap we would do this).
             * Must get the locks in the correct order.
             */
            if (ip->i_size == 0) {
                ip->i_flag |= ICHG | IUPD;

                TRANS_INODE(ufsvfsp, ip);

            /*
             * Large Files: Why this check here?
             * Though we do it in vn_create() we really
             * want to guarantee that we do not destroy
             * Large file data by atomically checking
             * the size while holding the contents
             * lock.
             */
            if (flag && !(flag & FOFFMAX) &&
                ((ip->i_mode & IFMT) == IFREG) &&
                (ip->i_size > (offset_t)MAXOFF32_T)) {
                rw_exit(&ip->i_contents);
                rw_exit(&ufsvfsp->vfs_dqrwlock);

            if (TRANS_ISTRANS(ufsvfsp))

                rw_exit(&ip->i_contents);
                rw_exit(&ufsvfsp->vfs_dqrwlock);
                ufs_tryirwlock_trans(&ip->i_rwlock,
                    RW_WRITER, TOP_CREATE,

                rw_enter(&ufsvfsp->vfs_dqrwlock,

                rw_enter(&ip->i_contents, RW_WRITER);
                (void) ufs_itrunc(ip, (u_offset_t)0, 0,

                rw_exit(&ip->i_rwlock);

            vnevent_create(ITOV(ip), ct);

        rw_exit(&ufsvfsp->vfs_dqrwlock);
        rw_exit(&ip->i_contents);

    rw_exit(&ip->i_contents);
    rw_exit(&ufsvfsp->vfs_dqrwlock);

    /*
     * If vnode is a device return special vnode instead.
     */
    if (!error && IS_DEVVP(*vpp)) {
        struct vnode *newvp;

        newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);

        if (newvp == NULL) {

    /*
     * Do the deferred update of the parent directory's sequence
     * number.
     */
    if (defer_dip_seq_update == 1) {
        rw_enter(&dip->i_contents, RW_READER);
        mutex_enter(&dip->i_tlock);

        mutex_exit(&dip->i_tlock);
        rw_exit(&dip->i_contents);

    TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_CREATE,

    /*
     * If we haven't had a more interesting failure
     * already, then anything that might've happened
     * here should be reported.
     */

    if (!error && truncflag) {
        ufs_tryirwlock(&ip->i_rwlock, RW_WRITER, retry_trunc);

        ufs_lockfs_end(ulp);

        (void) TRANS_ITRUNC(ip, (u_offset_t)0, 0, cr);
        rw_exit(&ip->i_rwlock);

    ufs_lockfs_end(ulp);

    /*
     * If no inodes available, try to free one up out of the
     * pending delete queue.
     */
    if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
        ufs_delete_drain_wait(ufsvfsp, 1);
extern int ufs_idle_max;

ufs_remove(struct vnode *vp, char *nm, struct cred *cr,
    caller_context_t *ct, int flags)
{
    struct inode *ip = VTOI(vp);
    struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
    struct ulockfs *ulp;
    vnode_t *rmvp = NULL;   /* Vnode corresponding to name being removed */

    /*
     * don't let the delete queue get too long
     */
    if (ufsvfsp == NULL) {

    if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
        ufs_delete_drain(vp->v_vfsp, 1, 1);

    error = ufs_eventlookup(vp, nm, cr, &rmvp);

    /* Only send the event if there were no errors */

    vnevent_remove(rmvp, vp, nm, ct);

    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_REMOVE_MASK);

    TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_REMOVE,
        trans_size = (int)TOP_REMOVE_SIZE(VTOI(vp)));

    /*
     * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
     * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
     * possible, retries the operation.
     */
    ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_REMOVE, retry);

    error = ufs_dirremove(ip, nm, (struct inode *)0, (struct vnode *)0,

    rw_exit(&ip->i_rwlock);

    TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_REMOVE, trans_size);
    ufs_lockfs_end(ulp);
/*
 * Link a file or a directory. Only privileged processes are allowed to
 * make links to directories.
 */
ufs_link(struct vnode *tdvp, struct vnode *svp, char *tnm, struct cred *cr,
    caller_context_t *ct, int flags)
{
    struct inode *tdp = VTOI(tdvp);
    struct ufsvfs *ufsvfsp = tdp->i_ufsvfs;
    struct ulockfs *ulp;
    struct vnode *realvp;

    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LINK_MASK);

    TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_LINK,
        trans_size = (int)TOP_LINK_SIZE(VTOI(tdvp)));

    if (VOP_REALVP(svp, &realvp, ct) == 0)

    /*
     * Make sure link for extended attributes is valid
     * We only support hard linking of attr in ATTRDIR to ATTRDIR
     *
     * Make certain we don't attempt to look at a device node as
     * a ufs attribute directory.
     */
    isdev = IS_DEVVP(svp);
    if (((isdev == 0) && ((VTOI(svp)->i_cflags & IXATTR) == 0) &&
        ((tdp->i_mode & IFMT) == IFATTRDIR)) ||
        ((isdev == 0) && (VTOI(svp)->i_cflags & IXATTR) &&
        ((tdp->i_mode & IFMT) == IFDIR))) {

    if ((svp->v_type == VDIR &&
        secpolicy_fs_linkdir(cr, ufsvfsp->vfs_vfs) != 0) ||
        (sip->i_uid != crgetuid(cr) && secpolicy_basic_link(cr) != 0)) {

    /*
     * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
     * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
     * possible, retries the operation.
     */
    ufs_tryirwlock_trans(&tdp->i_rwlock, RW_WRITER, TOP_LINK, retry);

    error = ufs_direnter_lr(tdp, tnm, DE_LINK, (struct inode *)0,

    rw_exit(&tdp->i_rwlock);

    TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_LINK, trans_size);
    ufs_lockfs_end(ulp);

    vnevent_link(svp, ct);
uint64_t ufs_rename_retry_cnt;
uint64_t ufs_rename_upgrade_retry_cnt;
uint64_t ufs_rename_dircheck_retry_cnt;
clock_t ufs_rename_backoff_delay = 1;

/*
 * Rename a file or directory.
 * We are given the vnode and entry string of the source and the
 * vnode and entry string of the place we want to move the source
 * to (the target). The essential operation is:
 *
 *    link(source, target);
 *    unlink(source);
 *
 * but "atomically". Can't do full commit without saving state in
 * the inode on disk, which isn't feasible at this time. Best we
 * can do is always guarantee that the TARGET exists.
 */
ufs_rename(
    struct vnode *sdvp,     /* old (source) parent vnode */
    char *snm,              /* old (source) entry name */
    struct vnode *tdvp,     /* new (target) parent vnode */
    char *tnm,              /* new (target) entry name */
    caller_context_t *ct,

    struct inode *sip = NULL;   /* source inode */
    struct inode *ip = NULL;    /* check inode */
    struct inode *sdp;          /* old (source) parent inode */
    struct inode *tdp;          /* new (target) parent inode */
    struct vnode *svp = NULL;   /* source vnode */
    struct vnode *tvp = NULL;   /* target vnode, if it exists */
    struct vnode *realvp;
    struct ufsvfs *ufsvfsp;
    struct ulockfs *ulp;
    struct ufs_slot slot;

    krwlock_t *first_lock;
    krwlock_t *second_lock;
    krwlock_t *reverse_lock;
    ufsvfsp = sdp->i_ufsvfs;

    if (VOP_REALVP(tdvp, &realvp, ct) == 0)

    terr = ufs_eventlookup(tdvp, tnm, cr, &tvp);
    serr = ufs_eventlookup(sdvp, snm, cr, &svp);

    if ((serr == 0) && ((terr == 0) || (terr == ENOENT))) {

        vnevent_rename_dest(tvp, tdvp, tnm, ct);

        /*
         * Notify the target directory of the rename event
         * if source and target directories are not the same.
         */

        vnevent_rename_dest_dir(tdvp, ct);

        vnevent_rename_src(svp, sdvp, snm, ct);

    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RENAME_MASK);

    TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_RENAME,
        trans_size = (int)TOP_RENAME_SIZE(sdp));

    if (VOP_REALVP(tdvp, &realvp, ct) == 0)

    /*
     * We only allow renaming of attributes from ATTRDIR to ATTRDIR.
     */
    if ((tdp->i_mode & IFMT) != (sdp->i_mode & IFMT)) {

    /*
     * Check accessibility of directory.
     */
    if (error = ufs_diraccess(sdp, IEXEC, cr))

    /*
     * Look up inode of file we're supposed to rename.
     */
    if (error = ufs_dirlook(sdp, snm, &sip, cr, 0, 0)) {
        if (error == EAGAIN) {

            TRANS_END_CSYNC(ufsvfsp, error, issync,
                TOP_RENAME, trans_size);
            ufs_lockfs_end(ulp);

    /*
     * Lock both the source and target directories (they may be
     * the same) to provide the atomicity semantics that was
     * previously provided by the per file system vfs_rename_lock
     *
     * with vfs_rename_lock removed to allow simultaneous renames
     * within a file system, ufs_dircheckpath can deadlock while
     * traversing back to ensure that source is not a parent directory
     * of target parent directory. This is because we get into
     * ufs_dircheckpath with the sdp and tdp locks held as RW_WRITER.
     * If the tdp and sdp of the simultaneous renames happen to be
     * in the path of each other, it can lead to a deadlock. This
     * can be avoided by getting the locks as RW_READER here and then
     * upgrading to RW_WRITER after completing the ufs_dircheckpath.
     *
     * We hold the target directory's i_rwlock after calling
     * ufs_lockfs_begin but in many other operations (like ufs_readdir)
     * VOP_RWLOCK is explicitly called by the filesystem independent code
     * before calling the file system operation. In these cases the order
     * is reversed (i.e i_rwlock is taken first and then ufs_lockfs_begin
     * is called). This is fine as long as ufs_lockfs_begin acts as a VOP
     * counter but with ufs_quiesce setting the SLOCK bit this becomes a
     * synchronizing object which might lead to a deadlock. So we use
     * rw_tryenter instead of rw_enter. If we fail to get this lock and
     * find that SLOCK bit is set, we call ufs_lockfs_end and restart the
     * operation.
     */
    first_lock = &tdp->i_rwlock;
    second_lock = &sdp->i_rwlock;

    if (!rw_tryenter(first_lock, RW_READER)) {
        /*
         * We didn't get the lock. Check if the SLOCK is set in the
         * ufsvfs. If yes, we might be in a deadlock. Safer to give up
         * and wait for SLOCK to be cleared.
         */
        if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
            TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME,

            ufs_lockfs_end(ulp);

        /*
         * SLOCK isn't set so this is a genuine synchronization
         * case. Let's try again after giving them a breather.
         */
        delay(RETRY_LOCK_DELAY);
        goto retry_firstlock;

    /*
     * Need to check if the tdp and sdp are same !!!
     */
    if ((tdp != sdp) && (!rw_tryenter(second_lock, RW_READER))) {
        /*
         * We didn't get the lock. Check if the SLOCK is set in the
         * ufsvfs. If yes, we might be in a deadlock. Safer to give up
         * and wait for SLOCK to be cleared.
         */
        rw_exit(first_lock);
        if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
            TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME,

            ufs_lockfs_end(ulp);

        /*
         * So we couldn't get the second level peer lock *and*
         * the SLOCK bit isn't set. Too bad; we could be
         * contending with someone wanting these locks the other
         * way round. Reverse the locks in case there is a heavy
         * contention for the second level lock.
         */
        reverse_lock = first_lock;
        first_lock = second_lock;
        second_lock = reverse_lock;
        ufs_rename_retry_cnt++;
        goto retry_firstlock;
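/*
 * Added sketch (editorial): the retry logic above is a try-lock scheme for
 * acquiring two rwlocks without a fixed global order. In outline, with
 * hypothetical names:
 *
 *    retry:
 *        if (!rw_tryenter(first, RW_READER)) {
 *            if (quiescing)
 *                unwind and restart the whole VOP;
 *            delay(RETRY_LOCK_DELAY);
 *            goto retry;
 *        }
 *        if (second != first && !rw_tryenter(second, RW_READER)) {
 *            rw_exit(first);
 *            if (quiescing)
 *                unwind and restart;
 *            swap(first, second);    reverse acquisition order
 *            goto retry;
 *        }
 *
 * Reversing the order on contention keeps two renames whose source and
 * target directories oppose each other from livelocking on one lock.
 */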
    /*
     * Make sure we can delete the source entry. This requires
     * write permission on the containing directory.
     * Check for sticky directories.
     */
    rw_enter(&sdp->i_contents, RW_READER);
    rw_enter(&sip->i_contents, RW_READER);
    if ((error = ufs_iaccess(sdp, IWRITE, cr, 0)) != 0 ||
        (error = ufs_sticky_remove_access(sdp, sip, cr)) != 0) {
        rw_exit(&sip->i_contents);
        rw_exit(&sdp->i_contents);

    /*
     * If this is a rename of a directory and the parent is
     * different (".." must be changed), then the source
     * directory must not be in the directory hierarchy
     * above the target, as this would orphan everything
     * below the source directory. Also the user must have
     * write permission in the source so as to be able to
     * change "..".
     */
    if ((((sip->i_mode & IFMT) == IFDIR) ||
        ((sip->i_mode & IFMT) == IFATTRDIR)) && sdp != tdp) {

        if (error = ufs_iaccess(sip, IWRITE, cr, 0)) {
            rw_exit(&sip->i_contents);
            rw_exit(&sdp->i_contents);

        inum = sip->i_number;
        rw_exit(&sip->i_contents);
        rw_exit(&sdp->i_contents);
        if ((error = ufs_dircheckpath(inum, tdp, sdp, cr))) {
            /*
             * If we got EAGAIN ufs_dircheckpath detected a
             * potential deadlock and backed out. We need
             * to retry the operation since sdp and tdp have
             * to be released to avoid the deadlock.
             */
            if (error == EAGAIN) {
                rw_exit(&tdp->i_rwlock);

                rw_exit(&sdp->i_rwlock);
                delay(ufs_rename_backoff_delay);
                ufs_rename_dircheck_retry_cnt++;

        rw_exit(&sip->i_contents);
        rw_exit(&sdp->i_contents);

    /*
     * Check for renaming '.' or '..' or alias of '.'
     */
    if (strcmp(snm, ".") == 0 || strcmp(snm, "..") == 0 || sdp == sip) {

    /*
     * Simultaneous renames can deadlock in ufs_dircheckpath since it
     * tries to traverse back the file tree with both tdp and sdp held
     * as RW_WRITER. To avoid that we have to hold the tdp and sdp locks
     * as RW_READERS till ufs_dircheckpath is done.
     * Now that ufs_dircheckpath is done with, we can upgrade the locks
     * to RW_WRITER.
     */
    if (!rw_tryupgrade(&tdp->i_rwlock)) {
        /*
         * The upgrade failed. We have to give away the lock
         * to avoid deadlocking with someone else who is
         * waiting for the writer lock. With the lock gone, we
         * cannot be sure the checks done above will hold
         * good when we eventually get them back as writer.
         * So if we can't upgrade we drop the locks and retry
         * everything again.
         */
        rw_exit(&tdp->i_rwlock);

        rw_exit(&sdp->i_rwlock);
        delay(ufs_rename_backoff_delay);
        ufs_rename_upgrade_retry_cnt++;

    if (!rw_tryupgrade(&sdp->i_rwlock)) {
        /*
         * The upgrade failed. We have to give away the lock
         * to avoid deadlocking with someone else who is
         * waiting for the writer lock. With the lock gone, we
         * cannot be sure the checks done above will hold
         * good when we eventually get them back as writer.
         * So if we can't upgrade we drop the locks and retry
         * everything again.
         */
        rw_exit(&tdp->i_rwlock);
        rw_exit(&sdp->i_rwlock);
        delay(ufs_rename_backoff_delay);
        ufs_rename_upgrade_retry_cnt++;

    /*
     * Now that all the locks are held check to make sure another thread
     * didn't slip in and take out the sip.
     */

    if ((sip->i_ctime.tv_usec * 1000) > now.tv_nsec ||
        sip->i_ctime.tv_sec > now.tv_sec) {
        rw_enter(&sdp->i_ufsvfs->vfs_dqrwlock, RW_READER);
        rw_enter(&sdp->i_contents, RW_WRITER);
        error = ufs_dircheckforname(sdp, snm, strlen(snm), &slot,

        rw_exit(&sdp->i_contents);
        rw_exit(&sdp->i_ufsvfs->vfs_dqrwlock);

        /*
         * If the inode was found need to drop the v_count
         * so as not to keep the filesystem from being
         * unmounted at a later time.
         */

        /*
         * Release the slot.fbp that has the page mapped and
         * locked SE_SHARED, and could be used in
         * ufs_direnter_lr() which needs to get the SE_EXCL lock
         */
        fbrelse(slot.fbp, S_OTHER);

    /*
     * Link source to the target.
     */
    if (error = ufs_direnter_lr(tdp, tnm, DE_RENAME, sdp, sip, cr)) {
        /*
         * ESAME isn't really an error; it indicates that the
         * operation should not be done because the source and target
         * are the same file, but that no error should be reported.
         */

    /*
     * Unlink the source.
     * Remove the source entry. ufs_dirremove() checks that the entry
     * still reflects sip, and returns an error if it doesn't.
     * If the entry has changed just forget about it. Release
     * the source inode.
     */
    if ((error = ufs_dirremove(sdp, snm, sip, (struct vnode *)0,
        DR_RENAME, cr)) == ENOENT)

    fbrelse(slot.fbp, S_OTHER);

    rw_exit(&tdp->i_rwlock);

    rw_exit(&sdp->i_rwlock);

    TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME, trans_size);
    ufs_lockfs_end(ulp);
ufs_mkdir(struct vnode *dvp, char *dirname, struct vattr *vap,
    struct vnode **vpp, struct cred *cr, caller_context_t *ct, int flags,

    struct ufsvfs *ufsvfsp;
    struct ulockfs *ulp;

    ASSERT((vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));

    /*
     * Can't make directory in attr hidden dir
     */
    if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)

    ufsvfsp = ip->i_ufsvfs;
    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_MKDIR_MASK);

    TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_MKDIR,
        trans_size = (int)TOP_MKDIR_SIZE(ip));

    /*
     * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
     * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
     * possible, retries the operation.
     */
    ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_MKDIR, retry);

    error = ufs_direnter_cm(ip, dirname, DE_MKDIR, vap, &xip, cr,
        (retry ? IQUIET : 0));
    if (error == EAGAIN) {

        TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_MKDIR,

        ufs_lockfs_end(ulp);

    rw_exit(&ip->i_rwlock);

    } else if (error == EEXIST)

    TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_MKDIR, trans_size);
    ufs_lockfs_end(ulp);

    if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
        ufs_delete_drain_wait(ufsvfsp, 1);
ufs_rmdir(struct vnode *vp, char *nm, struct vnode *cdir, struct cred *cr,
    caller_context_t *ct, int flags)
{
    struct inode *ip = VTOI(vp);
    struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
    struct ulockfs *ulp;
    vnode_t *rmvp = NULL;   /* Vnode of removed directory */

    /*
     * don't let the delete queue get too long
     */
    if (ufsvfsp == NULL) {

    if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
        ufs_delete_drain(vp->v_vfsp, 1, 1);

    error = ufs_eventlookup(vp, nm, cr, &rmvp);

    /* Only send the event if there were no errors */

    vnevent_rmdir(rmvp, vp, nm, ct);

    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RMDIR_MASK);

    TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_RMDIR,
        trans_size = TOP_RMDIR_SIZE);

    /*
     * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
     * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
     * possible, retries the operation.
     */
    ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_RMDIR, retry);

    error = ufs_dirremove(ip, nm, (struct inode *)0, cdir, DR_RMDIR, cr);

    rw_exit(&ip->i_rwlock);

    TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RMDIR,

    ufs_lockfs_end(ulp);
    caller_context_t *ct,

    struct dirent64 *odp;

    struct ufsvfs *ufsvfsp;
    struct ulockfs *ulp;

    uint_t bytes_wanted, total_bytes_wanted;

    ASSERT(RW_READ_HELD(&ip->i_rwlock));

    if (uiop->uio_loffset >= MAXOFF32_T) {

    /*
     * Check if we have been called with a valid iov_len
     * and bail out if not, otherwise we may potentially loop
     * forever further down.
     */
    if (uiop->uio_iov->iov_len <= 0) {

    /*
     * Large Files: When we come here we are guaranteed that
     * uio_offset can be used safely. The high word is zero.
     */

    ufsvfsp = ip->i_ufsvfs;
    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READDIR_MASK);

    iovp = uiop->uio_iov;
    total_bytes_wanted = iovp->iov_len;

    /* Large Files: directory files should not be "large" */

    ASSERT(ip->i_size <= MAXOFF32_T);

    /* Force offset to be valid (to guard against bogus lseek() values) */
    offset = (uint_t)uiop->uio_offset & ~(DIRBLKSIZ - 1);

    /* Quit if at end of file or link count of zero (posix) */
    if (offset >= (uint_t)ip->i_size || ip->i_nlink <= 0) {

    /*
     * Get space to change directory entries into fs independent format.
     * Do fast alloc for the most commonly used request size (filesystem
     * block size).
     */
    if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1) {
        bufsize = total_bytes_wanted;
        outbuf = kmem_alloc(bufsize, KM_SLEEP);
        odp = (struct dirent64 *)outbuf;
    } else {
        bufsize = total_bytes_wanted;
        odp = (struct dirent64 *)iovp->iov_base;

    bytes_wanted = total_bytes_wanted;

    /* Truncate request to file size */
    if (offset + bytes_wanted > (int)ip->i_size)
        bytes_wanted = (int)(ip->i_size - offset);

    /* Comply with MAXBSIZE boundary restrictions of fbread() */
    if ((offset & MAXBOFFSET) + bytes_wanted > MAXBSIZE)
        bytes_wanted = MAXBSIZE - (offset & MAXBOFFSET);

    /*
     * Read in the next chunk.
     * We are still holding the i_rwlock.
     */
    error = fbread(vp, (offset_t)offset, bytes_wanted, S_OTHER, &fbp);

    if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (ip->i_fs->fs_ronly == 0) &&
        (!ufsvfsp->vfs_noatime)) {

    idp = (struct direct *)fbp->fb_addr;
    if (idp->d_ino == 0 && idp->d_reclen == 0 && idp->d_namlen == 0) {
        cmn_err(CE_WARN, "ufs_readdir: bad dir, inumber = %llu, "
            "fs = %s\n",
            (u_longlong_t)ip->i_number, ufsvfsp->vfs_fs->fs_fsmnt);
        fbrelse(fbp, S_OTHER);

    /* Transform to file-system independent format */
    while (incount < bytes_wanted) {
        /*
         * If the current directory entry is mangled, then skip
         * to the next block. It would be nice to set the FSBAD
         * flag in the super-block so that a fsck is forced on
         * next reboot, but locking is a problem.
         */
        if (idp->d_reclen & 0x3) {
            offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);

        /* Skip to requested offset and skip empty entries */
        if (idp->d_ino != 0 && offset >= (uint_t)uiop->uio_offset) {

            ushort_t this_reclen =
                DIRENT64_RECLEN(idp->d_namlen);
            /* Buffer too small for any entries */
            if (!outcount && this_reclen > bufsize) {
                fbrelse(fbp, S_OTHER);

            /* If would overrun the buffer, quit */
            if (outcount + this_reclen > bufsize) {

            /* Take this entry */
            odp->d_ino = (ino64_t)idp->d_ino;
            odp->d_reclen = (ushort_t)this_reclen;
            odp->d_off = (offset_t)(offset + idp->d_reclen);

            /* use strncpy(9f) to zero out uninitialized bytes */

            ASSERT(strlen(idp->d_name) + 1 <=
                DIRENT64_NAMELEN(this_reclen));
            (void) strncpy(odp->d_name, idp->d_name,
                DIRENT64_NAMELEN(this_reclen));
            outcount += odp->d_reclen;
            odp = (struct dirent64 *)
                ((intptr_t)odp + odp->d_reclen);
            ASSERT(outcount <= bufsize);

        if (idp->d_reclen) {
            incount += idp->d_reclen;
            offset += idp->d_reclen;
            idp = (struct direct *)((intptr_t)idp + idp->d_reclen);

            offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);

    /* Release the chunk */
    fbrelse(fbp, S_OTHER);

    /* Read whole block, but got no entries, read another if not eof */

    /*
     * Large Files: casting i_size to int here is not a problem
     * because directory sizes are always less than MAXOFF32_T.
     * See assertion above.
     */

    if (offset < (int)ip->i_size && !outcount)

    /* Copy out the entry data */
    if (uiop->uio_segflg == UIO_SYSSPACE && uiop->uio_iovcnt == 1) {
        iovp->iov_base += outcount;
        iovp->iov_len -= outcount;
        uiop->uio_resid -= outcount;
        uiop->uio_offset = offset;
    } else if ((error = uiomove(outbuf, (long)outcount, UIO_READ,

        uiop->uio_offset = offset;

    if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1)
        kmem_free(outbuf, bufsize);

    if (eofp && error == 0)
        *eofp = (uiop->uio_offset >= (int)ip->i_size);

    ufs_lockfs_end(ulp);
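/*
 * Added sketch (editorial): each on-disk struct direct entry above is
 * repacked as a struct dirent64. A DIRENT64_RECLEN-style record size can
 * be pictured as (hypothetical expression):
 *
 *    reclen = roundup(offsetof(struct dirent64, d_name) + namlen + 1, 8);
 *
 * header plus name plus NUL, rounded up so the next record stays 8-byte
 * aligned; the strncpy() above then zero-fills the padding so no stray
 * kernel memory is copied out to userland.
 */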
    struct vnode *dvp,      /* ptr to parent dir vnode */
    char *linkname,         /* name of symbolic link */
    struct vattr *vap,      /* attributes */
    char *target,           /* target path */
    struct cred *cr,        /* user credentials */
    caller_context_t *ct,

    struct inode *ip, *dip = VTOI(dvp);
    struct ufsvfs *ufsvfsp = dip->i_ufsvfs;
    struct ulockfs *ulp;

    /*
     * No symlinks in attrdirs at this time
     */
    if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)

    ip = (struct inode *)NULL;
    vap->va_type = VLNK;

    error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SYMLINK_MASK);

    TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_SYMLINK,
        trans_size = (int)TOP_SYMLINK_SIZE(dip));

    /*
     * We must create the inode before the directory entry, to avoid
     * racing with readlink(). ufs_dirmakeinode requires that we
     * hold the quota lock as reader, and directory locks as writer.
     */

    rw_enter(&dip->i_rwlock, RW_WRITER);
    rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
    rw_enter(&dip->i_contents, RW_WRITER);

    /*
     * Suppress any out of inodes messages if we will retry on
     * ENOSPC.
     */
    dip->i_flag |= IQUIET;

    error = ufs_dirmakeinode(dip, &ip, vap, DE_SYMLINK, cr);

    dip->i_flag &= ~IQUIET;

    rw_exit(&dip->i_contents);
    rw_exit(&ufsvfsp->vfs_dqrwlock);
    rw_exit(&dip->i_rwlock);

    /*
     * OK. The inode has been created. Write out the data of the
     * symbolic link. Since symbolic links are metadata, and should
     * remain consistent across a system crash, we need to force the
     * data out synchronously.
     *
     * (This is a change from the semantics in earlier releases, which
     * only created symbolic links synchronously if the semi-documented
     * 'syncdir' option was set, or if we were being invoked by the NFS
     * server, which requires symbolic links to be created synchronously.)
     *
     * We need to pass in a pointer for the residual length; otherwise
     * ufs_rdwri() will always return EIO if it can't write the data,
     * even if the error was really ENOSPC or EDQUOT.
     */

    ioflag = FWRITE | FDSYNC;

    rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
    rw_enter(&ip->i_contents, RW_WRITER);

    /*
     * Suppress file system full messages if we will retry
     */
    ip->i_flag |= IQUIET;

    error = ufs_rdwri(UIO_WRITE, ioflag, ip, target, strlen(target),
        (offset_t)0, UIO_SYSSPACE, &residual, cr);

    ip->i_flag &= ~IQUIET;

    rw_exit(&ip->i_contents);
    rw_exit(&ufsvfsp->vfs_dqrwlock);

    /*
     * If the link's data is small enough, we can cache it in the inode.
     * This is a "fast symbolic link". We don't use the first direct
     * block because that's actually used to point at the symbolic link's
     * contents on disk; but we know that none of the other direct or
     * indirect blocks can be used because symbolic links are restricted
     * to be smaller than a file system block.
     */

    ASSERT(MAXPATHLEN <= VBSIZE(ITOV(ip)));

    if (ip->i_size > 0 && ip->i_size <= FSL_SIZE) {
        if (kcopy(target, &ip->i_db[1], ip->i_size) == 0) {
            ip->i_flag |= IFASTSYMLNK;

            /* error, clear garbage left behind */
            for (i = 1; i < NDADDR; i++)
                ip->i_db[i] = 0;

            for (i = 0; i < NIADDR; i++)
                ip->i_ib[i] = 0;

    rw_exit(&ip->i_contents);
    rw_exit(&ufsvfsp->vfs_dqrwlock);

    /*
     * OK. We've successfully created the symbolic link. All that
     * remains is to insert it into the appropriate directory.
     */

    rw_enter(&dip->i_rwlock, RW_WRITER);
    error = ufs_direnter_lr(dip, linkname, DE_SYMLINK, NULL, ip, cr);
    rw_exit(&dip->i_rwlock);

    /*
     * Fall through into remove-on-error code. We're either done, or we
     * need to remove the inode (if we couldn't insert it).
     */

    if (error && (ip != NULL)) {
        rw_enter(&ip->i_contents, RW_WRITER);

        rw_exit(&ip->i_contents);

    TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_SYMLINK,

    ufs_lockfs_end(ulp);

    /*
     * We may have failed due to lack of an inode or of a block to
     * store the target in. Try flushing the delete queue to free
     * logically-available things up and try again.
     */
    if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
        ufs_delete_drain_wait(ufsvfsp, 1);
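/*
 * Added note (editorial sketch): a "fast" symbolic link keeps its target
 * inside the inode so readlink() can skip the data block entirely:
 *
 *    if (ip->i_size > 0 && ip->i_size <= FSL_SIZE)
 *        kcopy(target, &ip->i_db[1], ip->i_size);   cache in the inode
 *        ip->i_flag |= IFASTSYMLNK;                 readlink uses the copy
 *
 * Because the target was already forced to disk synchronously above,
 * losing the in-core flag costs only a block read, never the contents.
 */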
/*
 * Ufs specific routine used to do ufs io.
 */
ufs_rdwri(enum uio_rw rw, int ioflag, struct inode *ip, caddr_t base,
    ssize_t len, offset_t offset, enum uio_seg seg, int *aresid,

    ASSERT(RW_LOCK_HELD(&ip->i_contents));

    bzero((caddr_t)&auio, sizeof (uio_t));
    bzero((caddr_t)&aiov, sizeof (iovec_t));

    aiov.iov_base = base;

    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_loffset = offset;
    auio.uio_segflg = (short)seg;
    auio.uio_resid = len;

    if (rw == UIO_WRITE) {
        auio.uio_fmode = FWRITE;
        auio.uio_extflg = UIO_COPY_DEFAULT;
        auio.uio_llimit = curproc->p_fsz_ctl;
        error = wrip(ip, &auio, ioflag, cr);
    } else {
        auio.uio_fmode = FREAD;
        auio.uio_extflg = UIO_COPY_CACHED;
        auio.uio_llimit = MAXOFFSET_T;
        error = rdip(ip, &auio, ioflag, cr);

    if (aresid) {
        *aresid = auio.uio_resid;
    } else if (auio.uio_resid) {
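/*
 * Added usage note (editorial): callers hold i_contents and use ufs_rdwri
 * as a one-shot kernel transfer, as the symlink write above does. A read
 * of the same style would look like (hypothetical buffer and names):
 *
 *    rw_enter(&ip->i_contents, RW_READER);
 *    err = ufs_rdwri(UIO_READ, 0, ip, buf, sizeof (buf),
 *        (offset_t)0, UIO_SYSSPACE, &resid, cr);
 *    rw_exit(&ip->i_contents);
 *
 * Passing a non-NULL aresid is what lets a short transfer surface the
 * real error (ENOSPC, EDQUOT) instead of being folded into EIO.
 */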
ufs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
{
    struct inode *ip = VTOI(vp);

    if (ip->i_ufsvfs == NULL)

    if (fidp->fid_len < (sizeof (struct ufid) - sizeof (ushort_t))) {
        fidp->fid_len = sizeof (struct ufid) - sizeof (ushort_t);

    ufid = (struct ufid *)fidp;
    bzero((char *)ufid, sizeof (struct ufid));
    ufid->ufid_len = sizeof (struct ufid) - sizeof (ushort_t);
    ufid->ufid_ino = ip->i_number;
    ufid->ufid_gen = ip->i_gen;
ufs_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
{
    struct inode *ip = VTOI(vp);
    struct ufsvfs *ufsvfsp;

    /*
     * Read case is easy.
     */

    rw_enter(&ip->i_rwlock, RW_READER);
    return (V_WRITELOCK_FALSE);

    /*
     * Caller has requested a writer lock, but that inhibits any
     * concurrency in the VOPs that follow. Acquire the lock shared
     * and defer exclusive access until it is known to be needed in
     * other VOP handlers. Some cases can be determined here.
     */

    /*
     * If directio is not set, there is no chance of concurrency,
     * so just acquire the lock exclusive. Beware of a forced
     * unmount before looking at the mount option.
     */
    ufsvfsp = ip->i_ufsvfs;
    forcedirectio = ufsvfsp ? ufsvfsp->vfs_forcedirectio : 0;
    if (!(ip->i_flag & IDIRECTIO || forcedirectio) ||
        !ufs_allow_shared_writes) {
        rw_enter(&ip->i_rwlock, RW_WRITER);
        return (V_WRITELOCK_TRUE);

    /*
     * Mandatory locking forces acquiring i_rwlock exclusive.
     */
    if (MANDLOCK(vp, ip->i_mode)) {
        rw_enter(&ip->i_rwlock, RW_WRITER);
        return (V_WRITELOCK_TRUE);

    /*
     * Acquire the lock shared in case a concurrent write follows.
     * Mandatory locking could have become enabled before the lock
     * was acquired. Re-check and upgrade if needed.
     */
    rw_enter(&ip->i_rwlock, RW_READER);
    if (MANDLOCK(vp, ip->i_mode)) {
        rw_exit(&ip->i_rwlock);
        rw_enter(&ip->i_rwlock, RW_WRITER);
        return (V_WRITELOCK_TRUE);

    return (V_WRITELOCK_FALSE);
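/*
 * Added usage note (editorial): the VOP layer brackets file I/O with
 * VOP_RWLOCK/VOP_RWUNLOCK, so the shared grant above is what allows
 * several directio writers to run on one file concurrently:
 *
 *    locked = VOP_RWLOCK(vp, V_WRITELOCK_TRUE, NULL);
 *    ... the write path may find locked == V_WRITELOCK_FALSE ...
 *    VOP_RWUNLOCK(vp, locked, NULL);
 *
 * The re-check-and-upgrade at the end of ufs_rwlock shows the pattern
 * used when exclusion turns out to be required after all.
 */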
ufs_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
{
    struct inode *ip = VTOI(vp);

    rw_exit(&ip->i_rwlock);

ufs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
    caller_context_t *ct)
{
    return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
ufs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, struct flk_callback *flk_cbp, struct cred *cr,
    caller_context_t *ct)
{
    struct inode *ip = VTOI(vp);

    if (ip->i_ufsvfs == NULL)

    /*
     * If file is being mapped, disallow frlock.
     * XXX I am not holding tlock while checking i_mapcnt because the
     * current locking strategy drops all locks before calling fs_frlock.
     * So, mapcnt could change before we enter fs_frlock, making it
     * meaningless to have held tlock in the first place.
     */
    if (ip->i_mapcnt > 0 && MANDLOCK(vp, ip->i_mode))

    return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
}
ufs_space(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, cred_t *cr, caller_context_t *ct)
{
    struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
    struct ulockfs *ulp;

    if ((error = convoff(vp, bfp, 0, offset)) == 0) {
        if (cmd == F_FREESP) {
            error = ufs_lockfs_begin(ufsvfsp, &ulp,
                ULOCKFS_SPACE_MASK);

            error = ufs_freesp(vp, bfp, flag, cr);

            if (error == 0 && bfp->l_start == 0)
                vnevent_truncate(vp, ct);
        } else if (cmd == F_ALLOCSP) {
            error = ufs_lockfs_begin(ufsvfsp, &ulp,
                ULOCKFS_FALLOCATE_MASK);

            error = ufs_allocsp(vp, bfp, cr);

            return (EINVAL); /* Command not handled here */

        ufs_lockfs_end(ulp);
/*
 * Used to determine if read ahead should be done. Also used to
 * determine when write back occurs.
 */
#define CLUSTSZ(ip)     ((ip)->i_ufsvfs->vfs_ioclustsz)

/*
 * A faster version of ufs_getpage.
 *
 * We optimize by inlining the pvn_getpages iterator, eliminating
 * calls to bmap_read if file doesn't have UFS holes, and avoiding
 * the overhead of page_exists().
 *
 * When a file has UFS holes and ufs_getpage is called with S_READ,
 * we set *protp to PROT_READ to avoid calling bmap_read. This approach
 * victimizes performance when a file with UFS holes is faulted
 * first in the S_READ mode, and then in the S_WRITE mode. We will get
 * two MMU faults in this case.
 *
 * XXX - the inode fields which control the sequential mode are not
 *       protected by any mutex. The read ahead will act wild if
 *       multiple processes will access the file concurrently and
 *       some of them in sequential mode. One particularly bad case
 *       is if another thread will change the value of i_nextrio between
 *       the time this thread tests the i_nextrio value and then reads it
 *       again to use it as the offset for the read ahead.
 */
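/*
 * Added sketch (editorial): CLUSTSZ() is the filesystem's I/O cluster size
 * in bytes, and the read-ahead trigger used below can be read as:
 *
 *    if (pgoff + CLUSTSZ(ip) >= ip->i_nextrio && pgoff <= ip->i_nextrio &&
 *        ip->i_nextrio < ip->i_size && page_exists(vp, pgoff))
 *        (void) ufs_getpage_ra(vp, pgoff, seg, pgaddr);
 *
 * i.e. start the next cluster's asynchronous read once the faulting offset
 * comes within one cluster of where the previous read-ahead stopped.
 */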
ufs_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
    page_t *plarr[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, struct cred *cr, caller_context_t *ct)
{
    u_offset_t uoff = (u_offset_t)off; /* type conversion */

    struct inode *ip = VTOI(vp);
    struct ufsvfs *ufsvfsp = ip->i_ufsvfs;

    struct ulockfs *ulp;

    int pgsize = PAGESIZE;

    ASSERT((uoff & PAGEOFFSET) == 0);

    /*
     * Obey the lockfs protocol
     */
    err = ufs_lockfs_begin_getpage(ufsvfsp, &ulp, seg,
        rw == S_READ || rw == S_EXEC, protp);

    fs = ufsvfsp->vfs_fs;

    if (ulp && (rw == S_CREATE || rw == S_WRITE) &&
        !(vp->v_flag & VISSWAP)) {
        /*
         * Try to start a transaction, will return if blocking is
         * expected to occur and the address space is not the
         * kernel address space.
         */
        trans_size = TOP_GETPAGE_SIZE(ip);
        if (seg->s_as != &kas) {
            TRANS_TRY_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE,

            if (err == EWOULDBLOCK) {
                /*
                 * Use EDEADLK here because the VM code
                 * can normally never see this error.
                 */

                ufs_lockfs_end(ulp);

            TRANS_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);

    if (vp->v_flag & VNOMAP) {

    seqmode = ip->i_nextr == uoff && rw != S_CREATE;

    rwtype = RW_READER;     /* start as a reader */
    dolock = (rw_owner(&ip->i_contents) != curthread);
    /*
     * If this thread owns the lock, i.e., this thread grabbed it
     * as writer somewhere above, then we don't need to grab the
     * lock as reader in this routine.
     */
    do_qlock = (rw_owner(&ufsvfsp->vfs_dqrwlock) != curthread);

    /*
     * Grab the quota lock if we need to call
     * bmap_write() below (with i_contents as writer).
     */
    if (do_qlock && rwtype == RW_WRITER)
        rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
    rw_enter(&ip->i_contents, rwtype);

    /*
     * We may be getting called as a side effect of a bmap using
     * fbread() when the blocks might be being allocated and the
     * size has not yet been up'ed. In this case we want to be
     * able to return zero pages if we get back UFS_HOLE from
     * calling bmap for a non write case here. We also might have
     * to read some frags from the disk into a page if we are
     * extending the number of frags for a given lbn in bmap().
     * Large Files: The read of i_size here is atomic because
     * i_contents is held here. If dolock is zero, the lock
     * is held in bmap routines.
     */
    beyond_eof = uoff + len >
        P2ROUNDUP_TYPED(ip->i_size, PAGESIZE, u_offset_t);
    if (beyond_eof && seg != segkmap) {

        rw_exit(&ip->i_contents);
        if (do_qlock && rwtype == RW_WRITER)
            rw_exit(&ufsvfsp->vfs_dqrwlock);

    /*
     * Must hold i_contents lock throughout the call to pvn_getpages
     * since locked pages are returned from each call to ufs_getapage.
     * Must *not* return locked pages and then try for contents lock
     * due to lock ordering requirements (inode > page)
     */

    has_holes = bmap_has_holes(ip);

    if ((rw == S_WRITE || rw == S_CREATE) && has_holes && !beyond_eof) {

        /*
         * We must acquire the RW_WRITER lock in order to
         * call bmap_write().
         */
        if (dolock && rwtype == RW_READER) {

            /*
             * Grab the quota lock before
             * upgrading i_contents, but if we can't grab it
             * don't wait here due to lock order:
             * vfs_dqrwlock > i_contents.
             */

            rw_tryenter(&ufsvfsp->vfs_dqrwlock, RW_READER)

                rw_exit(&ip->i_contents);

            if (!rw_tryupgrade(&ip->i_contents)) {
                rw_exit(&ip->i_contents);

                rw_exit(&ufsvfsp->vfs_dqrwlock);

        /*
         * May be allocating disk blocks for holes here as
         * a result of mmap faults. write(2) does the bmap_write
         * in rdip/wrip, not here. We are not dealing with frags
         * in this case.
         *
         * Large Files: We cast fs_bmask field to offset_t
         * just as we do for MAXBMASK because uoff is a 64-bit
         * data type. fs_bmask will still be a 32-bit type
         * as we cannot change any ondisk data structures.
         */

        offset = uoff & (offset_t)fs->fs_bmask;
        while (offset < uoff + len) {
            blk_size = (int)blksize(fs, ip, lblkno(fs, offset));
            err = bmap_write(ip, offset, blk_size,
                BI_NORMAL, NULL, cr);
            if (ip->i_flag & (ICHG|IUPD))

            offset += blk_size; /* XXX - make this contig */

    /*
     * Can be a reader from now on.
     */
    if (dolock && rwtype == RW_WRITER) {
        rw_downgrade(&ip->i_contents);
        /*
         * We can release vfs_dqrwlock early so do it, but make
         * sure we don't try to release it again at the bottom.
         */

        rw_exit(&ufsvfsp->vfs_dqrwlock);

    /*
     * We remove PROT_WRITE in cases when the file has UFS holes
     * because we don't want to call bmap_read() to check each
     * page if it is backed with a disk block.
     */
    if (protp && has_holes && rw != S_WRITE && rw != S_CREATE)
        *protp &= ~PROT_WRITE;

    /*
     * The loop looks up pages in the range [off, off + len).
     * For each page, we first check if we should initiate an asynchronous
     * read ahead before we call page_lookup (we may sleep in page_lookup
     * for a previously initiated disk read).
     */
    eoff = (uoff + len);
    for (pgoff = uoff, pgaddr = addr, pl = plarr;
        pgoff < eoff; /* empty */) {

        se = ((rw == S_CREATE || rw == S_OTHER) ? SE_EXCL : SE_SHARED);

        /* Handle async getpage (faultahead) */
        if (plarr == NULL) {
            ip->i_nextrio = pgoff;
            (void) ufs_getpage_ra(vp, pgoff, seg, pgaddr);

        /*
         * Check if we should initiate read ahead of next cluster.
         * We call page_exists only when we need to confirm that
         * we have the current page before we initiate the read ahead.
         */
        nextrio = ip->i_nextrio;

            pgoff + CLUSTSZ(ip) >= nextrio && pgoff <= nextrio &&
            nextrio < ip->i_size && page_exists(vp, pgoff)) {
            retval = ufs_getpage_ra(vp, pgoff, seg, pgaddr);
            /*
             * We always read ahead the next cluster of data
             * starting from i_nextrio. If the page (vp,nextrio)
             * is actually in core at this point, the routine
             * ufs_getpage_ra() will stop pre-fetching data
             * until we read that page in a synchronized manner
             * through ufs_getpage_miss(). So, we should increase
             * i_nextrio if the page (vp, nextrio) exists.
             */
            if ((retval == 0) && page_exists(vp, nextrio)) {
                ip->i_nextrio = nextrio + pgsize;

        if ((pp = page_lookup(vp, pgoff, se)) != NULL) {
            /*
             * We found the page in the page cache.
             */

            /*
             * We have to create the page, or read it from disk.
             */
            if (err = ufs_getpage_miss(vp, pgoff, len, seg, pgaddr,
                pl, plsz, rw, seqmode))

            while (*pl != NULL) {

    /*
     * Return pages up to plsz if they are in the page cache.
     * We cannot return pages if there is a chance that they are
     * backed with a UFS hole and rw is S_WRITE or S_CREATE.
     */
    if (plarr && !(has_holes && (rw == S_WRITE || rw == S_CREATE))) {

        ASSERT((protp == NULL) ||
            !(has_holes && (*protp & PROT_WRITE)));

        eoff = pgoff + plsz;
        while (pgoff < eoff) {

            if ((pp = page_lookup_nowait(vp, pgoff,
                SE_SHARED)) == NULL)

    *pl = NULL;     /* Terminate page list */
    ip->i_nextr = pgoff;

    /*
     * Release any pages we have locked.
     */
    while (pl > &plarr[0])

    /*
     * If the inode is not already marked for IACC (in rdip() for read)
     * and the inode is not marked for no access time update (in wrip()
     * for write) then update the inode access time and mod time now.
     */
    if ((ip->i_flag & (IACC | INOACC)) == 0) {
        if ((rw != S_OTHER) && (ip->i_mode & IFMT) != IFDIR) {
            if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
                (fs->fs_ronly == 0) &&
                (!ufsvfsp->vfs_noatime)) {
                mutex_enter(&ip->i_tlock);

                mutex_exit(&ip->i_tlock);

    rw_exit(&ip->i_contents);
    if (do_qlock && rwtype == RW_WRITER)
        rw_exit(&ufsvfsp->vfs_dqrwlock);

    if ((rw == S_CREATE || rw == S_WRITE) &&
        !(vp->v_flag & VISSWAP)) {
        TRANS_END_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);

    ufs_lockfs_end(ulp);
4872 * ufs_getpage_miss is called when ufs_getpage missed the page in the page
4873 * cache. The page is either read from the disk, or it's created.
4874 * A page is created (without disk read) if rw == S_CREATE, or if
4875 * the page is not backed with a real disk block (UFS hole).
4879 ufs_getpage_miss(struct vnode
*vp
, u_offset_t off
, size_t len
, struct seg
*seg
,
4880 caddr_t addr
, page_t
*pl
[], size_t plsz
, enum seg_rw rw
, int seq
)
4882 struct inode
*ip
= VTOI(vp
);
4889 int bsize
= ip
->i_fs
->fs_bsize
;
4892 * Figure out whether the page can be created, or must be
4893 * must be read from the disk.
4899 if (err
= bmap_read(ip
, off
, &bn
, &contig
))
4902 crpage
= (bn
== UFS_HOLE
);
4905 * If its also a fallocated block that hasn't been written to
4906 * yet, we will treat it just like a UFS_HOLE and create
4907 * a zero page for it
4909 if (ISFALLOCBLK(ip
, bn
))
4914 if ((pp
= page_create_va(vp
, off
, PAGESIZE
, PG_WAIT
, seg
,
4916 return (ufs_fault(vp
,
4917 "ufs_getpage_miss: page_create == NULL"));
4921 pagezero(pp
, 0, PAGESIZE
);
		ufsvfs_t	*ufsvfsp = ip->i_ufsvfs;

		/*
		 * If access is not in sequential order, we read from disk
		 * in bsize units.
		 *
		 * We limit the size of the transfer to bsize if we are reading
		 * from the beginning of the file. Note in this situation we
		 * will hedge our bets and initiate an async read ahead of
		 * the second block.
		 */
		if (!seq || off == 0)
			contig = MIN(contig, bsize);

		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
		    &io_len, off, contig, 0);

		/*
		 * Some other thread has entered the page.
		 * ufs_getpage will retry page_lookup.
		 */
		if (pp == NULL) {
			pl[0] = NULL;
			return (0);
		}

		/*
		 * Zero part of the page which we are not
		 * going to read from the disk.
		 */
		xlen = io_len & PAGEOFFSET;
		if (xlen != 0)
			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

		bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ);
		bp->b_edev = ip->i_dev;
		bp->b_dev = cmpdev(ip->i_dev);
		bp->b_blkno = bn;
		bp->b_un.b_addr = (caddr_t)0;
		bp->b_file = ip->i_vnode;
		bp->b_offset = off;

		if (ufsvfsp->vfs_log) {
			lufs_read_strategy(ufsvfsp->vfs_log, bp);
		} else if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
			ub.ub_getpages.value.ul++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_INBLK, 1);
		}

		ip->i_nextrio = off + ((io_len + PAGESIZE - 1) & PAGEMASK);

		/*
		 * If the file access is sequential, initiate read ahead
		 * of the next cluster.
		 */
		if (seq && ip->i_nextrio < ip->i_size)
			(void) ufs_getpage_ra(vp, off, seg, addr);

		err = biowait(bp);
		pageio_done(bp);

		if (err) {
			pvn_read_done(pp, B_ERROR);
			return (err);
		}
	}

	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
	return (0);
}
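/*
 * Worked example (added commentary, not in the original source): the
 * expression (io_len + PAGESIZE - 1) & PAGEMASK above rounds the I/O
 * length up to a page boundary. Assuming 8K pages (PAGESIZE == 0x2000,
 * PAGEMASK == ~0x1fff), a kluster of io_len 0x3000 at off 0 leaves
 * i_nextrio at 0x4000, the first page boundary past the data just read.
 */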
/*
 * Read ahead a cluster from the disk. Returns the length in bytes.
 */
static int
ufs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg, caddr_t addr)
{
	struct inode	*ip = VTOI(vp);
	page_t		*pp;
	u_offset_t	io_off = ip->i_nextrio;
	ufsvfs_t	*ufsvfsp;
	caddr_t		addr2 = addr + (io_off - off);
	struct buf	*bp;
	daddr_t		bn;
	size_t		io_len;
	int		err;
	int		contig;
	int		xlen;
	int		bsize = ip->i_fs->fs_bsize;

	/*
	 * If the directio advisory is in effect on this file,
	 * then do not do buffered read ahead. Read ahead makes
	 * it more difficult on threads using directio as they
	 * will be forced to flush the pages from this vnode.
	 */
	if ((ufsvfsp = ip->i_ufsvfs) == NULL)
		return (0);
	if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio)
		return (0);

	/*
	 * Is this test needed?
	 */
	if (addr2 >= seg->s_base + seg->s_size)
		return (0);

	contig = 0;
	err = bmap_read(ip, io_off, &bn, &contig);
	/*
	 * If it's a UFS_HOLE or a fallocated block, do not perform
	 * any read aheads since there probably is nothing to read ahead
	 */
	if (err || bn == UFS_HOLE || ISFALLOCBLK(ip, bn))
		return (0);

	/*
	 * Limit the transfer size to bsize if this is the 2nd block.
	 */
	if (io_off == (u_offset_t)bsize)
		contig = MIN(contig, bsize);

	if ((pp = pvn_read_kluster(vp, io_off, seg, addr2, &io_off,
	    &io_len, io_off, contig, 1)) == NULL)
		return (0);

	/*
	 * Zero part of page which we are not going to read from disk
	 */
	if ((xlen = (io_len & PAGEOFFSET)) > 0)
		pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

	ip->i_nextrio = (io_off + io_len + PAGESIZE - 1) & PAGEMASK;

	bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ | B_ASYNC);
	bp->b_edev = ip->i_dev;
	bp->b_dev = cmpdev(ip->i_dev);
	bp->b_blkno = bn;
	bp->b_un.b_addr = (caddr_t)0;
	bp->b_file = ip->i_vnode;
	bp->b_offset = io_off;

	if (ufsvfsp->vfs_log) {
		lufs_read_strategy(ufsvfsp->vfs_log, bp);
	} else if (ufsvfsp->vfs_snapshot) {
		fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
	} else {
		ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
		ub.ub_getras.value.ul++;
		(void) bdev_strategy(bp);
		lwp_stat_update(LWP_STAT_INBLK, 1);
	}

	return (io_len);
}
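/*
 * Added commentary (not in the original source): the read-ahead buf above
 * is issued B_READ | B_ASYNC, so this routine does not biowait(); the
 * cluster completes in the background and a later sequential
 * ufs_getpage() pass finds the pages already in the page cache via
 * page_lookup(), advancing i_nextrio again as it goes.
 */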
int	ufs_delay = 1;

/*
 * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE, B_ASYNC}
 *
 * LMXXX - the inode really ought to contain a pointer to one of these
 * async args.  Stuff gunk in there and just hand the whole mess off.
 * This would replace i_delaylen, i_delayoff.
 */
/*ARGSUSED*/
static int
ufs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
	struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	int err = 0;

	if (vp->v_count == 0) {
		return (ufs_fault(vp, "ufs_putpage: bad v_count == 0"));
	}

	/*
	 * XXX - Why should this check be made here?
	 */
	if (vp->v_flag & VNOMAP) {
		err = ENOSYS;
		goto errout;
	}

	if (ip->i_ufsvfs == NULL) {
		err = EIO;
		goto errout;
	}

	if (flags & B_ASYNC) {
		if (ufs_delay && len &&
		    (flags & ~(B_ASYNC|B_DONTNEED|B_FREE)) == 0) {
			mutex_enter(&ip->i_tlock);
			/*
			 * If nobody stalled, start a new cluster.
			 */
			if (ip->i_delaylen == 0) {
				ip->i_delayoff = off;
				ip->i_delaylen = len;
				mutex_exit(&ip->i_tlock);
				goto errout;
			}
			/*
			 * If we have a full cluster or they are not contig,
			 * then push last cluster and start over.
			 */
			if (ip->i_delaylen >= CLUSTSZ(ip) ||
			    ip->i_delayoff + ip->i_delaylen != off) {
				u_offset_t doff;
				size_t dlen;

				doff = ip->i_delayoff;
				dlen = ip->i_delaylen;
				ip->i_delayoff = off;
				ip->i_delaylen = len;
				mutex_exit(&ip->i_tlock);
				err = ufs_putpages(vp, doff, dlen,
				    flags, cr);
				/* LMXXX - flags are new val, not old */
				goto errout;
			}
			/*
			 * There is something there, it's not full, and
			 * it is contig.
			 */
			ip->i_delaylen += len;
			mutex_exit(&ip->i_tlock);
			goto errout;
		}
		/*
		 * Must have weird flags or we are not clustering.
		 */
	}

	err = ufs_putpages(vp, off, len, flags, cr);

errout:
	return (err);
}
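/*
 * Worked example (added commentary, hypothetical numbers): with 8K pages,
 * three consecutive B_ASYNC putpages at offsets 0, 0x2000 and 0x4000
 * leave i_delayoff == 0 and i_delaylen == 0x6000. A fourth call at a
 * non-adjacent offset, or one that would grow the cluster past
 * CLUSTSZ(ip), pushes the accumulated range in a single ufs_putpages()
 * call and starts a new cluster at the new offset.
 */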
/*
 * If len == 0, do from off to EOF.
 *
 * The normal cases should be len == 0 & off == 0 (entire vp list),
 * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
 * (from pageout).
 */
/*ARGSUSED*/
static int
ufs_putpages(
	struct vnode *vp,
	offset_t off,
	size_t len,
	int flags,
	struct cred *cr)
{
	u_offset_t io_off;
	u_offset_t eoff;
	struct inode *ip = VTOI(vp);
	page_t *pp;
	size_t io_len;
	int err = 0;
	int dolock;

	if (vp->v_count == 0)
		return (ufs_fault(vp, "ufs_putpages: v_count == 0"));
	/*
	 * Acquire the readers/write inode lock before locking
	 * any pages in this inode.
	 * The inode lock is held during i/o.
	 */
	if (len == 0) {
		mutex_enter(&ip->i_tlock);
		ip->i_delayoff = ip->i_delaylen = 0;
		mutex_exit(&ip->i_tlock);
	}
	dolock = (rw_owner(&ip->i_contents) != curthread);
	if (dolock) {
		/*
		 * Must synchronize this thread and any possible thread
		 * operating in the window of vulnerability in wrip().
		 * It is dangerous to allow both a thread doing a putpage
		 * and a thread writing, so serialize them.  The exception
		 * is when the thread in wrip() does something which causes
		 * a putpage operation.  Then, the thread must be allowed
		 * to continue.  It may encounter a bmap_read problem in
		 * ufs_putapage, but that is handled in ufs_putapage.
		 * Allow async writers to proceed, we don't want to block
		 * the pageout daemon.
		 */
		if (ip->i_writer == curthread)
			rw_enter(&ip->i_contents, RW_READER);
		else {
			for (;;) {
				rw_enter(&ip->i_contents, RW_READER);
				mutex_enter(&ip->i_tlock);
				/*
				 * If there is no thread in the critical
				 * section of wrip(), then proceed.
				 * Otherwise, wait until there isn't one.
				 */
				if (ip->i_writer == NULL) {
					mutex_exit(&ip->i_tlock);
					break;
				}
				rw_exit(&ip->i_contents);
				/*
				 * Bounce async writers when we have a writer
				 * working on this file so we don't deadlock
				 * the pageout daemon.
				 */
				if (flags & B_ASYNC) {
					mutex_exit(&ip->i_tlock);
					return (0);
				}
				cv_wait(&ip->i_wrcv, &ip->i_tlock);
				mutex_exit(&ip->i_tlock);
			}
		}
	}

	if (!vn_has_cached_data(vp)) {
		if (dolock)
			rw_exit(&ip->i_contents);
		return (0);
	}

	if (len == 0) {
		/*
		 * Search the entire vp list for pages >= off.
		 */
		err = pvn_vplist_dirty(vp, (u_offset_t)off, ufs_putapage,
		    flags, cr);
	} else {
		/*
		 * Loop over all offsets in the range looking for
		 * pages to deal with.
		 */
		if ((eoff = blkroundup(ip->i_fs, ip->i_size)) != 0)
			eoff = MIN(off + len, eoff);
		else
			eoff = off + len;

		for (io_off = off; io_off < eoff; io_off += io_len) {
			/*
			 * If we are not invalidating, synchronously
			 * freeing or writing pages, use the routine
			 * page_lookup_nowait() to prevent reclaiming
			 * them from the free list.
			 */
			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
				pp = page_lookup(vp, io_off,
				    (flags & (B_INVAL | B_FREE)) ?
				    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(vp, io_off,
				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL || pvn_getdirty(pp, flags) == 0)
				io_len = PAGESIZE;
			else {
				u_offset_t *io_offp = &io_off;

				err = ufs_putapage(vp, pp, io_offp, &io_len,
				    flags, cr);
				if (err != 0)
					break;
				/*
				 * "io_off" and "io_len" are returned as
				 * the range of pages we actually wrote.
				 * This allows us to skip ahead more quickly
				 * since several pages may've been dealt
				 * with by this iteration of the loop.
				 */
			}
		}
	}
	if (err == 0 && off == 0 && (len == 0 || len >= ip->i_size)) {
		/*
		 * We have just sync'ed back all the pages on
		 * the inode, turn off the IMODTIME flag.
		 */
		mutex_enter(&ip->i_tlock);
		ip->i_flag &= ~IMODTIME;
		mutex_exit(&ip->i_tlock);
	}
	if (dolock)
		rw_exit(&ip->i_contents);
	return (err);
}
static void
ufs_iodone(buf_t *bp)
{
	struct inode *ip;

	ASSERT((bp->b_pages->p_vnode != NULL) && !(bp->b_flags & B_READ));

	bp->b_iodone = NULL;

	ip = VTOI(bp->b_pages->p_vnode);

	mutex_enter(&ip->i_tlock);
	if (ip->i_writes >= ufs_LW) {
		if ((ip->i_writes -= bp->b_bcount) <= ufs_LW)
			if (ufs_WRITES)
				cv_broadcast(&ip->i_wrcv); /* wake all up */
	} else {
		ip->i_writes -= bp->b_bcount;
	}

	mutex_exit(&ip->i_tlock);
	iodone(bp);
}
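/*
 * Added commentary (not in the original source): i_writes counts bytes of
 * write I/O in flight for this inode; it is charged in ufs_putapage()
 * before the strategy call and credited here at completion. The
 * cv_broadcast() only fires once the backlog falls back to the ufs_LW
 * low-water mark, waking writers that the write throttle in wrip() put
 * to sleep on i_wrcv.
 */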
/*
 * Write out a single page, possibly klustering adjacent
 * dirty pages.  The inode lock must be held.
 *
 * LMXXX - bsize < pagesize not done.
 */
/*ARGSUSED*/
int
ufs_putapage(
	struct vnode *vp,
	page_t *pp,
	u_offset_t *offp,
	size_t *lenp,		/* return values */
	int flags,
	struct cred *cr)
{
	u_offset_t io_off;
	u_offset_t off;
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct fs *fs;
	struct buf *bp;
	size_t io_len;
	daddr_t bn;
	int err;
	int contig;
	int dotrans;

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	if (ufsvfsp == NULL) {
		err = EIO;
		goto out_trace;
	}

	fs = ip->i_fs;
	ASSERT(fs->fs_ronly == 0);

	/*
	 * If the modified time on the inode has not already been
	 * set elsewhere (e.g. for write/setattr) we set the time now.
	 * This gives us approximate modified times for mmap'ed files
	 * which are modified via stores in the user address space.
	 */
	if ((ip->i_flag & IMODTIME) == 0) {
		mutex_enter(&ip->i_tlock);
		ip->i_flag |= IUPD;
		ip->i_seq++;
		ITIMES_NOLOCK(ip);
		mutex_exit(&ip->i_tlock);
	}

	/*
	 * Align the request to a block boundary (for old file systems),
	 * and go ask bmap() how contiguous things are for this file.
	 */
	off = pp->p_offset & (offset_t)fs->fs_bmask;	/* block align it */
	contig = 0;
	err = bmap_read(ip, off, &bn, &contig);
	if (err)
		goto out;
	if (bn == UFS_HOLE) {			/* putpage never allocates */
		/*
		 * logging device is in error mode; simply return EIO
		 */
		if (TRANS_ISERROR(ufsvfsp)) {
			err = EIO;
			goto out;
		}
		/*
		 * Oops, the thread in the window in wrip() did some
		 * sort of operation which caused a putpage in the bad
		 * range.  In this case, just return an error which will
		 * cause the software modified bit on the page to set
		 * and the page will get written out again later.
		 */
		if (ip->i_writer == curthread) {
			err = EIO;
			goto out;
		}
		/*
		 * If the pager is trying to push a page in the bad range
		 * just tell him to try again later when things are better.
		 */
		if (flags & B_ASYNC) {
			err = EAGAIN;
			goto out;
		}
		err = ufs_fault(ITOV(ip), "ufs_putapage: bn == UFS_HOLE");
		goto out;
	}

	/*
	 * If it is an fallocate'd block, reverse the negativity since
	 * we are now writing to it
	 */
	if (ISFALLOCBLK(ip, bn)) {
		err = bmap_set_bn(vp, off, dbtofsb(fs, -bn));
		if (err)
			goto out;

		bn = -bn;
	}

	/*
	 * Take the length (of contiguous bytes) passed back from bmap()
	 * and _try_ and get a set of pages covering that extent.
	 */
	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, off, contig, flags);

	/*
	 * May have run out of memory and not clustered backwards.
	 * off		p_offset
	 * [  pp - 1  ][   pp   ]
	 * [	block		]
	 * We told bmap off, so we have to adjust the bn accordingly.
	 */
	if (io_off > off) {
		bn += btod(io_off - off);
		contig -= (io_off - off);
	}

	/*
	 * bmap was careful to tell us the right size so use that.
	 * There might be unallocated frags at the end.
	 * LMXXX - bzero the end of the page?  We must be writing after EOF.
	 */
	if (io_len > contig) {
		ASSERT(io_len - contig < fs->fs_bsize);
		io_len -= (io_len - contig);
	}

	/*
	 * Handle the case where we are writing the last page after EOF.
	 *
	 * XXX - just a patch for i-mt3.
	 */
	if (io_len == 0) {
		ASSERT(pp->p_offset >=
		    (u_offset_t)(roundup(ip->i_size, PAGESIZE)));
		io_len = PAGESIZE;
	}
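	/*
	 * Worked example (added commentary, hypothetical numbers): if bmap
	 * reported the block for off 0x0 but pvn_write_kluster() could only
	 * gather pages starting at io_off 0x2000, the adjustment above adds
	 * btod(0x2000) device blocks to bn and trims 0x2000 bytes from
	 * contig, so the write below still lands on the device address that
	 * corresponds to the first page actually being pushed.
	 */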
	bp = pageio_setup(pp, io_len, ip->i_devvp, B_WRITE | flags);

	ULOCKFS_SET_MOD(ITOUL(ip));

	bp->b_edev = ip->i_dev;
	bp->b_dev = cmpdev(ip->i_dev);
	bp->b_blkno = bn;
	bp->b_un.b_addr = (caddr_t)0;
	bp->b_file = ip->i_vnode;
	bp->b_offset = off;

	/*
	 * File contents of shadow or quota inodes are metadata, and updates
	 * to these need to be put into a logging transaction. All direct
	 * callers in UFS do that, but fsflush can come here _before_ the
	 * normal codepath. An example would be updating ACL information, for
	 * which the normal codepath would be:
	 *	ufs_si_store()
	 *	ufs_rdwri()
	 *	wrip()
	 *	segmap_release()
	 *	VOP_PUTPAGE()
	 * Here, fsflush can pick up the dirty page before segmap_release()
	 * forces it out. If that happens, there's no transaction.
	 * We therefore need to test whether a transaction exists, and if not
	 * create one - for fsflush.
	 */
	dotrans =
	    (((ip->i_mode & IFMT) == IFSHAD || ufsvfsp->vfs_qinod == ip) &&
	    ((curthread->t_flag & T_DONTBLOCK) == 0) &&
	    (TRANS_ISTRANS(ufsvfsp)));

	if (dotrans) {
		curthread->t_flag |= T_DONTBLOCK;
		TRANS_BEGIN_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
	}
	if (TRANS_ISTRANS(ufsvfsp)) {
		if ((ip->i_mode & IFMT) == IFSHAD) {
			TRANS_BUF(ufsvfsp, 0, io_len, bp, DT_SHAD);
		} else if (ufsvfsp->vfs_qinod == ip) {
			TRANS_DELTA(ufsvfsp, ldbtob(bn), bp->b_bcount, DT_QR,
			    0, 0);
		}
	}
	if (dotrans) {
		TRANS_END_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
		curthread->t_flag &= ~T_DONTBLOCK;
	}

	/* write throttle */

	ASSERT(bp->b_iodone == NULL);
	bp->b_iodone = (int (*)())ufs_iodone;
	mutex_enter(&ip->i_tlock);
	ip->i_writes += bp->b_bcount;
	mutex_exit(&ip->i_tlock);

	if (bp->b_flags & B_ASYNC) {
		if (ufsvfsp->vfs_log) {
			lufs_write_strategy(ufsvfsp->vfs_log, bp);
		} else if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
			ub.ub_putasyncs.value.ul++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		}
	} else {
		if (ufsvfsp->vfs_log) {
			lufs_write_strategy(ufsvfsp->vfs_log, bp);
		} else if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
			ub.ub_putsyncs.value.ul++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		}
		err = biowait(bp);
		pageio_done(bp);
		pvn_write_done(pp, ((err) ? B_ERROR : 0) | B_WRITE | flags);
	}

	pp = NULL;

out:
	if (err != 0 && pp != NULL)
		pvn_write_done(pp, B_ERROR | B_WRITE | flags);

	if (offp)
		*offp = io_off;
	if (lenp)
		*lenp = io_len;
out_trace:
	return (err);
}
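/*
 * Added commentary (not in the original source): for B_ASYNC requests the
 * buf completes through ufs_iodone() and the pages are released by the
 * asynchronous completion path, while the synchronous branch biowait()s
 * and calls pvn_write_done() itself. In both cases *offp and *lenp
 * report how far the kluster actually extended, which ufs_putpages()
 * uses to skip ahead in its scan.
 */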
uint64_t ufs_map_alock_retry_cnt;
uint64_t ufs_map_lockfs_retry_cnt;

/* ARGSUSED */
static int
ufs_map(struct vnode *vp,
	offset_t off,
	struct as *as,
	caddr_t *addrp,
	size_t len,
	uchar_t prot,
	uchar_t maxprot,
	uint_t flags,
	struct cred *cr,
	caller_context_t *ct)
{
	struct segvn_crargs vn_a;
	struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
	struct ulockfs *ulp;
	int error, sig;
	k_sigset_t smask;
	caddr_t hint = *addrp;

	if (vp->v_flag & VNOMAP) {
		error = ENOSYS;
		goto out;
	}

	if (off < (offset_t)0 || (offset_t)(off + len) < (offset_t)0) {
		error = ENXIO;
		goto out;
	}

	if (vp->v_type != VREG) {
		error = ENODEV;
		goto out;
	}

retry_map:
	*addrp = hint;
	/*
	 * If file is being locked, disallow mapping.
	 */
	if (vn_has_mandatory_locks(vp, VTOI(vp)->i_mode)) {
		error = EAGAIN;
		goto out;
	}

	as_rangelock(as);
	/*
	 * Note that if we are retrying (because ufs_lockfs_trybegin failed in
	 * the previous attempt), some other thread could have grabbed
	 * the same VA range if MAP_FIXED is set. In that case, choose_addr
	 * would unmap the valid VA range, that is ok.
	 */
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		goto out;
	}

	/*
	 * a_lock has to be acquired before entering the lockfs protocol
	 * because that is the order in which pagefault works. Also we cannot
	 * block on a_lock here because this waiting writer will prevent
	 * further readers like ufs_read from progressing and could cause
	 * deadlock between ufs_read/ufs_map/pagefault when a quiesce is
	 * pending.
	 */
	while (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_WRITER)) {
		ufs_map_alock_retry_cnt++;
		delay(RETRY_LOCK_DELAY);
	}

	/*
	 * We can't hold as->a_lock and wait for lockfs to succeed because
	 * the proc tools might hang on a_lock, so call ufs_lockfs_trybegin()
	 * instead.
	 */
	if (error = ufs_lockfs_trybegin(ufsvfsp, &ulp, ULOCKFS_MAP_MASK)) {
		/*
		 * ufs_lockfs_trybegin() did not succeed. It is safer to give
		 * up as->a_lock and wait for ulp->ul_fs_lock status to change.
		 */
		ufs_map_lockfs_retry_cnt++;
		AS_LOCK_EXIT(as, &as->a_lock);
		as_rangeunlock(as);
		if (error == EIO)
			goto out;

		mutex_enter(&ulp->ul_lock);
		while (ulp->ul_fs_lock & ULOCKFS_MAP_MASK) {
			if (ULOCKFS_IS_SLOCK(ulp) || ufsvfsp->vfs_nointr) {
				cv_wait(&ulp->ul_cv, &ulp->ul_lock);
			} else {
				sigintr(&smask, 1);
				sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);
				sigunintr(&smask);
				if (((ulp->ul_fs_lock & ULOCKFS_MAP_MASK) &&
				    !sig) || ufsvfsp->vfs_dontblock) {
					mutex_exit(&ulp->ul_lock);
					return (EINTR);
				}
			}
		}
		mutex_exit(&ulp->ul_lock);
		goto retry_map;
	}

	vn_a.vp = vp;
	vn_a.offset = (u_offset_t)off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = prot;
	vn_a.maxprot = maxprot;
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map_locked(as, *addrp, len, segvn_create, &vn_a);
	if (ulp)
		ufs_lockfs_end(ulp);
	as_rangeunlock(as);
out:
	return (error);
}
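/*
 * Added commentary (not in the original source): the ordering above is
 * a_lock first (non-blocking, with delay()/retry), then the lockfs
 * protocol, mirroring the pagefault path. Blocking on a_lock here, or
 * taking the locks in the opposite order, could deadlock against a
 * pending quiesce; that is why a ufs_lockfs_trybegin() failure drops
 * a_lock entirely and waits on ul_cv before restarting at retry_map.
 */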
/* ARGSUSED */
static int
ufs_addmap(struct vnode *vp,
	offset_t off,
	struct as *as,
	caddr_t addr,
	size_t len,
	uchar_t prot,
	uchar_t maxprot,
	uint_t flags,
	struct cred *cr,
	caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (vp->v_flag & VNOMAP) {
		return (ENOSYS);
	}

	mutex_enter(&ip->i_tlock);
	ip->i_mapcnt += btopr(len);
	mutex_exit(&ip->i_tlock);

	return (0);
}
/*ARGSUSED*/
static int
ufs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uint_t prot, uint_t maxprot, uint_t flags,
	struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (vp->v_flag & VNOMAP) {
		return (ENOSYS);
	}

	mutex_enter(&ip->i_tlock);
	ip->i_mapcnt -= btopr(len);	/* Count released mappings */
	ASSERT(ip->i_mapcnt >= 0);
	mutex_exit(&ip->i_tlock);

	return (0);
}
/*
 * Return the answer requested to poll() for non-device files
 */
struct pollhead ufs_pollhd;

/* ARGSUSED */
static int
ufs_poll(vnode_t *vp, short ev, int any, short *revp, struct pollhead **phpp,
	caller_context_t *ct)
{
	struct ufsvfs	*ufsvfsp;

	*revp = 0;
	ufsvfsp = VTOI(vp)->i_ufsvfs;

	if (!ufsvfsp)
		goto out;

	if (ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs) ||
	    ULOCKFS_IS_ELOCK(&ufsvfsp->vfs_ulockfs)) {
		*revp |= POLLERR;

	} else {
		if ((ev & POLLOUT) && !ufsvfsp->vfs_fs->fs_ronly &&
		    !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
			*revp |= POLLOUT;

		if ((ev & POLLWRBAND) && !ufsvfsp->vfs_fs->fs_ronly &&
		    !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
			*revp |= POLLWRBAND;

		if (ev & POLLIN)
			*revp |= POLLIN;

		if (ev & POLLRDNORM)
			*revp |= POLLRDNORM;

		if (ev & POLLRDBAND)
			*revp |= POLLRDBAND;
	}

	if ((ev & POLLPRI) && (*revp & (POLLERR|POLLHUP)))
		*revp |= POLLPRI;
out:
	*phpp = !any && !*revp ? &ufs_pollhd : (struct pollhead *)NULL;

	return (0);
}
/* ARGSUSED */
static int
ufs_l_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
	caller_context_t *ct)
{
	struct ufsvfs	*ufsvfsp = VTOI(vp)->i_ufsvfs;
	struct ulockfs	*ulp = NULL;
	struct inode	*sip = NULL;
	int		error;
	struct inode	*ip = VTOI(vp);
	int		issync;

	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_PATHCONF_MASK);
	if (error)
		return (error);

	switch (cmd) {
		/*
		 * Have to handle _PC_NAME_MAX here, because the normal way
		 * [fs_pathconf() -> VOP_STATVFS() -> ufs_statvfs()]
		 * results in a lock ordering reversal between
		 * ufs_lockfs_{begin,end}() and
		 * ufs_thread_{suspend,continue}().
		 *
		 * Keep in sync with ufs_statvfs().
		 */
	case _PC_NAME_MAX:
		*valp = MAXNAMLEN;
		break;

	case _PC_FILESIZEBITS:
		if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
			*valp = UFS_FILESIZE_BITS;
		else
			*valp = 32;
		break;

	case _PC_XATTR_EXISTS:
		if (vp->v_vfsp->vfs_flag & VFS_XATTR) {

			error =
			    ufs_xattr_getattrdir(vp, &sip, LOOKUP_XATTR, cr);
			if (error == 0 && sip != NULL) {
				/* Start transaction */
				if (ulp) {
					TRANS_BEGIN_CSYNC(ufsvfsp, issync,
					    TOP_RMDIR, TOP_RMDIR_SIZE);
				}
				/*
				 * Is directory empty
				 */
				rw_enter(&sip->i_rwlock, RW_WRITER);
				rw_enter(&sip->i_contents, RW_WRITER);
				if (ufs_xattrdirempty(sip,
				    sip->i_number, CRED())) {
					rw_enter(&ip->i_contents, RW_WRITER);
					ufs_unhook_shadow(ip, sip);
					rw_exit(&ip->i_contents);

					*valp = 0;

				} else
					*valp = 1;
				rw_exit(&sip->i_contents);
				rw_exit(&sip->i_rwlock);
				if (ulp) {
					TRANS_END_CSYNC(ufsvfsp, error, issync,
					    TOP_RMDIR, TOP_RMDIR_SIZE);
				}
				VN_RELE(ITOV(sip));
			} else if (error == ENOENT) {
				*valp = 0;
				error = 0;
			}
		} else {
			error = fs_pathconf(vp, cmd, valp, cr, ct);
		}
		break;

	case _PC_ACL_ENABLED:
		*valp = _ACL_ACLENT_ENABLED;
		break;

	case _PC_MIN_HOLE_SIZE:
		*valp = (ulong_t)ip->i_fs->fs_bsize;
		break;

	case _PC_SATTR_ENABLED:
	case _PC_SATTR_EXISTS:
		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
		    (vp->v_type == VREG || vp->v_type == VDIR);
		break;

	case _PC_TIMESTAMP_RESOLUTION:
		/*
		 * UFS keeps only microsecond timestamp resolution.
		 * This is historical and will probably never change.
		 */
		*valp = 1000L;
		break;

	default:
		error = fs_pathconf(vp, cmd, valp, cr, ct);
		break;
	}

	if (ulp != NULL) {
		ufs_lockfs_end(ulp);
	}
	return (error);
}
int ufs_pageio_writes, ufs_pageio_reads;

/*ARGSUSED*/
static int
ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
	int flags, struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
	page_t *npp = NULL, *opp = NULL, *cpp = pp;
	struct buf *bp;
	daddr_t bn;
	size_t done_len = 0, cur_len = 0;
	int err = 0;
	int contig = 0;
	int dolock;
	int vmpss = 0;
	struct ulockfs *ulp;

	if ((flags & B_READ) && pp != NULL && pp->p_vnode == vp &&
	    vp->v_mpssdata != NULL) {
		vmpss = 1;
	}

	dolock = (rw_owner(&ip->i_contents) != curthread);
	/*
	 * We need a better check.  Ideally, we would use another
	 * vnodeops so that hlocked and forcibly unmounted file
	 * systems would return EIO where appropriate and w/o the
	 * need for these checks.
	 */
	if ((ufsvfsp = ip->i_ufsvfs) == NULL)
		return (EIO);

	/*
	 * For vmpss (pp can be NULL) case respect the quiesce protocol.
	 * ul_lock must be taken before locking pages so we can't use it here
	 * if pp is non NULL because segvn already locked pages
	 * SE_EXCL. Instead we rely on the fact that a forced umount or
	 * applying a filesystem lock via ufs_fiolfs() will block in the
	 * implicit call to ufs_flush() until we unlock the pages after the
	 * return to segvn. Other ufs_quiesce() callers keep ufs_quiesce_pend
	 * above 0 until they are done. We have to be careful not to increment
	 * ul_vnops_cnt here after forceful unmount hlocks the file system.
	 *
	 * If pp is NULL use ul_lock to make sure we don't increment
	 * ul_vnops_cnt after forceful unmount hlocks the file system.
	 */
	if (vmpss || pp == NULL) {
		ulp = &ufsvfsp->vfs_ulockfs;
		if (pp == NULL)
			mutex_enter(&ulp->ul_lock);
		if (ulp->ul_fs_lock & ULOCKFS_GETREAD_MASK) {
			if (pp == NULL) {
				mutex_exit(&ulp->ul_lock);
			}
			return (vmpss ? EIO : EINVAL);
		}
		atomic_inc_ulong(&ulp->ul_vnops_cnt);
		if (pp == NULL)
			mutex_exit(&ulp->ul_lock);
		if (ufs_quiesce_pend) {
			if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
				cv_broadcast(&ulp->ul_cv);
			return (vmpss ? EIO : EINVAL);
		}
	}

	if (dolock) {
		/*
		 * segvn may call VOP_PAGEIO() instead of VOP_GETPAGE() to
		 * handle a fault against a segment that maps vnode pages with
		 * large mappings.  Segvn creates pages and holds them locked
		 * SE_EXCL during VOP_PAGEIO() call. In this case we have to
		 * use rw_tryenter() to avoid a potential deadlock since in
		 * lock order i_contents needs to be taken first.
		 * Segvn will retry via VOP_GETPAGE() if VOP_PAGEIO() fails.
		 */
		if (!vmpss) {
			rw_enter(&ip->i_contents, RW_READER);
		} else if (!rw_tryenter(&ip->i_contents, RW_READER)) {
			if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
				cv_broadcast(&ulp->ul_cv);
			return (EDEADLK);
		}
	}

	/*
	 * Return an error to segvn because the pagefault request is beyond
	 * PAGESIZE rounded EOF.
	 */
	if (vmpss && btopr(io_off + io_len) > btopr(ip->i_size)) {
		if (dolock)
			rw_exit(&ip->i_contents);
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
		return (EFAULT);
	}

	if (pp == NULL) {
		if (bmap_has_holes(ip)) {
			err = ENOSYS;
		} else {
			err = EINVAL;
		}
		if (dolock)
			rw_exit(&ip->i_contents);
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
		return (err);
	}

	/*
	 * Break the io request into chunks, one for each contiguous
	 * stretch of disk blocks in the target file.
	 */
	while (done_len < io_len) {
		ASSERT(cpp);
		contig = 0;
		if (err = bmap_read(ip, (u_offset_t)(io_off + done_len),
		    &bn, &contig))
			break;

		if (bn == UFS_HOLE) {	/* No holey swapfiles */
			if (vmpss) {
				err = EFAULT;
				break;
			}
			err = ufs_fault(ITOV(ip), "ufs_pageio: bn == UFS_HOLE");
			break;
		}

		cur_len = MIN(io_len - done_len, contig);
		/*
		 * Zero out a page beyond EOF, when the last block of
		 * a file is a UFS fragment so that ufs_pageio() can be used
		 * instead of ufs_getpage() to handle faults against
		 * segvn segments that use large pages.
		 */
		page_list_break(&cpp, &npp, btopr(cur_len));
		if ((flags & B_READ) && (cur_len & PAGEOFFSET)) {
			size_t xlen = cur_len & PAGEOFFSET;
			pagezero(cpp->p_prev, xlen, PAGESIZE - xlen);
		}

		bp = pageio_setup(cpp, cur_len, ip->i_devvp, flags);

		bp->b_edev = ip->i_dev;
		bp->b_dev = cmpdev(ip->i_dev);
		bp->b_blkno = bn;
		bp->b_un.b_addr = (caddr_t)0;
		bp->b_file = ip->i_vnode;

		ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
		ub.ub_pageios.value.ul++;
		if (ufsvfsp->vfs_snapshot)
			fssnap_strategy(&(ufsvfsp->vfs_snapshot), bp);
		else
			(void) bdev_strategy(bp);

		if (flags & B_READ)
			ufs_pageio_reads++;
		else
			ufs_pageio_writes++;
		if (flags & B_READ)
			lwp_stat_update(LWP_STAT_INBLK, 1);
		else
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		/*
		 * If the request is not B_ASYNC, wait for i/o to complete
		 * and re-assemble the page list to return to the caller.
		 * If it is B_ASYNC we leave the page list in pieces and
		 * cleanup() will dispose of them.
		 */
		if ((flags & B_ASYNC) == 0) {
			err = biowait(bp);
			pageio_done(bp);
			if (err)
				break;
			page_list_concat(&opp, &cpp);
		}
		cpp = npp;
		npp = NULL;
		if (flags & B_READ)
			cur_len = P2ROUNDUP_TYPED(cur_len, PAGESIZE, size_t);
		done_len += cur_len;
	}
	ASSERT(err || (cpp == NULL && npp == NULL && done_len == io_len));
	if (err) {
		if (flags & B_ASYNC) {
			/* Cleanup unprocessed parts of list */
			page_list_concat(&cpp, &npp);
			if (flags & B_READ)
				pvn_read_done(cpp, B_ERROR);
			else
				pvn_write_done(cpp, B_ERROR);
		} else {
			/* Re-assemble list and let caller clean up */
			page_list_concat(&opp, &cpp);
			page_list_concat(&opp, &npp);
		}
	}

	if (vmpss && !(ip->i_flag & IACC) && !ULOCKFS_IS_NOIACC(ulp) &&
	    ufsvfsp->vfs_fs->fs_ronly == 0 && !ufsvfsp->vfs_noatime) {
		mutex_enter(&ip->i_tlock);
		ip->i_flag |= IACC;
		ITIMES_NOLOCK(ip);
		mutex_exit(&ip->i_tlock);
	}

	if (dolock)
		rw_exit(&ip->i_contents);
	if (vmpss && !atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
		cv_broadcast(&ulp->ul_cv);
	return (err);
}
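/*
 * Added commentary (not in the original source): ufs_pageio() issues one
 * buf per bmap-contiguous chunk. For a 24K request whose first 16K are
 * contiguous on disk and whose last 8K lives elsewhere (a hypothetical
 * layout), the loop above runs twice, splitting the page list with
 * page_list_break() and, for synchronous callers, reassembling it with
 * page_list_concat() before returning.
 */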
/*
 * Called when the kernel is in a frozen state to dump data
 * directly to the device. It uses a private dump data structure,
 * set up by dump_ctl, to locate the correct disk block to which to dump.
 */
/*ARGSUSED*/
static int
ufs_dump(vnode_t *vp, caddr_t addr, offset_t ldbn, offset_t dblks,
	caller_context_t *ct)
{
	u_offset_t	file_size;
	struct inode	*ip = VTOI(vp);
	struct fs	*fs = ip->i_fs;
	daddr_t		dbn, lfsbn;
	int		disk_blks = fs->fs_bsize >> DEV_BSHIFT;
	int		error = 0;
	int		ndbs, nfsbs;

	/*
	 * forced unmount case
	 */
	if (ip->i_ufsvfs == NULL)
		return (EIO);
	/*
	 * Validate the inode that it has not been modified since
	 * the dump structure is allocated.
	 */
	mutex_enter(&ip->i_tlock);
	if ((dump_info == NULL) ||
	    (dump_info->ip != ip) ||
	    (dump_info->time.tv_sec != ip->i_mtime.tv_sec) ||
	    (dump_info->time.tv_usec != ip->i_mtime.tv_usec)) {
		mutex_exit(&ip->i_tlock);
		return (-1);
	}
	mutex_exit(&ip->i_tlock);

	/*
	 * See that the file has room for this write
	 */
	UFS_GET_ISIZE(&file_size, ip);

	if (ldbtob(ldbn + dblks) > file_size)
		return (ENOSPC);

	/*
	 * Find the physical disk block numbers from the dump
	 * private data structure directly and write out the data
	 * in contiguous block lumps
	 */
	while (dblks > 0 && !error) {
		lfsbn = (daddr_t)lblkno(fs, ldbtob(ldbn));
		dbn = fsbtodb(fs, dump_info->dblk[lfsbn]) + ldbn % disk_blks;
		nfsbs = 1;
		ndbs = disk_blks - ldbn % disk_blks;
		while (ndbs < dblks && fsbtodb(fs, dump_info->dblk[lfsbn +
		    nfsbs]) == dbn + ndbs) {
			nfsbs++;
			ndbs += disk_blks;
		}
		if (ndbs > dblks)
			ndbs = dblks;
		error = bdev_dump(ip->i_dev, addr, dbn, ndbs);
		addr += ldbtob((offset_t)ndbs);
		dblks -= ndbs;
		ldbn += ndbs;
	}
	return (error);

}
/*
 * Prepare the file system before and after the dump operation.
 *
 * action = DUMP_ALLOC:
 * Preparation before dump, allocate dump private data structure
 * to hold all the direct and indirect block info for dump.
 *
 * action = DUMP_FREE:
 * Clean up after dump, deallocate the dump private data structure.
 *
 * action = DUMP_SCAN:
 * Scan dump_info for *blkp DEV_BSIZE blocks of contig fs space;
 * if found, the starting file-relative DEV_BSIZE lbn is written
 * to *blkp; that lbn is intended for use with VOP_DUMP()
 */
/*ARGSUSED*/
static int
ufs_dumpctl(vnode_t *vp, int action, offset_t *blkp, caller_context_t *ct)
{
	struct inode	*ip = VTOI(vp);
	ufsvfs_t	*ufsvfsp = ip->i_ufsvfs;
	struct fs	*fs;
	daddr32_t	*dblk, *storeblk;
	daddr32_t	*nextblk, *endblk;
	struct buf	*bp;
	int		i, entry, entries;
	int		n, ncontig;

	/*
	 * check for forced unmount
	 */
	if (ufsvfsp == NULL)
		return (EIO);

	if (action == DUMP_ALLOC) {
		/*
		 * alloc and record dump_info
		 */
		if (dump_info != NULL)
			return (EINVAL);

		ASSERT(vp->v_type == VREG);
		fs = ufsvfsp->vfs_fs;

		rw_enter(&ip->i_contents, RW_READER);

		if (bmap_has_holes(ip)) {
			rw_exit(&ip->i_contents);
			return (EFAULT);
		}

		/*
		 * calculate and allocate space needed according to i_size
		 */
		entries = (int)lblkno(fs, blkroundup(fs, ip->i_size));
		dump_info = kmem_alloc(sizeof (struct dump) +
		    (entries - 1) * sizeof (daddr32_t), KM_NOSLEEP);
		if (dump_info == NULL) {
			rw_exit(&ip->i_contents);
			return (ENOMEM);
		}
		/* Start saving the info */
		dump_info->fsbs = entries;
		dump_info->ip = ip;
		storeblk = &dump_info->dblk[0];

		/* Direct Blocks */
		for (entry = 0; entry < NDADDR && entry < entries; entry++)
			*storeblk++ = ip->i_db[entry];

		/* Indirect Blocks */
		for (i = 0; i < NIADDR; i++) {
			int error = 0;

			bp = UFS_BREAD(ufsvfsp,
			    ip->i_dev, fsbtodb(fs, ip->i_ib[i]), fs->fs_bsize);
			if (bp->b_flags & B_ERROR)
				error = EIO;
			else {
				dblk = bp->b_un.b_daddr;
				if ((storeblk = save_dblks(ip, ufsvfsp,
				    storeblk, dblk, i, entries)) == NULL)
					error = EIO;
			}

			brelse(bp);

			if (error != 0) {
				kmem_free(dump_info, sizeof (struct dump) +
				    (entries - 1) * sizeof (daddr32_t));
				rw_exit(&ip->i_contents);
				dump_info = NULL;
				return (error);
			}
		}
		/* and time stamp the information */
		mutex_enter(&ip->i_tlock);
		dump_info->time = ip->i_mtime;
		mutex_exit(&ip->i_tlock);

		rw_exit(&ip->i_contents);
	} else if (action == DUMP_FREE) {
		/*
		 * free dump_info
		 */
		if (dump_info == NULL)
			return (EINVAL);
		entries = dump_info->fsbs - 1;
		kmem_free(dump_info, sizeof (struct dump) +
		    entries * sizeof (daddr32_t));
		dump_info = NULL;
	} else if (action == DUMP_SCAN) {
		/*
		 * scan dump_info
		 */
		if (dump_info == NULL)
			return (EINVAL);

		dblk = dump_info->dblk;
		nextblk = dblk + 1;
		endblk = dblk + dump_info->fsbs - 1;
		fs = ufsvfsp->vfs_fs;
		ncontig = *blkp >> (fs->fs_bshift - DEV_BSHIFT);

		/*
		 * scan dblk[] entries; contig fs space is found when:
		 * ((current blkno + frags per block) == next blkno)
		 */
		n = 0;
		while (n < ncontig && dblk < endblk) {
			if ((*dblk + fs->fs_frag) == *nextblk)
				n++;
			else
				n = 0;
			dblk++;
			nextblk++;
		}

		/*
		 * index is where size bytes of contig space begins;
		 * conversion from index to the file's DEV_BSIZE lbn
		 * is equivalent to:  (index * fs_bsize) / DEV_BSIZE
		 */
		if (n == ncontig) {
			i = (dblk - dump_info->dblk) - ncontig;
			*blkp = i << (fs->fs_bshift - DEV_BSHIFT);
		} else
			return (EFAULT);
	}
	return (0);
}
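/*
 * Worked example (added commentary, hypothetical geometry): with
 * fs_bshift == 13 (8K blocks) and DEV_BSHIFT == 9 (512-byte device
 * blocks), a caller requesting *blkp == 32 device blocks of contiguous
 * space needs ncontig == 32 >> 4 == 2 adjacent file system blocks; on
 * success the matching index converts back as i << 4, which is the
 * (index * fs_bsize) / DEV_BSIZE relation noted in DUMP_SCAN above.
 */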
/*
 * Recursive helper function for ufs_dumpctl().  It follows the indirect file
 * system blocks until it reaches the disk block addresses, which are
 * then stored into the given buffer, storeblk.
 */
static daddr32_t *
save_dblks(struct inode *ip, struct ufsvfs *ufsvfsp,  daddr32_t *storeblk,
	daddr32_t *dblk, int level, int entries)
{
	struct fs	*fs = ufsvfsp->vfs_fs;
	struct buf	*bp;
	int		i;

	if (level == 0) {
		for (i = 0; i < NINDIR(fs); i++) {
			if (storeblk - dump_info->dblk >= entries)
				break;
			*storeblk++ = dblk[i];
		}
		return (storeblk);
	}
	for (i = 0; i < NINDIR(fs); i++) {
		if (storeblk - dump_info->dblk >= entries)
			break;
		bp = UFS_BREAD(ufsvfsp,
		    ip->i_dev, fsbtodb(fs, dblk[i]), fs->fs_bsize);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			return (NULL);
		}
		storeblk = save_dblks(ip, ufsvfsp, storeblk, bp->b_un.b_daddr,
		    level - 1, entries);
		brelse(bp);

		if (storeblk == NULL)
			return (NULL);
	}
	return (storeblk);
}
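/*
 * Added commentary (not in the original source): each indirect block
 * holds NINDIR(fs) == fs_bsize / sizeof (daddr32_t) entries, e.g. 2048
 * for an 8K block size, and UFS has NIADDR (3) levels of indirection,
 * so the recursion above is bounded and the entries check guarantees it
 * stops exactly at the space ufs_dumpctl() allocated for dump_info.
 */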
/* ARGSUSED */
static int
ufs_getsecattr(struct vnode *vp, vsecattr_t *vsap, int flag,
	struct cred *cr, caller_context_t *ct)
{
	struct inode	*ip = VTOI(vp);
	struct ulockfs	*ulp;
	struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
	ulong_t		vsa_mask = vsap->vsa_mask;
	int		err = EINVAL;

	vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);

	/*
	 * Only grab locks if needed - they're not needed to check vsa_mask
	 * or if the mask contains no acl flags.
	 */
	if (vsa_mask != 0) {
		if (err = ufs_lockfs_begin(ufsvfsp, &ulp,
		    ULOCKFS_GETATTR_MASK))
			return (err);

		rw_enter(&ip->i_contents, RW_READER);
		err = ufs_acl_get(ip, vsap, flag, cr);
		rw_exit(&ip->i_contents);

		if (ulp)
			ufs_lockfs_end(ulp);
	}
	return (err);
}
/* ARGSUSED */
static int
ufs_setsecattr(struct vnode *vp, vsecattr_t *vsap, int flag, struct cred *cr,
	caller_context_t *ct)
{
	struct inode	*ip = VTOI(vp);
	struct ulockfs	*ulp = NULL;
	struct ufsvfs	*ufsvfsp = VTOI(vp)->i_ufsvfs;
	ulong_t		vsa_mask = vsap->vsa_mask;
	int		err;
	int		haverwlock = 1;
	int		trans_size;
	int		retry = 1;

	ASSERT(RW_LOCK_HELD(&ip->i_rwlock));

	/* Abort now if the request is either empty or invalid. */
	vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);
	if ((vsa_mask == 0) ||
	    ((vsap->vsa_aclentp == NULL) &&
	    (vsap->vsa_dfaclentp == NULL))) {
		err = EINVAL;
		goto out;
	}

	/*
	 * Following convention, if this is a directory then we acquire the
	 * inode's i_rwlock after starting a UFS logging transaction;
	 * otherwise, we acquire it beforehand. Since we were called (and
	 * must therefore return) with the lock held, we will have to drop it,
	 * and later reacquire it, if operating on a directory.
	 */
	if (vp->v_type == VDIR) {
		rw_exit(&ip->i_rwlock);
		haverwlock = 0;
	} else {
		/* Upgrade the lock if required. */
		if (!rw_write_held(&ip->i_rwlock)) {
			rw_exit(&ip->i_rwlock);
			rw_enter(&ip->i_rwlock, RW_WRITER);
		}
	}

again:
	ASSERT(!(vp->v_type == VDIR && haverwlock));
	if (err = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK)) {
		ulp = NULL;
		retry = 0;
		goto out;
	}

	/*
	 * Check that the file system supports this operation. Note that
	 * ufs_lockfs_begin() will have checked that the file system had
	 * not been forcibly unmounted.
	 */
	if (ufsvfsp->vfs_fs->fs_ronly) {
		err = EROFS;
		goto out;
	}
	if (ufsvfsp->vfs_nosetsec) {
		err = ENOSYS;
		goto out;
	}

	if (ulp) {
		TRANS_BEGIN_ASYNC(ufsvfsp, TOP_SETSECATTR,
		    trans_size = TOP_SETSECATTR_SIZE(VTOI(vp)));
	}

	if (vp->v_type == VDIR) {
		rw_enter(&ip->i_rwlock, RW_WRITER);
		haverwlock = 1;
	}

	ASSERT(haverwlock);

	/* Do the actual work. */
	rw_enter(&ip->i_contents, RW_WRITER);
	/*
	 * Suppress out of inodes messages if we will retry.
	 */
	if (retry)
		ip->i_flag |= IQUIET;
	err = ufs_acl_set(ip, vsap, flag, cr);
	ip->i_flag &= ~IQUIET;
	rw_exit(&ip->i_contents);

	if (ulp) {
		/*
		 * top_end_async() can eventually call
		 * top_end_sync(), which can block. We must
		 * therefore observe the lock-ordering protocol
		 * here as well.
		 */
		if (vp->v_type == VDIR) {
			rw_exit(&ip->i_rwlock);
			haverwlock = 0;
		}
		TRANS_END_ASYNC(ufsvfsp, TOP_SETSECATTR, trans_size);
		ufs_lockfs_end(ulp);
	}

out:
	/*
	 * If no inodes available, try scaring a logically-
	 * free one out of the delete queue to someplace
	 * that we can find it.
	 */
	if ((err == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
		ufs_delete_drain_wait(ufsvfsp, 1);
		retry = 0;
		if (vp->v_type == VDIR && haverwlock) {
			rw_exit(&ip->i_rwlock);
			haverwlock = 0;
		}
		goto again;
	}
	/*
	 * If we need to reacquire the lock then it is safe to do so
	 * as a reader. This is because ufs_rwunlock(), which will be
	 * called by our caller after we return, does not differentiate
	 * between shared and exclusive locks.
	 */
	if (!haverwlock) {
		ASSERT(vp->v_type == VDIR);
		rw_enter(&ip->i_rwlock, RW_READER);
	}

	return (err);
}
/*
 * Locate the vnode to be used for an event notification. As this will
 * be called prior to the name space change perform basic verification
 * that the change will be allowed.
 */

static int
ufs_eventlookup(struct vnode *dvp, char *nm, struct cred *cr,
	struct vnode **vpp)
{
	int	namlen;
	int	error;
	struct vnode	*vp;
	struct inode	*ip;
	struct inode	*xip;
	struct ufsvfs	*ufsvfsp;
	struct ulockfs	*ulp;

	ip = VTOI(dvp);
	*vpp = NULL;

	if ((namlen = strlen(nm)) == 0)
		return (EINVAL);

	if (nm[0] == '.') {
		if (namlen == 1)
			return (EINVAL);
		else if ((namlen == 2) && nm[1] == '.') {
			return (EEXIST);
		}
	}

	/*
	 * Check accessibility and write access of parent directory as we
	 * only want to post the event if we're able to make a change.
	 */
	if (error = ufs_diraccess(ip, IEXEC|IWRITE, cr))
		return (error);

	if (vp = dnlc_lookup(dvp, nm)) {
		if (vp == DNLC_NO_VNODE) {
			VN_RELE(vp);
			return (ENOENT);
		}

		*vpp = vp;
		return (0);
	}

	/*
	 * Keep the idle queue from getting too long by idling two
	 * inodes before attempting to allocate another.
	 * This operation must be performed before entering lockfs
	 * or a transaction.
	 */
	if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
		if ((curthread->t_flag & T_DONTBLOCK) == 0) {
			ins.in_lidles.value.ul += ufs_lookup_idle_count;
			ufs_idle_some(ufs_lookup_idle_count);
		}

	ufsvfsp = ip->i_ufsvfs;

retry_lookup:
	if (error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK))
		return (error);

	if ((error = ufs_dirlook(ip, nm, &xip, cr, 1, 1)) == 0) {
		vp = ITOV(xip);
		*vpp = vp;
	}

	if (ulp) {
		ufs_lockfs_end(ulp);
	}

	if (error == EAGAIN)
		goto retry_lookup;

	return (error);
}